commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
7707e65ed591b890d91bcb7bf22923b8c17a113a | Add tests from Gregor's PR | readthedocs/rtd_tests/tests/test_api_permissions.py | readthedocs/rtd_tests/tests/test_api_permissions.py | from functools import partial
from mock import Mock
from unittest import TestCase
from readthedocs.restapi.permissions import APIRestrictedPermission
class APIRestrictedPermissionTests(TestCase):
def get_request(self, method, is_admin):
request = Mock()
request.method = method
request.user.is_staff = is_admin
return request
def assertAllow(self, handler, method, is_admin, obj=None):
if obj is None:
self.assertTrue(handler.has_permission(
request=self.get_request(method, is_admin=is_admin),
view=None))
else:
self.assertTrue(handler.has_object_permission(
request=self.get_request(method, is_admin=is_admin),
view=None,
obj=obj))
def assertDisallow(self, handler, method, is_admin, obj=None):
if obj is None:
self.assertFalse(handler.has_permission(
request=self.get_request(method, is_admin=is_admin),
view=None))
else:
self.assertFalse(handler.has_object_permission(
request=self.get_request(method, is_admin=is_admin),
view=None,
obj=obj))
def test_non_object_permissions(self):
handler = APIRestrictedPermission()
assertAllow = partial(self.assertAllow, handler, obj=None)
assertDisallow = partial(self.assertDisallow, handler, obj=None)
assertAllow('GET', is_admin=False)
assertAllow('HEAD', is_admin=False)
assertAllow('OPTIONS', is_admin=False)
assertDisallow('DELETE', is_admin=False)
assertDisallow('PATCH', is_admin=False)
assertDisallow('POST', is_admin=False)
assertDisallow('PUT', is_admin=False)
assertAllow('GET', is_admin=True)
assertAllow('HEAD', is_admin=True)
assertAllow('OPTIONS', is_admin=True)
assertAllow('DELETE', is_admin=True)
assertAllow('PATCH', is_admin=True)
assertAllow('POST', is_admin=True)
assertAllow('PUT', is_admin=True)
def test_object_permissions(self):
handler = APIRestrictedPermission()
obj = Mock()
assertAllow = partial(self.assertAllow, handler, obj=obj)
assertDisallow = partial(self.assertDisallow, handler, obj=obj)
assertAllow('GET', is_admin=False)
assertAllow('HEAD', is_admin=False)
assertAllow('OPTIONS', is_admin=False)
assertDisallow('DELETE', is_admin=False)
assertDisallow('PATCH', is_admin=False)
assertDisallow('POST', is_admin=False)
assertDisallow('PUT', is_admin=False)
assertAllow('GET', is_admin=True)
assertAllow('HEAD', is_admin=True)
assertAllow('OPTIONS', is_admin=True)
assertAllow('DELETE', is_admin=True)
assertAllow('PATCH', is_admin=True)
assertAllow('POST', is_admin=True)
assertAllow('PUT', is_admin=True)
| Python | 0 | |
44f70a0c8ea9613214ce6305c262a8508b4bc598 | create add_user.py | add_user.py | add_user.py | #!/usr/bin/python
import bluetooth
print("Scanning for bluetooth devices in discoverable mode...")
nearby_devices = bluetooth.discover_devices(lookup_names = True)
for i, (addr, name) in enumerate(nearby_devices):
print("[{}] {} {}".format(i, addr, name))
num = raw_input("Enter the number of your device (or type anything else to quit)\n")
if num.isdigit() and 0 <= int(num) < len(nearby_devices):
addr, name = nearby_devices[int(num)]
maybe_name = raw_input("Enter a name for this device (or press enter to use '{}')\n".format(name))
if maybe_name != '':
name = maybe_name
with open("users.txt", "a") as users_file:
users_file.write("{} {}\n".format(addr, name))
print("Successfully added '{}'".format(name))
else:
exit()
| Python | 0.000005 | |
c8c679221e0a36ac6074c0869bfc4b75d9745ae2 | Create a.py | abc066/a.py | abc066/a.py | a, b, c = map(int, input().split())
print(min(a + b, b + c, a + c))
| Python | 0.000489 | |
365152787cae36c12691e4da52a0575bd56d7d1b | Add tests for tril, triu and find | tests/cupyx_tests/scipy_tests/sparse_tests/test_extract.py | tests/cupyx_tests/scipy_tests/sparse_tests/test_extract.py | import unittest
import numpy
try:
import scipy.sparse
scipy_available = True
except ImportError:
scipy_available = False
import cupy
from cupy import testing
from cupyx.scipy import sparse
@testing.parameterize(*testing.product({
'shape': [(8, 3), (4, 4), (3, 8)],
'a_format': ['dense', 'csr', 'csc', 'coo'],
'out_format': [None, 'csr', 'csc'],
}))
@unittest.skipUnless(scipy_available, 'requires scipy')
class TestExtract(unittest.TestCase):
density = 0.75
def _make_matrix(self, dtype):
a = testing.shaped_random(self.shape, numpy, dtype=dtype)
a[a > self.density] = 0
b = cupy.array(a)
if self.a_format == 'csr':
a = scipy.sparse.csr_matrix(a)
b = sparse.csr_matrix(b)
elif self.a_format == 'csc':
a = scipy.sparse.csc_matrix(a)
b = sparse.csc_matrix(b)
elif self.a_format == 'coo':
a = scipy.sparse.coo_matrix(a)
b = sparse.coo_matrix(b)
return a, b
@testing.for_dtypes('fdFD')
def test_tril(self, dtype):
np_a, cp_a = self._make_matrix(dtype)
m, n = self.shape
for k in range(-m+1, n):
np_out = scipy.sparse.tril(np_a, k=k, format=self.out_format)
cp_out = sparse.tril(cp_a, k=k, format=self.out_format)
assert np_out.format == cp_out.format
assert np_out.nnz == cp_out.nnz
cupy.testing.assert_allclose(np_out.todense(), cp_out.todense())
@testing.for_dtypes('fdFD')
def test_triu(self, dtype):
np_a, cp_a = self._make_matrix(dtype)
m, n = self.shape
for k in range(-m+1, n):
np_out = scipy.sparse.triu(np_a, k=k, format=self.out_format)
cp_out = sparse.triu(cp_a, k=k, format=self.out_format)
assert np_out.format == cp_out.format
assert np_out.nnz == cp_out.nnz
cupy.testing.assert_allclose(np_out.todense(), cp_out.todense())
@testing.for_dtypes('fdFD')
def test_find(self, dtype):
if self.out_format is not None:
unittest.SkipTest()
np_a, cp_a = self._make_matrix(dtype)
np_row, np_col, np_data = scipy.sparse.find(np_a)
cp_row, cp_col, cp_data = sparse.find(cp_a)
# Note: Check the results by reconstructing the sparse matrix from the
# results of find, as SciPy and CuPy differ in the data order.
np_out = scipy.sparse.coo_matrix((np_data, (np_row, np_col)),
shape=self.shape)
cp_out = sparse.coo_matrix((cp_data, (cp_row, cp_col)),
shape=self.shape)
cupy.testing.assert_allclose(np_out.todense(), cp_out.todense())
| Python | 0 | |
a4d3056bbbe71d73d901c13927264157c9c51842 | Add lc004_median_of_two_sorted_arrays.py | lc004_median_of_two_sorted_arrays.py | lc004_median_of_two_sorted_arrays.py | """Leetcode 4. Median of Two Sorted Arrays
Hard
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays.
The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
class Solution(object):
def findMedianSortedArrays(self, num1, num2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.007128 | |
8a836213f7466de51c6d3d18d1a5ba74bb28de4a | Add hdf5-vol-async package. (#26874) | var/spack/repos/builtin/packages/hdf5-vol-async/package.py | var/spack/repos/builtin/packages/hdf5-vol-async/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hdf5VolAsync(CMakePackage):
"""This package enables asynchronous IO in HDF5."""
homepage = "https://sdm.lbl.gov/"
git = "https://github.com/hpc-io/vol-async"
maintainers = ['hyoklee']
version('v1.0')
depends_on('argobots@main')
depends_on('hdf5@develop-1.13+mpi+threadsafe')
def cmake_args(self):
"""Populate cmake arguments for HDF5 VOL."""
args = [
self.define('BUILD_SHARED_LIBS:BOOL', True),
self.define('BUILD_TESTING:BOOL=ON', self.run_tests)
]
return args
| Python | 0 | |
034fe49d29f229e8fafc6b1034fc2685cd896eb2 | Create create-studio-item | my-ACG/create-studio-item/edit.py | my-ACG/create-studio-item/edit.py | # -*- coding: utf-8 -*-
import argparse
import csv
import os
import re
import urllib.parse
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
site = pywikibot.Site()
site.login()
datasite = site.data_repository()
def main(studio):
data = {
'labels': {
'zh-tw': {
'language': 'zh-tw',
'value': studio
},
},
'sitelinks': {
'zhwiki': {
'site': 'zhwiki',
'title': studio,
'badges': [],
},
},
# https://www.mediawiki.org/wiki/Wikibase/DataModel/JSON#Snaks
'claims': {
'P3': [{
'mainsnak': {
'snaktype': 'value',
'property': 'P3',
'datatype': 'wikibase-item',
'datavalue': {
'value': {
'entity-type': 'item',
'numeric-id': 65,
},
'type': 'wikibase-entityid',
},
},
'type': 'statement',
'rank': 'normal',
}],
},
}
# claim = pywikibot.page.Claim(datasite, 'P25', datatype='wikibase-item')
# item.editEntity({'claims': [claim.toJSON()]})
print(data)
item = datasite.editEntity({}, data, summary=u'ๅปบ็ซๆฐ้
็ฎไธฆ้ฃ็ต')
print(item['entity']['id'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('studio')
args = parser.parse_args()
main(args.studio)
| Python | 0.000002 | |
9b572d4f53b23f3dc51dbfb98d46d0daa68d3569 | fix pep8 on core admin profile | opps/core/admin/profile.py | opps/core/admin/profile.py | # -*- coding: utf-8 -*-
from django.contrib import admin
from opps.core.models import Profile
class ProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(Profile, ProfileAdmin)
| # -*- coding: utf-8 -*-
from django.contrib import admin
from opps.core.models import Profile
class ProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(Profile, ProfileAdmin)
| Python | 0 |
419e001591566df909b03ffd0abff12171b62491 | Create binary_search_iter.py | binary_search_iter.py | binary_search_iter.py | #GLOBALS
#=======
FIRST_IDX = 0
def chop(number, int_list):
list_size = length(int_list)
start_idx = FIRST_IDX
end_idx = list_size-1
current_idx = end_idx/2
itr_counter = list_size
while itr_counter>0:
current_value = int_list[current_idx]
if current_value == number:
return current_idx
else if current_value > number:
end_idx = current_idx - 1
else if current_value < number:
start_idx = current_idx+1
current_idx = (end_idx + start_idx)/2
itr_counter /=2
if __name__=="__main__":
| Python | 0.000041 | |
6c4c26f5383740257b8bca56ce1ea9011053aff6 | add new package : keepalived (#14463) | var/spack/repos/builtin/packages/keepalived/package.py | var/spack/repos/builtin/packages/keepalived/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Keepalived(AutotoolsPackage):
"""
Keepalived implements a set of checkers to dynamically and adaptively
maintain and manage loadbalanced server pool according their health
"""
homepage = "http://www.keepalived.org"
url = "http://www.keepalived.org/software/keepalived-1.2.0.tar.gz"
version('2.0.19', sha256='0e2f8454765bc6a5fa26758bd9cec18aae42882843cdd24848aff0ae65ce4ca7')
version('2.0.18', sha256='1423a2b1b8e541211029b9e1e1452e683bbe5f4b0b287eddd609aaf5ff024fd0')
version('2.0.17', sha256='8965ffa2ffe243014f9c0245daa65f00a9930cf746edf33525d28a86f97497b4')
version('2.0.16', sha256='f0c7dc86147a286913c1c2c918f557735016285d25779d4d2fce5732fcb888df')
version('2.0.15', sha256='933ee01bc6346aa573453b998f87510d3cce4aba4537c9642b24e6dbfba5c6f4')
version('2.0.14', sha256='1bf586e56ee38b47b82f2a27b27e04d0e5b23f1810db6a8e801bde9d3eb8617b')
version('2.0.13', sha256='c7fb38e8a322fb898fb9f6d5d566827a30aa5a4cd1774f474bb4041c85bcbc46')
version('2.0.12', sha256='fd50e433d784cfd948de5726752cf89ab7001f587fe10a5110c6c7cbda4b7b5e')
version('2.0.11', sha256='a298b0c02a20959cfc365b62c14f45abd50d5e0595b2869f5bce10ec2392fa48')
depends_on('openssl', type='build')
| Python | 0 | |
5ef097bc394ef5be9b723ca0732bb842ab82e9e1 | Include app.wsgi into repository as an example #8 | website/app.wsgi | website/app.wsgi | import sys
from pathlib import Path
path = Path(__file__)
# when moving virual environment, update following line
venv_location = str(path.parents[2])
# in Python3 there is no builtin execfile shortcut - let's define one
def execfile(filename):
globals = dict( __file__ = filename)
exec(open(filename).read(), globals)
# add application directory to execution path
sys.path.insert(0, str(path.parent))
sys.path.insert(0, str(path.parents[1]))
# activate virual environment
activate_this = venv_location + '/virtual_environment/bin/activate_this.py'
execfile(activate_this)
# import application to serve
from app import app as application
| Python | 0 | |
397b7fbd676e283dc5203aa6c160a979c4e86305 | Add convention seed command | project/api/management/commands/seed_convention.py | project/api/management/commands/seed_convention.py | # Django
from django.core.management.base import BaseCommand
import json
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from itertools import chain
from api.factories import (
AppearanceFactory,
AssignmentFactory,
AwardFactory,
ChartFactory,
ContestantFactory,
ContestFactory,
ConventionFactory,
EntityFactory,
EntryFactory,
MemberFactory,
OfficeFactory,
OfficerFactory,
ParticipantFactory,
PersonFactory,
RepertoryFactory,
RoundFactory,
ScoreFactory,
SessionFactory,
SlotFactory,
SongFactory,
UserFactory,
VenueFactory,
)
from api.models import (
Appearance,
Assignment,
Award,
Chart,
Contest,
Contestant,
Convention,
Entity,
Entry,
Member,
Office,
Officer,
Participant,
Person,
Repertory,
Round,
Score,
Session,
Slot,
Song,
User,
Venue,
)
class Command(BaseCommand):
help="Command to seed convention."
def handle(self, *args, **options):
admin=UserFactory(
email='test@barberscore.com',
password='password',
is_staff=True,
person=None,
)
drcj_person=PersonFactory(
name='DRCJ Person',
email='drcj@barberscore.com',
)
drcj_user=UserFactory(
email=drcj_person.email,
person=drcj_person,
)
ca_person=PersonFactory(
name='CA Person',
email='ca@barberscore.com',
)
ca_user=UserFactory(
email=ca_person.email,
person=ca_person,
)
bhs=EntityFactory(
name='Barbershop Harmony Society',
long_name='Barbershop Harmony Society',
short_name='BHS',
kind=Entity.KIND.international,
)
drcj_office=OfficeFactory(
name='District Director C&J',
long_name='District Director C&J',
short_name='DRCJ',
is_cj=True,
is_drcj=True,
)
ca_office=OfficeFactory(
name='Contest Administrator',
long_name='Contest Administrator',
short_name='CA',
is_cj=True,
is_ca=True,
)
mus_office=OfficeFactory(
name='Music Judge',
long_name='Music Judge',
short_name='MUS',
is_cj=True,
)
per_office=OfficeFactory(
name='Performance Judge',
long_name='Performance Judge',
short_name='PER',
is_cj=True,
)
sng_office=OfficeFactory(
name='Singing Judge',
long_name='Singing Judge',
short_name='SNG',
is_cj=True,
)
rep_office=OfficeFactory(
name='Quartet Representative',
long_name='Quartet Representative',
short_name='QREP',
is_rep=True,
)
drcj_officer=OfficerFactory(
office=drcj_office,
person=drcj_person,
entity=bhs,
)
ca_officer=OfficerFactory(
office=ca_office,
person=ca_person,
entity=bhs,
)
mus_judges=OfficerFactory.create_batch(
size=5,
office=mus_office,
entity=bhs,
)
per_judges=OfficerFactory.create_batch(
size=5,
office=per_office,
entity=bhs,
)
sng_judges=OfficerFactory.create_batch(
size=5,
office=sng_office,
entity=bhs,
)
convention=ConventionFactory(
name='International Convention',
entity=bhs,
)
drcj_assignment=AssignmentFactory(
category=Assignment.CATEGORY.drcj,
person=drcj_person,
convention=convention,
status=Assignment.STATUS.confirmed,
)
ca_assignment=AssignmentFactory(
category=Assignment.CATEGORY.ca,
person=ca_person,
convention=convention,
status=Assignment.STATUS.confirmed,
)
for judge in mus_judges:
AssignmentFactory(
category=Assignment.CATEGORY.music,
person=judge.person,
convention=convention,
status=Assignment.STATUS.confirmed,
)
for judge in per_judges:
AssignmentFactory(
category=Assignment.CATEGORY.performance,
person=judge.person,
convention=convention,
status=Assignment.STATUS.confirmed,
)
for judge in sng_judges:
AssignmentFactory(
category=Assignment.CATEGORY.singing,
person=judge.person,
convention=convention,
status=Assignment.STATUS.confirmed,
)
quartet_session=SessionFactory(
convention=convention,
kind=Session.KIND.quartet,
)
i = 1
while i <= 3:
RoundFactory(
session=quartet_session,
num=i,
kind=(4 - i),
)
i += 1
quartet_award=AwardFactory(
name='International Quartet Championship',
entity=bhs,
)
quartet_contest = ContestFactory(
session=quartet_session,
award=quartet_award,
)
quartets = EntityFactory.create_batch(
size=50,
kind=Entity.KIND.quartet,
)
for quartet in quartets:
i = 1
while i <= 4:
MemberFactory(
entity=quartet,
part=i,
)
i += 1
OfficerFactory(
office=rep_office,
entity=quartet,
)
for quartet in quartets:
i = 1
while i <= 6:
RepertoryFactory(
entity=quartet,
)
i += 1
for quartet in quartets:
EntryFactory(
session=quartet_session,
entity=quartet,
is_evaluation=False,
status=Entry.STATUS.accepted,
)
for entry in quartet_session.entries.all():
ContestantFactory(
entry=entry,
contest=quartet_contest,
)
for member in entry.entity.members.all():
ParticipantFactory(
entry=entry,
member=member,
)
quartet_quarters = quartet_session.rounds.get(num=1)
i = 1
for entry in quartet_session.entries.all().order_by('?'):
slot = SlotFactory(
num=i,
round=quartet_quarters,
)
AppearanceFactory(
round=quartet_quarters,
entry=entry,
slot=slot,
)
i += 1
for appearance in quartet_quarters.appearances.all():
i = 1
while i <= appearance.round.num_songs:
song = SongFactory(
num=i,
appearance=appearance,
)
i += 1
for assignment in quartet_quarters.session.convention.assignments.filter(
status=Assignment.STATUS.confirmed,
category__gt=Assignment.CATEGORY.aca,
).order_by('kind'):
ScoreFactory(
category=assignment.category,
kind=assignment.kind,
song=song,
person=assignment.person,
)
| Python | 0.000001 | |
fb0f5340d9dcd28725f43dc3b7f93def78bdab92 | Add serialization tests for TMRegion | tests/unit/nupic/regions/tm_region_test.py | tests/unit/nupic/regions/tm_region_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""TMRegion unit tests."""
import tempfile
import unittest
import numpy as np
from nupic.regions.tm_region import TMRegion
from nupic.regions.tm_region_capnp import TMRegionProto
class TMRegionTest(unittest.TestCase):
def checkTMRegionImpl(self, impl):
output1 = {
"bottomUpOut": np.zeros((40,)),
"topDownOut": np.zeros((10,)),
"activeCells": np.zeros((40,)),
"predictedActiveCells": np.zeros((40,)),
"anomalyScore": np.zeros((1,)),
"lrnActiveStateT": np.zeros((40,)),
}
output2 = {
"bottomUpOut": np.zeros((40,)),
"topDownOut": np.zeros((10,)),
"activeCells": np.zeros((40,)),
"predictedActiveCells": np.zeros((40,)),
"anomalyScore": np.zeros((1,)),
"lrnActiveStateT": np.zeros((40,)),
}
a = np.zeros(10, dtype="int32")
a[[1, 3, 7]] = 1
b = np.zeros(10, dtype="int32")
b[[2, 4, 8]] = 1
inputA = {
"bottomUpIn": a,
"resetIn": np.zeros(1),
"sequenceIdIn": np.zeros(1),
}
inputB = {
"bottomUpIn": b,
"resetIn": np.zeros(1),
"sequenceIdIn": np.zeros(1),
}
region1 = TMRegion(10, 10, 4, temporalImp=impl)
region1.initialize()
region1.compute(inputA, output1)
proto1 = TMRegionProto.new_message()
region1.writeToProto(proto1)
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = TMRegionProto.read(f)
region2 = TMRegion.readFromProto(proto2)
region1.compute(inputB, output1)
region2.compute(inputB, output2)
self.assertTrue(np.array_equal(output1["bottomUpOut"],
output2["bottomUpOut"]))
self.assertTrue(np.array_equal(output1["topDownOut"],
output2["topDownOut"]))
self.assertTrue(np.array_equal(output1["activeCells"],
output2["activeCells"]))
self.assertTrue(np.array_equal(output1["predictedActiveCells"],
output2["predictedActiveCells"]))
self.assertTrue(np.array_equal(output1["anomalyScore"],
output2["anomalyScore"]))
self.assertTrue(np.array_equal(output1["lrnActiveStateT"],
output2["lrnActiveStateT"]))
def testWriteReadPy(self):
self.checkTMRegionImpl("py")
def testWriteReadCpp(self):
self.checkTMRegionImpl("cpp")
def testWriteReadTMPy(self):
self.checkTMRegionImpl("tm_py")
def testWriteReadTMCpp(self):
self.checkTMRegionImpl("tm_cpp")
if __name__ == "__main__":
unittest.main()
| Python | 0 | |
8d6676f2e19ab9df01c681b6590c6f4adb0f938c | add profile model | fbmsgbot/models/profile.py | fbmsgbot/models/profile.py | class Profile():
def __init__(self, **kwargs):
self.first_name = kwargs['first_name']
self.last_name = kwargs['last_name']
self.profile_pic = kwargs['profile_pic']
self.locale = kwargs['locale']
self.timezone = kwargs['timezone']
self.gender = kwargs['gender']
| Python | 0.000001 | |
7a3a6720a47f380cf20a06aaa47634675099bf92 | Django learning site forms: add model forms QuizForm, TrueFalseQuestionForm, MultipleChoiceQuestionForm | python/django/learning_site_forms/courses/forms.py | python/django/learning_site_forms/courses/forms.py | from django import forms
from . import models
class QuizForm(forms.ModelForm):
class Meta:
model = models.Quiz
fields = [
'title',
'description',
'order',
'total_questions',
]
class TrueFalseQuestionForm(forms.ModelForm):
class Meta:
model = models.TrueFalseQuestion
fields = ['order', 'prompt']
class MultipleChoiceQuestionForm(forms.ModelForm):
class Meta:
model = models.MultipleChoiceQuestion
fields = ['order',
'prompt',
'shuffle_answers'
] | Python | 0.997456 | |
519b141349b4d39902416be560b989160d48b141 | add echo_delay to estimate the delay between two wav files | echo_delay.py | echo_delay.py |
import sys
import wave
import numpy as np
from gcc_phat import gcc_phat
if len(sys.argv) != 3:
print('Usage: {} near.wav far.wav'.format(sys.argv[0]))
sys.exit(1)
near = wave.open(sys.argv[1], 'rb')
far = wave.open(sys.argv[2], 'rb')
rate = near.getframerate()
N = rate
window = np.hanning(N)
while True:
sig = near.readframes(N)
if len(sig) != 2 * N:
break
ref = far.readframes(N)
sig_buf = np.fromstring(sig, dtype='int16')
ref_buf = np.fromstring(ref, dtype='int16')
tau = gcc_phat(sig_buf * window, ref_buf * window, fs=rate, max_tau=1)
# tau = gcc_phat(sig_buf, ref_buf, fs=rate, max_tau=1)
print(tau * 1000)
| Python | 0.000001 | |
c71924d4baea473a36f0c22f0878fea7a9ff2800 | Create constants.py | a2/constants.py | a2/constants.py | import re
import time
#first link to view the cruise
base_link = 'https://www.princess.com/find/cruiseDetails.do?voyageCode=2801'
#element to find
button_element = 'md-hidden'
#gets the current time
time = time.strftime('%I:%M:%S')
forming = 'building request'
seperator = 'โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ'
#xpath
# //*[contains(@class, 'col-pax-item selectable-blue-arrow col-xs-12 col-xs-pad-0 clearfix'), (@button, '')]
#//button[@data-num-pax="4"]/text()
#//button[@data-num-pax="4"]
# //*[contains(@class, 'col-pax-item selectable-blue-arrow col-xs-12 col-xs-pad-0 clearfix')//[contains(@button[contains(text(),'4')])]]
| Python | 0.000006 | |
3dcd012977d4dfea69ec4a51650ac9a4fd375842 | add missing migration file | registration/migrations/0007_auto_20160416_1217.py | registration/migrations/0007_auto_20160416_1217.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-16 03:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0006_auto_20160416_1202'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='payment_message',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| Python | 0.000001 | |
64d675304b2d66d89e55dcff167d1dd20e6b000c | add fragment molecule class for monte carlo simulation | afm/molecule.py | afm/molecule.py |
class FragmentMolecule(object):
def __init__(self, composition):
self.composition = composition
def __str__(self):
"""
Return a string representation.
"""
return self.composition
| Python | 0 | |
f3c2b9087a06b508a278cb8e6f79200caae1ac07 | Add a tool to encode udot instructions in asm code so we compile on any toolchain. | standalone/encode.py | standalone/encode.py | import sys
import re
def encode_udot_vector(line):
m = re.search(
r'\budot[ ]+v([0-9]+)[ ]*.[ ]*4s[ ]*,[ ]*v([0-9]+)[ ]*.[ ]*16b[ ]*,[ ]*v([0-9]+)[ ]*.[ ]*16b',
line)
if not m:
return 0, line
match = m.group(0)
accum = int(m.group(1))
lhs = int(m.group(2))
rhs = int(m.group(3))
assert accum >= 0 and accum <= 31
assert lhs >= 0 and lhs <= 31
assert rhs >= 0 and rhs <= 31
mcode = 0x6e809400 | (accum << 0) | (lhs << 5) | (rhs << 16)
return mcode, match
def encode_udot_element(line):
m = re.search(
r'\budot[ ]+v([0-9]+)[ ]*.[ ]*4s[ ]*,[ ]*v([0-9]+)[ ]*.[ ]*16b[ ]*,[ ]*v([0-9]+)[ ]*.[ ]*4b[ ]*\[([0-9])\]',
line)
if not m:
return 0, line
match = m.group(0)
accum = int(m.group(1))
lhs = int(m.group(2))
rhs = int(m.group(3))
lanegroup = int(m.group(4))
assert accum >= 0 and accum <= 31
assert lhs >= 0 and lhs <= 31
assert rhs >= 0 and rhs <= 31
assert lanegroup >= 0 and lanegroup <= 3
l = 1 if lanegroup & 1 else 0
h = 1 if lanegroup & 2 else 0
mcode = 0x6f80e000 | (accum << 0) | (lhs << 5) | (rhs << 16) | (l << 21) | (
h << 11)
return mcode, match
def encode(line):
mcode, match = encode_udot_vector(line)
if mcode:
return mcode, match
mcode, match = encode_udot_element(line)
if mcode:
return mcode, match
return 0, line
for line in sys.stdin:
mcode, match = encode(line)
if mcode:
line = line.replace(match, '.word 0x%x // %s' % (mcode, match))
sys.stdout.write(line)
| Python | 0 | |
74c23aff06485f323c45b24e7e3784dd3c72d576 | Create dokEchoServer.py | dokEchoServer.py | dokEchoServer.py | #!/usr/bin/env python2.7
# Created by Adam Melton (.dok) referenceing https://bitmessage.org/wiki/API_Reference for API documentation
# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This is an example of an echo server for PyBitmessage 0.3.0, by .dok (Version 0.2.1)
import ConfigParser
import xmlrpclib
import getopt
import json
import sys
import time
from time import strftime, gmtime
versionNo = 2.1
def logEcho(recTime,bmAddress):
global versionNo
config = ConfigParser.RawConfigParser()
echoLogFile = 'EchoLog.dat'
config.read(echoLogFile)
try: #try to open the file
config.get('EchoServer','processedTotal')
except:# if it fails, then initialize the EchoLog.dat file since this is the first time running the program
print 'Initializing EchoLog.dat'
config.add_section('EchoServer')
config.add_section('EchoLogs')
config.set('EchoServer','versionNumber',str(versionNo))
config.set('EchoServer','processedTotal','0')
processedTotal = int(config.get('EchoServer','processedTotal'))
processedTotal = processedTotal + 1
config.set('EchoServer','processedTotal',str(processedTotal)) #echo count
config.set('EchoLogs',recTime,bmAddress) #message information
with open(echoLogFile, 'wb') as configfile: #updates the total number of processed messages
config.write(configfile)
print 'Echo successfully logged.'
def processEcho():
api = xmlrpclib.ServerProxy("http://echo:echoPassword@localhost:8442/") #Connect to BitMessage using these api credentials
print 'Loaded from API successfully.'
timeStamp = gmtime() #set the timestamp before processing (to get the most accurate time)
inboxMessages = json.loads(api.getAllInboxMessages()) #Parse json data in to python data structure
print 'Loaded all inbox messages for processing.'
newestMessage = (len(inboxMessages['inboxMessages']) - 1) #Find the newest message (the only to echo)
replyAddress = inboxMessages['inboxMessages'][newestMessage]['fromAddress'] #Get the return address
myAddress = inboxMessages['inboxMessages'][newestMessage]['toAddress'] #Get my address
subject = 'ECHO'.encode('base64') #Set the subject
print 'Loaded and parsed data. Ready to build outgoing message.'
message = inboxMessages['inboxMessages'][newestMessage]['message'].decode('base64') #Gets the message sent by the user
if (len(message) > 256):
message = (message[:256] + '... Truncated to 256 characters.\n')
echoMessage = ('Message successfully received at ' + strftime("%Y-%m-%d %H:%M:%S",timeStamp) + ' UTC/GMT.\n' + '-------------------------------------------------------------------------------\n' + message + '\n\n\nThank you for using EchoServer. For More information about the BitMessage project, please visit https://BitMessage.org\n\nFeel free to contact me with questions or comments: BM-or9zPodxMUmkrmVVGCSV9xT1AibwdTFK9 \n.dok')
echoMessage = echoMessage.encode('base64') #Encode the message.
print 'Message built, ready to send. Sending...'
api.sendMessage(replyAddress,myAddress,subject,echoMessage) #build the message and send it
print 'Sent.'
print 'Begin logging echo.'
logEcho(strftime("%Y_%m_%d-%H_%M_%S",timeStamp), replyAddress) #Logs the echo to the EchoLog.dat file
if (newestMessage > 25): #Delete oldest message, trying to keep only 25 messages in the inbox. Only deletes one so it won't clear out your inbox (you can do that).
msgId = inboxMessages['inboxMessages'][0]['msgid'] #gets the message ID of the oldest message in the inbox (index 0)
api.trashMessage(msgId)
print 'Oldest message deleted.'
def main():
arg = sys.argv[1]
if arg == "startingUp":
sys.exit()
elif arg == "newMessage":
processEcho() #carries out the echo
print 'Done.'
sys.exit() #Done, exit
elif arg == "newBroadcast":
sys.exit()
else:
assert False, "unhandled option"
sys.exit() #Not a relevant argument, exit
if __name__ =="__main__":
main()
| Python | 0 | |
fb8b1d7cb6e98e97fb383ca7457cb1cd237f8184 | Add usernamer.py | examples/username.py | examples/username.py | # Madenning Username Generator
# Returns first char of first name and first 7 chars of last name
def usernamer(first_name, last_name):
username = first_name[0] + last_name[:7]
return username.lower()
if __name__ == '__main__':
# Testing
assert usernamer("Joshua", "Wedekind") == "jwedekin"
first_name = input("Enter first name: ")
last_name = input("Enter last name: ")
print(usernamer(first_name, last_name))
| Python | 0.000013 | |
864329b8a84f8d7df71b9f1d13b43118b7a3f522 | Remove consent status for draft participants (#2622) | rdr_service/tools/tool_libs/unconsent.py | rdr_service/tools/tool_libs/unconsent.py | import argparse
from sqlalchemy import and_
from sqlalchemy.orm import Session
from rdr_service.model.code import Code
from rdr_service.model.consent_file import ConsentFile
from rdr_service.model.participant import Participant
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.model.questionnaire import QuestionnaireQuestion
from rdr_service.model.questionnaire_response import QuestionnaireResponse, QuestionnaireResponseAnswer,\
QuestionnaireResponseExtension
from rdr_service.model.utils import from_client_participant_id
from rdr_service.tools.tool_libs.tool_base import cli_run, logger, ToolBase
tool_cmd = 'unconsent'
tool_desc = 'Remove participants that we have received ConsentPII payloads for, but have not actually consented'
class UnconsentTool(ToolBase):
    """Removes consent status and questionnaire responses for participants that
    sent ConsentPII payloads without actually signing the consent.

    Participant ids are read from the file given by --pid-file; with --dry-run
    the tool only logs what it would delete.
    """

    def run(self):
        super(UnconsentTool, self).run()
        with self.get_session() as session:
            for participant_id in self._load_participant_ids():
                # Retrieve the participant record
                participant_query = session.query(Participant).filter(
                    Participant.participantId == participant_id
                )
                if not self.args.dry_run:
                    # Obtain a lock on the participant to prevent possible
                    # race conditions with incoming questionnaire responses.
                    # Bug fix: this previously read
                    # `summary_query = summary_query.with_for_update()`,
                    # referencing an undefined name and raising a NameError.
                    participant_query = participant_query.with_for_update()

                participant = participant_query.one_or_none()
                if participant is None or participant.participantSummary is None:
                    logger.info(f'No participant summary found for P{participant_id}')
                else:
                    if not self._participant_has_signed_consent(session, participant_id):
                        self._delete_participant_consent(session, participant_id)
                    else:
                        logger.info(f'P{participant_id} has signed consent')

                # Commit to finalize the changes for this participant and release the locks
                session.commit()

    def _load_participant_ids(self) -> set:
        """Parse the --pid-file into a set of integer participant ids."""
        participant_ids = set()
        with open(self.args.pid_file) as pid_file:
            for participant_id_str in pid_file:
                participant_id = from_client_participant_id(participant_id_str)
                participant_ids.add(participant_id)
        return participant_ids

    @classmethod
    def _participant_has_signed_consent(cls, session: Session, participant_id: int):
        """
        If the participant has a module where they've actually signed the consent, then don't unconsent them.
        This is to make sure we don't remove participants that have consented since verifying the list.
        """
        signed_response_query = (
            session.query(QuestionnaireResponseAnswer.questionnaireResponseAnswerId)
            .join(
                QuestionnaireResponse,
                and_(
                    QuestionnaireResponse.participantId == participant_id,
                    QuestionnaireResponse.questionnaireResponseId == QuestionnaireResponseAnswer.questionnaireResponseId
                )
            ).join(
                QuestionnaireQuestion,
                QuestionnaireQuestion.questionnaireQuestionId == QuestionnaireResponseAnswer.questionId
            ).join(
                Code,
                and_(
                    # An answer to the signature question marks a real consent.
                    Code.value == 'ExtraConsent_Signature',
                    Code.codeId == QuestionnaireQuestion.codeId
                )
            )
        )
        return signed_response_query.count() > 0

    def _delete_participant_consent(self, session: Session, participant_id: int):
        """Remove the participant's consent status and responses from the RDR"""
        if self.args.dry_run:
            logger.info(f'would remove consent for P{participant_id}')
        else:
            # Delete the participant summary
            session.query(ParticipantSummary).filter(
                ParticipantSummary.participantId == participant_id
            ).delete()

            # Delete the QuestionnaireResponses and associated objects.
            # NOTE(review): these bulk deletes with `.in_()` rely on the
            # session's default synchronize_session strategy; confirm it does
            # not raise for IN-criteria on the deployed SQLAlchemy version.
            questionnaire_response_ids = session.query(QuestionnaireResponse.questionnaireResponseId).filter(
                QuestionnaireResponse.participantId == participant_id
            ).all()
            session.query(QuestionnaireResponseAnswer).filter(
                QuestionnaireResponseAnswer.questionnaireResponseId.in_(questionnaire_response_ids)
            ).delete()
            session.query(QuestionnaireResponseExtension).filter(
                QuestionnaireResponseExtension.questionnaireResponseId.in_(questionnaire_response_ids)
            ).delete()
            session.query(QuestionnaireResponse).filter(
                QuestionnaireResponse.questionnaireResponseId.in_(questionnaire_response_ids)
            ).delete()

            # Remove any consent file expectations
            session.query(ConsentFile).filter(
                ConsentFile.participant_id == participant_id
            ).delete()
def add_additional_arguments(parser: argparse.ArgumentParser):
    """Register the unconsent tool's command-line options on the given parser."""
    parser.add_argument('--pid-file', required=True)  # file listing participant ids, one per line
    parser.add_argument('--dry-run', action="store_true", default=False)  # log only, change nothing
def run():
    # Entry point wired into the tool framework: delegates to cli_run with
    # this tool's metadata, class and argument hook.
    cli_run(tool_cmd, tool_desc, UnconsentTool, add_additional_arguments)
| Python | 0 | |
1417d5345d68ef67ba6e832bbc45b8f0ddd911bc | Create testTemplate.py | data_structures/linked_list/utils/testTemplate.py | data_structures/linked_list/utils/testTemplate.py | # A test template for Python solutions.
import sys
def TestMain(sol, log=sys.stdout, doNotLogPassed=True) -> bool:
    """
    @param sol: the function to be tested.
    @param log: a stream or a file to log the tester output to.
    @param doNotLogPassed: if True, all successful tests will not be logged.
    @return: True if all tests in the TESTS array were successful, False otherwise.

    All tester functions should follow the signature
    of the TestMain function.
    """
    def TestPredefined(solution, log):
        # Placeholder tester: fill in per-problem checks here.
        # Fix: the previous annotation `solution: function` referenced the
        # nonexistent name `function` and raised a NameError at runtime.
        raise NotImplementedError()

    # Please add all tester functions to the TESTS tuple.
    TESTS = (TestPredefined, )

    areAllPassed = True
    for Test in TESTS:
        # Fix: this previously called Test(solution, log) with an undefined
        # name; the parameter of TestMain is `sol`.
        if not Test(sol, log):
            areAllPassed = False
    return areAllPassed
| Python | 0.000001 | |
969a424fae41d85ac141febab40d603f05b1b977 | Add command to remove unused import statements | dev_tools/src/d1_dev/src-remove-unused-imports.py | dev_tools/src/d1_dev/src-remove-unused-imports.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import d1_dev.util
import d1_common.iter.path
import d1_common.util
import git
logger = logging.getLogger(__name__)
def main():
    """Remove unused imports.

    Unsafe! Only tested on our codebase, which uses simple absolute imports on the form,
    "import a.b.c". Comments out (rather than deletes) imports for which no
    usage is found, but only in files that are both specified on the command
    line and tracked by git.
    """
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("path", nargs="+", help="File or directory path")
    parser.add_argument("--exclude", nargs="+", help="Exclude glob patterns")
    parser.add_argument(
        "--no-recursive",
        dest="recursive",
        action="store_false",
        help="Search directories recursively",
    )
    parser.add_argument(
        "--ignore-invalid", action="store_true", help="Ignore invalid paths"
    )
    parser.add_argument(
        "--pycharm", action="store_true", help="Enable PyCharm integration"
    )
    parser.add_argument(
        "--diff",
        dest="show_diff",
        action="store_true",
        help="Show diff and do not modify any files",
    )
    parser.add_argument(
        "--dry-run", action="store_true", help="Process files but do not write results"
    )
    parser.add_argument("--debug", action="store_true", help="Debug level logging")

    args = parser.parse_args()
    d1_common.util.log_setup(args.debug)

    # Locate the repository containing this script itself.
    repo_path = d1_dev.util.find_repo_root_by_path(__file__)
    repo = git.Repo(repo_path)

    # Only process files that are both specified on the CLI and git-tracked.
    specified_file_path_list = get_specified_file_path_list(args)
    tracked_path_list = list(d1_dev.util.get_tracked_files(repo))
    format_path_list = sorted(
        set(specified_file_path_list).intersection(tracked_path_list)
    )
    for format_path in format_path_list:
        comment_unused_imports(args, format_path)
def get_specified_file_path_list(args):
    """Resolve the CLI path arguments to real paths of matching *.py files."""
    path_iter = d1_common.iter.path.path_generator(
        path_list=args.path,
        include_glob_list=["*.py"],
        exclude_glob_list=args.exclude,
        recursive=args.recursive,
        ignore_invalid=args.ignore_invalid,
        default_excludes=False,
        return_dir_paths=False,
    )
    return [os.path.realpath(candidate_path) for candidate_path in path_iter]
def comment_unused_imports(args, format_path):
    """Detect unused imports in the module at format_path and comment them out
    (respecting --diff / --dry-run)."""
    logger.info("")
    logger.info("{}".format(format_path))
    tree = d1_dev.util.redbaron_module_path_to_tree(format_path)
    unused_tuples = get_unused_import_list(tree)
    if not unused_tuples:
        return
    for dot_tuple in unused_tuples:
        comment_import(tree, dot_tuple)
    d1_dev.util.update_module_file(
        tree, format_path, show_diff=args.show_diff, dry_run=args.dry_run
    )
def get_unused_import_list(r):
    """Return the imported dot-tuples in tree *r* that no dotted usage in the
    module starts with."""
    import_tuples = get_import_list(r)
    print_list("Imports", import_tuples)
    atom_tuples = get_atomtrailer_list(r)
    print_list("AtomTrailers", atom_tuples)
    dotted_tuples = get_dotted_name_list(r)
    print_list("DottedNames", dotted_tuples)
    # An import is used if some usage's leading components equal it exactly.
    usage_tuples = sorted(set(atom_tuples) | set(dotted_tuples))
    unused_tuples = [
        imp
        for imp in import_tuples
        if not any(imp == usage[: len(imp)] for usage in usage_tuples)
    ]
    print_list("Unused imports", unused_tuples)
    return unused_tuples
def comment_import(r, unused_dot_list):
    """Comment out the first import statement matching the given dotted name."""
    target = ".".join(unused_dot_list)
    for import_node in r("ImportNode"):
        if import_node.names()[0] != target:
            continue
        # The "!" keeps the generated line from matching searches for the
        # plain comment pattern in code.
        import_node.replace("#{}# {}".format("!", str(import_node)))
        break
def print_list(head_str, dot_list):
    """Log a titled list of dotted-name tuples; logs "None" when empty."""
    logger.info("")
    logger.info("{}:".format(head_str))
    if not dot_list:
        logger.info(" None")
    for name_tuple in dot_list:
        logger.info(" {}".format(".".join(name_tuple)))
def get_import_list(r):
    """Collect the dotted names of plain, un-aliased "import a.b.c" statements."""
    found = set()
    for node in r.find_all(("dotted_as_name",)):
        if node.parent.type != "import":
            continue
        # Aliased imports ("import a.b.c as d") are not handled.
        if node.parent.value[0].target != "":
            continue
        found.add(tuple(part.value for part in node.value))
    return sorted(found)
def get_atomtrailer_list(r):
    """Capture only the leading run of plain names from each atomtrailers node.

    A full sequence typically includes function calls and parameters, e.g.
    pkga.pkgb.pkgc.one_call(arg1, arg2, arg3=4); only the leading dotted-name
    portion is kept.
    """
    found = set()
    for node in r.find_all(("atomtrailers",)):
        leading = []
        for element in node.value:
            if element.type != "name":
                break
            leading.append(element.value)
        if leading:
            found.add(tuple(leading))
    return sorted(found)
def get_dotted_name_list(r):
    """Collect the name components of every dotted_name node in the tree."""
    found = set()
    for node in r.find_all("dotted_name"):
        parts = tuple(
            element.value for element in node.value.data if element.type == "name"
        )
        if parts:
            found.add(parts)
    return sorted(found)
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| Python | 0.000002 | |
3ea318cf5c1b66106bf496d513efdd6e86d0f665 | add vowpal_wabbit requirement installation | robustus/detail/install_vowpal_wabbit.py | robustus/detail/install_vowpal_wabbit.py | # =============================================================================
# COPYRIGHT 2013 Brain Corporation.
# License under MIT license (see LICENSE file)
# =============================================================================
import logging
import os
from requirement import RequirementException
from utility import unpack, safe_remove, run_shell, ln
import shutil
import subprocess
def install(robustus, requirement_specifier, rob_file, ignore_index):
    """Build/install Vowpal Wabbit into the robustus cache and the virtualenv.

    Tries a precompiled archive from the remote cache first; otherwise
    downloads the tagged source tarball from GitHub, compiles it, copies the
    result into the virtualenv and symlinks the `vw` binary. Finally installs
    the Python bindings via wheeling.

    NOTE(review): this function chdir()s several times; the final
    os.chdir(cwd) is not in a try/finally, so an exception mid-way leaves the
    process in a different working directory -- confirm callers tolerate that.
    """
    cwd = os.getcwd()
    os.chdir(robustus.cache)
    install_dir = os.path.join(robustus.cache, 'vowpal_wabbit-%s' % requirement_specifier.version)

    # try to download precompiled Vowpal Wabbit from the remote cache first
    if not os.path.isdir(install_dir) and not ignore_index:
        wabbit_archive = robustus.download_compiled_archive('vowpal_wabbit', requirement_specifier.version)
        if wabbit_archive is not None:
            unpack(wabbit_archive)
            logging.info('Initializing compiled vowpal_wabbit')
            # install into wheelhouse
            if not os.path.exists(install_dir):
                raise RequirementException("Failed to unpack precompiled vowpal_wabbit archive")

    # Fall back to building from source (skipped if the archive step above
    # already produced install_dir).
    if not os.path.isdir(install_dir) and not ignore_index:
        archive_name = '%s.tar.gz' % requirement_specifier.version  # e.g. "7.7.tar.gz"
        if os.path.exists(archive_name):
            safe_remove(archive_name)
        # move sources to a folder in order to use a clean name for installation
        src_dir = 'vowpal_wabbit-%s' % requirement_specifier.version
        if os.path.exists(src_dir):
            safe_remove(src_dir)
        run_shell(['wget', 'https://github.com/JohnLangford/vowpal_wabbit/archive/%s' % (archive_name,)],
                  verbose=robustus.settings['verbosity'] >= 1)
        run_shell(['tar', 'zxvf', archive_name],
                  verbose=robustus.settings['verbosity'] >= 1)
        if os.path.exists(src_dir+'_src'):
            safe_remove(src_dir+'_src')
        shutil.move(src_dir, src_dir+'_src')
        src_dir += '_src'
        os.chdir(src_dir)
        if os.path.exists(install_dir):
            safe_remove(install_dir)
        os.mkdir(install_dir)
        retcode = run_shell(['make'], verbose=robustus.settings['verbosity'] >= 1)
        if retcode:
            raise RequirementException('Failed to compile Vowpal Wabbit')
        # NOTE(review): nothing visible here points `make install` at
        # install_dir (no PREFIX/DESTDIR); confirm the vowpal wabbit Makefile
        # actually populates install_dir before the copytree below.
        retcode = run_shell('make install', shell=True)
        if retcode:
            raise RequirementException('Failed install Vowpal Wabbit')
        os.chdir(robustus.cache)
        shutil.rmtree(src_dir)

    # Copy the cached build into the virtualenv and expose the vw binary.
    venv_install_folder = os.path.join(robustus.env, 'vowpal_wabbit')
    if os.path.exists(venv_install_folder):
        safe_remove(venv_install_folder)
    shutil.copytree(install_dir, venv_install_folder)
    executable_path = os.path.join(install_dir, 'bin', 'vw')
    ln(executable_path, os.path.join(robustus.env, 'bin', 'vw'), force=True)
    os.chdir(cwd)

    # now install python part
    robustus.install_through_wheeling(requirement_specifier, rob_file, ignore_index)
| Python | 0 | |
30567284410b9bb7154b8d39e5dfe7bc4bb1b269 | Add migration for on_delete SET_NULL | herald/migrations/0006_auto_20170825_1813.py | herald/migrations/0006_auto_20170825_1813.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-08-25 23:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: switches SentNotification.user to
    # on_delete=SET_NULL (nullable, default None) so that deleting a user no
    # longer cascades away the sent-notification rows.

    dependencies = [
        ('herald', '0005_merge_20170407_1316'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sentnotification',
            name='user',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| Python | 0.000027 | |
05ecbd6ea7692ac85a96b35a39ca4609f0a88d86 | Create gapminder_data_analysis.py | gapminder_data_analysis.py | gapminder_data_analysis.py | # Importing the required libraries
# Note %matplotlib inline works only for ipython notebook. It will not work for PyCharm. It is used to show the plot distributions
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
# Reading the data where low_memory=False increases the program efficiency
data= pd.read_csv("gapminder.csv", low_memory=False)
# setting variables that you will be working with to numeric
data['breastcancerper100th']= data['breastcancerper100th'].convert_objects(convert_numeric=True)
data['femaleemployrate']= data['femaleemployrate'].convert_objects(convert_numeric=True)
data['alcconsumption']= data['alcconsumption'].convert_objects(convert_numeric=True)
# shows the number of rows and columns
print (len(data))
print (len(data.columns))
print (len(data.index))
# Print the column headers/headings
names=data.columns.values
print names
# using the describe function to get the standard deviation and other descriptive statistics of our variables
desc1=data['breastcancerper100th'].describe()
desc2=data['femaleemployrate'].describe()
desc3=data['alcconsumption'].describe()
print "\nBreast Cancer per 100th person\n", desc1
print "\nfemale employ rate\n", desc2
print "\nAlcohol consumption in litres\n", desc3
data.describe()
# Show the frequency distribution
print "\nAlcohol Consumption\nFrequency Distribution (in %)"
c1=data['alcconsumption'].value_counts(sort=False,dropna=False)
print c1
print "\nBreast Cancer per 100th"
c2=data['breastcancerper100th'].value_counts(sort=False)
print c2
print "\nFemale Employee Rate"
c3=data['femaleemployrate'].value_counts(sort=False)
print c3
# Show the frequency distribution of the quantitative variable using the groupby function
ac1=data.groupby('alcconsumption').size()
print "ac1\n",ac1
# Creating a subset of the data
sub1=data[(data['femaleemployrate']>40) & (data['alcconsumption']>=20)& (data['breastcancerper100th']<50)]
# creating a copy of the subset. This copy will be used for subsequent analysis
sub2=sub1.copy()
print "\nContries where Female Employee Rate is greater than 40 &" \
" Alcohol Consumption is greater than 20L & new breast cancer cases reported are less than 50\n"
print sub2
print "\nContries where Female Employee Rate is greater than 50 &" \
" Alcohol Consumption is greater than 10L & new breast cancer cases reported are greater than 70\n"
sub3=data[(data['alcconsumption']>10)&(data['breastcancerper100th']>70)&(data['femaleemployrate']>50)]
print sub3
# Checking for missing values in the data row-wise
print "Missing data rows count: ",sum([True for idx,row in data.iterrows() if any(row.isnull())])
# Checking for missing values in the data column-wise
print "Showing missing data coulmn-wise"
print data.isnull().sum()
# Create a copy of the original dataset as sub4 by using the copy() method
sub4=data.copy()
# Now showing the count of null values in the variables
print sub4.isnull().sum()
# Since the data is all continuous variables therefore the use the mean() for missing value imputation
# if dealing with categorical data, than use the mode() for missing value imputation
sub4.fillna(sub4['breastcancerper100th'].mean(), inplace=True)
sub4.fillna(sub4['femaleemployrate'].mean(), inplace=True)
sub4.fillna(sub4['alcconsumption'].mean(), inplace=True)
# Showing the count of null values after imputation
print sub4.isnull().sum()
# categorize quantitative variable based on customized splits using the cut function
sub4['alco']=pd.qcut(sub4.alcconsumption,6,labels=["0","1-4","5-9","10-14","15-19","20-24"])
sub4['brst']=pd.qcut(sub4.breastcancerper100th,5,labels=["1-20","21-40","41-60","61-80","81-90"])
sub4['emply']=pd.qcut(sub4.femaleemployrate,4,labels=["30-39","40-59","60-79","80-90"])
# Showing the frequency distribution of the categorised quantitative variables
print "Frequency distribution of the categorized quantitative variables\n"
fd1=sub4['alco'].value_counts(sort=False,dropna=False)
fd2=sub4['brst'].value_counts(sort=False,dropna=False)
fd3=sub4['emply'].value_counts(sort=False,dropna=False)
print "Alcohol Consumption\n",fd1
print "\n------------------------\n"
print "Breast Cancer per 100th\n",fd2
print "\n------------------------\n"
print "Female Employee Rate\n",fd3
print "\n------------------------\n"
# Now plotting the univariate quantitative variables using the distribution plot
sub5=sub4.copy()
sns.distplot(sub5['alcconsumption'].dropna(),kde=True)
plt.xlabel('Alcohol consumption in litres')
plt.title('Breast cancer in working class women')
plt.show() # Note: Although there is no need to use the show() method for ipython notebook as %matplotlib inline does the trick but
#I am adding it here because matplotlib inline does not work for an IDE like Pycharm and for that i need to use plt.show
sns.distplot(sub5['breastcancerper100th'].dropna(),kde=True)
plt.xlabel('Breast cancer per 100th women')
plt.title('Breast cancer in working class women')
plt.show()
sns.distplot(sub5['femaleemployrate'].dropna(),kde=True)
plt.xlabel('Female employee rate')
plt.title('Breast cancer in working class women')
plt.show()
# using scatter plot the visulaize quantitative variable.
# if categorical variable then use histogram
scat1= sns.regplot(x='alcconsumption', y='breastcancerper100th', data=data)
plt.xlabel('Alcohol consumption in liters')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Alcohol Consumption and Breast Cancer 100th person')
scat2= sns.regplot(x='femaleemployrate', y='breastcancerper100th', data=data)
plt.xlabel('Female Employ Rate')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Female Employ Rate and Breast Cancer per 100th Rate')
| Python | 0.000031 | |
1999295556ba404c7542d2001d7fdca80de54b5f | update api | functions/bcftools/main.py | functions/bcftools/main.py | """
Lambda example with external dependency
"""
import logging
from subprocess import Popen, PIPE
import json
# Root logger at INFO so AWS Lambda forwards request details to CloudWatch.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def return_msg(out, err, status=200):
    """Build a Lambda-proxy style JSON response carrying stdout/stderr."""
    payload = json.dumps({"out": out, "err": err})
    headers = {'Content-Type': 'application/json'}
    return {'statusCode': status, 'body': payload, 'headers': headers}
def handle(event, context):
    """Lambda entry point: validate the request, run the bundled bcftools binary.

    Returns a 400 response when the body or its 'vcf' key is missing,
    otherwise the captured stdout/stderr of bcftools with status 200.
    """
    logger.info("%s ------ %s", event, context)
    if 'body' not in event:
        return return_msg(None, "Error: must specify VCF and region", 400)
    body = event['body']
    # NOTE(review): assumes 'body' arrives already parsed to a mapping; if the
    # gateway delivers it as a JSON string this membership test needs a
    # json.loads first -- confirm against the deployment configuration.
    if 'vcf' not in body:
        return return_msg(None, "Error: must specify VCF and region", 400)
    logger.info("%s", event['body'])
    out, err = Popen(["./bcftools"], stdout=PIPE, stderr=PIPE).communicate()
    # Bug fix: communicate() returns bytes on Python 3, so the previous
    # `out + " out"` / `err + " err"` concatenations raised TypeError.
    # Lazy %s formatting handles bytes and str alike.
    logger.info("%s out", out)
    logger.info("%s err", err)
    return return_msg(out, err, 200)
| Python | 0 | |
f3c4bac262c6d09730b3f0c4a24639fde8b4d923 | Add wsgi compatible example gunicorn application | gunicorn-app.py | gunicorn-app.py | from __future__ import unicode_literals
import multiprocessing
import gunicorn.app.base
from gunicorn.six import iteritems
def number_of_workers():
    """Recommended gunicorn worker count: twice the CPU count, plus one."""
    return 2 * multiprocessing.cpu_count() + 1
def handler_app(environ, start_response):
    """Minimal WSGI application that always answers 200 with a plain-text body."""
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'Works fine']
class StandaloneApplication(gunicorn.app.base.BaseApplication):
    # Embeds gunicorn in-process: wraps a WSGI callable plus an options dict
    # instead of reading configuration from the command line.

    def __init__(self, app, options=None):
        # app: the WSGI callable; options: gunicorn settings by name.
        self.options = options or {}
        self.application = app
        super(StandaloneApplication, self).__init__()

    def load_config(self):
        # Copy only the options gunicorn recognizes (and that are set) into
        # the gunicorn config object.
        config = dict([(key, value) for key, value in iteritems(self.options)
                       if key in self.cfg.settings and value is not None])
        for key, value in iteritems(config):
            self.cfg.set(key.lower(), value)

    def load(self):
        # Called by gunicorn workers to obtain the WSGI callable to serve.
        return self.application
if __name__ == '__main__':
    # Serve the demo app on localhost:8080 with the recommended worker count.
    options = {
        'bind': '%s:%s' % ('127.0.0.1', '8080'),
        'workers': number_of_workers(),
    }
    StandaloneApplication(handler_app, options).run()
| Python | 0 | |
cbf3d2bda86f6a1f3e6e1b975217bcde8c7fc0a9 | Create fresh_tomatoes.py | fresh_tomatoes.py | fresh_tomatoes.py | import webbrowser
import os
import re
# Styles and scripting for the page
main_page_head = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Favorite Movie!</title>
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<style type="text/css" media="screen">
body {
padding-top: 80px;
}
#trailer .modal-dialog {
margin-top: 200px;
width: 640px;
height: 480px;
}
.hanging-close {
position: absolute;
top: -12px;
right: -12px;
z-index: 9001;
}
#trailer-video {
width: 100%;
height: 100%;
}
.movie-tile {
margin-bottom: 20px;
padding-top: 20px;
}
.movie-tile:hover {
background-color: #EEE;
cursor: pointer;
}
.scale-media {
padding-bottom: 56.25%;
position: relative;
}
.scale-media iframe {
border: none;
height: 100%;
position: absolute;
width: 100%;
left: 0;
top: 0;
background-color: white;
}
</style>
<script type="text/javascript" charset="utf-8">
// Pause the video when the modal is closed
$(document).on('click', '.hanging-close, .modal-backdrop, .modal', function (event) {
// Remove the src so the player itself gets removed, as this is the only
// reliable way to ensure the video stops playing in IE
$("#trailer-video-container").empty();
});
// Start playing the video whenever the trailer modal is opened
$(document).on('click', '.movie-tile', function (event) {
var trailerYouTubeId = $(this).attr('data-trailer-youtube-id')
var sourceUrl = 'http://www.youtube.com/embed/' + trailerYouTubeId + '?autoplay=1&html5=1';
$("#trailer-video-container").empty().append($("<iframe></iframe>", {
'id': 'trailer-video',
'type': 'text-html',
'src': sourceUrl,
'frameborder': 0
}));
});
// Animate in the movies when the page loads
$(document).ready(function () {
$('.movie-tile').hide().first().show("fast", function showNext() {
$(this).next("div").show("fast", showNext);
});
});
</script>
</head>
"""
# The main page layout and title bar
main_page_content = '''
<body>
<!-- Trailer Video Modal -->
<div class="modal" id="trailer">
<div class="modal-dialog">
<div class="modal-content">
<a href="#" class="hanging-close" data-dismiss="modal" aria-hidden="true">
<img src="https://lh5.ggpht.com/v4-628SilF0HtHuHdu5EzxD7WRqOrrTIDi_MhEG6_qkNtUK5Wg7KPkofp_VJoF7RS2LhxwEFCO1ICHZlc-o_=s0#w=24&h=24"/>
</a>
<div class="scale-media" id="trailer-video-container">
</div>
</div>
</div>
</div>
<!-- Main Page Content -->
<div class="container">
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header col-md-6">
<img class="img-responsive " src="https://thegothamrogue.files.wordpress.com/2014/01/movies_logo.gif" alt="Movie logo" width="120" height="80" vspace="10">
</div>
<div class="col-md-6 text-right text-uppercase">
<h2 class="title-super text-thin"><font color="white">Negin Asghari </font></h2>
<h4><font color="white">Favorite Movies</font></h4>
</div>
</div>
</div>
</div>
<div class="container">
{movie_tiles}
</div>
</body>
</html>
'''
# A single movie entry html template
movie_tile_content = '''
<div class="col-md-3 movie-tile text-left" data-trailer-youtube-id="{trailer_youtube_id}" data-toggle="modal" data-target="#trailer ">
<img src="{poster_image_url}" width="220" height="342">
<h4> <strong>{movie_title} </strong></h4>
<div> <h4 class="text-left" data-toggle="modal" data-target="trailer">{movie_storyline}</h4></div>
</div>
'''
def create_movie_tiles_content(movies):
    """Render one HTML tile per movie and return the concatenated markup."""
    tiles = []
    for movie in movies:
        # Pull the YouTube id out of either "watch?v=ID" or "youtu.be/ID" URLs.
        match = (re.search(r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
                 or re.search(r'(?<=be/)[^&#]+', movie.trailer_youtube_url))
        trailer_youtube_id = match.group(0) if match else None

        # Fill in the tile template for this movie.
        tiles.append(movie_tile_content.format(
            movie_title=movie.title,
            movie_storyline=movie.storyline,
            poster_image_url=movie.poster_image_url,
            trailer_youtube_id=trailer_youtube_id
        ))
    return ''.join(tiles)
def open_movies_page(movies):
    """Generate favorite-movie.html from the movie list and open it in a browser.

    Side effects: writes (or overwrites) favorite-movie.html in the current
    directory and opens it in a new browser tab where possible.
    """
    # Replace the movie tiles placeholder with the generated content.
    rendered_content = main_page_content.format(
        movie_tiles=create_movie_tiles_content(movies))

    # Fix: use a context manager so the file is closed even if writing fails
    # (previously a bare open()/close() pair leaked the handle on error).
    output_name = 'favorite-movie.html'
    with open(output_name, 'w') as output_file:
        output_file.write(main_page_head + rendered_content)

    # Open the output file in the browser (in a new tab, if possible).
    url = os.path.abspath(output_name)
    webbrowser.open('file://' + url, new=2)
| Python | 0.000342 | |
8d8522c95492f034db2a43e95a6c9cd3fb60c798 | Create glove2word2vec.py | glove2word2vec.py | glove2word2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Manas Ranjan Kar <manasrkar91@gmail.com>
# Licensed under the MIT License https://opensource.org/licenses/MIT
"""
CLI USAGE: python glove2word2vec.py <GloVe vector file> <Output model file>
Convert GloVe vectors into Gensim compatible format to instantiate from an existing file on disk in the word2vec C format;
model = gensim.models.Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
word2vec embeddings start with a line with the number of lines (tokens?) and the number of dimensions of the file. This allows gensim to allocate memory
accordingly for querying the model. Larger dimensions mean larger memory is held captive. Accordingly, this line has to be inserted into the GloVe
embeddings file.
"""
import re
import sys
import gensim
import smart_open
def glove2word2vec(glove_vector_file,output_model_file):
    # Convert a GloVe vector file to word2vec text format by prepending the
    # "<num_lines> <dimensions>" header gensim expects, then demo-load it.
    # NOTE(review): Python 2 script (print statements); won't run on Python 3.

    def get_info(glove_file_name):
        """
        Function to calculate the number of lines and dimensions of the GloVe vectors to make it Gensim compatible
        """
        # NOTE(review): only the 'twitter' check uses the glove_file_name
        # parameter; everything else reads glove_vector_file from the closure.
        # The dimension count is parsed out of the file NAME
        # (e.g. glove.6B.300d.txt), not the file contents -- confirm the
        # expected naming convention.
        num_lines = sum(1 for line in smart_open.smart_open(glove_vector_file))
        if 'twitter' in glove_file_name:
            dims= re.findall('\d+',glove_vector_file.split('.')[3])
            dims=''.join(dims)
        else:
            dims=re.findall('\d+',glove_vector_file.split('.')[2])
            dims=''.join(dims)
        return num_lines,dims

    def prepend_line(infile, outfile, line):
        """
        Function to prepend lines using smart_open
        """
        # NOTE(review): the loop variable shadows the `line` parameter after
        # the header write; harmless here but worth renaming.
        with smart_open.smart_open(infile, 'rb') as old:
            with smart_open.smart_open(outfile, 'wb') as new:
                new.write(str(line) + "\n")
                for line in old:
                    new.write(line)
        return outfile

    num_lines,dims=get_info(glove_vector_file)
    gensim_first_line = "{} {}".format(num_lines, dims)
    print '%s lines with %s dimensions' %(num_lines,dims)

    model_file=prepend_line(glove_vector_file,output_model_file,gensim_first_line)

    # Demo: Loads the newly created glove_model.txt into gensim API.
    model=gensim.models.Word2Vec.load_word2vec_format(model_file,binary=False) #GloVe Model

    print 'Most similar to king are: ', model.most_similar(positive=['king'], topn=10)
    print 'Similarity score between woman and man is: ', model.similarity('woman', 'man')

    print 'Model %s successfully created !!'%output_model_file
    return model_file
if __name__ == "__main__":
    # CLI usage: python glove2word2vec.py <GloVe vector file> <output model file>
    glove_vector_file=sys.argv[1]
    output_model_file=sys.argv[2]
    glove2word2vec(glove_vector_file,output_model_file)
| Python | 0.000735 | |
b4364727609fc17a33ab72d495e9e6d3a80480de | Add grab/spider/network_service.py | grab/spider/network_service.py | grab/spider/network_service.py | import time
from six.moves.queue import Empty
from grab.error import GrabNetworkError, GrabTooManyRedirectsError, GrabInvalidUrl
from grab.util.misc import camel_case_to_underscore
from grab.spider.base_service import BaseService
def make_class_abbr(name):
    """Turn a CamelCase class name into a dash-separated abbreviation."""
    return camel_case_to_underscore(name).replace("_", "-")
class NetworkServiceThreaded(BaseService):
    """Thread-pool network service: each worker pulls tasks from the spider's
    queue, performs the grab request, and hands results to the dispatcher."""

    def __init__(self, spider, thread_number):
        # Create and register thread_number workers, all running
        # worker_callback.
        super().__init__(spider)
        self.thread_number = thread_number
        self.worker_pool = []
        for _ in range(self.thread_number):
            self.worker_pool.append(self.create_worker(self.worker_callback))
        self.register_workers(self.worker_pool)

    def get_active_threads_number(self):
        # Count workers whose busy flag is currently set.
        return sum(
            1
            for x in self.iterate_workers(self.worker_registry)
            if x.is_busy_event.is_set()
        )

    # TODO: supervisor worker to restore failed worker threads
    def worker_callback(self, worker):
        # Main worker loop; runs until the worker's stop event is set.
        # NOTE(review): an exception outside the caught tuple (e.g. from
        # setup_grab_for_task or grab.request raising something unexpected)
        # propagates out and kills this worker thread -- hence the TODO above.
        while not worker.stop_event.is_set():
            worker.process_pause_signal()
            try:
                task = self.spider.get_task_from_queue()
            except Empty:
                # No task available; idle briefly.
                time.sleep(0.1)
            else:
                if task is None or task is True:
                    # Sentinel values mean "nothing to do right now".
                    time.sleep(0.1)
                else:
                    worker.is_busy_event.set()
                    try:
                        task.network_try_count += 1  # pylint: disable=no-member
                        is_valid, reason = self.spider.check_task_limits(task)
                        if is_valid:
                            grab = self.spider.setup_grab_for_task(task)
                            # TODO: almost duplicate of
                            # Spider.submit_task_to_transport
                            grab_config_backup = grab.dump_config()
                            self.spider.process_grab_proxy(task, grab)
                            self.spider.stat.inc("spider:request-network")
                            self.spider.stat.inc("spider:task-%s-network" % task.name)
                            # self.freelist.pop()
                            try:
                                # Optimistic result record; overwritten with
                                # error details below when the request fails.
                                result = {
                                    "ok": True,
                                    "ecode": None,
                                    "emsg": None,
                                    "error_abbr": None,
                                    "grab": grab,
                                    "grab_config_backup": (grab_config_backup),
                                    "task": task,
                                    "exc": None,
                                }
                                try:
                                    grab.request()
                                except (
                                    GrabNetworkError,
                                    GrabInvalidUrl,
                                    GrabTooManyRedirectsError,
                                ) as ex:
                                    is_redir_err = isinstance(
                                        ex, GrabTooManyRedirectsError
                                    )
                                    orig_exc_name = (
                                        ex.original_exc.__class__.__name__
                                        if hasattr(ex, "original_exc")
                                        else None
                                    )
                                    # UnicodeError: see #323
                                    # Report the wrapper exception itself for
                                    # these cases; otherwise report the
                                    # underlying original exception class.
                                    if (
                                        is_redir_err
                                        or isinstance(ex, GrabInvalidUrl)
                                        or orig_exc_name == "error"
                                        or orig_exc_name == "UnicodeError"
                                    ):
                                        ex_cls = ex
                                    else:
                                        ex_cls = ex.original_exc
                                    result.update(
                                        {
                                            "ok": False,
                                            "exc": ex,
                                            "error_abbr": (
                                                "too-many-redirects"
                                                if is_redir_err
                                                else make_class_abbr(
                                                    ex_cls.__class__.__name__
                                                )
                                            ),
                                        }
                                    )
                                # Hand the (success or failure) result to the
                                # task dispatcher.
                                (
                                    self.spider.task_dispatcher.input_queue.put(
                                        (result, task, None)
                                    )
                                )
                            finally:
                                pass
                                # self.freelist.append(1)
                        else:
                            # Task exceeded its limits: log and run its
                            # fallback handler, if any.
                            self.spider.log_rejected_task(task, reason)
                            # pylint: disable=no-member
                            handler = task.get_fallback_handler(self.spider)
                            # pylint: enable=no-member
                            if handler:
                                handler(task)
                    finally:
                        worker.is_busy_event.clear()
| Python | 0.000001 | |
c718cf1d483b2570b886269cf990458b195500b5 | Remove Access-Control-Allow-Origin after all | gratipay/utils/cache_static.py | gratipay/utils/cache_static.py | """
Handles caching of static resources.
"""
from base64 import b64encode
from hashlib import md5
from aspen import Response
# Process-wide cache of computed asset etags, keyed by file path.
ETAGS = {}
def asset_etag(path):
    """Return a querystring-safe etag for the asset at *path*.

    Simplates ('.spt') are dynamic and get an empty etag.  Static assets
    are hashed once and the result is memoised in ETAGS.
    """
    if path.endswith('.spt'):
        return ''
    if path not in ETAGS:
        with open(path) as f:
            # md5 of the file body, base64url-encoded, with '=' padding
            # swapped for '~' so the value is URL-friendly.
            ETAGS[path] = b64encode(md5(f.read()).digest(), '-_').replace('=', '~')
    return ETAGS[path]
# algorithm functions
def get_etag_for_file(dispatch_result):
    """Algorithm function: compute the etag for the dispatched file."""
    etag = asset_etag(dispatch_result.match)
    return {'etag': etag}
def try_to_serve_304(website, dispatch_result, request, etag):
    """Short-circuit with a 304 when the client already has this static asset.

    Dynamic resources (empty etag) pass straight through.  A mismatched
    ``?etag=`` querystring is a hard 410 so that one version of a file is
    never served as if it were another.
    """
    if not etag:
        return  # dynamic resource; nothing to do
    requested = request.line.uri.querystring.get('etag')
    if requested and requested != etag:
        raise Response(410)
    client_etag = request.headers.get('If-None-Match')
    if client_etag == etag:
        # The client's cached copy is current: 304 Not Modified. :D
        raise Response(304)
    # Otherwise the client sent no conditional header, or a stale one --
    # fall through and serve the asset normally.
def add_caching_to_response(website, response, request=None, etag=None):
    """Set caching headers for static resources.

    No-op for dynamic resources (``etag is None``) and for responses
    other than 200/304.
    """
    if etag is None:
        return
    assert request is not None  # sanity check
    if response.code not in (200, 304):
        return
    # https://developers.google.com/speed/docs/best-practices/caching
    response.headers['Vary'] = 'accept-encoding'
    response.headers['Etag'] = etag
    if request.line.uri.querystring.get('etag'):
        # We can cache "indefinitely" when the querystring contains the etag.
        response.headers['Cache-Control'] = 'public, max-age=31536000'
    else:
        # Otherwise we cache for 5 seconds
        response.headers['Cache-Control'] = 'public, max-age=5'
| """
Handles caching of static resources.
"""
from base64 import b64encode
from hashlib import md5
from aspen import Response
ETAGS = {}
def asset_etag(path):
if path.endswith('.spt'):
return ''
if path in ETAGS:
h = ETAGS[path]
else:
with open(path) as f:
h = ETAGS[path] = b64encode(md5(f.read()).digest(), '-_').replace('=', '~')
return h
# algorithm functions
def get_etag_for_file(dispatch_result):
return {'etag': asset_etag(dispatch_result.match)}
def try_to_serve_304(website, dispatch_result, request, etag):
"""Try to serve a 304 for static resources.
"""
if not etag:
# This is a request for a dynamic resource.
return
qs_etag = request.line.uri.querystring.get('etag')
if qs_etag and qs_etag != etag:
# Don't serve one version of a file as if it were another.
raise Response(410)
headers_etag = request.headers.get('If-None-Match')
if not headers_etag:
# This client doesn't want a 304.
return
if headers_etag != etag:
# Cache miss, the client sent an old or invalid etag.
return
# Huzzah!
# =======
# We can serve a 304! :D
raise Response(304)
def add_caching_to_response(website, response, request=None, etag=None):
    """Set caching headers (and a CORS header) for static resources.

    No-op for dynamic resources (``etag is None``) and for responses
    other than 200/304.
    """
    if etag is None:
        return
    assert request is not None  # sanity check
    if response.code not in (200, 304):
        return
    # https://developers.google.com/speed/docs/best-practices/caching
    response.headers['Vary'] = 'accept-encoding'
    response.headers['Etag'] = etag
    # Allow cross-origin use of the assets, but never clobber a header an
    # upstream handler already set.
    if 'Access-Control-Allow-Origin' not in response.headers:
        response.headers['Access-Control-Allow-Origin'] = 'https://gratipay.com'
    if request.line.uri.querystring.get('etag'):
        # We can cache "indefinitely" when the querystring contains the etag.
        response.headers['Cache-Control'] = 'public, max-age=31536000'
    else:
        # Otherwise we cache for 5 seconds
        response.headers['Cache-Control'] = 'public, max-age=5'
| Python | 0 |
2f6bfddbff166115e59db7763a62258a06b4e789 | Apply orphaned migration | project/apps/api/migrations/0010_remove_chart_song.py | project/apps/api/migrations/0010_remove_chart_song.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Records the removal of the orphaned ``Chart.song`` foreign key so the
    # migration history matches the current model definition.
    dependencies = [
        ('api', '0009_auto_20150722_1041'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='chart',
            name='song',
        ),
    ]
| Python | 0 | |
b3ab8fa855a08f0d63885b6df206715d1f36a817 | Add DNS-over-HTTPS example script | mrequests/examples/dns-over-https.py | mrequests/examples/dns-over-https.py | import mrequests
from urlencode import urlencode
DOH_IP = "1.1.1.1"
DOH_SERVER = b"cloudflare-dns.com"
DOH_PATH = "/dns-query"
def gethostbyname(name):
    """Resolve *name* to its A records via Cloudflare's DNS-over-HTTPS API.

    The request is sent to the resolver's IP literal directly (so no
    bootstrap DNS lookup is needed) while the Host header carries the
    resolver's hostname.  Returns a list of record-data strings on
    success; implicitly returns None when the HTTP call fails or the DNS
    Status is non-zero.
    """
    params = urlencode({
        "name": name,
        "type": "A"
    })
    headers = {
        b"accept": b"application/dns-json",
        b"user-agent": b"mrequests.py",
        b"Host": DOH_SERVER
    }
    req = mrequests.get(
        "https://{}{}?{}".format(DOH_IP, DOH_PATH, params),
        headers=headers
    )
    # ~ print(req.status_code)
    if req.status == 200:
        reply = req.json()
    else:
        reply = {}
    req.close()
    # DNS RCODE 0 == NOERROR; any other status falls through (returns None).
    if reply.get("Status") == 0:
        return [item["data"] for item in reply.get("Answer", [])]
if __name__ == '__main__':
    import sys
    #name = sys.argv[1]
    name = "httpbin.org"
    res = gethostbyname(name)
    if res:
        print(" ".join(res))
    else:
        print("Could not resolve host name '{}'.".format(name), file=sys.stderr)
| Python | 0 | |
4f765997c740f1f9b2dc985e7f3b0a467e8c311a | add code. | image_to_yymmdd_dir_by_EXIF.py | image_to_yymmdd_dir_by_EXIF.py | # -*- coding: utf-8 -*-
from PIL import Image
import os
import shutil
user_name = os.getlogin()
# image/hoge.jpg, image/fuga.png, etc...
src_dir = "/Users/" + user_name + "/Desktop/image/"
# create dst_dir/yyyymmdd/
dst_dir = "/Users/" + user_name + "/Desktop/dst_dir/"
if os.path.exists(dst_dir) == False:
os.mkdir(dst_dir)
for root, dirs, files in os.walk(src_dir):
for filename in files:
try:
image_info = Image.open(src_dir + filename)
# 36867 : EXIF DateTimeOriginal
date = image_info._getexif()[36867]
yyyy, mm, dd = date[:4], date[5:7], date[8:10]
yyyymmdd_dir = os.path.join(dst_dir, yyyy + mm + dd)
if os.path.exists(yyyymmdd_dir) == False:
os.mkdir(yyyymmdd_dir)
dst = os.path.join(yyyymmdd_dir, filename)
if os.path.exists(dst) == False:
shutil.copy2(src_dir + filename, dst)
except Exception as e:
# .DS_Store must Die
print filename + ' is fail.'
| Python | 0.000001 | |
f7a1998f67a02530604e4b727c7600704e4eb341 | update pelu to K2 | keras_contrib/layers/advanced_activations.py | keras_contrib/layers/advanced_activations.py | from .. import initializers
from keras.engine import Layer
from .. import backend as K
import numpy as np
class PELU(Layer):
    """Parametric Exponential Linear Unit.
    It follows:
    `f(x) = alphas * (exp(x / betas) - 1) for x < 0`,
    `f(x) = (alphas / betas) * x for x >= 0`,
    where `alphas` & `betas` are learned arrays with the same shape as x.
    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
    # Output shape
        Same shape as the input.
    # Arguments
        alphas_initializer: initialization function for the alpha variable weights.
        betas_initializer: initialization function for the beta variable weights.
        weights: initial weights, as a list of a single Numpy array.
        shared_axes: the axes along which to share learnable
            parameters for the activation function.
            For example, if the incoming feature maps
            are from a 2D convolution
            with output shape `(batch, height, width, channels)`,
            and you wish to share parameters across space
            so that each filter only has one set of parameters,
            set `shared_axes=[1, 2]`.
    # References
        - [PARAMETRIC EXPONENTIAL LINEAR UNIT FOR DEEP CONVOLUTIONAL NEURAL NETWORKS](https://arxiv.org/abs/1605.09332v3)
    """
    def __init__(self, alphas_initializer='one', betas_initializer='one', weights=None, shared_axes=None, **kwargs):
        self.supports_masking = True
        self.alphas_initializer = initializers.get(alphas_initializer)
        self.betas_initializer = initializers.get(betas_initializer)
        self.initial_weights = weights
        # Normalise shared_axes to a list so build() can iterate it.
        if not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes)
        super(PELU, self).__init__(**kwargs)
    def build(self, input_shape):
        param_shape = list(input_shape[1:])
        self.param_broadcast = [False] * len(param_shape)
        if self.shared_axes[0] is not None:
            for i in self.shared_axes:
                param_shape[i - 1] = 1
                self.param_broadcast[i - 1] = True
        # Initialised as ones to emulate the default ELU.
        # BUG FIX: these previously referenced the non-existent attributes
        # `alphas_initializerializer` / `betas_initializerializer`, raising
        # AttributeError the first time the layer was built.
        self.alphas = self.add_weight(param_shape,
                                      name='alpha',
                                      initializer=self.alphas_initializer)
        self.betas = self.add_weight(param_shape, name='betas', initializer=self.betas_initializer)
        self.trainable_weights = [self.alphas, self.betas]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
    def call(self, x, mask=None):
        if K.backend() == 'theano':
            # Theano needs explicit broadcast patterns on the shared axes.
            pos = K.relu(x) * (K.pattern_broadcast(self.alphas, self.param_broadcast) /
                               K.pattern_broadcast(self.betas, self.param_broadcast))
            neg = (K.pattern_broadcast(self.alphas, self.param_broadcast) *
                   (K.exp((-K.relu(-x)) / K.pattern_broadcast(self.betas, self.param_broadcast)) - 1))
        else:
            pos = K.relu(x) * self.alphas / self.betas
            neg = self.alphas * (K.exp((-K.relu(-x)) / self.betas) - 1)
        return neg + pos
    def get_config(self):
        # BUG FIX: `betas_initializer` was referenced as a bare (undefined)
        # name, so serialising the layer raised NameError.
        config = {'alphas_initializer': initializers.serialize(self.alphas_initializer),
                  'betas_initializer': initializers.serialize(self.betas_initializer)}
        base_config = super(PELU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| from .. import initializers
from keras.engine import Layer
from .. import backend as K
import numpy as np
class PELU(Layer):
    """Parametric Exponential Linear Unit.
    It follows:
    `f(x) = alphas * (exp(x / betas) - 1) for x < 0`,
    `f(x) = (alphas / betas) * x for x >= 0`,
    where `alphas` & `betas` are learned arrays with the same shape as x.
    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
    # Output shape
        Same shape as the input.
    # Arguments
        alphas_initializer: initialization function for the alpha variable weights.
        betas_initializer: initialization function for the beta variable weights.
        weights: initial weights, as a list of a single Numpy array.
        shared_axes: the axes along which to share learnable
            parameters for the activation function.
            For example, if the incoming feature maps
            are from a 2D convolution
            with output shape `(batch, height, width, channels)`,
            and you wish to share parameters across space
            so that each filter only has one set of parameters,
            set `shared_axes=[1, 2]`.
    # References
        - [PARAMETRIC EXPONENTIAL LINEAR UNIT FOR DEEP CONVOLUTIONAL NEURAL NETWORKS](https://arxiv.org/abs/1605.09332v3)
    """
    def __init__(self, alphas_initializer='one', betas_initializer='one', weights=None, shared_axes=None, **kwargs):
        self.supports_masking = True
        self.alphas_initializer = initializers.get(alphas_initializer)
        self.betas_initializer = initializers.get(betas_initializer)
        self.initial_weights = weights
        # Normalise shared_axes to a list so build() can iterate it.
        if not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes)
        # BUG FIX: this called the non-existent `__initializer__`; the base
        # Layer constructor is `__init__`.
        super(PELU, self).__init__(**kwargs)
    def build(self, input_shape):
        param_shape = list(input_shape[1:])
        self.param_broadcast = [False] * len(param_shape)
        if self.shared_axes[0] is not None:
            for i in self.shared_axes:
                param_shape[i - 1] = 1
                self.param_broadcast[i - 1] = True
        # Initialised as ones to emulate the default ELU.
        # BUG FIX: these previously referenced the non-existent attributes
        # `alphas_initializerializer` / `betas_initializerializer`.
        self.alphas = self.add_weight(param_shape,
                                      name='alpha',
                                      initializer=self.alphas_initializer)
        self.betas = self.add_weight(param_shape, name='betas', initializer=self.betas_initializer)
        self.trainable_weights = [self.alphas, self.betas]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
    def call(self, x, mask=None):
        if K.backend() == 'theano':
            # Theano needs explicit broadcast patterns on the shared axes.
            pos = K.relu(x) * (K.pattern_broadcast(self.alphas, self.param_broadcast) /
                               K.pattern_broadcast(self.betas, self.param_broadcast))
            neg = (K.pattern_broadcast(self.alphas, self.param_broadcast) *
                   (K.exp((-K.relu(-x)) / K.pattern_broadcast(self.betas, self.param_broadcast)) - 1))
        else:
            pos = K.relu(x) * self.alphas / self.betas
            neg = self.alphas * (K.exp((-K.relu(-x)) / self.betas) - 1)
        return neg + pos
    def get_config(self):
        # BUG FIX: `betas_initializer` was referenced as a bare (undefined)
        # name, so serialising the layer raised NameError.
        config = {'alphas_initializer': initializers.serialize(self.alphas_initializer),
                  'betas_initializer': initializers.serialize(self.betas_initializer)}
        base_config = super(PELU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| Python | 0 |
920dbe007501ea99b95c41f94fb8f4a48c40717a | Add SensorsCollector, which collects data from libsensors via PySensors | src/collectors/SensorsCollector/SensorsCollector.py | src/collectors/SensorsCollector/SensorsCollector.py | import diamond.collector
import sensors
class SensorsCollector(diamond.collector.Collector):
    """
    This class collects data from libsensors. It should work against libsensors 2.x and 3.x, pending
    support within the PySensors Ctypes binding: http://pypi.python.org/pypi/PySensors/
    Requires: 'sensors' to be installed, configured, and the relevant kernel modules to be loaded.
    Requires: PySensors requires Python 2.6+
    If you're having issues, check your version of 'sensors'. This collector written against:
    sensors version 3.1.2 with libsensors version 3.1.2
    """
    def get_default_config(self):
        """
        Returns default collector settings.
        """
        # NOTE: values are strings here -- presumably the diamond collector
        # config convention; confirm before changing to real booleans.
        return {
            'enabled': 'True',
            'path': 'sensors',
            'fahrenheit': 'True'
        }
    def collect(self):
        """Publish one metric per chip feature, named "<chip>.<label>"."""
        sensors.init()
        try:
            for chip in sensors.iter_detected_chips():
                for feature in chip:
                    self.publish(".".join([str(chip), feature.label]), feature.get_value())
        finally:
            # Always release libsensors resources, even if publishing fails.
            sensors.cleanup()
| Python | 0 | |
68e43eafc1bb8e060ee105bcc9e3c354486dfcd2 | add unit tests for dataset.py | dataset_tests.py | dataset_tests.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 16:18:02 2017
Unit tests for dataset.py
@author: duc
"""
import unittest
import dataset as ds
import numpy as np
from flesch_kincaid import get_flesch_grade_level as lvl
from nltk.corpus import cmudict
pronDict = cmudict.dict()
class DatasetTests(unittest.TestCase):
    """Unit tests for the helper functions in dataset.py."""
    def test_normalize(self):
        # Each column is scaled into [0, 1] relative to its own maximum
        # (as implied by the expected matrix below).
        m = np.array([[1, 4, 0.5, 9], [0, 2, 0.2, 2], [0, 1, 0.01, 8], [1, 2.5, 0.3, 3]])
        norm = np.array([[1, 1, 1, 1], [0, 0.5, 0.4, 0.222], [0, 0.25, 0.02, 0.888], [1, 0.625, 0.6, 0.333]])
        decimal = 3
        np.testing.assert_array_almost_equal(ds.normalize(m), norm, decimal)
    def test_combined_keywords(self):
        # Union of every keyword appearing in either tweet collection.
        t1 = [[["fake", "media"]], [["fake", "news"]]]
        t2 = [[["women", "hillary"]], [["media", "trump"]]]
        keywords = set(["fake", "media", "news", "women", "hillary", "trump"])
        self.assertEqual(ds.get_combined_keywords(t1, t2), keywords)
    def test_keywords_count(self):
        # Occurrence counts accumulated into the supplied dict.
        # NOTE(review): the local name `dict` shadows the builtin.
        t = [["make", "america", "great", "again"],["america", "was", "great"]]
        dict = {"make": 0, "america": 0, "great": 0, "again": 0, "was": 0}
        counted = {"make": 1, "america": 2, "great": 2, "again": 1, "was": 1}
        self.assertEqual(ds.get_keywords_count(t, dict), counted)
    def test_extract_features(self):
        # NOTE(review): the expected layout appears to be word count,
        # sentence count, Flesch-Kincaid grade level, then one presence
        # flag per keyword -- confirm against dataset.extract_features.
        t = ["Make america great again!", "America was great! Hillary Clinton"]
        norm = [[["make", "america", "great", "again"]],[["america", "was", "great"], ["hillary", "clinton"]]]
        count = {"make": 0, "america": 0, "great": 0, "again": 0, "was": 0, "hillary": 0, "clinton": 0}
        features = [
            [4, 1, lvl(norm[0], pronDict), 1, 1, 1, 1, 0, 0, 0],
            [2.5, 1, lvl(norm[1], pronDict), 0, 1, 1, 0, 1, 1, 1]
        ]
        print(features)
        self.assertEqual(ds.extract_features(t, norm, count, pronDict), features)
    def test_positive_negative_amount(self):
        # Must accept both plain lists and numpy arrays; presumably rows
        # whose first element is 1 count as positive.
        m = [[0, 1, 0.5, 1, 0.02], [1, 1, 1, 0.3, 0.99], [1, 0, 0, 0, 0]]
        n = np.array(m)
        self.assertEqual(ds.get_positive_negative_amount(m), (2, 1))
        self.assertEqual(ds.get_positive_negative_amount(n), (2, 1))
    def test_training_set(self):
        # should have 50% positive and 50% negative examples
        # NOTE(review): depends on live tweet data for the two accounts.
        ts = ds.divide_data_into_sets(ds.get_prepared_tweet_data("realDonaldTrump", "HillaryClinton"), 0.1, 0.1, 0.8)[2]
        count = ds.get_positive_negative_amount(ts)
        self.assertEqual(count[0], count[1])
if __name__ == '__main__':
unittest.main() | Python | 0.000006 | |
66f32607d9d140be2a8e71270862074c53121a68 | Create dataUIwgt.py | pyside/pyside_basics/jamming/dataUIwgt.py | pyside/pyside_basics/jamming/dataUIwgt.py | from PySide import QtGui
class Data(object):
    """Model object: the item names to display and the actions offered."""
    def __init__(self):
        self.requiredNames = "A B C D E".split(' ')
        self.availableActions = "Set Select Delete".split(' ')
    # Action methods; for now each one only prints what it was asked to do.
    def Set(self, name):
        print "setting ", name
    def Select(self, name):
        print "selecting ", name
    def Delete(self, name):
        print "deleting ", name
from PySide import QtCore
class ActionButton(QtGui.QPushButton):
    """Push button that re-emits clicks as an (itemName, actionName) pair,
    so one slot can serve every button in a grid.
    """
    # BUG FIX: QtCore was never imported in this module, so creating the
    # Signal below raised a NameError at import time.
    delegateActionSignal = QtCore.Signal((str, str))
    def __init__(self, itemName, actionName, parent=None):
        super(ActionButton, self).__init__(parent)
        self.itemName = itemName
        self.actionName = actionName
        self.clicked.connect(self._delegate)
        self.setText(self.actionName)
    def _delegate(self):
        # Forward the click together with this button's identity.
        self.delegateActionSignal.emit(self.itemName, self.actionName)
# def delegated(itemName, actionName):
# print itemName, actionName
#
# self = ActionButton('A', 'Set')
# self.delegateActionSignal.connect(delegated)
# self.show()
class DataUIWidget(QtGui.QWidget):
    """Grid of item labels, each followed by one button per available action."""
    def __init__(self, data, parent=None):
        super(DataUIWidget, self).__init__(parent)
        self.data = data
        self._setupUI()
    def handleAction(self, itemName, actionName):
        # Single slot for every ActionButton; currently just logs the pair.
        print itemName, actionName
    def _setupUI(self):
        layout = QtGui.QGridLayout()
        self.setLayout(layout)
        # One row per item: label in column 0, action buttons from column 1.
        for index, name in enumerate(self.data.requiredNames):
            lbl = QtGui.QLabel(name)
            layout.addWidget(lbl, index, 0)
            for ind, actName in enumerate(self.data.availableActions, 1):
                btn = ActionButton(name, actName)
                btn.delegateActionSignal.connect(self.handleAction)
                layout.addWidget(btn, index, ind)
data = Data()
self = DataUIWidget(data)
self.show()
| Python | 0.000002 | |
ae948c95ea0087f33f13ef3463dc022eda0301a2 | Add a solution for the MadLibs lab | python/labs/make-a-short-story/mystory.py | python/labs/make-a-short-story/mystory.py | # Create a function for adjectives so I don't repeat myself in prompts.
def get_adjective():
    """Prompt the player for an adjective (Python 2: raw_input)."""
    return raw_input("Give me an adjective: ")
def get_noun():
    """Prompt the player for a noun."""
    return raw_input("Give me a noun: ")
def get_verb():
    """Prompt the player for a verb."""
    return raw_input("Give me a verb: ")
# Collect two of each word kind before revealing the story.
adjective1 = get_adjective()
noun1 = get_noun()
verb1 = get_verb()
adjective2 = get_adjective()
noun2 = get_noun()
verb2 = get_verb()
# Use parentheses so Python will "know" the string has multiple lines
print ("At CSSI we were all " + adjective1 + " when a " + noun1 +
       " fell through the ceiling. See-Mong tried to " + verb1 + " it but it " +
       "was too " + adjective2 + ". Instead, Zack gave it a " + noun2 + " which " +
       "caused it to " + verb2 + ".")
| Python | 0.000008 | |
3cf1eb01540a126ef6a38219f89a41a0f05ad63f | Format fixing | constants.py | constants.py | UNITS = "SI"
UNIT_LENGTH = 1
UNIT_MASS = 1
UNIT_TIME = 1
DEFAULT_GRAVITATIONAL_CONSTANT = 6.673e-11  # m3 kg-1 s-2
DEFAULT_SPEED_OF_LIGHT = 299792458  # m s-1
DEFAULT_SOLAR_MASS = 1.98892e30  # kg
DEFAULT_PARSEC = 3.08568025e16  # m
DEFAULT_YEAR = 31556926  # s
DEFAULT_h = 0.73
G = GRAVITATIONAL_CONSTANT = DEFAULT_GRAVITATIONAL_CONSTANT
c = SPEED_OF_LIGHT = DEFAULT_SPEED_OF_LIGHT
def set_units(units):
    """Switch the module's unit system ("SI", "GALACTIC" or "CGI").

    Rebinds the UNIT_* scale factors and the derived constants
    G/GRAVITATIONAL_CONSTANT and c/SPEED_OF_LIGHT at module level.
    ("CGI" is this module's historical name for CGS: cm, g, s.)
    Raises ValueError for an unknown system.
    """
    global UNITS
    # BUG FIX: UNIT_LENGTH/UNIT_MASS/UNIT_TIME were missing from the
    # global declarations, so the module-level factors never changed --
    # only the locals inside this function did.
    global UNIT_LENGTH, UNIT_MASS, UNIT_TIME
    global c, SPEED_OF_LIGHT, G, GRAVITATIONAL_CONSTANT
    if units == "SI":
        UNIT_LENGTH = 1
        UNIT_MASS = 1
        UNIT_TIME = 1
    elif units == "GALACTIC":
        UNIT_LENGTH = (1e6 * DEFAULT_PARSEC / DEFAULT_h)  # 1.0 Mpc h^-1
        UNIT_MASS = (1e10 * DEFAULT_SOLAR_MASS / DEFAULT_h)  # 10^10 M_solar h^-1
        UNIT_TIME = (1e3 * DEFAULT_PARSEC / DEFAULT_h)  # 977.8 Gyr h^-1
    elif units == "CGI":
        UNIT_LENGTH = 0.01
        UNIT_MASS = 0.001
        UNIT_TIME = 1
    else:
        # Previously this fell through and crashed with UnboundLocalError.
        raise ValueError("unknown unit system: %r" % (units,))
    UNITS = units
    G = GRAVITATIONAL_CONSTANT = DEFAULT_GRAVITATIONAL_CONSTANT * UNIT_MASS * UNIT_TIME**2 / UNIT_LENGTH**3
    c = SPEED_OF_LIGHT = DEFAULT_SPEED_OF_LIGHT * UNIT_TIME / UNIT_LENGTH
set_units("SI")
476f2493576c55c0f412165e3c3ce8225599ba0a | Copy caller_checker.py | server/src/voodoo/gen2/caller_checker.py | server/src/voodoo/gen2/caller_checker.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduรฑa <pablo@ordunya.com>
#
# Sentinel meaning "no caller restriction": any server may invoke the method.
ALL = 'All servers'
def caller_check(servers = ALL):
    """Decorator factory that will eventually restrict which servers may
    call the decorated method.  The actual check is still a TODO, so for
    now the wrapper is a transparent pass-through that keeps the wrapped
    function's name and docstring.
    """
    def decorate(func):
        def checked(*args, **kwargs):
            # TODO: normalise `servers` to a tuple and validate the caller
            # before dispatching to func.
            return func(*args, **kwargs)
        checked.__name__ = func.__name__
        checked.__doc__ = func.__doc__
        return checked
    return decorate
| Python | 0.000004 | |
77b34390345208a6e0bc5ad30cdce62e42ca0c56 | Add simple command to list speakers and tickets | wafer/management/commands/pycon_speaker_tickets.py | wafer/management/commands/pycon_speaker_tickets.py | import sys
import csv
from optparse import make_option
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from wafer.talks.models import ACCEPTED
class Command(BaseCommand):
    """Write one CSV row (full name, email, ticket) per speaker to stdout."""
    help = "List speakers and associated tickets."
    option_list = BaseCommand.option_list + tuple([
        make_option('--speakers', action="store_true", default=False,
                    help='List speakers and tickets (for accepted talks)'),
        make_option('--allspeakers', action="store_true", default=False,
                    help='List speakers and tickets (for all talks)'),
    ])
    # NOTE(review): only 'allspeakers' is consulted below; '--speakers'
    # merely documents the default behaviour -- confirm that is intended.
    def _speaker_tickets(self, options):
        """Emit the CSV; people with no (matching) talks are skipped."""
        people = User.objects.filter(talks__isnull=False).distinct()
        csv_file = csv.writer(sys.stdout)
        for person in people:
            # We query talks to filter out the speakers from ordinary
            # accounts
            if options['allspeakers']:
                titles = [x.title for x in person.talks.all()]
            else:
                titles = [x.title for x in
                          person.talks.filter(status=ACCEPTED)]
            if not titles:
                continue
            tickets = person.ticket.all()
            if tickets:
                ticket = '%d' % tickets[0].barcode
            else:
                ticket = 'NO TICKET PURCHASED'
            row = [x.encode("utf-8") for x in (person.get_full_name(),
                                               person.email,
                                               ticket)]
            csv_file.writerow(row)
    def handle(self, *args, **options):
        self._speaker_tickets(options)
| Python | 0 | |
c2e882855ea56c265ef46646ec5e20f78d0ad064 | add migrations for missing phaselogs after fixing bulk project status updates | bluebottle/projects/migrations/0028_auto_20170619_1555.py | bluebottle/projects/migrations/0028_auto_20170619_1555.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-19 13:55
from __future__ import unicode_literals
import datetime
from django.db import migrations
def fix_phaselog_for_incorrect_project_statuses(apps, schema_editor):
    """
    #BB-9886 : Fix to add a new project phase status logs for projects whose status does not correspond to the last
    project phase status log. We have to fake a timestamp as we dont know when the status was really updated.
    """
    Project = apps.get_model('projects', 'Project')
    ProjectPhaseLog = apps.get_model('projects', 'ProjectPhaseLog')
    for project in Project.objects.all():
        last_project_phase_log = ProjectPhaseLog.objects.filter(project=project).order_by('start').last()
        # NOTE(review): .last() returns None for a project with no logs at
        # all, which would raise AttributeError below -- confirm every
        # project is guaranteed at least one phase log.
        if project.status != last_project_phase_log.status:
            # Fake a timestamp just after the latest known log entry.
            start = last_project_phase_log.start + datetime.timedelta(minutes = 1)
            log = ProjectPhaseLog.objects.create(project=project, status=project.status, start=start)
            log.save()
def dummy(apps, schema_editor):
    # Reverse migration: the synthesised log entries are left in place.
    pass
class Migration(migrations.Migration):
    dependencies = [
        ('projects', '0027_auto_20170602_2240'),
    ]
    operations = [
        migrations.RunPython(fix_phaselog_for_incorrect_project_statuses, dummy),
    ]
| Python | 0 | |
c628e5ed57effd4386c913b0cb47884e61c7db88 | Use camera height, and display disk | research/triangulation_3/Triangulation.py | research/triangulation_3/Triangulation.py |
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from math import sin, cos, tan, atan, pi
from pylab import imread
from mpl_toolkits.mplot3d import Axes3D
# In[2]:
image = imread("lines1.png")
plt.imshow(image)
plt.show()
#### Formules de position
# In[3]:
def position(laser, gamma, theta, phi):
"""
laser: position (x,y,z) du laser par rapport ร la camera
gamma: angle que fait le laser avec le plan ortogonal ร la camรฉra
theta: angle horizontal du rayon de la camera
phi : angle vertical du rayon de la camera
"""
# vecteur directeur du rayon sortant de la camera
ray = np.array([sin(theta), cos(theta), tan(phi)])
# Matrice tq (matrix) * (l, m, z) = (laser)
matrix = np.array([
[cos(gamma), 0, sin(theta)],
[sin(gamma), 0, cos(theta)],
[ 0, 1, tan(phi) ]
])
l, m, z = np.linalg.solve(matrix, -laser)
return z * ray
# In[4]:
CAMERA_HEIGHT = 39
PLATE_HEIGHT = 18.5
RELATIVE_HEIGHT = CAMERA_HEIGHT - PLATE_HEIGHT
CAM_DISTANCE = 53.2
def theta_phi(alpha, image_shape, position):
x, y = map(float, position)
w, h = map(float, image_shape)
ratio = w/h
beta = alpha / ratio
theta = (x - w/2)/w * alpha
phi = (h/2 - y)/h * beta
return theta, phi
#### Paramรจtres du sytรจme
# In[5]:
def deg2rad(x): return pi*float(x)/180
def rad2deg(x): return 180*float(x)/pi
GAMMA_D = deg2rad(83)
GAMMA_G = deg2rad(78)
ALPHA = deg2rad(60)
LASER_G = np.array([CAM_DISTANCE * tan(pi/2-GAMMA_G), 0, 0])
LASER_D = np.array([CAM_DISTANCE * tan(pi/2-GAMMA_D), 0, 0])
# In[6]:
tuple(position(LASER_G, GAMMA_G, 0, 0)) # Devrait รชtre (0, 53.2, 0)
#### Calcul des positions des points
# In[7]:
XYZ = []
IJ = []
H, W = image.shape[:2]
for i in range(H):
for j in range(W):
if tuple(image[i][j]) != (0, 0, 0):
IJ.append((j, i))
theta, phi = theta_phi(ALPHA/2, [W, H], [j, i])
gamma = GAMMA_G if theta < 0 else GAMMA_D
laser = LASER_G if theta < 0 else LASER_D
XYZ.append(position(laser, gamma, theta, phi))
X, Y, Z = map(np.array, zip(*XYZ))
I, J = map(np.array, zip(*IJ))
XYZ[0]
# In[8]:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, Y, Z)
ax.plot([0, 0], [0, CAM_DISTANCE], [0, 0], color='red')
plt.xlim(-50, 50)
plt.ylim(0, 60)
plt.show()
# In[9]:
photo = imread("imgs/04.png")
h, w = photo.shape[:2]
plt.imshow(photo)
plt.scatter(I, J)
plt.plot([w/2, w/2], [0, h], 'y')
plt.show()
# In[10]:
get_ipython().magic(u'pinfo plt.grid')
# In[10]:
| Python | 0 | |
9cb5658c53a2202931e314ced3ee66714301a087 | Create _im_rot_manual_detect.py | resources/_py_in/_im_rot_manual_detect.py | resources/_py_in/_im_rot_manual_detect.py | # PYTHON
# MANISH DEVGAN
# https://github.com/gabru-md
# Program helps in detecting faces which are
# tilted right or left! The detection is done by
# rotating the image and the trying to detect the
# potential faces in it!
#BEGIN
# importing
import cv2
import numpy as np
import os
import sys
# function to rotate the image to a specific angle begins
def rotate(img,angle):
image = np.copy(img)
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
_im = cv2.warpAffine(image, M, (nW, nH))
# a new vairable is taken instead of the old one as it will then form 2 different copies
# instead of forming a reference of the object or altering the object itself
# now show the rotated image!
return _im
# function ends
# reading image which is to be rotated
# this image will then be further looked in for faces at different angles
image = cv2.imread('te.jpg')
cascPath = "haarcascade_frontalface_default.xml"
os.chdir('C:\Users\Manish\Desktop')
# range is taken from 0 to 360
# therefore we have range(360+1)
for i in range(361):
# new object of image type or numpy.ndarray is created and named _im
# this will have our rotated image
_im = rotate(image,i)
# converting our _im to grayscale to detect potential faces in it!
_gray = cv2.cvtColor(_im,cv2.COLOR_BGR2GRAY)
# declaring a classifier based on the cascade specified
# in this case it is : 'haarcascade_frontalface_default.xml'
faces = faceCascade.detectMultiScale(
_gray,
scaleFactor = 1.2,
minNeighbors=1,
minSize=(15,15),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
# drawing a box around the potential faces that have been identified
for (x,y,w,h) in faces:
cv2.rectangle(_im,(x+int(w*0.18),y+int(h*0.15)),(x+int(w*0.80),y+int(h*0.90)),(0,255,0),2)
# showing the rotated image to the user!
cv2.imshow('Rotated Image',_im)
if cv2.waitKey(0) == 27:
break
#END
| Python | 0.000064 | |
36ada2dc33ccb3cb1803f67a112e3559efd7e821 | Add file to initialize item endpoint - Add item_fields | app/api/item.py | app/api/item.py | """ Routes for bucket_item Functionality"""
# from flask import g
# from flask import Blueprint, request, jsonify
from flask_restplus import fields
# from app.models.bucketlist import Bucketlist
# from app import api
item_fields = {
'id': fields.Integer,
'name': fields.String,
'date_created': fields.DateTime(attribute='created_at'),
'date_modified': fields.DateTime(attribute='modified_at'),
'done': fields.Boolean
}
| Python | 0 | |
6c34347dc0bac58ca4e8e25f355f6ad0f7295ccd | Find/replace with regular expressions | ocradmin/plugins/util_nodes.py | ocradmin/plugins/util_nodes.py | """
Nodes to perform random things.
"""
import re
from nodetree import node, writable_node, manager
from ocradmin.plugins import stages, generic_nodes
NAME = "Utils"
from HTMLParser import HTMLParser
class HTMLContentHandler(HTMLParser):
    """HTML pass-through parser that re-serialises everything it reads.

    Subclasses override content_data() to transform the text between
    tags; the markup itself is echoed unchanged.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self._data = []       # output fragments, joined on demand
        self._ctag = None     # most recently opened tag
        self._cattrs = None   # attributes of that tag
    def data(self):
        """Return everything emitted so far as one string."""
        return "".join(self._data)
    def content_data(self, data, tag, attrs):
        """Hook for subclasses; the default is the identity transform."""
        return data
    def parsefile(self, filename):
        """Feed the parser line by line from a file; return the result."""
        with open(filename, "r") as f:
            for line in f.readlines():
                self.feed(line)
        return self.data()
    def parse(self, string):
        """Reset the output buffer, parse *string*, return the result."""
        self._data = []
        self.feed(string)
        return self.data()
    def handle_decl(self, decl):
        self._data.append("<!%s>" % decl)
    def handle_comment(self, comment):
        self._data.append("<!-- %s -->" % comment)
    def handle_starttag(self, tag, attrs):
        """Echo the open tag and remember it for content_data()."""
        self._ctag = tag
        self._cattrs = attrs
        attr_text = " ".join(["%s='%s'" % pair for pair in attrs])
        self._data.append("<%s %s>" % (tag, attr_text))
    def handle_data(self, data):
        transformed = self.content_data(data, self._ctag, self._cattrs)
        self._data.append(transformed)
    def handle_endtag(self, tag):
        self._data.append("</%s>" % tag)
class FindReplaceNode(node.Node, generic_nodes.TextWriterMixin):
    """
    Find and replace text in the input document.  The 'find' parameter is
    a regular expression and is applied only to character data -- the
    markup itself is echoed unchanged by HTMLContentHandler.
    """
    stage = stages.UTILS
    name = "Utils::FindReplace"
    description = "Find and replace string in HOCR documents"
    arity = 1
    intypes = [generic_nodes.SafeUnicode]
    outtype = generic_nodes.SafeUnicode
    _parameters = [
        dict(name="find", value=""),
        dict(name="replace", value=""),
    ]
    def __init__(self, *args, **kwargs):
        super(FindReplaceNode, self).__init__(*args, **kwargs)
        # Compiled pattern and replacement string, populated by _eval().
        self._findre = None
        self._replace = None
    def _validate(self):
        """Reject configurations whose 'find' pattern does not compile."""
        super(FindReplaceNode, self)._validate()
        try:
            re.compile(self._params.get("find"))
        except Exception, err:
            raise node.ValidationError(self, "find: regular expression error: %s" % err)
    def content_data(self, data, tag, attrs):
        """Apply the substitution to one chunk of character data."""
        return self._findre.sub(self._replace, data)
    def _eval(self):
        """
        Run find/replace on the input.  Blank 'find' or 'replace'
        parameters make this node a pass-through.
        """
        xml = self.eval_input(0)
        find = self._params.get("find", "")
        replace = self._params.get("replace", "")
        if find.strip() == "" or replace.strip() == "":
            return xml
        self._findre = re.compile(find)
        self._replace = replace
        # Parse and re-serialise, substituting only inside text nodes.
        parser = HTMLContentHandler()
        parser.content_data = self.content_data
        return parser.parse(xml)
class Manager(manager.StandardManager):
    """
    Registry/factory for the utility nodes defined in this module.
    """
    @classmethod
    def get_node(self, name, **kwargs):
        # NOTE(review): the first parameter of this classmethod is named
        # `self` rather than the conventional `cls`.
        # Accept both bare and namespaced ("Utils::X") node names; returns
        # None for unknown names.
        if name.find("::") != -1:
            name = name.split("::")[-1]
        if name == "FindReplace":
            return FindReplaceNode(**kwargs)
    @classmethod
    def get_nodes(cls, *oftypes):
        # Discover node classes defined in this module's globals.
        return super(Manager, cls).get_nodes(
            *oftypes, globals=globals())
if __name__ == "__main__":
for n in Manager.get_nodes():
print n
| """
Nodes to perform random things.
"""
from nodetree import node, writable_node, manager
from ocradmin.plugins import stages, generic_nodes
NAME = "Utils"
from HTMLParser import HTMLParser
class HTMLContentHandler(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._data = []
self._ctag = None
self._cattrs = None
def data(self):
return "".join(self._data)
def content_data(self, data, tag, attrs):
"""ABC method. Does nothing by default."""
return data
def parsefile(self, filename):
with open(filename, "r") as f:
for line in f.readlines():
self.feed(line)
return self.data()
def parse(self, string):
self._data = []
self.feed(string)
return self.data()
def handle_decl(self, decl):
self._data.append("<!%s>" % decl)
def handle_comment(self, comment):
self._data.append("<!-- %s -->" % comment)
def handle_starttag(self, tag, attrs):
"""Simple add the tag to the data stack."""
self._ctag = tag
self._cattrs = attrs
self._data.append(
"<%s %s>" % (tag, " ".join(["%s='%s'" % a for a in attrs])))
def handle_data(self, data):
self._data.append(self.content_data(
data, self._ctag, self._cattrs))
def handle_endtag(self, tag):
self._data.append("</%s>" % tag)
class FindReplaceNode(node.Node, generic_nodes.TextWriterMixin):
"""
Find an replace stuff in input with output.
"""
stage = stages.UTILS
name = "Utils::FindReplace"
description = "Find and replace string in HOCR documents"
arity = 1
intypes = [generic_nodes.SafeUnicode]
outtype = generic_nodes.SafeUnicode
_parameters = [
dict(name="find", value=""),
dict(name="replace", value=""),
]
def content_data(self, data, tag, attrs):
"""Replace all content data."""
find = self._params.get("find")
repl = self._params.get("replace")
if not (find and repl):
return data
return data.replace(find, repl)
def _eval(self):
"""
Run find/replace on input
"""
xml = self.eval_input(0)
parser = HTMLContentHandler()
parser.content_data = self.content_data
return parser.parse(xml)
class Manager(manager.StandardManager):
"""
Handle Tesseract nodes.
"""
@classmethod
def get_node(self, name, **kwargs):
if name.find("::") != -1:
name = name.split("::")[-1]
if name == "FindReplace":
return FindReplaceNode(**kwargs)
@classmethod
def get_nodes(cls, *oftypes):
return super(Manager, cls).get_nodes(
*oftypes, globals=globals())
if __name__ == "__main__":
for n in Manager.get_nodes():
print n
| Python | 0.999564 |
bf53f738bb5408622b08eedb9b0b0c6f80487a0c | Create 0603_verbs_vehicles.py | 2019/0603_verbs_vehicles.py | 2019/0603_verbs_vehicles.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
NPR 2019-06-02
https://www.npr.org/2019/06/02/728600551/sunday-puzzle-lets-go-toe-to-toe?utm_medium=RSS&utm_campaign=sundaypuzzle
Think of a verb in its present and past tense forms.
Drop the first letter of each word.
The result will name two vehicles. What are they?
"""
import requests
import sys
sys.path.append('..')
import nprcommontools as nct
# URL with verb forms
URL = 'https://cdn.jsdelivr.net/gh/kulakowka/english-verbs-conjugation@master/src/services/ConjugationService/verbs.json'
r = requests.get(URL)
j = r.json()
VEHICLES = frozenset(nct.get_category_members('vehicle'))
#%%
for d in j:
verb = d[0]
past = d[1]
if past is not None:
v1 = verb[1:]
p1 = past[1:]
if v1 in VEHICLES and p1 in VEHICLES:
print(verb, past, v1, p1)
| Python | 0.000036 | |
60f54674cc7bb619d5275dbd49e346ecee276ff2 | fix reload module | importloader.py | importloader.py | ๏ปฟ#!/usr/bin/python
# -*- coding: UTF-8 -*-
def load(name):
try:
obj = __import__(name)
reload(obj)
return obj
except:
pass
try:
import importlib
obj = importlib.__import__(name)
importlib.reload(obj)
return obj
except:
pass
def loads(namelist):
for name in namelist:
obj = load(name)
if obj is not None:
return obj
| Python | 0.000001 | |
152db7b696b949c67b5121d42fba28ec31eceb47 | Create everyeno_keys.py | everyeno_keys.py | everyeno_keys.py | tumblr_consumer_key = ''
tumblr_consumer_secret = ''
tumblr_token_key = ''
tumblr_token_secret = ''
google_developerKey = ''
twitter_consumer_key = ''
twitter_consumer_secret = ''
twitter_token_key = ''
twitter_token_secret = ''
discogs_user_token = ''
| Python | 0 | |
faff5fc7665abfcbcf5ab497ca533d7d3d4e53ac | Split property system into it's own file. | ppapi/generators/idl_propertynode.py | ppapi/generators/idl_propertynode.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Hierarchical property system for IDL AST """
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_option import GetOption, Option, ParseOptions
#
# IDLPropertyNode
#
# A property node is a hierarchically aware system for mapping
# keys to values, such that a local dictionary is search first,
# followed by parent dictionaries in order.
#
class IDLPropertyNode(object):
def __init__(self):
self.parents = []
self.property_map = {}
def Error(self, msg):
name = self.GetProperty('NAME', 'Unknown')
parents = [parent.GetProperty('NAME', '???') for parent in self.parents]
ErrOut.Log('%s [%s] : %s' % (name, ' '.join(parents), msg))
def AddParent(self, parent):
assert parent
self.parents.append(parent)
def SetProperty(self, name, val):
self.property_map[name] = val
def _GetProperty_(self, name):
# Check locally for the property, and return it if found.
prop = self.property_map.get(name, None)
if prop is not None: return prop
# If not, seach parents in order
for parent in self.parents:
prop = parent.GetProperty(name)
if prop is not None: return prop
# Otherwise, it can not be found.
return None
def GetProperty(self, name, default=None):
prop = self._GetProperty_(name)
if prop is None:
return default
else:
return prop
def GetPropertyLocal(self, name, default=None):
# Search for the property, but only locally, returning the
# default if not found.
prop = self.property_map.get(name, default)
return prop
# Regular expression to parse property keys in a string such that a string
# "My string $NAME$" will find the key "NAME".
regex_var = re.compile('(?P<src>[^\\$]+)|(?P<key>\\$\\w+\\$)')
def GetPropertyList(self):
return self.property_map.keys()
# Recursively expands text keys in the form of $KEY$ with the value
# of the property of the same name. Since this is done recursively
# one property can be defined in terms of another.
def Replace(self, text):
itr = IDLPropertyNode.regex_var.finditer(text)
out = ''
for m in itr:
(start, stop) = m.span()
if m.lastgroup == 'src':
out += text[start:stop]
if m.lastgroup == 'key':
key = text[start+1:stop-1]
val = self.GetProperty(key, None)
if not val:
self.Error('No property "%s"' % key)
out += self.Replace(str(val))
return out
#
# Testing functions
#
# Build a property node, setting the properties including a name, and
# associate the children with this new node.
#
def BuildNode(name, props, children=[], parents=[]):
node = IDLPropertyNode()
node.SetProperty('NAME', name)
for prop in props:
toks = prop.split('=')
node.SetProperty(toks[0], toks[1])
for child in children:
child.AddParent(node)
for parent in parents:
node.AddParent(parent)
return node
def ExpectProp(node, name, val):
found = node.GetProperty(name)
if found != val:
ErrOut.Log('Got property %s expecting %s' % (found, val))
return 1
return 0
#
# Verify property inheritance
#
def PropertyTest():
errors = 0
left = BuildNode('Left', ['Left=Left'])
right = BuildNode('Right', ['Right=Right'])
top = BuildNode('Top', ['Left=Top', 'Right=Top'], [left, right])
errors += ExpectProp(top, 'Left', 'Top')
errors += ExpectProp(top, 'Right', 'Top')
errors += ExpectProp(left, 'Left', 'Left')
errors += ExpectProp(left, 'Right', 'Top')
errors += ExpectProp(right, 'Left', 'Top')
errors += ExpectProp(right, 'Right', 'Right')
if not errors: InfoOut.Log('Passed PropertyTest')
return errors
def ExpectText(node, text, val):
found = node.Replace(text)
if found != val:
ErrOut.Log('Got replacement %s expecting %s' % (found, val))
return 1
return 0
#
# Verify text replacement
#
def ReplaceTest():
errors = 0
left = BuildNode('Left', ['Left=Left'])
right = BuildNode('Right', ['Right=Right'])
top = BuildNode('Top', ['Left=Top', 'Right=Top'], [left, right])
errors += ExpectText(top, '$Left$', 'Top')
errors += ExpectText(top, '$Right$', 'Top')
errors += ExpectText(left, '$Left$', 'Left')
errors += ExpectText(left, '$Right$', 'Top')
errors += ExpectText(right, '$Left$', 'Top')
errors += ExpectText(right, '$Right$', 'Right')
if not errors: InfoOut.Log('Passed ReplaceTest')
return errors
def MultiParentTest():
errors = 0
parent1 = BuildNode('parent1', ['PARENT1=parent1', 'TOPMOST=$TOP$'])
parent2 = BuildNode('parent2', ['PARENT1=parent2', 'PARENT2=parent2'])
child = BuildNode('child', ['CHILD=child'], parents=[parent1, parent2])
BuildNode('top', ['TOP=top'], children=[parent1])
errors += ExpectText(child, '$CHILD$', 'child')
errors += ExpectText(child, '$PARENT1$', 'parent1')
errors += ExpectText(child, '$PARENT2$', 'parent2')
# Verify recursive resolution
errors += ExpectText(child, '$TOPMOST$', 'top')
if not errors: InfoOut.Log('Passed MultiParentTest')
return errors
def Main():
errors = 0
errors += PropertyTest()
errors += ReplaceTest()
errors += MultiParentTest()
if errors:
ErrOut.Log('IDLNode failed with %d errors.' % errors)
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
| Python | 0.000293 | |
d86bdff73f2c90667c8cd07750cfc120ca8a5a7d | Add BERT example. | examples/bert.py | examples/bert.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import torch_mlir
import iree_torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
def prepare_sentence_tokens(hf_model: str, sentence: str):
tokenizer = AutoTokenizer.from_pretrained(hf_model)
return torch.tensor([tokenizer.encode(sentence)])
class OnlyLogitsHuggingFaceModel(torch.nn.Module):
"""Wrapper that returns only the logits from a HuggingFace model."""
def __init__(self, model_name: str):
super().__init__()
self.model = AutoModelForSequenceClassification.from_pretrained(
model_name, # The pretrained model name.
# The number of output labels--2 for binary classification.
num_labels=2,
# Whether the model returns attentions weights.
output_attentions=False,
# Whether the model returns all hidden-states.
output_hidden_states=False,
torchscript=True,
)
self.model.eval()
def forward(self, input):
# Return only the logits.
return self.model(input)[0]
def _suppress_warnings():
import warnings
warnings.simplefilter("ignore")
import os
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def _get_argparse():
parser = argparse.ArgumentParser(
description="Run a HuggingFace BERT Model.")
parser.add_argument("--model-name",
default="philschmid/MiniLM-L6-H384-uncased-sst2",
help="The HuggingFace model name to use.")
parser.add_argument("--sentence",
default="The quick brown fox jumps over the lazy dog.",
help="sentence to run the model on.")
return parser
def main():
_suppress_warnings()
args = _get_argparse().parse_args()
print("Parsing sentence tokens.")
example_input = prepare_sentence_tokens(args.model_name, args.sentence)
print("Instantiating model.")
model = OnlyLogitsHuggingFaceModel(args.model_name)
# TODO: Wrap up all these steps into a convenient, well-tested API.
# TODO: Add ability to run on IREE CUDA backend.
print("Tracing model.")
traced = torch.jit.trace(model, example_input)
print("Compiling with Torch-MLIR")
linalg_on_tensors_mlir = torch_mlir.compile(traced, example_input,
output_type=torch_mlir.OutputType.LINALG_ON_TENSORS)
print("Compiling with IREE")
iree_vmfb = iree_torch.compile_to_vmfb(linalg_on_tensors_mlir)
print("Loading in IREE")
invoker = iree_torch.load_vmfb(iree_vmfb)
print("Running on IREE")
import time
start = time.time()
result = invoker.forward(example_input)
end = time.time()
print("RESULT:", result)
print(f"Model execution took {end - start} seconds.")
if __name__ == "__main__":
main()
| Python | 0 | |
768b6fd5f4af994ca9af1470cfcc7fa7eb216a8f | Add a binding.gyp file. | binding.gyp | binding.gyp | {
'targets': [
{
'target_name': 'validation',
'cflags': [ '-O3' ],
'sources': [ 'src/validation.cc' ]
},
{
'target_name': 'bufferutil',
'cflags': [ '-O3' ],
'sources': [ 'src/bufferutil.cc' ]
}
]
}
| Python | 0 | |
c9e2a8eca6fbcfbf3ccf8fcc54a7652c9e377d38 | Update Xspider Background Management Command | xspider/xspider/management/commands/run.py | xspider/xspider/management/commands/run.py | #!usr/bin/env python
# -*- coding:utf-8 -*-
# Create on 2017.2.20
import os
import datetime
import string
import traceback
import threading
import multiprocessing
import subprocess
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from scheduler.scheduler import XspiderScheduler
HELP = """
Run Xspider Background.
Usage: python manage.py run {all/web/flower/generator/processor}
all: Run xspider all modules.
web: Run xspider web modules.
flower: Run xspider celery flower.
generator: Run xspider celery generator workers.
processor: Run xspider celery processor workers.
"""
class RunXspider(object):
"""
Run Xspider
"""
@staticmethod
def runweb():
"""
Run celery generator, flower and processor
:return:
"""
try:
subprocess.call("python manage.py runserver 2017", shell=True)
except Exception:
reason = traceback.format_exc()
raise CommandError('Failed to run web! Reason: %s' % (reason))
@staticmethod
def runscheduler():
"""
Run celery generator, flower and processor
:return:
"""
try:
scheduler = XspiderScheduler()
scheduler.run()
except Exception:
reason = traceback.format_exc()
raise CommandError('Failed to run scheduler! Reason: %s' % (reason))
@staticmethod
def runflower():
"""
Run celery generator, flower and processor
:return:
"""
try:
subprocess.call("celery -A xspider flower", shell=True)
except Exception:
reason = traceback.format_exc()
raise CommandError('Failed to run celery flower! Reason: %s' % (reason))
@staticmethod
def rungenerator():
"""
Run celery generator, flower and processor
:return:
"""
try:
subprocess.call("celery worker --app=xspider -l info -n worker1@%h -Q low-generator", shell=True)
except Exception:
reason = traceback.format_exc()
raise CommandError('Failed to run celery worker! Reason: %s' % (reason))
@staticmethod
def runprocessor():
"""
Run celery generator, flower and processor
:return:
"""
try:
subprocess.call("celery worker --app=xspider -l info -n worker2@%h -Q low-processor", shell=True)
except Exception:
reason = traceback.format_exc()
raise CommandError('Failed to run celery worker! Reason: %s' % (reason))
def _run(self, cmd):
"""
add run command to process
:return:
"""
t = multiprocessing.Process(target=cmd)
t.daemon = True
t.start()
def run(self, command):
"""
Run Scheduler
:return:
"""
try:
if command == "all":
self._run(self.runweb)
self._run(self.rungenerator)
self._run(self.runprocessor)
self._run(self.runflower)
self._run(self.runscheduler)
elif command == "web":
self._run(self.runweb)
elif command == "flower":
self._run(self.runflower)
elif command == "generator":
self._run(self.rungenerator)
elif command == "processor":
self._run(self.runprocessor)
elif command == "scheduler":
self._run(self.runscheduler)
else:
raise CommandError("error: too few arguments. {all/web/flower/generator/processor}")
while True:
pass
except KeyboardInterrupt:
print "Xspider Stoped."
except Exception:
reason = traceback.format_exc()
raise CommandError('Failed to run xspider! Reason: %s' % (reason))
class Command(BaseCommand):
help = """
Run Xspider Background Management.
Usage: python manage.py run {all/web/flower/generator/processor}
"""
def add_arguments(self, parser):
"""
add arguments
:param parser:
:return:
"""
print HELP
parser.add_argument('command', nargs='+', type=str)
@staticmethod
def handle(*args, **options):
"""
Run Xspider Background Management
:param args:
:param option
:return:
"""
cmd = options["command"][0]
if cmd in ['all', "web", 'generator', 'processor', 'flower']:
xspider = RunXspider()
xspider.run(cmd)
else:
print HELP
raise CommandError("error: too few arguments. {all/web/flower/generator/processor}")
| Python | 0 | |
e9b8330d71e48702198117652768ba6791bc1401 | adds hello world | app/helloworld.py | app/helloworld.py | #!/usr/local/bin/python3.7
from pprint import pprint
from bson.objectid import ObjectId
from pymongo import MongoClient
import datetime
client = MongoClient('mongodb://localhost:27017/')
db = client.test_database
collection = db.test_collection
post = {"author": "Mike",
"text": "My first blog post!",
"tags": ["mongodb", "python", "pymongo"],
"date": datetime.datetime.utcnow()}
posts = db.posts
post_id = posts.insert_one(post).inserted_id
pprint(f"post_id: {post_id}")
pprint(f"list_collection_names: {db.list_collection_names()}")
pprint(posts.find_one())
pprint(posts.find_one({"author": "Mike"}))
pprint(posts.find_one({"author": "Eliot"}))
pprint(posts.find_one({"_id": post_id}))
# The web framework gets post_id from the URL and passes it as a string
def get(post_id):
# Convert from string to ObjectId:
document = client.db.collection.find_one({'_id': ObjectId(post_id)})
pprint(f"getting post_id '{post_id}' with get(): {get(post_id)}")
| Python | 0.999588 | |
3bbf06964452683d986db401556183f575d15a55 | Add script for inserting project into DB | insert-project.py | insert-project.py | #!/usr/bin/env python3
import pymongo
import subprocess
import re
from datetime import datetime
import argparse
from json import load as load_json
import sys
def _info(msg):
sys.stdout.write(msg + '\n')
sys.stdout.flush()
cl_parser = argparse.ArgumentParser(description='Insert a project into Meteor\'s local MongoDB')
cl_parser.add_argument('input', help='JSON input file')
cl_parser.add_argument('--site', default=None, help='Specify Meteor site (default: localhost)')
args = cl_parser.parse_args()
with open(args.input) as input_file:
json = load_json(input_file)
command = ['meteor', 'mongo', '-U']
if args.site:
command.append(args.site)
_info('Getting Mongo URL...')
mongo_url = subprocess.check_output(command).decode().strip()
mongo_url, db_name = mongo_url.rsplit('/', 1)
_info('Connecting to MongoDB: {} (DB: {})'.format(mongo_url, db_name))
client = pymongo.MongoClient(mongo_url)
db = client[db_name]
project = {
'created': datetime.utcnow(),
'owner': json['owner'],
'projectId': json['id'],
'tags': json['tags'],
'text': json['description'],
'title': json['title'],
'instructions': json['instructions'],
'pictures': json['pictures'],
'files': json['files'],
'license': json['license'],
}
db.projects.update({'owner': project['owner'], 'projectId': project['projectId']}, project,
upsert=True)
_info('Successfully inserted project \'{}/{}\' ({})'.format(
project['owner'],
project['projectId'],
project['title'],
))
| Python | 0.000001 | |
28e483c32d3e946f0f9159fe7459531f284d50aa | Add shared counter support to cache. | app/molcounter.py | app/molcounter.py | from google.appengine.api import memcache
from google.appengine.ext import db
import random
import collections
import logging
class GeneralCounterShardConfig(db.Model):
"""Tracks the number of shards for each named counter."""
name = db.StringProperty(required=True)
num_shards = db.IntegerProperty(required=True, default=20)
class GeneralCounterShard(db.Model):
"""Shards for each named counter"""
name = db.StringProperty(required=True)
count = db.IntegerProperty(required=True, default=0)
def get_top_names(top_count, all_results):
logging.info('%s from request' % top_count)
d = collections.defaultdict(list)
for counter in GeneralCounterShard.all():
d[counter.name.split('-')[-1]].append(counter.count)
results = {}
for name, counts in d.iteritems():
results[name] = reduce(lambda x,y: x+y, counts)
top = {}
x = collections.defaultdict(list)
for name, count in results.iteritems():
x[count].append(name)
keys = x.keys()
keys.sort()
keys.reverse()
tc = top_count
for k in keys:
if top_count > 0:
logging.info(top_count)
top[reduce(lambda x,y: '%s,%s' % (x,y), x[k])] = k
top_count -= 1
else:
break
logging.info(top)
if all_results:
return {'top-%s' % tc: top, 'results': results}
else:
return {'top-%s' % tc: top}
def get_count(name):
"""Retrieve the value for a given sharded counter.
Parameters:
name - The name of the counter
"""
total = memcache.get(name)
if total is None:
total = 0
for counter in GeneralCounterShard.all().filter('name = ', name):
total += counter.count
memcache.add(name, total, 60)
return total
def increment(name):
"""Increment the value for a given sharded counter.
Parameters:
name - The name of the counter
"""
config = GeneralCounterShardConfig.get_or_insert(name, name=name)
def txn():
index = random.randint(0, config.num_shards - 1)
shard_name = name + str(index)
counter = GeneralCounterShard.get_by_key_name(shard_name)
if counter is None:
counter = GeneralCounterShard(key_name=shard_name, name=name)
counter.count += 1
counter.put()
db.run_in_transaction(txn)
# does nothing if the key does not exist
memcache.incr(name)
def increase_shards(name, num):
"""Increase the number of shards for a given sharded counter.
Will never decrease the number of shards.
Parameters:
name - The name of the counter
num - How many shards to use
"""
config = GeneralCounterShardConfig.get_or_insert(name, name=name)
def txn():
if config.num_shards < num:
config.num_shards = num
config.put()
db.run_in_transaction(txn)
| Python | 0 | |
dbed291584150ef3d219c487f32b47a8f4907195 | question 1.7 | crack_1_7.py | crack_1_7.py | test = [[0,1,2,3],
[1,0,2,3],
[1,2,0,3],
[1,2,3,0]]
raw = []
col = []
length = len(test)
for x in xrange(length):
for y in xrange(length):
if test[x][y] == 0:
raw.append(x)
col.append(y)
for x in raw:
for y in xrange(length):
test[x][y] = 0
for y in col:
for x in xrange(length):
test[x][y] = 0
for x in xrange(length):
for y in xrange(length):
print test[x][y],
print
| Python | 0.999967 | |
2c6700d7a16ec7e76847f3664655aaf6c8f171eb | Create test_servo5v.py | test/test_servo5v.py | test/test_servo5v.py | from gadgets.motors.servos import Servo5V
import time
import random
servo = Servo5V(pin_number=12,freq=100)
count = 0
while count < 185:
time.sleep(0.1)
servo.write(count)
count += 5
servo.cleanup()
| Python | 0.000003 | |
f177811b254376cba0b053fdeb1bda4e84382c7a | Add Tristam MacDonald's Shader class. | scikits/gpu/shader.py | scikits/gpu/shader.py | """
This module is based on code from
http://swiftcoder.wordpress.com/2008/12/19/simple-glsl-wrapper-for-pyglet/
which is
Copyright (c) 2008, Tristam MacDonald
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from pyglet.gl import *
class Shader:
# vert, frag and geom take arrays of source strings
# the arrays will be concattenated into one string by OpenGL
def __init__(self, vert=[], frag =[], geom=[]):
# create the program handle
self.handle = glCreateProgram()
# we are not linked yet
self.linked = False
# create the vertex shader
self.createShader(vert, GL_VERTEX_SHADER)
# create the fragment shader
self.createShader(frag, GL_FRAGMENT_SHADER)
# the geometry shader will be the same, once pyglet supports the
# extension
# self.createShader(frag, GL_GEOMETRY_SHADER_EXT)
# attempt to link the program
self.link()
def createShader(self, strings, type):
count = len(strings)
# if we have no source code, ignore this shader
if count < 1:
return
# create the shader handle
shader = glCreateShader(type)
# convert the source strings into a ctypes pointer-to-char array,
# and upload them. This is deep, dark, dangerous black magick -
# don't try stuff like this at home!
src = (c_char_p * count)(*strings)
glShaderSource(shader, count, cast(pointer(src),
POINTER(POINTER(c_char))), None)
# compile the shader
glCompileShader(shader)
temp = c_int(0)
# retrieve the compile status
glGetShaderiv(shader, GL_COMPILE_STATUS, byref(temp))
# if compilation failed, print the log
if not temp:
# retrieve the log length
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, byref(temp))
# create a buffer for the log
buffer = create_string_buffer(temp.value)
# retrieve the log text
glGetShaderInfoLog(shader, temp, None, buffer)
# print the log to the console
print buffer.value
else:
# all is well, so attach the shader to the program
glAttachShader(self.handle, shader);
def link(self):
# link the program
glLinkProgram(self.handle)
temp = c_int(0)
# retrieve the link status
glGetProgramiv(self.handle, GL_LINK_STATUS, byref(temp))
# if linking failed, print the log
if not temp:
# retrieve the log length
glGetProgramiv(self.handle, GL_INFO_LOG_LENGTH, byref(temp))
# create a buffer for the log
buffer = create_string_buffer(temp.value)
# retrieve the log text
glGetProgramInfoLog(self.handle, temp, None, buffer)
# print the log to the console
print buffer.value
else:
# all is well, so we are linked
self.linked = True
def bind(self):
# bind the program
glUseProgram(self.handle)
def unbind(self):
# unbind whatever program is currently bound - not necessarily
# this program, so this should probably be a class method instead
glUseProgram(0)
# upload a floating point uniform
# this program must be currently bound
def uniformf(self, name, *vals):
# check there are 1-4 values
if len(vals) in range(1, 5):
# select the correct function
{1 : glUniform1f,
2 : glUniform2f,
3 : glUniform3f,
4 : glUniform4f
# retrieve the uniform location, and set
}[len(vals)](glGetUniformLocation(self.handle, name), *vals)
# upload an integer uniform
# this program must be currently bound
def uniformi(self, name, *vals):
# check there are 1-4 values
if len(vals) in range(1, 5):
# select the correct function
{1 : glUniform1i,
2 : glUniform2i,
3 : glUniform3i,
4 : glUniform4i
# retrieve the uniform location, and set
}[len(vals)](glGetUniformLocation(self.handle, name), *vals)
# upload a uniform matrix
# works with matrices stored as lists,
# as well as euclid matrices
def uniform_matrixf(self, name, mat):
# obtian the uniform location
loc = glGetUniformLocation(self.Handle, name)
# uplaod the 4x4 floating point matrix
glUniformMatrix4fv(loc, 1, False, (c_float * 16)(*mat))
| Python | 0 | |
7c9c95795dbbc5f64b532720f5749b58361c222b | add collector for http://www.dshield.org/ | collectors/dshield.py | collectors/dshield.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
import requests
import ipwhois
from pprint import pprint
def get_url(url):
try:
res = requests.get(url)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("DNS lookup failures")
else:
if res.status_code != 200:
raise requests.exceptions.ConnectionError(
"the {}, answer with {} error".format(url, res.status_code))
return res
def get_ip(name):
attempts = 5
ip = "undefined"
while attempts:
try:
data = socket.gethostbyname_ex(name)
ip = data[2][0]
break
except (socket.herror, socket.gaierror):
attempts -= 1
return ip
def get_who_is_and_country(ip):
try:
ip_obj = ipwhois.IPWhois(ip)
who_is = ip_obj.lookup(retry_count=5)
return str(who_is), who_is['asn_country_code']
except ipwhois.exceptions.IPDefinedError:
return "Private-Use Networks", "undefined"
except ipwhois.exceptions.WhoisLookupError:
return "undefined", "undefined"
def gather():
attack_type = 'undefined'
base_url = "http://www.dshield.org/feeds/suspiciousdomains_High.txt"
res = get_url(base_url)
for line in res.iter_lines():
if line[:1] == "#" or line in ("Site", ""):
continue
host = line
if host[-1] == "\t":
host = line[:-1]
ip_address = get_ip(host)
if ip_address == "undefined":
who_is, country = "undefined", "undefined"
else:
who_is, country = get_who_is_and_country(ip_address)
doc = {
'IP': ip_address,
'SourceInfo': base_url,
'Type': attack_type,
'Country': country,
'Domain': host,
'URL': host,
'WhoIsInfo': who_is,
}
pprint(doc)
if __name__ == '__main__':
gather()
| Python | 0 | |
8fe73523b7141f93d8523e56a7c6a5cc2ed82051 | Test case for ioddrivesnmp class | src/collectors/iodrivesnmp/test/testiodrivesnmp.py | src/collectors/iodrivesnmp/test/testiodrivesnmp.py | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from iodrivesnmp import IODriveSNMPCollector
class TestIODriveSNMPCollector(CollectorTestCase):
def setUp(self, allowed_names=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('IODriveSNMPCollector', {
'allowed_names': allowed_names,
'interval': 1
})
self.collector = IODriveSNMPCollector(config, None)
def test_import(self):
self.assertTrue(IODriveSNMPCollector)
| Python | 0 | |
2460bd91632da0e6b02e0faf379fe27b273575bc | Add rotate.py | rotate.py | rotate.py | """Funtion to rotate image 90 degress."""
def rotate(matrix):
pass | Python | 0.000004 | |
7a068872a071af2e60bf24ca7a00b3f1e999f139 | add request builder | builders.py | builders.py | # -*- coding: utf-8 -*-
import json
class PostBuilder(object):
def __init__(self):
self.parameters = {
'title': '',
'body': '',
'coediting': False,
'gist': False,
'private': False,
'tags': [],
'tweet': False
}
def body(self, content):
"""
Args:
content: str
"""
self.parameters['body'] = content
return self
def coediting(self, flag):
"""
Args:
flag: bool
"""
self.parameters['coediting'] = flag
return self
def gist(self, flag):
"""
Args:
flag: bool
"""
self.parameters['gist'] = flag
return self
def private(self, flag):
"""
Args:
flag: bool
"""
self.parameters['private'] = flag
return self
def tags(self, t):
"""
Args:
t: list[dict]
example : {"name": "tag_name", "versions": ["1.0"]}
"""
self.parameters['tags'] = t
return self
def title(self, t):
self.parameters['title'] = t
return self
def tweet(self, flag):
self.parameters['tweet'] = flag
return self
def __str__(self):
return json.dumps(self.parameters)
def encode(self):
"""
Returns:
condoded request json string
"""
return str(self).encode('utf-8')
| Python | 0 | |
857a5cb7effa03e9cd700fa69ae4d3b231212754 | Create business.py | business.py | business.py | # business logic here
# - account managing
# - create
# - edit
# - delete
# - payment data -> tokens
# - scripts running
# - statistics
| Python | 0.000003 | |
4a45256b614ebf8a8455562b63c1d50ec1521c71 | add a test class for auth.py | BigStash/t/test_auth.py | BigStash/t/test_auth.py | from mock import Mock
from testtools.matchers import Contains
from testtools import TestCase
class AuthTest(TestCase):
    """Unit tests for ``BigStash.auth.Auth``."""
    def setUp(self):
        super(AuthTest, self).setUp()
    def tearDown(self):
        super(AuthTest, self).tearDown()
    def _makeit(self, *args, **kwargs):
        # Factory helper: import lazily so collection works without BigStash installed.
        from BigStash.auth import Auth
        return Auth(*args, **kwargs)
    def test_auth_class(self):
        # Auth should be constructible from three arbitrary strings
        # (presumably api key, api secret, url -- order inferred from test_do_login).
        assert self._makeit(self.getUniqueString(),
                            self.getUniqueString(),
                            self.getUniqueString())
    def test_do_login(self, stdout):
        # NOTE(review): this method takes an extra ``stdout`` argument but has no
        # @mock.patch decorator supplying it, so unittest cannot invoke it --
        # presumably a sys.stdout patch decorator was dropped; confirm and restore.
        requests = Mock()
        requests.post.return_value = self.getUniqueString()
        api_key = self.getUniqueString()
        api_secret = self.getUniqueString()
        url = self.getUniqueString()
        auth = self._makeit(api_key, api_secret, url)
        self.assertThat(auth.GetAPIKey(),
                        Contains('authentication succesfull'))
| Python | 0.000002 | |
c212d1c25095f3b6e2f88cfccdc5c49280b22be0 | Add test for tilequeue changes related to #1387. | integration-test/1387-business-and-spur-routes.py | integration-test/1387-business-and-spur-routes.py | from . import FixtureTest
class BusinessAndSpurRoutes(FixtureTest):
    """Check that business/spur route relations yield extended network values."""
    def test_first_capitol_dr_i70_business(self):
        # OSM relation 1933234 carries the I-70 Business route.
        self.load_fixtures([
            'https://www.openstreetmap.org/relation/1933234',
        ])
        # check that First Capitol Dr, part of the above relation, is given
        # a network that includes the "business" extension.
        self.assert_has_feature(
            16, 16294, 25097, 'roads',
            {'id': 12276055, 'shield_text': '70', 'network': 'US:I:Business'})
| Python | 0 | |
672210c3af1a1b56a145b5265e5f316a1f6f36df | Add test folder | py3utils/test/__init__.py | py3utils/test/__init__.py | Python | 0 | ||
7ec15caf8f2c9d0a21581261a356f6decc548061 | Add some basic UI tests | test/ui_test.py | test/ui_test.py | from app import app
import unittest
class UiTestCase(unittest.TestCase):
    """Smoke tests for the app's UI routes via the Flask test client."""
    def setUp(self):
        self.app = app.test_client()
    def test_index(self):
        self.assertEqual(self.app.get('/').status_code, 200)
    def test_no_page(self):
        # NOTE(review): a missing page apparently still renders with 200 rather
        # than 404 -- presumably an editable placeholder page; confirm intended.
        self.assertEqual(self.app.get('/missing-page').status_code, 200)
    def test_all_pages(self):
        self.assertEqual(self.app.get('/.all-pages').status_code, 200)
    def test_edit(self):
        # Editing a named page works; the edit route with no name is a 404.
        self.assertEqual(self.app.get('/.edit/Index').status_code, 200)
        self.assertEqual(self.app.get('/.edit/').status_code, 404)
| Python | 0.000001 | |
59cc25693f2185ddfe36370d7f6641b2795d4798 | Test File Upload | ladybug/test.py | ladybug/test.py | import epw
from comfort.pmv import PMV
| Python | 0.000001 | |
e9105e4bb42ec8191b14519d10012dc79337f717 | Create tournament.py | Intro_to_Relational_Databases/tournament.py | Intro_to_Relational_Databases/tournament.py | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect():
    """Connect to the PostgreSQL database. Returns a database connection."""
    # The database name is fixed; all helpers below run against "tournament".
    return psycopg2.connect("dbname=tournament")
def deleteMatches():
    """Remove all the match records from the database."""
    dbConnection = connect()
    c = dbConnection.cursor()
    c.execute("DELETE FROM matches")
    # Commit before closing so the deletion is actually persisted.
    dbConnection.commit()
    dbConnection.close()
def deletePlayers():
    """Remove all the player records from the database."""
    dbConnection = connect()
    c = dbConnection.cursor()
    c.execute("DELETE FROM players")
    # Commit before closing so the deletion is actually persisted.
    dbConnection.commit()
    dbConnection.close()
def countPlayers():
    """Returns the number of players currently registered."""
    dbConnection = connect()
    c = dbConnection.cursor()
    # count(name) skips NULL names; registered players are assumed to have one.
    c.execute("SELECT count(name) FROM players;")
    # fetchone() yields a 1-tuple containing the count.
    count = c.fetchone()[0]
    dbConnection.commit()
    dbConnection.close()
    return count
def registerPlayer(name):
    """Adds a player to the tournament database.
    The database assigns a unique serial id number for the player. (This
    should be handled by your SQL database schema, not in your Python code.)
    Args:
      name: the player's full name (need not be unique).
    """
    dbConnection = connect()
    c = dbConnection.cursor()
    # Parameterized query keeps the insert safe from SQL injection.
    c.execute("INSERT INTO players (Name) VALUES (%s)", (name,))
    dbConnection.commit()
    dbConnection.close()
def reportMatch(winner, loser):
    """Records the outcome of a single match between two players.
    Args:
      winner:  the id number of the player who won
      loser:  the id number of the player who lost
    """
    dbConnection = connect()
    c = dbConnection.cursor()
    # One matches row per participant: the winner gets 'Win', the loser 'Loss'.
    c.execute("INSERT INTO matches (ID, Result) VALUES (%s, %s)", (winner, 'Win'))
    c.execute("INSERT INTO matches (ID, Result) VALUES (%s, %s)", (loser, 'Loss'))
    dbConnection.commit()
    dbConnection.close()
def playerStandings():
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player in first place, or a player
    tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played
    """
    dbConnection = connect()
    c = dbConnection.cursor()
    # Left-join players to their win counts and total match counts so players
    # with no matches still appear (with zeros), ordered by wins descending.
    # (An earlier, unreachable alternative query left after the return has
    # been removed.)
    query = """SELECT Win.ID, Win.Name,
                      COALESCE(Win."WinCount", 0),
                      COALESCE(Mat."MatchCount", 0)
               FROM
               (
                   SELECT Pla.ID, Pla.Name,
                          COALESCE(wincount, 0) as "WinCount"
                   FROM Players Pla
                   LEFT JOIN (
                       SELECT Mat.ID, COUNT(Mat.Result) as wincount
                       FROM Matches Mat
                       WHERE Mat.Result = 'Win'
                       GROUP By Mat.ID
                   ) WC
                   ON Pla.ID=WC.ID
               ) Win
               LEFT JOIN
               (
                   SELECT Mat.ID,
                          COALESCE(COUNT(Mat.Result), 0) as "MatchCount"
                   FROM Matches Mat
                   GROUP BY Mat.ID
               ) Mat
               ON Win.ID=Mat.ID
               ORDER By COALESCE(Win."WinCount", 0) Desc;"""
    c.execute(query)
    results = c.fetchall()
    dbConnection.close()
    return results
def swissPairings():
    """Returns a list of pairs of players for the next round of a match.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings. Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player adjacent
    to him or her in the standings.

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2)
        id1: the first player's unique id
        name1: the first player's name
        id2: the second player's unique id
        name2: the second player's name
    """
    # Standings are ordered by wins, so pairing adjacent rows matches players
    # with equal or nearly-equal records. (A dead, commented-out alternative
    # implementation that preceded this code has been removed.)
    standings = playerStandings()
    pairings = []
    paired = []
    for player in standings:
        # Each pairing accumulates two (id, name) pairs, i.e. four values.
        if len(paired) < 4:
            paired.append(player[0])
            paired.append(player[1])
        if len(paired) == 4:
            pairings.append(tuple(paired))
            paired = []
    return pairings
| Python | 0.000001 | |
d1b2d330d2a43814d89c7f17a347e425c434957d | Add Eoin's resampling function. | pyrate/tools/resampler.py | pyrate/tools/resampler.py | import pandas as pd
import numpy
# Does the resampling: bins AIS messages into fixed time periods (default hourly),
# averaging speed-over-ground and taking the maximum draught per bin.
# Called internally, one of the wrapper functions should be called if its needed
######################
def convert_messages_to_hourly_bins(df,period='H',fillnans=False,run_resample=True):
    # Empty frames pass straight through untouched.
    if df.empty:
        return df
    if run_resample:
        # Legacy pandas resample API (how=...); newer pandas spells this
        # .resample(period).mean() etc. -- keep in mind when upgrading.
        speed_ts=df.sog.resample(period,how='mean')
        draught_ts=df.draught.resample(period,how=numpy.max)
        df_new=pd.DataFrame({'sog':speed_ts,'draught':draught_ts})
        # Every other column keeps the first observation seen in each bin.
        for col in df.columns:
            if col != 'sog' and col!='draught':
                df_new[col]=df[col].resample(period,how='first')
    else:
        # NOTE(review): with run_resample=False, df_new is a plain list, so the
        # df_new['time'] assignment below will raise -- confirm this path is
        # actually unused before relying on it.
        df_new=[]
    #set the time equal to the index
    df_new['time']=df_new.index.values
    # fill forward
    if fillnans:
        #forward fill first
        df_new=df_new.fillna(method='pad')
        #now backward fill for remain
        df_new=df_new.fillna(method='bfill')
    else:
        #remove all entries where there are nans in speed
        df_new=df_new.ix[pd.isnull(df_new.sog)==False]
    return df_new
| Python | 0.000001 | |
3b064d6933ef7e910fab5634420358562866f1bc | Add test | tests/test_camera.py | tests/test_camera.py | # coding: utf-8
from __future__ import unicode_literals
import unittest
import tempfile
import shutil
from flask import Flask
from pitools import camera
app = Flask(__name__)
app.register_blueprint(camera.blueprint)
class CameraTestCase(unittest.TestCase):
    """HTTP-level tests for the camera blueprint."""

    def setUp(self):
        # Fresh scratch directory and Flask test client for every test.
        self.workspace = tempfile.mkdtemp()
        self.app = app.test_client()

    def tearDown(self):
        shutil.rmtree(self.workspace)

    def test_post_shot_api(self):
        '''
        Should fail with 405 method not allowed
        '''
        rv = self.app.post('/camera/shot')
        assert 405 == rv.status_code

    def test_get_shot_api(self):
        '''
        Should return a image with image/* MIME
        '''
        # (Leftover debug statement `print dir(rv)` removed.)
        rv = self.app.get('/camera/shot')
        assert rv.content_type.startswith('image/')
if __name__ == '__main__':
unittest.main()
| Python | 0.000005 | |
7ddfb39256229aa8c985ed8d70a29479187c76ad | Create script for beta invites | lily/management/commands/generate_beta_invites.py | lily/management/commands/generate_beta_invites.py | import csv
import gc
import logging
from datetime import date
from hashlib import sha256
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.core.urlresolvers import reverse_lazy
from lily.tenant.models import Tenant
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Generate a tenant and a signed invitation link for each beta signup.
    Reads 'beta_signups.csv' from default storage and writes
    'beta_signups_with_invites.csv' containing one invite URL per row.
    """
    def handle(self, **kwargs):
        # Host used when building the absolute invitation URLs.
        current_site = 'app.hellolily.com'
        with default_storage.open('beta_signups_with_invites.csv', 'wb') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=',')
            spamwriter.writerow(['company', 'email', 'first_name', 'last_name', 'invite', 'country'])
            for row in self.read_csvfile('beta_signups.csv'):
                company = row['company']
                first_name = row['first_name']
                last_name = row['last_name']
                email = row['email']
                country = row['country']
                date_string = date.today().strftime('%d%m%Y')
                # One fresh tenant per signup, provisioned via the management command.
                tenant = Tenant.objects.create(name=company, country=country)
                call_command('create_tenant', tenant=tenant.id)
                # The hash binds the invite to tenant, email, date and the server
                # secret so the link cannot be forged or reused elsewhere.
                invite_hash = sha256('%s-%s-%s-%s' % (
                    tenant.id,
                    email,
                    date_string,
                    settings.SECRET_KEY
                )).hexdigest()
                invite_link = '%s://%s%s' % ('https', current_site, reverse_lazy('invitation_accept', kwargs={
                    'tenant_id': tenant.id,
                    'first_name': first_name,
                    'email': email,
                    'date': date_string,
                    'hash': invite_hash,
                }))
                spamwriter.writerow([company, email, first_name, last_name, invite_link, country])
            # Free ORM objects accumulated during the loop.
            gc.collect()
    def read_csvfile(self, file_name):
        """
        Read from path assuming it's a file with ';' separated values.
        Yields one dict per row, keyed by the header line.
        """
        # Newlines are breaking correct csv parsing. Write correct temporary file to parse.
        csv_file = default_storage.open(file_name, 'rU')
        reader = csv.DictReader(csv_file, delimiter=';', quoting=csv.QUOTE_ALL)
        for row in reader:
            yield row
| Python | 0 | |
5bc089a98bf578fd0c56e3e50cf76888ee74aba2 | Add py solution for 537. Complex Number Multiplication | py/complex-number-multiplication.py | py/complex-number-multiplication.py | import re
class Solution(object):
    def complexNumberMultiply(self, a, b):
        """Multiply two complex numbers given as "<real>+<imag>i" strings.

        :type a: str
        :type b: str
        :rtype: str  (the product, formatted the same way)
        """
        def parse(text):
            # Either component may be negative, e.g. "1+-1i".
            real, imag = re.match(r'(-?\d+)\+(-?\d+)i', text).groups()
            return int(real), int(imag)

        (ar, ai), (br, bi) = parse(a), parse(b)
        # (ar + ai*i) * (br + bi*i) = (ar*br - ai*bi) + (ai*br + ar*bi)*i
        return '%d+%di' % (ar * br - ai * bi, ai * br + ar * bi)
| Python | 0.000635 | |
400ad736a271946569efa438e8fc9d00a7ce0075 | test for #22 | tests/test_issues.py | tests/test_issues.py | from tgbot import plugintest
from tgbot.botapi import Update
from test_plugin import TestPlugin
class TestPluginTest(plugintest.PluginTestCase):
    """Regression tests for reported tgbot issues."""
    def setUp(self):
        self.plugin = TestPlugin()
        self.bot = self.fake_bot(
            '',
            plugins=[self.plugin],
        )
        # Monotonically increasing fake update/message id.
        self.received_id = 1
    def test_user_update(self):
        """Test for issue #22"""
        # A returning sender with changed details must update the stored User
        # row instead of keeping the values from the first message.
        sender = {
            'id': 1,
            'first_name': 'John',
            'last_name': 'Doe',
        }
        self.receive_message('test', sender=sender)
        self.assertEqual(self.bot.models.User.get(self.bot.models.User.id == 1).first_name, 'John')
        sender['first_name'] = 'Paul'
        self.receive_message('test', sender=sender)
        self.assertEqual(self.bot.models.User.get(self.bot.models.User.id == 1).first_name, 'Paul')
    def receive_message(self, text, sender=None, chat=None, reply_to_message_id=None):
        """Feed one fake Telegram message update to the bot.
        sender and chat default to a private chat from user id 1; pass
        reply_to_message_id to simulate a reply to an earlier message.
        """
        if sender is None:
            sender = {
                'id': 1,
                'first_name': 'John',
                'last_name': 'Doe',
            }
        if chat is None:
            chat = {'type': 'private'}
        # In private chats the chat shares the sender's fields (id, names).
        chat.update(sender)
        reply_to_message = None
        if reply_to_message_id is not None:
            reply_to_message = {
                'message_id': reply_to_message_id,
                'chat': chat,
            }
        self.bot.process_update(
            Update.from_dict({
                'update_id': self.received_id,
                'message': {
                    'message_id': self.received_id,
                    'text': text,
                    'chat': chat,
                    'from': sender,
                    'reply_to_message': reply_to_message,
                }
            })
        )
        self.received_id += 1
| Python | 0.000001 | |
06e82c471afa83bf0f08f0779b32dd8a09b8d1ba | Add py solution for 350. Intersection of Two Arrays II | py/intersection-of-two-arrays-ii.py | py/intersection-of-two-arrays-ii.py | from collections import Counter
class Solution(object):
    def intersect(self, nums1, nums2):
        """Return the multiset intersection of two integer lists.

        Each value appears as many times as it occurs in both inputs.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        common = Counter(nums1) & Counter(nums2)
        result = []
        for value, count in common.items():
            result.extend([value] * count)
        return result
| Python | 0.001158 | |
742e827178ee28663699acbb4a5f0ad5440649fc | add new keyboard_locks module | py3status/modules/keyboard_locks.py | py3status/modules/keyboard_locks.py | # -*- coding: utf-8 -*-
"""
Monitor CapsLock, NumLock, and ScrLock keys
NumLock: Allows the user to type numbers by pressing the keys on the number pad,
rather than having them act as up, down, left, right, page up, end, and so forth.
CapsLock: When enabled, letters the user types will be in uppercase by default
rather than lowercase.
ScrLock: In some applications, such as spreadsheets, the lock mode is used to
change the behavior of the cursor keys to scroll the document instead of the cursor.
Configuration parameters:
cache_timeout: refresh interval for this module (default 1)
icon_capslock_off: show when Caps Lock is off (default 'CAP')
icon_capslock_on: show when Caps Lock is on (default 'CAP')
icon_numlock_off: show when Num Lock is off (default 'NUM')
icon_numlock_on: show when Num Lock is off (default 'NUM')
icon_scrlock_off: show when Scroll Lock is off (default 'SCR')
icon_scrlock_on: show when Scroll Lock is on (default 'SCR')
Color options:
color_good: Lock on
color_bad: Lock off
@author lasers
"""
class Py3status:
    """
    Render the Caps/Num/Scroll lock states as three colored badges.
    """
    # available configuration parameters
    cache_timeout = 1
    icon_capslock_off = "CAP"
    icon_capslock_on = "CAP"
    icon_numlock_off = "NUM"
    icon_numlock_on = "NUM"
    icon_scrlock_off = "SCR"
    icon_scrlock_on = "SCR"
    def keyboard_lock(self):
        # 'xset -q' lists each lock state, e.g. "Caps Lock:   on".
        out = self.py3.command_output('xset -q')
        # Default every lock to "off" (COLOR_BAD); flip per lock below.
        capslock_color = self.py3.COLOR_BAD
        capslock_icon = self.icon_capslock_off
        numlock_color = self.py3.COLOR_BAD
        numlock_icon = self.icon_numlock_off
        scrlock_color = self.py3.COLOR_BAD
        scrlock_icon = self.icon_scrlock_off
        # Inspect the first few characters after each label for "on".
        if 'on' in out.split("Caps Lock:")[1][0:6]:
            capslock_color = self.py3.COLOR_GOOD
            capslock_icon = self.icon_capslock_on
        if 'on' in out.split("Num Lock:")[1][0:6]:
            numlock_color = self.py3.COLOR_GOOD
            numlock_icon = self.icon_numlock_on
        if 'on' in out.split("Scroll Lock:")[1][0:6]:
            scrlock_color = self.py3.COLOR_GOOD
            scrlock_icon = self.icon_scrlock_on
        # Composite output: three badges separated by plain spaces.
        return {
            'cached_until': self.py3.time_in(self.cache_timeout),
            'composite': [
                {
                    'color': capslock_color,
                    'full_text': capslock_icon,
                },
                {
                    'full_text': ' '
                },
                {
                    'color': numlock_color,
                    'full_text': numlock_icon,
                },
                {
                    'full_text': ' '
                },
                {
                    'color': scrlock_color,
                    'full_text': scrlock_icon,
                },
            ]
        }
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| Python | 0.000001 | |
3ff7c739cfc688c757396c465799ab42638c4a80 | Add toopher-pair utility | toopher-pair.py | toopher-pair.py | import StringIO
import getpass
import argparse
import signal
import time
import os
import sys
from wsgiref import validate
import configobj
import toopher
import validate
from common import *
TIMEOUT_PAIRING = 30
DEFAULT_USER_CONFIG_FILE = StringIO.StringIO("""\
# This is a user-specific Toopher configuration file. See toopher_config (5)
# for more information. This file provides a way for users to customize the
# behavior of their authentication flows which are configured to use the Toopher
# PAM module.
# Pairings: This section lists known pairing identifiers issued by the Toopher API
# server for hosts to use for pairings identification when making an authentication
# request. The values here may be populated using the 'toopher-pair' utility.
[Pairings]
""")
def sigint_handler(sig, frame):
    # Replace the default KeyboardInterrupt traceback with a clean exit message.
    sys.exit("Cancelled by user (Ctrl-C)")
signal.signal(signal.SIGINT, sigint_handler)
def main():
argparser = argparse.ArgumentParser(description="Pair an account with the Toopher app")
argparser.add_argument('--user', '-u', default='',
help="the user to pair (defaults to the current user, specifying a user is usually only useful for the superuser)")
argparser.add_argument('--shared', '-s', action="store_true",
help="create a shared pairing instead of a host-specific pairing")
argparser.add_argument('--phrase', '-p', metavar="PAIRING_PHRASE",
help="a pairing phrase generated by the Toopher app (will prompt if not supplied)")
args = argparser.parse_args()
# Retrieve API credentials from system configuration file
try:
system_config = get_system_config()
api = get_api_object(system_config)
except Exception:
sys.exit("Could not read the Toopher system config, please request that your administrator configure the system for Toopher.")
username = args.user
if not username:
username = getpass.getuser()
user_config_filename = get_user_config_filename(username)
try:
user_config = get_user_config(username)
except IOError: # Does not exist or cannot read
if os.path.exists(user_config_filename): # Exists but can't be read
sys.exit("Could not read user's Toopher config file ('%s')" % user_config_filename)
else: # Does not exist, make sure we can write it if we try
if not os.access(os.path.dirname(user_config_filename), os.W_OK):
sys.exit("Missing write permissions for the user's Toopher config file ('%s')" % user_config_filename)
user_config = configobj.ConfigObj(DEFAULT_USER_CONFIG_FILE)
user_config.filename = user_config_filename
except configobj.ConfigObjError, e: # Could not parse
sys.exit("Malformed configuration file ('%s'): %s" % (user_config_filename, e))
except validate.ValidateError, e: # Did not validate against spec
sys.exit("Problem validating user configuration file ('%s'):\n"
"%s\n"
"Please fix or remove user configuration file and try again."
% (user_config_filename, e))
else: # Exists, readable, parseable, and valid - make sure we can write it if we try
if not os.access(user_config_filename, os.W_OK):
sys.exit("Missing write permissions for the user's Toopher config file ('%s')" % user_config_filename)
phrase = args.phrase
while True:
if not phrase:
phrase = raw_input("Enter a pairing phrase (generated by the Toopher app): ")
if not phrase:
print 'Invalid pairing phrase, please try again.'
else:
break
full_username = get_full_username(username, args.shared)
try:
sys.stdout.write('Contacting server to perform pairing... ')
sys.stdout.flush()
pairing = api.pair(phrase, full_username)
print 'done.'
except Exception as error:
print 'error.'
sys.exit('This user could not be paired due to an error: %s' % error)
print 'Your Toopher app should now ask you to approve this pairing request. Please respond to continue.'
sys.stdout.write('Checking status..')
sys.stdout.flush()
start_time = time.time()
while time.time() - start_time < TIMEOUT:
sys.stdout.write(".")
sys.stdout.flush()
pairing_status = api.get_pairing_status(pairing.id)
if not pairing_status.pending:
if pairing_status.enabled:
print ' pairing approved.'
break
else:
print ' pairing denied.'
sys.exit('This pairing request was denied by the Toopher app user.')
time.sleep(1)
pairing_key = USER_CONFIG_PAIRINGS_KEY_SHARED_PAIRING_ID if args.shared else HOSTNAME
user_config[USER_CONFIG_PAIRINGS_SECTION][pairing_key] = pairing.id
user_config.write()
print 'Pairing successful - PAM services configured to use the Toopher module will now use this pairing to authenticate.'
if __name__ == '__main__':
try:
main()
except Exception, e:
sys.exit("An unexpected error was encountered. Please contact support@toopher.com for resolution: (Error: %s)"
% e) | Python | 0.000013 | |
9d7c348170fc0f9d339a2ef57a9e64b1ceaa7516 | Add demo MNH event scraper | web/whim/core/scrapers/mnh.py | web/whim/core/scrapers/mnh.py | from datetime import datetime, timezone, time
import requests
from bs4 import BeautifulSoup
from django.db import transaction
from .base import BaseScraper
from .exceptions import ScraperException
from whim.core.models import Event, Source, Category
from whim.core.utils import get_object_or_none
from whim.core.time import zero_time_with_timezone
class MNHScraper(BaseScraper):
def get_data(self):
url = "https://manxnationalheritage.im/whats-on/"
parsed = []
page = requests.get(url)
if page.status_code == 200:
soup = BeautifulSoup(page.content, 'html.parser')
events = soup.select(
"div.columns.no-padding-grid.push-top-m > div > a")
parsed = []
for e in events:
tmp = {
"link": e.get('href'),
"category": e.find("span", {"class": "badge"}).string
}
#get rest of data
article = e.find("div", {"class": "text"})
if article:
tmp["name"] = article.contents[0].string #h2
tmp["description"] = article.contents[3].contents[
0].string #p
#dates
try:
dates = article.contents[2].contents[0].string.replace(
" ", "").replace("โ", "-").split("-") #span
tmp["start_date"] = zero_time_with_timezone(
datetime.strptime(dates[0], "%d/%m/%Y"))
if len(dates) > 1:
tmp["end_date"] = zero_time_with_timezone(
datetime.strptime(dates[1], "%d/%m/%Y"))
except:
continue
parsed.append(tmp)
return parsed
else:
raise ScraperException("Unexpected status code")
@transaction.atomic
def run(self, source_id):
source = Source.objects.get(id=source_id)
for scraped_event in self.get_data():
event = get_object_or_none(
Event, source=source, name=scraped_event["name"])
if event is None:
category, _ = Category.objects.get_or_create_from_name(
scraped_event["category"])
Event.objects.create(
source=source,
category=category,
name=scraped_event["name"],
description=scraped_event["description"],
start_datetime=scraped_event["start_date"],
end_datetime=scraped_event.get("end_date"),
link=scraped_event["link"],
tags=[])
#mark this run
source.last_run_date = datetime.now(timezone.utc)
source.save()
| Python | 0 | |
632fea66f57f72d176fb8ad56f0cdaf5e4884110 | add test for multi-arch disasm | tests/test_multiarch_disasm.py | tests/test_multiarch_disasm.py | import os.path
import idb
def _check_disasm(sample, expected):
    """Open tests/data/<sample> with python-idb and check GetDisasm output.

    sample: path components under the data directory, e.g. ('armel', 'ls.idb').
    expected: iterable of (effective address, expected disassembly) pairs.
    """
    cd = os.path.dirname(__file__)
    idbpath = os.path.join(cd, 'data', *sample)
    with idb.from_file(idbpath) as db:
        api = idb.IDAPython(db)
        for ea, disasm in expected:
            assert api.idc.GetDisasm(ea) == disasm


def test_armel_disasm():
    _check_disasm(('armel', 'ls.idb'), [
        (0x00002598, 'push\t{r4, r5, r6, r7, r8, sb, sl, fp, lr}'),
        (0x00012010, 'b\t#0x12014'),
    ])


def test_thumb_disasm():
    _check_disasm(('thumb', 'ls.idb'), [
        (0x00011eac, 'strb\tr4, [r3, r5]'),
        (0x00011eae, 'b\t#0x11ebc'),
    ])


def test_arm64_disasm():
    _check_disasm(('arm64', 'ls.i64'), [
        (0x00005d30, 'cmp\tw5, #0x74'),
        (0x00005d34, 'csel\tw5, w5, w12, ne'),
        (0x00005d38, 'b\t#0x5c30'),
    ])


def test_mips_disasm():
    _check_disasm(('mips', 'ls.idb'), [
        (0x00005440, 'sb\t$t2, ($t1)'),
        (0x00005444, 'addiu\t$t3, $t3, 1'),
        (0x00005448, 'b\t0x523c'),
    ])


def test_mipsel_disasm():
    _check_disasm(('mipsel', 'ls.idb'), [
        (0x0000543c, 'sb\t$t2, ($t1)'),
        (0x00005440, 'addiu\t$t3, $t3, 1'),
        (0x00005444, 'b\t0x5238'),
    ])


def test_mips64el_disasm():
    _check_disasm(('mips64el', 'ls.i64'), [
        (0x0000b8c8, 'addiu\t$s0, $s0, -0x57'),
        (0x0000b8cc, 'daddiu\t$v1, $v1, 1'),
        (0x0000b8d0, 'b\t0xb760'),
    ])
| Python | 0.000001 | |
423554349177a5c8ed987f249b13fac9c8b8d79a | Add links to upgrade actions in the change log | gen-changelog.py | gen-changelog.py | # Writes a changelog in trac WikiFormatting based on a git log
from __future__ import unicode_literals, division, absolute_import
import codecs
from itertools import ifilter
import os
import re
import subprocess
import sys
import dateutil.parser
import requests
from flexget.utils.soup import get_soup
out_path = 'ChangeLog'
if len(sys.argv) > 1:
dir_name = os.path.dirname(sys.argv[1])
if dir_name and not os.path.isdir(dir_name):
print 'Output dir doesn\'t exist: %s' % sys.argv[1]
sys.exit(1)
out_path = sys.argv[1]
ua_response = requests.get('http://flexget.com/wiki/UpgradeActions')
ua_soup = get_soup(ua_response.text)
# 1.0.3280 was last revision on svn
git_log_output = subprocess.check_output(['git', 'log', '--pretty=%n---%n.%d%n%ci%n%h%n%s%n%-b%n---%n',
'--topo-order', '--decorate=full','refs/tags/1.0.3280..HEAD'])
git_log_iter = ifilter(None, git_log_output.decode('utf-8').splitlines())
with codecs.open(out_path, 'w', encoding='utf-8') as out_file:
for line in git_log_iter:
assert line == '---'
tag = re.search('refs/tags/([\d.]+)', next(git_log_iter))
date = dateutil.parser.parse(next(git_log_iter))
commit_hash = next(git_log_iter)
body = list(iter(git_log_iter.next, '---'))
if tag:
ver = tag.group(1)
ua_link = ''
result = ua_soup.find('h3', text=re.compile(re.escape(ver)))
if result:
ua_link = '^[wiki:UpgradeActions#%s upgrade actions]^ ' % result['id']
out_file.write('\n=== %s (%s) %s===\n\n' % (ver, date.strftime('%Y.%m.%d'), ua_link))
out_file.write(' * (%s) %s\n' % (commit_hash, '[[BR]]\n '.join(body)))
| # Writes a changelog in trac WikiFormatting based on a git log
from __future__ import unicode_literals, division, absolute_import
import codecs
from itertools import ifilter
import os
import re
import subprocess
import sys
import dateutil.parser
out_path = 'ChangeLog'
if len(sys.argv) > 1:
dir_name = os.path.dirname(sys.argv[1])
if dir_name and not os.path.isdir(dir_name):
print 'Output dir doesn\'t exist: %s' % sys.argv[1]
sys.exit(1)
out_path = sys.argv[1]
# 1.0.3280 was last revision on svn
git_log_output = subprocess.check_output(['git', 'log', '--pretty=%n---%n.%d%n%ci%n%h%n%s%n%-b%n---%n',
'--topo-order', '--decorate=full','refs/tags/1.0.3280..HEAD'])
git_log_iter = ifilter(None, git_log_output.decode('utf-8').splitlines())
with codecs.open(out_path, 'w', encoding='utf-8') as out_file:
for line in git_log_iter:
assert line == '---'
tag = re.search('refs/tags/([\d.]+)', next(git_log_iter))
date = dateutil.parser.parse(next(git_log_iter))
commit_hash = next(git_log_iter)
body = list(iter(git_log_iter.next, '---'))
if tag:
out_file.write('\n=== %s (%s) ===\n\n' % (tag.group(1), date.strftime('%Y.%m.%d')))
out_file.write(' * (%s) %s\n' % (commit_hash, '[[BR]]\n '.join(body)))
| Python | 0 |
35b1fc5e43f553e95ad4c8a42c37ca66639d9120 | add test for core.py | HARK/tests/test_core.py | HARK/tests/test_core.py | """
This file implements unit tests for interpolation methods
"""
from HARK.core import HARKobject
import numpy as np
import unittest
class testHARKobject(unittest.TestCase):
    """Unit tests for HARKobject's distance method."""

    def setUp(self):
        # Two bare HARKobjects; neither defines the attributes distance needs.
        self.obj_a = HARKobject()
        self.obj_b = HARKobject()

    def test_distance(self):
        # Bug fix: pass the callable and its argument separately. The original
        # called distance(...) eagerly, so assertRaises never observed the
        # exception (it would propagate before the assertion ran).
        self.assertRaises(AttributeError, self.obj_a.distance, self.obj_b)
| Python | 0 | |
be17fa5026fd7cd64ccfc6e7241137a3f864725b | add google doc generator | generate_gpad.py | generate_gpad.py | import httplib2
import webbrowser
from apiclient.discovery import build
from oauth2client import client
# OAuth2 "out of band" flow: open the consent page in a browser and have the
# user paste the resulting auth code back into this script.
flow = client.flow_from_clientsecrets(
    'client_secret.json',
    scope=['https://www.googleapis.com/auth/drive.file',
           'https://www.googleapis.com/auth/urlshortener'],
    redirect_uri='urn:ietf:wg:oauth:2.0:oob')
webbrowser.open(flow.step1_get_authorize_url())
auth_code = raw_input('Enter the auth code: ')
credentials = flow.step2_exchange(auth_code)
http = credentials.authorize(httplib2.Http())
# Create an empty Google Doc...
service = build('drive', 'v2', http)
body = {
    'mimeType': 'application/vnd.google-apps.document',
    'title': 'hodor'
}
# NOTE: 'file' shadows the builtin of the same name below.
file = service.files().insert(body=body).execute()
# ...grant anyone-with-the-link write access...
body = {
    'role': 'writer',
    'type': 'anyone',
    'withLink': True
}
service.permissions().insert(fileId=file['id'], body=body).execute()
file = service.files().get(fileId=file['id']).execute()
share = file['alternateLink']
# ...and print a shortened share URL via the urlshortener API.
service = build('urlshortener', 'v1', http)
body = { 'longUrl': share }
short = service.url().insert(body=body).execute()
print short['id']
| Python | 0 | |
e1fa00aac605d8aaa8544af706f50ef64896bf8a | build dataset | py/testdir_0xdata_only/sphere_gen.py | py/testdir_0xdata_only/sphere_gen.py | import time, sys, random, math
sys.path.extend(['.','..','py'])
# a truly uniform sphere
# http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
# he offers the exact solution: http://stackoverflow.com/questions/918736/random-number-generator-that-produces-a-power-law-distribution/918782#918782
# In spherical coordinates, taking advantage of the sampling rule:
# http://stackoverflow.com/questions/2106503/pseudorandom-number-generator-exponential-distribution/2106568#2106568
CLUSTERS = 15
GB_SPHERE_PTS = 4000000 # 1GB
SPHERE_PTS = 10 * GB_SPHERE_PTS
RANDOMIZE_SPHERE_PTS = True
JUMP_RANDOM_ALL_DIRS = True
SHUFFLE_SPHERES = False
RADIUS_NOISE = True
ALLOWED_CENTER_DELTA = 1
MAX_DIGITS_IN_DIMENSIONS = [2,1,3,4,8,5]
MAXINTS = [(pow(10,d) - 1) for d in MAX_DIGITS_IN_DIMENSIONS]
DIMENSIONS = len(MAX_DIGITS_IN_DIMENSIONS)
def getInterestingEnum():
# powerhouse data
# U0000000001070000E1300000000R50000000,07,4,277,1250,10000013,11400]
# U0000000001070000 (16)
# E1300000000 (10)
# R50000000 (8)
u = "U" + str(random.randint(1, pow(10,16)) - 1)
e = "E" + str(random.randint(1, pow(10,10)) - 1)
r = "R" + str(random.randint(1, pow(10,8)) - 1)
return u + e + r
# pts have 6 dimensions
# 07 (2) (0-99)
# 4, (1) (0-9)
# 277, (3) (0-999)
# 1250, (4) (0-9999)
# 10000013, (8) (0-99999999)
# 11400 (5) (0-99999)
def get_xyz_sphere(R):
    """Return [x, y, z] sampled uniformly from the volume of a sphere of radius R.
    Taking r = R * u**(1/3) with u ~ U(0,1) yields uniform density in the ball;
    theta comes from a uniform cos(theta) and phi is uniform in [0, 2*pi).
    """
    u = random.random() # 0 to 1
    # add a little noise
    r = R * (u ** (1.0/3))
    if RADIUS_NOISE:
        # Module-level flag: pad the radius by up to 10% so shells aren't exact.
        rNoise = random.random() * .1 * r
        r += rNoise
    costheta = random.uniform(-1,1)
    theta = math.acos(costheta)
    phi = random.uniform(0, 2 * math.pi)
    # now you have a (r, theta, phi) group which can be transformed to (x, y, z)
    x = r * math.sin(theta) * math.cos(phi)
    y = r * math.sin(theta) * math.sin(phi)
    z = r * math.cos(theta)
    # use the extra 0 cases for jump dimensions? (picture "time" and other dimensions)
    # randomly shift the sphere xyz across the allowed dimension?
    xyzzy = [x, y, z]
    return xyzzy
def write_spheres_dataset(csvPathname, CLUSTERS, n):
dsf = open(csvPathname, "w+")
# going to do a bunch of spheres, with differing # of pts and R
# R is radius of the spheres
# separate them by 3 * the previous R
# keep track of the centers so we compare to a sorted result from H2O
centersList = []
currentCenter = None
totalRows = 0
sphereCnt = 0
# we might do more iterations if we get a "bad" center
for sphereCnt in range(CLUSTERS):
R = 10 * (sphereCnt+1)
# FIX! problems if we don't jump the other dimensions?
# try jumping in all dimensions
# newOffset[xyzChoice] = jump
# build a sphere at that center
# fixed number of pts?
if RANDOMIZE_SPHERE_PTS:
# pick a random # of points, from .5n to 1.5n
numPts = random.randint(int(.5*n), int(1.5*n))
else:
numPts = n
if DIMENSIONS < 3:
raise Exception("DIMENSIONS must be >= 3, is:"+DIMENSIONS)
xyzShift = random.randint(0,DIMENSIONS-3)
print "xyzShift:", xyzShift
# some random place in the allowed space. Let's just assume we don't get
# sphere overlap that kills us. With enough dimensions, that might be true?
# looks like we compare the sum of the centers above (in -1 to 1 space)
initial = [int((0.5 * random.randint(0,MAXINTS[i]))) for i in range(DIMENSIONS)]
# zero out the ones we'll mess with just to be safe/clear
# this will be the initial center? make it within the range of half
# the allowed int
scale = []
for i in range(3):
MAX = MAXINTS[xyzShift+i]
a = int(0.5 * random.randint(0,MAX) + R) # R plus something
if random.randint(0,1):
a = -a
initial[xyzShift+i] = a
# randomly sized "ball" to scale our "sphere"
# use only 1/2 the allowed range, letting the center skew by one half also
scale.append(0.5 * random.randint(0,MAX))
# figure out the next center
if currentCenter is None:
currentCenter = initial[:]
lastCenter = currentCenter[:]
else:
currentCenter = initial[:]
delta = [a-b for a, b in zip(lastCenter, currentCenter)]
maxDelta = max(delta)
# R is always getting bigger, so use the current R to check
if maxDelta < (ALLOWED_CENTER_DELTA * R):
print "currentCenter:", currentCenter, "lastCenter:", lastCenter
print "ERROR: adjacent centers are too close for our sort algorithm %s %s" % (maxDelta, R)
continue
lastCenter = currentCenter[:]
centersList.append(currentCenter)
print "currentCenter:", currentCenter, "R:", R, "numPts", numPts
for n in range(numPts):
interestingEnum = getInterestingEnum()
thisPt = currentCenter[:]
xyz = get_xyz_sphere(R)
for i in range(3):
thisPt[xyzShift+i] += int(xyz[i])
dsf.write(",".join(map(str,[interestingEnum] + thisPt))+"\n")
totalRows += 1
sphereCnt += 1 # end of while loop
dsf.close()
print "Spheres created:", len(centersList), "totalRows:", totalRows
return centersList
#*****************************************************
# Script body: write the sphere dataset, then optionally shuffle the rows.
csvFilename = 'syn_sphere_gen.csv'
csvPathname = './' + csvFilename
centersList = write_spheres_dataset(csvPathname, CLUSTERS, SPHERE_PTS)

if SHUFFLE_SPHERES:
    # since we create spheres in order, shuffle into a second file
    csvFilename2 = 'syn_sphere_gen_shuffled.csv'
    csvPathname2 = './' + csvFilename2
    import h2o_util
    h2o_util.file_shuffle(csvPathname, csvPathname2)
else:
    csvFilename2 = csvFilename
    csvPathname2 = csvPathname
| Python | 0.000006 | |
f9e6176bc43262882a0d50f4d850c04c3460b9d8 | Add SS :-) | rna_pdb_tools/SecondaryStructure.py | rna_pdb_tools/SecondaryStructure.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Seq and secondary structure prediction"""
import os
import shutil
import subprocess
import tempfile
VARNA_PATH = '/Users/magnus/skills/rnax/varna_tut/'
def draw_ss(title, seq, ss, img_out):
    """Render the secondary structure of a sequence to a PNG using VARNA.

    title   -- image title passed to VARNA
    seq     -- RNA sequence (dot-bracket partner of ss)
    ss      -- secondary structure in dot-bracket notation
    img_out -- destination path for the rendered PNG
    """
    curr = os.getcwd()
    # VARNA.jar is referenced relative to its install dir, so run from there
    os.chdir(VARNA_PATH)
    try:
        # create (and close) a named .png placeholder for VARNA to overwrite;
        # delete=False keeps it on disk after the handle is closed
        tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        tmp.close()
        # argument-list form avoids shell quoting/injection problems with
        # caller-supplied title/seq/ss
        subprocess.call(['java', '-cp', 'VARNA.jar',
                         'fr.orsay.lri.varna.applications.VARNAcmd',
                         '-sequenceDBN', seq,
                         '-structureDBN', ss,
                         '-o', tmp.name,
                         '-title', title,
                         '-resolution', '2.0'])
    finally:
        # always restore the caller's working directory
        os.chdir(curr)
    shutil.move(tmp.name, img_out)
if __name__ == '__main__':
    # smoke test: render a trivial 7-nt structure to out.png
    seq = 'AAAAAAA'
    ss = '((...))'
    img_out = 'out.png'
    draw_ss('rna', seq, ss, img_out)
| Python | 0 | |
e5fed1895b69d824e3dc773dd6c6f88974e24f67 | discard module (#61452) | lib/ansible/modules/network/checkpoint/cp_mgmt_discard.py | lib/ansible/modules/network/checkpoint/cp_mgmt_discard.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_discard
short_description: All changes done by user are discarded and removed from database.
description:
- All changes done by user are discarded and removed from database.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
uid:
description:
- Session unique identifier. Specify it to discard a different session than the one you currently use.
type: str
extends_documentation_fragment: checkpoint_commands
"""
EXAMPLES = """
- name: discard
cp_mgmt_discard:
"""
RETURN = """
cp_mgmt_discard:
description: The checkpoint discard output.
returned: always.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_commands, api_command
def main():
    """Entry point: run the checkpoint 'discard' command and exit.

    Builds the module argument spec (an optional session ``uid`` plus the
    shared checkpoint command options), invokes the command over the Web
    Services API, and reports the result through ``exit_json``.
    """
    spec = dict(
        uid=dict(type='str')
    )
    spec.update(checkpoint_argument_spec_for_commands)

    module = AnsibleModule(argument_spec=spec)
    module.exit_json(**api_command(module, "discard"))


if __name__ == '__main__':
    main()
| Python | 0 | |
1bd0669e67fc082cbd496b3aa54c6a6f6a0d5fce | Add grab.util.log::print_dict method for fuzzy displaying of dict objects in console | grab/util/log.py | grab/util/log.py | def repr_value(val):
if isinstance(val, unicode):
return val.encode('utf-8')
elif isinstance(val, (list, tuple)):
return '[%s]' % ', '.join(repr_val(x) for x in val)
elif isinstance(val, dict):
return '{%s}' % ', '.join('%s: %s' % (repr_val(x), repr_val(y)) for x, y in val.items())
else:
return str(val)
def print_dict(dic):
    """Pretty-print a dict to stdout, one 'key : value' line per entry,
    sorted by key and wrapped in [--- / ---] marker lines."""
    print '[---'
    for key, val in sorted(dic.items(), key=lambda x: x[0]):
        print key, ':', repr_value(val)
    print '---]'
| Python | 0.000001 | |
e5bd12b67f58c1a099c2bd2dd66b043b43969267 | Add a tool to publish packages in the repo to pub. Review URL: https://codereview.chromium.org//11415191 | tools/publish_pkg.py | tools/publish_pkg.py | #!/usr/bin/env python
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# Script to push a package to pub.
#
# Usage: publish_pkg.py pkg_dir
import os
import os.path
import re
import shutil
import sys
import subprocess
import tempfile
def ReplaceInFiles(paths, subs):
  '''Reads a series of files, applies a series of substitutions to each, and
  saves them back out. subs should be a list of (pattern, replace) tuples,
  where pattern is a regular expression applied with re.sub.'''
  for path in paths:
    # 'with' guarantees the handles are closed (the originals leaked)
    with open(path) as f:
      contents = f.read()
    for pattern, replace in subs:
      contents = re.sub(pattern, replace, contents)
    with open(path, 'w') as f:
      f.write(contents)
def ReadVersion(file, field):
  '''Returns the integer value of the named field in a VERSION-style file
  ("FIELD value" per line), or None when the field is absent.  Blank or
  malformed lines are skipped instead of crashing the unpack.'''
  with open(file) as f:
    contents = f.read()
  for line in contents.split('\n'):
    parts = re.split(r'\s+', line.strip())
    if len(parts) == 2 and parts[0] == field:
      return int(parts[1])
  return None
def Main(argv):
  """Copy the package at argv[1] into a temp dir, stamp it with the SDK
  version from tools/VERSION, rewrite relative ../pkg imports to package:
  imports, run 'pub publish' on the copy, and clean up.  Returns -1 when
  run from a bleeding_edge checkout."""
  HOME = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

  # Assemble the version string from tools/VERSION.
  versionFile = os.path.join(HOME, 'tools', 'VERSION')
  major = ReadVersion(versionFile, 'MAJOR')
  minor = ReadVersion(versionFile, 'MINOR')
  build = ReadVersion(versionFile, 'BUILD')
  patch = ReadVersion(versionFile, 'PATCH')

  # bleeding_edge checkouts carry 0.0/0.1 versions; refuse to publish those.
  if major == 0 and minor <= 1:
    print 'Error: Do not run this script from a bleeding_edge checkout.'
    return -1

  version = '%d.%d.%d+%d' % (major, minor, build, patch)

  # Work on a copy so the checkout itself is never modified.
  tmpDir = tempfile.mkdtemp()
  pkgName = argv[1].split('/').pop()
  shutil.copytree(os.path.join(HOME, argv[1]),
                  os.path.join(tmpDir, pkgName))

  # Add version to pubspec file.
  pubspec = os.path.join(tmpDir, pkgName, 'pubspec.yaml')
  pubspecFile = open(pubspec)
  lines = pubspecFile.readlines()
  pubspecFile.close()
  pubspecFile = open(pubspec, 'w')
  foundVersion = False
  for line in lines:
    if line.startswith('version:'):
      foundVersion = True
    # insert the version line just before 'description:' when none exists
    if line.startswith('description:') and not foundVersion:
      pubspecFile.write('version: ' + version + '\n')
    # drop any '  sdk:' constraint line; everything else is kept verbatim
    if not line.startswith('  sdk:'):
      pubspecFile.write(line)
  pubspecFile.close()

  # Replace '../*/pkg' imports and parts.
  for root, dirs, files in os.walk(os.path.join(tmpDir, pkgName)):
    for name in files:
      if name.endswith('.dart'):
        ReplaceInFiles([os.path.join(root, name)],
            [(r'(import|part)(\s+)(\'|")(\.\./)+pkg/', r'\1\2\3package:')])

  print 'publishing version ' + version + ' of ' + argv[1] + ' to pub\n'
  print tmpDir
  subprocess.call(['pub', 'publish'], cwd=os.path.join(tmpDir, pkgName))
  shutil.rmtree(tmpDir)
# Script entry point: delegate to Main and exit with its return code.
if __name__ == '__main__':
  sys.exit(Main(sys.argv))
| Python | 0.000003 | |
f734cbd91ff8997b9f2aac6bbec2238f8b5f7511 | Create __init__.py | graf/__init__.py | graf/__init__.py | Python | 0.000429 | ||
a1c2423c349757f4725ef1250b9de084a469683c | Fix indentation | ceph_medic/checks/cluster.py | ceph_medic/checks/cluster.py | from ceph_medic import metadata
#
# Error checks
#
def check_osds_exist():
    """Error check ECLS1: report when the cluster has no OSDs at all.

    Returns the (code, message) tuple on failure, or None implicitly when
    at least one OSD is present in the collected metadata.
    """
    code = 'ECLS1'
    msg = 'There are no OSDs available'
    # an empty mapping is falsy; no need to materialize .keys() and count
    if not metadata['osds']:
        return code, msg
def check_nearfull():
    """Error check ECLS2: report when the osdmap flags the cluster nearfull.

    Returns the (code, message) tuple when nearfull, or None implicitly
    when the flag is unset or the status data is missing.
    """
    try:
        cluster_status = metadata['cluster']['status']
        osdmap = cluster_status['osdmap']['osdmap']
    except KeyError:
        # status data missing or incomplete -> nothing to report
        return

    if osdmap['nearfull']:
        return 'ECLS2', 'Cluster is nearfull'
| from ceph_medic import metadata
#
# Error checks
#
def check_osds_exist():
code = 'ECLS1'
msg = 'There are no OSDs available'
osd_count = len(metadata['osds'].keys())
if not osd_count:
return code, msg
def check_nearfull():
"""
Checks if the osd capacity is at nearfull
"""
code = 'ECLS2'
msg = 'Cluster is nearfull'
try:
osd_map = metadata['cluster']['status']['osdmap']['osdmap']
except KeyError:
return
if osd_map['nearfull']:
return code, msg | Python | 0.000065 |
84990a4ef20c2e0f42133ed06ade5ce2d4e98ae3 | Save team member picture with extension. | chmvh_website/team/models.py | chmvh_website/team/models.py | import os
from django.db import models
def team_member_image_name(instance, filename):
    """Build the upload path for a team member's picture.

    The file lands under ``team/`` named after the member, keeping whatever
    extension the uploaded file had (empty when there is none).
    """
    extension = os.path.splitext(filename)[1]
    return 'team/{0}{1}'.format(instance.name, extension)
class TeamMember(models.Model):
    """A member of the team, with a name, biography and optional portrait."""

    # free-form biography text
    bio = models.TextField(
        verbose_name='biography')

    # display name; also used by team_member_image_name to build the
    # picture's upload path, hence unique
    name = models.CharField(
        max_length=50,
        unique=True,
        verbose_name='name')

    # optional portrait, stored at the path built by team_member_image_name
    picture = models.ImageField(
        blank=True,
        null=True,
        upload_to=team_member_image_name)

    def __str__(self):
        """Return the team member's name"""
        return self.name
| from django.db import models
def team_member_image_name(instance, filename):
return 'team/{0}'.format(instance.name)
class TeamMember(models.Model):
bio = models.TextField(
verbose_name='biography')
name = models.CharField(
max_length=50,
unique=True,
verbose_name='name')
picture = models.ImageField(
blank=True,
null=True,
upload_to=team_member_image_name)
def __str__(self):
"""Return the team member's name"""
return self.name
| Python | 0 |
bf993439a7c53bcffe099a61138cf8c17c39f943 | Add Partner label factory | accelerator/migrations/0066_partnerlabel.py | accelerator/migrations/0066_partnerlabel.py | # Generated by Django 2.2.10 on 2021-08-24 13:27
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the PartnerLabel model (swappable via
    ACCELERATOR_PARTNERLABEL_MODEL)."""

    # must follow the previous migration in the accelerator app's chain
    dependencies = [
        ('accelerator', '0065_organization_note'),
    ]

    operations = [
        migrations.CreateModel(
            name='PartnerLabel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # nullable auto-managed timestamps
                ('created_at', models.DateTimeField(auto_now_add=True, null=True)),
                ('updated_at', models.DateTimeField(auto_now=True, null=True)),
                ('label', models.CharField(max_length=255)),
                # a label can be attached to any number of partners
                ('partners', models.ManyToManyField(blank=True, to=settings.ACCELERATOR_PARTNER_MODEL)),
            ],
            options={
                'abstract': False,
                'managed': True,
                'swappable': 'ACCELERATOR_PARTNERLABEL_MODEL',
            },
        ),
    ]
| Python | 0 | |
6f9dcee86d986f05e289b39f6b4700d5d302f551 | add tests for base models | jsonrpc/tests/test_base.py | jsonrpc/tests/test_base.py | """ Test base JSON-RPC classes."""
import unittest
from ..base import JSONRPCBaseRequest, JSONRPCBaseResponse
class TestJSONRPCBaseRequest(unittest.TestCase):

    """Exercise the JSONRPCBaseRequest data property."""

    def test_data(self):
        req = JSONRPCBaseRequest()
        # a fresh request exposes an empty data dict
        self.assertEqual(req.data, {})

        # non-dict assignments must be rejected
        for invalid in ([], None):
            with self.assertRaises(ValueError):
                req.data = invalid
class TestJSONRPCBaseResponse(unittest.TestCase):

    """Exercise the JSONRPCBaseResponse data property."""

    def test_data(self):
        resp = JSONRPCBaseResponse(result="")
        # a fresh response exposes an empty data dict
        self.assertEqual(resp.data, {})

        # non-dict assignments must be rejected
        for invalid in ([], None):
            with self.assertRaises(ValueError):
                resp.data = invalid
| Python | 0 | |
216b96e7f36d8b72ccd3ddf6809f0cc5af14d15a | Add fat_ready.py | fat_ready.py | fat_ready.py | #!/usr/bin/env python3
'''Make all files in a directory suitable for copying to a FAT filesystem.
'''
from __future__ import print_function
import os
import os.path
import sys
from six import u
if __name__ == '__main__':
    # The six.u() wrappers were unnecessary: with the print_function
    # __future__ import above, plain ASCII literals behave identically on
    # Python 2 and 3, so the third-party six dependency is not needed here.
    if len(sys.argv) != 2:
        print('Usage: {} <directory to make FAT ready>'.format(sys.argv[0]),
              file=sys.stderr)
        sys.exit(1)

    fat_ready_dir = sys.argv[1]
    # Walk the tree and rename any file whose name contains ':', which FAT
    # filesystems cannot store, replacing it with a space.
    for root, dirs, files in os.walk(fat_ready_dir):
        for name in files:
            if ':' in name:
                new_name = name.replace(':', ' ')
                full_path_old = os.path.join(root, name)
                full_path_new = os.path.join(root, new_name)
                print('Renaming {} to {}'.format(full_path_old, full_path_new))
                # NOTE(review): if the target name already exists this will
                # silently overwrite it on POSIX -- confirm acceptable
                os.rename(full_path_old, full_path_new)
| Python | 0.00007 | |
b362f33e6c8c83fc01c97760fd24665f18e61adf | extract Axi_datapumpTC | hwtLib/amba/datapump/test.py | hwtLib/amba/datapump/test.py | from typing import Optional, List
from hwt.serializer.combLoopAnalyzer import CombLoopAnalyzer
from hwt.simulator.simTestCase import SingleUnitSimTestCase
from hwtLib.amba.constants import RESP_OKAY
from hwtLib.examples.errors.combLoops import freeze_set_of_sets
from pyMathBitPrecise.bit_utils import mask, get_bit_range, get_bit
class Axi_datapumpTC(SingleUnitSimTestCase):
    """Shared helpers for AXI datapump simulation tests.

    Builds reference address/data transactions and compares them with what
    the simulated agents produced.  NOTE(review): relies on attributes set
    up by concrete subclasses (``self.u`` with ``driver``/``axi``
    interfaces, ``self.LEN_MAX_VAL``) -- confirm against the subclass.
    """

    def aTrans(self, addr, _len, _id):
        """Build one address-channel request (ar when the bus has a read
        channel, aw otherwise)."""
        axi = self.u.axi
        if axi.HAS_R:
            axi_a = axi.ar
        else:
            axi_a = axi.aw
        if axi.LEN_WIDTH:
            return axi_a._ag.create_addr_req(addr, _len, _id=_id)
        else:
            # bus without burst length: the request carries only the address
            return axi_a._ag.create_addr_req(addr)

    def test_no_comb_loops(self):
        """The unit must contain no combinational loops."""
        s = CombLoopAnalyzer()
        s.visit_Unit(self.u)
        comb_loops = freeze_set_of_sets(s.report())
        # for loop in comb_loops:
        #     print(10 * "-")
        #     for s in loop:
        #         print(s.resolve()[1:])
        self.assertEqual(comb_loops, frozenset())

    def mkReq(self, addr, _len, rem=0, _id=0):
        """Build one driver req-channel transaction; the tuple layout depends
        on whether the datapump supports bursts (LEN_MAX_VAL truthy)."""
        if self.LEN_MAX_VAL:
            return (addr, _len, rem)
        else:
            # burst-less pump: only zero-length requests are legal
            assert _len == 0, _len
            return (addr, rem)

    def rTrans(self, data, _id=0, resp=RESP_OKAY, last=True):
        """Build one AXI r-channel (read data) beat."""
        return (_id, data, resp, int(last))

    def rDriverTrans(self, data, last, strb=mask(64 // 8), id_=0):
        """Build one expected driver r-channel beat.

        NOTE(review): ``id_`` is accepted but not part of the returned
        tuple -- confirm that's intended.
        """
        return (data, strb, int(last))

    def spotReadMemcpyTransactions(self,
                                   base: int,
                                   len_: int,
                                   singleReqFrameLen: Optional[int],
                                   data:List[int]=None,
                                   addData: bool=True,
                                   lastWordByteCnt=None):
        """
        :param base: base address where to start
        :param len_: total number of words to copy - 1
        :param singleReqFrameLen: total max number of words in a single frame - 1
        :param addData: if True transactions for data channels are prepared as well
        :param lastWordByteCnt: if not None it is used to generate a strb (byte enable mask) in last
            word of reference receive data
        """
        u = self.u
        addr_step = u.DATA_WIDTH // 8  # bytes per bus word
        req = u.driver._ag.req
        assert base % addr_step == 0, base  # base must be word aligned
        MAGIC = 100
        if singleReqFrameLen is None:
            singleReqFrameLen = u.driver.req.MAX_LEN
        if lastWordByteCnt is None:
            lastWordByteCnt = 0
        else:
            assert lastWordByteCnt > 0 and lastWordByteCnt <= addr_step, lastWordByteCnt
            # NOTE(review): a full last word (== addr_step) wraps to 0, i.e.
            # "all bytes valid" -- confirm this encoding
            lastWordByteCnt %= addr_step

        # longest burst one AXI transaction may carry
        AXI_LEN_MAX = min(2 ** u.axi.LEN_WIDTH, singleReqFrameLen + 1, len_ + 1)
        # 1) split the copy into driver requests of <= singleReqFrameLen+1 words
        offset = base
        end = offset + (len_ + 1) * addr_step
        while offset < end:
            if offset != base and lastWordByteCnt != 0:
                # partial last word supported only for single-request copies
                raise NotImplementedError()
            len__ = min(singleReqFrameLen, (end - offset) // addr_step - 1)
            req.data.append(self.mkReq(offset, len__, rem=lastWordByteCnt))
            offset += addr_step * (singleReqFrameLen + 1)

        # 2) reference AXI address transactions, one per AXI_LEN_MAX burst
        offset = base
        end = offset + (len_ + 1) * addr_step
        ar_ref = []
        while offset < end:
            len__ = min(AXI_LEN_MAX, (end - offset) // addr_step) - 1
            a = self.aTrans(
                offset,
                len__,
                0
            )
            ar_ref.append(a)
            offset += addr_step * AXI_LEN_MAX

        # 3) feed read data into the AXI r channel and build the matching
        #    expected beats on the driver side
        rIn = u.axi.r._ag.data
        r_ref = []
        M_ALL = mask(addr_step)  # strobe with every byte enabled
        if addData:
            if data is not None:
                assert len(data) == len_ + 1, (len_ + 1, data)
            for i in range(len_ + 1):
                # 'last' on the driver side ends a requested frame; on the
                # AXI side it ends a burst
                lastForDriver = (i + 1) % (singleReqFrameLen + 1) == 0 or i == len_
                lastForAxi = (i + 1) % AXI_LEN_MAX == 0
                last = lastForDriver or lastForAxi
                if data is not None:
                    d = data[i]
                else:
                    d = MAGIC + base + i  # deterministic filler data
                rIn.append(self.rTrans(d, last=last))
                if lastForDriver and lastWordByteCnt != 0:
                    m = mask(lastWordByteCnt)
                else:
                    m = M_ALL
                r_ref.append(self.rDriverTrans(d, int(lastForDriver), m))
        else:
            assert not data

        return ar_ref, r_ref

    def check_r_trans(self, ar_ref, driver_r_ref):
        """Assert all stimulus was consumed and the collected driver/AXI
        transactions match the references built by
        spotReadMemcpyTransactions."""
        u = self.u
        # every request and every injected r beat must have been consumed
        self.assertEqual(len(u.driver._ag.req.data), 0)
        self.assertEqual(len(u.axi.r._ag.data), 0)

        r_data = []
        m_width = u.driver.r.strb._dtype.bit_length()
        for (d, m, l) in u.driver.r._ag.data:
            if l:
                # last beat: check per-byte validity against the strobe;
                # valid strobed bytes must form a contiguous prefix
                m = int(m)
                invalid_seen = False
                for B_i in range(m_width):
                    B = get_bit_range(d.vld_mask, B_i * 8, 8)
                    _m = get_bit(m, B_i)
                    if _m and B != 0xff:
                        # a strobed byte is not fully valid: drop the value
                        d = None
                        break
                    if invalid_seen:
                        if _m:
                            raise ValueError("The value prefix is invalid, but there is a part of the value which is valid", d, B_i)
                    else:
                        if not _m:
                            invalid_seen = True
            if d is not None:
                # mask removes the potentially invalid bytes
                d = d.val
            r_data.append((d, m, l))

        self.assertValSequenceEqual(r_data, driver_r_ref)
        self.assertValSequenceEqual(u.axi.ar._ag.data, ar_ref)
| Python | 0.999998 | |
40070b6bab49fa0bd46c1040d92bc476e557b19b | add algorithms.fractionation to assess gene loss, bites, etc. | algorithms/fractionation.py | algorithms/fractionation.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Catalog gene losses, and bites within genes.
"""
import sys
from optparse import OptionParser
from itertools import groupby
from jcvi.formats.blast import Blast
from jcvi.utils.range import range_minmax, range_overlap
from jcvi.utils.cbook import gene_name
from jcvi.algorithms.synteny import add_beds, check_beds
from jcvi.apps.base import ActionDispatcher, debug
debug()
def main():
    """Register the available subcommands and dispatch to the one named on
    the command line."""
    dispatcher = ActionDispatcher((
        ('loss', 'extract likely gene loss candidates'),
    ))
    dispatcher.dispatch(globals())
def region_str(region):
    """Format a (seqid, start, end) triple as 'seqid:start-end'."""
    fields = tuple(region)
    return "%s:%s-%s" % fields[:3]
def loss(args):
"""
%prog loss a.b.i1.blocks a.b-genomic.blast
Extract likely gene loss candidates between genome a and b.
"""
p = OptionParser(loss.__doc__)
p.add_option("--gdist", default=20,
help="Gene distance [default: %default]")
p.add_option("--bdist", default=20000,
help="Base pair distance [default: %default]")
add_beds(p)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
blocksfile, genomicblast = args
gdist, bdist = opts.gdist, opts.bdist
qbed, sbed, qorder, sorder, is_self = check_beds(blocksfile, p, opts)
blocks = []
fp = open(blocksfile)
genetrack = {}
proxytrack = {}
for row in fp:
a, b = row.split()
genetrack[a] = b
blocks.append((a, b))
data = []
for key, rows in groupby(blocks, key=lambda x: x[-1]):
rows = list(rows)
data.append((key, rows))
imax = len(data) - 1
for i, (key, rows) in enumerate(data):
if i == 0 or i == imax:
continue
if key != '.':
continue
before, br = data[i - 1]
after, ar = data[i + 1]
bi, bx = sorder[before]
ai, ax = sorder[after]
dist = abs(bi - ai)
if bx.seqid != ax.seqid or dist > gdist:
continue
start, end = range_minmax(((bx.start, bx.end), (ax.start, ax.end)))
proxy = (bx.seqid, start - bdist, end + bdist)
for a, b in rows:
proxytrack[a] = proxy
blast = Blast(genomicblast)
tags = {}
for query, bb in blast.iter_hits():
query = gene_name(query)
if query not in proxytrack:
continue
proxy = proxytrack[query]
tag = "NS"
for b in bb:
hsp = (b.subject, b.sstart, b.sstop)
if range_overlap(proxy, hsp):
proxytrack[query] = hsp
tag = "S"
break
tags[query] = tag
for b in qbed:
accn = b.accn
target_region = genetrack[accn]
if accn in proxytrack:
target_region = region_str(proxytrack[accn])
if accn in tags:
target_region += "[{0}]".format(tags[accn])
else:
target_region += "[NF]"
print "\t".join((accn, target_region))
# Script entry point: hand control to the subcommand dispatcher.
if __name__ == '__main__':
    main()
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.