commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
49b3c91ffdbd04fbce523599320820278bb5d8aa | Add data file. | michaelpacer/scipy_proceedings,katyhuff/scipy_proceedings,mikaem/euroscipy_proceedings,euroscipy/euroscipy_proceedings,katyhuff/scipy_proceedings,sbenthall/scipy_proceedings,sbenthall/scipy_proceedings,SepidehAlassi/euroscipy_proceedings,katyhuff/scipy_proceedings,dotsdl/scipy_proceedings,Stewori/euroscipy_proceedings,Stewori/euroscipy_proceedings,michaelpacer/scipy_proceedings,SepidehAlassi/euroscipy_proceedings,helgee/euroscipy_proceedings,euroscipy/euroscipy_proceedings,helgee/euroscipy_proceedings,chendaniely/scipy_proceedings,michaelpacer/scipy_proceedings,springcoil/euroscipy_proceedings,Stewori/euroscipy_proceedings,chendaniely/scipy_proceedings,juhasch/euroscipy_proceedings,mwcraig/scipy_proceedings,mwcraig/scipy_proceedings,juhasch/euroscipy_proceedings,sbenthall/scipy_proceedings,springcoil/euroscipy_proceedings,mikaem/euroscipy_proceedings,mwcraig/scipy_proceedings,mjklemm/euroscipy_proceedings,helgee/euroscipy_proceedings,SepidehAlassi/euroscipy_proceedings,mjklemm/euroscipy_proceedings,dotsdl/scipy_proceedings,mikaem/euroscipy_proceedings,mjklemm/euroscipy_proceedings,chendaniely/scipy_proceedings,dotsdl/scipy_proceedings,juhasch/euroscipy_proceedings,euroscipy/euroscipy_proceedings,springcoil/euroscipy_proceedings | data.py | data.py | # Ignore this file
{'paper_abstract': 'An abstract',
'authors': [{'first_names': 'XX',
'surname': 'XXX',
'address': 'XXX',
'country': 'XXX',
'email_address': 'xxx@XXX',
'institution': 'XXX'}],
'title': ''}
| bsd-2-clause | Python | |
885ed1e8e3256352d2fde771bef57997809c3c1e | Remove monthly_billing table from the database | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0209_remove_monthly_billing_.py | migrations/versions/0209_remove_monthly_billing_.py | """
Revision ID: 0209_remove_monthly_billing
Revises: 84c3b6eb16b3
Create Date: 2018-07-27 14:46:30.109811
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0209_remove_monthly_billing'
down_revision = '84c3b6eb16b3'
def upgrade():
    """Drop the monthly_billing table and its service_id index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_monthly_billing_service_id', table_name='monthly_billing')
    op.drop_table('monthly_billing')
    # ### end Alembic commands ###
def downgrade():
    """Recreate the monthly_billing table (schema only -- the data
    removed by upgrade() cannot be restored)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('monthly_billing',
        sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
        sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),
        sa.Column('notification_type', postgresql.ENUM('email', 'sms', 'letter', name='notification_type'), autoincrement=False, nullable=False),
        sa.Column('monthly_totals', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=False),
        sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
        sa.Column('start_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
        sa.Column('end_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(['service_id'], ['services.id'], name='monthly_billing_service_id_fkey'),
        sa.PrimaryKeyConstraint('id', name='monthly_billing_pkey'),
        sa.UniqueConstraint('service_id', 'start_date', 'notification_type', name='uix_monthly_billing')
    )
    op.create_index('ix_monthly_billing_service_id', 'monthly_billing', ['service_id'], unique=False)
    # ### end Alembic commands ###
| mit | Python | |
324f670e747af0b949bc2c9fb503c875b7f20a7b | Initialize 06.sameName3 | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter03/06.sameName3.py | books/AutomateTheBoringStuffWithPython/Chapter03/06.sameName3.py | # This program demonstrates global and local variable rules
def spam():
    global eggs
    eggs = 'spam'  # rebinds the module-level eggs (because of the global statement)


def bacon():
    eggs = 'bacon'  # assignment creates a new local; the global eggs is untouched


def ham():
    print(eggs)  # no assignment in this function, so this reads the global


eggs = 42  # the global (defined outside all functions)
spam()
print(eggs)  # prints 'spam' -- spam() overwrote the global above
| mit | Python | |
6987558cefb1179c4501ee5f43e39618f67c49c7 | Initialize P02_writeCSV | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter14/P02_writeCSV.py | books/AutomateTheBoringStuffWithPython/Chapter14/P02_writeCSV.py | # This program uses the csv module to manipulate .csv files
import csv

# Writer objects: newline='' stops the csv module from writing extra
# blank rows on platforms that translate line endings (see csv docs).
outputFile = open("output.csv", "w", newline='')
outputWriter = csv.writer(outputFile)
# Each print shows the value writerow() returns (the result of the
# underlying file write, i.e. the number of characters written).
print(outputWriter.writerow(['spam', 'eggs', 'bacon', 'ham']))
print(outputWriter.writerow(['Hello, world!', 'eggs', 'bacon', 'ham']))
print(outputWriter.writerow([1, 2, 3.141592, 4]))
outputFile.close()
| mit | Python | |
b8ddb1b64ef2216add5b0b136b09b72d91506767 | Add initial msgpack renderer | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/renderers/msgpack.py | salt/renderers/msgpack.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
# Import third party libs
import msgpack
def render(msgpack_data, saltenv='base', sls='', **kws):
    '''
    Accepts msgpack as a string or as a file object and runs it through
    the msgpack deserializer.

    An empty (or whitespace-only) document renders to an empty dict.

    :rtype: A Python data structure
    '''
    # Duck-type file objects instead of the Python-2-only ``basestring``
    # check the original used (``basestring`` raises NameError on Py3).
    if hasattr(msgpack_data, 'read'):
        msgpack_data = msgpack_data.read()
    # Strip an optional shebang line (e.g. ``#!msgpack``) prepended by
    # the renderer pipeline.
    if msgpack_data.startswith('#!'):
        msgpack_data = msgpack_data[(msgpack_data.find('\n') + 1):]
    if not msgpack_data.strip():
        return {}
    return msgpack.loads(msgpack_data)
| apache-2.0 | Python | |
e665e9cb374fd67baec7ec598bfd352e04192210 | add gripper class to pick up pieces with electromagnet | joeymeyer/raspberryturk | raspberryturk/embedded/motion/gripper.py | raspberryturk/embedded/motion/gripper.py | import RPi.GPIO as GPIO
from time import sleep
electromagnet_pin = 40
servo_pin = 38
class Gripper(object):
    """Controls the chess-piece gripper: a lift servo plus an
    electromagnet, driven through RPi.GPIO in BOARD pin numbering.
    """

    def __init__(self):
        # Last commanded height; None until the first move(), which makes
        # the first move use a long settle time (position is unknown).
        self.previous_z = None
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(servo_pin, GPIO.OUT)
        GPIO.setup(electromagnet_pin, GPIO.OUT)

    def calibrate(self):
        """Drive the servo to the fully-raised position (z=100)."""
        self.move(100)

    def move(self, z):
        """Move the lift servo to height z, clamped to [0, 100].

        z maps linearly to a PWM duty cycle of 4.0-10.7% at 50 Hz
        (constants presumably tuned for this servo -- TODO confirm).
        """
        z = max(0.0, min(z, 100.0))
        dc = (z * 0.067) + 4.0
        p = GPIO.PWM(servo_pin, 50.0)
        p.start(dc)
        # Wait long enough for the servo to physically reach the target:
        # 10 s for the very first move (unknown start position), otherwise
        # a time proportional to the distance travelled plus 0.5 s margin.
        if self.previous_z is None:
            t = 10.0
        else:
            t = (abs(self.previous_z - z) / 10.0) + 0.5
        sleep(t)
        p.stop()
        del p
        self.previous_z = z

    def electromagnet(self, on):
        """Switch the electromagnet on (True) or off (False)."""
        output = GPIO.HIGH if on else GPIO.LOW
        GPIO.output(electromagnet_pin, output)

    def pickup(self, z):
        """Lower to height z, energize the magnet, then raise back up."""
        self.move(z)
        sleep(0.4)
        self.electromagnet(True)
        sleep(0.2)
        self.move(100)

    def dropoff(self, z):
        """Lower to height z, release the magnet, then raise back up."""
        self.move(z)
        sleep(0.2)
        self.electromagnet(False)
        sleep(0.4)
        self.move(100)

    def cleanup(self):
        """Release all GPIO resources claimed by this process."""
        GPIO.cleanup()
| mit | Python | |
b790a10de84d0ffb40e9834c7393a8d905d1aab5 | Add missing migration | defivelo/db,defivelo/db,defivelo/db | apps/challenge/migrations/0052_auto_20190225_1631.py | apps/challenge/migrations/0052_auto_20190225_1631.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-02-25 15:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11) migration.

    Rewrites the foreign keys on the historical* models so they use
    on_delete=DO_NOTHING and db_constraint=False (no database-level FK
    constraint), matching the current model definitions.
    """

    dependencies = [
        ('challenge', '0051_auto_20181019_1411'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalhelperseasonworkwish',
            name='helper',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Moniteur'),
        ),
        migrations.AlterField(
            model_name='historicalhelperseasonworkwish',
            name='season',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='challenge.Season', verbose_name='Saison'),
        ),
        migrations.AlterField(
            model_name='historicalhelpersessionavailability',
            name='helper',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Moniteur'),
        ),
        migrations.AlterField(
            model_name='historicalhelpersessionavailability',
            name='session',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='challenge.Session', verbose_name='Session'),
        ),
        migrations.AlterField(
            model_name='historicalqualification',
            name='activity_A',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='challenge.QualificationActivity', verbose_name='Agilité'),
        ),
        migrations.AlterField(
            model_name='historicalqualification',
            name='activity_B',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='challenge.QualificationActivity', verbose_name='Mécanique'),
        ),
        migrations.AlterField(
            model_name='historicalqualification',
            name='activity_C',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='challenge.QualificationActivity', verbose_name='Rencontre'),
        ),
        migrations.AlterField(
            model_name='historicalqualification',
            name='actor',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Intervenant'),
        ),
        migrations.AlterField(
            model_name='historicalqualification',
            name='leader',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Moniteur 2'),
        ),
        migrations.AlterField(
            model_name='historicalseason',
            name='leader',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Chargé de projet'),
        ),
        migrations.AlterField(
            model_name='historicalsession',
            name='address_ptr',
            field=models.ForeignKey(auto_created=True, blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, parent_link=True, related_name='+', to='common.Address'),
        ),
        migrations.AlterField(
            model_name='historicalsession',
            name='orga',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='orga.Organization', verbose_name='Établissement'),
        ),
        migrations.AlterField(
            model_name='historicalsession',
            name='superleader',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Moniteur + / Photographe'),
        ),
    ]
| agpl-3.0 | Python | |
b94e4f369b5881f579ca47e48ae191324c19990e | Add .bmp image encoder | LonamiWebs/Py-Utils | image-generation/bmp.py | image-generation/bmp.py | #!/usr/bin/python3
import struct
def encode_bmp(f, width, height, data, bpp=24):
    """Write an uncompressed BMP image to the binary file object *f*.

    Parameters
    ----------
    f : binary file object
        Destination opened in binary write mode.
    width, height : int
        Image dimensions in pixels.
    data : list of rows, each a list of (R, G, B) tuples
        Pixel data; must match *width* and *height*.
    bpp : int
        Bits per pixel (24 for 8+8+8 RGB).

    Bug fixes vs. the original: planes field is 1 (the BMP spec requires
    exactly 1, not 2), the image-data size is computed from the padded
    byte width of a row (not the pixel count), and biSizeImage is the
    real data size instead of a hard-coded 16.
    """
    # Each pixel row is padded to a multiple of 4 bytes
    bytes_per_row = width * (bpp // 8)
    padding = bytes((4 - bytes_per_row % 4) % 4)
    data_size = height * (bytes_per_row + len(padding))

    # BMP file header (14 bytes)
    f.write(b'BM')                                   # (2) magic
    f.write(struct.pack('<I', 14 + 40 + data_size))  # (4) total file size
    f.write(struct.pack('<H', 0))                    # (2) reserved
    f.write(struct.pack('<H', 0))                    # (2) reserved
    f.write(struct.pack('<I', 14 + 40))              # (4) offset to pixel data

    # DIB header (BITMAPINFOHEADER, 40 bytes)
    f.write(struct.pack('<I', 40))                   # (4) DIB header size
    f.write(struct.pack('<i', width))                # (4) width in pixels
    f.write(struct.pack('<i', height))               # (4) height (>0 => bottom-up)
    f.write(struct.pack('<H', 1))                    # (2) planes: must be 1
    f.write(struct.pack('<H', bpp))                  # (2) bits per pixel
    f.write(struct.pack('<I', 0))                    # (4) BI_RGB, no compression
    f.write(struct.pack('<I', data_size))            # (4) size of the raw pixel data
    f.write(struct.pack('<I', 2835))                 # (4) horizontal px/meter (~72 dpi)
    f.write(struct.pack('<I', 2835))                 # (4) vertical px/meter
    f.write(struct.pack('<I', 0))                    # (4) palette colors (0 = all)
    f.write(struct.pack('<I', 0))                    # (4) important colors (0 = all)

    # Pixel array: rows are stored bottom-up, channels as BGR
    for row in reversed(data):
        for pixel in row:
            for channel in reversed(pixel):          # RGB tuple -> B, G, R
                f.write(struct.pack('<B', channel))
        f.write(padding)
    # BMP written!
# Demo palette: RGB tuples keyed by short single-letter names
g = ( 0, 255, 0)  # green
b = (180, 100, 40)  # brown
d = ( 20, 20, 20)  # dark
w = ( 0, 128, 255)  # water
s = (255, 255, 0)  # sun
e = ( 0, 0, 0)  # empty (black)

# 10x10 demo scene (tree, sun, water) as rows of RGB pixels, top row first
img = [
    [e, e, e, e, e, e, e, s, s, e],
    [e, e, e, e, e, e, e, s, s, e],
    [e, e, g, g, g, e, e, e, e, e],
    [e, e, g, g, g, e, e, e, e, e],
    [e, e, g, b, g, e, e, e, e, d],
    [e, e, e, b, e, e, e, e, w, d],
    [e, e, e, b, e, e, e, e, w, d],
    [e, e, e, b, e, e, e, w, d, d],
    [e, d, d, d, d, w, w, w, d, d],
    [d, d, d, d, d, d, d, d, d, d]
]

# Encode the demo image to disk; binary mode ('b') is required
with open('tst.bmp', 'w+b') as f:
    encode_bmp(f,
               width=len(img[0]),
               height=len(img),
               data=img)
| mit | Python | |
59837bda53b958c7fdb50a3b2808a42fd667cd96 | Create z07-dnn_autoencoder_iris.py | hpssjellis/forth-tensorflow,hpssjellis/forth-tensorflow,hpssjellis/forth-tensorflow | skflow-examples/z07-dnn_autoencoder_iris.py | skflow-examples/z07-dnn_autoencoder_iris.py | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn import datasets
# Load the Iris dataset
iris = datasets.load_iris()

# Initialize a deep neural network autoencoder with two hidden layers
# (10 and 20 units). You can also add noise and add dropout if needed.
# Details see TensorFlowDNNAutoencoder documentation.
autoencoder = learn.TensorFlowDNNAutoencoder(hidden_units=[10, 20])

# Fit on the Iris features and print their encoded representation
transformed = autoencoder.fit_transform(iris.data)
print(transformed)
| mit | Python | |
96eba676abeb8c70dcaddb692133a9314e2255c3 | Add harvester for pcom | erinspace/scrapi,felliott/scrapi,felliott/scrapi,jeffreyliu3230/scrapi,erinspace/scrapi,mehanig/scrapi,mehanig/scrapi,CenterForOpenScience/scrapi,alexgarciac/scrapi,fabianvf/scrapi,CenterForOpenScience/scrapi,fabianvf/scrapi | scrapi/harvesters/pcom.py | scrapi/harvesters/pcom.py | '''
Harvester for the DigitalCommons@PCOM for the SHARE project
Example API call: http://digitalcommons.pcom.edu/do/oai/?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class PcomHarvester(OAIHarvester):
    """OAI-PMH harvester for DigitalCommons@PCOM (SHARE project).

    All harvesting behavior is inherited from OAIHarvester; this subclass
    only supplies the endpoint configuration.
    """
    short_name = 'pcom'
    long_name = 'DigitalCommons@PCOM'
    url = 'http://digitalcommons.pcom.edu'
    base_url = 'http://digitalcommons.pcom.edu/do/oai/'

    # extra OAI record properties to keep alongside the normalized fields
    property_list = ['date', 'source', 'identifier', 'type', 'format', 'setSpec']
    # include full timestamps (not just dates) in OAI date-range requests
    timezone_granularity = True

    # restrict harvesting to these OAI sets
    approved_sets = [u'biomed', u'pa_systematic_reviews', u'psychology_dissertations',
                     u'scholarly_papers', u'research_day', u'posters']
| apache-2.0 | Python | |
e0b93ff74ee6eeabf29567a13d2e31de11a3b68a | Make migration | mfcovington/djangocms-lab-carousel,mfcovington/djangocms-lab-carousel | cms_lab_carousel/migrations/0003_auto_20150827_0111.py | cms_lab_carousel/migrations/0003_auto_20150827_0111.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import filer.fields.image
class Migration(migrations.Migration):
    """Auto-generated migration.

    Replaces the Slide model's per-publication fields (article_url,
    journal_name, pdf, pubmed_url) with a single ForeignKey to
    cms_lab_publications.Publication, and updates help_text/blank options
    on several existing fields.
    """

    dependencies = [
        ('cms_lab_publications', '0001_initial'),
        ('cms_lab_carousel', '0002_auto_20150508_1300'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='slide',
            name='article_url',
        ),
        migrations.RemoveField(
            model_name='slide',
            name='journal_name',
        ),
        migrations.RemoveField(
            model_name='slide',
            name='pdf',
        ),
        migrations.RemoveField(
            model_name='slide',
            name='pubmed_url',
        ),
        migrations.AddField(
            model_name='slide',
            name='publication',
            field=models.ForeignKey(help_text='<strong>If this slide is for a publication, select/create a publication.</strong><br>The publication info will be used to auto-populate the title, subtitle, and description fields when slide is saved (if those fields are left blank).<br>To override this auto-fill behavior, manually enter the title, subtitle, and/or description below.', blank=True, to='cms_lab_publications.Publication', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='description',
            field=models.TextField(blank=True, help_text='<strong>Enter a description of this slide.</strong><br>If this is a slide for a publication and this field is left blank, it will be auto-populated with the abstract of the publication.', verbose_name='slide description'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='image',
            field=filer.fields.image.FilerImageField(related_name='slide_image', help_text='<strong>Choose/upload an image for this slide.</strong><br>If this is a slide for a publication and this field is left blank, the image for the publication will be used.', blank=True, to='filer.Image', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='other_url_color',
            field=models.CharField(help_text='If there is another relevant URL for this slide, choose the color for its button.', default='default', blank=True, choices=[('default', 'White'), ('primary', 'Blue'), ('info', 'Light Blue'), ('success', 'Green'), ('warning', 'Orange'), ('danger', 'Red')], max_length=7, verbose_name='other URL color'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='other_url_label',
            field=models.CharField(blank=True, help_text='If there is another relevant URL for this slide, enter the label for its button.', max_length=20, verbose_name='other URL label'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='page_link_color',
            field=models.CharField(help_text='If there is a page link for this slide, choose the color for its button.', default='default', blank=True, choices=[('default', 'White'), ('primary', 'Blue'), ('info', 'Light Blue'), ('success', 'Green'), ('warning', 'Orange'), ('danger', 'Red')], max_length=7, verbose_name='page link color'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='subtitle',
            field=models.CharField(blank=True, help_text='<strong>Enter a subtitle to be overlayed on top of this slide.</strong><br>If this is a slide for a publication and this field is left blank, it will be auto-populated with the citation for the publication.', max_length=255, verbose_name='slide subtitle'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='title',
            field=models.CharField(blank=True, help_text='<strong>Enter a title to be overlayed on top of this slide.</strong><br>If this is a slide for a publication and this field is left blank, it will be auto-populated with the title of the publication.', max_length=255, verbose_name='slide title'),
            preserve_default=True,
        ),
    ]
| bsd-3-clause | Python | |
aefd4009393e5ebf05ea9e485a1723776689ed70 | add node and node to container link | echinopsii/net.echinopsii.ariane.community.cli.python3 | tests/acceptance/mapping/node_at.py | tests/acceptance/mapping/node_at.py | # Ariane CLI Python 3
# Node acceptance tests
#
# Copyright (C) 2015 echinopsii
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ariane_clip3.mapping import MappingService, Node, Container, NodeService
__author__ = 'mffrench'
class NodeTest(unittest.TestCase):
    """Acceptance tests for mapping Node CRUD.

    Requires a live Ariane mapping service REST endpoint on
    localhost:6969 (credentials hard-coded below).
    """

    def setUp(self):
        # Connect to the local mapping service and create a throw-away
        # container for the node tests to attach to.
        args = {'type': 'REST', 'base_url': 'http://localhost:6969/ariane/', 'user': 'yoda', 'password': 'secret'}
        MappingService(args)
        self.container = Container(name="test_container", gate_uri="ssh://my_host/docker/test_container",
                                   primary_admin_gate_name="container name space (pid)", company="Docker",
                                   product="Docker", c_type="container")
        self.container.save()

    def test_create_remove_node_1(self):
        """Create a node referencing the container by id; the linkage only
        appears on the local container after an explicit __sync__()."""
        node = Node(name="mysqld", container_id=self.container.cid)
        node.save()
        self.assertIsNotNone(node.nid)
        self.container.__sync__()
        self.assertTrue(node.nid in self.container.nodes_id)
        self.assertIsNone(node.remove())
        self.container.__sync__()
        self.assertFalse(node.nid in self.container.nodes_id)
        self.container.remove()

    def test_create_remove_node_2(self):
        """Same as above but passing the container object directly, which
        keeps the local container in sync without __sync__() calls."""
        node = Node(name="mysqld", container=self.container)
        node.save()
        self.assertIsNotNone(node.nid)
        self.assertTrue(node.nid in self.container.nodes_id)
        self.assertIsNone(node.remove())
        self.assertFalse(node.nid in self.container.nodes_id)
        self.container.remove()

    def test_find_node_by_id(self):
        """A saved node is findable by id; a removed one is not."""
        node = Node(name="mysqld", container_id=self.container.cid)
        node.save()
        self.assertIsNotNone(NodeService.find_node(nid=node.nid))
        node.remove()
        self.assertIsNone(NodeService.find_node(nid=node.nid))

    def test_find_node_by_endpoint(self):
        # TODO: not implemented yet
        pass

    def test_get_nodes(self):
        """get_nodes() grows by one after save and shrinks after remove."""
        init_node_count = NodeService.get_nodes().__len__()
        node = Node(name="mysqld", container_id=self.container.cid)
        node.save()
        self.assertEqual(NodeService.get_nodes().__len__(), init_node_count + 1)
        node.remove()
        self.assertEqual(NodeService.get_nodes().__len__(), init_node_count)
| agpl-3.0 | Python | |
84ae279c0044e63e00c7d21823c3159e34c73d03 | Add a memory test script | HERA-Team/pyuvdata,HERA-Team/pyuvdata,HERA-Team/pyuvdata,HERA-Team/pyuvdata | scripts/uvfits_memtest.py | scripts/uvfits_memtest.py | #!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from memory_profiler import profile
import numpy as np
from astropy import constants as const
from astropy.io import fits
from pyuvdata import UVData
@profile
def read_uvfits():
    """Memory-profile two ways of loading a large MWA uvfits file:
    pyuvdata's UVData reader vs. direct astropy.io.fits access.

    NOTE(review): hard-coded local path -- this only runs on the author's
    machine. Each stage ends with explicit del calls so the @profile
    report shows when memory is actually released.
    """
    filename = '/Volumes/Data1/mwa_uvfits/1066571272.uvfits'
    # first test uvdata.read_uvfits. First read metadata then full data
    uv_obj = UVData()
    uv_obj.read_uvfits(filename, metadata_only=True)
    uv_obj.read_uvfits_data(filename)
    del(uv_obj)
    # now test details with astropy (memmap avoids loading the file eagerly)
    hdu_list = fits.open(filename, memmap=True)
    vis_hdu = hdu_list[0]
    # only read in times, then uvws, then visibilities
    time0_array = vis_hdu.data.par('date')
    # uvw coordinates scaled by c -- presumably stored in seconds; confirm
    # against the uvfits convention
    uvw_array = (np.array(np.stack((vis_hdu.data.par('UU'),
                                    vis_hdu.data.par('VV'),
                                    vis_hdu.data.par('WW')))) * const.c.to('m/s').value).T
    # combine the trailing real/imag axis into complex visibilities
    if vis_hdu.header['NAXIS'] == 7:
        data_array = (vis_hdu.data.data[:, 0, 0, :, :, :, 0] +
                      1j * vis_hdu.data.data[:, 0, 0, :, :, :, 1])
    else:
        data_array = (vis_hdu.data.data[:, 0, 0, :, :, 0] +
                      1j * vis_hdu.data.data[:, 0, 0, :, :, 1])
        # insert the missing axis so both branches yield the same rank
        data_array = data_array[:, np.newaxis, :, :]
    # test for releasing resources
    del(time0_array)
    del(uvw_array)
    del(data_array)
    # release file handles
    del(vis_hdu)
    del(hdu_list)
    del(filename)
    return


if __name__ == '__main__':
    read_uvfits()
| bsd-2-clause | Python | |
6bb1d3939a076d7b7fe799cdac8885a5f67219e3 | add ex32 | AisakaTiger/Learn-Python-The-Hard-Way,AisakaTiger/Learn-Python-The-Hard-Way | ex32.py | ex32.py | the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
# this first kind of for-loop goes through a list for number in the_count
for number in the_count:
print "This is count %d" % number
#same as above
for fruit in fruits:
print "A fruit of type : %s" %fruit
#also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print "I got %r" % i
# we can also build lists,forst start with an empty one
elements = []
#then use the range function to do 0 to 5 counts
for i in range(0,6):
print "Adding %d to the list." % i
# append is a function that list understand
elements.append(i)
# now we can print them out too
for i in elements:
print "Element was: %d" % i
| mit | Python | |
ef21221842ac401bfd083614d879a7b6c19b816a | add song.py: add Song class | hypergravity/hrs,hypergravity/hrs,hypergravity/hrs | song/song.py | song/song.py | # -*- coding: utf-8 -*-
"""
Author
------
Bo Zhang
Email
-----
bozhang@nao.cas.cn
Created on
----------
- Fri Feb 24 16:00:00 2017
Modifications
-------------
-
Aims
----
- Song class
"""
import glob
import sys
import numpy as np
from astropy.io import fits
from astropy.table import Table, Column
from tqdm import trange
import ccdproc
from joblib import Parallel, delayed
from astropy.table import Table
from .utils import scan_files
class Config(object):
    """Column-name configuration shared by Song tables."""

    # maps logical column roles to the actual table column names
    colnames = {
        'col_imagetype': 'IMAGETYP',
        'col_filepath': 'fps',
    }

    def __init__(self):
        pass
class Song(Table):
    """ represent SONG configuration: a table of observation files plus
    helpers to select subsets of images.

    Bug fix: all string comparisons below use ``==`` -- the original used
    ``is``/identity, which only worked via CPython string interning and
    warns on modern Python.
    """

    # shared column-name configuration
    cfg = Config()

    def __init__(self, *args, **kwargs):
        super(Song, self).__init__(*args, **kwargs)
        # add other attributes here
        # balabala

    @staticmethod
    def _init_from_dir(dirpath):
        """ initiate from a directory path

        Parameters
        ----------
        dirpath: string
            a directory path

        Returns
        -------
        Song
            a Song table built from the files scanned under *dirpath*
        """
        return Song(scan_files(dirpath, xdriftcol=False))

    def select_image(self, colname='default', value='FLAT', method='random',
                     n_images=10, return_colname=('fps'), verbose=False):
        """ select images whose *colname* equals *value*

        Parameters
        ----------
        colname: string
            name of the column that will be matched; 'default' resolves to
            the configured image-type column (cfg.colnames['col_imagetype'])
        value:
            the specified value rows must equal
        method: string, {'random', 'top', 'bottom'}
            how to pick among the matches
        n_images: int
            the number of images to select (capped at the match count)
        return_colname:
            the name(s) of the column(s) to return, or 'ind' for raw row
            indices. NOTE: the default ('fps') is a plain string, not a tuple.
        verbose: bool
            if True, pretty-print the selection

        Returns
        -------
        the selected rows/indices, or None if nothing matched

        Examples
        --------
        >>> s.select_image(value='FLAT', return_colname='fps')
        """
        # resolve the default column name from the configuration
        if colname == 'default':
            colname = self.cfg.colnames['col_imagetype']

        # determine the matched images
        ind_match = np.where(self[colname] == value)[0]
        n_match = len(ind_match)
        if n_match < 1:
            print('@SONG: no images matched!')
            return None

        # never select more images than actually matched
        n_images = np.min([n_match, n_images])

        # pick the indices of the selection according to method
        assert method in {'random', 'top', 'bottom'}
        if method == 'random':
            ind_rand = random_ind(n_match, n_images)
        elif method == 'top':
            ind_rand = np.arange(0, n_images, dtype=int)
        else:  # 'bottom'
            ind_rand = np.arange(n_match - n_images, n_match, dtype=int)

        if return_colname == 'ind':
            # raw row indices into the full table
            result = ind_match[ind_rand]
        else:
            # the requested column(s) of the selected rows
            result = self[return_colname][ind_match[ind_rand]]

        if verbose:
            print("@SONG: these are all images selected")
            # here result is a Table (an index array when
            # return_colname == 'ind')
            result.pprint()

        return result
def random_ind(n, m):
    """Pick m distinct indices from range(n), uniformly at random."""
    scores = np.random.rand(n,)
    order = np.argsort(scores)
    return order[:m]
| bsd-3-clause | Python | |
46a71071ed4982b02d0e49818a678dc2744c1b23 | Bump version number to 1.0 | mitsuhiko/flask,pallets/flask,fkazimierczak/flask,pallets/flask,mitsuhiko/flask,fkazimierczak/flask,drewja/flask,drewja/flask,pallets/flask,drewja/flask,fkazimierczak/flask | flask/__init__.py | flask/__init__.py | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
__version__ = '1.0'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed, before_render_template
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that Flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
| # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
__version__ = '1.0-dev'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed, before_render_template
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that Flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
| bsd-3-clause | Python |
59b1995789ede7da1757f23b5920e627e1d7818d | add maximum product subarray | haandol/algorithm_in_python | interview/amazon/mps.py | interview/amazon/mps.py | class Solution:
def maxProduct(self, A):
r = A[0]
n = len(A)
imin = r
imax = r
for i in xrange(1, n):
if A[i] < 0:
imax, imin = imin, imax
imax = max(A[i], imax * A[i])
imin = min(A[i], imin * A[i])
r = max(r, imax)
return r
if '__main__' == __name__:
    # Ad-hoc smoke tests (Python 2 print-statement syntax).
    A = [2, 3, -2, 4]  # expected: 2*3 = 6
    solution = Solution()
    print solution.maxProduct(A)
    A = [0]  # single element: the answer is the element itself, 0
    solution = Solution()
    print solution.maxProduct(A)
    A = [-3, 2, -3]  # two negatives multiply out: (-3)*2*(-3) = 18
    solution = Solution()
    print solution.maxProduct(A)
A = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 2, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, -3, -2, 0, 0, 2, 0, 2, 0, 0, 0, 0, -1, 0, 0, 0, -3,
0, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 0, 0, 0, 2, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, -2, 2, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -1, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 2, -2, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, -3, 0, -2, 0, -1, 0, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 2, 1, 0, 0, -2, 0, 0, 0, 0, 0, 0, 2, 0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 0]
solution = Solution()
print solution.maxProduct(A)
| mit | Python | |
4c9af992891ac5d39be50f9876a807f131a922e4 | prepare the organism annotation details | vipints/genomeutils,vipints/genomeutils,vipints/genomeutils | gfftools/gff_db.py | gfftools/gff_db.py | #!/usr/bin/env python
"""
Fetch the details about the features explained in a GFF type file.
Usage: python feature_info.py in.gff
Requirements:
gfftools : https://github.com/vipints/genomeutils/blob/master/gfftools
"""
import re
import sys
import GFFParser
def Intron_det(TDB):
    """Print min/max intron and exon lengths found in the annotation.

    TDB is the parsed annotation: a sequence of gene records, each with
    a 'transcripts' list and a parallel 'exons' list of per-transcript
    coordinate arrays ([start, stop] rows).

    Uses print() calls instead of the Python-2-only print statements so
    the module parses on Python 3; the emitted text is unchanged.
    """
    intron_size = dict()  # used as a set of observed intron lengths
    exon_size = dict()    # used as a set of observed exon lengths
    for ent1 in TDB:
        for idx, tid in enumerate(ent1['transcripts']):
            if not ent1['exons'][idx].any():
                continue
            exon_cnt = len(ent1['exons'][idx])
            if exon_cnt > 1:
                intron_start = 0
                for xq, excod in enumerate(ent1['exons'][idx]):
                    if xq > 0:
                        if excod[0] - intron_start == 1:
                            # adjacent exons: no intron in between
                            continue
                        # record this intron length
                        intron_size[excod[0] - intron_start] = 1
                    intron_start = excod[1] + 1
                    exon_size[intron_start - excod[0]] = 1
    if intron_size:
        # NOTE(review): the [0..2]/[-1..-3] indexing assumes at least
        # three distinct intron/exon lengths; fewer raises IndexError.
        keys_int = sorted(intron_size)
        print('MinIntronLength', int(keys_int[0]), int(keys_int[1]), int(keys_int[2]))
        print('MaxIntronLength', int(keys_int[-1]), int(keys_int[-2]), int(keys_int[-3]))
        print()
        keys_ex = sorted(exon_size)
        print('MinExonLength', int(keys_ex[0]), int(keys_ex[1]), int(keys_ex[2]))
        print('MaxExonLength', int(keys_ex[-1]), int(keys_ex[-2]), int(keys_ex[-3]))
    else:
        print("Error in feature mapping, please check the source of parent child features")
        print("May be the sources are different for parents and child features of the parent Gene")
def __main__():
    """Command-line entry point: parse the GFF file named on argv."""
    try:
        query_file = sys.argv[1]
    # narrow the previous bare `except:` to the only error argv[1] can
    # raise, so unrelated failures are not silently swallowed
    except IndexError:
        print(__doc__)
        sys.exit(-1)
    # get the annotated transcripts
    Transdb = GFFParser.Parse(query_file)
    # extract different features
    Intron_det(Transdb)

if __name__ == "__main__":
    __main__()
| bsd-3-clause | Python | |
a7787c60af7059f3b1a4dc3773da04dfc72631e2 | Add tools.ping | giserh/grab,shaunstanislaus/grab,istinspring/grab,alihalabyah/grab,shaunstanislaus/grab,alihalabyah/grab,raybuhr/grab,liorvh/grab,huiyi1990/grab,SpaceAppsXploration/grab,subeax/grab,codevlabs/grab,maurobaraldi/grab,lorien/grab,istinspring/grab,lorien/grab,raybuhr/grab,subeax/grab,maurobaraldi/grab,giserh/grab,pombredanne/grab-1,subeax/grab,DDShadoww/grab,kevinlondon/grab,huiyi1990/grab,pombredanne/grab-1,liorvh/grab,SpaceAppsXploration/grab,codevlabs/grab,kevinlondon/grab,DDShadoww/grab | grab/tools/ping.py | grab/tools/ping.py | from grab import Grab
import logging
import os
from grab.tools import html
from grab.tools.pwork import make_work
from grab.tools.encoding import smart_str
# XML-RPC request template for the weblogUpdates.ping call; ping() below
# substitutes the HTML-escaped site name and URL.
PING_XML = """<?xml version="1.0"?>
<methodCall>
<methodName>weblogUpdates.ping</methodName>
<params>
<param><value>%(name)s</value></param>
<param><value>%(url)s</value></param>
</params>
</methodCall>
"""
# Ping endpoints to notify, one URL per line; split into a list below.
SERVER_LIST = """
http://audiorpc.weblogs.com/RPC2
http://blogsearch.google.com.ua/ping/RPC2
http://blogsearch.google.com/ping/RPC2
http://blogsearch.google.ru/ping/RPC2
http://ping.blogs.yandex.ru/RPC2
http://ping.myblog.jp/
http://rpc.weblogs.com/RPC2
http://xping.pubsub.com/ping
""".strip().splitlines()
def ping(name, url, grab, thread_number=10):
    """
    Do XMLRPC ping of given site.

    Sends the weblogUpdates.ping payload to every URL in SERVER_LIST
    using `grab` for the POST requests, with `thread_number` parallel
    workers. Returns a list of (rpc_url, ok) tuples, one per server.
    """
    name = smart_str(name)
    url = smart_str(url)

    def worker(rpc_url):
        # Build the ping payload for this endpoint.
        post = PING_XML % {
            'url': html.escape(url),
            'name': html.escape(name),
        }
        ok = False
        try:
            grab.go(rpc_url, post=post)
        # `except ... as` replaces the Python-2-only comma form; it is
        # valid on Python 2.6+ as well.
        except Exception as ex:
            logging.error(unicode(ex))
        else:
            # A `<boolean>0` flerror value marks success; anything else
            # is logged as a failure with a snippet of the response.
            if '<boolean>0' not in grab.response.body:
                logging.error('%s : FAIL' % rpc_url)
                logging.error(grab.response.body[:1000])
            else:
                ok = True
        return rpc_url, ok

    results = []
    for rpc_url, ok in make_work(worker, SERVER_LIST, thread_number):
        results.append((rpc_url, ok))
    return results
if __name__ == '__main__':
    # Manual smoke test: ping one feed through a rotating proxy list.
    #logging.basicConfig(level=logging.DEBUG)
    g = Grab(timeout=15)
    g.setup_proxylist('/web/proxy.txt', 'http', auto_change=True)
    items = ping('seobeginner.ru', 'http://feeds2.feedburner.com/seobeginner',
                 g, thread_number=30)
    # Python 2 print statements: report the per-server success flags.
    print 'RESULT:'
    for rpc, ok in items:
        print rpc, ok
| mit | Python | |
f31a735ce54f74e7a38edaeb7c404a2dec8e5584 | add script | ashumeow/MeowRTC,ashumeow/MeowRTC,ashumeow/MeowRTC,ashumeow/MeowRTC,ashumeow/MeowRTC,ashumeow/MeowRTC | MeowRTC/cloud/stack/python/scripts/sample-p2p.py | MeowRTC/cloud/stack/python/scripts/sample-p2p.py | import logging
import jinja2
import webapp2
import os
import random
import json
from google.appengine.api import channel
# Template loader rooted at this module's directory.
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
logging.getLogger().setLevel(logging.DEBUG)
# Channel client ids of currently connected users (in-memory only).
users = set()
def random_string():
    """Return a random 4-digit string used as an ad-hoc user id."""
    # join() avoids repeated concatenation, and the local name no longer
    # shadows the builtin `str` as the original did.
    user_id = ''.join(random.choice('0123456789') for _ in range(4))
    logging.info(user_id)
    return user_id
class MainPage(webapp2.RequestHandler):
    """Serves index.html together with a fresh Channel API token."""

    def get(self):
        # Fall back to a generated id when no ?user= parameter was sent.
        current_user = self.request.get('user')
        if not current_user:
            current_user = random_string()
        ctx = {
            'token': channel.create_channel(current_user),
            'user': current_user,
        }
        page = jinja_environment.get_template('index.html')
        self.response.out.write(page.render(ctx))
class DisconnectPage(webapp2.RequestHandler):
    """Channel API presence callback: a client went away."""

    def post(self):
        departing = self.request.get('from')
        logging.info("Disconnect: " + departing)
        try:
            # KeyError means we never saw (or already dropped) this user.
            users.remove(departing)
        except KeyError:
            logging.info('User not logged in')
class ConnectPage(webapp2.RequestHandler):
    """Channel API presence callback: a client connected."""

    def post(self):
        joining = self.request.get('from')
        logging.info("Connect: " + joining)
        users.add(joining)
class MessagePage(webapp2.RequestHandler):
    """Relays a signalling message from one connected peer to another."""

    def post(self):
        payload = json.loads(self.request.body)
        for command, content in payload.items():
            recipient = content['to']
            sender = content['from']
            logging.info(sender + ' -> ' + recipient + ": " + command)
            if recipient not in users:
                # Unknown recipient: bounce an error back to the sender.
                logging.info('User not found')
                channel.send_message(sender, '{"ERROR":"User not found ' + recipient + '"}')
            else:
                channel.send_message(recipient, self.request.body)
# URL routes: the page itself, the message relay endpoint, and the two
# implicit Channel API presence callbacks.
app = webapp2.WSGIApplication([('/', MainPage), ('/message', MessagePage), ('/_ah/channel/connected/', ConnectPage), ('/_ah/channel/disconnected/', DisconnectPage)], debug=True)
| mit | Python | |
4c60f8f643fe05b69ca475242d8c46b02697d5d4 | Add example for type-checking chain | spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc | examples/howto/type_chain.py | examples/howto/type_chain.py | from thinc.api import chain, ReLu, MaxPool, Softmax, chain
# This example should be run with mypy. This is an example of type-level checking
# for network validity.
#
# We first define an invalid network.
# It's invalid because MaxPool expects Floats3d as input, while ReLu produces
# Floats2d as output. chain has type-logic to verify input and output types
# line up.
#
# You should see an error like:
# examples/howto/type_chain.py:10: error: Cannot infer type argument 2 of "chain"
bad_model = chain(ReLu(10), MaxPool(), Softmax())
# Now let's try it with a network that does work, just to be sure.
good_model = chain(ReLu(10), ReLu(10), Softmax())
# Finally we can reveal_type on the good model, to see what it thinks.
# (reveal_type is understood by mypy only; it does not exist at runtime.)
reveal_type(good_model)
| mit | Python | |
f6bdab51054b08d203251b0e7e73ed7818613c8d | add models.py file so test runner will recognize the app | byteweaver/django-eca-catalogue | eca_catalogue/text/models.py | eca_catalogue/text/models.py | # Hello test runner
| bsd-3-clause | Python | |
ce792a1b167b268ba1f798b3b08e08679d962d02 | Create distributeOnSurface.py | aaronfang/personal_scripts | af_scripts/tmp/distributeOnSurface.py | af_scripts/tmp/distributeOnSurface.py | import random
import maya.mel
class distributeOnSurface(object):
    """Maya tool window (WIP) for scattering a source mesh over a base surface."""
    def __init__(self):
        pass
    def _UI(self):
        # Rebuild the window from scratch if a previous instance is open.
        if cmds.window('dosWin',exists=True):
            cmds.deleteUI('dosWin',window=True)
        w=300
        w2=180
        cmds.window('dosWin',t="Distribute On Surface",s=0,rtf=1,mb=1,mxb=0,mnb=0,w=w)
        # NOTE(review): parent "BSMainWin" does not match the 'dosWin'
        # window created above -- confirm the intended parent.
        cmds.columnLayout("mainColumn",p="BSMainWin",columnAttach=('both', 2), rowSpacing=10, columnWidth=w)
        cmds.rowLayout("srcTgtNamesRow",p="mainColumn",w=w,numberOfColumns=3,columnWidth3=(w2,30,w2),
                       adjustableColumn=2, columnAlign3=[('center'),('center'),('center')],
                       columnAttach=[(1, 'both', 1), (2, 'both', 0), (3, 'both',5)])
        # Two scroll lists with right-click menus for picking the source
        # and the base geometry.
        # NOTE(review): the self.srcList / self.tgtList callbacks used
        # below are not defined on this class.
        cmds.textScrollList("srcList",p="srcTgtNamesRow",w=w2,numberOfRows=1, allowMultiSelection=False)
        pm.popupMenu("srcListPopUp",p="srcList")
        pm.menuItem(p="srcListPopUp",l="Add Source Geo",c=self.srcList)
        cmds.textScrollList("tgtList",p="srcTgtNamesRow",w=w2,numberOfRows=1, allowMultiSelection=False)
        pm.popupMenu("tgtListPopUp",p="tgtList")
        pm.menuItem(p="tgtListPopUp",l="Add Base Geo",c=self.tgtList)
        cmds.showWindow('dosWin')
# --- ad-hoc scatter script (runs at import time inside Maya) ---
src_obj = 'pCone1'
tgt_obj = 'pPlane1'
# Delete any selected non-follicle leaf nodes; keep only follicles.
del_trans = [cmds.delete(x) for x in cmds.ls(sl=True,fl=True,dag=1,lf=1) if cmds.nodeType(x) != 'follicle']
fols = [x for x in cmds.ls(sl=True,fl=True,dag=1,lf=1) if cmds.nodeType(x) == 'follicle']
cmds.select(fols,r=1)
maya.mel.eval('randomizeFollicles 0.05')
rand_uv = 0.05
rand_l = 0.5
rand_offset = 1
dup_objs = []
# Parent one duplicate of the source object under every follicle and
# zero out its local translation/rotation.
for fol in fols:
    dup_obj = pm.duplicate(src_obj,n='{0}_dup'.format(src_obj))[0]
    dup_objs.append(dup_obj)
    pm.parent(dup_obj,fol)
    for attr in ['tx','ty','tz','rx','ry','rz']:
        pm.setAttr('{0}.{1}'.format(dup_obj,attr),0)
# Random length
for obj in dup_objs:
    lenght_var = random.uniform(-rand_l,rand_l)
    pm.setAttr('{0}.sz'.format(obj),(1+lenght_var))
# Random offset
for obj in dup_objs:
    offset_var = random.uniform(-rand_offset,rand_offset)
    pm.setAttr('{0}.tz'.format(obj),(offset_var))
| mit | Python | |
e484b67372be22dae78a526c21e62661e4602913 | Add todo files with incomplete fixes | Vauxoo/autopep8,hhatto/autopep8,vauxoo-dev/autopep8,SG345/autopep8,SG345/autopep8,vauxoo-dev/autopep8,MeteorAdminz/autopep8,hhatto/autopep8,MeteorAdminz/autopep8,Vauxoo/autopep8 | test/todo.py | test/todo.py | raise KeyError, key
def foo(a
, b):
pass
| mit | Python | |
c669d498fa81ffb399d7d7c42654f5ac69428a28 | Update templates.py | googlefonts/oss-fuzz,kcc/oss-fuzz,skia-dev/oss-fuzz,google/oss-fuzz,google/oss-fuzz,ssbr/oss-fuzz,skia-dev/oss-fuzz,oliverchang/oss-fuzz,robertswiecki/oss-fuzz,googlefonts/oss-fuzz,google/oss-fuzz,googlefonts/oss-fuzz,oliverchang/oss-fuzz,robertswiecki/oss-fuzz,skia-dev/oss-fuzz,robertswiecki/oss-fuzz,kcc/oss-fuzz,kcc/oss-fuzz,robertswiecki/oss-fuzz,skia-dev/oss-fuzz,google/oss-fuzz,skia-dev/oss-fuzz,googlefonts/oss-fuzz,google/oss-fuzz,skia-dev/oss-fuzz,oliverchang/oss-fuzz,ssbr/oss-fuzz,google/oss-fuzz,ssbr/oss-fuzz,skia-dev/oss-fuzz,google/oss-fuzz,googlefonts/oss-fuzz,robertswiecki/oss-fuzz,robertswiecki/oss-fuzz,FeliciaLim/oss-fuzz,vitalybuka/oss-fuzz,googlefonts/oss-fuzz,kcc/oss-fuzz,googlefonts/oss-fuzz,google/oss-fuzz,FeliciaLim/oss-fuzz,robertswiecki/oss-fuzz,google/oss-fuzz,googlefonts/oss-fuzz,oliverchang/oss-fuzz,robertswiecki/oss-fuzz,robertswiecki/oss-fuzz,kcc/oss-fuzz,FeliciaLim/oss-fuzz,robertswiecki/oss-fuzz,vitalybuka/oss-fuzz,googlefonts/oss-fuzz,vitalybuka/oss-fuzz,skia-dev/oss-fuzz,google/oss-fuzz,robertswiecki/oss-fuzz,vitalybuka/oss-fuzz,googlefonts/oss-fuzz,FeliciaLim/oss-fuzz,skia-dev/oss-fuzz,FeliciaLim/oss-fuzz,ssbr/oss-fuzz,ssbr/oss-fuzz,skia-dev/oss-fuzz,vitalybuka/oss-fuzz,oliverchang/oss-fuzz,skia-dev/oss-fuzz,google/oss-fuzz | infra/templates.py | infra/templates.py | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# Jenkinsfile stub for a new OSS-Fuzz project; only the git URL needs filling in.
JENKINS_TEMPLATE = """\
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
def libfuzzerBuild = fileLoader.fromGit('infra/libfuzzer-pipeline.groovy',
'https://github.com/google/oss-fuzz.git')
libfuzzerBuild {
git = "put git url here"
}
"""
# Dockerfile stub based on ossfuzz/base-libfuzzer that checks out the project.
DOCKER_TEMPLATE = """\
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
FROM ossfuzz/base-libfuzzer
MAINTAINER your@email.com
RUN apt-get install -y make autoconf automake libtool
RUN git clone <git_url> # or use other version control
COPY build.sh /src/
"""
# build.sh stub; %s is substituted with the project's source directory name.
BUILD_TEMPLATE = """\
#!/bin/bash -eu
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
cd /src/%s
# build the target.
# e.g.
#
# ./autogen.sh
# ./configure
# make -j$(nproc) all
# build your fuzzer(s)
# e.g.
# $CXX $CXXFLAGS -std=c++11 -Iinclude \\
# /path/to/name_of_fuzzer.cc -o /out/name_of_fuzzer \\
# -lfuzzer /path/to/library.a $FUZZER_LDFLAGS
"""
| # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
JENKINS_TEMPLATE = """\
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
def libfuzzerBuild = fileLoader.fromGit('infra/libfuzzer-pipeline.groovy',
'https://github.com/google/oss-fuzz.git')
libfuzzerBuild {
git = "put git url here"
}
"""
DOCKER_TEMPLATE = """\
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
FROM ossfuzz/base-libfuzzer
MAINTAINER your@email.com
RUN apt-get install -y make autoconf automake libtool
RUN git clone <git_url> # or use other version control
COPY build.sh /src/
"""
BUILD_TEMPLATE = """\
#!/bin/bash -eu
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
cd /src/%s
# build the target.
# e.g.
#
# ./autogen.sh
# ./configure
# make clean all
# build your fuzzer(s)
# e.g.
# $CXX $CXXFLAGS -std=c++11 -Iinclude \\
# /path/to/name_of_fuzzer.cc -o /out/name_of_fuzzer \\
# -lfuzzer /path/to/library.a $FUZZER_LDFLAGS
"""
| apache-2.0 | Python |
47f7fa72ab3ba75ad4182592f6413702fd509ba7 | Create middleware to redirect users when accessing certain paths | praekelt/molo-iogt,praekelt/molo-iogt,praekelt/molo-iogt | iogt/middleware.py | iogt/middleware.py | from django.conf import settings
from django.http import HttpResponsePermanentRedirect
class SSLRedirectMiddleware(object):
    """Permanently redirects configured path prefixes to HTTPS.

    The prefixes come from the HTTPS_PATHS setting (empty when unset).
    """

    def process_request(self, request):
        https_paths = getattr(settings, 'HTTPS_PATHS', [])
        needs_https = self.response_should_be_secure(request, https_paths)
        already_https = self.request_is_secure(request)
        if needs_https and not already_https:
            target = "https://{}{}".format(
                request.get_host(), request.get_full_path())
            return HttpResponsePermanentRedirect(target)

    def response_should_be_secure(self, request, HTTPS_PATHS):
        # True when the request path falls under any configured prefix.
        return any(
            request.path.startswith(u'/{}'.format(prefix))
            for prefix in HTTPS_PATHS
        )

    def request_is_secure(self, request):
        # The upstream proxy reports the original scheme in this header.
        return request.META.get('HTTP_X_FORWARDED_PROTO') == 'https'
| bsd-2-clause | Python | |
eeab27ecc6843136938e7607a619baef8626118a | Make contest_ranking visible | DMOJ/site,Minkov/site,DMOJ/site,monouno/site,Minkov/site,DMOJ/site,Minkov/site,Phoenix1369/site,Minkov/site,Phoenix1369/site,monouno/site,DMOJ/site,Phoenix1369/site,monouno/site,monouno/site,monouno/site,Phoenix1369/site | judge/views/contests.py | judge/views/contests.py | from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.comments import comment_form, contest_comments
from judge.models import Contest
__all__ = ['contest_list', 'contest', 'contest_ranking']
def contest_list(request):
    """Render the contest listing; admins also see non-public contests."""
    is_admin = request.user.is_authenticated() and request.user.profile.is_admin
    if is_admin:
        visible = Contest.objects.all()
    else:
        visible = Contest.objects.filter(is_public=True)
    context = {'contests': visible, 'title': 'Contests'}
    return render_to_response('contests.jade', context,
                              context_instance=RequestContext(request))
def contest(request, key):
    """Render one contest page, with its comment thread, by contest code."""
    try:
        entry = Contest.objects.get(code=key)
        if not entry.is_public and not request.user.has_perm('judge.see_private_contest'):
            # Treat private contests as missing for unprivileged viewers.
            raise ObjectDoesNotExist()
        form = comment_form(request, 'p:' + key)
        if form is None:
            # A successful comment post: reload this page.
            return HttpResponseRedirect(request.path)
        context = {'contest': entry,
                   'title': entry.name,
                   'comment_list': contest_comments(entry),
                   'comment_form': form}
        return render_to_response('contest.jade', context,
                                  context_instance=RequestContext(request))
    except ObjectDoesNotExist:
        context = {'message': 'Could not find a contest with the key "%s".' % key,
                   'title': 'No such contest'}
        return render_to_response('message.jade', context,
                                  context_instance=RequestContext(request))
def contest_ranking(request, key):
    """Placeholder view: contest rankings are not implemented yet."""
    # Http404 is an exception class: it must be raised, not returned,
    # for Django to produce a 404 response.
    raise Http404()
| from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.comments import comment_form, contest_comments
from judge.models import Contest
__all__ = ['contest_list', 'contest']
def contest_list(request):
if request.user.is_authenticated() and request.user.profile.is_admin:
contests = Contest.objects.all()
else:
contests = Contest.objects.filter(is_public=True)
return render_to_response('contests.jade', {
'contests': contests,
'title': 'Contests'
}, context_instance=RequestContext(request))
def contest(request, key):
try:
contest = Contest.objects.get(code=key)
if not contest.is_public and not request.user.has_perm('judge.see_private_contest'):
raise ObjectDoesNotExist()
form = comment_form(request, 'p:' + key)
if form is None:
return HttpResponseRedirect(request.path)
return render_to_response('contest.jade', {'contest': contest,
'title': contest.name,
'comment_list': contest_comments(contest),
'comment_form': form},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
return render_to_response('message.jade', {'message': 'Could not find a contest with the key "%s".' % key,
'title': 'No such contest'},
context_instance=RequestContext(request))
def contest_ranking(request, key):
return Http404()
| agpl-3.0 | Python |
65e4659ccd3f22f817403dc39869626873f9fb34 | Add test_runner.py | datamicroscopes/lda,datamicroscopes/lda,datamicroscopes/lda | test/test_runner.py | test/test_runner.py | from microscopes.lda.definition import model_definition
from microscopes.lda import model, runner
from microscopes.common.rng import rng
def test_runner_simple():
    """Smoke test: build a tiny LDA state and run a few sampler sweeps."""
    defn = model_definition(n=10, v=20)
    prng = rng()
    docs = [[1, 2, 3, 4, 5], [2, 3, 4]]
    state = model.initialize(defn=defn, data=docs, r=prng)
    sampler = runner.runner(defn, docs, state)
    sampler.run(r=prng, niters=10)
| bsd-3-clause | Python | |
2bd52d823c9ae039dd0cc0adbabe7a47f003138e | Add unit tests | google/flax,google/flax | examples/ppo/unit_tests.py | examples/ppo/unit_tests.py |
import jax
import flax
from flax import nn
import numpy as onp
import numpy.testing as onp_testing
from absl.testing import absltest
#test GAE
from main import gae_advantages
class TestGAE(absltest.TestCase):
  # Checks the generalized advantage estimation helper from main.py.
  def test_gae_random(self):
    # create random data, simulating 10 parallel envs and 100 time steps
    envs, steps = 10, 100
    rewards = onp.random.choice([-1., 0., 1.], size=(steps, envs),
                                p=[0.01, 0.98, 0.01])
    # all-ones masks: no episode terminations in this synthetic rollout
    terminal_masks = onp.ones(shape=(steps, envs), dtype=onp.float64)
    values = onp.random.random(size=(steps + 1, envs))
    discount = 0.99
    gae_param = 0.95
    adv = gae_advantages(rewards, terminal_masks, values, discount, gae_param)
    self.assertEqual(adv.shape, (steps, envs))
    # test the property A_{t} = \delta_t + \gamma*\lambda*A_{t+1}
    # for each agent separately
    for e in range(envs):
      for t in range(steps-1):
        delta = rewards[t, e] + discount * values[t+1, e] - values[t, e]
        lhs = adv[t, e]
        rhs = delta + discount * gae_param * adv[t+1, e]
        onp_testing.assert_almost_equal(lhs, rhs)
#test environment and preprocessing
from remote import RemoteSimulator, rcv_action_send_exp
from env import create_env
class TestEnvironmentPreprocessing(absltest.TestCase):
  # Checks the environment wrapper built by env.create_env().
  def test_creation(self):
    # observations are expected to be 84x84 with 4 stacked frames
    frame_shape = (84, 84, 4)
    env = create_env()
    obs = env.reset()
    self.assertTrue(obs.shape == frame_shape)
  def test_step(self):
    frame_shape = (84, 84, 4)
    env = create_env()
    obs = env.reset()
    actions = [1, 2, 3, 0]
    for a in actions:
      obs, reward, done, info = env.step(a)
      self.assertTrue(obs.shape == frame_shape)
      # rewards should lie in [-1, 1] (presumably clipped by the
      # wrapper -- see env.py to confirm)
      self.assertTrue(reward <= 1. and reward >= -1.)
      self.assertTrue(isinstance(done, bool))
      self.assertTrue(isinstance(info, dict))
#test creation of the model and optimizer
from models import create_model, create_optimizer
class TestCreation(absltest.TestCase):
  # Checks that the policy model and its optimizer construct cleanly.
  def test_create(self):
    rng_key = jax.random.PRNGKey(0)
    rng_key, init_key = jax.random.split(rng_key)
    model_instance = create_model(init_key)
    optimizer_instance = create_optimizer(model_instance, learning_rate=1e-3)
    self.assertTrue(isinstance(model_instance, nn.base.Model))
    self.assertTrue(isinstance(optimizer_instance, flax.optim.base.Optimizer))
if __name__ == '__main__':
absltest.main() | apache-2.0 | Python | |
f6fb9266ec9a2acebae31b042647437e922648d1 | Add spinning cube example with texture From Euroscipy tutorial | hronoses/vispy,julienr/vispy,michaelaye/vispy,jdreaver/vispy,kkuunnddaannkk/vispy,ghisvail/vispy,RebeccaWPerry/vispy,jdreaver/vispy,sbtlaarzc/vispy,QuLogic/vispy,Eric89GXL/vispy,sbtlaarzc/vispy,sh4wn/vispy,sh4wn/vispy,hronoses/vispy,bollu/vispy,srinathv/vispy,bollu/vispy,julienr/vispy,QuLogic/vispy,srinathv/vispy,inclement/vispy,Eric89GXL/vispy,kkuunnddaannkk/vispy,dchilds7/Deysha-Star-Formation,Eric89GXL/vispy,jdreaver/vispy,julienr/vispy,kkuunnddaannkk/vispy,jay3sh/vispy,sh4wn/vispy,ghisvail/vispy,drufat/vispy,RebeccaWPerry/vispy,bollu/vispy,jay3sh/vispy,drufat/vispy,michaelaye/vispy,michaelaye/vispy,hronoses/vispy,dchilds7/Deysha-Star-Formation,QuLogic/vispy,inclement/vispy,dchilds7/Deysha-Star-Formation,RebeccaWPerry/vispy,ghisvail/vispy,jay3sh/vispy,sbtlaarzc/vispy,drufat/vispy,srinathv/vispy,inclement/vispy | examples/spinning-cube2.py | examples/spinning-cube2.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Show spinning cube using VBO's, and transforms, and texturing.
"""
import numpy as np
from vispy import app, gl, oogl, io
from transforms import perspective, translate, rotate
# GLSL vertex shader: applies model/view/projection and forwards the
# texture coordinate to the fragment stage.
VERT_CODE = """
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
attribute vec3 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main()
{
v_texcoord = a_texcoord;
gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);
//gl_Position = vec4(a_position,1.0);
}
"""
# GLSL fragment shader: samples the texture with a sinusoidal
# horizontal wobble driven by v_texcoord.y.
FRAG_CODE = """
uniform sampler2D u_texture;
varying vec2 v_texcoord;
void main()
{
float ty = v_texcoord.y;
float tx = sin(ty*50.0)*0.01 + v_texcoord.x;
gl_FragColor = texture2D(u_texture, vec2(tx, ty));
}
"""
# Read cube data
positions, faces, normals, texcoords = io.read_mesh('cube.obj')
# Random per-vertex colors (not referenced by the current shaders).
colors = np.random.uniform(0,1,positions.shape).astype('float32')
# Index buffer for indexed drawing; uint16 is ample for a cube.
faces_buffer = oogl.ElementBuffer(faces.astype(np.uint16))
class Canvas(app.Canvas):
    """Window that draws the rotating, textured cube."""

    def __init__(self, **kwargs):
        app.Canvas.__init__(self, **kwargs)
        self.geometry = 0, 0, 400, 400
        self.program = oogl.ShaderProgram( oogl.VertexShader(VERT_CODE),
                                           oogl.FragmentShader(FRAG_CODE) )
        # Set attributes
        self.program.attributes['a_position'] = oogl.VertexBuffer(positions)
        self.program.attributes['a_texcoord'] = oogl.VertexBuffer(texcoords)
        self.program.uniforms['u_texture'] = oogl.Texture2D(io.crate())
        # Handle transformations
        self.init_transforms()
        # Re-render at roughly 60 FPS, advancing the rotation each tick.
        self.timer = app.Timer(1.0/60)
        self.timer.connect(self.update_transforms)
        self.timer.start()

    def on_initialize(self, event):
        gl.glClearColor(1,1,1,1)
        # Depth testing so the cube's back faces stay hidden.
        gl.glEnable(gl.GL_DEPTH_TEST)

    def on_resize(self, event):
        width, height = event.size
        gl.glViewport(0, 0, width, height)
        # 45-degree FOV with the window's current aspect ratio.
        self.projection = perspective( 45.0, width/float(height), 2.0, 10.0 )
        self.program.uniforms['u_projection'] = self.projection

    def on_paint(self, event):
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        with self.program as prog:
            prog.draw_elements(gl.GL_TRIANGLES, faces_buffer)

    def init_transforms(self):
        # Start from identity matrices; pull the camera back 5 units.
        self.view = np.eye(4,dtype=np.float32)
        self.model = np.eye(4,dtype=np.float32)
        self.projection = np.eye(4,dtype=np.float32)
        self.theta = 0
        self.phi = 0
        translate(self.view, 0,0,-5)
        self.program.uniforms['u_model'] = self.model
        self.program.uniforms['u_view'] = self.view

    def update_transforms(self,event):
        # Advance both rotation angles and rebuild the model matrix.
        self.theta += .5
        self.phi += .5
        self.model = np.eye(4, dtype=np.float32)
        rotate(self.model, self.theta, 0,0,1)
        rotate(self.model, self.phi, 0,1,0)
        self.program.uniforms['u_model'] = self.model
        self.update()
if __name__ == '__main__':
c = Canvas()
c.show()
app.run()
| bsd-3-clause | Python | |
4c76f9bc68451eb41338173ffbda460098d1e24e | make form reprocessing more error tolerant and verbose | SEL-Columbia/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,gmimano/commcaretest,qedsoftware/commcare-hq,gmimano/commcaretest,gmimano/commcaretest,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq | corehq/apps/cleanup/management/commands/reprocess_error_forms.py | corehq/apps/cleanup/management/commands/reprocess_error_forms.py | from django.core.management.base import BaseCommand, CommandError, LabelCommand
from corehq.apps.cleanup.xforms import iter_problem_forms, reprocess_form_cases
from optparse import make_option
from dimagi.utils.parsing import string_to_datetime
class Command(BaseCommand):
args = '<domain> <since>'
help = ('Reprocesses all documents tagged as errors and tries to '
'regenerate the appropriate case blocks for them. Can pass in '
'a domain and date to process forms received after that date or '
'just a domain to process all problem forms in the domain.')
option_list = LabelCommand.option_list + \
(make_option('--dryrun', action='store_true', dest='dryrun', default=False,
help="Don't do the actual reprocessing, just print the ids that would be affected"),)
def handle(self, *args, **options):
domain = since = None
if len(args) == 1:
domain = args[0]
elif len(args) == 2:
domain = args[0]
since = string_to_datetime(args[1])
else:
raise CommandError('Usage: %s\n%s' % (self.args, self.help))
succeeded = []
failed = []
error_messages = set()
for form in iter_problem_forms(domain, since):
print "%s\t%s\t%s\t%s\t%s" % (form._id, form.received_on,
form.xmlns,
form.xpath('form/meta/username'),
form.problem.strip())
if not options["dryrun"]:
try:
reprocess_form_cases(form)
except Exception, e:
failed.append(form._id)
error_messages.add(str(e))
else:
succeeded.append(form._id)
print "%s / %s forms successfully processed, %s failures" % \
(len(succeeded), len(succeeded) + len(failed), len(failed))
if error_messages:
print r"The following errors were seen: \n%s" % (r"\n".join(error_messages))
| from django.core.management.base import BaseCommand, CommandError, LabelCommand
from corehq.apps.cleanup.xforms import iter_problem_forms, reprocess_form_cases
from optparse import make_option
from dimagi.utils.parsing import string_to_datetime
class Command(BaseCommand):
args = '<domain> <since>'
help = ('Reprocesses all documents tagged as errors and tries to '
'regenerate the appropriate case blocks for them. Can pass in '
'a domain and date to process forms received after that date or '
'just a domain to process all problem forms in the domain.')
option_list = LabelCommand.option_list + \
(make_option('--dryrun', action='store_true', dest='dryrun', default=False,
help="Don't do the actual reprocessing, just print the ids that would be affected"),)
def handle(self, *args, **options):
domain = since = None
if len(args) == 1:
domain = args[0]
elif len(args) == 2:
domain = args[0]
since = string_to_datetime(args[1])
else:
raise CommandError('Usage: %s\n%s' % (self.args, self.help))
for form in iter_problem_forms(domain, since):
print "%s\t%s\t%s\t%s\t%s" % (form._id, form.received_on,
form.xmlns,
form.xpath('form/meta/username'),
form.problem.strip())
if not options["dryrun"]:
reprocess_form_cases(form)
| bsd-3-clause | Python |
d344c91008198927d45cd7a3330915bd9e8fd89f | Add The Fucking Weather module from yano | Uname-a/knife_scraper,Uname-a/knife_scraper,Uname-a/knife_scraper | willie/modules/fuckingweather.py | willie/modules/fuckingweather.py | """
fuckingweather.py - Willie module for The Fucking Weather
Copyright 2013 Michael Yanovich
Copyright 2013 Edward Powell
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
from willie import web
import re
def fucking_weather(willie, trigger):
    """Reply with the one-line "remark" scraped from thefuckingweather.com."""
    place = trigger.group(2)
    if not place:
        willie.reply("INVALID FUCKING PLACE. PLEASE ENTER A FUCKING ZIP CODE, OR A FUCKING CITY-STATE PAIR.")
        return
    # Fetch the page for the requested location (URL-encoded).
    page = web.get("http://thefuckingweather.com/?where=%s" % (web.quote(place)))
    # The site renders its summary inside <p class="remark">...</p>.
    matches = re.findall('<p class="remark">(.*?)</p>', page)
    willie.reply(matches[0] if matches else "I CAN'T GET THE FUCKING WEATHER.")
fucking_weather.commands = ['fucking_weather', 'fw']
fucking_weather.rate = 30
fucking_weather.priority = 'low'
| mit | Python | |
27c955963bfc640f5e91d103a4aff8e2a897597c | Implement profiling | JayTeeGeezy/jtgpy | jtgpy/profiling.py | jtgpy/profiling.py | from datetime import datetime
import logging
def log_time(target, message, log_method=None, target_args=None, target_kwargs=None):
    """Execute target and log the start/elapsed time before and after execution"""
    # Default to logging.info unless the caller supplied its own callable.
    logger = logging.info
    if log_method is not None:
        logger = log_method
    start_time = datetime.now()
    logger('Started {message} at {start_time}'.format(message=message, start_time=start_time))
    # Normalise absent positional/keyword arguments to empty containers
    # (avoids mutable defaults in the signature).
    if target_args is None: target_args=[]
    if target_kwargs is None: target_kwargs={}
    output = target(*target_args, **target_kwargs)
    logger('Finished {message} in {elapsed_time}'.format(message=message, elapsed_time=datetime.now() - start_time))
return output | mit | Python | |
d9a6ea57ad7bb1d7f3716fe16a49a8a24edceb67 | Fix migrations for python3 | praekeltfoundation/ndoh-hub,praekeltfoundation/ndoh-hub,praekeltfoundation/ndoh-hub | registrations/migrations/0010_auto_20180212_0802.py | registrations/migrations/0010_auto_20180212_0802.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2018-02-12 08:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header): rewrites two choice lists.

    dependencies = [
        ('registrations', '0009_auto_20171027_0928'),
    ]

    operations = [
        # New full set of registration-type choices.
        migrations.AlterField(
            model_name='registration',
            name='reg_type',
            field=models.CharField(choices=[('momconnect_prebirth', 'MomConnect pregnancy registration'), ('momconnect_postbirth', 'MomConnect baby registration'), ('whatsapp_prebirth', 'WhatsApp MomConnect pregnancy registration'), ('nurseconnect', 'Nurseconnect registration'), ('whatsapp_nurseconnect', 'WhatsApp Nurseconnect registration'), ('pmtct_prebirth', 'PMTCT pregnancy registration'), ('whatsapp_pmtct_prebirth', 'WhatsApp PMTCT pregnancy registration'), ('pmtct_postbirth', 'PMTCT baby registration'), ('whatsapp_pmtct_postbirth', 'WhatsApp PMTCT baby registration'), ('loss_general', 'Loss general registration')], max_length=30),
        ),
        # New full set of source-authority choices.
        migrations.AlterField(
            model_name='source',
            name='authority',
            field=models.CharField(choices=[('patient', 'Patient'), ('advisor', 'Trusted Advisor'), ('hw_partial', 'Health Worker Partial'), ('hw_full', 'Health Worker Full')], max_length=30),
        ),
    ]
| bsd-3-clause | Python | |
e3c6ebd09af5292c496e3a69af499d2a507ce7dd | add demo main file | jettify/aiohttp_admin,aio-libs/aiohttp_admin,jettify/aiohttp_admin,jettify/aiohttp_admin,aio-libs/aiohttp_admin,jettify/aiohttp_admin,aio-libs/aiohttp_admin | demos/blog/main.py | demos/blog/main.py | import asyncio
import logging
import pathlib
import yaml
import aiopg.sa
import aiohttp_jinja2
import jinja2
from aiohttp import web
import aiohttp_admin
from aiohttp_admin.backends.sa import SAResource
import db
PROJ_ROOT = pathlib.Path(__file__).parent.parent
TEMPLATES_ROOT = pathlib.Path(__file__).parent / 'templates'
class SiteHandler:
    """Request handlers for the public pages of the blog demo."""

    def __init__(self, pg):
        # aiopg engine used by the handlers for database access.
        self.pg = pg

    @aiohttp_jinja2.template('index.html')
    async def index(self, request):
        # The index template needs no context yet.
        return {}
def setup_admin(app, pg):
    """Mount aiohttp_admin on *app* with CRUD resources for the three
    blog tables (posts, tags, comments)."""
    template_folder = str(TEMPLATES_ROOT)
    admin = aiohttp_admin.setup(app=app, template_folder=template_folder)

    admin.add_resource(SAResource(pg, db.post, url='posts'))
    admin.add_resource(SAResource(pg, db.tag, url='tags'))
    admin.add_resource(SAResource(pg, db.comment, url='comments'))
    return admin
async def setup_pg(app, conf, loop):
    """Create the postgres engine and register its shutdown on the app."""
    # create connection to the database
    pg = await init_postgres(conf['postgres'], loop)

    async def close_pg(app):
        # Close the pool and wait for all connections to terminate.
        pg.close()
        await pg.wait_closed()

    app.on_cleanup.append(close_pg)
    return pg
async def init(loop):
    """Assemble the application: config, database, templates, admin, routes.

    Returns the (app, host, port) triple that main() serves.
    """
    # load config from yaml file
    conf = load_config(str(PROJ_ROOT / 'config' / 'polls.yaml'))

    # setup application and extensions
    app = web.Application(loop=loop)
    pg = await setup_pg(app, conf, loop)

    # init modules
    aiohttp_jinja2.setup(
        app, loader=jinja2.FileSystemLoader(str(TEMPLATES_ROOT)))
    setup_admin(app, pg)

    # setup views and routes
    handler = SiteHandler(pg)
    add_route = app.router.add_route
    add_route('GET', '/', handler.index)
    app.router.add_static('/static', path=str(PROJ_ROOT / 'static'))

    host, port = conf['host'], conf['port']
    return app, host, port
def load_config(fname):
    """Read the YAML config file at *fname* and return its contents."""
    with open(fname, 'rt') as f:
        # safe_load: plain yaml.load without an explicit Loader is
        # deprecated and can execute arbitrary constructors from the file.
        data = yaml.safe_load(f)
    # TODO: add config validation
    return data
async def init_postgres(conf, loop):
    """Create an aiopg/SQLAlchemy engine from the `postgres` config section."""
    engine = await aiopg.sa.create_engine(
        database=conf['database'],
        user=conf['user'],
        password=conf['password'],
        host=conf['host'],
        port=conf['port'],
        # Connection-pool bounds.
        minsize=conf['minsize'],
        maxsize=conf['maxsize'],
        loop=loop)
    return engine
def main():
    """Script entry point: build the app on the event loop and serve it."""
    # init logging
    logging.basicConfig(level=logging.DEBUG)

    loop = asyncio.get_event_loop()
    app, host, port = loop.run_until_complete(init(loop))
    web.run_app(app, host=host, port=port)


if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
c4ee87fa4398eca3193331888086cb437436722e | Add some tests for hil_client | mghpcc-projects/user_level_slurm_reservations,mghpcc-projects/user_level_slurm_reservations | test/hil_client_test.py | test/hil_client_test.py | """
General info about these tests
The tests assume that the nodes are in the <from_project> which is set to be the
"slurm" project, since that is what we are testing here.
If all tests pass successfully, then nodes are back in their original state.
Class TestHILReserve moves nodes out of the slurm project and into the free pool;
and TestHILRelease puts nodes back into the slurm project from the free pool
run the tests like this
py.test <path to testfile>
py.test hil_client_test
"""
import inspect
import sys
import pytest
import requests
from os.path import realpath, dirname, isfile, join
import uuid
# Make the project's ../common directory importable before importing the
# client helpers under test.
libdir = realpath(join(dirname(inspect.getfile(inspect.currentframe())), '../common'))
sys.path.append(libdir)

import hil_slurm_client

# Some constants useful for tests
nodelist = ['slurm-compute1', 'slurm-compute2', 'slurm-compute3']
hil_client = hil_slurm_client.hil_init()
to_project = 'slurm'
from_project = 'slurm'
# Client pointing at an unroutable address with bad credentials: used to
# assert that connection failures surface as requests.ConnectionError.
bad_hil_client = hil_slurm_client.hil_client_connect('http://127.3.2.1',
                                                     'baduser', 'badpassword')
class TestHILReserve:
    """Tests various hil_reserve cases"""

    def test_hil_reserve_success(self):
        """test the regular success scenario"""
        # should raise an error if <from_project> doesn't add up.
        with pytest.raises(hil_slurm_client.ProjectMismatchError):
            random_project = str(uuid.uuid4())
            hil_slurm_client.hil_reserve_nodes(nodelist, random_project, hil_client)

        # should run without any errors
        hil_slurm_client.hil_reserve_nodes(nodelist, from_project, hil_client)

        # should raise error if a bad hil_client is passed
        with pytest.raises(requests.ConnectionError):
            hil_slurm_client.hil_reserve_nodes(nodelist, from_project, bad_hil_client)
class TestHILRelease:
    """Test various hil_release cases"""

    def test_hil_release(self):
        # should raise error if a bad hil_client is passed
        with pytest.raises(requests.ConnectionError):
            hil_slurm_client.hil_free_nodes(nodelist, to_project, bad_hil_client)

        # calling it with a functioning hil_client should work
        hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)

        # At this point, nodes are already owned by the <to_project>;
        # calling it again should have no effect (the call is idempotent)
        hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
| mit | Python | |
ed3ea4c4a03927bc351951aa325958db8103441c | Test CLI | jupe/mbed-ls,jupe/mbed-ls | test/base.py | test/base.py | #!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import errno
import logging
import re
import pkg_resources
import json
from mock import patch, MagicMock
from copy import deepcopy
from six import StringIO
import mbed_lstools.main as cli
# Python 2/3 compatibility: `basestring` only exists on Python 2.
try:
    basestring
except NameError:
    # Python 3
    basestring = str
class CLIComands(unittest.TestCase):
    """ Test the CLI
    """
    # NOTE(review): the class name is misspelled ("CLIComands"); left as-is
    # so the test-discovery surface does not change.

    def setUp(self):
        # Capture everything the CLI prints for inspection by the tests.
        self._stdout = patch('sys.stdout', new_callable=StringIO)
        self.stdout = self._stdout.start()
        # Fake device lister reporting a single attached board.
        self.mbeds = MagicMock()
        self.args = MagicMock()
        self.mbeds.list_mbeds.return_value = [
            {'platform_name': 'foo', 'platform_name_unique': 'foo[0]',
             'mount_point': 'a mount point', 'serial_port': 'a serial port',
             'target_id': 'DEADBEEF', 'daplink_version': 'v12345'
            }
        ]

    def tearDown(self):
        # Restore the real sys.stdout.
        self._stdout.stop()

    def test_print_version(self):
        # The printed version must match the installed package metadata.
        cli.print_version(self.mbeds, self.args)
        self.assertIn(pkg_resources.require('mbed-ls')[0].version,
                      self.stdout.getvalue())

    def test_print_table(self):
        # Every value of every listed device must appear in the table output.
        cli.print_table(self.mbeds, self.args)
        for d in self.mbeds.list_mbeds.return_value:
            for v in d.values():
                self.assertIn(v, self.stdout.getvalue())

    def test_print_simple(self):
        # Simple listing must also show every device value.
        cli.print_simple(self.mbeds, self.args)
        for d in self.mbeds.list_mbeds.return_value:
            for v in d.values():
                self.assertIn(v, self.stdout.getvalue())

    def test_mbeds_as_json(self):
        # The JSON output round-trips to exactly the mocked device list.
        cli.mbeds_as_json(self.mbeds, self.args)
        self.assertEqual(self.mbeds.list_mbeds.return_value,
                         json.loads(self.stdout.getvalue()))

    def test_json_by_target_id(self):
        # Keyed-by-target-id output: every value must be one of the devices.
        cli.json_by_target_id(self.mbeds, self.args)
        out_dict = json.loads(self.stdout.getvalue())
        for d in out_dict.values():
            self.assertIn(d, self.mbeds.list_mbeds.return_value)

    def test_json_platforms(self):
        # Platform-name list must only contain known platform names.
        cli.json_platforms(self.mbeds, self.args)
        platform_names = [d['platform_name'] for d
                          in self.mbeds.list_mbeds.return_value]
        for name in json.loads(self.stdout.getvalue()):
            self.assertIn(name, platform_names)

    def test_json_platforms_ext(self):
        # Extended platform output is keyed by platform name.
        cli.json_platforms_ext(self.mbeds, self.args)
        platform_names = [d['platform_name'] for d
                          in self.mbeds.list_mbeds.return_value]
        for name in json.loads(self.stdout.getvalue()).keys():
            self.assertIn(name, platform_names)

    def test_list_platform(self):
        # The raw manufacturer-id listing must be echoed verbatim.
        self.mbeds.list_manufacture_ids.return_value ="""
foo
bar
baz
"""
        cli.list_platforms(self.mbeds, self.args)
        self.assertIn(self.mbeds.list_manufacture_ids.return_value,
                      self.stdout.getvalue())
class CLIParser(unittest.TestCase):
    """Tests for the CLI argument parser."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_parse_cli_defaults(self):
        # With no arguments the parser falls back to a default command.
        args = cli.parse_cli([])
        assert callable(args.command)

    def test_parse_cli_conflict(self):
        # -j and -J are mutually exclusive; argparse reports the conflict
        # by exiting (SystemExit).  The previous try/`assert False`/bare
        # `except: pass` pattern also swallowed the AssertionError, so the
        # test could never fail -- assertRaises fixes that.
        with self.assertRaises(SystemExit):
            cli.parse_cli(["-j", "-J"])

    def test_parse_cli_single_param(self):
        # Every standalone flag must still yield a callable command.
        for p in ['j', 'J', 'p', 'P', '-version', 'd', 'u', 'm']:
            args = cli.parse_cli(['-' + p])
            assert callable(args.command)
| apache-2.0 | Python | |
63b6dca1f3c72e81468a79afde19bb6a84d14791 | Add Landscape inventory plugin | thaim/ansible,thaim/ansible | plugins/inventory/landscape.py | plugins/inventory/landscape.py | #!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Canonical's
# Landscape (http://www.ubuntu.com/management/landscape-features).
#
# Requires the `landscape_api` Python module
# See:
# - https://landscape.canonical.com/static/doc/api/api-client-package.html
# - https://landscape.canonical.com/static/doc/api/python-api.html
#
# Environment variables
# ---------------------
# - `LANDSCAPE_API_URI`
# - `LANDSCAPE_API_KEY`
# - `LANDSCAPE_API_SECRET`
# - `LANDSCAPE_API_SSL_CA_FILE` (optional)
import argparse
import collections
import os
import sys
from landscape_api.base import API, HTTPError
try:
    import json
except ImportError:
    # Pythons without stdlib json (pre-2.6): fall back to simplejson.
    import simplejson as json
# Top-level inventory group name under which all discovered hosts appear.
_key = 'landscape'


class EnvironmentConfig(object):
    """Landscape API endpoint and credentials, read from the environment."""
    uri = os.getenv('LANDSCAPE_API_URI')
    access_key = os.getenv('LANDSCAPE_API_KEY')
    secret_key = os.getenv('LANDSCAPE_API_SECRET')
    # Optional CA bundle, e.g. for self-signed Landscape servers.
    ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE')
def _landscape_client():
    """Build a Landscape API client from the environment configuration."""
    env = EnvironmentConfig()
    return API(
        uri=env.uri,
        access_key=env.access_key,
        secret_key=env.secret_key,
        ssl_ca_file=env.ssl_ca_file)
def get_landscape_members_data():
    """Return the raw computer records from the Landscape API."""
    return _landscape_client().get_computers()
def get_nodes(data):
    """Return the hostname of every computer record in *data*."""
    hostnames = []
    for record in data:
        hostnames.append(record['hostname'])
    return hostnames
def get_groups(data):
    """Map each Landscape tag to the list of hostnames carrying it."""
    tag_to_hosts = collections.defaultdict(list)
    for record in data:
        host = record['hostname']
        for tag in record['tags']:
            tag_to_hosts[tag].append(host)
    return tag_to_hosts
def get_meta(data):
    """Build the Ansible `_meta` block: per-host variables (the tags)."""
    hostvars = {record['hostname']: {'tags': record['tags']} for record in data}
    return {'hostvars': hostvars}
def print_list():
    """Emit the full inventory (top-level group, tag groups, _meta) as JSON."""
    data = get_landscape_members_data()
    nodes = get_nodes(data)
    groups = get_groups(data)
    meta = get_meta(data)
    # All hosts under the 'landscape' key, plus one group per tag.
    inventory_data = {_key: nodes, '_meta': meta}
    inventory_data.update(groups)
    print(json.dumps(inventory_data))
def print_host(host):
    """Emit the variables of a single *host* as JSON (for --host)."""
    data = get_landscape_members_data()
    meta = get_meta(data)
    print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
    """Parse CLI options: exactly one of --list or --host is required."""
    parser = argparse.ArgumentParser(
        description='ansible inventory script reading from landscape cluster')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--list', action='store_true',
                       help='list all hosts from landscape cluster')
    group.add_argument('--host',
                       help='display variables for a host')
    return parser.parse_args(args_list)
def main(args_list):
    """Entry point: dispatch to --list or --host output."""
    args = get_args(args_list)
    if args.list:
        print_list()
    if args.host:
        print_host(args.host)


if __name__ == '__main__':
    # Ansible invokes the script with --list or --host <name>.
    main(sys.argv[1:])
| mit | Python | |
4f036669d604a902530e00ecc800b9baca6e69d1 | Initialize stream getter for current dates games | kshvmdn/nba.js,kshvmdn/nba-scores,kshvmdn/NBAScores | streams.py | streams.py | import praw
import collections
# Module-level PRAW client; the argument is the user-agent string sent to reddit.
r = praw.Reddit('Getter for stream links from /r/nbastreams by /u/me')
| mit | Python | |
50383f51794babceb89503d091d520c5c3032db3 | add gc testing code | mehdisadeghi/saga-python,telamonian/saga-python,mehdisadeghi/saga-python,telamonian/saga-python,luis-rr/saga-python,luis-rr/saga-python,luis-rr/saga-python | tests/_andre/test_gc.py | tests/_andre/test_gc.py |
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import gc
import sys
import saga
from pprint import pprint as pp
try :
    # `if True` is a no-op scope marker kept around the experiment.
    if True :
        js1 = saga.job.Service ('ssh://localhost/bin/sh')

        # Inspect how many references the interpreter holds to the job
        # service, and which objects hold them (garbage-collector view).
        print sys.getrefcount (js1)
        pp (gc.get_referrers (js1))

except saga.SagaException as e :
    print str(e)


# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | Python | |
b04e10af3c0ecc3258b476dbe58c758ece888349 | add migration file required by new paperclip/mapentity | GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin | geotrek/common/migrations/0002_auto_20170323_1433.py | geotrek/common/migrations/0002_auto_20170323_1433.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import embed_video.fields
class Migration(migrations.Migration):
    # Required by newer paperclip/mapentity (see commit message).

    dependencies = [
        ('common', '0001_initial'),
    ]

    operations = [
        # New optional URL field for an externally hosted picture.
        migrations.AddField(
            model_name='attachment',
            name='attachment_link',
            field=models.URLField(verbose_name='Picture URL', blank=True),
        ),
        # attachment_video is now an EmbedVideoField (still optional).
        migrations.AlterField(
            model_name='attachment',
            name='attachment_video',
            field=embed_video.fields.EmbedVideoField(verbose_name='Video URL', blank=True),
        ),
    ]
| bsd-2-clause | Python | |
c556ac78efe4b9a9453016dfdf39219852b42676 | test app, certain to fail | phndiaye/graphnado,phndiaye/graphnado | tests/app.py | tests/app.py | import tornado.ioloop
import tornado.web
from graphnado import GraphQLHandler
if __name__ == '__main__':
    # Route /graphql to the GraphQL handler and serve on port 8888
    # (blocks in the tornado IO loop).
    application = tornado.web.Application([
        (r'/graphql', GraphQLHandler)
    ])
    application.listen(8888)
    tornado.ioloop.IOLoop.current().start()
| mit | Python | |
15eb41ba9ac22eb2ecc60b82807ca7f333f578b9 | Add basic methods for accessing user data | pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality | iatidq/dqusers.py | iatidq/dqusers.py |
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from iatidq import db
import models
def user(user_id=None):
    """Look up a user by primary key; returns None when no id is given
    or no matching row exists."""
    if not user_id:
        return None
    return models.User.query.filter_by(id=user_id).first()
def user_by_username(username=None):
    """Look up a user by username; returns None when no username is given
    or no matching row exists."""
    if not username:
        return None
    return models.User.query.filter_by(username=username).first()
def addUser(data):
    """Create a user from *data* unless the username is already taken.

    Returns the newly created user instance, or None if a user with that
    username already exists.
    """
    checkU = models.User.query.filter_by(username=data["username"]
            ).first()
    if not checkU:
        newU = models.User()
        newU.setup(
            username = data["username"],
            # NOTE(review): `app` is not imported in this module --
            # presumably the Flask app object; confirm the import.
            password = app.config["ADMIN_PASSWORD"],
            name = data.get('name'),
            # was data.get('name'): the e-mail must come from its own key
            email_address = data.get('email_address')
        )
        db.session.add(newU)
        db.session.commit()
        # was `return user`, which returned the module-level *function*
        # object rather than the row that was just created
        return newU
    return None
| agpl-3.0 | Python | |
8c287ca7b3f184c692356c81a93007936a7a5b01 | fix import torngas but not found tornado | bukun/torngas,mqingyn/torngas,bukun/torngas,mqingyn/torngas | torngas/__init__.py | torngas/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mqingyn'
__version__ = '1.8.2'
version = tuple(map(int, __version__.split('.')))
try:
from settings_manager import settings
from webserver import Server, run
from exception import ConfigError, ArgumentError
from urlhelper import Url, route, include
from utils import is_future, RWLock, cached_property, lazyimport, Null, \
safestr, safeunicode, strips, iterbetter, sleep, request_context
from storage import storage, storify, sorteddict, ThreadedDict
except:
pass | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mqingyn'
__version__ = '1.8.1'
version = tuple(map(int, __version__.split('.')))
from settings_manager import settings
from webserver import Server, run
from exception import ConfigError, ArgumentError
from urlhelper import Url, route, include
from utils import is_future, RWLock, cached_property, lazyimport, Null, \
safestr, safeunicode, strips, iterbetter, sleep, request_context
from storage import storage, storify, sorteddict, ThreadedDict
| bsd-3-clause | Python |
19b588e4ac9811879fba7e98943cf5925a774a00 | add migration 102bbf265d4 | voltaire/website | migrations/versions/102bbf265d4_.py | migrations/versions/102bbf265d4_.py | """empty message
Revision ID: 102bbf265d4
Revises: 3d1138bbc68
Create Date: 2015-06-12 01:35:12.398937
"""
# revision identifiers, used by Alembic.
revision = '102bbf265d4'       # this migration
down_revision = '3d1138bbc68'  # parent revision this one applies on top of
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable `openid` column to the user table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('openid', sa.String(length=200), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the `openid` column again (reverse of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'openid')
    ### end Alembic commands ###
| mit | Python | |
dd25e263b099b86e7b9538e474ad875798514be5 | Add StorageDevice class | onitake/Uranium,onitake/Uranium | Cura/StorageDevice.py | Cura/StorageDevice.py |
## Encapsulates a number of different ways of storing file data.
#
class StorageDevice(object):
    def __init__(self):
        super(StorageDevice, self).__init__()
        # Free-form key/value metadata describing this storage device.
        self._properties = {}

    ## Open a file so it can be read from or written to.
    #  \param file_name The name of the file to open. Can be ignored if not applicable.
    #  \param mode What mode to open the file with. See Python's open() function for details. Can be ignored if not applicable.
    #  \return An open stream that can be read from or written to.
    def openFile(self, file_name, mode):
        # Concrete device types must supply their own implementation.
        raise NotImplementedError()

    ## Look up a previously stored property; None when it was never set.
    def getStorageProperty(self, name):
        return self._properties.get(name)

    ## Store (or overwrite) a property value under the given name.
    def setStorageProperty(self, name, value):
        self._properties[name] = value
| agpl-3.0 | Python | |
a4a54ee5c86a09b5f01efe57b013fdc25648d326 | Add morango full facility sync management command. | benjaoming/kolibri,indirectlylit/kolibri,lyw07/kolibri,christianmemije/kolibri,lyw07/kolibri,DXCanas/kolibri,indirectlylit/kolibri,jonboiser/kolibri,lyw07/kolibri,learningequality/kolibri,DXCanas/kolibri,DXCanas/kolibri,jonboiser/kolibri,indirectlylit/kolibri,mrpau/kolibri,christianmemije/kolibri,benjaoming/kolibri,DXCanas/kolibri,benjaoming/kolibri,mrpau/kolibri,jonboiser/kolibri,jonboiser/kolibri,learningequality/kolibri,mrpau/kolibri,learningequality/kolibri,learningequality/kolibri,christianmemije/kolibri,indirectlylit/kolibri,mrpau/kolibri,christianmemije/kolibri,benjaoming/kolibri,lyw07/kolibri | kolibri/auth/management/commands/fullfacilitysync.py | kolibri/auth/management/commands/fullfacilitysync.py | from django.utils.six.moves import input
from kolibri.auth.models import FacilityUser
from kolibri.core.device.utils import device_provisioned
from kolibri.core.device.models import DevicePermissions, DeviceSettings
from kolibri.tasks.management.commands.base import AsyncCommand
from morango.controller import MorangoProfileController
from morango.certificates import Certificate, Filter
class Command(AsyncCommand):
    """Perform a full facility sync (pull and/or push) against a remote
    Morango server, then grant superuser device permissions to the
    syncing user and provision the device if needed."""

    def add_arguments(self, parser):
        parser.add_argument('--dataset-id', type=str)
        # NOTE(review): argparse `type=bool` treats any non-empty string as
        # True; consider action='store_true' if the CLI surface may change.
        parser.add_argument('--no-push', type=bool, default=False)
        parser.add_argument('--no-pull', type=bool, default=False)
        parser.add_argument('--host', type=str)
        parser.add_argument('--username', type=str)
        parser.add_argument('--password', type=str)

    def handle_async(self, *args, **options):
        controller = MorangoProfileController('facilitydata')
        with self.start_progress(total=5) as progress_update:
            network_connection = controller.create_network_connection(options['host'])
            progress_update(1)

            # get list of facilities and if more than 1, display all choices to user
            facility_resp = network_connection._request('api/facility/')
            facility_resp.raise_for_status()
            facilities = facility_resp.json()
            if len(facilities) > 1 and not options['dataset_id']:
                message = 'Please choose a facility to sync with:\n'
                for idx, f in enumerate(facilities):
                    message += "{}. {}\n".format(idx + 1, f['name'])
                idx = input(message)
                # was int(idx-1): input() returns a string, so `idx - 1`
                # raised TypeError before the conversion could happen
                options['dataset_id'] = facilities[int(idx) - 1]['dataset']
            elif not options['dataset_id']:
                options['dataset_id'] = facilities[0]['dataset']

            # get servers certificates which server has a private key for
            server_certs = network_connection.get_remote_certificates(options['dataset_id'], scope_def_id='full-facility')
            if not server_certs:
                print('Server does not have any certificates for dataset_id: {}'.format(options['dataset_id']))
                return
            server_cert = server_certs[0]
            progress_update(1)

            # check for the certs we own for the specific facility
            owned_certs = Certificate.objects.filter(id=options['dataset_id']) \
                                             .get_descendants(include_self=True) \
                                             .filter(scope_definition_id="full-facility") \
                                             .exclude(_private_key=None)

            # if we don't own any certs, do a csr request
            if not owned_certs:
                # prompt user for creds if not already specified
                if not options['username'] or not options['password']:
                    options['username'] = input('Please enter username: ')
                    options['password'] = input('Please enter password: ')
                client_cert = network_connection.certificate_signing_request(server_cert, 'full-facility', {'dataset_id': options['dataset_id']},
                                                                             userargs=options['username'], password=options['password'])
            else:
                client_cert = owned_certs[0]

            sync_client = network_connection.create_sync_session(client_cert, server_cert)
            progress_update(1)

            # pull from server and push our own data to server
            if not options['no_pull']:
                sync_client.initiate_pull(Filter(options['dataset_id']))
            if not options['no_push']:
                sync_client.initiate_push(Filter(options['dataset_id']))
            progress_update(1)

            # make the user with the given credentials, a superuser for this device
            user = FacilityUser.objects.get(username=options['username'], dataset_id=options['dataset_id'])
            # create permissions for the authorized user
            DevicePermissions.objects.update_or_create(user=user, defaults={'is_superuser': True, 'can_manage_content': True})

            # if device has not been provisioned, set it up
            if not device_provisioned():
                device_settings, created = DeviceSettings.objects.get_or_create()
                device_settings.is_provisioned = True
                device_settings.save()

            sync_client.close_sync_session()
            progress_update(1)
| mit | Python | |
0543bfa278e1bb2a8eb37bc0c8f065ddde2ed21f | Add object doubling as lxml Element and string #274 | monouno/site,monouno/site,Minkov/site,Minkov/site,Minkov/site,DMOJ/site,DMOJ/site,monouno/site,Phoenix1369/site,Phoenix1369/site,monouno/site,Phoenix1369/site,DMOJ/site,Minkov/site,monouno/site,DMOJ/site,Phoenix1369/site | judge/lxml_tree.py | judge/lxml_tree.py | from lxml import html
class HTMLTreeString(object):
    """Wraps an HTML fragment so the object doubles as an lxml element
    (attribute access is proxied to the parsed tree) and as markup text
    (serialised back via unicode()/str())."""

    def __init__(self, str):
        # Parameter name kept as `str` for interface compatibility,
        # although it shadows the builtin.
        self.tree = html.fromstring(str)

    def __getattr__(self, attr):
        # Delegate anything not defined here to the underlying lxml element.
        return getattr(self.tree, attr)

    def __unicode__(self):
        return html.tostring(self.tree)

    # On Python 3 `__unicode__` is never consulted; alias it so str(obj)
    # also yields the serialised markup there.
    __str__ = __unicode__
| agpl-3.0 | Python | |
1d5f23bf090e4a6d51beb310b5ecf4048afcc347 | add debug runner | missionpinball/mpf,missionpinball/mpf | tools/debug_run_game.py | tools/debug_run_game.py | import logging
import sys
from mpf.core.config_loader import YamlMultifileConfigLoader
from mpf.core.machine import MachineController
# Path of the machine folder to run is the single CLI argument.
machine_path = sys.argv[1]

# Load the merged MPF config for that machine.
config_loader = YamlMultifileConfigLoader(machine_path, ["config.yaml"], False, False)

config = config_loader.load_mpf_config()

# Debug-friendly options: virtual hardware platform, BCP enabled,
# verbose console logging, no production mode.
options = {
    'force_platform': 'smart_virtual',
    'production': False,
    'mpfconfigfile': ["mpfconfig.yaml"],
    'configfile': ["config.yaml"],
    'debug': True,
    'bcp': True,
    'no_load_cache': False,
    'create_config_cache': True,
    'text_ui': False,
    'consoleloglevel': logging.DEBUG,
}

logging.basicConfig(level=logging.DEBUG)
machine = MachineController(options, config)
# Blocks and runs the machine loop.
machine.run()
| mit | Python | |
5468beaaf18ea8ac1b955b25bcea0aea1c650af0 | Add test for linear model | spacy-io/thinc,explosion/thinc,explosion/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc | thinc/tests/linear/test_linear.py | thinc/tests/linear/test_linear.py | from __future__ import division
import numpy
import pytest
import pickle
import io
from ...linear.linear import LinearModel
from ...neural.optimizers import SGD
from ...neural.ops import NumpyOps
from ...neural.util import to_categorical
@pytest.fixture
def instances():
    # Sparse batch of two examples: 9 one-valued features, split into
    # rows of length 5 and 4 (lengths sums to len(keys)).
    lengths = numpy.asarray([5,4], dtype='int32')
    keys = numpy.arange(9, dtype='uint64')
    values = numpy.ones(9, dtype='float')
    X = (keys, values, lengths)
    # Targets as one-hot vectors over 3 classes.
    y = numpy.asarray([0,2], dtype='int32')
    return X, to_categorical(y, nb_classes=3)
@pytest.fixture
def sgd():
    # Plain SGD optimizer on the NumPy backend with a small learning rate.
    return SGD(NumpyOps(), 0.001)
def test_basic(instances, sgd):
    # One gradient step on the linear model must reduce the squared error.
    X, y = instances
    nr_class = 3
    model = LinearModel(nr_class)
    # Forward pass, loss, and one backward/update step.
    yh, backprop = model.begin_update(X)
    loss1 = ((yh-y)**2).sum()
    backprop(yh-y, sgd)
    # Second forward pass on the same data: loss must have dropped.
    yh, backprop = model.begin_update(X)
    loss2 = ((yh-y)**2).sum()
    assert loss2 < loss1
    print(loss2, loss1)
#@pytest.fixture
#def model(instances):
# templates = []
# for batch in instances:
# for _, feats in batch:
# for key in feats:
# templates.append((key,))
# templates = tuple(set(templates))
# model = AveragedPerceptron(templates)
# for batch in instances:
# model.time += 1
# for clas, feats in batch:
# for key, value in feats.items():
# model.update_weight(key, clas, value)
# return model
#
#def get_score(nr_class, model, feats, clas):
# eg = Example(nr_class)
# eg.features = feats
# eg.costs = [i != clas for i in range(nr_class)]
# model(eg)
# return eg.scores[clas]
#
#
#def get_scores(nr_class, model, feats):
# eg = Example(nr_class)
# eg.features = feats
# model(eg)
# return list(eg.scores)
#
#
#def test_averaging(model):
# model.end_training()
# nr_class = 4
# # Feature 1
# assert_near_eq(get_score(nr_class, model, {1: 1}, 1), sum([-1, -2, -3]) / 3.0)
# assert_near_eq(get_score(nr_class, model, {1: 1}, 2), sum([5, 4, 9]) / 3.0)
# assert_near_eq(get_score(nr_class, model, {1: 1}, 3), sum([3, 6, 6]) / 3.0)
# # Feature 2
# assert_near_eq(get_score(nr_class, model, {2: 1}, 1), sum([1, 2, 4]) / 3.0)
# assert_near_eq(get_score(nr_class, model, {2: 1}, 2), sum([-5, -3, -8]) / 3.0)
# assert_near_eq(get_score(nr_class, model, {2: 1}, 3), sum([-3, -6, -5]) / 3.0)
# # Feature 3 (absent)
# assert_near_eq(get_score(nr_class, model, {3: 1}, 1), 0)
# assert_near_eq(get_score(nr_class, model, {3: 1}, 2), 0)
# assert_near_eq(get_score(nr_class, model, {3: 1}, 3), 0)
# # Feature 4
# assert_near_eq(get_score(nr_class, model, {4: 1}, 1), sum([0, 0, 0]) / 3.0)
# assert_near_eq(get_score(nr_class, model, {4: 1}, 2), sum([0, 0, 0]) / 3.0)
# assert_near_eq(get_score(nr_class, model, {4: 1}, 3), sum([0, 0, 1]) / 3.0)
# # Feature 5
# assert_near_eq(get_score(nr_class, model, {5: 1}, 1), sum([0, 0, 0]) / 3.0)
# assert_near_eq(get_score(nr_class, model, {5: 1}, 2), sum([0, 0, 0]) / 3.0)
# assert_near_eq(get_score(nr_class, model, {5: 1}, 3), sum([0, 0, -7]) / 3.0)
#
#
#def test_dump_load(model):
# loc = tempfile.mkstemp()[1]
# model.end_training()
# model.dump(loc)
# string = open(loc, 'rb').read()
# assert string
# new_model = AveragedPerceptron([(1,), (2,), (3,), (4,)])
# nr_class = 5
# assert get_scores(nr_class, model, {1: 1, 3: 1, 4: 1}) != \
# get_scores(nr_class, new_model, {1:1, 3:1, 4:1})
# assert get_scores(nr_class, model, {2:1, 5:1}) != \
# get_scores(nr_class, new_model, {2:1, 5:1})
# assert get_scores(nr_class, model, {2:1, 3:1, 4:1}) != \
# get_scores(nr_class, new_model, {2:1, 3:1, 4:1})
# new_model.load(loc)
# assert get_scores(nr_class, model, {1:1, 3:1, 4:1}) == \
# get_scores(nr_class, new_model, {1:1, 3:1, 4:1})
# assert get_scores(nr_class, model, {2:1, 5:1}) == \
# get_scores(nr_class, new_model, {2:1, 5:1})
# assert get_scores(nr_class, model, {2:1, 3:1, 4:1}) == \
# get_scores(nr_class, new_model, {2:1, 3:1, 4:1})
#
#
### TODO: Need a test that exercises multiple lines. Example bug:
### in gather_weights, don't increment f_i per row, only per feature
### (so overwrite some lines we're gathering)
| mit | Python | |
d4a5feaaddb88b79809646b7ddab36d29ebf0830 | Create swig.py | vadimkantorov/wigwam | wigs/swig.py | wigs/swig.py | class swig(Wig):
tarball_uri = 'https://github.com/swig/swig/archive/rel-$RELEASE_VERSION$.tar.gz'
last_release_version = 'v3.0.10'
git_uri = 'https://github.com/swig/swig'
| mit | Python | |
956aff18c4791e0fda10b8e0a1103f3a0d53e4f1 | Add simple performance test suite | df3n5/redact-py,df3n5/redact | tests/perf/perf.py | tests/perf/perf.py | import redact
class Prisoner(redact.BaseModel):
def __init__(self, key, name=None, password=None):
super(Prisoner, self).__init__(key)
self.name = redact.KeyValueField('n', name)
self.password = redact.KeyValueField('p', password)
i = 0
def save_model():
global i
prisoner = Prisoner('num_{}'.format(i), "Patrick McGoohan", "iamnotanumber6")
redact.save(prisoner)
i += 1
def delete_model():
global i
prisoner = Prisoner('num_{}'.format(i))
redact.delete(prisoner)
i += 1
def read_model():
global i
prisoner = Prisoner('num_{}'.format(i), "Patrick McGoohan", "iamnotanumber6")
redact.load(prisoner)
i += 1
def update_model():
global i
prisoner = Prisoner('num_{}'.format(i), "Patrick McGoohan", "iamnotanumber6")
redact.load(prisoner)
i += 1
redact.save(prisoner)
if __name__ == '__main__':
global i
import timeit
n_writes = 10000
write_time = timeit.timeit("save_model()", setup="from __main__ import save_model", number=n_writes)
print("{} writes completed in {} seconds".format(n_writes, write_time))
i = 0
read_time = timeit.timeit("read_model()", setup="from __main__ import read_model", number=n_writes)
print("{} reads completed in {} seconds".format(n_writes, read_time))
i = 0
update_time = timeit.timeit("update_model()", setup="from __main__ import update_model", number=n_writes)
print("{} updates completed in {} seconds".format(n_writes, update_time))
i = 0
delete_time = timeit.timeit("delete_model()", setup="from __main__ import delete_model", number=n_writes)
print("{} deletes completed in {} seconds".format(n_writes, delete_time))
| mit | Python | |
68c664117612b15dab5add78e7b5614daf1c2c18 | Add missing __init__.py | elopezga/ErrorRate,dracorpg/python-ivi,getzze/python-ivi,sephalon/python-ivi,alexforencich/python-ivi,dracorpg/python-ivi,Diti24/python-ivi,margguo/python-ivi,python-ivi/python-ivi,adrianschlatter/python-ivi | ivi/agilent/test/__init__.py | ivi/agilent/test/__init__.py | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__all__ = []
| mit | Python | |
4f1b7b2a29fcb8802fbdee6ce832d9c5fb4a6f89 | add jobq tests | kalessin/python-hubstorage,scrapinghub/python-hubstorage,torymur/python-hubstorage | tests/test_jobq.py | tests/test_jobq.py | """
Test JobQ
"""
from hstestcase import HSTestCase
class ActivityTest(HSTestCase):
def setUp(self):
super(ActivityTest, self).setUp()
self.jobq = self.hsclient.get_jobq(self.projectid)
def test_basic(self):
#authpos(JOBQ_PUSH_URL, data="", expect=400)
spider1 = self.jobq.push('spidey')
spider2 = self.jobq.push(spider='spidey')
spider3 = self.jobq.push(spider='spidey', metatest='somekey')
spider4 = self.jobq.push('spidey')
summary = dict((s['name'], s) for s in self.jobq.summary())
pending = summary['pending']
pending_summaries = pending['summary']
assert len(pending_summaries) >= 4
assert len(pending_summaries) <= 8 # 8 are requested
assert pending['count'] >= len(pending_summaries)
# expected keys, in the order they should be in the queue
expected_keys = [spider4['key'], spider3['key'], spider2['key'], spider1['key']]
# only count the keys we inserted, as other tests may be running
def filter_test(summary):
"""filter out all summaries not in our test"""
return [s['key'] for s in summary if s['key'] in expected_keys]
received_keys = filter_test(pending_summaries)
assert expected_keys == received_keys
# change some job states
job1 = self.hsclient.get_job(spider1['key'])
job1.finished()
job2 = self.hsclient.get_job(spider2['key'])
job2.started()
# check job queues again
summary = dict((s['name'], s) for s in self.jobq.summary())
assert summary['pending']['count'] >= 2
assert summary['running']['count'] >= 1
assert summary['finished']['count'] >= 1
pending_keys = filter_test(summary['pending']['summary'])
assert pending_keys == [spider4['key'], spider3['key']]
running_keys = filter_test(summary['running']['summary'])
assert running_keys == [spider2['key']]
finished_keys = filter_test(summary['finished']['summary'])
assert finished_keys == [spider1['key']]
job2.finished()
summary = dict((s['name'], s) for s in self.jobq.summary())
finished_keys = filter_test(summary['finished']['summary'])
assert finished_keys == [spider2['key'], spider1['key']]
| bsd-3-clause | Python | |
ff698bf20eb400f9e6f6cadb3e809fcb684bdcc9 | Add simple main test | kingarmery/gnurr | tests/test_main.py | tests/test_main.py | from unittest import TestCase
from src.main.main import main
class TestMain(TestCase):
def test_read_commands(self):
s0 = main([])
s1 = main(["one"])
s2 = main(["one", "-2"])
self.assertTrue(0 == 0, "Testing if 0 equals 0.")
self.assertFalse(0 == 1, "Testing if 0 doesn't equal 1.") | mit | Python | |
a22f713ff4a366c0f05ffd6a7e513bc8fda7aa26 | add a maxcall test | joshspeagle/dynesty | tests/test_misc.py | tests/test_misc.py | import numpy as np
import dynesty
"""
Run a series of basic tests of the 2d eggbox
"""
# seed the random number generator
np.random.seed(56432)
nlive = 100
def loglike(x):
return -0.5 * np.sum(x**2)
def prior_transform(x):
return (2 * x - 1) * 10
def test_maxcall():
# hard test of dynamic sampler with high dlogz_init and small number
# of live points
ndim = 2
sampler = dynesty.NestedSampler(loglike,
prior_transform,
ndim,
nlive=nlive)
sampler.run_nested(dlogz=1, maxcall=1000)
sampler = dynesty.DynamicNestedSampler(loglike,
prior_transform,
ndim,
nlive=nlive)
sampler.run_nested(dlogz_init=1, maxcall=1000)
| mit | Python | |
0ce616d3c787060c6d1bfeacb0a53c5085494927 | Create tutorial2.py | tiggerntatie/empty-app | tutorial2.py | tutorial2.py | placeholder
| mit | Python | |
287cd795c92a86ee16a623230d0c59732a2f767d | Add demo on how to write unstructured point meshes in Vtk. | inducer/pyvisfile,inducer/pyvisfile,inducer/pyvisfile | examples/vtk-unstructured-points.py | examples/vtk-unstructured-points.py | import numpy as np
from pyvisfile.vtk import (
UnstructuredGrid, DataArray,
AppendedDataXMLGenerator,
VTK_VERTEX, VF_LIST_OF_VECTORS, VF_LIST_OF_COMPONENTS)
n = 5000
points = np.random.randn(n, 3)
data = [
("p", np.random.randn(n)),
("vel", np.random.randn(3, n)),
]
file_name = "points.vtu"
compressor = None
grid = UnstructuredGrid(
(n, DataArray("points", points, vector_format=VF_LIST_OF_VECTORS)),
cells=np.arange(n, dtype=np.uint32),
cell_types=np.asarray([VTK_VERTEX] * n, dtype=np.uint8))
for name, field in data:
grid.add_pointdata(DataArray(name, field,
vector_format=VF_LIST_OF_COMPONENTS))
from os.path import exists
if exists(file_name):
raise RuntimeError("output file '%s' already exists"
% file_name)
outf = open(file_name, "w")
AppendedDataXMLGenerator(compressor)(grid).write(outf)
outf.close()
| mit | Python | |
11111351f67afd3dc8ee2ec904a9cea595d68fb3 | Add script to calculate perplexity results | NLeSC/cptm,NLeSC/cptm | DilipadTopicModelling/experiment_calculate_perplexity.py | DilipadTopicModelling/experiment_calculate_perplexity.py | import pandas as pd
import logging
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir))
#corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
# topicDict='{}/topicDict.dict'.format(data_dir),
# opinionDict='{}/opinionDict.dict'.format(data_dir))
nIter = 200
beta = 0.02
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
nTopics = range(20, nIter+1, 20)
nPerplexity = range(0, nIter+1, 10)
topic_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
opinion_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
for n in nTopics:
# load sampler
sampler = GibbsSampler(corpus, nTopics=n, nIter=nIter, alpha=(50.0/n),
beta=beta, beta_o=beta,
out_dir=out_dir.format(n))
sampler._initialize()
sampler.run()
for s in nPerplexity:
tw_perp, ow_perp = sampler.perplexity(index=s)
topic_perp.set_value(s, n, tw_perp)
opinion_perp.set_value(s, n, ow_perp)
logger.info('nTopics: {}, nPerplexity: {}, topic perplexity: {}, '
'opinion perplexity: {}'.format(n, s, tw_perp, ow_perp))
topic_perp.to_csv(out_dir.format('perplexity_topic.csv'))
opinion_perp.to_csv(out_dir.format('perplexity_opinion.csv'))
| apache-2.0 | Python | |
2ed913c0add7740b9c8eb6ee8320b6924907e48f | Create q5.py | pollseed/python_script_lib,pollseed/script_lib,pollseed/script_lib,pollseed/script_lib,pollseed/script_lib,pollseed/python_script_lib,pollseed/python_script_lib,pollseed/python_script_lib,pollseed/python_script_lib,pollseed/script_lib,pollseed/script_lib,pollseed/python_script_lib | work/q5.py | work/q5.py | import itertools
def max_array():
return [x for x in range(0, 10)]
def compute():
return ['+', '-', '']
def sum(arr):
for i in itertools.product(compute(), repeat=10):
result = ''.join(map(str, union(max_array(), i)))
if result[len(result) - 1] in compute():
result = result[:len(result) - 1]
if eval(result) == 100:
print(result, '=', eval(result))
def union(arr1, arr2):
result = []
for i in range(1, len(arr1)):
result.append(arr1[i])
result.append(arr2[i])
return result
print(sum(max_array()))
| mit | Python | |
240274ea82db24dca578692a609a262497107ccc | Prepare for interview questions | noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit | decorator_examples.py | decorator_examples.py | def identity_decorator(fn):
def wrapper(*args):
return fn(*args)
return wrapper
@identity_decorator
def stringify(obj):
return str(obj)
print(stringify(78))
def uppercase(fn):
def wrapper(*args):
result = fn(*args)
return result.upper()
return wrapper
@uppercase
def stringify(obj):
return str(obj)
print(stringify('Hello'))
def cache(fn):
c = {}
def wrapper(*args):
if args in c:
return c[args]
result = fn(*args)
c[args] = result
return result
return wrapper
@cache
def fibonacci(n):
print('Calculating fibonacci({})'.format(n))
if n == 1:
return 1
return n * fibonacci(n - 1)
print(fibonacci(4))
print(fibonacci(5))
| mit | Python | |
caf8ab5d11d63a3850c5ad4f87e7334422014c88 | Break out HLS fetcher to module | OakNinja/svtplay-dl,OakNinja/svtplay-dl,selepo/svtplay-dl,spaam/svtplay-dl,leakim/svtplay-dl,OakNinja/svtplay-dl,leakim/svtplay-dl,olof/svtplay-dl,selepo/svtplay-dl,olof/svtplay-dl,qnorsten/svtplay-dl,dalgr/svtplay-dl,leakim/svtplay-dl,qnorsten/svtplay-dl,iwconfig/svtplay-dl,spaam/svtplay-dl,iwconfig/svtplay-dl,dalgr/svtplay-dl | lib/svtplay/hls.py | lib/svtplay/hls.py | import sys
import os
import re
import time
from datetime import timedelta
from svtplay.utils import get_http_data, select_quality
from svtplay.output import progressbar, progress_stream
from svtplay.log import log
if sys.version_info > (3, 0):
from io import BytesIO as StringIO
else:
from StringIO import StringIO
def download_hls(options, url, baseurl=None):
data = get_http_data(url)
globaldata, files = parsem3u(data)
streams = {}
for i in files:
streams[int(i[1]["BANDWIDTH"])] = i[0]
test = select_quality(options, streams)
m3u8 = get_http_data(test)
globaldata, files = parsem3u(m3u8)
encrypted = False
key = None
try:
keydata = globaldata["KEY"]
encrypted = True
except:
pass
if encrypted:
try:
from Crypto.Cipher import AES
except ImportError:
log.error("You need to install pycrypto to download encrypted HLS streams")
sys.exit(2)
match = re.search("URI=\"(http://.*)\"", keydata)
key = get_http_data(match.group(1))
rand = os.urandom(16)
decryptor = AES.new(key, AES.MODE_CBC, rand)
n = 1
if options.output != "-":
extension = re.search("(\.[a-z0-9]+)$", options.output)
if not extension:
options.output = "%s.ts" % options.output
log.info("Outfile: %s", options.output)
file_d = open(options.output, "wb")
else:
file_d = sys.stdout
start = time.time()
estimated = ""
for i in files:
item = i[0]
if options.output != "-":
progressbar(len(files), n, estimated)
if item[0:5] != "http:":
item = "%s/%s" % (baseurl, item)
data = get_http_data(item)
if encrypted:
lots = StringIO(data)
plain = b""
crypt = lots.read(1024)
decrypted = decryptor.decrypt(crypt)
while decrypted:
plain += decrypted
crypt = lots.read(1024)
decrypted = decryptor.decrypt(crypt)
data = plain
file_d.write(data)
now = time.time()
dt = now - start
et = dt / (n + 1) * len(files)
rt = et - dt
td = timedelta(seconds = int(rt))
estimated = "Estimated Remaining: " + str(td)
n += 1
if options.output != "-":
file_d.close()
progress_stream.write('\n')
def parsem3u(data):
if not data.startswith("#EXTM3U"):
raise ValueError("Does not apprear to be a ext m3u file")
files = []
streaminfo = {}
globdata = {}
data = data.replace("\r", "\n")
for l in data.split("\n")[1:]:
if not l:
continue
if l.startswith("#EXT-X-STREAM-INF:"):
#not a proper parser
info = [x.strip().split("=", 1) for x in l[18:].split(",")]
streaminfo.update({info[1][0]: info[1][1]})
elif l.startswith("#EXT-X-ENDLIST"):
break
elif l.startswith("#EXT-X-"):
globdata.update(dict([l[7:].strip().split(":", 1)]))
elif l.startswith("#EXTINF:"):
dur, title = l[8:].strip().split(",", 1)
streaminfo['duration'] = dur
streaminfo['title'] = title
elif l[0] == '#':
pass
else:
files.append((l, streaminfo))
streaminfo = {}
return globdata, files
| mit | Python | |
cd85c85b492de74a5dd049e610efeb85e2222326 | Create FoundationPlist.py | aysiu/OldMunkiPackages | FoundationPlist/FoundationPlist.py | FoundationPlist/FoundationPlist.py | #!/usr/bin/python
# encoding: utf-8
#
# Copyright 2009-2014 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FoundationPlist.py -- a tool to generate and parse MacOSX .plist files.
This is intended as a drop-in replacement for Python's included plistlib,
with a few caveats:
- readPlist() and writePlist() operate only on a filepath,
not a file object.
- there is no support for the deprecated functions:
readPlistFromResource()
writePlistToResource()
- there is no support for the deprecated Plist class.
The Property List (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, filepath)
function. 'rootObject' is the top level object, 'filepath' is a
filename.
To parse a plist from a file, use the readPlist(filepath) function,
with a file name. It returns the top level object (again, usually a
dictionary).
To work with plist data in strings, you can use readPlistFromString()
and writePlistToString().
"""
from Foundation import NSData, \
NSPropertyListSerialization, \
NSPropertyListMutableContainers, \
NSPropertyListXMLFormat_v1_0
class FoundationPlistException(Exception):
pass
class NSPropertyListSerializationException(FoundationPlistException):
pass
class NSPropertyListWriteException(FoundationPlistException):
pass
def readPlist(filepath):
"""
Read a .plist file from filepath. Return the unpacked root object
(which is usually a dictionary).
"""
plistData = NSData.dataWithContentsOfFile_(filepath)
dataObject, plistFormat, error = \
NSPropertyListSerialization.propertyListFromData_mutabilityOption_format_errorDescription_(
plistData, NSPropertyListMutableContainers, None, None)
if error:
error = error.encode('ascii', 'ignore')
errmsg = "%s in file %s" % (error, filepath)
raise NSPropertyListSerializationException(errmsg)
else:
return dataObject
def readPlistFromString(data):
'''Read a plist data from a string. Return the root object.'''
plistData = buffer(data)
dataObject, plistFormat, error = \
NSPropertyListSerialization.propertyListFromData_mutabilityOption_format_errorDescription_(
plistData, NSPropertyListMutableContainers, None, None)
if error:
error = error.encode('ascii', 'ignore')
raise NSPropertyListSerializationException(error)
else:
return dataObject
def writePlist(dataObject, filepath):
'''
Write 'rootObject' as a plist to filepath.
'''
plistData, error = \
NSPropertyListSerialization.dataFromPropertyList_format_errorDescription_(
dataObject, NSPropertyListXMLFormat_v1_0, None)
if error:
error = error.encode('ascii', 'ignore')
raise NSPropertyListSerializationException(error)
else:
if plistData.writeToFile_atomically_(filepath, True):
return
else:
raise NSPropertyListWriteException(
"Failed to write plist data to %s" % filepath)
def writePlistToString(rootObject):
'''Return 'rootObject' as a plist-formatted string.'''
plistData, error = \
NSPropertyListSerialization.dataFromPropertyList_format_errorDescription_(
rootObject, NSPropertyListXMLFormat_v1_0, None)
if error:
error = error.encode('ascii', 'ignore')
raise NSPropertyListSerializationException(error)
else:
return str(plistData)
| apache-2.0 | Python | |
71a55a1252ef87629f10e48c1041416c34742ea7 | Add input handling for ssh connections | halfbro/juliet | modules/juliet_input.py | modules/juliet_input.py | from threading import Thread
class Juliet_Input (Thread):
def __init(self):
Thread.__init(self)
def run(self):
while True:
char = raw_input()
if char == 'q':
break
| bsd-2-clause | Python | |
02887eb26b1c95abf6e26f30228c524d61335e40 | Add download_protected_file view | rtrembecky/roots,tbabej/roots,rtrembecky/roots,tbabej/roots,matus-stehlik/glowing-batman,matus-stehlik/glowing-batman,matus-stehlik/roots,matus-stehlik/roots,matus-stehlik/roots,tbabej/roots,rtrembecky/roots | downloads/views.py | downloads/views.py | from sendfile import sendfile
from django.conf import settings
from django.core.exceptions import PermissionDenied
def download_protected_file(request, model_class, path_prefix, path):
"""
This view allows download of the file at the specified path, if the user
is allowed to. This is checked by calling the model's can_access_files
method.
"""
# filepath is the absolute path, mediapath is relative to media folder
filepath = settings.SENDFILE_ROOT + path_prefix + path
filepath_mediapath = settings.SENDFILE_DIR + path_prefix + path
if request.user.is_authenticated():
# Superusers can access all files
if request.user.is_superuser:
return sendfile(request, filepath)
else:
# We need to check can_access_files on particular instance
obj = model_class.get_by_filepath(filepath_mediapath)
if obj is not None and obj.can_access_files(request.user):
return sendfile(request, filepath)
raise PermissionDenied
| mit | Python | |
0115d088061595fe6c6f8589d0599d1b8e970813 | Add dummy Keras inputs builder | lwtnn/lwtnn,jwsmithers/lwtnn,jwsmithers/lwtnn,jwsmithers/lwtnn,lwtnn/lwtnn,lwtnn/lwtnn | scripts/lwtnn-build-dummy-inputs.py | scripts/lwtnn-build-dummy-inputs.py | #!/usr/bin/env python3
"""Generate fake NN files to test the lightweight classes"""
import argparse
import json
import h5py
import numpy as np
def _run():
args = _get_args()
_build_keras_arch("arch.json")
_build_keras_inputs_file("inputs.json")
_build_keras_weights("weights.h5", verbose=args.verbose)
def _get_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
return parser.parse_args()
def _build_keras_arch(name):
arch = {
'layers': [
{'activation': 'relu', 'name': 'Dense'}
]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(arch, indent=2))
def _build_keras_inputs_file(name):
def build_input(num):
return {"name": "in{}".format(num), "offset": 0.0, "scale": 1.0}
top = {
"inputs": [build_input(x) for x in range(1,5)],
"class_labels": ["out{}".format(x) for x in range(1,5)]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(top, indent=2))
def _build_keras_weights(name, verbose):
half_swap = np.zeros((4,4))
half_swap[0,3] = 1.0
half_swap[1,2] = 1.0
if verbose:
print(half_swap)
bias = np.zeros(4)
with h5py.File(name, 'w') as h5_file:
layer0 = h5_file.create_group("layer_0")
layer0.create_dataset("param_0", data=half_swap)
layer0.create_dataset("param_1", data=bias)
if __name__ == "__main__":
_run()
| mit | Python | |
fdcfd4fbd2f94e646ba3716c20f7518c28e5a0c5 | Add files via upload | pjtatlow/geneysis,pjtatlow/geneysis,pjtatlow/geneysis,pjtatlow/geneysis | python/GenBankParser.py | python/GenBankParser.py | from sys import argv
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio.SeqFeature import FeatureLocation, SeqFeature
import random
startCodons = ["ATG", "GTG", "TTG"]
revStartCodons = ["CAT", "CAC", "CAA"]
def randShiftForward(seqLoc, genomeSeq):
upOrDown = random.randint(1,2)
streamShift = ""
if upOrDown == 2:
start = int(seqLoc.start)
upperRange = start - 150
searchSeq = str(genomeSeq[upperRange:start])
newStartCodon = -1
for codon in startCodons:
if newStartCodon == -1:
newStartCodon = searchSeq.find(codon)
newStart = upperRange + newStartCodon
streamShift = "up"
else:
start = int(seqLoc.start) + 2
lowerRange = start + 150
searchSeq = str(genomeSeq[start:lowerRange])
newStartCodon = -1
for codon in startCodons:
if newStartCodon == -1:
newStartCodon = searchSeq.find(codon)
newStart = start + newStartCodon
streamShift = "down"
newLoc = FeatureLocation(newStart, seqLoc.end, strand=seqLoc.strand)
return newLoc, streamShift
def randShiftReverse(seqLoc, genomeSeq):
upOrDown = random.randint(1,2)
streamShift = ""
if upOrDown == 2:
start = int(seqLoc.end) - 2
upperRange = start - 150
searchSeq = str(genomeSeq[upperRange:start])
newStartCodon = -1
for codon in revStartCodons:
if newStartCodon == -1:
newStartCodon = searchSeq.find(codon)
newStart = upperRange + newStartCodon + 3
streamShift = "down"
else:
start = int(seqLoc.end)
lowerRange = start + 150
searchSeq = str(genomeSeq[start:lowerRange])
newStartCodon = -1
for codon in revStartCodons:
if newStartCodon == -1:
newStartCodon = searchSeq.find(codon)
newStart = start + newStartCodon + 3
streamShift = "up"
newLoc = FeatureLocation(seqLoc.start, newStart, strand=seqLoc.strand)
return newLoc, streamShift
def shiftDescription(oldLoc, newLoc, streamShift):
if oldLoc.strand == 1:
nucShift = abs(oldLoc.start - newLoc.start)
description = "{} nuclotide shift {}stream ({} => {})".format(nucShift, streamShift, oldLoc.start, newLoc.start)
elif oldLoc.strand == -1:
nucShift = abs(oldLoc.end - newLoc.end)
description = "{} nuclotide shift {}stream ({} => {})".format(nucShift, streamShift, oldLoc.end, newLoc.end + 1)
return description
outputFile1 = open(argv[2], "w")
outputFile2 = open(argv[3], "w")
genBankFile = argv[1]
genome = SeqIO.read(genBankFile, "genbank")
seq = genome.seq
features = genome.features
record = SeqRecord(seq, id = genome.id, name = genome.name)
for feature in features:
if feature.type == "CDS":
local = feature.location
geneSeq = seq[local.start:local.end]
if local.strand == 1:
newGene, streamShift = randShiftForward(local, seq)
elif local.strand == -1:
newGene, streamShift = randShiftReverse(local, seq)
name = str(feature.qualifiers["locus_tag"])
featureWO = SeqFeature(newGene, type = name)
record.features.append(featureWO)
name = name.strip("['").strip("']")
shiftDescript = shiftDescription(local, newGene, streamShift)
outputFile1.write("New {}: {}\n".format(name, shiftDescript))
SeqIO.write(record, outputFile2, "genbank")
outputFile1.close()
outputFile2.close()
| mit | Python | |
ad1defca2f4d16cc5e13c579c945858b9f77c450 | Rename script | davidgasquez/kaggle-airbnb | snippets/clean_users_data_frames.py | snippets/clean_users_data_frames.py | import pandas as pd
train_users = pd.read_csv('../datasets/processed/processed_train_users.csv')
test_users = pd.read_csv('../datasets/processed/processed_test_users.csv')
percentage = 0.95
train_mask = train_users.isnull().sum() > train_users.shape[0] * percentage
train_to_remove = list(train_users.isnull().sum()[train_mask].index)
test_mask = test_users.isnull().sum() > test_users.shape[0] * percentage
test_to_remove = list(test_users.isnull().sum()[test_mask].index)
to_remove = list(set(train_to_remove).intersection(test_to_remove))
train_users.drop(to_remove, axis=1, inplace=True)
test_users.drop(to_remove, axis=1, inplace=True)
train_users.to_csv('clean_processed_train_users.csv')
test_users.to_csv('clean_processed_test_users.csv')
| mit | Python | |
874323b53790ee2121b82bd57b9941d4562995d0 | Add reddit downvoting script | voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts | reddit-downvoter.py | reddit-downvoter.py | #!/usr/bin/env python
import praw
import time
settings = {
'username': 'username',
'password': 'password',
'user_agent': 'angry /r/politics robot',
'subreddit': 'politics',
}
r = praw.Reddit(user_agent=settings['user_agent'])
r.login(settings['username'], settings['password'])
submissions = r.get_subreddit(settings['subreddit']).get_hot(limit=100)
for s in submissions:
# Skip if post has already been downvoted
if not s.likes and s.likes is not None:
continue
s.downvote()
print("Downvoted: '{}'".format(s.title[0:70]))
time.sleep(2)
| mit | Python | |
d7b7056b483d52e4d94321019f8e520c166511be | Add singleton decorator. | bueda/django-comrade | comrade/core/decorators.py | comrade/core/decorators.py | def singleton(cls):
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
| mit | Python | |
4bcc8ddb7df762155402cb1229f16849c03666c1 | Check DB consistence | Billy4195/electronic-blackboard,chenyang14/electronic-blackboard,SWLBot/electronic-blackboard,Billy4195/electronic-blackboard,stvreumi/electronic-blackboard,SWLBot/electronic-blackboard,stvreumi/electronic-blackboard,chenyang14/electronic-blackboard,Billy4195/electronic-blackboard,stvreumi/electronic-blackboard,Billy4195/electronic-blackboard,SWLBot/electronic-blackboard,stvreumi/electronic-blackboard,chenyang14/electronic-blackboard,SWLBot/electronic-blackboard | DB_consist.py | DB_consist.py | from mysql import mysql
from env_init import create_data_type
import os
def create_user_data_file():
print("check user_data dir and file...")
try:
if not os.path.exists("static/user_data"):
print('create dir "static/user_data"')
os.makedirs('static/user_data')
if not os.path.isfile("static/user_data/setting.txt"):
print('create file "static/user_data/setting.txt"')
with open("static/user_data/setting.txt", "w") as fp:
fp.write("bluetooth_enable 1")
print("check finish")
except Exception as e:
print("create user_data file failed",e)
def check_column_exist_or_add(db,table,column_name,data_type):
if len(db.query('show columns from %s like "%s"' % (table,column_name))) == 0:
print('%s doesn\'t in table %s' %(column_name,table))
print('add %s %s into table %s' %(column_name,data_type,table))
db.cmd('alter table %s add column %s %s' % (table,column_name,data_type))
def check_table_exist_or_create(db,table_name,sql):
if len(db.query('show tables like "%s"' % (table_name))) == 0:
print('Table %s doesn\'t exist' % table_name)
print('Create table %s' % table_name)
db.cmd(sql)
def check_bluetooth_DB(db):
create_user_data_file()
check_table_exist_or_create(db,'user_prefer','create table user_prefer ( \
pref_id varchar(14) unique key, \
user_id int default 0, \
pref_data_type_01 varchar(100), \
pref_data_type_02 varchar(100), \
pref_data_type_03 varchar(100), \
pref_data_type_04 varchar(100), \
pref_data_type_05 varchar(100), \
pref_set_time datetime default now(), \
pref_is_delete bit(1) default 0)')
check_column_exist_or_add(db,'user','user_bluetooth_id','varchar(50)')
check_column_exist_or_add(db,'user','user_profession','int default 0 not null')
check_column_exist_or_add(db,'image_data','img_like_count','int default 0')
check_column_exist_or_add(db,'user','user_birthday','datetime')
check_column_exist_or_add(db,'text_data','text_like_count','int default 0')
create_data_type("customized_text")
def main():
db = mysql()
db.connect()
check_bluetooth_DB(db)
main()
| apache-2.0 | Python | |
cb98b4a1580e4976de375722012483bf51ef9254 | Add interactive script to get papers from Mendeley API | isb-cgc/ISB-CGC-Webapp,isb-cgc/ISB-CGC-Webapp,isb-cgc/ISB-CGC-Webapp,isb-cgc/ISB-CGC-Webapp | scripts/get_mendeley_papers.py | scripts/get_mendeley_papers.py | ###
# Copyright 2015-2020, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from mendeley import Mendeley
def test():
    """Interactively authenticate against the Mendeley API, list the
    account's groups, and print the titles of the documents in a chosen
    group."""
    # NOTE(review): credentials are hard-coded; they should come from config
    # or the environment rather than source control.
    client_id = 9526
    client_secret = "AmIvWP7FRxeLHX7n"
    redirect_uri = "https://isb-cgc.appspot.com/"

    # These values should match the ones supplied when registering your application.
    mendeley = Mendeley(client_id, client_secret=client_secret, redirect_uri=redirect_uri)
    auth = mendeley.start_implicit_grant_flow()

    # The user needs to visit this URL, and log in to Mendeley.
    login_url = auth.get_login_url()
    print("Go to this link to log in: \n" + login_url)

    # After logging in, the user will be redirected to a URL, auth_response.
    auth_response = input("Copy the redirect link here: \n").rstrip()
    session = auth.authenticate(auth_response)

    # Show every group the account can see, numbered for selection.
    for index, group in enumerate(session.groups.iter(), start=1):
        print("[{}] {}".format(index, group.name))

    # Let the user choose a group by its printed number.
    selected_index = int(input('Select group to get paper from: '))
    group_id = ''
    for index, group in enumerate(session.groups.iter(), start=1):
        if index == selected_index:
            group_id = group.id
            break
    if group_id == '':
        quit()

    # Print the title of every document in the selected group.
    target_group = session.groups.get(group_id)
    for doc in target_group.documents.iter():
        print(doc.title)
    print("something")


if __name__ == "__main__":
    test()
| apache-2.0 | Python | |
8ab0a37794cd131d4baef5f6ceecffc568947505 | Create FocusStack.py | cmcguinness/focusstack | FocusStack.py | FocusStack.py | """
Simple Focus Stacker
Author: Charles McGuinness (charles@mcguinness.us)
Copyright: Copyright 2015 Charles McGuinness
License: Apache License 2.0
This code will take a series of images and merge them so that each
pixel is taken from the image with the sharpest focus at that location.
The logic is roughly the following:
1. Scale and align the images. Changing the focus on a lens, even
if the camera remains fixed, causes a mild zooming on the images.
We need to correct the images so they line up perfectly on top
of each other.
2. Perform a gaussian blur on all images
3. Compute the laplacian on the blurred image to generate a gradient map
4. Create a blank output image with the same size as the original input
images
4. For each pixel [x,y] in the output image, copy the pixel [x,y] from
the input image which has the largest gradient [x,y]
This algorithm was inspired by the high-level description given at
http://stackoverflow.com/questions/15911783/what-are-some-common-focus-stacking-algorithms
"""
import numpy as np
import cv2
def findHomography(image_1_kp, image_2_kp, matches):
    """Estimate the homography mapping image 1 onto image 2 from matched
    keypoints, using RANSAC to reject outlier matches."""
    n = len(matches)
    src_pts = np.zeros((n, 1, 2), dtype=np.float32)
    dst_pts = np.zeros((n, 1, 2), dtype=np.float32)
    for idx, match in enumerate(matches):
        src_pts[idx] = image_1_kp[match.queryIdx].pt
        dst_pts[idx] = image_2_kp[match.trainIdx].pt

    homography, _mask = cv2.findHomography(
        src_pts, dst_pts, cv2.RANSAC, ransacReprojThreshold=2.0)
    return homography
#
# Align the images so they overlap properly...
#
#
def align_images(images):
    """Warp every image onto the first one so later per-pixel comparisons
    line up.  Feature matching uses ORB by default (SIFT optional)."""
    use_sift = False

    aligned = []

    if use_sift:
        detector = cv2.SIFT()
    else:
        detector = cv2.ORB(1000)

    # Image 0 is the reference; everything else is warped onto it.
    print("Detecting features of base image")
    aligned.append(images[0])
    base_gray = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
    base_kp, base_desc = detector.detectAndCompute(base_gray, None)

    for i in range(1, len(images)):
        print("Aligning image {}".format(i))
        kp, desc = detector.detectAndCompute(images[i], None)

        if use_sift:
            bf = cv2.BFMatcher()
            # knnMatch returns the two best candidates per feature; keep a
            # match only when it clearly beats the runner-up (ratio test).
            candidates = bf.knnMatch(desc, base_desc, k=2)
            rawMatches = [m for m, n in candidates
                          if m.distance < 0.7 * n.distance]
        else:
            bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
            rawMatches = bf.match(desc, base_desc)

        # Use only the 128 strongest matches for the homography estimate.
        matches = sorted(rawMatches, key=lambda m: m.distance)[:128]

        hom = findHomography(kp, base_kp, matches)
        warped = cv2.warpPerspective(
            images[i], hom,
            (images[i].shape[1], images[i].shape[0]),
            flags=cv2.INTER_LINEAR)
        aligned.append(warped)

    return aligned
#
# Do a lapacian or other filter
def doLap(image):
    """Gaussian-blur then Laplacian: a simple per-pixel sharpness measure."""
    kernel_size = 9  # YOU SHOULD TUNE THIS VALUE TO SUIT YOUR NEEDS
    smoothed = cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
    return cv2.Laplacian(smoothed, cv2.CV_64F, ksize=kernel_size)
#
# This routine finds the points of best focus in all images and produces a merged result...
#
def focus_stack(unimages):
    """Merge a focus bracket into a single all-in-focus image.

    Each output pixel is copied from whichever aligned input image has the
    largest absolute Laplacian response (i.e. is sharpest) at that location.
    """
    images = align_images(unimages)

    print("Computing the laplacian of the blurred images")
    laps = []
    for i in range(len(images)):
        print("Lap {}".format(i))
        laps.append(doLap(cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY)))

    laps = np.asarray(laps)
    print("Shape of array of laplacians = {}".format(laps.shape))

    # Index of the sharpest image per pixel.  np.argmax picks the first
    # maximum, matching the old per-pixel np.where(...)[0][0] selection, but
    # runs vectorized instead of a Python loop over every pixel.
    best = np.argmax(np.absolute(laps), axis=0)

    output = np.zeros(shape=images[0].shape, dtype=images[0].dtype)
    for n in range(len(images)):
        mask = best == n
        output[mask] = images[n][mask]

    return output
| apache-2.0 | Python | |
731bc1308e94cdb341511618ba5739f6fd37b0b7 | Add a base regressions package | jhumphry/regressions | regressions/__init__.py | regressions/__init__.py | # regressions
"""A package which implements various forms of regression."""
import numpy as np
# Prefer SciPy's linear-algebra routines (richer feature set); fall back to
# NumPy's implementation when SciPy is not installed.  `linalg_source`
# records which backend was picked.
try:
    import scipy.linalg as linalg
    linalg_source = 'scipy'
except ImportError:
    import numpy.linalg as linalg
    linalg_source = 'numpy'
class ParameterError(Exception):
    """Raised when parameters passed to a regression routine are unacceptable."""
    pass
# Maximum number of iterations that iterative routines will attempt before
# giving up, unless the caller overrides it.
DEFAULT_MAX_ITERATIONS = 100
# Default tolerance used in various places, e.g. to decide when an iterative
# routine has converged.
DEFAULT_EPSILON = 0.01
| isc | Python | |
2cdece43768e6bf9613020ae71785f9f158fd72d | Add lc0443_string_compression.py | bowen0701/algorithms_data_structures | lc0443_string_compression.py | lc0443_string_compression.py | """Leetcode 443. String Compression
Easy
URL: https://leetcode.com/problems/string-compression/
Given an array of characters, compress it in-place.
The length after compression must always be smaller than or equal to the
original array.
Every element of the array should be a character (not int) of length 1.
After you are done modifying the input array in-place, return the new length
of the array.
Follow up:
Could you solve it using only O(1) extra space?
Example 1:
Input:
["a","a","b","b","c","c","c"]
Output:
Return 6, and the first 6 characters of the input array should be:
["a","2","b","2","c","3"]
Explanation:
"aa" is replaced by "a2". "bb" is replaced by "b2". "ccc" is replaced by "c3".
Example 2:
Input:
["a"]
Output:
Return 1, and the first 1 characters of the input array should be: ["a"]
Explanation:
Nothing is replaced.
Example 3:
Input:
["a","b","b","b","b","b","b","b","b","b","b","b","b"]
Output:
Return 4, and the first 4 characters of the input array should be:
["a","b","1","2"].
Explanation:
Since the character "a" does not repeat, it is not compressed. "bbbbbbbbbbbb"
is replaced by "b12".
Notice each digit has it's own entry in the array.
Note:
All characters have an ASCII value in [35, 126].
1 <= len(chars) <= 1000.
"""
class Solution(object):
    def compress(self, chars):
        """Compress `chars` in place using run-length encoding.

        Each run of a repeated character is rewritten as the character
        followed by its count's digits (counts of 1 are omitted), e.g.
        ["a","a","b"] -> ["a","2","b"].  Returns the length of the
        compressed prefix; only O(1) extra space is used.

        :type chars: List[str]
        :rtype: int
        """
        write = 0  # next position to write in the compressed prefix
        read = 0   # current scan position in the original data
        n = len(chars)
        while read < n:
            ch = chars[read]
            run_start = read
            # Advance past the whole run of `ch`.
            while read < n and chars[read] == ch:
                read += 1
            chars[write] = ch
            write += 1
            count = read - run_start
            if count > 1:
                # Write the count digit by digit (e.g. 12 -> "1", "2").
                for digit in str(count):
                    chars[write] = digit
                    write += 1
        return write
def main():
    """Placeholder CLI entry point; intentionally does nothing."""
    pass


if __name__ == '__main__':
    main()
| bsd-2-clause | Python | |
e88ef8f047fc1c6d005f78a40da864a575c1cbe7 | Add tests from `index_brief` | alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api | tests/test_brief_utils.py | tests/test_brief_utils.py | import mock
from app.brief_utils import index_brief
from app.models import Brief, Framework
from tests.bases import BaseApplicationTest
@mock.patch('app.brief_utils.index_object', autospec=True)
class TestIndexBriefs(BaseApplicationTest):
    """Briefs should reach the search index only when live on a DOS framework."""

    def _logs_make_brief(self, slug, status):
        """Build a Brief (with a stubbed serialize) on the given framework."""
        framework = Framework.query.filter(Framework.slug == slug).first()
        return Brief(status=status, framework=framework,
                     data={'requirementsLength': '1 week'})

    def test_live_dos_2_brief_is_indexed(self, index_object, live_dos2_framework):
        with self.app.app_context():
            with mock.patch.object(Brief, "serialize", return_value={'serialized': 'object'}):
                brief = self._logs_make_brief('digital-outcomes-and-specialists-2', 'live')
                index_brief(brief)

            index_object.assert_called_once_with(
                framework='digital-outcomes-and-specialists-2',
                object_type='briefs',
                object_id=None,
                serialized_object={'serialized': 'object'},
            )

    def test_draft_dos_2_brief_is_not_indexed(self, index_object, live_dos2_framework):
        with self.app.app_context():
            with mock.patch.object(Brief, "serialize", return_value={'serialized': 'object'}):
                brief = self._logs_make_brief('digital-outcomes-and-specialists-2', 'draft')
                index_brief(brief)

            assert index_object.called is False

    def test_object_not_on_dos_not_indexed(self, index_object, live_g8_framework):
        with self.app.app_context():
            with mock.patch.object(Brief, "serialize", return_value={'serialized': 'object'}):
                brief = self._logs_make_brief('g-cloud-8', 'live')
                index_brief(brief)

            assert index_object.called is False
b9593297bef14fba20a0eaa4ce384c76447aa3ce | Add tests for deepgetattr | lumapps/lumbda,lumapps/lumbda | tests/test_deepgetattr.py | tests/test_deepgetattr.py | from lumbda.collection import deepgetattr
class MyClass(object):
    """Tiny fixture helper: turns keyword arguments into instance attributes."""

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])
def test_object_hasattr():
    """deepgetattr returns the value when the (possibly dotted) path exists."""
    my_object = MyClass(attribute=3, sub_object=MyClass(attribute=5))

    assert deepgetattr(my_object, 'attribute') == 3, 'It should return my_object.attribute'
    assert deepgetattr(my_object, 'sub_object.attribute') == 5, 'It should return my_object.sub_object.attribute'
def test_object_doesnt_haveattr():
    """deepgetattr falls back to the default when any path segment is missing."""
    my_object = MyClass(attribute=3, sub_object=MyClass(attribute=5))

    # Implicit default is None, whatever part of the path is missing.
    for path in ('hello', 'hello.world', 'attribute.hello', 'sub_object.hello', ''):
        assert deepgetattr(my_object, path) is None, 'It should return the default value'

    # Explicit defaults are passed through untouched.
    assert deepgetattr(my_object, 'hello', False) is False, 'It should return the given default value'
    assert deepgetattr(my_object, 'hello', 'world') == 'world', 'It should return the given default value'
| mit | Python | |
7abd4c3893e8c1ced664315ee561ae19d3b04191 | Add test_mods_import.py | nansencenter/DAPPER,nansencenter/DAPPER | tests/test_mods_import.py | tests/test_mods_import.py | import pytest
import os
from pathlib import Path
from importlib import import_module
HMMs = []
# Collect every .py file under a "mods" directory that mentions a
# HiddenMarkovModel.  Fixes from the original: the file handle is now closed
# promptly via a context manager (it was leaked before), and the walk
# variable no longer shadows the `dir` builtin.
for root, dirs, files in os.walk("."):
    if "mods" in root:
        for fname in files:
            if fname.endswith(".py"):
                filepath = Path(root) / fname
                with open(filepath) as fh:
                    contents = fh.read()
                if "HiddenMarkovModel" in contents:
                    HMMs.append(filepath)
@pytest.mark.parametrize(("module_path"), HMMs)
def test_tables_L63(module_path):
    """Each HMM-defining module should import without errors."""
    dotted = str(module_path.with_suffix("")).replace("/", ".")
    import_module(dotted)
    assert True
| mit | Python | |
c6c87e1aafa6b8a4f7929c491398574921417bd4 | Add initial framerate webcam test structure | daveol/Fedora-Test-Laptop,daveol/Fedora-Test-Laptop | tests/webcam_framerate.py | tests/webcam_framerate.py | #!/usr/bin/env python
import qrtools, gi, os
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk, Gst
from avocado import Test
from utils import webcam
class WebcamReadQR(Test):
    """
    Uses the camera selected by v4l2src by default (/dev/video0) to get the
    framerate by creating a pipeline with an fpsdisplaysink and initializing
    Gtk main loop. For now is tested whether the framerate is 30 or more.
    """

    def setUp(self):
        self.error = None
        # NOTE(review): the /dev/video0 existence check is currently disabled.
        #if not os.path.exists('/dev/video0'):
        #self.skip("No webcam detected: /dev/video0 cannot be found");

    def test(self):
        sink_elements = ['fpsdisplaysink video-sink=fakesink text-overlay=false '
                         'signal-fps-measurements=true']
        webcam.create_video_pipeline(self, gst_elements=sink_elements,
                                     v4l2src_args="num-buffers=2000")
        bus = self.video_player.get_bus()
        bus.connect("fps-measurements", self.on_fps_measurement)
        Gtk.main()

        if self.error is not None:
            self.fail("Error: {0}".format(self.error))
        # self.fps is set by on_fps_measurement; assumes at least one
        # measurement fired before the main loop quit -- TODO confirm.
        if self.fps < 30:
            self.fail("Measured fps is below 30, {0}".format(self.fps))
        self.log.debug("Measured fps is 30 or more, {0}".format(self.fps))

    def on_fps_measurement(self, fpsdisplaysink, fps, droprate, avgfps):
        # Keep only the rolling average; it is what the test asserts on.
        self.fps = avgfps

    def on_message(self, bus, message):
        kind = message.type
        if kind == Gst.MessageType.EOS:
            webcam.exit(self)
        elif kind == Gst.MessageType.ERROR:
            webcam.exit(self)
            self.error = message.parse_error()
764f819d5288abcece33c75934bbaa43bf29e055 | Add merge migration | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/users/migrations/0017_merge_20200608_1401.py | corehq/apps/users/migrations/0017_merge_20200608_1401.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-06-08 14:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the two parallel 0016 heads.  It makes no
    # schema changes of its own, so `operations` stays empty.

    dependencies = [
        ('users', '0016_webappspermissions'),
        ('users', '0016_hqapikey'),
    ]

    operations = []
| bsd-3-clause | Python | |
278bfd665c2537587b7743783e1da17772a4b1d9 | Implement q:lines | ElementalAlchemist/txircd,Heufneutje/txircd | txircd/modules/core/bans_qline.py | txircd/modules/core/bans_qline.py | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.modules.xlinebase import XLineBase
from txircd.utils import durationToSeconds, ircLower, now
from zope.interface import implements
from fnmatch import fnmatchcase
class QLine(ModuleData, XLineBase):
    """Q:Line support: bans nickname masks and forces matching users back
    onto their UUID nick."""
    implements(IPlugin, IModuleData)

    name = "QLine"
    core = True
    lineType = "Q"

    def actions(self):
        return [ ("register", 10, self.checkLines),
            ("commandpermission-NICK", 10, self.checkNick),
            ("commandpermission-QLINE", 10, self.restrictToOper),
            ("statstypename", 10, self.checkStatsType),
            ("statsruntype-QLINES", 10, self.listStats),
            ("burst", 10, self.burstXLines) ]

    def userCommands(self):
        return [ ("QLINE", 1, UserQLine(self)) ]

    def serverCommands(self):
        return [ ("ADDLINE", 1, ServerAddQLine(self)),
            ("DELLINE", 1, ServerDelQLine(self)) ]

    def checkUserMatch(self, user, mask, data):
        """True when the user's proposed (data["newnick"]) or current nick
        matches the banned mask, case-insensitively."""
        if data and "newnick" in data:
            return fnmatchcase(ircLower(data["newnick"]), ircLower(mask))
        return fnmatchcase(ircLower(user.nick), ircLower(mask))

    def changeNick(self, user, reason, hasBeenConnected):
        """Notify the user their nick is banned and force it to their UUID."""
        if hasBeenConnected:
            user.sendMessage("NOTICE", "Your nickname has been changed, as it is now invalid. ({})".format(reason))
        else:
            user.sendMessage("NOTICE", "The nickname you chose was invalid. ({})".format(reason))
        user.changeNick(user.uuid)

    def checkLines(self, user):
        """On registration, renick any user whose nick matches a Q:Line."""
        reason = self.matchUser(user)
        if reason:
            self.changeNick(user, reason, False)
        return True

    def checkNick(self, user, data):
        """Reject a NICK change to a banned nickname.

        Bug fix: the match reason was never captured before, so building the
        rejection NOTICE raised a NameError on `reason` whenever a Q:Line
        actually matched.
        """
        reason = self.matchUser(user, { "newnick": data["nick"] })
        if reason:
            user.sendMessage("NOTICE", "The nickname you chose was invalid. ({})".format(reason))
            return False
        return True

    def restrictToOper(self, user, data):
        """Only opers holding the command-qline permission may use QLINE."""
        if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-qline"):
            user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
            return False
        return None

    def checkStatsType(self, typeName):
        # Bug fix: this module answers for the "Q" stats type.  It previously
        # checked "Z"/"ZLINES" (a copy/paste from the Z:Line module), which
        # never matched the "statsruntype-QLINES" action registered above.
        if typeName == "Q":
            return "QLINES"
        return None

    def listStats(self):
        return self.generateInfo()

    def burstXLines(self, server):
        self.burstLines(server)
class UserQLine(Command):
    """Oper-facing /QLINE command: add or remove nickname bans."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, user, params, prefix, tags):
        # One parameter removes a line; three or more add one.  Exactly two
        # (mask + duration but no reason) is rejected as incomplete.
        if len(params) < 1 or len(params) == 2:
            user.sendSingleError("QLineParams", irc.ERR_NEEDMOREPARAMS, "QLINE", "Not enough parameters")
            return None
        if len(params) == 1:
            return { "mask": params[0] }
        return {
            "mask": params[0],
            "duration": durationToSeconds(params[1]),
            "reason": " ".join(params[2:])
        }

    def execute(self, user, data):
        banmask = data["mask"]
        if "reason" in data:
            # Adding a line.
            if not self.module.addLine(banmask, now(), data["duration"], user.hostmask(), data["reason"]):
                user.sendMessage("NOTICE", "*** Q:Line for {} is already set.".format(banmask))
                return True
            # Force-renick any connected user whose nick now matches.
            for connectedUser in self.module.ircd.users.itervalues():
                matchReason = self.module.matchUser(connectedUser)
                if matchReason:
                    self.module.changeNick(connectedUser, matchReason, True)
            user.sendMessage("NOTICE", "*** Q:Line for {} has been set.".format(banmask))
            return True
        # Removing a line.
        if not self.module.delLine(banmask):
            user.sendMessage("NOTICE", "*** Q:Line for {} doesn't exist.".format(banmask))
            return True
        user.sendMessage("NOTICE", "*** Q:Line for {} has been removed.".format(banmask))
        return True
class ServerAddQLine(Command):
    """Server-to-server ADDLINE handler; delegates to the x:line base."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        return self.module.handleServerAddParams(server, params, prefix, tags)

    def execute(self, server, data):
        return self.module.executeServerAddCommand(server, data)
class ServerDelQLine(Command):
    """Server-to-server DELLINE handler; delegates to the x:line base."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        return self.module.handleServerDelParams(server, params, prefix, tags)

    def execute(self, server, data):
        return self.module.executeServerDelCommand(server, data)


qlineModule = QLine()
0d67251672b6bfd818e6c1f9391f77a35fce9e0e | Create words2vec.py | Lopsy/word2vec-stream | words2vec.py | words2vec.py | """
V is an object behaving like a function.
V("rabbit") is the 300-element vector for rabbit.
You can also query whether strings are in the word2vec database:
> "rabbit" in V
(V also kind of behaves like a dict -- V[word] returns the word's location
in the big file -- but ignore that.)
nd is the normalized dot product:
> nd(V("rabbit"), V("carrot")) # 0.3630643116925243
> nd("rabbit", "carrot") # 0.3630643116925243
"""
# Replace this with the filepath to your gwords.txt file
WORD_FILEPATH = "random_files/gwords.txt"
import struct
from word2vec import add, neg, gstream, normalizedDot, unit
from heapq import nlargest # nlargest(3, L, key=None) -> largest 3 from L
class VecDatabase(dict):
    """Maps each word to its byte offset in the word2vec binary file, and
    doubles as a callable returning the 300-float vector for a word."""

    def __init__(self, *args):
        dict.__init__(self, *args)
        self.loaded = False       # have offsets been read from disk?
        self.wordlist = None      # lazily-built ordered list of words

    def __iter__(self):
        if not self.wordlist:
            self.loadWordlist()
        for word in self.wordlist:
            yield word

    def loadWordlist(self):
        self.wordlist = S()

    def __len__(self):
        if not self.loaded:
            self.loadWordLocations()
        return dict.__len__(self)

    def __call__(self, word):
        """Return the 300-dimensional vector for `word`."""
        if not self.loaded:
            self.loadWordLocations()
        if word not in self:
            raise KeyError(word)  # not in dictionary
        # NOTE(review): the binary path is hard-coded here while the word
        # list path comes from WORD_FILEPATH -- confirm they stay in sync.
        with open("random_files/google_word2vec.bin", "rb") as f:
            f.seek(self[word])
            return struct.unpack("<" + "f" * 300, f.read(1200))

    def loadWordLocations(self):
        # Each record in the binary file is the word, one separator byte and
        # 300 little-endian floats (1200 bytes); mirror that layout to
        # compute every word's offset.
        with open(WORD_FILEPATH, "r") as f:
            fileloc = 1217
            for line in f.readlines():
                word = line[:-1]
                fileloc += len(word) + 1
                self[word] = fileloc
                fileloc += 1200
        self.loaded = True

    def __getitem__(self, key):
        # Strings index the offset dict; ints and slices index the word list.
        if isinstance(key, str):
            return dict.__getitem__(self, key)
        if not self.wordlist:
            self.loadWordlist()
        if isinstance(key, slice):
            return [self.wordlist[i] for i in range(*key.indices(len(self)))]
        if isinstance(key, int):
            if key < 0:
                key += len(self)
            if key < 0 or key >= len(self):
                raise IndexError("The index (%d) is out of range." % key)
            return self.wordlist[key]
        raise TypeError("Invalid argument type.")
def V():
    """Build the word2vec lookup: a callable that is also a dict of offsets."""
    w2v = VecDatabase()
    w2v.loadWordLocations()
    return w2v


# Replace the factory with the ready-to-use singleton instance.
V = V()
def S():
    """Return the list of words in the word2vec database."""
    with open(WORD_FILEPATH, "r") as f:
        return [line[:-1] for line in f.readlines()]
def nd(v, w):
    """Normalized dot product; accepts words or 300-dim vectors."""
    if isinstance(v, str):
        v = V(v)
    if isinstance(w, str):
        w = V(w)
    return normalizedDot(v, w)
def together(word):
    """Words that co-occur with `word` inside underscore-joined phrases.

    For example, together("canoe") includes "rental" and "usa" (among other
    strings) because "Canoe_Rental" and "USA_Canoe_Kayak" are both in the
    word2vec database.
    """
    matching = (phrase for phrase in V if word in phrase.lower().split("_"))
    return {part for phrase in matching for part in phrase.lower().split("_")}
def parse(equation, unitVectors=True):
    """Evaluate an equation like 'water - boat + car' to a 300-dim vector.

    Assumes +, -, and # never appear inside the words themselves.  When
    unitVectors is true, each word vector is normalized before summing.
    """
    compact = equation.replace(" ", "")
    if not compact.startswith("-"):
        compact = "+" + compact
    compact += "#"  # sentinel terminator

    total = [0] * 300
    pos = 0
    while True:
        sign = compact[pos]
        pos += 1
        assert sign in "+-#"
        if sign == "#":
            return total
        word = ""
        while compact[pos] not in "+-#":
            word += compact[pos]
            pos += 1
        vec = V(word)
        if unitVectors:
            vec = unit(vec)
        total = add(total, vec) if sign == "+" else add(total, neg(vec))
| mit | Python | |
b522fed0a1ca2570b8652ddb64b8c847d5964d11 | Add a script to generate all known codes and their decoding | baruch/lsi_decode_loginfo | list_all_codes.py | list_all_codes.py | #!/usr/bin/env python
import lsi_decode_loginfo as loginfo
def generate_values(data):
    """Walk a (title, mask, subcodes) tree depth-first, yielding one list of
    (title, key, name, detail) tuples per root-to-leaf path.

    Each subcodes entry maps a key to (name, child_tree_or_None, detail);
    a None child marks a leaf.  The mask element of `data` is not needed
    for enumeration and is ignored (the old unused local was removed).
    """
    title = data[0]
    sub = data[2]
    for key, (key_name, key_sub, key_detail) in sub.items():
        head = (title, key, key_name, key_detail)
        if key_sub is None:
            yield [head]
        else:
            for tail in generate_values(key_sub):
                yield [head] + tail
# Enumerate every combination of loginfo codes, printing the combined value
# followed by one line per contributing field, then a blank separator.
for entry in generate_values(loginfo.types):
    combined = 0
    for line in entry:
        combined |= line[1]
    print(' %-10s\t0x%08X' % ('Value', combined))
    for line in entry:
        print(' %-10s\t0x%08X %s %s' % (line[0], line[1], line[2], line[3]))
    print('')
    print(' ')
    print('')
| mit | Python | |
a86150c016fd88da9849c8abe58e7a3cf9233521 | Add sleepytime plugin | tomleese/smartbot,Muzer/smartbot,Cyanogenoid/smartbot,thomasleese/smartbot-old | smartbot/plugins/sleepytime.py | smartbot/plugins/sleepytime.py | from datetime import datetime, timedelta
import smartbot.plugin
class Plugin(smartbot.plugin.Plugin):
    """Check when you should wake up."""

    names = ['sleepytime', 'sleepyti.me']

    @staticmethod
    def calculate_wake_up_times(now=None, time_to_sleep=14,
                                sleep_cycle_duration=90):
        """Yield six candidate wake-up datetimes: `time_to_sleep` minutes to
        fall asleep, then one per completed sleep cycle."""
        moment = now if now is not None else datetime.now()
        moment += timedelta(minutes=time_to_sleep)
        for _ in range(6):
            moment += timedelta(minutes=sleep_cycle_duration)
            yield moment

    def on_command(self, msg, stdin, stdout, reply):
        wake_times = list(self.calculate_wake_up_times())
        reply(' or '.join(t.strftime('%l:%M %p').strip() for t in wake_times))
| mit | Python | |
86ea6884d381f9153d088c634f5353537b967403 | solve 1 problem | Shuailong/Leetcode | solutions/binary-tree-paths.py | solutions/binary-tree-paths.py | #!/usr/bin/env python
# encoding: utf-8
"""
binary-tree-paths.py
Created by Shuailong on 2016-03-04.
https://leetcode.com/problems/binary-tree-paths/.
"""
from collections import deque
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
    def binaryTreePathsList(self, root):
        """Return every root-to-leaf path as a list of node values.

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        if root.left is None and root.right is None:
            return [[root.val]]
        # Collect paths from both subtrees (left first), then prepend the
        # current value to each.
        paths = (self.binaryTreePathsList(root.left) +
                 self.binaryTreePathsList(root.right))
        for path in paths:
            path.insert(0, root.val)
        return paths

    def binaryTreePaths(self, root):
        """Return every root-to-leaf path rendered as "a->b->c" strings.

        :type root: TreeNode
        :rtype: List[str]
        """
        return ['->'.join(str(value) for value in path)
                for path in self.binaryTreePathsList(root)]
def main():
    """Ad-hoc demo: build the example tree {1: [2 -> right 5, 3]} and print
    its root-to-leaf paths."""
    solution = Solution()

    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.right = TreeNode(5)

    print(solution.binaryTreePaths(root))


if __name__ == '__main__':
    main()
| mit | Python | |
4f4ea5b8c76f35e70368fef0e932f1630788a64a | read json data from file and parse out the good stuff | levisimons/CRASHLACMA,levisimons/CRASHLACMA | parse_info_from_json.py | parse_info_from_json.py | import json
from pprint import pprint
import re
# Load one captured tweet and pull out the fields we care about.
with open('crashlacma.20140524-102001.json') as json_data:
    data = json.load(json_data)

# TODO: handle test cases
# testcases:
#   hollywood & vine, hollywood and vine
#   order of operations: hashtag, img, address, other text.
#   hashtag allcaps or lowercase
#   uploaded image, link to hosted image

# if image is uploaded via twitter
img_url = data["entities"]["media"][0]["media_url"]
hashtag = data["entities"]["hashtags"][0]["text"]  # won't need this parsed
tweet = data["text"]

# Strip @mentions, #hashtags and URLs, then collapse runs of whitespace.
tweet_text = ' '.join(re.sub("(@[A-Za-z0-9]+)|(#[A-Za-z0-9]+)|(\w+:\/\/\S+)"," ",tweet).split())

for value in (hashtag, img_url, tweet, tweet_text):
    print(value)
510adb95228852456e7a8074aee10d6d1dad167a | add metadata class for handling guppy geometry attributes | fortyninemaps/karta,fortyninemaps/karta,fortyninemaps/karta | vector/metadata.py | vector/metadata.py | """ Metadata management """
# Metadata objects should
# - maintain internal consistency
# - be concatenable
# - be iterable
import copy
class GeoMetadata(object):
    """Class for handling collections of metadata.

    Wraps either a single attribute list or a dict of equal-length,
    type-homogeneous attribute lists.  Instances support len(), iteration
    over field names, indexing (one tuple per record, in field order), and
    concatenation via `+` / `extend`.
    """
    _dict = {}  # NOTE(review): unused legacy attribute; kept for compatibility
    _fieldtypes = []

    def __init__(self, data):
        """Create a collection of metadata from *data*, which may be a list
        with uniform type or a dictionary with equally-sized fields of
        uniform type."""
        if hasattr(data, 'keys') and hasattr(data.values, '__call__'):
            # Dictionary of attributes: every field must be type-uniform,
            # and all fields must have the same length.
            for k in data:
                dtype = type(data[k][0])
                if False in (isinstance(a, dtype) for a in data[k]):
                    raise GMetadataError("Data must have uniform type")
            n = len(data[k])
            if False in (len(data[k]) == n for k in data):
                raise GMetadataError("Data must have uniform lengths")
        else:
            # Single attribute: wrap scalars, then require type uniformity.
            if not hasattr(data, '__iter__'):
                data = [data]
            dtype = type(data[0])
            if False in (isinstance(a, dtype) for a in data):
                raise GMetadataError("Data must have uniform type")
            data = {'values': data}
        self._data = data
        self._fieldtypes = [type(data[k][0]) for k in data]

    def __add__(self, other):
        # Implicitly returns None when `other` is not a GeoMetadata,
        # mirroring the historical behaviour.
        if isinstance(other, type(self)):
            res = copy.deepcopy(self)
            return res.extend(other)

    def extend(self, other):
        """Append the records of *other* (same fields, same types) in place
        and return self."""
        if isinstance(other, type(self)):
            for i, k in enumerate(self._data):
                # Bug fix: compare the stored field types themselves.  The
                # old code compared type(<type>) == type(<type>), which is
                # always `type == type` and therefore never failed.
                if self._fieldtypes[i] == other._fieldtypes[i]:
                    self._data[k] += other._data[k]
                else:
                    raise GMetadataError("Cannot combine metadata instances "
                                         "with different type hierarchies")
            return self

    def __iter__(self):
        return self._data.__iter__()

    def __getitem__(self, idx):
        # One record: the idx-th value of every field, in field order.
        return tuple([self._data[k][idx] for k in self._data])


class GMetadataError(Exception):
    """Raised when metadata is malformed or incompatible for combination."""

    def __init__(self, message=''):
        self.message = message

    def __str__(self):
        return self.message
| mit | Python | |
fb1d3cd2898a37049ac5be11462bdb54727065e6 | Test auditlog auth. | inteligencia-coletiva-lsd/pybossa,OpenNewsLabs/pybossa,PyBossa/pybossa,stefanhahmann/pybossa,jean/pybossa,stefanhahmann/pybossa,Scifabric/pybossa,Scifabric/pybossa,OpenNewsLabs/pybossa,geotagx/pybossa,inteligencia-coletiva-lsd/pybossa,jean/pybossa,geotagx/pybossa,PyBossa/pybossa | test/test_authorization/test_auditlog_auth.py | test/test_authorization/test_auditlog_auth.py | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test, assert_not_raises
from pybossa.auth import require
from nose.tools import assert_raises
from werkzeug.exceptions import Forbidden, Unauthorized
from mock import patch
from test_authorization import mock_current_user
from factories import AppFactory, BlogpostFactory, UserFactory
from factories import reset_all_pk_sequences
from pybossa.core import project_repo, auditlog_repo
class TestAuditlogAuthorization(Test):
    """Auditlog read access: only admins and pro owners may read."""

    mock_anonymous = mock_current_user()
    mock_authenticated = mock_current_user(anonymous=False, admin=False, id=2)
    mock_admin = mock_current_user(anonymous=False, admin=True, id=1)

    def _hidden_app_logs(self, owner=None):
        """Create a hidden app (optionally owned) and return (app, its logs)."""
        if owner is None:
            app = AppFactory.create()
        else:
            app = AppFactory.create(owner=owner)
        app.hidden = 1
        project_repo.update(app)
        return app, auditlog_repo.filter_by(app_short_name=app.short_name)

    @patch('pybossa.auth.current_user', new=mock_anonymous)
    @patch('pybossa.auth.blogpost.current_user', new=mock_anonymous)
    def test_anonymous_user_cannot_read_auditlog(self):
        """Test anonymous users cannot read auditlogs"""
        app, logs = self._hidden_app_logs()
        for log in logs:
            assert_raises(Unauthorized, getattr(require, 'auditlog').read, log)

    @patch('pybossa.auth.current_user', new=mock_authenticated)
    @patch('pybossa.auth.blogpost.current_user', new=mock_authenticated)
    def test_owner_user_cannot_read_auditlog(self):
        """Test owner users cannot read auditlogs"""
        owner = UserFactory.create_batch(2)[1]
        app, logs = self._hidden_app_logs(owner)
        assert self.mock_authenticated.id == app.owner_id
        for log in logs:
            assert_raises(Unauthorized, getattr(require, 'auditlog').read, log)

    @patch('pybossa.auth.current_user', new=mock_authenticated)
    @patch('pybossa.auth.blogpost.current_user', new=mock_authenticated)
    def test_pro_user_can_read_auditlog(self):
        """Test pro users can read auditlogs"""
        owner = UserFactory.create_batch(2, pro=True)[1]
        app, logs = self._hidden_app_logs(owner)
        assert self.mock_authenticated.id == app.owner_id
        for log in logs:
            assert_not_raises(Exception, getattr(require, 'auditlog').read, log)

    @patch('pybossa.auth.current_user', new=mock_admin)
    @patch('pybossa.auth.blogpost.current_user', new=mock_admin)
    def test_admin_user_can_read_auditlog(self):
        """Test admin users can read auditlogs"""
        owner = UserFactory.create_batch(2)[1]
        app, logs = self._hidden_app_logs(owner)
        for log in logs:
            assert_not_raises(Exception, getattr(require, 'auditlog').read, log)
| agpl-3.0 | Python | |
093a029ae8607f15b9b446f54293e15f13d44c0e | Create led.py | CMDann/learning-banana-pi,CMDann/learning-banana-pi,CMDann/learning-banana-pi,CMDann/learning-banana-pi | Python/led.py | Python/led.py | import RPi.GPIO as GPIO
import time
# blinking function
# blinking function
def blink(pin):
    """Drive `pin` high then low, holding each state for one second."""
    GPIO.output(pin, GPIO.HIGH)
    time.sleep(1)
    GPIO.output(pin, GPIO.LOW)
    time.sleep(1)
# to use Raspberry Pi board pin numbers
GPIO.setmode(GPIO.BOARD)

# set up GPIO output channel
GPIO.setup(21, GPIO.OUT)

# blink GPIO21 50 times
# Fix: the loop previously toggled pin 11, which was never configured as an
# output; pin 21 is the one set up (and named in the comment) above.
for i in range(0, 50):
    blink(21)

GPIO.cleanup()
| apache-2.0 | Python | |
38452a8a2f95852f8ab7f0910ff875a5b894c2c9 | add shell_builder.py | wez/watchman,wez/watchman,facebook/watchman,facebook/watchman,wez/watchman,wez/watchman,facebook/watchman,wez/watchman,nodakai/watchman,nodakai/watchman,facebook/watchman,facebook/watchman,wez/watchman,nodakai/watchman,nodakai/watchman,nodakai/watchman,facebook/watchman,wez/watchman,nodakai/watchman,nodakai/watchman,wez/watchman,facebook/watchman,nodakai/watchman,facebook/watchman,facebook/watchman,wez/watchman,nodakai/watchman | build/fbcode_builder/shell_builder.py | build/fbcode_builder/shell_builder.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'''
shell_builder.py allows running the fbcode_builder logic
on the host rather than in a container.
It emits a bash script with set -exo pipefail configured such that
any failing step will cause the script to exit with failure.
== How to run it? ==
cd build
python fbcode_builder/shell_builder.py > ~/run.sh
bash ~/run.sh
'''
import os
import distutils.spawn
from fbcode_builder import FBCodeBuilder
from shell_quoting import (
raw_shell, shell_comment, shell_join, ShellQuoted
)
from utils import recursively_flatten_list
class ShellFBCodeBuilder(FBCodeBuilder):
    '''Renders fbcode_builder steps as a plain bash script.'''

    def _render_impl(self, steps):
        '''Flatten the nested step list and join it into one shell script.'''
        return raw_shell(shell_join('\n', recursively_flatten_list(steps)))

    def workdir(self, dir):
        '''Create *dir* if needed and make it the current directory.'''
        return [
            ShellQuoted('mkdir -p {d} && cd {d}').format(
                d=dir
            ),
        ]

    def run(self, shell_cmd):
        '''Emit *shell_cmd* verbatim (already a quoted shell fragment).'''
        return ShellQuoted('{cmd}').format(cmd=shell_cmd)

    def step(self, name, actions):
        '''Wrap *actions* with a banner comment naming the step.'''
        assert '\n' not in name, 'Name {0} would span > 1 line'.format(name)
        b = ShellQuoted('')
        return [ShellQuoted('### {0} ###'.format(name)), b] + actions + [b]

    def setup(self):
        '''Script preamble: strict bash mode, plus optional ccache wiring.'''
        steps = [
            ShellQuoted('set -exo pipefail'),
        ]
        if self.has_option('ccache_dir'):
            ccache_dir = self.option('ccache_dir')
            steps += [
                ShellQuoted(
                    # Set CCACHE_DIR before the `ccache` invocations below.
                    # BUG FIX: the braces must be escaped as ${{...}} so that
                    # ShellQuoted.format() does not treat `${CC:-gcc}` as a
                    # format field (which raised KeyError at render time).
                    'export CCACHE_DIR={ccache_dir} '
                    'CC="ccache ${{CC:-gcc}}" CXX="ccache ${{CXX:-g++}}"'
                ).format(ccache_dir=ccache_dir)
            ]
        return steps

    def comment(self, comment):
        '''Emit *comment* as shell `#` comment lines.'''
        return shell_comment(comment)

    def copy_local_repo(self, dir, dest_name):
        '''Copy the local repo checkout at *dir* into *dest_name*.'''
        return [
            ShellQuoted('cp -r {dir} {dest_name}').format(
                dir=dir,
                dest_name=dest_name
            ),
        ]
def find_project_root():
    """Locate the git repo root two directories above this file.

    Assumes the layout ``<root>/build/fbcode_builder/shell_builder.py``.
    """
    this_dir = os.path.dirname(os.path.realpath(__file__))
    candidate = os.path.dirname(os.path.dirname(this_dir))
    if not os.path.isdir(os.path.join(candidate, '.git')):
        raise RuntimeError(
            "I expected shell_builder.py to be in the "
            "build/fbcode_builder subdir of a git repo")
    return candidate
def persistent_temp_dir(repo_root):
    """Return a per-repo scratch directory under $HOME.

    The directory name encodes *repo_root*: '/' and '\\' become 'sZs',
    ':' is dropped, so distinct repos map to distinct directories.
    """
    sanitized_chars = []
    for ch in repo_root:
        if ch in ('/', '\\'):
            sanitized_chars.append('sZs')
        elif ch != ':':
            sanitized_chars.append(ch)
    sanitized = ''.join(sanitized_chars)
    return os.path.join(os.path.expandvars("$HOME"), '.fbcode_builder-' + sanitized)
if __name__ == '__main__':
    from utils import read_fbcode_builder_config, build_fbcode_builder_config
    # Build options point every path at a per-repo persistent temp dir so
    # repeated runs can reuse checkouts and ccache state.
    repo_root = find_project_root()
    temp = persistent_temp_dir(repo_root)
    config = read_fbcode_builder_config('fbcode_builder_config.py')
    builder = ShellFBCodeBuilder()
    builder.add_option('projects_dir', temp)
    # Enable ccache only when the binary is actually on the host PATH.
    if distutils.spawn.find_executable('ccache'):
        builder.add_option('ccache_dir',
            os.environ.get('CCACHE_DIR', os.path.join(temp, '.ccache')))
    builder.add_option('prefix', os.path.join(temp, 'installed'))
    builder.add_option('make_parallelism', 4)
    # The current checkout is used as the local repo for the configured project.
    builder.add_option(
        '{project}:local_repo_dir'.format(project=config['github_project']),
        repo_root)
    make_steps = build_fbcode_builder_config(config)
    steps = make_steps(builder)
    # The generated bash script goes to stdout; redirect it to a file to run.
    print(builder.render(steps))
| mit | Python | |
30bc00ce84355cc40e62b1f46d32a17c6b07ac0c | Create GreenHat.py | SangramChavan/Ubuntu-16.04-new-installation,SangramChavan/Scripts,SangramChavan/Scripts,SangramChavan/Ubuntu-16.04-new-installation | GreenHat.py | GreenHat.py | # Copyright (c) 2015 Angus H. (4148)
# Distributed under the GNU General Public License v3.0 (GPLv3).
from datetime import date, timedelta
from random import randint
from time import sleep
import sys
import subprocess
import os
# returns a date string for the date that is N days before STARTDATE
def get_date_string(n, startdate):
    """Format the day *n* days before *startdate* as a git-style date string."""
    target = startdate - timedelta(days=n)
    return target.strftime("%a %b %d %X %Y %z -0400")
# main app
# main app  (Python 2: uses `print` statements)
def main(argv):
    # Usage: GreenHat.py N [YYYY-MM-DD]
    #   N -- number of days to walk backwards from the start date
    #   start date -- optional; defaults to today
    if len(argv) < 1 or len(argv) > 2:
        print "Error: Bad input."
        sys.exit(1)
    n = int(argv[0])
    if len(argv) == 1:
        startdate = date.today()
    if len(argv) == 2:
        # Parse an ISO-style date from fixed character positions.
        startdate = date(int(argv[1][0:4]), int(argv[1][5:7]), int(argv[1][8:10]))
    i = 0
    while i <= n:
        curdate = get_date_string(i, startdate)
        # A random number (1-10) of commits is produced for each day.
        num_commits = randint(1, 10)
        for commit in range(0, num_commits):
            # Write a random marker into realwork.txt, then commit and push with
            # both author and committer dates forced to `curdate`, so the commit
            # is recorded as of that back-dated day.
            subprocess.call("echo '" + curdate + str(randint(0, 1000000)) +"' > realwork.txt; git add realwork.txt; GIT_AUTHOR_DATE='" + curdate + "' GIT_COMMITTER_DATE='" + curdate + "' git commit -m 'update'; git push;", shell=True)
            sleep(.5)
        i += 1
    # Clean up the scratch file with one final commit.
    subprocess.call("git rm realwork.txt; git commit -m 'delete'; git push;", shell=True)
if __name__ == "__main__":
    main(sys.argv[1:])
| mit | Python | |
985f23ee5e107c647d5f5e5b245c3fb7ff2d411b | Write script to convert PMF-based result to expected value | kemskems/otdet | bin/to_expected.py | bin/to_expected.py | #!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert result from PMF'
                                     ' to expected value')
    parser.add_argument('file', type=str,
                        help='Result DataFrame in HDF5 format')
    parser.add_argument('outfile', type=str,
                        help='Output file')
    parser.add_argument('--hdf-key', type=str, default='df',
                        help='Identifier in the HDF5 store')
    args = parser.parse_args()

    df = pd.read_hdf(args.file, args.hdf_key)
    data = np.array([])
    # Group the columns by their first four levels; each group's sub-frame
    # holds the PMF over its support values (the remaining column level).
    grouped = df.groupby(level=df.columns.names[:4], axis=1)
    columns = []
    for name, _ in grouped:
        columns.append(name)
        pmf = df[name].values
        supp = np.array(df[name].columns)
        # Expected value per row: support weighted by the PMF.
        expected = np.sum(supp*pmf, axis=1)
        data = np.concatenate((data, expected))
    index = df.index.copy()
    columns = pd.MultiIndex.from_tuples(columns)
    # BUG FIX: `data` is one column vector per group, concatenated end-to-end,
    # so it must be unstacked column-by-column (Fortran order).  The previous
    # C-order reshape interleaved values from different groups across rows.
    df2 = pd.DataFrame(data.reshape((len(index), len(columns)), order='F'),
                       index=index, columns=columns)
    df2.to_hdf(args.outfile, args.hdf_key)
    print("Stored in HDF5 format with the name '{}'".format(args.hdf_key))
| mit | Python | |
3f6ec1a3e9bcdd2dee714e74fac7215b19ae432f | Add an example of a blocking tcp server | facundovictor/non-blocking-socket-samples | blocking_socket.py | blocking_socket.py | """
A Simple example for testing the SimpleServer Class. A simple telnet server.
It is for studying purposes only.
"""
from server import SimpleServer
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def handle_message(sockets=None):
    """
    Handle a simple TCP connection.

    :param sockets: optional (readable, writable, errors) triple of sockets,
        presumably supplied by the SimpleServer dispatch (see registration
        below); when None the handler is a no-op.
    """
    if sockets is not None:
        (readable, writable, errors) = sockets
        try:
            while True:
                # Blocking read; an empty result means the peer closed.
                data = readable.recv(1024)
                print('Received data: %s' % (data))
                if data:
                    print('Sending a custom ACK to the client')
                    # NOTE(review): sendall() of a str only works on Python 2;
                    # Python 3 requires bytes (b"Received ;)\n") -- confirm the
                    # targeted interpreter.
                    writable.sendall("Received ;)\n")
                else:
                    print('Received empty data')
                    break
        finally:
            # Always release the connection, even if recv/sendall raised.
            # SS is the module-level server instance defined below.
            SS.close_connection()
# Wire up a blocking server on localhost:7878 with the handler above.
# (The method name is spelled `bind_and_listeen` in the SimpleServer API.)
SS = SimpleServer(blocking=True)
SS.register_handler(handle_message)
SS.bind_and_listeen("localhost", 7878)
| mit | Python | |
22fd64e88700fb8cb0c86eef10df1ae0c5fb91c9 | Create parse_large_file.py | WanghongLin/miscellaneous,WanghongLin/miscellaneous | tools/parse_large_file.py | tools/parse_large_file.py | #!/usr/bin/evn python
# -*- encoding: utf-8 -*-
#
# Simple example for processing large file in multiple threads line by line
#
# Copyright 2019 Wanghong Lin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import multiprocessing
import itertools
import subprocess
def get_file_lines(f_name):
    """
    Count the lines of a file by delegating to ``wc -l``.
    :param f_name: full file path
    :return: total number of lines
    :raises IOError: if ``wc`` exits with a non-zero status
    """
    proc = subprocess.Popen(['wc', '-l', f_name], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise IOError(err)
    # `wc -l` prints "<count> <name>"; the first token is the line count.
    return int(out.split()[0])
def perform_parse(*args, **kwargs):
    """Parse lines [start, stop) of the file given as args[0].

    Expects keyword arguments 'start' and 'stop' (0-based line indices);
    the per-line work is a placeholder split on commas.
    """
    first, last = kwargs['start'], kwargs['stop']
    path = args[0]
    with open(path, 'r') as handle:
        for line in itertools.islice(handle, first, last):
            line.split(',')
            # add your other processing logic here
def parse_large_file(file_path):
    """Parse *file_path* line by line, splitting the work across threads.

    Each thread handles one contiguous range of lines; the last thread
    absorbs the remainder left over by the integer division.
    """
    file_lines = get_file_lines(file_path)
    # BUG FIX: use floor division -- under Python 3 `/` yields a float, which
    # crashes range()/islice() below -- and guard against 0 threads (and the
    # resulting ZeroDivisionError) on single-core machines.
    number_of_threads = max(1, multiprocessing.cpu_count() // 2)
    slice_lines = file_lines // number_of_threads
    threads = []
    for i in range(number_of_threads):
        start = i * slice_lines
        # The last slice runs to the end of the file.
        stop = file_lines if i + 1 == number_of_threads else (i + 1) * slice_lines
        t = threading.Thread(target=perform_parse, args=(file_path,),
                             kwargs={'start': start, 'stop': stop},
                             name='Thread {}'.format(i))
        print('{0} line range {1} -> {2}'.format(t.name, start, stop))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
if __name__ == '__main__':
    # Demo entry point -- replace the placeholder with a real file path.
    parse_large_file('path/to/large_file')
| apache-2.0 | Python | |
3a11d1c5235fcc7f40aca4395a183d7e1316117a | Add documentation support for swagger | faith0811/makiki,faith0811/makiki | makiki/documentation.py | makiki/documentation.py | # -*- coding: utf-8 -*-
import json
class Documentation(object):
    """Builds a Swagger 2.0 document from hug-style route documentation."""

    # Maps hug's human-readable input descriptions onto Swagger type names.
    HUG_TYPE_TRANSLATION = {
        'A Whole number': 'integer',
        'Accepts a JSON formatted data structure': 'object',
        'Basic text / string value': 'string',
        'Multiple Values': 'array',
    }

    def __init__(self, hug_doc, version='1.0', title='REST API', host='localhost', schemas=None, consumes=None, produces=None):
        """Assemble the document skeleton, then fill in the paths."""
        self._content = {
            'swagger': '2.0',
            'info': {
                'version': version,
                'title': title,
            },
            'host': host,
            'schemes': schemas if schemas is not None else ['http'],
            'consumes': consumes if consumes is not None else ['application/json'],
            'produces': produces if produces is not None else ['application/json'],
            'paths': {},
        }
        self.parse_hug_doc(hug_doc)

    def _build_parameter(self, name, spec, method):
        """One Swagger parameter entry for a single hug input."""
        return {
            'name': name,
            # GET parameters travel in the query string, everything else in the body.
            'in': 'query' if method == 'GET' else 'body',
            # An input without a default value is mandatory.
            'required': 'default' not in spec,
            'type': self.HUG_TYPE_TRANSLATION.get(spec.get('type', ''), 'any'),
        }

    def parse_hug_doc(self, hug_doc):
        """Translate hug's {url: {METHOD: detail}} mapping into Swagger paths."""
        paths = self._content['paths']
        for url, methods in hug_doc.items():
            paths[url] = {}
            for method, detail in methods.items():
                paths[url][method.lower()] = {
                    'description': detail.get('usage', ''),
                    'parameters': [
                        self._build_parameter(name, spec, method)
                        for name, spec in detail.get('inputs', {}).items()
                    ],
                    'responses': {
                        '200': {
                            'description': 'Success',
                        }
                    }
                }

    @property
    def content(self):
        """The Swagger document serialized as a JSON string."""
        return json.dumps(self._content)
| mit | Python | |
591e9f15a3b9da59f80f81fcf0d6ddad4aeb7d6a | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/pyside/pyside6/widget_QSqlRelationalTableModel_sqlite_from_file.py | python/pyside/pyside6/widget_QSqlRelationalTableModel_sqlite_from_file.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref: https://doc.qt.io/qtforpython/PySide6/QtSql/QSqlRelationalTableModel.html?highlight=qsqlrelationaltablemodel
import sys
import sqlite3
from PySide6 import QtCore, QtWidgets
from PySide6.QtCore import Qt
from PySide6.QtWidgets import QApplication, QTableView
from PySide6.QtSql import QSqlDatabase, QSqlQuery, QSqlRelationalTableModel, QSqlRelation, QSqlRelationalDelegate
# INIT THE DATABASE #############################

connection = sqlite3.connect("employee2.db")
cursor = connection.cursor()

# Recreate the schema from scratch; the tables may not exist yet, so the
# DROP statements are allowed to fail silently.
for stale in ("t_employee", "t_country"):
    try:
        cursor.execute("DROP TABLE " + stale)
    except:
        pass

cursor.execute("CREATE TABLE t_country (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)")
cursor.execute("CREATE TABLE t_employee (id INTEGER PRIMARY KEY AUTOINCREMENT, first_name TEXT, last_name TEXT, country_id TEXT, FOREIGN KEY(country_id) REFERENCES t_country(id))")

country_rows = [
    ("France",),
    ("Belgium",),
    ("Germany",),
    ("Spain",),
    ("Italy",),
]
cursor.executemany("INSERT INTO t_country (name) VALUES(?)", country_rows)

# country_id refers to the AUTOINCREMENT ids assigned above (1 = France, ...).
employee_rows = [
    ("Jean", "Dupont", 2),
    ("Paul", "Dupond", 2),
    ("Jeanne", "Durand", 1),
    ("Anne", "Dupuit", 1),
]
cursor.executemany("INSERT INTO t_employee (first_name, last_name, country_id) VALUES(?, ?, ?)", employee_rows)

connection.commit()
connection.close()
# OPEN THE DATABASE #############################

# Re-open the SQLite file through Qt's SQL layer for the model/view classes.
db = QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName("./employee2.db")
assert db.open()

#################################################

app = QApplication(sys.argv)

table_view = QTableView()
table_view.setSortingEnabled(True)

model = QSqlRelationalTableModel()
model.setTable("t_employee")
model.setRelation(3, QSqlRelation("t_country", "id", "name")) # column 3 in table t_employee is a foreign key that maps with field id of table t_country, and that the view should present the country's name field to the user
model.select()

model.setHeaderData(0, Qt.Horizontal, "ID")
model.setHeaderData(1, Qt.Horizontal, "First Name")
model.setHeaderData(2, Qt.Horizontal, "Last Name")
# BUG FIX: the header previously read "Counrty" (typo in the visible label).
model.setHeaderData(3, Qt.Horizontal, "Country")

table_view.setModel(model)
# The relational delegate provides a combo-box editor for the country column.
table_view.setItemDelegate(QSqlRelationalDelegate(table_view))

#################################################

table_view.show()

# The mainloop of the application. The event handling starts from this point.
exit_code = app.exec()

# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
| mit | Python | |
99ffed2a53c5266f312127b7a09f86254891234e | Create 4-LDR.py | CamJam-EduKit/EduKit2 | Code/4-LDR.py | Code/4-LDR.py | # Import Libraries
import time
import RPi.GPIO as GPIO
# Set the GPIO mode (BCM numbering) and silence channel-in-use warnings.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# A variable with the LDR reading pin number (BCM numbering)
PINLDR = 27
def ReadLDR():
    """Take one LDR reading: the number of loop iterations until the pin
    stops reading low after the capacitor has been drained."""
    charge_steps = 0
    # Drive the pin low briefly to drain all charge from the capacitor.
    GPIO.setup(PINLDR, GPIO.OUT)
    GPIO.output(PINLDR, GPIO.LOW)
    time.sleep(0.1)
    # Switch the pin to input and busy-count while it still reads low.
    GPIO.setup(PINLDR, GPIO.IN)
    while GPIO.input(PINLDR) == GPIO.LOW:
        charge_steps += 1
    return charge_steps
# Print one reading per second forever (NOTE: Python 2 `print` statement).
while True:
    print ReadLDR()
    time.sleep(1) # Wait for a second
| mit | Python | |
0c4095d9b370da41f653927dc92cc4233aca2beb | Add untested LedStrip driver | lbuchy/marvin_door,lbuchy/marvin_door | LedStrip.py | LedStrip.py | import RPi.GPIO as GPIO, time, os
class RGB:
    """One LED pixel colour; class attributes double as defaults (0xff)."""
    r = 0xff
    g = 0xff
    b = 0xff

    def __init__(self, r, g, b):
        # Per-instance channel values shadow the class-level defaults.
        self.r, self.g, self.b = r, g, b
class LedStrip:
    # Handle on the SPI device; opened in __init__ (Python 2 `file()` builtin --
    # NOTE(review): Python 3 would need open()).
    spidev = None
    # Number of pixels on the strip; WriteStrip rejects other lengths.
    height = 10
    def __init__(self):
        self.spidev = file("/dev/spidev0.0", "w")
    def WriteStrip(self, pixels):
        # Push one frame to the strip; silently ignores frames whose length
        # does not match the configured strip height.
        if len(pixels) != self.height:
            return
        start = 0
        end = self.height
        step = 1
        for pixel in range(start,end,step):
            # NOTE(review): all three bytes are derived from the .b channel
            # (the .g/.r variants below are commented out) -- looks like
            # work-in-progress from the "untested driver" commit; confirm the
            # intended per-channel byte order before relying on colours.
            self.spidev.write( chr((pixels[pixel].b >> 16) & 0xff ))
            self.spidev.write( chr((pixels[pixel].b >> 8) & 0xff ))
            self.spidev.write( chr((pixels[pixel].b) & 0xff ))
            #self.spidev.write( chr(pixels[pixel].g >> 8) & 0xff )
            #self.spidev.write( chr(pixels[pixel].r >> 0) & 0xff )
            self.spidev.flush()
if __name__ == "__main__":
    # Smoke test: ramp all 10 pixels together from 0 up to 254 on every channel.
    strip = LedStrip()
    startVal = 0
    endVal = 255
    step = 1
    for val in range(startVal, endVal, step):
        pixelArr = [RGB(val,val,val)] * 10
        strip.WriteStrip(pixelArr)
        time.sleep(0.05)
| apache-2.0 | Python | |
35e8133dbf0f95a511c2eb219ba408af464afc2b | Create file | QuantifyingUncertainty/GMHConfigure,QuantifyingUncertainty/GMHConfigure | jupyter_notebook_config_template.py | jupyter_notebook_config_template.py | c.NotebookApp.ip = '*'
# Serve on a fixed non-default port (the listen address is configured above).
c.NotebookApp.port = 8998
# Headless server: do not try to open a local browser.
c.NotebookApp.open_browser = False
# TLS key/certificate used to serve the notebook over HTTPS.
c.NotebookApp.keyfile = u'/home/ubuntu/.certificates/jupyterkey.pem'
c.NotebookApp.certfile = u'/home/ubuntu/.certificates/jupytercert.pem'
# Template placeholder: substitute the hash produced by notebook.auth.passwd().
# NOTE(review): Jupyter-generated hashes normally start with 'sha1:'; confirm
# that the deployment script's 'sha2:' prefix is intentional.
c.NotebookApp.password = u'sha2:PASSWORDHASH'
| mit | Python | |
2d66bc24c883f135a9a22cd40a8b2682ec572373 | Add count_bits | muddyfish/PYKE,muddyfish/PYKE | node/count_bits.py | node/count_bits.py | from nodes import Node
class CountBits(Node):
    char = "./"
    args = 2
    results = 1
    contents = 2000

    @Node.test_func([8,2], [[1, 3]])
    @Node.test_func([12, 3], [[0, 2, 1]])
    def count_bits(self, num: int, base: int):
        """Count the number of times each digit occurs in `base`"""
        # Tallies are indexed from the highest digit value down to zero
        # (digit d lands in slot base-d-1), matching the test expectations.
        tallies = [0] * base
        while num:
            num, digit = divmod(num, base)
            tallies[base - digit - 1] += 1
        return [tallies]
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.