commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
f0b5b6a84f9b217376b54361b2afaeb19af24380 | Add containers | timothycrosley/blox,timothycrosley/blox,timothycrosley/blox | blox/containers.py | blox/containers.py | '''blox/containers.py
Contains containers that aid in group bloxs together in a logical way
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
'''
from blox.base import Container
from blox.builder import Factory
factory = Factory("Containers")
factory.add()(Container)
| mit | Python | |
2c174eeafa48302951e743ed2b9bbe91a1992899 | make sure that the file you are trying to import actually exists | felliott/scrapi,mehanig/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi,icereval/scrapi,alexgarciac/scrapi,fabianvf/scrapi,mehanig/scrapi,erinspace/scrapi,ostwald/scrapi,CenterForOpenScience/scrapi,jeffreyliu3230/scrapi,felliott/scrapi,fabianvf/scrapi | tests/utils.py | tests/utils.py | from __future__ import unicode_literals
RAW_DOC = {
'doc': str('{}'),
'docID': 'someID',
'timestamps': {
'consumeFinished': '2012-11-30T17:05:48+00:00',
'consumeStarted': '2012-11-30T17:05:48+00:00',
'consumeTaskCreated': '2012-11-30T17:05:48+00:00'
},
'filetype': 'json',
'source': 'tests'
}
RECORD = {
'title': 'Using Table Stable Carbon in Gold and STAR Isotopes',
'contributors': [
{
'prefix': 'The One And Only',
'given': 'DEVON',
'middle': 'Get The Tables',
'family': 'DUDLEY',
'suffix': 'Thirsty Boy',
'email': 'dudley.boyz@email.uni.edu',
'ORCID': 'BubbaRayDudley'
}
],
'id': {
'url': 'http://www.plosone.org/article',
'doi': '10.1371/doi.DOI!',
'serviceID': 'AWESOME'
},
'properties': {
'figures': ['http://www.plosone.org/article/image.png'],
'type': 'text',
'yep': 'A property'
},
'description': 'This study seeks to understand how humans impact\
the dietary patterns of eight free-ranging vervet monkey\
(Chlorocebus pygerythrus) groups in South Africa using stable\
isotope analysis.',
'tags': [
'behavior',
'genetics'
],
'source': 'example_pusher',
'dateCreated': '2012-11-30T17:05:48+00:00',
'dateUpdated': '2015-02-23T17:05:48+00:01',
'_id': 'yes! yes! yes!',
'count': 0
}
| apache-2.0 | Python | |
3b06894ba1aadb3460ed60e47e931065484976ee | Add a command to undo results imports for specified projects | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rsr/management/commands/undo_results_import.py | akvo/rsr/management/commands/undo_results_import.py | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
"""Undo results framework import for the specified projects
Usage:
python manage.py undo_results_import <project-id1> [<project-id2> ...]
"""
import sys
from django.core.management.base import BaseCommand
from ...models import Result
class Command(BaseCommand):
help = u"Undo results framework import for the specified projects"
def handle(self, *args, **options):
if not args:
print(__doc__)
sys.exit(1)
project_ids = map(int, args)
for id_ in project_ids:
results = Result.objects.filter(project__id=id_).exclude(parent_result=None)
print "Deleting {} results for project {}".format(results.count(), id_)
results.delete()
| agpl-3.0 | Python | |
5dde9f6aca671440253729c29530e93974921ea0 | Add a migration to add the 'Other' field to QueuedImage.why_allowed | mysociety/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextmp-popit,neavouli/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextmp-popit,openstate/yournextrepresentative,openstate/yournextrepresentative,datamade/yournextmp-popit,YoQuieroSaber/yournextrepresentative,DemocracyClub/yournextrepresentative,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,openstate/yournextrepresentative,datamade/yournextmp-popit,datamade/yournextmp-popit,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative | moderation_queue/migrations/0007_auto_20150303_1420.py | moderation_queue/migrations/0007_auto_20150303_1420.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0006_auto_20150303_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='why_allowed',
field=models.CharField(default=b'other', max_length=64, choices=[(b'public-domain', b'This photograph is free of any copyright restrictions'), (b'copyright-assigned', b'I own copyright of this photo and I assign the copyright to Democracy Club Limited in return for it being displayed on YourNextMP'), (b'profile-photo', b"This is the candidate's public profile photo from social media (e.g. Twitter, Facebook) or their official campaign page"), (b'other', b'Other')]),
preserve_default=True,
),
]
| agpl-3.0 | Python | |
065b8ec2865b0746393a427d1d078c655905241c | Add renaming PDF files | UO-SPUR/misc | rename-pdf.py | rename-pdf.py | #!/usr/bin/env python
__author__ = 'Jacob Bieker'
import os
DATA_DIRECTORY = os.path.join("test_file")
DATA = os.listdir(DATA_DIRECTORY)
file_name_dict = {}
for file_name in DATA:
split_name = file_name.split("_")
print split_name
file_name_dict.setdefault(split_name[0], [])
# Name has the extra _NUM extension
if len(split_name) > 1:
file_name_dict[split_name[0]].append(split_name[1])
else:
file_name_dict[split_name[0]].append(0)
for key in file_name_dict:
if len(file_name_dict[key]) == 1:
continue
else:
max = 0
for value in file_name_dict[key]:
if int(value) > max:
max = value
elif int(value) == 0:
path = os.path.join(DATA_DIRECTORY, str(key))
os.remove(path)
else:
path = os.path.join(DATA_DIRECTORY, str(key) + "_" + str(value))
os.remove(path)
print file_name_dict | apache-2.0 | Python | |
57b396177854636257ad5884b0deeca6a79d207a | Add libpfm4 package. (#3667) | krafczyk/spack,TheTimmy/spack,EmreAtes/spack,iulian787/spack,EmreAtes/spack,skosukhin/spack,lgarren/spack,iulian787/spack,mfherbst/spack,skosukhin/spack,LLNL/spack,TheTimmy/spack,lgarren/spack,lgarren/spack,tmerrick1/spack,matthiasdiener/spack,skosukhin/spack,lgarren/spack,krafczyk/spack,iulian787/spack,matthiasdiener/spack,mfherbst/spack,EmreAtes/spack,TheTimmy/spack,tmerrick1/spack,LLNL/spack,iulian787/spack,krafczyk/spack,matthiasdiener/spack,LLNL/spack,LLNL/spack,tmerrick1/spack,matthiasdiener/spack,mfherbst/spack,tmerrick1/spack,lgarren/spack,TheTimmy/spack,tmerrick1/spack,krafczyk/spack,EmreAtes/spack,mfherbst/spack,matthiasdiener/spack,krafczyk/spack,iulian787/spack,skosukhin/spack,TheTimmy/spack,mfherbst/spack,LLNL/spack,skosukhin/spack,EmreAtes/spack | var/spack/repos/builtin/packages/libpfm4/package.py | var/spack/repos/builtin/packages/libpfm4/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libpfm4(MakefilePackage):
"""libpfm4 is a userspace library to help
setup performance events for use with
the perf_events Linux kernel interface."""
homepage = "http://perfmon2.sourceforge.net"
url = "https://downloads.sourceforge.net/project/perfmon2/libpfm4/libpfm-4.8.0.tar.gz"
version('4.8.0', '730383896db92e12fb2cc10f2d41dd43')
# Fails to build libpfm4 with intel compiler version 16 and 17
conflicts('intel@16:17')
@property
def install_targets(self):
return ['DESTDIR={0}'.format(self.prefix),
'LIBDIR=/lib',
'INCDIR=/include',
'MANDIR=/man',
'LDCONFIG=true',
'install']
| lgpl-2.1 | Python | |
d4f498db7c2cb5da0ad0202d7a4b6a919ac896b3 | add new package (#24989) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-rply/package.py | var/spack/repos/builtin/packages/py-rply/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyRply(PythonPackage):
"""A pure Python Lex/Yacc that works with RPython."""
homepage = "https://github.com/alex/rply/"
pypi = "rply/rply-0.7.8.tar.gz"
version('0.7.8', sha256='2a808ac25a4580a9991fc304d64434e299a8fc75760574492f242cbb5bb301c9')
depends_on('py-setuptools', type='build')
depends_on('py-appdirs', type=('build', 'run'))
| lgpl-2.1 | Python | |
f627f04ebe0186b19d58619cab8b7098f5ca2e4c | Add plugin for Nova server state metrics | giorgiosironi/sensu-community-plugins,Squarespace/sensu-community-plugins,zerOnepal/sensu-community-plugins,lfdesousa/sensu-community-plugins,maoe/sensu-community-plugins,aryeguy/sensu-community-plugins,royalj/sensu-community-plugins,lenfree/sensu-community-plugins,new23d/sensu-community-plugins,tuenti/sensu-community-plugins,FlorinAndrei/sensu-community-plugins,Seraf/sensu-community-plugins,jennytoo/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,loveholidays/sensu-plugins,Squarespace/sensu-community-plugins,cmattoon/sensu-community-plugins,julienba/sensu-community-plugins,gferguson-gd/sensu-community-plugins,Seraf/sensu-community-plugins,Seraf/sensu-community-plugins,cotocisternas/sensu-community-plugins,klangrud/sensu-community-plugins,reevoo/sensu-community-plugins,lfdesousa/sensu-community-plugins,pkaeding/sensu-community-plugins,tuenti/sensu-community-plugins,Squarespace/sensu-community-plugins,thehyve/sensu-community-plugins,khuongdp/sensu-community-plugins,himyouten/sensu-community-plugins,warmfusion/sensu-community-plugins,PerfectMemory/sensu-community-plugins,petere/sensu-community-plugins,aryeguy/sensu-community-plugins,shnmorimoto/sensu-community-plugins,nilroy/sensu-community-plugins,alertlogic/sensu-community-plugins,alexhjlee/sensu-community-plugins,JonathanHuot/sensu-community-plugins,ideais/sensu-community-plugins,aryeguy/sensu-community-plugins,pkaeding/sensu-community-plugins,thehyve/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,lenfree/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,mecavity/sensu-community-plugins,nilroy/sensu-community-plugins,himyouten/sensu-community-plugins,plasticbrain/sensu-community-plugins,jbehrends/sensu-community-plugins,new23d/sensu-community-plugins,nagas/sensu-community-plugins,royalj/sensu-community-plugins,JonathanHuot/sensu-community-plugins,tuenti/sensu-community-plugins,rikaard
-groupby/sensu-community-plugins,alexhjlee/sensu-community-plugins,klangrud/sensu-community-plugins,madAndroid/sensu-community-plugins,justanshulsharma/sensu-community-plugins,cotocisternas/sensu-community-plugins,nagas/sensu-community-plugins,cotocisternas/sensu-community-plugins,klangrud/sensu-community-plugins,PerfectMemory/sensu-community-plugins,jennytoo/sensu-community-plugins,cmattoon/sensu-community-plugins,PerfectMemory/sensu-community-plugins,cread/sensu-community-plugins,leedm777/sensu-community-plugins,leedm777/sensu-community-plugins,warmfusion/sensu-community-plugins,gferguson-gd/sensu-community-plugins,pkaeding/sensu-community-plugins,estately/sensu-community-plugins,shnmorimoto/sensu-community-plugins,himyouten/sensu-community-plugins,gferguson-gd/sensu-community-plugins,estately/sensu-community-plugins,circleback/sensu-community-plugins,ideais/sensu-community-plugins,julienba/sensu-community-plugins,petere/sensu-community-plugins,maoe/sensu-community-plugins,Squarespace/sensu-community-plugins,alertlogic/sensu-community-plugins,cmattoon/sensu-community-plugins,emillion/sensu-community-plugins,madAndroid/sensu-community-plugins,luisdalves/sensu-community-plugins,intoximeters/sensu-community-plugins,cread/sensu-community-plugins,intoximeters/sensu-community-plugins,luisdalves/sensu-community-plugins,nilroy/sensu-community-plugins,loveholidays/sensu-plugins,FlorinAndrei/sensu-community-plugins,reevoo/sensu-community-plugins,julienba/sensu-community-plugins,royalj/sensu-community-plugins,nagas/sensu-community-plugins,warmfusion/sensu-community-plugins,FlorinAndrei/sensu-community-plugins,jbehrends/sensu-community-plugins,madAndroid/sensu-community-plugins,maoe/sensu-community-plugins,ideais/sensu-community-plugins,loveholidays/sensu-plugins,maoe/sensu-community-plugins,lfdesousa/sensu-community-plugins,tuenti/sensu-community-plugins,royalj/sensu-community-plugins,JonathanHuot/sensu-community-plugins,pkaeding/sensu-community-plugins,nagas/sensu-community
-plugins,justanshulsharma/sensu-community-plugins,emillion/sensu-community-plugins,tuenti/sensu-community-plugins,reevoo/sensu-community-plugins,leedm777/sensu-community-plugins,thehyve/sensu-community-plugins,khuongdp/sensu-community-plugins,alertlogic/sensu-community-plugins,jennytoo/sensu-community-plugins,new23d/sensu-community-plugins,klangrud/sensu-community-plugins,intoximeters/sensu-community-plugins,ideais/sensu-community-plugins,new23d/sensu-community-plugins,petere/sensu-community-plugins,emillion/sensu-community-plugins,luisdalves/sensu-community-plugins,plasticbrain/sensu-community-plugins,estately/sensu-community-plugins,leedm777/sensu-community-plugins,nilroy/sensu-community-plugins,zerOnepal/sensu-community-plugins,julienba/sensu-community-plugins,JonathanHuot/sensu-community-plugins,plasticbrain/sensu-community-plugins,Seraf/sensu-community-plugins,justanshulsharma/sensu-community-plugins,giorgiosironi/sensu-community-plugins,cmattoon/sensu-community-plugins,luisdalves/sensu-community-plugins,gferguson-gd/sensu-community-plugins,lfdesousa/sensu-community-plugins,cread/sensu-community-plugins,cread/sensu-community-plugins,giorgiosironi/sensu-community-plugins,estately/sensu-community-plugins,khuongdp/sensu-community-plugins,jbehrends/sensu-community-plugins,PerfectMemory/sensu-community-plugins,mecavity/sensu-community-plugins,emillion/sensu-community-plugins,intoximeters/sensu-community-plugins,circleback/sensu-community-plugins,alertlogic/sensu-community-plugins,thehyve/sensu-community-plugins,zerOnepal/sensu-community-plugins,loveholidays/sensu-plugins,zerOnepal/sensu-community-plugins,cotocisternas/sensu-community-plugins,shnmorimoto/sensu-community-plugins,circleback/sensu-community-plugins,shnmorimoto/sensu-community-plugins,plasticbrain/sensu-community-plugins,madAndroid/sensu-community-plugins,warmfusion/sensu-community-plugins,alexhjlee/sensu-community-plugins,reevoo/sensu-community-plugins,circleback/sensu-community-plugins,himyouten/sensu-
community-plugins,aryeguy/sensu-community-plugins,giorgiosironi/sensu-community-plugins,lenfree/sensu-community-plugins,jennytoo/sensu-community-plugins,jbehrends/sensu-community-plugins,alexhjlee/sensu-community-plugins,mecavity/sensu-community-plugins,lenfree/sensu-community-plugins,mecavity/sensu-community-plugins,petere/sensu-community-plugins,justanshulsharma/sensu-community-plugins,khuongdp/sensu-community-plugins | plugins/openstack/nova/nova-server-state-metrics.py | plugins/openstack/nova/nova-server-state-metrics.py | #!/usr/bin/env python
from argparse import ArgumentParser
import socket
import time
from novaclient.v3 import Client
DEFAULT_SCHEME = '{}.nova.states'.format(socket.gethostname())
def output_metric(name, value):
print '{}\t{}\t{}'.format(name, value, int(time.time()))
def main():
parser = ArgumentParser()
parser.add_argument('-u', '--user', default='admin')
parser.add_argument('-p', '--password', default='admin')
parser.add_argument('-t', '--tenant', default='admin')
parser.add_argument('-a', '--auth-url', default='http://localhost:5000/v2.0')
parser.add_argument('-S', '--service-type', default='compute')
parser.add_argument('-s', '--scheme', default=DEFAULT_SCHEME)
args = parser.parse_args()
client = Client(args.user, args.password, args.tenant, args.auth_url, service_type=args.service_type)
servers = client.servers.list()
# http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html
states = {
'ACTIVE': 0,
'BUILD': 0,
'DELETED': 0,
'ERROR': 0,
'HARD_REBOOT': 0,
'PASSWORD': 0,
'REBOOT': 0,
'REBUILD': 0,
'RESCUE': 0,
'RESIZE': 0,
'REVERT_RESIZE': 0,
'SHUTOFF': 0,
'SUSPENDED': 0,
'UNKNOWN': 0,
'VERIFY_RESIZE': 0,
}
for server in servers:
if server.status not in states:
states[server.status] = 0
states[server.status] += 1
for state, count in states.iteritems():
output_metric('{}.{}'.format(args.scheme, state.lower()), count)
if __name__ == '__main__':
main()
| mit | Python | |
c0cd62af0ee29743430156eda613665dd58a97c3 | Apply SA naming convention | indico/indico-plugins,ThiefMaster/indico-plugins,ThiefMaster/indico-plugins,indico/indico-plugins,indico/indico-plugins,indico/indico-plugins,ThiefMaster/indico-plugins,ThiefMaster/indico-plugins | chat/indico_chat/migrations/201503101131_35badbd96474_apply_naming_convention.py | chat/indico_chat/migrations/201503101131_35badbd96474_apply_naming_convention.py | """Apply naming convention
Revision ID: 35badbd96474
Revises: 1bd6c5129d29
Create Date: 2015-03-10 11:31:42.850496
"""
from alembic import op
from indico.core.db.sqlalchemy.util.bulk_rename import bulk_rename
# revision identifiers, used by Alembic.
revision = '35badbd96474'
down_revision = '1bd6c5129d29'
mapping = {
'plugin_chat.chatroom_events': {
'indexes': {
'chatroom_events_pkey': 'pk_chatroom_events',
'ix_plugin_chat_chatroom_events_chatroom_id': 'ix_chatroom_events_chatroom_id',
'ix_plugin_chat_chatroom_events_event_id': 'ix_chatroom_events_event_id',
},
'constraints': {
'chatroom_events_chatroom_id_fkey': 'fk_chatroom_events_chatroom_id_chatrooms',
}
},
'plugin_chat.chatrooms': {
'indexes': {
'chatrooms_pkey': 'pk_chatrooms',
'ix_plugin_chat_chatrooms_created_by_id': 'ix_chatrooms_created_by_id',
},
'constraints': {
'chatrooms_jid_node_custom_server_key': 'uq_chatrooms_jid_node_custom_server',
}
}
}
def upgrade():
for stmt in bulk_rename(mapping):
op.execute(stmt)
def downgrade():
for stmt in bulk_rename(mapping, True):
op.execute(stmt)
| mit | Python | |
46c1a39041d89325d849339ebab0854a91f520d4 | Create rtl-config.py | pyreflos/RasPi-Rattle | rtl-config.py | rtl-config.py | ##
## User configuration file - edit these settings to suit your own project
##
file_path = '/home/pi/RPi-RTL/images/' ## path to save images
file_prefix = 'img_' ## prefix before timestamp.jpg, if needed - e.g. a project number
use_timestamp = True ## True = timestamp in filename, False = incremental numbering
| mit | Python | |
9ad5d9668c3faf85c38d5655054b8530d2e14444 | set up migration for meta_data ingest | unicef/rhizome,unicef/rhizome,unicef/rhizome,unicef/rhizome | datapoints/migrations/0002_populate_initial_data.py | datapoints/migrations/0002_populate_initial_data.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import django.db.models.deletion
from django.conf import settings
import pandas as pd
from datapoints.models import IndicatorTag
from django.db.models import get_app, get_models
def populate_initial_data(apps, schema_editor):
xl = pd.ExcelFile('initial_data.xlsx')
all_sheets = xl.sheet_names
datapoints_app = get_app('datapoints')
for model in get_models(datapoints_app):
print model._meta.db_table
print all_sheets
if model._meta.db_table in all_sheets:
model_df = xl.parse(model._meta.db_table)
model_ids = model_df_to_data(model_df,model)
def model_df_to_data(model_df,model):
meta_ids = []
non_null_df = model_df.where((pd.notnull(model_df)), None)
list_of_dicts = non_null_df.transpose().to_dict()
for row_ix, row_dict in list_of_dicts.iteritems():
row_id = model.objects.create(**row_dict)
meta_ids.append(row_id)
return meta_ids
class Migration(migrations.Migration):
dependencies = [
('datapoints', '0001_initial'),
]
operations = [
migrations.RunPython(populate_initial_data),
]
| agpl-3.0 | Python | |
5ef00907efd5502ee3de38030f23a16ccc3a41be | Add script to create a prunable checkpoint of a densely trained model. | mrcslws/nupic.research,numenta/nupic.research,numenta/nupic.research,mrcslws/nupic.research,subutai/nupic.research,subutai/nupic.research | projects/transformers/create_prunable_checkpoint.py | projects/transformers/create_prunable_checkpoint.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Pretrained models need to be exported to be used for finetuning.
Only required argument for this script is the checkpoint folder.
Not tested for modified sparse models.
"""
import argparse
# FIXME: The experiments import Ray, but it must be imported before Pickle # noqa I001
import ray # noqa: F401, I001
from transformers import AutoModelForMaskedLM, HfArgumentParser
from experiments import CONFIGS
from nupic.research.frameworks.pytorch.model_utils import filter_params, get_module_attr
from nupic.torch.modules.sparse_weights import SparseWeightsBase
from run_args import ModelArguments
from run_utils import init_config, init_tokenizer
def convert_to_prunable_checkpoint(checkpoint_folder, experiment):
"""
This loads a dense models weights and a prunable model of similar architecture (one
with SparseWeightsBase layers), copies the weights of the former into the latter,
and then saves a new checkpoint at `{checkpoint_folder}_prunable`.
:param checkpoint_folder: path to dense checkpoint
:param experiment: name of experiment config with a prunable architecture
"""
# We'll use `sparsity=0` to ensure it's dense but prunable model.
exp_config = CONFIGS[experiment]
exp_config["config_kwargs"]["sparsity"] = 0
exp_parser = HfArgumentParser(ModelArguments)
model_args = exp_parser.parse_dict(exp_config)[0]
# Initialize prunable model and dense model.
config = init_config(model_args)
tokenizer = init_tokenizer(model_args)
prunable_model = AutoModelForMaskedLM.from_config(config)
prunable_model.resize_token_embeddings(len(tokenizer))
dense_model = AutoModelForMaskedLM.from_pretrained(checkpoint_folder)
# Determine which parameters belong to SparseWeightsBase classes.
sparse_params = filter_params(prunable_model, include_modules=[SparseWeightsBase])
sparse_dataptrs = [p.data_ptr() for p in sparse_params.values()]
# Load the dense params into the prunable params.
for n2, p2 in prunable_model.named_parameters():
# e.g. replace `linear.module.weight` with `linear.weight` when appropriate.
if p2.data_ptr() in sparse_dataptrs:
n1 = n2.replace(".module", "")
else:
n1 = n2
p1 = get_module_attr(dense_model, n1)
p2.data[:] = p1
# Save the prunable model.
new_folder_name = checkpoint_folder + "_prunable"
prunable_model.save_pretrained(new_folder_name)
print(f"Saved prunable model to:\n{new_folder_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint_folder", type=str,
help="Path to checkpoint to convert")
parser.add_argument("-e", "--experiment", choices=list(CONFIGS.keys()),
help="Available experiments", required=True)
args = parser.parse_args()
convert_to_prunable_checkpoint(**args.__dict__)
| agpl-3.0 | Python | |
9aeb9d35cd49ccd7ab1ede87d70666e34b80320c | Add tests for run docker mgmt command | Tazer/readthedocs.org,rtfd/readthedocs.org,soulshake/readthedocs.org,davidfischer/readthedocs.org,takluyver/readthedocs.org,sid-kap/readthedocs.org,sunnyzwh/readthedocs.org,pombredanne/readthedocs.org,laplaceliu/readthedocs.org,takluyver/readthedocs.org,espdev/readthedocs.org,sils1297/readthedocs.org,wijerasa/readthedocs.org,KamranMackey/readthedocs.org,sils1297/readthedocs.org,agjohnson/readthedocs.org,fujita-shintaro/readthedocs.org,asampat3090/readthedocs.org,sunnyzwh/readthedocs.org,VishvajitP/readthedocs.org,istresearch/readthedocs.org,soulshake/readthedocs.org,fujita-shintaro/readthedocs.org,mrshoki/readthedocs.org,atsuyim/readthedocs.org,attakei/readthedocs-oauth,istresearch/readthedocs.org,nikolas/readthedocs.org,CedarLogic/readthedocs.org,Tazer/readthedocs.org,michaelmcandrew/readthedocs.org,davidfischer/readthedocs.org,attakei/readthedocs-oauth,nikolas/readthedocs.org,stevepiercy/readthedocs.org,techtonik/readthedocs.org,kenshinthebattosai/readthedocs.org,mrshoki/readthedocs.org,LukasBoersma/readthedocs.org,singingwolfboy/readthedocs.org,kenshinthebattosai/readthedocs.org,d0ugal/readthedocs.org,agjohnson/readthedocs.org,dirn/readthedocs.org,clarkperkins/readthedocs.org,CedarLogic/readthedocs.org,wijerasa/readthedocs.org,clarkperkins/readthedocs.org,titiushko/readthedocs.org,GovReady/readthedocs.org,soulshake/readthedocs.org,asampat3090/readthedocs.org,wanghaven/readthedocs.org,techtonik/readthedocs.org,kenwang76/readthedocs.org,tddv/readthedocs.org,hach-que/readthedocs.org,Carreau/readthedocs.org,jerel/readthedocs.org,espdev/readthedocs.org,LukasBoersma/readthedocs.org,cgourlay/readthedocs.org,Carreau/readthedocs.org,safwanrahman/readthedocs.org,VishvajitP/readthedocs.org,raven47git/readthedocs.org,laplaceliu/readthedocs.org,sid-kap/readthedocs.org,titiushko/readthedocs.org,rtfd/readthedocs.org,CedarLogic/readthedocs.org,fujita-shintaro/readthedocs.org,kdkeyser/readthedocs.o
rg,tddv/readthedocs.org,gjtorikian/readthedocs.org,raven47git/readthedocs.org,d0ugal/readthedocs.org,fujita-shintaro/readthedocs.org,kdkeyser/readthedocs.org,stevepiercy/readthedocs.org,Tazer/readthedocs.org,GovReady/readthedocs.org,espdev/readthedocs.org,takluyver/readthedocs.org,Carreau/readthedocs.org,d0ugal/readthedocs.org,KamranMackey/readthedocs.org,jerel/readthedocs.org,KamranMackey/readthedocs.org,mhils/readthedocs.org,GovReady/readthedocs.org,stevepiercy/readthedocs.org,cgourlay/readthedocs.org,jerel/readthedocs.org,singingwolfboy/readthedocs.org,SteveViss/readthedocs.org,mrshoki/readthedocs.org,Carreau/readthedocs.org,CedarLogic/readthedocs.org,michaelmcandrew/readthedocs.org,davidfischer/readthedocs.org,royalwang/readthedocs.org,SteveViss/readthedocs.org,kdkeyser/readthedocs.org,michaelmcandrew/readthedocs.org,kenwang76/readthedocs.org,d0ugal/readthedocs.org,emawind84/readthedocs.org,wijerasa/readthedocs.org,royalwang/readthedocs.org,sunnyzwh/readthedocs.org,sid-kap/readthedocs.org,davidfischer/readthedocs.org,emawind84/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,kenshinthebattosai/readthedocs.org,hach-que/readthedocs.org,emawind84/readthedocs.org,atsuyim/readthedocs.org,LukasBoersma/readthedocs.org,wijerasa/readthedocs.org,wanghaven/readthedocs.org,royalwang/readthedocs.org,tddv/readthedocs.org,singingwolfboy/readthedocs.org,jerel/readthedocs.org,espdev/readthedocs.org,gjtorikian/readthedocs.org,mhils/readthedocs.org,SteveViss/readthedocs.org,safwanrahman/readthedocs.org,emawind84/readthedocs.org,mhils/readthedocs.org,sid-kap/readthedocs.org,VishvajitP/readthedocs.org,SteveViss/readthedocs.org,kenshinthebattosai/readthedocs.org,agjohnson/readthedocs.org,raven47git/readthedocs.org,mrshoki/readthedocs.org,kenwang76/readthedocs.org,singingwolfboy/readthedocs.org,hach-que/readthedocs.org,clarkperkins/readthedocs.org,agjohnson/readthedocs.org,attakei/readthedocs-oauth,dirn/readthedocs.org,wanghaven/readthedocs.org,VishvajitP/readthedocs.org,pomb
redanne/readthedocs.org,laplaceliu/readthedocs.org,gjtorikian/readthedocs.org,royalwang/readthedocs.org,clarkperkins/readthedocs.org,istresearch/readthedocs.org,titiushko/readthedocs.org,mhils/readthedocs.org,sils1297/readthedocs.org,takluyver/readthedocs.org,sunnyzwh/readthedocs.org,dirn/readthedocs.org,techtonik/readthedocs.org,kenwang76/readthedocs.org,michaelmcandrew/readthedocs.org,asampat3090/readthedocs.org,soulshake/readthedocs.org,techtonik/readthedocs.org,hach-que/readthedocs.org,atsuyim/readthedocs.org,titiushko/readthedocs.org,nikolas/readthedocs.org,attakei/readthedocs-oauth,asampat3090/readthedocs.org,GovReady/readthedocs.org,stevepiercy/readthedocs.org,gjtorikian/readthedocs.org,kdkeyser/readthedocs.org,cgourlay/readthedocs.org,nikolas/readthedocs.org,safwanrahman/readthedocs.org,KamranMackey/readthedocs.org,atsuyim/readthedocs.org,cgourlay/readthedocs.org,istresearch/readthedocs.org,safwanrahman/readthedocs.org,espdev/readthedocs.org,sils1297/readthedocs.org,pombredanne/readthedocs.org,laplaceliu/readthedocs.org,wanghaven/readthedocs.org,Tazer/readthedocs.org,LukasBoersma/readthedocs.org,raven47git/readthedocs.org,dirn/readthedocs.org | readthedocs/rtd_tests/tests/test_core_management.py | readthedocs/rtd_tests/tests/test_core_management.py | from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):

    '''Test run_docker command with good input and output'''

    fixtures = ['test_data']

    def setUp(self):
        # Attach a throwaway Version to the fixture-provided 'pip'
        # project so handle() has something to build.
        self.project = Project.objects.get(slug='pip')
        self.version = Version(slug='foo', verbose_name='foobar')
        self.project.versions.add(self.version)

    def _get_input(self, files=None):
        # Canned JSON payload; used both as a patch replacement for
        # Command._get_input and to compute the expected value in tests.
        return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
                '"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
                '"verbose_name": "stable", "slug": "stable"}')

    def _docker_build(data):
        # NOTE: deliberately takes no `self` -- at class-body time this is
        # a plain function, used below to patch projects.tasks.docker_build.
        if isinstance(data, Version):
            return {'html': (0, 'DOCKER PASS', '')}
        else:
            return {'html': (1, '', 'DOCKER FAIL')}

    def test_stdin(self):
        '''Test docker build command'''
        # Replacement reader for Command._get_input (bound call, hence `_`).
        def _input(_, files=None):
            return '{"test": "foobar"}'
        with patch.object(run_docker.Command, '_get_input', _input):
            cmd = run_docker.Command()
            assert cmd._get_input() == '{"test": "foobar"}'

    @patch.object(run_docker.Command, '_get_input', _get_input)
    @patch('projects.tasks.docker_build', _docker_build)
    @patch('sys.stdout', new_callable=StringIO)
    def test_good_input(self, mock_output):
        '''Test docker build command'''
        cmd = run_docker.Command()
        self.assertEqual(cmd._get_input(), self._get_input())
        cmd.handle()
        # handle() writes the build result as a single JSON line on stdout.
        self.assertEqual(
            mock_output.getvalue(),
            '{"html": [0, "DOCKER PASS", ""]}\n'
        )

    @patch('projects.tasks.docker_build', _docker_build)
    def test_bad_input(self):
        '''Test docker build command'''
        # Malformed JSON on stdin must surface as a doc_builder error
        # record, not an unhandled exception.
        with patch.object(run_docker.Command, '_get_input') as mock_input:
            with patch('sys.stdout', new_callable=StringIO) as mock_output:
                mock_input.return_value = 'BAD JSON'
                cmd = run_docker.Command()
                cmd.handle()
                self.assertEqual(
                    mock_output.getvalue(),
                    ('{"doc_builder": '
                     '[-1, "", "ValueError: No JSON object could be decoded"]}'
                     '\n')
                )
| mit | Python | |
7df706d8727064275aeccf04f5411661b33f9250 | Create show_vlans.py | rbatist/HPN-Scripting,networkingdvi/HPN-Scripting | show_vlans.py | show_vlans.py |
__version__ = '0.9'
__author__ = 'Remi Batist'
# Overview of vlans including ip-addresses in procurve-style
# An example below
# VLAN-ID | IP Address IP Subnet NAME
# ------- | --------------- --------------- ---------------
# 1 | VLAN 0001
# 6 | VLAN 0006
# 10 | 10.10.10.2 255.255.255.0 VLAN10-SERVERS
# 20 | wifi
# 30 | 10.10.30.253 255.255.255.0 VLAN30-CLIENTS
#### Importing python modules
import comware
def main():
print 'VLAN-ID' + '\t' + '| IP Address' + '\t' + ' IP Subnet' + '\t ' + ' NAME'
print '------- | --------------- --------------- ---------------'
#### Importing current information
result = comware.CLI('display vlan all', False).get_output()
vlanid = ''
vlanna = ''
vlanip = ''
vlansn = ''
found = False
#### Collecting specific items
for line in result:
if 'VLAN ID' in line:
s1 = line.rindex(':') + 1
e1 = len(line)
vlanid = line[s1:e1]
vlanip = ' '
vlansn = ' '
elif 'IPv4 address' in line:
s2 = line.rindex(':') + 2
e2 = len(line)
vlanip = line[s2:e2]
elif 'IPv4 subnet mask' in line:
s5 = line.rindex(':') + 2
e5 = len(line)
vlansn = line[s5:e5]
elif 'Name' in line:
s3 = line.rindex(':') + 2
e3 = len(line)
vlanna = line[s3:e3]
#### Printing specific items
print "%-7s | %-16s %-16s %s" % (vlanid, vlanip, vlansn, vlanna)
if __name__ == "__main__":
main()
| mit | Python | |
66831279407e7a6626b8fe0ea06c886d5191f699 | Create q24telnetexec.py | jmio/Q24,jmio/Q24 | q24telnetexec.py | q24telnetexec.py | #
# Q24 Util. for Pythonista
# 2014/12/23 : First Release
#
##############################################################################################################
import sys
import telnetlib
import time
import os
#import subprocess
from ftplib import FTP
##################################################################
#ftp = FTP("192.168.1.150","target","password","",3)
#ftp.retrlines('LIST')
#ftp.retrbinary('RETR README', open('README', 'wb').write)
#ftp.abort()
#ftp.quit()
# Connection settings for the target Q24 module.
# NOTE(review): credentials are hard-coded in cleartext.
HOST = "192.168.1.172"
user = "target"
password = "password"
EXECCOMMAND = "ls"

# Log in over telnet; each read_until uses a short 1-second timeout so a
# missing prompt does not hang the script.
tn = telnetlib.Telnet(HOST)
v = tn.read_until("login: ",1)
tn.write(user + "\r\n")
time.sleep(0.3)
if password:
    v = tn.read_until("Password: ",1)
    tn.write(password + "\r\n")
    time.sleep(0.3)
print "Connect"
v = tn.read_until(">",1)
print ""
print "Execute"
# Run the command and collect output until the prompt ('->') reappears,
# waiting at most 20 seconds.
tn.write(EXECCOMMAND+"\r\n")
r = tn.read_until("->",20)
tn.close()
# Drop the echoed command line ([1:]) and strip newline characters.
a = [i.strip("\n") for i in r.split("\r")[1:]]
print "-- Result from "+HOST+" --"
for i in a:
    print i
print "-- END --"
| mit | Python | |
991d40c62a7a44fc94ebd83e6726e5beaf867728 | Add exercise Chuck Norris | AntoineAugusti/katas,AntoineAugusti/katas,AntoineAugusti/katas | codingame/easy/chuck_norris.py | codingame/easy/chuck_norris.py | def writeSegment(type, length):
out = ""
if (type == 1):
out += "0 "
else:
out += "00 "
out += '0' * length
return out
def inputToBits(text):
    """Return *text* as the concatenation of each character's 7-bit
    (zero-padded) binary character code."""
    return "".join(format(ord(ch), "07b") for ch in text)
# Read the message, convert it to a 7-bit binary stream and run-length
# encode it in Chuck Norris notation (Python 2: raw_input / print).
bits = inputToBits(raw_input())
answer = ""
currentBit = int(bits[0])
lengthSequence = 1
# Skip the first and the last bits
for bit in bits[1:-1]:
    bit = int(bit)
    if bit == currentBit:
        lengthSequence += 1
    else:
        # The current run ended: emit it and start a new one.
        answer += writeSegment(currentBit, lengthSequence) + " "
        currentBit = bit
        lengthSequence = 1
# The final bit either extends the last run or forms a run of its own.
lastBit = int(bits[-1])
if lastBit == currentBit:
    answer += writeSegment(currentBit, lengthSequence + 1)
else:
    # Write the old sequence
    answer += writeSegment(currentBit, lengthSequence) + " "
    # Do not omit the last bit
    answer += writeSegment(lastBit, 1)
print answer
1d1a95261c5dc057e1e739a459bbf7d1574ce808 | add all files into repo | mark14wu/qqbot_config | plugins/ordersong.py | plugins/ordersong.py | # -*- coding: utf-8 -*-
import pycurl
from io import BytesIO
from urllib.parse import urlencode
import sys
# QQ group ids in which song ordering is enabled.
group_list = ["596776383", "513096350", "54840756", "662936261"]
# 596776383 = EFZers_2020!
# 513096350 = 1/2EFZers
# 54840756 = EFZ
# 662936261 = 2020届6班 (Class 6 of 2020)
# Trigger phrases for ordering a song / showing a ranking.
order_keywords = ['点歌', '来一首', '来首']
rank_keywords = ['点歌', '排行']


def onQQMessage(bot, contact, member, content):
    """qqbot message hook: on an order command, search Netease Music for
    the requested song and post an announcement plus a song link."""
    # NOTE(review): content[:2] is two characters, so the three-character
    # keyword '来一首' in order_keywords can never match here -- confirm.
    if content[:2] in order_keywords and contact.qq in group_list:
        user_input = content[3:]
        songname = user_input.split(',')[0]
        artist = None
        # Optional artist after a comma; ignored below -- 'artist' is
        # overwritten by the response loop later.
        try:
            artist = user_input.split(',')[1]
        except:
            pass
        sys.stdout.buffer.write(songname.encode('utf8'))
        # try:
        # Query the (undocumented) Netease search API via pycurl.
        post_data = {'s': songname, 'limit': 10, 'type': 1, 'offset': 0}
        postfields = urlencode(post_data)
        buffer = BytesIO()
        c = pycurl.Curl()
        c.setopt(c.URL, 'http://music.163.com/api/search/get/')
        c.setopt(c.WRITEDATA, buffer)
        c.setopt(pycurl.COOKIEFILE, "appver=1.5.2;")
        c.setopt(c.POSTFIELDS, postfields)
        c.perform()
        c.close()
        body = buffer.getvalue()
        body = body.decode('iso-8859-1')
        # 'null' is bound so eval() can parse JSON null literals.
        # SECURITY(review): eval() on network-supplied data can execute
        # arbitrary code -- json.loads would be the safe alternative.
        null = 0
        body = eval(body)
        # Take the first search hit only.
        id = body['result']['songs'][0]['id']
        name = body['result']['songs'][0]['name']
        artists = body['result']['songs'][0]['artists']
        artists_list_string = ""
        for artist in artists:
            artists_list_string += artist['name'] + ', '
        # Drop the trailing ", " separator.
        artists_list_string = artists_list_string[:-2]
        # order_content = "http://music.163.com/#/song?id=" + str(id)
        order_content1 = member.name +' 点了一首 ' + artists_list_string + ' 的 ' + name +' 送给大家!'
        order_content2 = "http://music.163.com/song/" + str(id) + "?userid=52663812"
        bot.SendTo(contact, order_content1)
        bot.SendTo(contact, order_content2)
        # print('\n')
        # print(order_content)
        # except:
        # bot.SendTo(contact, '点歌' + songname + '失败!')


rank_flag = True
| mit | Python | |
e4cde9db9f9b99622cac8fa88f4acab1fce41d80 | add myrename | jamesp9/dotfiles,jamesp9/dotfiles | bin/myrename.py | bin/myrename.py | #!/usr/bin/env python3
import sys
import shutil
import re
import os
# Rename the file given on the command line to a cleaned-up name:
# spaces, commas, parentheses and brackets become dots, runs of dots
# are collapsed, and a trailing dot (before the extension) is stripped.
if len(sys.argv) > 1:
    src_filename = sys.argv[1]
else:
    print("Please supply filename")
    sys.exit()

if not os.path.exists(src_filename):
    print("{} doesn't exist".format(src_filename))
    sys.exit()

# Work on the name only; the extension is re-attached untouched.
dst_filename, dst_extension = os.path.splitext(src_filename)

for character in [' ', ',', '(', ')', '[', ']']:
    dst_filename = dst_filename.replace(character, '.')

# Collapse any run of consecutive dots (the original single
# replace('..', '.') left '..' behind for 3+ adjacent separators),
# then drop a trailing dot.  Raw strings avoid invalid-escape warnings.
dst_filename = re.sub(r'\.{2,}', '.', dst_filename)
dst_filename = re.sub(r'\.$', '', dst_filename)

print("Moving {} => {}".format(src_filename, dst_filename + dst_extension))
shutil.move(src_filename, dst_filename + dst_extension)
| mit | Python | |
92ec039cdecffd7ec03f5954dfe5c69499921ba7 | Make consonants | itfoundry/hindkit,itfoundry/hindkit | lib/hindkit/_lab.py | lib/hindkit/_lab.py | #!/usr/bin/env AFDKOPython
# encoding: UTF-8
from __future__ import division, absolute_import, print_function, unicode_literals
import collections
import pytoml
enum = collections.namedtuple
p = enum(
"Property",
"""
Vowel Consonant
Stop Nasal Approximant Fricative
Guttural Palatal Retroflex Dental Labial
Voiced Aspirated
"""
)
class Letter(object):
    """A named letter carrying a set of phonological properties.

    ``prop in letter`` tests whether the letter has that property.
    """

    def __init__(self, name, properties):
        self.name = name
        # ``properties`` may be any iterable (including a generator);
        # materialise it into a set for O(1) membership tests.
        self.properties = set(properties)

    def __contains__(self, item):
        return item in self.properties
class Consonant(object):
    """Namespace holding one ``Letter`` attribute per consonant.

    ``Consonant.K``, ``Consonant.KH`` etc. are attached below from DATA;
    ``Consonant.list`` keeps all of them in definition order.
    """
    # Registry of all consonant Letters, in DATA order.  Kept under the
    # name ``list`` (shadowing the builtin only inside this namespace)
    # because the original code accessed ``Consonant.list``.
    list = []


DATA = """
K: Consonant Guttural Stop,
KH: Consonant Guttural Stop Aspirated,
G: Consonant Guttural Stop Voiced,
GH: Consonant Guttural Stop Voiced Aspirated,
NG: Consonant Guttural Nasal Voiced,
C: Consonant Palatal Stop,
CH: Consonant Palatal Stop Aspirated,
J: Consonant Palatal Stop Voiced,
JH: Consonant Palatal Stop Voiced Aspirated,
NY: Consonant Palatal Nasal Voiced,
TT: Consonant Retroflex Stop,
TTH: Consonant Retroflex Stop Aspirated,
DD: Consonant Retroflex Stop Voiced,
DDH: Consonant Retroflex Stop Voiced Aspirated,
NN: Consonant Retroflex Nasal Voiced,
T: Consonant Dental Stop,
TH: Consonant Dental Stop Aspirated,
D: Consonant Dental Stop Voiced,
DH: Consonant Dental Stop Voiced Aspirated,
N: Consonant Dental Nasal Voiced,
P: Consonant Labial Stop,
PH: Consonant Labial Stop Aspirated,
B: Consonant Labial Stop Voiced,
BH: Consonant Labial Stop Voiced Aspirated,
M: Consonant Labial Nasal Voiced,
Y: Consonant Approximant Voiced Palatal,
R: Consonant Approximant Voiced Retroflex,
L: Consonant Approximant Voiced Dental,
V: Consonant Approximant Voiced Labial,
SH: Consonant Fricative Palatal,
SS: Consonant Fricative Retroflex,
S: Consonant Fricative Dental,
H: Consonant Fricative Voiced Guttural,
"""

# DATA is a comma-separated list of "NAME: prop prop ..." entries; the
# trailing comma yields an empty final element, hence the [:-1].
for item in DATA.split(",")[:-1]:
    name, _, properties = item.partition(":")
    name = name.strip()
    properties = properties.split()
    # Resolve each property word to its token on ``p`` and register the
    # Letter as both a class attribute and a registry entry.  (The
    # original called Consonant.__setattr__/__getattr__ with the wrong
    # arity and appended to a nonexistent Consonant.list, so it raised
    # at import time.)
    # NOTE(review): ``p`` is the namedtuple class, so these tokens are
    # its field descriptors -- unique hashable objects; confirm intent.
    letter = Letter(name, (getattr(p, prop) for prop in properties))
    setattr(Consonant, name, letter)
    Consonant.list.append(letter)

# Quick visual check of what was built (the original printed an
# undefined variable ``l``).
print({letter.name: letter.properties for letter in Consonant.list})
| mit | Python | |
ab553873c782a7975e6b2cb3d240d5b9751c7a44 | Add API for RepositoryLayout. | jelmer/subvertpy,jelmer/subvertpy | layout.py | layout.py | # Copyright (C) 2005-2007 Jelmer Vernooij <jelmer@samba.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class RepositoryLayout:
    """Describes a repository layout.

    Abstract interface: subclasses map between (project, branch/tag
    name) pairs and paths inside a Subversion repository.
    """

    def get_tag_path(self, name, project=""):
        """Return the path at which the tag with specified name should be found.

        :param name: Name of the tag.
        :param project: Optional name of the project the tag is for. Can include slashes.
        :return: Path of the tag.
        """
        raise NotImplementedError

    def get_branch_path(self, name, project=""):
        """Return the path at which the branch with specified name should be found.

        :param name: Name of the branch.
        :param project: Optional name of the project the branch is for. Can include slashes.
        :return: Path of the branch.
        """
        raise NotImplementedError

    def parse(self, path):
        """Parse a path.

        :return: Tuple with type ('tag' or 'branch'), project name, branch name and path
            inside the branch
        """
        raise NotImplementedError

    def get_branches(self, project="", revnum=None):
        """Retrieve a list of paths that refer to branches in a specific revision.
        """
        raise NotImplementedError

    def get_tags(self, project="", revnum=None):
        """Retrieve a list of paths that refer to tags in a specific revision.
        """
        raise NotImplementedError
| lgpl-2.1 | Python | |
cbaf4e86c4409735a8f011f5a8f801a34278c21c | Increase text index property size | VinnieJohns/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,plamut/ggrc-core | src/ggrc/migrations/versions/20170112013716_421b2179c02e_update_fulltext_index.py | src/ggrc/migrations/versions/20170112013716_421b2179c02e_update_fulltext_index.py | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Update fulltext index.
Create Date: 2017-01-12 01:37:16.801973
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '421b2179c02e'
down_revision = '177a979b230a'
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # Widen fulltext_record_properties.property from VARCHAR(64) to
    # VARCHAR(250) so longer property names can be indexed.
    op.alter_column(
        "fulltext_record_properties",
        "property",
        existing_type=sa.String(length=64),
        type_=sa.String(length=250),
        nullable=False
    )


def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # Revert to the 64-character limit.  NOTE(review): existing values
    # longer than 64 characters may be truncated or rejected by the
    # database backend here -- verify before running a downgrade.
    op.alter_column(
        "fulltext_record_properties",
        "property",
        existing_type=sa.String(length=250),
        type_=sa.String(length=64),
        nullable=False
    )
| apache-2.0 | Python | |
fa5a570af022404d7a68f2d58851033c75a60920 | add import script for Hartlepool | chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_hartlepool.py | polling_stations/apps/data_collection/management/commands/import_hartlepool.py | from data_collection.management.commands import BaseXpressWebLookupCsvImporter
class Command(BaseXpressWebLookupCsvImporter):
    """Polling-station importer for Hartlepool (council E06000001)."""

    council_id = 'E06000001'
    # The same trimmed WebLookup export serves both addresses and stations.
    addresses_name = 'HartlepoolProperty Post Code New.csv'
    stations_name = 'HartlepoolProperty Post Code New.csv'
    elections = ['mayor.tees-valley.2017-05-04']

    # Hartlepool use Xpress, but they've provided a slightly trimmed down
    # version of the WebLookup export. We need to customise a bit..
    station_postcode_field = None
    station_address_fields = [
        'pollingplaceaddress1',
        'pollingplaceaddress2',
    ]
    station_id_field = 'pollingplaceid'
    easting_field = 'pollingplaceeasting'
    northing_field = 'pollingplacenorthing'

    def station_record_to_dict(self, record):
        # Address text is assembled from station_address_fields by the
        # base class helper; postcode is deliberately blank and location
        # is left None (easting/northing are configured above instead).
        address = self.get_station_address(record)
        location = None
        return {
            'internal_council_id': getattr(record, self.station_id_field).strip(),
            'postcode' : '',
            'address' : address.strip(),
            'location' : location
        }
| bsd-3-clause | Python | |
241ac6d844febf829f6442897ebf547a291e5db4 | Add Summarization API code for blog post | IndicoDataSolutions/SuperCell | summarization/summarization.py | summarization/summarization.py | import indicoio
import csv
indicoio.config.api_key = 'YOUR_API_KEY'
def clean_article(article):
    # Flatten newlines and re-encode Windows-1252 input as UTF-8 so the
    # indico API accepts it.  Python 2 only: str.decode() does not exist
    # on Python 3 text strings.
    return article.replace("\n", " ").decode('cp1252').encode('utf-8', 'replace')


def clean_articles(article_list):
    # data processing: clean up new lines and convert strings into utf-8 so the indico API can read the data
    # put all articles into a list for easy batch processing
    cleaned_articles = [clean_article(text) for row in article_list for text in row]
    print "Articles cleaned and ready for batch processing!"
    return cleaned_articles


def get_summary(cleaned_articles):
    # get article summaries
    # One API call per article; network-bound.
    summary = [indicoio.summarization(item) for item in cleaned_articles]
    # clean up result for easy readability
    print "Here are the summaries for all %d articles:" % (len(summary))
    for line in summary:
        print "\n" + " ".join(line)


if __name__ == "__main__":
    # Each CSV row may hold several article cells; every cell is cleaned
    # and summarized.
    with open('articles.csv', 'rU') as f:
        article_list = csv.reader(f)
        cleaned_articles = clean_articles(article_list)
        get_summary(cleaned_articles)
| mit | Python | |
8e7e269a9d898d93fc93e666f09114dbf14d73ef | Add script to enhance CSV files with Presto API | mbeckett7/mars-reports-project | mars_enhance_csv.py | mars_enhance_csv.py | #!/usr/bin/env python
'''
Script for enhancing MARS reports with data from the HOLLIS Presto API and the MARS transactions reports.
Created for the Harvard Library ITS MARS Reports Pilot Project, 2014.
'''
import csv
import glob
import requests
import time
from lxml import html
bib_dict = {} # Dictionary of HOLLIS bib numbers

# Get bib numbers from all of the current report CSV files
for file in glob.glob('*.csv'):
    with open(file, 'rb') as mars_csv:
        reader = csv.reader(mars_csv)
        bib_row = ''
        for index, row in enumerate(reader):
            if index == 0:
                # Header row: find which column holds the bib number.
                if 'Bib No' in row[0]: # Check column 1
                    bib_row = 0
                elif 'Bib No' in row[1]: # Check column 2
                    bib_row = 1
            else:
                try:
                    if ',' in row[bib_row]: # Get only first bib number if there are multiple ones (e.g., in R00)
                        bib_dict.setdefault(row[bib_row].split(',')[0], None)
                    else: # Otherwise, get the single bib number
                        bib_dict.setdefault(row[bib_row], None)
                except:
                    # NOTE(review): bare except also hides rows where no
                    # 'Bib No' header was found (bib_row stays '').
                    pass

# Get data from HOLLIS Presto API
# Current settings: LDR 06 (type of record), 008 35-37 (language code), and sublibraries and collection codes
print 'Waiting for Presto API to process', len(bib_dict), 'records ...'
for bib, fields in bib_dict.items(): # bib, fields as key, value
    libraries = []
    marc_url = 'http://webservices.lib.harvard.edu/rest/marc/hollis/' + bib
    presto = requests.get(marc_url)
    # Strip the MARCXML namespace declaration so plain xpath works below.
    marc_xml = presto.content.replace('<record xmlns="http://www.loc.gov/MARC21/slim" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/MARC21/slim http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd">','<record>')
    marc_record = html.fromstring(marc_xml)
    ldr06 = marc_record.xpath('//leader/text()')[0][6] #Get LDR byte 06 (type of record)
    language = marc_record.xpath('//controlfield[@tag="008"]/text()')[0][35:38] #Get 008 bytes 35-37 (language code)
    own = marc_record.xpath('//datafield[@tag="OWN"]/subfield/text()') #Get list of OWN (holding library) fields
    collection = marc_record.xpath('//datafield[@tag="852"]/subfield[@code="c"]/text()') #Get collection code from 852 $c
    for (i, j) in zip(own, collection): #Combine own field and collection code and format as a text string
        libraries.append( i + ' (' + j + ')')
    libraries = '; '.join(libraries)
    bib_dict[bib] = [ldr06, language, libraries]
    # NOTE(review): 'index' is left over from the previous loop and never
    # changes here, so the same sleep branch runs on every iteration.
    if index % 25 == 0: # Delay to avoid being locked out of API
        time.sleep(2)
    else:
        time.sleep(1)

enhanced_dict = {} # Dictionary of enhanced data

# Add data to CSV files
for file in glob.glob('*.csv'):
    with open(file, 'rb') as mars_csv:
        reader = csv.reader(mars_csv)
        enhanced_rows = []
        enhanced_file = file[:-4] + '_enhanced.csv'
        for index, row in enumerate(reader):
            # 'row[:-3] += [...]' inserts the three new values before the
            # last three existing columns of each row -- presumably so
            # trailing bookkeeping columns stay last; verify per report.
            if index == 0:
                row[:-3] += ['LDR 06','Language','Libraries']
            elif row[0] in bib_dict:
                row[:-3] += bib_dict[row[0]]
            elif row[1] in bib_dict:
                row[:-3] += bib_dict[row[1]]
            else:
                row[:-3] += ['','','']
            enhanced_rows.append(row)
        enhanced_dict[enhanced_file] = enhanced_rows
        # TO DO: Exclude reports without bib numbers (e.g., authority reports) from enhancement
        # TO DO: Split music records (LDR 06) into separate files
        # TO DO: Split 'No Replacement Found' (R04) into separate files

for csv_file, csv_data in enhanced_dict.items():
    with open(csv_file, 'wb') as enhanced_csv:
        writer = csv.writer(enhanced_csv)
        writer.writerows(csv_data)
        #TO DO: Encoding is not correct for enhanced files; to be fixed
        print csv_file, 'has been created'
| mit | Python | |
49b8d12e03088950d960105414294af8a56e1393 | Create get_genome_size.py | KarrLab/kinetic_datanator,KarrLab/kinetic_datanator | kinetic_datanator/data_source/array_express_tools/get_genome_size.py | kinetic_datanator/data_source/array_express_tools/get_genome_size.py | import numpy as np
from ete3 import NCBITaxa
def get_genome_size(organism_name):
    # The third element from the end of the lineage is taken as the
    # domain (e.g. 'Bacteria' / 'Eukaryota').
    # NOTE(review): this assumes a fixed lineage shape -- confirm.
    domain = get_taxonomic_lineage(organism_name)[-3:-2][0]
    if domain == "Bacteria":
        file = open('number_of_prokaryote_genes.txt')
    if domain == 'Eukaryota':
        file = open('number_of_eukaryote_genes.txt')
    # NOTE(review): 'file' is unbound (NameError) for any other domain,
    # and the handle is never closed.
    lines = file.readlines()
    lines = [line.split(" ") for line in lines]
    total = []
    for line in lines:
        # Column 12 appears to hold a gene count, with '-' meaning
        # missing -- verify against the data file's format.
        if line[0] == organism_name:
            if not line[12] == '-':
                total.append(int(line[12]))
    print total
    return np.average(total)
def get_taxonomic_lineage(baseSpecies):
    """Return the NCBI taxonomic lineage of *baseSpecies* as a list of
    rank names, most specific first (i.e. the species itself leads).

    :param baseSpecies: scientific name resolvable by NCBITaxa.
    """
    ncbi = NCBITaxa()
    # Resolve the name to its first matching taxid.
    baseSpecies = ncbi.get_name_translator([baseSpecies])[baseSpecies][0]
    lineage = ncbi.get_lineage(baseSpecies)
    names = ncbi.get_taxid_translator(lineage)
    chain = [names[taxid] for taxid in lineage]
    # Reverse root-first order to leaf-first (replaces the original
    # hand-written index-walking reverse loop).
    return chain[::-1]
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print statements; performs network /
    # local-database lookups via ete3).
    print get_genome_size('Mycoplasma pneumoniae')
    print get_genome_size('Mus musculus')
    print get_genome_size('Homo sapiens')
| mit | Python | |
4efe6903ac54cefd9871965853f63ec8841b6e4b | add new splitted countries and remove old ones | geometalab/osmaxx,geometalab/drf-utm-zone-info,geometalab/osmaxx-frontend,geometalab/osmaxx,geometalab/osmaxx,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/drf-utm-zone-info | osmaxx/excerptexport/migrations/0039_update_countries_20160528_2049.py | osmaxx/excerptexport/migrations/0039_update_countries_20160528_2049.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-28 18:49
from __future__ import unicode_literals
from django.db import migrations
from osmaxx.utils.polyfile_helpers import get_polyfile_names_to_file_mapping
def update_excerpt(excerpt, polyfile_path):
    # Imported lazily so this migration module can be loaded even when
    # the geometry helpers' dependencies are unavailable.
    from osmaxx.utils.polyfile_helpers import polyfile_to_geos_geometry
    geometry = polyfile_to_geos_geometry(polyfile_path)
    excerpt.bounding_geometry = geometry
    excerpt.save()


def update_countries(apps, schema_editor):  # noqa
    """Create or refresh one public country Excerpt per shipped polyfile
    and delete country excerpts whose polyfile no longer exists."""
    Excerpt = apps.get_model("excerptexport", "Excerpt")  # noqa
    done_countries = []
    for name, polyfile_path in get_polyfile_names_to_file_mapping().items():
        done_countries.append(name)
        existing_excerpts = list(Excerpt.objects.filter(excerpt_type='country', name=name))
        if len(existing_excerpts) == 0:
            excerpt = Excerpt.objects.create(
                is_public=True,
                name=name,
                excerpt_type='country',
            )
            update_excerpt(excerpt, polyfile_path)
        else:
            # Refresh the geometry of every existing excerpt of that name.
            for excerpt in existing_excerpts:
                update_excerpt(excerpt, polyfile_path)

    # remove old countries, yes, this deletes the existing exports with those countries as well!
    Excerpt.objects.filter(excerpt_type='country').exclude(name__in=done_countries).delete()


class Migration(migrations.Migration):
    # Data-only migration; no reverse operation is provided.

    dependencies = [
        ('excerptexport', '0038_remove_outputfile_file_old'),
    ]

    operations = [
        migrations.RunPython(update_countries)
    ]
| mit | Python | |
6612fffdb5d45b3752851d6d1c692753d6827795 | Update user module URLS | vignanl/Plinth,kkampardi/Plinth,jvalleroy/plinth-debian,jvalleroy/plinth-debian,kkampardi/Plinth,kkampardi/Plinth,kkampardi/Plinth,harry-7/Plinth,jvalleroy/plinth-debian,freedomboxtwh/Plinth,freedomboxtwh/Plinth,freedomboxtwh/Plinth,vignanl/Plinth,harry-7/Plinth,jvalleroy/plinth-debian,harry-7/Plinth,freedomboxtwh/Plinth,jvalleroy/plinth-debian,kkampardi/Plinth,harry-7/Plinth,freedomboxtwh/Plinth,harry-7/Plinth,vignanl/Plinth,vignanl/Plinth,vignanl/Plinth | plinth/modules/users/urls.py | plinth/modules/users/urls.py | #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
URLs for the Users module
"""
from django.conf.urls import patterns, url
from . import views
# Route table for the users module.  All views are class-based; the
# <slug> group captures the username being acted on.
urlpatterns = patterns(
    '',
    url(r'^sys/users/$', views.UserList.as_view(), name='index'),
    url(r'^sys/users/create/$', views.UserCreate.as_view(), name='create'),
    url(r'^sys/users/(?P<slug>[\w.@+-]+)/edit/$', views.UserUpdate.as_view(),
        name='edit'),
    url(r'^sys/users/(?P<slug>[\w.@+-]+)/delete/$', views.UserDelete.as_view(),
        name='delete'),
    url(r'^sys/users/(?P<slug>[\w.@+-]+)/change_password/$',
        views.UserChangePassword.as_view(), name='change_password'),
)
| #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
URLs for the Users module
"""
from django.conf.urls import patterns, url
from . import views
# Route table (older layout: the action precedes the username and URLs
# have no trailing slash).  The prefix argument applies only to string
# view names; all views here are callables.
urlpatterns = patterns(
    'plinth.modules.users.views',
    url(r'^sys/users/$', views.UserList.as_view(), name='index'),
    url(r'^sys/users/create/$', views.UserCreate.as_view(), name='create'),
    url(r'^sys/users/edit/(?P<slug>[\w.@+-]+)$', views.UserUpdate.as_view(),
        name='edit'),
    url(r'^sys/users/delete/(?P<slug>[\w.@+-]+)$', views.UserDelete.as_view(),
        name='delete'),
    url(r'^sys/users/change_password/(?P<slug>[\w.@+-]+)$',
        views.UserChangePassword.as_view(), name='change_password'),
)
| agpl-3.0 | Python |
bb43a2e63f7f7c337b01ef855d426a84b73eeee5 | Add a command prototype to list all items from a playlist | Parisson/Telemeta,Parisson/Telemeta,Parisson/Telemeta,Parisson/Telemeta | telemeta/management/commands/telemeta-export-items-from-user-playlists.py | telemeta/management/commands/telemeta-export-items-from-user-playlists.py | from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils import translation
from telemeta.models import Playlist, MediaCollection, MediaItem
class Command(BaseCommand):
    """Collect every media item referenced by a given user's playlists.

    Playlist resources may point at collections (expanded into all of
    their items) or at single items.  Despite the help text, the current
    implementation only *prints* the collected items; nothing is
    exported yet.
    """
    help = "Export media files from playlists of a given user"
    args = "username"

    def handle(self, *args, **options):
        username = args[0]
        user = User.objects.get(username=username)
        playlists = user.playlists.all()
        items = []
        for playlist in playlists:
            resources = playlist.resources.all()
            for resource in resources:
                if resource.resource_type == 'collection':
                    # A collection resource contributes every item it
                    # contains.
                    collection = MediaCollection.objects.get(id=resource.resource_id)
                    for item in collection.items.all():
                        items.append(item)
                elif resource.resource_type == 'item':
                    item = MediaItem.objects.get(id=resource.resource_id)
                    items.append(item)
        print(items)
| agpl-3.0 | Python | |
811b51c9d3003dc156f8d7ea005182a0be875e32 | add utils for sorting functionality with missing values | markovianhq/bonspy | bonspy/utils.py | bonspy/utils.py | def compare_vectors(x, y):
for x_i, y_i in zip(x, y):
comparison = _compare(x_i, y_i)
if comparison == 0:
continue
else:
return comparison
return 0
def _compare(x, y):
if x is not None and y is not None:
return int(x > y) - int(x < y)
elif x is not None and y is None:
return -1
elif x is None and y is not None:
return 1
else:
return 0
| bsd-3-clause | Python | |
b3e1b721d6439cf8ba69f40536b4fb4e6be0b1ec | fix beaker.py for python3, add that as new file in beaker3.py. next need to make it dynamically choose which to load. | brosander/beaker-notebook,mattyb149/beaker-notebook,vital-ai/beaker-notebook,gef756/beaker-notebook,sirinath/beaker-notebook,maxyeg/beaker-notebook,Chasego/beaker-notebook,Chasego/beaker-notebook,maxyeg/beaker-notebook,mattyb149/beaker-notebook,sirinath/beaker-notebook,sirinath/beaker-notebook,codeaudit/beaker-notebook,ScottPJones/beaker-notebook,mattyb149/beaker-notebook,mattyb149/beaker-notebook,codeaudit/beaker-notebook,gef756/beaker-notebook,codeaudit/beaker-notebook,bradparks/beaker-notebook,gef756/beaker-notebook,brosander/beaker-notebook,maxyeg/beaker-notebook,gef756/beaker-notebook,bradparks/beaker-notebook,maxyeg/beaker-notebook,brosander/beaker-notebook,Chasego/beaker-notebook,maxyeg/beaker-notebook,vital-ai/beaker-notebook,brosander/beaker-notebook,Chasego/beaker-notebook,Chasego/beaker-notebook,mattyb149/beaker-notebook,codeaudit/beaker-notebook,gef756/beaker-notebook,sirinath/beaker-notebook,sirinath/beaker-notebook,ScottPJones/beaker-notebook,brosander/beaker-notebook,bradparks/beaker-notebook,gef756/beaker-notebook,vital-ai/beaker-notebook,maxyeg/beaker-notebook,vital-ai/beaker-notebook,ScottPJones/beaker-notebook,vital-ai/beaker-notebook,ScottPJones/beaker-notebook,ScottPJones/beaker-notebook,bradparks/beaker-notebook,bradparks/beaker-notebook,bradparks/beaker-notebook,vital-ai/beaker-notebook,sirinath/beaker-notebook,mattyb149/beaker-notebook,Chasego/beaker-notebook,codeaudit/beaker-notebook,bradparks/beaker-notebook,brosander/beaker-notebook,gef756/beaker-notebook,sirinath/beaker-notebook,vital-ai/beaker-notebook,Chasego/beaker-notebook,codeaudit/beaker-notebook,mattyb149/beaker-notebook,brosander/beaker-notebook | plugin/ipythonPlugins/src/dist/ipython/beaker3.py | plugin/ipythonPlugins/src/dist/ipython/beaker3.py | # Copyright 2014 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, urllib.request, urllib.parse, urllib.error, urllib.request, urllib.error, urllib.parse, json, pandas, yaml
# should be inner class to Beaker
class DataFrameEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialise pandas objects.

    DataFrames become ``{column: [values]}`` dicts and Series become
    plain ``{index: value}`` dicts; anything else falls through to the
    stock JSONEncoder behaviour.
    """
    # TODO: similarly handle Panels; make this extensible by the user to
    # handle their own types.

    def default(self, obj):
        # isinstance() also covers subclasses, unlike the original
        # exact-type comparison.
        if isinstance(obj, pandas.DataFrame):
            # The 'outtype' keyword was renamed to 'orient' in pandas;
            # the old spelling raises TypeError on any recent version.
            return obj.to_dict(orient='list')
        if isinstance(obj, pandas.Series):
            return obj.to_dict()
        return json.JSONEncoder.default(self, obj)
class Beaker:
    """Runtime support for Python code in Beaker.

    Talks to the Beaker core server over HTTP basic auth.  The
    connection parameters come from environment variables injected by
    Beaker, so importing this module outside a Beaker session raises
    KeyError at class-definition time.
    """
    session_id = ''
    core_url = '127.0.0.1:' + os.environ['beaker_core_port']
    # The opener is installed globally when the class body executes, so
    # every urlopen() below authenticates against the core server.
    password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, core_url, 'beaker',
                              os.environ['beaker_core_password'])
    urllib.request.install_opener(urllib.request.build_opener(urllib.request.HTTPBasicAuthHandler(password_mgr)))

    def set4(self, var, val, unset, sync):
        # Set (or, with unset=True, delete) *var* in the notebook
        # namespace.  With sync=False the server returns before the
        # write completes.  Raises NameError on any non-'ok' reply.
        args = {'name': var, 'session':self.session_id, 'sync':sync}
        if not unset:
            args['value'] = json.dumps(val, cls=DataFrameEncoder)
        req = urllib.request.Request('http://' + self.core_url + '/rest/namespace/set',
                                     urllib.parse.urlencode(args).encode('utf8'))
        conn = urllib.request.urlopen(req)
        reply = conn.read().decode("utf-8")
        if reply != 'ok':
            raise NameError(reply)

    def get(self, var):
        # Fetch *var* from the notebook namespace; raises NameError when
        # the server reports it undefined.
        req = urllib.request.Request('http://' + self.core_url + '/rest/namespace/get?' +
                                     urllib.parse.urlencode({
                                         'name': var,
                                         'session':self.session_id}))
        conn = urllib.request.urlopen(req)
        # NOTE(review): yaml.load without an explicit Loader parses the
        # server response; yaml.safe_load would be safer -- confirm.
        result = yaml.load(conn.read()) # would use json.loads but it returns unicode
        if not result['defined']:
            raise NameError('name \'' + var + '\' is not defined in notebook namespace')
        return result['value']
# Module-level singleton through which the convenience functions below talk
# to the Beaker core server.
beaker_instance = Beaker()


def set(var, val):
    """Synchronously publish var=val to the notebook namespace."""
    return beaker_instance.set4(var, val, False, True)


# returns before the write completes
def set_fast(var, val):
    return beaker_instance.set4(var, val, False, False)


# remove a var from the namespace
def unset(var):
    return beaker_instance.set4(var, None, True, True)


def get(var):
    """Fetch var from the notebook namespace."""
    return beaker_instance.get(var)


def set_session(id):
    """Record the notebook session id used for all subsequent requests."""
    beaker_instance.session_id = id
| apache-2.0 | Python | |
f9331db039dc549bf9e1d149338f67c455f29bea | add manage.py | jlaine/django-timegraph,jlaine/django-timegraph | manage.py | manage.py | #!/usr/bin/python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| bsd-2-clause | Python | |
a62dc18745f952b3fcb05ddf4768758e25883698 | Add datamigration to create staff clearances | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/migrations/0058_grant_staff_clearance_for_existing_staff_members.py | accelerator/migrations/0058_grant_staff_clearance_for_existing_staff_members.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-06-12 19:38
from __future__ import unicode_literals
from django.db import migrations
STAFF = "Staff" # don't import from models in migrations.
def grant_staff_clearances_for_role_grantees(apps, program_role):
Clearance = apps.get_model('accelerator', 'Clearance')
program_family = program_role.program.program_family
user_ids = program_role.programrolegrant_set.values_list(
"person_id", flat=True)
for user_id in user_ids:
Clearance.objects.get_or_create(
user_id=user_id,
program_family=program_family,
defaults={"level": STAFF})
def grant_clearances_for_mc_staff_users(apps, schema_editor):
ProgramRole = apps.get_model('accelerator', "ProgramRole")
for program_role in ProgramRole.objects.filter(
user_role__name=STAFF):
grant_staff_clearances_for_role_grantees(apps, program_role)
def revoke_staff_clearances(apps, schema_editor):
Clearance = apps.get_model("accelerator", "Clearance")
Clearance.objects.filter(level=STAFF).delete()
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0057_add_clearance_level_staff'),
]
operations = [
migrations.RunPython(
grant_clearances_for_mc_staff_users,
revoke_staff_clearances)
]
| mit | Python | |
ed514214967f4f9dde252070993f519db67057c3 | add tests for intermittent water | mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource | test/668-intermittent-water.py | test/668-intermittent-water.py | #http://www.openstreetmap.org/way/107817218
# Arizona Canal Diversion Channel (ACDC)
assert_has_feature(
16, 12353, 26272, 'water',
{ 'kind': 'river', 'intermittent': 'yes' })
#http://www.openstreetmap.org/way/96528126
# 10th Street Wash
assert_has_feature(
16, 12368, 26272, 'water',
{ 'kind': 'drain', 'intermittent': 'yes' })
#http://www.openstreetmap.org/way/61954975
# Unnamed drain
assert_has_feature(
16, 12372, 26272, 'water',
{ 'kind': 'drain', 'intermittent': 'yes' })
#http://www.openstreetmap.org/way/321690441
# Unnamed stream
assert_has_feature(
16, 12492, 26279, 'water',
{ 'kind': 'stream', 'intermittent': 'yes' })
#http://www.openstreetmap.org/way/68709904
# Unnamed water (lake)
assert_has_feature(
16, 12349, 26257, 'water',
{ 'kind': 'water', 'intermittent': 'yes' }) | mit | Python | |
2d18583309a189e263bda13e19f7a05ba832c14d | Add file to write templates to json | materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org | backend/scripts/templates/templates2file.py | backend/scripts/templates/templates2file.py | #!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import json
import os
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
templates = list(r.table('templates').run(conn))
try:
os.mkdir('/tmp/templates')
except:
pass
for template in templates:
try:
with open("/tmp/templates/{}".format(template['name']), 'w') as out:
json.dump(template, out, indent=4)
except:
pass
| mit | Python | |
72a851829a2190adc240bff3a62f4f03526a5e39 | add basic checks | simomarsili/ndd | ndd/check.py | ndd/check.py | # -*- coding: utf-8 -*-
"""Basic distribution tests."""
# pylint: disable=missing-docstring
import pytest
import ndd
COUNTS = [4, 12, 4, 5, 3, 1, 5, 1, 2, 2, 2, 2, 11, 3, 4, 12, 12, 1, 2]
K = 100
def rounded(*args, **kwargs):
result = ndd.entropy(COUNTS, *args, **kwargs)
return round(result, 3)
def test_base():
assert rounded() == 2.813
def test_k():
assert rounded(k=K) == 2.806
def test_plugin():
assert rounded(estimator='plugin') == 2.635
def test_pmf_plugin():
assert rounded(estimator='pmf_plugin') == 1.678
def test_miller_madow():
assert rounded(estimator='miller_madow') == 2.738
def test_wolper_wolf():
with pytest.raises(TypeError):
_ = rounded(estimator='wolpert_wolf')
def test_nsb_nok():
with pytest.raises(ndd.exceptions.NddError):
_ = rounded(estimator='nsb')
def test_nsb_k():
assert rounded(estimator='nsb', k=K) == 2.806
def test_asymptotic_nsb():
assert rounded(estimator='asymptotic_nsb') == 4.612
def test_grassberger():
assert rounded(estimator='grassberger') == 6.221
def test_auto_estimator_k():
assert rounded(estimator='auto_estimator', k=K) == 2.806
def test_auto_estimator_nok():
assert rounded(estimator='auto_estimator') == 2.813
| bsd-3-clause | Python | |
10d020cc7b91257b74e63c0579788c0f9435857f | add loadTestData.py | GluuFederation/community-edition-setup,GluuFederation/community-edition-setup,GluuFederation/community-edition-setup | templates/test/loadTestData.py | templates/test/loadTestData.py | import os
if not os.path.exists('setup.py'):
print "This script should be run from /install/community-edition-setup/"
sys.exit()
if not os.path.exists('/install/community-edition-setup/setup.properties.last'):
print "setup.properties.last is missing can't continue"
sys.exit()
f=open('setup.py').readlines()
for l in f:
if l.startswith('from pyDes import *'):
break
else:
f.insert(30, 'from pyDes import *\n')
with open('setup.py','w') as w:
w.write(''.join(f))
from setup import *
installObject = Setup( os.path.dirname(os.path.realpath(__file__)))
installObject.load_properties('setup.properties.last')
if installObject.ldap_type == 'opendj':
installObject.createLdapPw()
installObject.encode_test_passwords()
installObject.generate_passport_configuration()
installObject.generate_scim_configuration()
installObject.prepare_base64_extension_scripts()
installObject.render_templates()
installObject.render_test_templates()
installObject.loadTestData()
if installObject.ldap_type == 'opendj':
installObject.deleteLdapPw()
| mit | Python | |
6df873a26ff71b07e68dcb2e9fa9c4b1725a70ce | Add migration for expert bid [WAL-976] | opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur | src/nodeconductor_assembly_waldur/experts/migrations/0003_expertbid.py | src/nodeconductor_assembly_waldur/experts/migrations/0003_expertbid.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-07 15:09
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import nodeconductor.core.fields
class Migration(migrations.Migration):
dependencies = [
('structure', '0052_customer_subnets'),
('experts', '0002_expertrequest'),
]
operations = [
migrations.CreateModel(
name='ExpertBid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', nodeconductor.core.fields.UUIDField()),
('price', models.DecimalField(decimal_places=7, default=0, max_digits=22, validators=[django.core.validators.MinValueValidator(Decimal('0'))])),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='experts.ExpertRequest')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Project')),
],
options={
'abstract': False,
},
),
]
| mit | Python | |
8e983472134817c1312e3713ca45c7359300dedf | Set students current flag based on enrolled and attending | rectory-school/rectory-apps,rectory-school/rectory-apps,rectory-school/rectory-apps,rectory-school/rectory-apps,rectory-school/rectory-apps | academics/management/commands/set_student_current.py | academics/management/commands/set_student_current.py | #!/usr/bin/python
import logging
from datetime import date
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from academics.models import Student, Enrollment, AcademicYear
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Import reset student's current status"
def handle(self, *args, **kwargs):
logger.info("Beginning student status reset routing")
with transaction.atomic():
Student.objects.update(current=False)
current_enrollments = Enrollment.objects.filter(academic_year=AcademicYear.objects.current(), status_enrollment="Enrolled", status_attending="Attending")
current_students = Student.objects.filter(enrollment__in=current_enrollments)
current_students.update(current=True) | mit | Python | |
c812f283762f18cdf9107b2b7fd679c6b57e32d7 | Create coins.py | gappleto97/Senior-Project | common/coins.py | common/coins.py | mit | Python | ||
091fa458c4b1553bbd8e253b5e794850c3470cc9 | JOIN command | Heufneutje/txircd,ElementalAlchemist/txircd,DesertBus/txircd | txircd/modules/cmd_join.py | txircd/modules/cmd_join.py | from txircd.modbase import Command
class JoinCommand(Command):
def onUse(self, user, data):
if "targetchan" not in data or not data["targetchan"]:
return
for chan in data["targetchan"]:
user.join(chan)
def processParams(self, user, params):
channels = params[0].split(",")
keys = params[1].split(",") if len(params) > 1 else []
while len(keys) < len(channels):
keys.append(None)
joining = []
for i in range(0, len(channels)):
joining.append({"channel": channels[i][:64], "key": keys[i]})
remove = set()
for chan in joining:
if chan["channel"] in user.channels:
remove.add(chan)
for chan in remove:
joining.remove(chan)
channels = keys = []
for chan in joining:
channels.append(chan["channel"])
keys.append(chan["key"])
return {
"user": user,
"targetchan": params[0].split(","),
"keys": params[1].split(",") if len(params) > 1 else [],
"moreparams": params[2:]
}
def Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
return {
"commands": {
"JOIN": JoinCommand()
}
}
def cleanup(self):
del self.ircd.commands["JOIN"] | bsd-3-clause | Python | |
1190ae0f9f926db3b6969700a8a2b3ca67d7631c | Add google analytics script | fcooper8472/useful_scripts,fcooper8472/useful_scripts,fcooper8472/useful_scripts | query_google_analytics.py | query_google_analytics.py | import json
import random
data = {
'unique_users': random.randint(11, 99),
'number_of_cats': random.randint(11, 99),
}
with open('/fs/website/people/fergus.cooper/google_analytics_data.json', 'w') as outfile:
json.dump(data, outfile)
| bsd-3-clause | Python | |
145b84b8ad2ba2568a3ca3044a551c042496cb9c | Fix ordered dict rendering for task templates | openstack/rally,openstack/rally,openstack/rally,yeming233/rally,openstack/rally,yeming233/rally | rally/common/yamlutils.py | rally/common/yamlutils.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
import yaml
from yaml import constructor
from yaml import loader
from yaml import nodes
from yaml import parser
from yaml import resolver
# Re-exported so callers can catch parse failures without importing yaml.
ParserError = parser.ParserError


# NOTE(andreykurilin): Jinja2 uses __repr__ methods of objects while rendering
# templates. Such behaviour converts OrderedDict to the string like
#   "OrderedDict([('foo', 'xxx'), ('bar', 'yyy')])"
# which breaks json/yaml load.
# In 99% of cases, we are rendering templates based on the dicts obtained
# after yaml.safe_load which uses collections.OrderedDict, so writing here
# the workaround with overridden __repr__ method looks like the best choice.
class OrderedDict(collections.OrderedDict):
    """collections.OrderedDict with __repr__ like in the regular dict."""

    def __repr__(self):
        return json.dumps(self, sort_keys=False)


def _construct_mapping(loader, node, deep=False):
    """Build an ordered mapping, rejecting duplicate keys.

    Plain yaml would silently keep the last occurrence of a repeated key.
    """
    keys = []
    if isinstance(node, nodes.MappingNode):
        for key_node, value_node in node.value:
            key = loader.construct_object(key_node, deep=deep)
            if key in keys:
                raise constructor.ConstructorError(
                    "while constructing a mapping",
                    node.start_mark,
                    "the key (%s) is redefined" % key,
                    key_node.start_mark)
            keys.append(key)
    return OrderedDict(loader.construct_pairs(node))


class _SafeLoader(loader.SafeLoader):
    # Private subclass so the constructor registered below does not leak
    # into the global yaml.SafeLoader.
    pass


_SafeLoader.add_constructor(resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                            _construct_mapping)


def safe_load(stream):
    """Load stream to create python object

    :param stream: json/yaml stream.
    :returns: dict object
    """
    return yaml.load(stream, _SafeLoader)
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import yaml
from yaml import constructor
from yaml import loader
from yaml import nodes
from yaml import parser
from yaml import resolver
# Re-exported so callers can catch parse failures without importing yaml.
ParserError = parser.ParserError


def _construct_mapping(loader, node, deep=False):
    """Build an ordered mapping, rejecting duplicate keys.

    Plain yaml would silently keep the last occurrence of a repeated key.
    """
    keys = []
    if isinstance(node, nodes.MappingNode):
        for key_node, value_node in node.value:
            key = loader.construct_object(key_node, deep=deep)
            if key in keys:
                raise constructor.ConstructorError(
                    "while constructing a mapping",
                    node.start_mark,
                    "the key (%s) is redefined" % key,
                    key_node.start_mark)
            keys.append(key)
    return collections.OrderedDict(loader.construct_pairs(node))


class _SafeLoader(loader.SafeLoader):
    # Private subclass so the constructor registered in safe_load() does not
    # leak into the global yaml.SafeLoader.
    pass


def safe_load(stream):
    """Load stream to create python object

    :param stream: json/yaml stream.
    :returns: dict object
    """
    # NOTE(review): add_constructor mutates the class, so registering it on
    # every call is redundant; hoisting to module level looks equivalent --
    # confirm before changing.
    _SafeLoader.add_constructor(resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                                _construct_mapping)
    return yaml.load(stream, _SafeLoader)
| apache-2.0 | Python |
6e4be2c3c082ea8551c1eee9fc792511a043f1a7 | Fix bug when using unix sockets. | carltongibson/django-redis-cache,carltongibson/django-redis-cache,chripede/django-redis-cache,maikelwever/django-redis-cache,maikelwever/django-redis-cache,chripede/django-redis-cache | redis_cache/connection.py | redis_cache/connection.py | from redis.connection import UnixDomainSocketConnection, Connection
class CacheConnectionPool(object):
def __init__(self):
self._clients = {}
self._connection_pools = {}
def __contains__(self, server):
return server in self._clients
def __getitem__(self, server):
return self._clients.get(server, None)
def reset(self):
for pool in self._connection_pools.values():
pool.disconnect()
self._clients = {}
self._connection_pools = {}
def get_connection_pool(
self,
client,
host='127.0.0.1',
port=6379,
db=1,
password=None,
parser_class=None,
unix_socket_path=None,
connection_pool_class=None,
connection_pool_class_kwargs=None,
socket_timeout=None,
socket_connect_timeout=None,
**kwargs
):
connection_identifier = (host, port, db, unix_socket_path)
self._clients[connection_identifier] = client
pool = self._connection_pools.get(connection_identifier)
if pool is None:
connection_class = (
unix_socket_path and UnixDomainSocketConnection or Connection
)
kwargs = {
'db': db,
'password': password,
'connection_class': connection_class,
'parser_class': parser_class,
'socket_timeout': socket_timeout,
}
if not issubclass(connection_class, UnixDomainSocketConnection):
kwargs['socket_connect_timeout'] = socket_connect_timeout
kwargs.update(connection_pool_class_kwargs)
if unix_socket_path is None:
kwargs.update({
'host': host,
'port': port,
})
else:
kwargs['path'] = unix_socket_path
pool = connection_pool_class(**kwargs)
self._connection_pools[connection_identifier] = pool
pool.connection_identifier = connection_identifier
return pool
pool = CacheConnectionPool()
| from redis.connection import UnixDomainSocketConnection, Connection
class CacheConnectionPool(object):
def __init__(self):
self._clients = {}
self._connection_pools = {}
def __contains__(self, server):
return server in self._clients
def __getitem__(self, server):
return self._clients.get(server, None)
def reset(self):
for pool in self._connection_pools.values():
pool.disconnect()
self._clients = {}
self._connection_pools = {}
def get_connection_pool(
self,
client,
host='127.0.0.1',
port=6379,
db=1,
password=None,
parser_class=None,
unix_socket_path=None,
connection_pool_class=None,
connection_pool_class_kwargs=None,
socket_timeout=None,
socket_connect_timeout=None,
**kwargs
):
connection_identifier = (host, port, db, unix_socket_path)
self._clients[connection_identifier] = client
pool = self._connection_pools.get(connection_identifier)
if pool is None:
connection_class = (
unix_socket_path and UnixDomainSocketConnection or Connection
)
kwargs = {
'db': db,
'password': password,
'connection_class': connection_class,
'parser_class': parser_class,
'socket_timeout': socket_timeout,
'socket_connect_timeout': socket_connect_timeout,
}
kwargs.update(connection_pool_class_kwargs)
if unix_socket_path is None:
kwargs.update({
'host': host,
'port': port,
})
else:
kwargs['path'] = unix_socket_path
pool = connection_pool_class(**kwargs)
self._connection_pools[connection_identifier] = pool
pool.connection_identifier = connection_identifier
return pool
pool = CacheConnectionPool()
| bsd-3-clause | Python |
7e65f4da82cb9176247c1aa9ab72f40116cc6a8e | Add the Caliper package | mfherbst/spack,lgarren/spack,krafczyk/spack,iulian787/spack,matthiasdiener/spack,skosukhin/spack,iulian787/spack,tmerrick1/spack,mfherbst/spack,lgarren/spack,matthiasdiener/spack,skosukhin/spack,skosukhin/spack,skosukhin/spack,TheTimmy/spack,TheTimmy/spack,TheTimmy/spack,tmerrick1/spack,EmreAtes/spack,EmreAtes/spack,matthiasdiener/spack,lgarren/spack,TheTimmy/spack,iulian787/spack,LLNL/spack,LLNL/spack,mfherbst/spack,tmerrick1/spack,tmerrick1/spack,skosukhin/spack,matthiasdiener/spack,TheTimmy/spack,lgarren/spack,matthiasdiener/spack,tmerrick1/spack,LLNL/spack,iulian787/spack,EmreAtes/spack,iulian787/spack,krafczyk/spack,LLNL/spack,krafczyk/spack,EmreAtes/spack,krafczyk/spack,LLNL/spack,mfherbst/spack,mfherbst/spack,EmreAtes/spack,krafczyk/spack,lgarren/spack | var/spack/repos/builtin/packages/caliper/package.py | var/spack/repos/builtin/packages/caliper/package.py | from spack import *
class Caliper(Package):
"""
Caliper is a generic context annotation system. It gives programmers the
ability to provide arbitrary program context information to (performance)
tools at runtime.
"""
homepage = "https://github.com/LLNL/Caliper"
url = ""
version('master', git='ssh://git@cz-stash.llnl.gov:7999/piper/caliper.git')
variant('mpi', default=False, description='Enable MPI function wrappers.')
depends_on('libunwind')
depends_on('papi')
depends_on('mpi', when='+mpi')
def install(self, spec, prefix):
with working_dir('build', create=True):
cmake('..', *std_cmake_args)
make()
make("install")
| lgpl-2.1 | Python | |
c67b8173e33ed619d9601654a71c47d67fb82fe3 | add package py-rope (#3314) | skosukhin/spack,lgarren/spack,skosukhin/spack,EmreAtes/spack,TheTimmy/spack,matthiasdiener/spack,mfherbst/spack,tmerrick1/spack,iulian787/spack,TheTimmy/spack,EmreAtes/spack,tmerrick1/spack,krafczyk/spack,krafczyk/spack,LLNL/spack,TheTimmy/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,lgarren/spack,matthiasdiener/spack,lgarren/spack,matthiasdiener/spack,krafczyk/spack,LLNL/spack,TheTimmy/spack,lgarren/spack,TheTimmy/spack,iulian787/spack,krafczyk/spack,EmreAtes/spack,mfherbst/spack,krafczyk/spack,matthiasdiener/spack,LLNL/spack,mfherbst/spack,skosukhin/spack,lgarren/spack,skosukhin/spack,mfherbst/spack,iulian787/spack,tmerrick1/spack,tmerrick1/spack,tmerrick1/spack,matthiasdiener/spack,skosukhin/spack,EmreAtes/spack,mfherbst/spack,EmreAtes/spack | var/spack/repos/builtin/packages/py-rope/package.py | var/spack/repos/builtin/packages/py-rope/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyRope(PythonPackage):
    """a python refactoring library."""

    homepage = "https://github.com/python-rope/rope"
    url = "https://pypi.io/packages/source/r/rope/rope-0.10.5.tar.gz"

    version('0.10.5', '21882fd7c04c29d09f75995d8a088be7')

    # Build-time only: required to run setup.py.
    depends_on('py-setuptools', type='build')
| lgpl-2.1 | Python | |
cdcf814003694b86df6abee8f24af9b8609ea9a6 | Add py-zipp package (#12656) | iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/py-zipp/package.py | var/spack/repos/builtin/packages/py-zipp/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyZipp(PythonPackage):
    """Backport of pathlib-compatible object wrapper for zip files."""

    homepage = "https://github.com/jaraco/zipp"
    url = "https://pypi.io/packages/source/z/zipp/zipp-0.6.0.tar.gz"

    version('0.6.0', sha256='3718b1cbcd963c7d4c5511a8240812904164b7f381b647143a89d3b98f9bcd8e')

    depends_on('python@2.7:', type=('build', 'run'))
    # setuptools/setuptools-scm are build-time only; more-itertools is a
    # runtime dependency as well.
    depends_on('py-setuptools', type='build')
    depends_on('py-setuptools-scm@1.15.0:', type='build')
    depends_on('py-more-itertools', type=('build', 'run'))
| lgpl-2.1 | Python | |
a206d2aebd5ae6f084377b4b7f2261a83afe4e70 | Add new package: rsyslog (#18304) | LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/rsyslog/package.py | var/spack/repos/builtin/packages/rsyslog/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rsyslog(AutotoolsPackage):
    """The rocket-fast Syslog Server."""

    homepage = "https://www.rsyslog.com/"
    url = "https://github.com/rsyslog/rsyslog/archive/v8.2006.0.tar.gz"

    version('8.2006.0', sha256='dc30a2ec02d5fac91d3a4f15a00641e0987941313483ced46592ab0b0d68f324')
    version('8.2004.0', sha256='b56b985fec076a22160471d389b7ff271909dfd86513dad31e401a775a6dfdc2')
    version('8.2002.0', sha256='b31d56311532335212ef2ea7be4501508224cb21f1bef9d262c6d78e21959ea1')

    # Autotools toolchain (GitHub tarballs ship no pre-generated configure).
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')

    depends_on('libestr')
    depends_on('libfastjson')
    depends_on('zlib')
    depends_on('libuuid')
    depends_on('libgcrypt')
    depends_on('curl')
    depends_on('byacc', type='build')
    depends_on('flex', type='build')

    def setup_run_environment(self, env):
        # rsyslogd is installed under sbin, which is not added to PATH by
        # default.
        env.prepend_path('PATH', self.prefix.sbin)
| lgpl-2.1 | Python | |
05f794499997a1097bf625e3bd202eaaf06fe373 | Add exec module | ihorlaitan/poet,mossberg/poet,khanhnnvn/poet | common/modules/exec.py | common/modules/exec.py | import module
import re
REGEX = re.compile('^exec(\s+-o(\s+[\w.]+)?)?\s+(("[^"]+")\s+)+$')
MODNAME = 'exec'
USAGE = """Execute commands on target.
usage: exec [-o [filename]] "cmd1" ["cmd2" "cmd3" ...]
\nExecute given commands and optionally log to file with optional filename.
\noptions:
-h\t\tshow help
-o filename\twrite results to file in ARCHIVE_DIR'."""
@module.server_handler(MODNAME)
def server_exec(server, argv):
# extra space is for regex
if len(argv) < 2 or argv[1] in ('-h', '--help') or not REGEX.match(' '.join(argv) + ' '):
print USAGE
return
try:
preproc = preprocess(argv)
except Exception:
print USAGE
return
server.generic(*preproc)
@module.client_handler(MODNAME)
def client_shell(client, inp):
"""Handle server `exec' command.
Execute specially formatted input string and return specially formatted
response.
"""
out = ''
cmds = parse_exec_cmds(inp[5:])
for cmd in cmds:
cmd_out = client.cmd_exec(cmd)
out += '='*20 + '\n\n$ {}\n{}\n'.format(cmd, cmd_out)
client.s.send(out)
def preprocess(argv):
"""Parse posh `exec' command line.
Args:
inp: raw `exec' command line
Returns:
Tuple suitable for expansion into as self.generic() parameters.
"""
write_file = None
write_flag = argv[1] == '-o'
if write_flag:
if len(argv) == 2:
# it was just "exec -o"
raise Exception
if '"' not in argv[2]:
write_file = argv[2]
del argv[2]
del argv[1]
argv = ' '.join(argv)
return argv, write_flag, write_file
def parse_exec_cmds(inp):
"""Parse string provided by server `exec' command.
Convert space delimited string with commands to execute in quotes, for
example ("ls -l" "cat /etc/passwd") into list with commands as strings.
Returns:
List of commands to execute.
"""
if inp.count('"') == 2:
return [inp[1:-1]]
else:
# server side regex guarantees that these quotes will be in the
# correct place -- the space between two commands
third_quote = inp.find('" "') + 2
first_cmd = inp[:third_quote-1]
rest = inp[third_quote:]
return [first_cmd[1:-1]] + parse_exec_cmds(rest)
| mit | Python | |
8a6fdc7c79f9038c1b89ba90d60555f3dcbbfdb9 | Add migration | dbinetti/barberscore-django,dbinetti/barberscore-django,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore,dbinetti/barberscore | project/api/migrations/0016_selection_competitor.py | project/api/migrations/0016_selection_competitor.py | # Generated by Django 2.1.5 on 2019-02-12 14:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0015_auto_20190211_2028'),
]
operations = [
migrations.AddField(
model_name='selection',
name='competitor',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='selections', to='api.Competitor'),
),
]
| bsd-2-clause | Python | |
646e8cc87b4bb0d032f6f725a0fc2fbc8dfe5a1f | add a command exporting field with their locales to XLS | Parisson/Telemeta,Parisson/Telemeta,ANR-kamoulox/Telemeta,Parisson/Telemeta,ANR-kamoulox/Telemeta,Parisson/Telemeta,ANR-kamoulox/Telemeta,ANR-kamoulox/Telemeta | telemeta/management/commands/telemeta-export-fields.py | telemeta/management/commands/telemeta-export-fields.py | from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils import translation
from telemeta.models import *
from telemeta.util.unaccent import unaccent
import logging
import codecs
from xlwt import Workbook
class Command(BaseCommand):
    """Export each media model's field names, with their verbose names in
    every supported locale, to an XLS workbook (one sheet per model)."""

    help = "Export media fields to a XLS file (see an example in example/data/"
    args = "path"
    first_row = 1
    admin_email = 'webmaster@parisson.com'
    language_codes = ['en_US', 'fr_FR', 'de_DE']
    models = [MediaFonds, MediaCorpus, MediaCollection, MediaItem]

    def handle(self, *args, **options):
        # args[0] is the output .xls path.
        self.file = args[0]
        self.book = Workbook()

        for model in self.models:
            self.sheet = self.book.add_sheet(model.element_type)
            self.sheet.write(0, 0, 'Field')
            self.sheet.col(0).width = 256*32

            # Header row: one column per locale.
            k = 1
            for language_code in self.language_codes:
                self.sheet.write(0, k, language_code)
                self.sheet.col(k).width = 256*32
                k += 1

            # One row per model field, one translated cell per locale.
            i = 1
            for field in model._meta.fields:
                self.sheet.write(i, 0, field.attname)
                j = 1
                for language_code in self.language_codes:
                    # Activating the locale makes the lazily-translated
                    # verbose_name render in that language.
                    translation.activate(language_code)
                    self.sheet.write(i, j, unicode(field.verbose_name.lower()))
                    j += 1
                i += 1

        self.book.save(self.file)
| agpl-3.0 | Python | |
d5e77eba7cb7fe5c3235d1e59612e4c32fbe658f | Add test for Flags | ysekky/chainer,niboshi/chainer,sinhrks/chainer,keisuke-umezawa/chainer,tkerola/chainer,minhpqn/chainer,keisuke-umezawa/chainer,wkentaro/chainer,benob/chainer,okuta/chainer,kikusu/chainer,niboshi/chainer,kikusu/chainer,rezoo/chainer,t-abe/chainer,jnishi/chainer,AlpacaDB/chainer,sinhrks/chainer,ktnyt/chainer,cupy/cupy,ktnyt/chainer,cemoody/chainer,jnishi/chainer,chainer/chainer,aonotas/chainer,muupan/chainer,wkentaro/chainer,hvy/chainer,keisuke-umezawa/chainer,benob/chainer,cupy/cupy,okuta/chainer,chainer/chainer,wkentaro/chainer,truongdq/chainer,niboshi/chainer,anaruse/chainer,cupy/cupy,niboshi/chainer,jnishi/chainer,hvy/chainer,tscohen/chainer,okuta/chainer,ronekko/chainer,AlpacaDB/chainer,ktnyt/chainer,jnishi/chainer,t-abe/chainer,kiyukuta/chainer,pfnet/chainer,muupan/chainer,hvy/chainer,okuta/chainer,hvy/chainer,cupy/cupy,chainer/chainer,chainer/chainer,kashif/chainer,keisuke-umezawa/chainer,ktnyt/chainer,delta2323/chainer,truongdq/chainer,wkentaro/chainer | tests/cupy_tests/test_flags.py | tests/cupy_tests/test_flags.py | import unittest
from cupy import flags
class TestFlags(unittest.TestCase):
def setUp(self):
self.flags = flags.Flags(1, 2, 3)
def test_c_contiguous(self):
self.assertEqual(1, self.flags['C_CONTIGUOUS'])
def test_f_contiguous(self):
self.assertEqual(2, self.flags['F_CONTIGUOUS'])
def test_owndata(self):
self.assertEqual(3, self.flags['OWNDATA'])
def test_key_error(self):
with self.assertRaises(KeyError):
self.flags['unknown key']
def test_repr(self):
self.assertEqual(''' C_CONTIGUOUS : 1
F_CONTIGUOUS : 2
OWNDATA : 3''', repr(self.flags))
| mit | Python | |
990b452e8c142d6ada840026e80cbc15ebff895c | add csv test runner | manz/python-mapnik,yohanboniface/python-mapnik,whuaegeanse/mapnik,tomhughes/python-mapnik,Airphrame/mapnik,sebastic/python-mapnik,yiqingj/work,tomhughes/python-mapnik,jwomeara/mapnik,mapnik/python-mapnik,mapnik/mapnik,davenquinn/python-mapnik,Uli1/mapnik,sebastic/python-mapnik,yiqingj/work,garnertb/python-mapnik,qianwenming/mapnik,qianwenming/mapnik,tomhughes/mapnik,pramsey/mapnik,strk/mapnik,cjmayo/mapnik,kapouer/mapnik,cjmayo/mapnik,Airphrame/mapnik,kapouer/mapnik,naturalatlas/mapnik,jwomeara/mapnik,whuaegeanse/mapnik,Airphrame/mapnik,mbrukman/mapnik,mapycz/mapnik,yohanboniface/python-mapnik,manz/python-mapnik,mapycz/python-mapnik,pnorman/mapnik,pramsey/mapnik,zerebubuth/mapnik,stefanklug/mapnik,tomhughes/python-mapnik,kapouer/mapnik,naturalatlas/mapnik,mapnik/mapnik,zerebubuth/mapnik,mapnik/mapnik,Mappy/mapnik,stefanklug/mapnik,strk/mapnik,qianwenming/mapnik,yohanboniface/python-mapnik,lightmare/mapnik,Mappy/mapnik,tomhughes/mapnik,rouault/mapnik,Mappy/mapnik,lightmare/mapnik,CartoDB/mapnik,naturalatlas/mapnik,pnorman/mapnik,manz/python-mapnik,mbrukman/mapnik,qianwenming/mapnik,rouault/mapnik,strk/mapnik,rouault/mapnik,mapycz/python-mapnik,cjmayo/mapnik,stefanklug/mapnik,mapycz/mapnik,sebastic/python-mapnik,rouault/mapnik,whuaegeanse/mapnik,Uli1/mapnik,whuaegeanse/mapnik,lightmare/mapnik,garnertb/python-mapnik,pramsey/mapnik,lightmare/mapnik,naturalatlas/mapnik,garnertb/python-mapnik,davenquinn/python-mapnik,mbrukman/mapnik,mbrukman/mapnik,stefanklug/mapnik,zerebubuth/mapnik,davenquinn/python-mapnik,CartoDB/mapnik,tomhughes/mapnik,yiqingj/work,Uli1/mapnik,mapnik/python-mapnik,kapouer/mapnik,CartoDB/mapnik,tomhughes/mapnik,qianwenming/mapnik,cjmayo/mapnik,jwomeara/mapnik,pramsey/mapnik,Uli1/mapnik,yiqingj/work,mapnik/python-mapnik,mapycz/mapnik,mapnik/mapnik,Mappy/mapnik,pnorman/mapnik,Airphrame/mapnik,strk/mapnik,pnorman/mapnik,jwomeara/mapnik | tests/python_tests/csv_test.py | 
tests/python_tests/csv_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
from nose.tools import *
from utilities import execution_path
import os, mapnik2
def setup():
    """chdir() next to this file so the relative ../data paths resolve.

    The suite may be launched from any working directory; every datasource
    path below is relative to the test directory.
    """
    os.chdir(execution_path('.'))
if 'csv' in mapnik2.DatasourceCache.instance().plugin_names():
    # The tests below are only meaningful when mapnik2 was built with the
    # CSV datasource plugin.

    def test_broken_files(visual=False):
        # Datasources expected to fail (fails/) or at least warn (warns/)
        # when opened in strict mode.
        broken = glob.glob("../data/csv/fails/*.*")
        broken.extend(glob.glob("../data/csv/warns/*.*"))

        # Add a filename that doesn't exist
        broken.append("../data/csv/fails/does_not_exist.csv")

        for csv in broken:
            throws = False  # NOTE(review): never read -- presumably leftover
            if visual:
                try:
                    # strict=True should make construction raise on bad input,
                    # so reaching the print below means the test failed.
                    ds = mapnik2.Datasource(type='csv',file=csv,strict=True,quiet=True)
                    print '\x1b[33mfailed\x1b[0m',csv
                except Exception:
                    print '\x1b[1;32m✓ \x1b[0m', csv

    def test_good_files(visual=False):
        # Datasources that should open cleanly in non-strict mode.
        good_files = glob.glob("../data/csv/*.*")
        good_files.extend(glob.glob("../data/csv/warns/*.*"))

        for csv in good_files:
            if visual:
                try:
                    ds = mapnik2.Datasource(type='csv',file=csv,quiet=True)
                    print '\x1b[1;32m✓ \x1b[0m', csv
                except Exception:
                    print '\x1b[33mfailed\x1b[0m',csv
if __name__ == "__main__":
    # Manual entry point: run every test_* helper in verbose (visual) mode.
    setup()
    # Look the test functions up in the module namespace instead of using
    # eval() on names, and use a plain loop -- the list built by the old
    # comprehension was discarded anyway.
    for run in dir():
        if 'test_' in run:
            globals()[run](visual=True)
| lgpl-2.1 | Python | |
033809cbc96e380bdd657fd4f10d1ca60b0aa9af | Create Graphingcalc.py | Aqkotz/Final-Project | Graphingcalc.py | Graphingcalc.py | mit | Python | ||
2f0db6df4dd835903bf5f5a0e0da31f30c3bc56f | Add manage.py fixschools command | rohitdatta/pepper,rohitdatta/pepper,rohitdatta/pepper | scripts/rename_schools.py | scripts/rename_schools.py | from pepper.users.models import User
from flask_script import Command
import operator
from pepper.app import DB
class FixUsersSchoolNames(Command):
def run(self):
all_colleges = []
all_users = User.query.filter_by(school_id=None).all()
edu_groups = {}
non_edus = []
for user in all_users:
if user.type == 'admin':
continue;
if "edu" in user.email:
school_edu = user.email.split("@")
if not school_edu[1] in edu_groups.keys():
edu_groups[school_edu[1]] = [user]
else:
edu_groups[school_edu[1]].append(user)
else:
non_edus.append(user)
for key, value in edu_groups.iteritems():
count_schools_dict = {}
for user in value:
if not user.school_name in count_schools_dict.keys():
count_schools_dict[user.school_name] = 1
else:
count_schools_dict[user.school_name]+=1
# TODO: Sort in order and get top 5
sorted_schools_dict = sorted(count_schools_dict, key=count_schools_dict.get, reverse=True)[:5]
print(key)
# for x in sorted_schools_dict:
# print(x);
temp = 0
for x in sorted_schools_dict:
if not x == None:
print("(" + str(temp) + "): " + x)
temp+=1
print("" + str(temp) + ": Customize Name")
key = input('Enter number key: ')
if key < len(sorted_schools_dict):
decided_name = sorted_schools_dict[key]
print(decided_name)
else:
while True:
decided_name = raw_input('Enter school name: ')
is_decide = raw_input('Do you want to use this name: ' + decided_name + ' - Y/N')
if is_decide == 'Y' or is_decide == 'y':
break
elif is_decide == 'N' or is_decide == 'n':
continue
else:
print('Invalid decision - Try again.')
print('\n')
for user in value:
user.school_name = decided_name
DB.session.add(user);
DB.session.commit();
all_colleges.append(decided_name)
print('\n')
for user in non_edus:
print(user.school_name + " " + user.email)
print("0: Keep")
print("1: Change")
key = input('Enter number key: ')
if key == 1:
while True:
decided_name = raw_input('Enter school name: ')
is_decide = raw_input('Do you want to use this name: ' + decided_name + ' - y/n?\n')
if is_decide == 'Y' or is_decide == 'y':
break
elif is_decide == 'N' or is_decide == 'n':
continue
else:
print('Invalid decision - Try again.')
print('\n')
user.school_name = decided_name
DB.session.add(user);
DB.session.commit();
if not (user.school_name in all_colleges):
all_colleges.append(user.school_name)
print('\n')
| agpl-3.0 | Python | |
b0d50f52f45d8f1c7de261c7fe8d15e621d0e641 | Add a script to "untie" tied model weights. | lmjohns3/theanets,devdoer/theanets,chrinide/theanets | scripts/theanets-untie.py | scripts/theanets-untie.py | #!/usr/bin/env python
import climate
import cPickle as pickle
import gzip
import numpy as np
logging = climate.get_logger('theanets-untie')
@climate.annotate(
source='load a saved network from FILE',
target='save untied network weights to FILE',
)
def main(source, target):
opener = gzip.open if source.endswith('.gz') else open
p = pickle.load(opener(source))
logging.info('read from %s:', source)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
p['weights'].extend(0 + w.T for w in p['weights'][::-1])
p['biases'].extend(-b for b in p['biases'][-2::-1])
p['biases'].append(np.zeros(
(len(p['weights'][0]), ), p['biases'][0].dtype))
logging.info('writing to %s:', target)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
opener = gzip.open if target.endswith('.gz') else open
pickle.dump(p, opener(target, 'wb'), -1)
if __name__ == '__main__':
climate.call(main)
| mit | Python | |
cb403c3934f2401b6b337a19c8f5fd2c1f77805d | Solve 31. | klen/euler | 031/solution.py | 031/solution.py | # coding: utf-8
""" Project Euler problem #31. """
def problem():
u""" Solve the problem.
In England the currency is made up of pound, £, and pence, p, and there are
eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
It is possible to make £2 in the following way:
1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
How many different ways can £2 be made using any number of coins?
Answer: 73682
"""
goal = 200
coins = 1, 2, 5, 10, 20, 50, 100, 200
ways = [1] + [0] * goal
for coin in coins:
for idx in range(coin, goal + 1):
ways[idx] += ways[idx - coin]
return ways[-1]
# Alternative (bruteforce solution)
# return sum(
# 1
# for c200 in range(goal, -1, -200)
# for c100 in range(c200, -1, -100)
# for c50 in range(c100, -1, -50)
# for c20 in range(c50, -1, -20)
# for c10 in range(c20, -1, -10)
# for c5 in range(c10, -1, -5)
# for _ in range(c5, -1, -2)
# )
if __name__ == '__main__':
    # Python 2 entry point: print the computed answer (expected 73682).
    print problem()
| mit | Python | |
1774794c448d13a20891454a79f664406b364e4c | add cdr_serieshelper | cdr-stats/cdr-stats,areski/cdr-stats,Star2Billing/cdr-stats,cdr-stats/cdr-stats,cdr-stats/cdr-stats,cdr-stats/cdr-stats,Star2Billing/cdr-stats,areski/cdr-stats,Star2Billing/cdr-stats,areski/cdr-stats,areski/cdr-stats,Star2Billing/cdr-stats | cdr_stats/call_analytic/cdr_serieshelper.py | cdr_stats/call_analytic/cdr_serieshelper.py | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from influxdb import InfluxDBClient
from influxdb import SeriesHelper
# InfluxDB connections settings
host = 'localhost'  # InfluxDB server host
port = 8086  # default InfluxDB HTTP API port
user = 'root'  # NOTE(review): default credentials -- replace outside dev setups
password = 'root'
dbname = 'mydb'  # database the series helper below writes to
# Shared client handed to CDRSeriesHelper via its Meta class.
myclient = InfluxDBClient(host, port, user, password, dbname)
class CDRSeriesHelper(SeriesHelper):
    """Buffered InfluxDB writer: one 'events.stats.<server>' series per server_name."""

    # Meta class stores time series helper configuration.
    class Meta:
        # The client should be an instance of InfluxDBClient.
        client = myclient
        # The series name must be a string. Add dependent fields/tags in curly brackets.
        series_name = 'events.stats.{server_name}'
        # Defines all the fields in this time series.
        fields = ['some_stat', 'other_stat']
        # Defines all the tags for the series.
        tags = ['server_name']
        # Defines the number of data points to store prior to writing on the wire.
        bulk_size = 5
        # autocommit must be set to True when using bulk_size
        autocommit = True
# The following will create *five* (immutable) data points.
# Since bulk_size is set to 5, upon the fifth construction call, *all* data
# points will be written on the wire via CDRSeriesHelper.Meta.client.
# NOTE(review): these writes execute at import time -- presumably sample
# usage; confirm before importing this module from production code.
CDRSeriesHelper(server_name='us.east-1', some_stat=159, other_stat=10)
CDRSeriesHelper(server_name='us.east-1', some_stat=158, other_stat=20)
CDRSeriesHelper(server_name='us.east-1', some_stat=157, other_stat=30)
CDRSeriesHelper(server_name='us.east-1', some_stat=156, other_stat=40)
CDRSeriesHelper(server_name='us.east-1', some_stat=155, other_stat=50)
# self.influxdbcdr.set_columns(
#     ["time", "duration", "billsec", "country_id", "hangup_id", "switch_id", "user_id"])
# self.influxdbcdr.add_points([1413460800, 10, 8, 55, 16, 1, 1])
| mpl-2.0 | Python | |
fdf3df4154411652b340a38e901c52d678dbd92e | Replace get_user_profile_by_email with get_user. | punchagan/zulip,showell/zulip,tommyip/zulip,shubhamdhama/zulip,vabs22/zulip,showell/zulip,andersk/zulip,hackerkid/zulip,kou/zulip,vabs22/zulip,tommyip/zulip,rht/zulip,jackrzhang/zulip,jackrzhang/zulip,eeshangarg/zulip,eeshangarg/zulip,mahim97/zulip,zulip/zulip,synicalsyntax/zulip,vaidap/zulip,amanharitsh123/zulip,timabbott/zulip,tommyip/zulip,timabbott/zulip,eeshangarg/zulip,rht/zulip,dhcrzf/zulip,amanharitsh123/zulip,brainwane/zulip,punchagan/zulip,verma-varsha/zulip,jrowan/zulip,vabs22/zulip,brockwhittaker/zulip,brockwhittaker/zulip,synicalsyntax/zulip,punchagan/zulip,tommyip/zulip,hackerkid/zulip,Galexrt/zulip,Galexrt/zulip,kou/zulip,showell/zulip,synicalsyntax/zulip,kou/zulip,eeshangarg/zulip,brainwane/zulip,andersk/zulip,tommyip/zulip,mahim97/zulip,verma-varsha/zulip,jrowan/zulip,rishig/zulip,jackrzhang/zulip,rishig/zulip,vaidap/zulip,jrowan/zulip,dhcrzf/zulip,brainwane/zulip,kou/zulip,brockwhittaker/zulip,vabs22/zulip,punchagan/zulip,synicalsyntax/zulip,verma-varsha/zulip,zulip/zulip,rishig/zulip,zulip/zulip,zulip/zulip,vaidap/zulip,brockwhittaker/zulip,punchagan/zulip,mahim97/zulip,rht/zulip,hackerkid/zulip,timabbott/zulip,timabbott/zulip,rht/zulip,shubhamdhama/zulip,andersk/zulip,andersk/zulip,mahim97/zulip,timabbott/zulip,rht/zulip,jrowan/zulip,Galexrt/zulip,verma-varsha/zulip,zulip/zulip,shubhamdhama/zulip,jrowan/zulip,kou/zulip,eeshangarg/zulip,jackrzhang/zulip,verma-varsha/zulip,rishig/zulip,eeshangarg/zulip,andersk/zulip,synicalsyntax/zulip,amanharitsh123/zulip,synicalsyntax/zulip,vaidap/zulip,shubhamdhama/zulip,rht/zulip,dhcrzf/zulip,hackerkid/zulip,verma-varsha/zulip,brockwhittaker/zulip,punchagan/zulip,vabs22/zulip,vaidap/zulip,timabbott/zulip,kou/zulip,tommyip/zulip,showell/zulip,amanharitsh123/zulip,hackerkid/zulip,zulip/zulip,andersk/zulip,timabbott/zulip,dhcrzf/zulip,vaidap/zulip,jrowan/zulip,vabs22/zulip,hackerkid/zulip,dhcrzf/zulip,amanharitsh123/zulip
,showell/zulip,mahim97/zulip,rht/zulip,Galexrt/zulip,jackrzhang/zulip,shubhamdhama/zulip,brockwhittaker/zulip,showell/zulip,eeshangarg/zulip,brainwane/zulip,kou/zulip,jackrzhang/zulip,Galexrt/zulip,synicalsyntax/zulip,Galexrt/zulip,showell/zulip,rishig/zulip,rishig/zulip,brainwane/zulip,punchagan/zulip,rishig/zulip,tommyip/zulip,dhcrzf/zulip,mahim97/zulip,brainwane/zulip,jackrzhang/zulip,zulip/zulip,hackerkid/zulip,dhcrzf/zulip,brainwane/zulip,Galexrt/zulip,andersk/zulip,shubhamdhama/zulip,shubhamdhama/zulip,amanharitsh123/zulip | zerver/management/commands/bulk_change_user_name.py | zerver/management/commands/bulk_change_user_name.py | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import CommandError
from zerver.lib.actions import do_change_full_name
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
    help = """Change the names for many users."""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('data_file', metavar='<data file>', type=str,
                            help="file containing rows of the form <email>,<desired name>")
        # Second argument presumably marks the realm option as required --
        # confirm against ZulipBaseCommand.add_realm_args.
        self.add_realm_args(parser, True)

    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        data_file = options['data_file']
        realm = self.get_realm(options)
        with open(data_file, "r") as f:
            # Each line is "<email>,<full name>"; maxsplit=1 keeps commas
            # that appear inside the name itself.
            for line in f:
                email, new_name = line.strip().split(",", 1)
                try:
                    user_profile = self.get_user(email, realm)
                    old_name = user_profile.full_name
                    print("%s: %s -> %s" % (email, old_name, new_name))
                    # Third argument None: no acting user is recorded.
                    do_change_full_name(user_profile, new_name, None)
                except CommandError:
                    print("e-mail %s doesn't exist in the realm %s, skipping" % (email, realm))
| from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_change_full_name
from zerver.models import UserProfile, get_user_profile_by_email
class Command(BaseCommand):
    help = """Change the names for many users."""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('data_file', metavar='<data file>', type=str,
                            help="file containing rows of the form <email>,<desired name>")

    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        data_file = options['data_file']
        with open(data_file, "r") as f:
            # Each line is "<email>,<full name>"; maxsplit=1 keeps commas
            # that appear inside the name itself.
            for line in f:
                email, new_name = line.strip().split(",", 1)
                try:
                    # Looks the user up globally by e-mail (no realm scoping).
                    user_profile = get_user_profile_by_email(email)
                    old_name = user_profile.full_name
                    print("%s: %s -> %s" % (email, old_name, new_name))
                    # Third argument None: no acting user is recorded.
                    do_change_full_name(user_profile, new_name, None)
                except UserProfile.DoesNotExist:
                    print("* E-mail %s doesn't exist in the system, skipping." % (email,))
| apache-2.0 | Python |
7f7feb676ad8ee29dda08ef18e16093c099e912b | sort reports by the first column | puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/userreports/reports/data_source.py | corehq/apps/userreports/reports/data_source.py | from corehq.apps.reports.sqlreport import SqlData
from corehq.apps.userreports.sql import get_table_name
from dimagi.utils.decorators.memoized import memoized
class ConfigurableReportDataSource(SqlData):
def __init__(self, domain, table_id, filters, aggregation_columns, columns):
self.table_name = get_table_name(domain, table_id)
self._filters = {f.slug: f for f in filters}
self._filter_values = {}
self.aggregation_columns = aggregation_columns
self.column_configs = columns
@property
def filters(self):
return [fv.to_sql_filter() for _, fv in self._filter_values.items()]
def set_filter_values(self, filter_values):
for filter_slug, value in filter_values.items():
self._filter_values[filter_slug] = self._filters[filter_slug].create_filter_value(value)
@property
def filter_values(self):
return {k: v for _, fv in self._filter_values.items() for k, v in fv.to_sql_values().items()}
@property
def group_by(self):
return self.aggregation_columns
@property
@memoized
def columns(self):
return [col.get_sql_column() for col in self.column_configs]
@memoized
def get_data(self, slugs=None):
ret = super(ConfigurableReportDataSource, self).get_data(slugs)
# arbitrarily sort by the first column in memory
# todo: should get pushed to the database but not currently supported in sqlagg
return sorted(ret, key=lambda x: x[self.column_configs[0].field])
def get_total_records(self):
return len(self.get_data())
| from corehq.apps.reports.sqlreport import SqlData
from corehq.apps.userreports.sql import get_table_name
from dimagi.utils.decorators.memoized import memoized
class ConfigurableReportDataSource(SqlData):
    """SqlData backend for a user-configured report (rows returned unsorted)."""

    def __init__(self, domain, table_id, filters, aggregation_columns, columns):
        self.table_name = get_table_name(domain, table_id)
        # Filter specs keyed by slug; concrete values arrive later through
        # set_filter_values().
        self._filters = {f.slug: f for f in filters}
        self._filter_values = {}
        self.aggregation_columns = aggregation_columns
        self.column_configs = columns

    @property
    def filters(self):
        # SQL filter objects for every filter that has received a value.
        return [fv.to_sql_filter() for _, fv in self._filter_values.items()]

    def set_filter_values(self, filter_values):
        # Bind user-supplied values to their matching filter specs.
        for filter_slug, value in filter_values.items():
            self._filter_values[filter_slug] = self._filters[filter_slug].create_filter_value(value)

    @property
    def filter_values(self):
        # Flattened {sql_param: value} mapping across all bound filters.
        return {k: v for _, fv in self._filter_values.items() for k, v in fv.to_sql_values().items()}

    @property
    def group_by(self):
        return self.aggregation_columns

    @property
    @memoized
    def columns(self):
        return [col.get_sql_column() for col in self.column_configs]

    @memoized
    def get_data(self, slugs=None):
        return super(ConfigurableReportDataSource, self).get_data(slugs)

    def get_total_records(self):
        # Cheap to call repeatedly: get_data is memoized.
        return len(self.get_data())
| bsd-3-clause | Python |
7e4803db10f0cd2ccb6d54117bc68dd999023908 | Create __init__.py | vortex610/mos,vortex610/mos,vortex610/mos,vortex610/mos | run_tests/__init__.py | run_tests/__init__.py | apache-2.0 | Python | ||
339fdd927f9da0f7e15726d087c9916301aef935 | Add soft margin SVM and added kernels and class | a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms | softMarginSVMwithKernels/howItWorksSoftMarginSVM.py | softMarginSVMwithKernels/howItWorksSoftMarginSVM.py | # -*- coding: utf-8 -*-
"""Soft Margin SVM classification with kernels for machine learning.
Soft margin SVM is basically an SVM (see folder **supportVectorMachine**) which
has some 'slack' and allows features to be 'wrongly' classified to avoid
overfitting the classifier. This also includes kernels. Kernels use the inner
product to help us transform the feature space to make it possible for Support
Vector Machines to create a good hyperplane with non-linear feature sets.
I basically just do the 'from scratch' in this part because all this can easily
be done by just adding some parameters to sklearn's svm.SVC().
Example:
$ python howItWorksSoftMarginSVM.py.py
Todo:
*
"""
import numpy as np
from numpy import linalg
# Because I made a convex solver in 'howItWorksSupportVectorMachine.py' I will
# just use a library for it now because it's simpler.
import cvxopt
import cvxopt.solvers
def linear_kernel(x1, x2):
    """Plain inner product of two feature vectors.

    With this kernel the SVM's decision-boundary hyperplane stays linear
    in the original feature space.
    """
    return np.dot(x1, x2)
def polynomial_kernel(x, y, p=3):
    """Polynomial kernel of degree ``p``: ``(1 + <x, y>) ** p``.

    Using this kernel lets the SVM fit a polynomial decision-boundary
    hyperplane without expanding the feature space explicitly.
    """
    inner = np.dot(x, y)
    return (1 + inner)**p
def gaussian_kernel(x, y, sigma=5.0):
    """Gaussian (RBF) kernel: ``exp(-||x - y||**2 / (2 * sigma**2))``.

    Using this kernel gives the SVM a Gaussian-shaped decision boundary.
    """
    squared_distance = linalg.norm(x - y)**2
    return np.exp(-squared_distance / (2 * (sigma**2)))
class SVM(object):
    """Holds the configuration of one Support Vector Machine.

    Instantiating the classifier once lets the fitted model be reused
    without retraining (refitting) on every use.
    """

    def __init__(self, kernel=linear_kernel, C=None):
        """Store the kernel callable and the soft-margin slack budget.

        Args:
            kernel (function name): kernel used by the SVM; defaults to the
                linear kernel.
            C: cap on the total slack (sum of distances of wrongly
                classified features) allowed during fitting.  ``None``
                selects a hard-margin SVM with no slack.
        """
        self.kernel = kernel
        self.C = None if C is None else float(C)
| mit | Python | |
b89a98f0a5b6d4af94b5c52ff4baf4e7c10f2b53 | Create PrintExpScalingFactors.py | enjin/contracts | solidity/python/constants/PrintExpScalingFactors.py | solidity/python/constants/PrintExpScalingFactors.py | from math import exp
MIN_PRECISION = 32
for n in [1,2,3]:
print ' uint256 constant SCALED_EXP_{} = 0x{:x};'.format(n,int(exp(n)*(1<<MIN_PRECISION)))
print ' uint256 constant SCALED_VAL_{} = 0x{:x};'.format(n,int( (n)*(1<<MIN_PRECISION)))
| apache-2.0 | Python | |
171283cc2dee67eed8469fac08f531268a21c780 | add influxdb_user salt state to manage influxdb users | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/states/influxdb_user.py | salt/states/influxdb_user.py | # -*- coding: utf-8 -*-
'''
Management of InfluxDB users
============================
'''
def present(name, passwd, database, user=None, password=None, host=None,
            port=None):
    '''
    Ensure that the named user exists in the given InfluxDB database

    name
        The name of the user to manage

    passwd
        The password of the user

    database
        The database to create the user in

    user
        The user to connect as (must be able to create the user)

    password
        The password of the user

    host
        The host to connect to

    port
        The port to connect to
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # Bail out early when the target database is missing.
    if not __salt__['influxdb.db_exists'](
            database, user, password, host, port):
        ret['result'] = False
        ret['comment'] = 'Database {0} does not exist'.format(database)
        return ret

    # Nothing to do when the user already exists.
    if __salt__['influxdb.user_exists'](
            name, database, user, password, host, port):
        ret['comment'] = 'User {0} is already present'.format(name)
        return ret

    # In test mode only report what would happen.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'User {0} is not present and needs to be created'\
            .format(name)
        return ret

    if __salt__['influxdb.user_create'](
            name, passwd, database, user, password, host, port):
        ret['comment'] = 'User {0} has been created'.format(name)
        ret['changes'][name] = 'Present'
    else:
        ret['comment'] = 'Failed to create user {0}'.format(name)
        ret['result'] = False
    return ret
def absent(name, database, user=None, password=None, host=None, port=None):
    '''
    Ensure that the named user is absent from the given InfluxDB database

    name
        The name of the user to remove

    database
        The database to remove the user from

    user
        The user to connect as (must be able to remove the user)

    password
        The password of the user

    host
        The host to connect to

    port
        The port to connect to
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # Nothing to do when the user is already gone.
    if not __salt__['influxdb.user_exists'](
            name, database, user, password, host, port):
        ret['comment'] = 'User {0} is not present, so it cannot be removed'\
            .format(name)
        return ret

    # In test mode only report what would happen.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'User {0} is present and needs to be removed'\
            .format(name)
        return ret

    if __salt__['influxdb.user_remove'](
            name, database, user, password, host, port):
        ret['comment'] = 'User {0} has been removed'.format(name)
        ret['changes'][name] = 'Absent'
    else:
        ret['comment'] = 'Failed to remove user {0}'.format(name)
        ret['result'] = False
    return ret
8601790648a17dd1794be4f88d61e4af01349a80 | Test for the chipseq pipeline code | Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq | tests/test_pipeline_chipseq.py | tests/test_pipeline_chipseq.py | """
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_chipseq import process_chipseq
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_tb_pipeline():
    """
    Test case to ensure that the ChIP-seq pipeline code works.

    Running the pipeline with the test data from the command line:

    .. code-block:: none

       runcompss \
           --lang=python \
           --library_path=${HOME}/bin \
           --pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
           --log_level=debug \
           process_chipseq.py \
               --taxon_id 9606 \
               --genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
               --assembly GRCh38 \
               --file /<dataset_dir>/DRR000150.22.fastq
    """
    resource_path = os.path.join(os.path.dirname(__file__), "data/")
    # Inputs: genome FASTA, its five BWA index files, the FASTQ reads, and a
    # trailing None placeholder -- presumably the optional background/input
    # sample; confirm against process_chipseq.run().
    files = [
        resource_path + 'macs2.Human.GCA_000001405.22.fasta',
        resource_path + 'macs2.Human.GCA_000001405.22.fasta.ann',
        resource_path + 'macs2.Human.GCA_000001405.22.fasta.amb',
        resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwt',
        resource_path + 'macs2.Human.GCA_000001405.22.fasta.pac',
        resource_path + 'macs2.Human.GCA_000001405.22.fasta.sa',
        resource_path + 'macs2.Human.DRR000150.22.fastq',
        None
    ]
    metadata = {
        'assembly' : 'GRCh38',
        'expt_name' : 'macs.Human.SRR1658573'
    }
    chipseq_handle = process_chipseq()
    chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, [])
    print(chipseq_files)
    # Add tests for all files created
    for f_out in chipseq_files:
        print("CHIP-SEQ RESULTS FILE:", f_out)
        # Every reported output must exist and be non-empty.
        assert os.path.isfile(f_out) is True
        assert os.path.getsize(f_out) > 0
| apache-2.0 | Python | |
8cb665e107cb33b5ff3825e5ffb3bde919d36cff | Add cache runner as per #2897 | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/runners/cache.py | salt/runners/cache.py | '''
Return cached data from minions
'''
# Import python libs
import os
# Import salt libs
import salt.output
import salt.payload
def _cdata():
    '''
    Return the cached data from the minions
    '''
    serial = salt.payload.Serial(__opts__)
    minions_dir = os.path.join(__opts__['cachedir'], 'minions')
    ret = {}
    # Each cached minion has a <cachedir>/minions/<id>/data.p payload file.
    for minion_id in os.listdir(minions_dir):
        data_path = os.path.join(minions_dir, minion_id, 'data.p')
        if not os.path.isfile(data_path):
            continue
        with open(data_path) as handle:
            ret[minion_id] = serial.loads(handle.read())
    return ret
def grains(minion=None):
    '''
    Return cached grains for all minions or a specific minion
    '''
    data = _cdata()
    if minion:
        if minion in data:
            # NOTE(review): salt.output is a *module* here, not a callable --
            # invoking it should raise TypeError.  Presumably this was meant
            # to be an output-display helper (e.g. salt.output.display_output);
            # confirm against the salt.output API for this release.
            salt.output({minion: data[minion]['grains']}, 'grains')
            return {minion: data[minion]['grains']}
        # If the requested minion has no cached data we fall through and
        # return everything below -- possibly unintended.
    ret = {}
    # Note: this loop variable shadows the 'minion' parameter.
    for minion in data:
        ret[minion] = data[minion]['grains']
        salt.output({minion: data[minion]['grains']}, 'grains')
    return ret
def pillar(minion=None):
    '''
    Return cached grains for all minions or a specific minion
    '''
    data = _cdata()
    if minion:
        if minion in data:
            # NOTE(review): salt.output is a *module* here, not a callable --
            # invoking it should raise TypeError; see the same pattern in
            # grains() above.  Confirm the intended output API.
            salt.output({minion: data[minion]['pillar']})
            return {minion: data[minion]['pillar']}
        # If the requested minion has no cached data we fall through and
        # return everything below -- possibly unintended.
    ret = {}
    # Note: this loop variable shadows the 'minion' parameter.
    for minion in data:
        ret[minion] = data[minion]['pillar']
        salt.output({minion: data[minion]['pillar']})
    return ret
| apache-2.0 | Python | |
78ab60f9a0d7251effc238b98dc110706876da8e | add migration for new queue_empty field | Bitergia/allura,heiths/allura,heiths/allura,apache/allura,Bitergia/allura,heiths/allura,lym/allura-git,lym/allura-git,heiths/allura,apache/allura,heiths/allura,apache/incubator-allura,Bitergia/allura,apache/incubator-allura,lym/allura-git,Bitergia/allura,apache/incubator-allura,apache/allura,apache/allura,Bitergia/allura,lym/allura-git,apache/allura,lym/allura-git,apache/incubator-allura | scripts/migrations/029-set-mailbox-queue_empty.py | scripts/migrations/029-set-mailbox-queue_empty.py | import logging
from allura import model as M
log = logging.getLogger(__name__)
def main():
M.Mailbox.query.update({'queue': []},
{'$set': {'queue_empty': True}},
multi=True)
M.Mailbox.query.update({'queue': {'$ne': []}},
{'$set': {'queue_empty': False}},
multi=True)
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
1519f6be1d888f107a3b747e17cb1bd9d89c976f | Add SciPy benchmark | stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib | lib/node_modules/@stdlib/math/base/special/beta/benchmark/python/benchmark.scipy.py | lib/node_modules/@stdlib/math/base/special/beta/benchmark/python/benchmark.scipy.py | #!/usr/bin/env python
"""Benchmark scipy.special.beta."""
from __future__ import print_function
import timeit
NAME = "beta"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from scipy.special import beta; from random import random;"
stmt = "y = beta(1000.0*random() - 0.0, 1000.0*random() - 0.0)"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in xrange(REPEATS):
print("# python::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(REPEATS, REPEATS)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| apache-2.0 | Python | |
f93e29f52a59d5f545faf8fd94e950f50383c7fc | Integrate LLVM at llvm/llvm-project@9c8f950a0400 | tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflo
w,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
    """Imports LLVM.

    Registers the llvm-project source archive, pinned to LLVM_COMMIT, as a
    Bazel external repository under `name`.
    """
    # Pinned upstream revision and the sha256 of its tarball; the two must be
    # updated together when integrating a new LLVM commit.
    LLVM_COMMIT = "9c8f950a04004736bddb5093eda15f9a1c4f2eea"
    LLVM_SHA256 = "9dc64e4b455908f00d47b52d8a195b1e8dc98dd6337d16c85f3eda0ca37a1c8e"
    tf_http_archive(
        name = name,
        sha256 = LLVM_SHA256,
        strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
        urls = [
            # TensorFlow mirror first; GitHub as fallback.
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
            "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
        ],
        build_file = "//third_party/llvm:BUILD.bazel",
        patch_file = "//third_party/llvm:macos_build_fix.patch",
        link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
    )
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "3019898e0d1b494b7e7e76790adb3d83eff4aca1"
LLVM_SHA256 = "cb37f53299af16bc7e196c9dadd1e3fa889583d966b5d3e9adde7dc18d3094c5"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:BUILD.bazel",
patch_file = "//third_party/llvm:macos_build_fix.patch",
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
f902202b5e7f2473a7ce22505a4cc8b4f39377e0 | Add scantailor package (#12833) | iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/scantailor/package.py | var/spack/repos/builtin/packages/scantailor/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Scantailor(CMakePackage):
    """Scan Tailor is an interactive post-processing tool for scanned pages. It
    performs operations such as page splitting, deskewing, adding/removing
    borders, and others. You give it raw scans, and you get pages ready to be
    printed or assembled into a PDF or DJVU file. Scanning, optical character
    recognition, and assembling multi-page documents are out of scope of this
    project."""
    homepage = "http://www.scantailor.org"
    url = "https://github.com/trufanov-nok/scantailor/archive/0.2.7.tar.gz"
    # Pinned release with its tarball checksum.
    version(
        "0.2.7",
        sha256="3e27647621d43638888a268902f8fa098b06a70a5da5d0623b1c11220a367910",
    )
    # Declared dependencies of the build.
    depends_on("qt@5:")
    depends_on("libjpeg")
    depends_on("zlib")
    depends_on("libpng")
    depends_on("libtiff")
    depends_on("boost@1.35:")
    depends_on("libxrender")
| lgpl-2.1 | Python | |
4155d6ca5db149d8b213cc4078580fc2e85d7f4d | Migrate database for model changes. | rcutmore/vinotes-api,rcutmore/vinotes-api | vinotes/apps/api/migrations/0002_auto_20150325_1104.py | vinotes/apps/api/migrations/0002_auto_20150325_1104.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add optional free-text `description` fields to Wine and Winery."""
    dependencies = [
        ('api', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='wine',
            name='description',
            field=models.TextField(blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='winery',
            name='description',
            field=models.TextField(blank=True),
            preserve_default=True,
        ),
    ]
| unlicense | Python | |
983877d050dea6c962037815ee5a2b36725ad26f | Add problem66.py | mjwestcott/projecteuler,mjwestcott/projecteuler,mjwestcott/projecteuler | euler_python/problem66.py | euler_python/problem66.py | """
problem66.py
Consider quadratic Diophantine equations of the form: x**2 – Dy**2 = 1.
For example, when D=13, the minimal solution in x is 6492 – 13×1802 = 1.
It can be assumed that there are no solutions in positive integers when
D is square. By finding minimal solutions in x for D = {2, 3, 5, 6, 7},
we obtain the following:
3**2 – 2×2**2 = 1
2**2 – 3×1**2 = 1
9**2 – 5×4**2 = 1
5**2 – 6×2**2 = 1
8**2 – 7×3**2 = 1
Hence, by considering minimal solutions in x for D ≤ 7, the largest x is
obtained when D=5. Find the value of D ≤ 1000 in minimal solutions of x for
which the largest value of x is obtained.
"""
from collections import deque
from fractions import Fraction
from itertools import count
from math import floor, sqrt
from toolset import take
# Each iteration through the convergents of the continued fraction of sqrt(D),
# we want to check whether the numerator and denominator provide a solution to
# the Diophantine equation: https://en.wikipedia.org/wiki/Pell%27s_equation
# See the section entitled 'Fundamental solution via continued fractions'
def process_cf(D):
    """Generate the partial quotients of the continued fraction of sqrt(D),
    e.g. sqrt(23) = [4;(1,3,1,8)] yields 4, 1, 3, 1, 8, 1, 3, 1, 8, ..."""
    # Standard expansion of a quadratic irrational; see problem64.py for a
    # link explaining the recurrence. 'D' matches the problem wording.
    a0 = floor(sqrt(D))
    remainder, denom, quotient = 0, 1, a0
    while True:
        yield quotient
        remainder = denom * quotient - remainder
        denom = (D - remainder ** 2) / denom
        quotient = floor((a0 + remainder) / denom)
def convergent(D, n):
    """Return the nth convergent of the continued fraction for sqrt(D),
    where D is a non-square positive integer.
    Returns a plain int for n == 1 and a Fraction otherwise.
    """
    if n == 1:
        return next(process_cf(D))
    # Collect the first n partial quotients of the expansion of sqrt(D).
    values = deque(take(n, process_cf(D)))
    # Leading quotient plus the reciprocal of the recursively built tail.
    return Fraction(values.popleft() + Fraction(1, tail(values)))
def tail(values):
    """Fold the remaining partial quotients into the tail of the continued
    fraction for sqrt(D), consuming the deque `values` in the process."""
    # Drain the deque front-to-back, exactly as the recursive version did.
    quotients = []
    while values:
        quotients.append(values.popleft())
    # Fold from the innermost term outward: q0 + 1/(q1 + 1/(... + 1/qn)).
    acc = quotients[-1]
    for q in reversed(quotients[:-1]):
        acc = q + Fraction(1, acc)
    return acc
def solve_pells_equation(D):
    """Return x of the fundamental solution to x**2 - D*y**2 = 1, found by
    testing successive convergents x/y of sqrt(D)."""
    def is_solution(frac):
        "Check whether the convergent satisfies the Diophantine equation"
        x, y = frac.numerator, frac.denominator
        return x**2 - D*(y**2) == 1
    # Find the solution with the minimal value of x satisfying the equation.
    candidates = (convergent(D, n) for n in count(1))
    solution = next(filter(is_solution, candidates))
    # For the purpose of problem 66, we only need the value of x
    return solution.numerator
def problem66():
    """Return the D <= 1000 whose minimal Pell solution has the largest x."""
    # Perfect squares are skipped: the equation has no solution for square D.
    solutions = [(i, solve_pells_equation(i))
                 for i in range(1, 1000+1)
                 if sqrt(i).is_integer() == False]
    # Find the solution wth the largest value of x
    answer = max(solutions, key=lambda s: s[1])
    # Return the value of D for which that value of x was obtained
    return answer[0]
| mit | Python | |
253acd0afd532e8fa431ab140856fe0c4ba41684 | make the plugins directory a package, so that plugins can depend on each other. | alexex/alebot | plugins/__init__.py | plugins/__init__.py | # keep this to be able to access plugins from other plugins
| mit | Python | |
7ab37e931a836faa78a78f5d8358d845f72cdf49 | Add low level Gemini serial command script | bgottula/point | point/gemini_cmd.py | point/gemini_cmd.py | #!/usr/bin/env python3
"""
A simple script for sending raw serial commands to Gemini.
"""
import time
import serial
import readline
def main():
    """Interactive prompt: read a command, frame it, send it to Gemini."""
    # NOTE(review): port and baud rate are hard-coded — confirm /dev/ttyACM0
    # at 9600 matches the mount's configuration.
    ser = serial.Serial('/dev/ttyACM0', baudrate=9600)
    while True:
        cmd = input('> ')
        if len(cmd) == 0:
            continue
        # losmandy native commands -- add checksum
        if cmd[0] == '<' or cmd[0] == '>':
            if ':' not in cmd:
                print("Rejected: Native command must contain a ':' character")
                continue
            # Checksum: XOR of all command characters, folded to 7 bits and
            # offset by 64 to keep it printable; '#' terminates the command.
            checksum = 0
            for c in cmd:
                checksum = checksum ^ ord(c)
            checksum %= 128
            checksum += 64
            cmd = cmd + chr(checksum) + '#'
            print('Native command: ' + cmd)
        # LX200 command format
        elif cmd[0] == ':':
            # LX200-style commands are forwarded verbatim.
            print('LX200 command: ' + cmd)
            pass
        else:
            print("Rejected: Must start with ':', '<', or '>'")
            continue
        ser.write(cmd.encode())
        # Give the controller a moment to answer before draining the buffer.
        time.sleep(0.1)
        reply = ser.read(ser.in_waiting).decode()
        if len(reply) > 0:
            print('reply: ' + reply)
if __name__ == "__main__":
main()
| mit | Python | |
f65789fb705b43b446d1fc4b899074a66685a420 | add missed file | djtotten/workbench,SuperCowPowers/workbench,SuperCowPowers/workbench,djtotten/workbench,SuperCowPowers/workbench,djtotten/workbench | workbench/clients/workbench_client.py | workbench/clients/workbench_client.py | ''' This encapsulates some boilerplate workbench client code '''
import ConfigParser
import argparse
import os
def grab_server_args():
    """Return workbench server location as {'server': ..., 'port': ...}.

    Defaults are read from config.ini (section [workbench]) next to this
    file; the -s/--server and -p/--port command-line flags override them.
    """
    workbench_conf = ConfigParser.ConfigParser()
    config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini')
    workbench_conf.read(config_path)
    server = workbench_conf.get('workbench', 'server_uri')
    port = workbench_conf.get('workbench', 'server_port')
    # Collect args from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--server', type=str, default=server, help='location of workbench server')
    parser.add_argument('-p', '--port', type=int, default=port, help='port used by workbench server')
    # parse_known_args tolerates extra flags meant for the calling script.
    args, unknown = parser.parse_known_args()
    server = str(args.server)
    port = str(args.port)
    return {'server':server, 'port':port}
| mit | Python | |
f203136772cfdca96a44a848d646426a42111698 | Solve 20. | klen/euler | 020/solution.py | 020/solution.py | """ Project Euler problem #20. """
import math as mt
def problem():
    """ Solve the problem.

    Find the sum of the digits in the number 100!
    Answer: 648
    """
    total, remaining = 0, mt.factorial(100)
    # Peel off decimal digits arithmetically instead of via str().
    while remaining:
        remaining, digit = divmod(remaining, 10)
        total += digit
    return total
if __name__ == '__main__':
print problem()
| mit | Python | |
fe88269d03915e06cba0d0d228e2f4e78592d172 | Create 0007_ssoaccesslist.py | marbindrakon/eve-wspace,mmalyska/eve-wspace,mmalyska/eve-wspace,mmalyska/eve-wspace,mmalyska/eve-wspace,evewspace/eve-wspace,evewspace/eve-wspace,evewspace/eve-wspace,marbindrakon/eve-wspace,marbindrakon/eve-wspace,evewspace/eve-wspace,marbindrakon/eve-wspace | evewspace/API/migrations/0007_ssoaccesslist.py | evewspace/API/migrations/0007_ssoaccesslist.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the SSOAccessList model: a character id/name whitelist entry
    with an optional one-to-one link to a core Corporation."""
    dependencies = [
        ('core', '0001_initial'),
        ('API', '0006_auto_20161223_1751'),
    ]
    operations = [
        migrations.CreateModel(
            name='SSOAccessList',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('char_id', models.IntegerField(null=True)),
                ('char_name', models.CharField(max_length=255)),
                ('corp', models.OneToOneField(related_name='access_list_corp', null=True, to='core.Corporation')),
            ],
        ),
    ]
| apache-2.0 | Python | |
5b67f6ddea05cb301a317e500657cb1cd0949bff | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/sorting/easy/running_time_of_quicksort/py/solution.py | hackerrank/algorithms/sorting/easy/running_time_of_quicksort/py/solution.py | #!/bin/python
class QuickSort(object):
    """Quicksort that can tally the number of swaps it performs.

    Swaps are only counted when constructed with debugMode=True; the
    running total is accumulated in ``_swapCount``.
    """
    def __init__(self, debugMode = False):
        # debugMode: when True, every swap performed is counted.
        self._debugMode = debugMode
        self._swapCount = 0
    def partition(self, L, lo, hi):
        """Partition L[lo:hi] around the pivot L[hi - 1] (Lomuto scheme);
        return the pivot's final index."""
        if hi - lo < 2:
            return lo
        i = j = lo
        v = hi - 1
        # Invariant: L[lo:j] < pivot, L[j:i] >= pivot; pivot stays at v.
        while i < v:
            if L[i] < L[v]:
                L[i], L[j] = L[j], L[i]
                if self._debugMode: self._swapCount += 1
                j += 1
            i += 1
        # Place the pivot between the regions (counted even when j == v).
        L[j], L[v] = L[v], L[j]
        if self._debugMode: self._swapCount += 1
        return j
    def sort(self, L):
        """Sort L in place by recursing on half-open sub-ranges."""
        def helper(L, lo, hi):
            if hi - lo < 2:
                return lo
            v = self.partition(L, lo, hi)
            helper(L, lo, v)
            helper(L, v + 1, hi)
        helper(L, 0, len(L))
class InsertionSort(object):
    """Insertion sort that can tally the adjacent swaps it performs."""
    def __init__(self, debugMode=False):
        # debugMode: when True, every swap performed is counted.
        self._debugMode = debugMode
        self._swapCount = 0
    def sort(self, L):
        """Sort L in place, sinking each key leftward by adjacent swaps."""
        for pos in range(len(L)):
            key = L[pos]
            j = pos - 1
            # Shift the key past every element that is not smaller than it,
            # counting one swap per shift when debug mode is on.
            while j >= 0 and not (L[j] < key):
                L[j], L[j + 1] = L[j + 1], L[j]
                if self._debugMode:
                    self._swapCount += 1
                j -= 1
# Read the declared array size (only consumed for input format) and values.
size = int(raw_input())
L = [int(value) for value in raw_input().split()]
sortingAlgos = InsertionSort(True), QuickSort(True)
swapCounts = []
for algo in sortingAlgos:
    # Each algorithm sorts its own copy so the counts are independent.
    LCopy = L[:]
    algo.sort(LCopy)
    swapCounts.append(algo._swapCount)
# Print insertion-sort swaps minus quicksort swaps (Python 2 print syntax).
for i in range(len(swapCounts) - 1):
    print swapCounts[i] - swapCounts[i + 1]
| mit | Python | |
5fc58dbb3dbb379eee332e0a96704a1ddecb71c2 | move file | kaczmarj/neurodocker,kaczmarj/neurodocker | src/docker/utils.py | src/docker/utils.py | """Utility functions."""
from __future__ import absolute_import, division, print_function
import http.client # This should have been backported to Python2.
from ..utils import logger, load_yaml, save_yaml
def indent(instruction, cmd, line_suffix=' \\'):
    """Add Docker instruction and indent command.

    Parameters
    ----------
    instruction : str
        Docker instruction for `cmd` (e.g., "RUN"); uppercased in the output.
    cmd : str
        The command (lines separated by newline character.)
    line_suffix : str
        The suffix to append to each line except the last one.

    Returns
    -------
    dockerfile_chunk : str
        Instruction compatible with Dockerfile sytax.
    """
    instruction = instruction.upper()
    pad = ' ' * (len(instruction) + 1)
    split_cmd = cmd.splitlines()
    if not split_cmd:
        # Preserve previous behavior: an empty command yields ''.
        return ''
    if len(split_cmd) == 1:
        return "{} {}".format(instruction, cmd)
    # First line carries the instruction; continuation lines are aligned
    # under it. Joining with line_suffix + newline appends the suffix to
    # every line except the last.
    pieces = ["{} {}".format(instruction, split_cmd[0])]
    pieces.extend(pad + line for line in split_cmd[1:])
    return (line_suffix + "\n").join(pieces)
def _parse_versions(item):
"""Separate package name and version if version is given."""
if ":" in item:
return tuple(item.split(":"))
else:
return item
def add_neurodebian(os, full=True):
    """Return instructions to add NeuroDebian to Dockerfile.

    Parameters
    ----------
    os : str
        Operating system. See http://neuro.debian.net/.
        Options: 'yakkety', 'xenial', 'trusty', 'precise', 'sid', 'stretch',
        'jessie', 'wheezy', 'squeeze'
    full : bool
        If true, use the full NeuroDebian sources. If false, use the libre
        sources.
    """
    # NOTE(review): the parameter name `os` shadows the stdlib module name.
    suffix = "full" if full else "libre"
    neurodebian_url = ("http://neuro.debian.net/lists/{}.us-nh.{}"
                       "".format(os, suffix))
    # Change this to store the key locally.
    # NOTE(review): cmd's first line already starts with "RUN" and
    # indent("RUN", ...) prepends the instruction again, producing
    # "RUN RUN curl ..." — looks like a doubled instruction; verify.
    cmd = ("RUN curl -sSl {} >> "
           "/etc/apt/sources.list.d/neurodebian.sources.list\n"
           "apt-key adv --recv-keys --keyserver"
           "hkp://pgp.mit.edu:80 0xA5D32F012649A5A9\n"
           "apt-get update".format(neurodebian_url))
    return indent("RUN", cmd, line_suffix=" && \\")
class SpecsParser(object):
    """Class to parse specs for Dockerfile.

    Parameters
    ----------
    filepath : str
        Path to YAML file.
    specs : dict
        Dictionary of specs.
    """
    # Update these as necessary.
    TOP_LEVEL_KEYS = ['base', 'conda-env', 'neuroimaging-software']
    def __init__(self, filepath=None, specs=None):
        # Exactly one of `filepath` / `specs` should be provided.
        # NOTE(review): if neither is given, self.specs is never set and
        # _validate_keys raises AttributeError — confirm intended.
        if filepath is not None and specs is not None:
            raise ValueError("Specify either `filepath` or `specs`, not both.")
        elif filepath is not None:
            self.specs = load_yaml(filepath)
        elif specs is not None:
            self.specs = specs
        self._validate_keys()
        # Allowed top-level keys that were absent from the input.
        self.keys_not_present = set(self.TOP_LEVEL_KEYS) - set(self.specs)
        # Only parse versions if key is present.
        # self.parse_versions('neuroimaging-software')
    def _validate_keys(self):
        """Raise KeyError if unexpected top-level key."""
        unexpected = set(self.specs) - set(self.TOP_LEVEL_KEYS)
        if unexpected:
            items = ", ".join(unexpected)
            raise KeyError("Unexpected top-level key(s) in input: {}"
                           "".format(items))
    def parse_versions(self, key):
        """If <VER> is supplied, convert "<PKG>:<VER>" into tuple
        (<PKG>, <VER>).

        Parameters
        ----------
        key : str
            Key in `self.specs`.
        """
        self.specs[key] = [_parse_versions(item) for item in self.specs[key]]
| apache-2.0 | Python | |
844d94619f2cf221ab5bd550f3136be4d164155b | add working dir | icyblade/aleph,icyblade/aleph,icyblade/aleph,icyblade/aleph,icyblade/aleph | working_dir/diff.py | working_dir/diff.py | #! /usr/bin/env python
#! coding: utf8
import os, argparse, re, glob
db_user = 'aleph'
# SECURITY(review): database credentials are hard-coded in source — move
# them to a config file or environment variables.
db_pass = 'swbrIcu3Iv4cEhnTzmJL'
# parse args
parser = argparse.ArgumentParser(description='')
parser.add_argument('--locale', '-l', default='zhCN',
    help='Locale to extract, eg. zhCN, default zhCN')
parser.add_argument('--version',
    help='version string from versions, eg. WOW-21655patch7.0.3_Beta')
args = vars(parser.parse_args())
# generate db_name: '(patch).(build)_(locale)', e.g. '7.0.3.21655_zhCN'
db_name = '%s.%s_%s' % (
    re.findall('WOW\-([0-9]+)patch([0-9.]+)_Beta',args['version'])[0][::-1]+(args['locale'],)
)
# clear caches/trashes
#os.system('rm -rf CASCExplorer/CASCConsole/cache/')
# Blizzard CDN -> local dbcs
print('[*] Downloading necessary files...')
os.system('mono CASCConsole.exe "DBFilesClient*" ./ ./ {locale} None True "{version}"'.format(
    locale = args['locale'],
    version = args['version']
))
os.system('mv DBFILESCLIENT/ DBFilesClient/')
# create db: drop and recreate the per-build database
os.system('mysql --user={db_user} --password={db_pass} -e "drop database if exists \`{db_name}\`"'.format(
    db_user = db_user, db_pass = db_pass, db_name = db_name
))
os.system('mysql --user={db_user} --password={db_pass} -e "create database \`{db_name}\`"'.format(
    db_user = db_user, db_pass = db_pass, db_name = db_name
))
# dbc2sql: convert each DBC file and import the generated SQL
for file in glob.glob('./DBFilesClient/*'):
    dbc_name = os.path.splitext(os.path.basename(file))[0]
    print('[*] Importing %s...' % dbc_name)
    os.system('mono DBC\ Viewer.exe %s' % file)
    os.system('mysql --user=aleph --password=%s %s < %s.sql' % (db_pass, db_name, dbc_name))
    os.system('rm -rf %s.sql' % dbc_name)
# diff against the reference word list
os.system('./lcqcmp {db_user} {db_pass} nga.txt'.format(
    db_user = db_user,
    db_pass = db_pass,
))
| mit | Python | |
bb0178d0b97f52bb163cf13be3bd763840426f32 | Add API tests | FreeMusicNinja/freemusic.ninja,FreeMusicNinja/freemusic.ninja | django/artists/tests/test_api.py | django/artists/tests/test_api.py | import json
from django.core.urlresolvers import reverse
from mock import patch
from rest_framework import status
from rest_framework.test import APITestCase
from artists.models import Artist
from echonest.models import SimilarResponse
class ArtistTest(APITestCase):
    """API tests for the artist list and detail endpoints."""
    @patch('echonest.utils.get_similar_from_api')
    def test_find_artists(self, get_similar):
        """Similar-artist lookup returns only artists present locally."""
        url = reverse('artist-list')
        names = ["Mike Doughty", "Jonathan Coulton"]
        # Canned EchoNest 'similar artists' payload; only two of the three
        # returned names exist in the local database.
        response = {
            'response': {
                'status': {
                    'message': 'Success',
                    'version': '4.2',
                    'code': 0,
                },
                'artists': [
                    {'id': 'ARHE4MO1187FB4014D', 'name': 'Mike Doughty'},
                    {'id': 'ARW7K0P1187B9B5B47', 'name': 'Barenaked Ladies'},
                    {'id': 'ARXSNCN1187B9B06A3', 'name': 'Jonathan Coulton'}
                ],
            },
        }
        artists = [Artist.objects.create(name=n) for n in names]
        get_similar.return_value = SimilarResponse(
            name="They Might Be Giants",
            response=json.dumps(response),
        )
        data = {'name': "They Might Be Giants"}
        response = self.client.get(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, [
            {'id': a.id, 'name': a.name, 'links': list(a.links.all())}
            for a in artists
        ])
    def test_get_artist(self):
        """Detail endpoint serializes id, name and links of one artist."""
        artist = Artist.objects.create(name="Brad Sucks")
        url = reverse('artist-detail', args=[artist.id])
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, {
            'id': artist.id,
            'name': artist.name,
            'links': list(artist.links.all()),
        })
| bsd-3-clause | Python | |
36beb6ae8bc41e5d131dbbdc65d6716d498375c7 | add script to diff bonferonni & benjamini-whitney p-value corrections | terraswat/hexagram,terraswat/hexagram,ucscHexmap/hexagram,ucscHexmap/hexagram,ucscHexmap/hexagram,terraswat/hexagram | server/diffBHvsBon.py | server/diffBHvsBon.py | #!/usr/bin/env python2.7
"""
diffBHvsBon.py
This reports differnences between the BenjaminiWhitney-FDR p-value correction
vs. the Bonferroni
"""
import sys, os, csv, traceback, glob
def diffBHvsBon():
    """Compare Benjamini-Hochberg vs Bonferroni corrected p-values across
    stats files, writing counts (and differing rows) to a file in tmpBase.
    Returns 0 on completion."""
    #basePath = '/Users/swat/data/mcrchopra/first/'
    #tmpBase = '/Users/swat/tmp/'
    # NOTE(review): input/output paths are hard-coded to one user's home.
    tmpBase = '/cluster/home/swat/tmp/'
    basePath = '/cluster/home/swat/data/pancan12/mar17_ranksums/'
    # Build a list of layer names with index corresponding to the layers
    layers = glob.glob(basePath + 'layer_*.tab')
    layerIndex = ['empty' for x in range(len(layers))]
    filename = os.path.join(basePath, 'layers.tab')
    with open(filename, 'r') as fOut:
        fOut = csv.reader(fOut, delimiter='\t')
        for i, line in enumerate(fOut.__iter__()):
            # Extract N from a 'layer_N.tab' filename, e.g. layer_2044.tab
            j = int(line[1][6:-4])
            layerIndex[j] = line[0]
    # Save each stats whose two p-value corrections are the same
    searches =['stats_*']
    #searches =['stats_*', 'statsL_*']
    outFiles = [
        'stats_sameBHvsBon.tab',
        'statsL_sameBHvsBon.tab',
    ]
    # NOTE(review): `i` indexes outFiles here but is clobbered by the inner
    # enumerate loop below — harmless with one search pattern, buggy with two.
    i = 0;
    same = 0
    diff = 0
    for search in searches:
        search = basePath + search
        files = glob.glob(search)
        with open(tmpBase + outFiles[i], 'w') as fOut:
            fOut = csv.writer(fOut, delimiter='\t')
            #fOut.writerow(['#p-value correction', 'layer1', 'layer2'])
            for file in files:
                layer1 = layerIndex[int(file[len(basePath) + 6:-4])]
                with open(file, 'r') as f:
                    f = csv.reader(f, delimiter='\t')
                    for i, line in enumerate(f.__iter__()):
                        # stats_*: TP63_expression altered 0.0003403091 0.0005671818 0.0005671818
                        # Columns 2 and 3 hold the two corrected p-values.
                        if line[2] == line[3]:
                            same += 1
                            #fOut.writerow([line[3], layer1, line[0]])
                        else:
                            diff += 1
            fOut.writerow(['same: ' + str(same) + ', diff: ' + str(diff)])
        i += 1
    return 0
if __name__ == "__main__" :
    try:
        # Get the return code to return
        # Don't just exit with it because sys.exit works by exceptions.
        return_code = diffBHvsBon()
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # `except Exception:` would be safer.
    except:
        traceback.print_exc()
        # Return a definite number and not some unspecified error code.
        return_code = 1
    sys.exit(return_code)
| mit | Python | |
55fa30c236095006e6f9c970ef668598c4348a96 | Add microservice plugin for adding static attributes to responses. | irtnog/SATOSA,irtnog/SATOSA,SUNET/SATOSA,its-dirg/SATOSA,SUNET/SATOSA | src/satosa/micro_service/attribute_modifications.py | src/satosa/micro_service/attribute_modifications.py | import os
import yaml
from satosa.internal_data import DataConverter
from satosa.micro_service.service_base import ResponseMicroService
class AddStaticAttributes(ResponseMicroService):
    """
    Add static attributes to the responses.

    The path to the file describing the mapping (as YAML) of static attributes must be specified
    with the environment variable 'SATOSA_STATIC_ATTRIBUTES'.
    """
    def __init__(self, internal_attributes):
        super(AddStaticAttributes, self).__init__()
        # Converts the SAML-keyed static attributes to internal names.
        self.data_converter = DataConverter(internal_attributes)
        mapping_file = os.environ.get("SATOSA_STATIC_ATTRIBUTES")
        if not mapping_file:
            raise ValueError("Could not find file containing mapping of static attributes.")
        with open(mapping_file) as f:
            self.static_attributes = yaml.safe_load(f)
    def process(self, context, data):
        """Merge the configured static attributes into the response data;
        static values overwrite existing attributes with the same key."""
        all_attributes = data.get_attributes()
        all_attributes.update(self.data_converter.to_internal("saml", self.static_attributes))
        data.add_attributes(all_attributes)
        return data
| apache-2.0 | Python | |
b72dd1c890491ccfe2de66f89f5adc035e862acb | Create HtmlParser.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | service/HtmlParser.py | service/HtmlParser.py | #########################################
# HtmlParser.py
# description: html parser
# categories: [document]
# possibly more info @: http://myrobotlab.org/service/HtmlParser
#########################################
# start the service
htmlparser = Runtime.start("htmlparser","HtmlParser")
| apache-2.0 | Python | |
9c8402bdadb4860a3876aa2ab0f94b9ddac8cfd5 | Add offboard_sample.py | uenota/px4_simulation_stack,uenota/px4_simulation_stack,uenota/px4_simulation_stack | script/offboard_sample.py | script/offboard_sample.py | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import State
from mavros_msgs.srv import CommandBool, CommandBoolRequest
from mavros_msgs.srv import SetMode, SetModeRequest
current_state = State()
def state_cb(msg):
    """Cache the latest MAVROS state message in the module-level global."""
    global current_state
    current_state = msg
def offboard_node():
    """Connect to the FCU, switch to OFFBOARD mode, arm, and stream a
    fixed position setpoint (2 m above the local origin) at 20 Hz."""
    rospy.init_node("offb_node")
    r = rospy.Rate(20)
    rospy.Subscriber("mavros/state", State, state_cb)
    local_pos_pub = rospy.Publisher("mavros/setpoint_position/local",
                                    PoseStamped,
                                    queue_size=1000)
    arming_client = rospy.ServiceProxy("mavros/cmd/arming", CommandBool)
    set_mode_client = rospy.ServiceProxy("mavros/set_mode", SetMode)
    # Wait until the FCU connection is reported before proceeding.
    while not rospy.is_shutdown() and not current_state.connected:
        r.sleep()
    pose = PoseStamped()
    pose.pose.position.x = 0
    pose.pose.position.y = 0
    pose.pose.position.z = 2
    # Pre-stream 100 setpoints — presumably so the FCU accepts the switch
    # to OFFBOARD mode; confirm against PX4 offboard requirements.
    for i in range(100):
        local_pos_pub.publish(pose)
        r.sleep()
        if rospy.is_shutdown():
            break
    offb_set_mode = SetModeRequest()
    offb_set_mode.custom_mode = "OFFBOARD"
    arm_cmd = CommandBoolRequest()
    arm_cmd.value = True
    last_request = rospy.Time.now()
    while not rospy.is_shutdown():
        # Retry the mode switch, then arming, at most once every 5 seconds.
        if current_state.mode != "OFFBOARD" \
           and (rospy.Time.now() - last_request > rospy.Duration(5)):
            try:
                offb_set_mode_resp = set_mode_client(offb_set_mode)
                if offb_set_mode_resp.mode_sent:
                    rospy.loginfo("Offboard enabled")
            except rospy.ServiceException as e:
                rospy.logwarn(e)
            last_request = rospy.Time.now()
        else:
            if not current_state.armed \
               and (rospy.Time.now() - last_request > rospy.Duration(5)):
                try:
                    arm_cmd_resp = arming_client(arm_cmd)
                    if arm_cmd_resp.success:
                        rospy.loginfo("Vehicle armed")
                except rospy.ServiceException as e:
                    rospy.logwarn(e)
                last_request = rospy.Time.now()
        # Keep the setpoint stream alive on every cycle.
        local_pos_pub.publish(pose)
        r.sleep()
if __name__ == "__main__":
try:
offboard_node()
except rospy.ROSInterruptException:
pass | mit | Python | |
b883b3066848957376d841cb4ffdf2d5646315c8 | add quick-testlist.py | chenxianqin/intel-gpu-tools,tiagovignatti/intel-gpu-tools,mv0/intel-gpu-tools,tiagovignatti/intel-gpu-tools,tiagovignatti/intel-gpu-tools,chenxianqin/intel-gpu-tools,tiagovignatti/intel-gpu-tools,mv0/intel-gpu-tools,mv0/intel-gpu-tools,mv0/intel-gpu-tools,chenxianqin/intel-gpu-tools,chenxianqin/intel-gpu-tools | scripts/quick-testlist.py | scripts/quick-testlist.py | #!/usr/bin/env python
#
# Copyright 2015 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function
import json
import sys
def filter_results(filename):
    """Print names of tests that completed in under 60 seconds.

    Reads piglit JSON results from `filename`; tests whose result is
    "incomplete" are excluded regardless of their runtime.
    """
    with open(filename) as data:
        json_data = json.load(data)

    # Iterate the (name, record) pairs once instead of re-indexing the
    # "tests" mapping for every field access.
    for test_name, record in json_data["tests"].items():
        if record["result"] == "incomplete":
            continue
        if record["time"] < 60:
            print(test_name)
# Minimal CLI: require one positional argument (the results file).
if len(sys.argv) < 2:
    print("Usage: quick-testlist.py RESULTS")
    print("Read piglit results from RESULTS and print the tests that executed"
          " in under 60 seconds, excluding any incomplete tests. The list can"
          " be used by the --test-list option of piglit.")
    sys.exit(1)
filter_results(sys.argv[1])
| mit | Python | |
718379eea1e0c58ba76ada08d64512d9f4904c07 | add new package (#10060) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/eztrace/package.py | var/spack/repos/builtin/packages/eztrace/package.py | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Eztrace(AutotoolsPackage):
    """EZTrace is a tool to automatically generate execution traces
    of HPC applications."""
    homepage = "http://eztrace.gforge.inria.fr"
    url = "https://gforge.inria.fr/frs/download.php/file/37703/eztrace-1.1-8.tar.gz"
    version(
        "1.1-8",
        sha256="d80d78a25f1eb0e6e60a3e535e3972cd178c6a8663a3d6109105dfa6a880b8ec",
    )
    depends_on("mpi")
    # Does not work on Darwin due to MAP_POPULATE
    conflicts("platform=darwin")
    def configure_args(self):
        """Point configure at the MPI installation from the active spec."""
        args = ["--with-mpi={0}".format(self.spec["mpi"].prefix)]
        return args
| lgpl-2.1 | Python | |
cfe9550bfe7d8659c06892af8a32662cb372bea9 | add new package : sysstat (#13907) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/sysstat/package.py | var/spack/repos/builtin/packages/sysstat/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sysstat(AutotoolsPackage):
    """The sysstat package contains various utilities, common to many
    commercial Unixes, to monitor system performance and usage activity
    Sysstat also contains tools you can schedule via cron or systemd to
    collect and historize performance and activity data."""
    homepage = "https://github.com/sysstat"
    url = "https://github.com/sysstat/sysstat/archive/v12.1.6.tar.gz"
    # Known releases with their tarball checksums.
    version('12.2.0', sha256='614ab9fe8e7937a3edb7b2b6760792a3764ea3a7310ac540292dd0e3dfac86a6')
    version('12.1.7', sha256='293b31ca414915896c639a459f4d03a742b3a472953975394bef907b245b3a9f')
    version('12.1.6', sha256='50f4cbf023f8b933ed6f1fee0e6d33e508d9dc20355a47f6927e0c6046c6acf6')
    version('12.1.5', sha256='d0ea36f278fe10c7978be2a383cb8055c1277d60687ac9030ba694a08a80f6ff')
| lgpl-2.1 | Python | |
da6f3bbb10537da5e88340016fa84ea5bcc359b0 | Add support for route 53 dns record manipulation | CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge | actions/cloudbolt_plugins/aws/route_53_dns_plugin/route_53_dns_record_manipulation.py | actions/cloudbolt_plugins/aws/route_53_dns_plugin/route_53_dns_record_manipulation.py | '''
http://boto3.readthedocs.io/en/latest/reference/services/route53.html
http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.change_resource_record_sets
'''
from resourcehandlers.aws.models import AWSHandler
from common.methods import set_progress
#dns zone friendly name -- no trailing period
ROUTE_53_DNS_DOMAIN = '{{ r53_domain_name }}'
# 'CREATE'|'DELETE'|'UPSERT'
ACTION = '{{ r53_action }}'
# 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA'|'CAA'
RECORD_TYPE = 'A'
# 60 | 120 | <any Integer> '
TTL = 300
def get_hosted_zone_id(client=None, zone=None, env_vpc_id=None):
    '''
    Return the Route 53 hosted-zone id for `zone`, or False if none matches.

    When multiple hosted zones share the same name (e.g. identical private
    zones attached to different VPCs), the zone whose first associated VPC
    matches `env_vpc_id` is selected; with exactly one zone returned, no
    VPC check is performed.

    client     -- boto3 Route 53 client
    zone       -- zone friendly name, without the trailing period
    env_vpc_id -- VPC id from the server environment, used to disambiguate
    '''
    # Route 53 stores zone names with a trailing period.
    zone_name = f'{zone}.'
    response = client.list_hosted_zones_by_name(DNSName=zone_name)
    hosted_zones = response['HostedZones']
    if len(hosted_zones) == 1:
        return hosted_zones[0]['Id']
    elif len(hosted_zones) > 1:
        for dns_zone in hosted_zones:
            hz = client.get_hosted_zone(Id=dns_zone['Id'])
            if not hz:
                # Lookup failed; give up on disambiguation.
                break
            # NOTE(review): only the first associated VPC is checked —
            # confirm these zones never have multiple VPC attachments.
            if env_vpc_id == hz['VPCs'][0]['VPCId']:
                return dns_zone['Id']
    # No zone found, or none matched the environment's VPC.
    return False
#needed more resiliency in this function - see above
#def get_hosted_zone_id(client, zone):
# response = client.list_hosted_zones_by_name(DNSName=zone)
# # get first hosted zone returned
# hosted_zone = response['HostedZones'][0]
# zone_id = hosted_zone['Id']
# return zone_id
def change_resource_record(client, zone_id, batch):
'''
perform the update on the record in the given zone id based on the batch
information
'''
response = client.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=batch
)
return response
def run(job=None, server=None, **kwargs):
'''
update the route 53 dns record
'''
route_53_dns_zone = ROUTE_53_DNS_DOMAIN
nic = server.nics.first()
dns_domain = nic.network.dns_domain
if not dns_domain:
msg = 'DNS domain not set on selected NIC: {}'.format(nic)
return "FAILURE", "", msg
rh = server.resource_handler.cast()
if not isinstance(rh, AWSHandler):
msg = 'Route53 not supported on RH Type: {}'.format(rh)
return "FAILURE", "", msg
region = server.environment.get_cfv('aws_region')
client = rh.get_boto3_client(region_name=region, service_name='route53')
zone_id = get_hosted_zone_id(client=client,
zone=route_53_dns_zone,
env_vpc_id=server.environment.vpc_id)
name = f'{server.hostname}.{dns_domain}'
#name = '{}.{}'.format(server.hostname, dns_domain)
batch = {
'Comment': 'Created by CloudBolt Job ID: {}'.format(job.id),
'Changes': [
{
'Action': ACTION,
'ResourceRecordSet': {
'ResourceRecords': [{'Value': server.ip}],
'Name': name,
'Type': RECORD_TYPE,
'TTL': TTL
}
},
]
}
change_resource_record(client=client, zone_id=zone_id, batch=batch)
return "SUCCESS", "", ""
| apache-2.0 | Python | |
56592b10e25cd1f2cf8d122df389268ab24b3729 | refactor and use OOMMF_PATH environment variable to locate oommf.tcl | fangohr/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python | oommfmif/__init__.py | oommfmif/__init__.py | import os
import subprocess
# Environment variable OOMMF_PATH should point to the directory which
# contains 'oommf.tcl'
oommf_path = os.environ['OOMMF_PATH']
def call_oommf(argstring):
"""Convenience function to call OOMMF: Typicallusage
p = call_oommf("+version")
p.wait()
stdout, stderr = p.stdout.read(), p.stderr.read()
"""
p = subprocess.Popen(os.path.join(oommf_path, 'oommf.tcl') +
' ' + argstring,
shell=True, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
return p
def get_version():
"""Return OOMMF version as string, something like 1.2.0.5"""
p = call_oommf('+version')
p.wait()
stderr = p.stderr.read() # version is returned in stderr
s_oommftcl, versionstring = stderr.split()[0:2]
return versionstring
| import subprocess
def get_version():
p = subprocess.Popen("~/git/oommf/oommf/oommf.tcl +version", shell=True,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
p.wait()
stdout, stderr = p.stdout.read(), p.stderr.read()
# version is returned in stderr
print(stderr.split()[0:2])
s_oommftcl, versionstring = stderr.split()[0:2]
return versionstring
| bsd-2-clause | Python |
94f5f630c315bc6951c98cd2a9f4908ce05d59a4 | Test float precision in json encoding. | cicku/fedmsg,mathstuf/fedmsg,fedora-infra/fedmsg,maxamillion/fedmsg,fedora-infra/fedmsg,chaiku/fedmsg,vivekanand1101/fedmsg,pombredanne/fedmsg,chaiku/fedmsg,pombredanne/fedmsg,vivekanand1101/fedmsg,maxamillion/fedmsg,mathstuf/fedmsg,fedora-infra/fedmsg,cicku/fedmsg,mathstuf/fedmsg,chaiku/fedmsg,cicku/fedmsg,pombredanne/fedmsg,vivekanand1101/fedmsg,maxamillion/fedmsg | fedmsg/tests/test_encoding.py | fedmsg/tests/test_encoding.py | import unittest
import fedmsg.encoding
from nose.tools import eq_
class TestEncoding(unittest.TestCase):
def test_float_precision(self):
""" Ensure that float precision is limited to 3 decimal places. """
msg = dict(some_number=1234.123456)
json_str = fedmsg.encoding.dumps(msg)
print json_str
output = fedmsg.encoding.loads(json_str)
eq_(str(output['some_number']), '1234.123')
| lgpl-2.1 | Python | |
dfb4c5422c79fcd413d0d9a028cb5548e2678454 | Add script for generating test certificates | datatheorem/TrustKit,datatheorem/TrustKit,datatheorem/TrustKit,datatheorem/TrustKit | generate_test_certificates.py | generate_test_certificates.py | import trustme
# Create a CA
ca = trustme.CA()
# Issue a cert signed by this CA
server_cert = ca.issue_cert(u"www.good.com")
# Save the PEM-encoded data to a file
ca.cert_pem.write_to_path("GoodRootCA.pem")
server_cert.private_key_and_cert_chain_pem.write_to_path("www.good.com.pem")
| mit | Python | |
89a78e09ee52c27df8cd548839b240984b13d61d | add client exception | duanhongyi/kakfa | kafka/exception/client.py | kafka/exception/client.py | class FailedPayloadsException(Exception):
pass
class ConnectionError(Exception):
pass
class BufferUnderflowError(Exception):
pass
class ChecksumError(Exception):
pass
class ConsumerFetchSizeTooSmall(Exception):
pass
class ConsumerNoMoreData(Exception):
pass
| apache-2.0 | Python | |
05004f8dc48fe15268bc2d0146e5788f0bdf463e | Add missing migration | 5monkeys/djedi-cms,5monkeys/djedi-cms,5monkeys/djedi-cms | djedi/migrations/0002_auto_20190722_1447.py | djedi/migrations/0002_auto_20190722_1447.py | # Generated by Django 2.2.3 on 2019-07-22 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djedi', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='node',
name='is_published',
field=models.BooleanField(blank=True, default=False),
),
]
| bsd-3-clause | Python | |
2bbca64af8089433e5e9a1a3a57439286affaabb | Create a macro to set metadata to routed stories from desk's default template. [SDCP-375] (#2017) | superdesk/superdesk-core,petrjasek/superdesk-core,petrjasek/superdesk-core,superdesk/superdesk-core,petrjasek/superdesk-core,superdesk/superdesk-core,petrjasek/superdesk-core,superdesk/superdesk-core | superdesk/macros/set_default_template_metadata.py | superdesk/macros/set_default_template_metadata.py | import logging
from flask import current_app as app
from flask_babel import lazy_gettext
from superdesk import get_resource_service
logger = logging.getLogger(__name__)
def get_default_content_template(item, **kwargs):
if 'dest_desk_id' in kwargs:
desk = None
desk_id = kwargs['dest_desk_id']
elif 'desk' in kwargs:
desk = kwargs['desk']
desk_id = kwargs['desk']['_id']
elif 'task' in item and 'desk' in item['task']:
desk = None
desk_id = item['task'].get('desk')
else:
logger.warning("Can't set default data, no desk identifier found")
return
if desk is None:
desk = get_resource_service('desks').find_one(req=None, _id=desk_id)
if not desk:
logger.warning('Can\'t find desk with id "{desk_id}"'.format(desk_id=desk_id))
return
content_template_id = desk.get("default_content_template")
if not content_template_id:
logger.warning("No default content template set for {desk_name}".format(
desk_name=desk.get('name', desk_id)))
return
content_template = get_resource_service("content_templates").find_one(req=None, _id=content_template_id)
if not content_template:
logger.warning('Can\'t find content_template with id "{content_template_id}"'.format(
content_template_id=content_template_id))
return
return content_template
def set_default_template_metadata(item, **kwargs):
fields_to_exclude = app.config.get('DEFAULT_TEMPLATE_METADATA_MACRO_EXCLUDE', {})
fields_to_override = app.config.get('DEFAULT_TEMPLATE_METADATA_MACRO_OVERRIDE', {})
"""Replace some metadata from default content template"""
content_template = get_default_content_template(item, **kwargs)
if not content_template:
return
data = content_template['data']
vocabularies = get_resource_service('vocabularies').get(req=None, lookup={'field_type': {'$exists': True}})
for vocabulary in vocabularies:
fields_to_exclude.append(vocabulary['_id'])
for key, value in data.items():
if (not item.get(key) and key not in fields_to_exclude) or key in fields_to_override:
item[key] = data.get(key)
# subject contains remaining metadata to copy
subject = data.setdefault('subject', [])
# we first take out the metadata which we want to add, if any
to_add = []
for sub in subject:
if (sub.get('scheme')
and not any(subjects.get('scheme') == sub.get('scheme') for subjects in item.get('subject'))):
to_add.append(sub)
# and now we add the new one
item.setdefault('subject', []).extend([i for i in to_add if i.get('scheme') and to_add])
if not item['subject'] and data.get('subject'):
item['subject'] = data.get('subject')
return item
name = 'Set Default Template Metadata'
label = lazy_gettext('Set Default Template Metadata')
callback = set_default_template_metadata
access_type = 'backend'
action_type = 'direct'
| agpl-3.0 | Python | |
f74c20ae5a35eb66b48b1dbc219d00db674bf995 | Add tests for StudentsInfoList component of GCI dashboard. | rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son | tests/app/soc/modules/gci/views/test_dashboard.py | tests/app/soc/modules/gci/views/test_dashboard.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the view for GCI Dashboard.
"""
from google.appengine.ext import blobstore
from soc.modules.gci.models.score import GCIScore
from tests.profile_utils import GCIProfileHelper
from tests.test_utils import GCIDjangoTestCase
class DashboardTest(GCIDjangoTestCase):
"""Tests the GCI Dashboard components.
"""
def setUp(self):
self.init()
self.url = '/gci/dashboard/' + self.gci.key().name()
def assertDashboardTemplatesUsed(self, response):
"""Asserts that all the templates from the dashboard were used.
"""
self.assertGCITemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gci/dashboard/base.html')
def assertDashboardComponentTemplatesUsed(self, response):
"""Asserts that all the templates to render a component were used.
"""
self.assertDashboardTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gci/dashboard/list_component.html')
self.assertTemplateUsed(response, 'v2/modules/gci/dashboard/component.html')
self.assertTemplateUsed(response, 'v2/soc/list/lists.html')
self.assertTemplateUsed(response, 'v2/soc/list/list.html')
def testStudentsInfoList(self):
"""Tests the studentsInfoList component of the dashboard.
"""
student = GCIProfileHelper(self.gci, self.dev_test).createOtherUser(
'pr@gmail.com').createStudent()
info = student.student_info
#We do this because currently the seeder can not seed the
#BlobReference properties. What we do below is not correct in practice, but
#is ok for testing purpose.
consent_form = blobstore.BlobKey('I cant allow u to participate in GCI :P')
info.consent_form = consent_form
info.put()
score_properties = {'points': 5, 'program': self.gci, 'parent': student}
score = GCIScore(**score_properties)
score.put()
idx = 11
#Set the current user to be the host.
self.data.createHost()
response = self.get(self.url)
self.assertDashboardComponentTemplatesUsed(response)
response = self.getListResponse(self.url, idx)
self.assertIsJsonResponse(response)
data = self.getListData(self.url, idx)
self.assertEqual(len(data), 1)
#Only the consent form has been submitted.
self.assertEqual(data[0]['columns']['consent_form'], 'Yes')
self.assertEqual(data[0]['columns']['student_id_form'], 'No')
#Case when both the forms have been submitted.
student_id_form = blobstore.BlobKey('student_id')
info.student_id_form = student_id_form
info.put()
data = self.getListData(self.url, idx)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['columns']['consent_form'], 'Yes')
self.assertEqual(data[0]['columns']['student_id_form'], 'Yes')
#Case when none of the two forms are submitted.
info.consent_form = None
info.student_id_form = None
info.put()
data = self.getListData(self.url, idx)
self.assertEqual(len(data), 1)
list_fields = data[0]['columns']
self.assertEqual(list_fields['consent_form'], 'No')
self.assertEqual(list_fields['student_id_form'], 'No')
self.assertEqual(list_fields['name'], student.name())
self.assertEqual(list_fields['link_id'], student.link_id)
self.assertEqual(list_fields['email'], student.email) | apache-2.0 | Python | |
c201245a01ded92bec91f1f34320e87666330c44 | add mbtiles command | consbio/seedsource-core,consbio/seedsource-core | seedsource_core/django/seedsource/management/commands/create_vector_tiles.py | seedsource_core/django/seedsource/management/commands/create_vector_tiles.py | from django.core.management import BaseCommand
from seedsource_core.django.seedsource.models import SeedZone
import subprocess
class Command(BaseCommand):
help = 'Facilitates converting of vector data into vector tiles.'
def handle(self, *args, **options):
def write_out(output):
self.stdout.write(output)
zones = []
write_out(self.style.WARNING('Loading data..'))
for sz in SeedZone.objects.all():
zones.append(sz.polygon.json)
write_out(sz.name)
with open("geojson", "w") as f:
f.write('\n'.join(zones))
write_out(self.style.WARNING('Data loaded\nLaunching process to write mbtiles..'))
subprocess.Popen("tippecanoe -o seedtiles.mbtiles -f -zg --drop-densest-as-needed geojson", shell=True)
| bsd-3-clause | Python | |
08364dae50a68b5d053eadc836c02b51873df250 | Add dog_cat | aidiary/keras_examples,aidiary/keras_examples | cnn/dog_cat/dog_cat.py | cnn/dog_cat/dog_cat.py | from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
def save_history(history, result_file):
loss = history.history['loss']
acc = history.history['acc']
val_loss = history.history['val_loss']
val_acc = history.history['val_acc']
nb_epoch = len(acc)
with open(result_file, "w") as fp:
fp.write("epoch\tloss\tacc\tval_loss\tval_acc\n")
for i in range(nb_epoch):
fp.write("%d\t%f\t%f\t%f\t%f\n" % (i, loss[i], acc[i], val_loss[i], val_acc[i]))
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(150, 150, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
history = model.fit_generator(
train_generator,
samples_per_epoch=2000,
nb_epoch=1,
validation_data=validation_generator,
nb_val_samples=800)
model.save_weights('first_try.h5')
save_history(history, 'history.txt')
| mit | Python | |
fa7a24493e6e8029ea2dd7f3bf244b08353c50a3 | create run commnd | ojengwa/gfe | manage.py | manage.py | import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db
app.config.from_object(os.getenv('APP_SETTINGS', 'config.DevelopmentConfig'))
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| mit | Python | |
6bd35a2df0dbeca2668999dafbbd05779911cca7 | add directory for state | boyjimeking/paintown,boyjimeking/paintown,Sevalecan/paintown,Sevalecan/paintown,boyjimeking/paintown,boyjimeking/paintown,Sevalecan/paintown,boyjimeking/paintown,boyjimeking/paintown,Sevalecan/paintown,Sevalecan/paintown,Sevalecan/paintown,boyjimeking/paintown,Sevalecan/paintown,Sevalecan/paintown,boyjimeking/paintown | src/mugen/state/serialize.py | src/mugen/state/serialize.py | #!/usr/bin/env python
# This script reads a specification of a datastructure with fields in it and writes
# out a class that contains those fields and a way to serialize/deserialize them
# to a stream. This is similar to google's protobuf but using a much simpler
# implementation.
# TODO: grammar of specification
| bsd-3-clause | Python | |
d5aae9d0a770cad05c76c30754f5fcc57be5bd9b | Solve Fuel Spent in python | deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground | solutions/uri/1017/1017.py | solutions/uri/1017/1017.py | h = float(input())
s = float(input())
print(f"{h * s / 12.0:.3f}")
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.