commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
257134bdaea7c250d5956c4095adf0b917b65aa6 | Fix null case for event details | database/dict_converters/event_details_converter.py | database/dict_converters/event_details_converter.py | from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
'alliances': event_details.alliance_selections if event_details else None,
'district_points': event_details.district_points if event_details else None,
'rankings': event_details.renderable_rankings if event_details else None,
'stats': event_details.matchstats if event_details else None,
}
return event_details_dict
| from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
'alliances': event_details.alliance_selections,
'district_points': event_details.district_points,
'rankings': event_details.renderable_rankings,
'stats': event_details.matchstats,
}
return event_details_dict
| Python | 0.000017 |
1a296a5203c422a7eecc0be71a91994798f01c10 | copy name->title for BehaviorAction and BehaviorSequences | tndata_backend/goals/migrations/0020_populate_basebehavior_title_slugs.py | tndata_backend/goals/migrations/0020_populate_basebehavior_title_slugs.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.utils.text import slugify
def _copy_name_to_title(model, apps):
"""Copy the values from the Model's name -> title and name_slug -> title_slug."""
M = apps.get_model("goals", model)
for obj in M.objects.all():
obj.title = obj.name
obj.title_slug = obj.name_slug or slugify(obj.name)
obj.save()
def _copy_title_to_name(model, apps):
"""Copy the values from the Model's title -> name and title_slug -> name_slug."""
M = apps.get_model("goals", model)
for obj in M.objects.all():
obj.name = obj.title
obj.name_slug = obj.title_slug or slugify(obj.title)
obj.save()
def copy_behavior_title(apps, schema_editor):
_copy_name_to_title("BehaviorSequence", apps)
def copy_action_title(apps, schema_editor):
_copy_name_to_title("BehaviorAction", apps)
def rev_copy_behavior_title(apps, schema_editor):
_copy_title_to_name("BehaviorSequence", apps)
def rev_copy_action_title(apps, schema_editor):
_copy_title_to_name("BehaviorAction", apps)
class Migration(migrations.Migration):
dependencies = [
('goals', '0019_auto_20150312_1553'),
]
operations = [
migrations.RunPython(copy_behavior_title, reverse_code=rev_copy_behavior_title),
migrations.RunPython(copy_action_title, reverse_code=rev_copy_action_title),
]
| Python | 0 | |
94cfc0a7598dd8dcf455311f8bb41c2016c7c3a8 | Create solution.py | hackerrank/algorithms/warmup/easy/plus_minus/py/solution.py | hackerrank/algorithms/warmup/easy/plus_minus/py/solution.py | #include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include <stdbool.h>
int main(void)
{
int n;
scanf("%d",&n);
int arr[n];
for(int arr_i = 0; arr_i < n; arr_i++){
scanf("%d",&arr[arr_i]);
}
const double UNIT_RATIO = 1.0 / n;
double posratio = 0.0;
double negratio = 0.0;
double zratio = 0.0;
int value = 0;
for (int i = 0; i < n; ++i) {
value = arr[i];
if (value > 0) {
posratio += UNIT_RATIO;
} else if (value < 0) {
negratio += UNIT_RATIO;
} else {
zratio += UNIT_RATIO;
}
}
printf("%lf\n", posratio);
printf("%lf\n", negratio);
printf("%lf\n", zratio);
return 0;
}
| Python | 0.000018 | |
a27d30c4514cef93e054d5597829dc758b04c95e | add xycut in util | TranskribusDU/util/XYcut.py | TranskribusDU/util/XYcut.py | # -*- coding: utf-8 -*-
"""
XYcut.py
vertical/ horizontal cuts for page elements:
copyright Naver Labs Europe 2018
READ project
"""
def mergeSegments(lSegment, iMin):
"""Take as input a list of interval on some axis,
together with the object that contributed to this interval.
In this module it's a textbox or an image
Merge overlapping segments
Return a sorted list of disjoints segments together
with the associated objects (that is the union of the objects
associated to the segments being merged)
Contiguous segments closer than iMin are merged as well.
INPUT: [ (a,b,o) , ...]
or INPUT: [ (a,b, [o,...]) , ...]
OUPUT: [ (c,d,[o,...]) , ...], min, max
bProjOn may contain the name of the axis on which the projection has
been done ("X" for an x-cut, "Y" for an y-cut)
then in frontier mode , we keep smal intervals if they are coinciding
with a frontier (e.g. a very narrow horizontal split coinciding with
a line is kept despite it's narower than iMin
p and q are the boundaries along the other axis of the block to cut
"""
lMergedSegment = []
for seg in lSegment:
(aaux,baux,o) = seg
lo = (o,)
a = min(aaux,baux) #just in case...
b = max(aaux,baux) #just in case...
#find all overlapping or close-enough segments and merge them
lOverlap = []
for mseg in lMergedSegment:
[aa,bb,loaux] = mseg
iOver = max(a,aa) - min(b, bb) #negative means overlap
if iOver <= iMin: #overlap or spaced by less than iMin pixel
lOverlap.append(mseg)
else:
pass #nothing to merge with
if lOverlap:
#merge the current segment with all overlapping msegments
for aa, bb, lolo in lOverlap:
if aa<a: a=aa
if bb>b: b=bb
lo = lo + tuple(lolo)
for mseg in lOverlap:
lMergedSegment.remove(mseg)
#mseg = [a, b, lo]
mseg = (a, b, tuple(lo))
lMergedSegment.append(mseg)
#sorted list
lMergedSegment.sort()
amin = lMergedSegment[0][0]
amax = lMergedSegment[-1][1]
return tuple(lMergedSegment), amin, amax
| Python | 0.000001 | |
e5e24ddccf5de2fba743a97c1790406259399d18 | Create one fixture for all tests | conftest.py | conftest.py | import pytest
from fixture.application import Application
@pytest.fixture(scope = "session")
def app(request):
fixture = Application()
request.addfinalizer(fixture.destroy)
return fixture
| Python | 0 | |
74d274f02fa23f1a6799e9f96ccb1ef77162f1bc | Add new package: consul (#18044) | var/spack/repos/builtin/packages/consul/package.py | var/spack/repos/builtin/packages/consul/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Consul(MakefilePackage):
"""Consul is a distributed, highly available,
and data center aware solution to connect and configure applications
across dynamic, distributed infrastructure."""
homepage = "https://www.consul.io"
url = "https://github.com/hashicorp/consul/archive/v1.8.1.tar.gz"
version('1.8.1', sha256='c173e9866e6181b3679a942233adade118976414f6ca2da8deaea0fa2bba9b06')
version('1.8.0', sha256='a87925bde6aecddf532dfd050e907b6a0a6447cdd5dc4f49b46d97c9f73b58f9')
version('1.7.6', sha256='893abad7563c1f085303705f72d8789b338236972123f0ab6d2be24dbb58c2ac')
depends_on('go@1.14:')
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('lib', prefix.lib)
| Python | 0 | |
6427406fc627b467dd4851f32b6a15a74356ef2d | Create new package. (#6043) | var/spack/repos/builtin/packages/r-gviz/package.py | var/spack/repos/builtin/packages/r-gviz/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGviz(RPackage):
"""Genomic data analyses requires integrated visualization
of known genomic information and new experimental data. Gviz
uses the biomaRt and the rtracklayer packages to perform live
annotation queries to Ensembl and UCSC and translates this to
e.g. gene/transcript structures in viewports of the grid
graphics package. This results in genomic information plotted
together with your data."""
homepage = "http://bioconductor.org/packages/Gviz/"
url = "https://git.bioconductor.org/packages/Gviz"
version('1.20.0', git='https://git.bioconductor.org/packages/Gviz', commit='299b8255e1b03932cebe287c3690d58c88f5ba5c')
depends_on('r@3.4.0:3.4.9', when='@1.20.0')
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-xvector', type=('build', 'run'))
depends_on('r-rtracklayer', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-biomart', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-genomicfeatures', type=('build', 'run'))
depends_on('r-bsgenome', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-biovizbase', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-latticeextra', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-genomicalignments', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-digest', type=('build', 'run'))
| Python | 0 | |
0a25b4d4c0bc511592d797f6b214dd6fa3f70dd8 | FIX unhardwire 'modDate' to easy support for 'creDate' | scripts/managedb/lastest-updates.py | scripts/managedb/lastest-updates.py | #!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2013 Telefonica Investigacion y Desarrollo, S.A.U
#
# This file is part of Orion Context Broker.
#
# Orion Context Broker is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Orion Context Broker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# fermin at tid dot es
from pymongo import MongoClient, DESCENDING
from datetime import datetime
from sys import argv
# This script can be easily adapted to used creation date instead of modification date
# just changing the following variable to 'creDate'
refDate = 'modDate'
def printAttrs(attrHash, max):
# Given that each entity can have N attributes where N can be greater than 1, we need to add a second level
# of limit control (beyong the ".limit(max)" in the mongo query)
n = 0
for d in sorted(attrHash.keys(), reverse=True):
for attr in attrHash[d]:
printableDate = datetime.fromtimestamp(d).strftime('%Y-%m-%d %H:%M:%S')
print '-- ' + printableDate + ': '+ attr
n += 1
if n == max:
return
if 4 <= len(argv) <= 5:
type = argv[1]
db = argv[2]
max = int(argv[3])
else:
print 'Wrong number of arguments'
print ' Usage: ./lastest-updates.py <entities|attributes> <db> <limit> [entity_filter] '
print ' Example ./lastest-updates.py entities orion 10'
print ' Example ./lastest-updates.py entities orion 10 TEST_SENSOR'
exit(1)
# Optional argument: filter
query = {}
if len(argv) == 5:
query['_id.id'] = {'$regex': argv[4]}
client = MongoClient('localhost', 27017)
col = client[db]['entities']
if type == 'entities':
query[refDate] = {'$exists': True}
for doc in col.find(query).sort(refDate, direction=DESCENDING).limit(max):
date = int(doc[refDate])
dateString = datetime.fromtimestamp(date).strftime('%Y-%m-%d %H:%M:%S')
entityString = doc['_id']['id'] + ' (' + doc['_id']['type'] + ')'
print '-- ' + dateString + ': ' + entityString
elif type == 'attributes':
# Attributes are stored in a hash. The key of the hash is the modification date, so it is actually a
# hash of lists (due to several attributes could have the same modification date)
attrHash = { }
query['attrs.' + refDate] = {'$exists': True}
for doc in col.find(query).sort(refDate, direction=DESCENDING).limit(max):
entityString = doc['_id']['id'] + ' (' + doc['_id']['type'] + ')'
for attr in doc['attrs']:
if attr.has_key(refDate):
date = int(attr[refDate])
attrString = attr['name'] + ' - ' + entityString
if attrHash.has_key(date):
attrHash[date].append(attrString)
else:
attrHash[date] = [attrString]
printAttrs(attrHash, max)
else:
print 'Unsuported type: <' + type + '>'
exit (1)
| #!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2013 Telefonica Investigacion y Desarrollo, S.A.U
#
# This file is part of Orion Context Broker.
#
# Orion Context Broker is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Orion Context Broker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# fermin at tid dot es
from pymongo import MongoClient, DESCENDING
from datetime import datetime
from sys import argv
def printAttrs(attrHash, max):
# Given that each entity can have N attributes where N can be greater than 1, we need to add a second level
# of limit control (beyong the ".limit(max)" in the mongo query)
n = 0
for d in sorted(attrHash.keys(), reverse=True):
for attr in attrHash[d]:
printableDate = datetime.fromtimestamp(d).strftime('%Y-%m-%d %H:%M:%S')
print '-- ' + printableDate + ': '+ attr
n += 1
if n == max:
return
if 4 <= len(argv) <= 5:
type = argv[1]
db = argv[2]
max = int(argv[3])
else:
print 'Wrong number of arguments'
print ' Usage: ./lastest-updates.py <entities|attributes> <db> <limit> [entity_filter] '
print ' Example ./lastest-updates.py entities orion 10'
print ' Example ./lastest-updates.py entities orion 10 TEST_SENSOR'
exit(1)
# Optional argument: filter
query = {}
if len(argv) == 5:
query['_id.id'] = {'$regex': argv[4]}
client = MongoClient('localhost', 27017)
col = client[db]['entities']
if type == 'entities':
query['modDate'] = {'$exists': True}
for doc in col.find(query).sort('modDate', direction=DESCENDING).limit(max):
modDate = int(doc['modDate'])
dateString = datetime.fromtimestamp(modDate).strftime('%Y-%m-%d %H:%M:%S')
entityString = doc['_id']['id'] + ' (' + doc['_id']['type'] + ')'
print '-- ' + dateString + ': ' + entityString
elif type == 'attributes':
# Attributes are stored in a hash. The key of the hash is the modification date, so it is actually a
# hash of lists (due to several attributes could have the same modification date)
attrHash = { }
query['attrs.modDate'] = {'$exists': True}
for doc in col.find(query).sort('modDate', direction=DESCENDING).limit(max):
entityString = doc['_id']['id'] + ' (' + doc['_id']['type'] + ')'
for attr in doc['attrs']:
if attr.has_key('modDate'):
modDate = int(attr['modDate'])
attrString = attr['name'] + ' - ' + entityString
if attrHash.has_key(modDate):
attrHash[modDate].append(attrString)
else:
attrHash[modDate] = [attrString]
printAttrs(attrHash, max)
else:
print 'Unsuported type: <' + type + '>'
exit (1)
| Python | 0 |
3603669e0359f612b8e68a24b035849e9694aaaf | Add win_system state module | salt/states/win_system.py | salt/states/win_system.py | # -*- coding: utf-8 -*-
'''
Management of Windows system information
========================================
This state is used to manage system information such as the computer name and
description.
.. code-block:: yaml
ERIK-WORKSTATION:
system:
- computer_name
This is Erik's computer, don't touch!:
system:
- computer_desc
'''
# Import python libs
import logging
import sys
# Import salt libs
from salt._compat import string_types
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
This only supports Windows
'''
if salt.utils.is_windows() and 'system.get_computer_desc' in __salt__:
return 'system'
return False
def computer_desc(name):
'''
Manage the computer's description field
name
The desired computer description
'''
# Just in case someone decides to enter a numeric description
name = str(name)
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Computer description already set to {0!r}'.format(name)}
before_desc = __salt__['system.get_computer_desc']()
if before_desc == name:
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Computer description will be changed to {0!r}'
.format(name))
return ret
if not __salt__['system.set_computer_desc'](name):
ret['result'] = False
ret['comment'] = ('Unable to set computer description to '
'{0!r}'.format(name))
else:
ret['comment'] = ('Computer description successfully changed to {0!r}'
.format(name))
ret['changes'] = {'old': before_desc, 'new': name}
return ret
computer_description = computer_desc
def computer_name(name):
'''
Manage the computer's name
name
The desired computer name
'''
# Just in case someone decides to enter a numeric description
name = str(name).upper()
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Computer name already set to {0!r}'.format(name)}
before_name = __salt__['system.get_computer_name']()
pending_name = __salt__['system.get_pending_computer_name']()
if before_name == name and pending_name is None:
return ret
elif pending_name == name:
ret['comment'] = ('The current computer name is {0!r}, but will be '
'changed to {1!r} on the next reboot'
.format(before_name, name))
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Computer name will be changed to {0!r}'.format(name)
return ret
result = __salt__['system.set_computer_name'](name)
if result is not False:
after_name = result['Computer Name']['Current']
after_pending = result['Computer Name'].get('Pending')
if ((after_pending is not None and after_pending == name) or
(after_pending is None and after_name == name)):
ret['comment'] = 'Computer name successfully set to {0!r}'.format(name)
if after_pending is not None:
ret['comment'] += ' (reboot required for change to take effect)'
ret['changes'] = {'old': before_name, 'new': name}
else:
ret['result'] = False
ret['comment'] = 'Unable to set computer name to {0!r}'.format(name)
return ret
| Python | 0.000001 | |
0f68667e2ddfee6a370afe5c816a1358cfba799e | Correct GitHub URL. | openfisca_qt/widgets/__init__.py | openfisca_qt/widgets/__init__.py | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The widgets variables are created by each country-specific package (cf function init_country())
# Note: The variables below are not inited (to None) here, to ensure that execution will fail when they are used before
# OpenFisca country-specific package is properly inited.
__all__ = [
'CompositionWidget',
]
| # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The widgets variables are created by each country-specific package (cf function init_country())
# Note: The variables below are not inited (to None) here, to ensure that execution will fail when they are used before
# OpenFisca country-specific package is properly inited.
__all__ = [
'CompositionWidget',
]
| Python | 0.000028 |
b02b3e2e385bc04b2f1b1160371d55f8b6122006 | add migration file | pyanalysis/apps/corpus/migrations/0001_initial.py | pyanalysis/apps/corpus/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=150)),
('description', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Line',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('number', models.IntegerField(default=0)),
('text', models.TextField(default=b'', null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Script',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('last_modified', models.DateTimeField(default=django.utils.timezone.now)),
('dataset', models.ForeignKey(related_name='scripts', to='corpus.Dataset')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Token',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('st_col', models.IntegerField(default=0)),
('ed_col', models.IntegerField(default=0)),
('type', models.CharField(default=b'', max_length=32, null=True, blank=True)),
('text', models.TextField(default=b'', null=True, blank=True)),
('line', models.ForeignKey(related_name='tokens', to='corpus.Line')),
('script', models.ForeignKey(related_name='tokens', to='corpus.Script')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterIndexTogether(
name='token',
index_together=set([('script', 'type')]),
),
migrations.AlterIndexTogether(
name='script',
index_together=set([('dataset', 'last_modified'), ('dataset', 'name')]),
),
migrations.AddField(
model_name='line',
name='script',
field=models.ForeignKey(related_name='lines', to='corpus.Script'),
preserve_default=True,
),
migrations.AlterIndexTogether(
name='line',
index_together=set([('script', 'number')]),
),
]
| Python | 0.000001 | |
980594ab26887a4628620e9e0e00d89ddbdc4e49 | Create hackstring.py | hackstring.py | hackstring.py | #! /usr/bin/env python
import sys
print "".join(["%%%02x" % ord(x) for x in sys.argv[1]])
print "".join(["\\u%04x" % ord(x) for x in sys.argv[1]])
| Python | 0.000003 | |
739af3ccb50df93b108185ac1e7c0b47cd0bbf31 | Add happycopy2.py. | happycopy2.py | happycopy2.py | #!/usr/bin/env python
#
# Like happycopy.py, but make efforts to fill the buffer when encountering many
# small files (e.g. OS X .sparsebundle)
#
# Picture the scene: converting a drive from HFS+ to NTFS so your TV can play
# movies from it directly.
#
# Problem: copying media files from partition at the end of the drive to the
# new partition at the start of the drive.
#
# Attempt #1: Finder / rsync: 10.4mb/sec, disk rattles like crazy.
# Investigating, since this is a removable disk, write caching is minimal.
# Result: read bandwidth is artificially starved because writes are being
# forced to disk sooner than necessary. Result: huge amount of time wasted on
# disk seeks.
#
# Attempt #2: happycopy.py!@#"!one. Beat rsync at its own game by a clean
# 4mb/sec, with 10% lower CPU utilization. Read 1gb at a time, then write that
# buffer out, rinse repeat. Result: nice fast sequential reads.
#
# Attempt 1 IO graphs:
# Read: /-?_\-/_|?-\-/-|_?_|/-\/--\/
# Write: /-?_\-/_|?-\-/-|_?_|/-\/--\/
#
# Attempt 2 IO graphs:
# Read: /------------\_____________/--------------\_______
# Write: _____________/-------------\______________/-------
#
# Result: happy :)
#
import os
import sys
import time
MAX_BUF = 1048576 * 1024 * 1
def die(msg):
print msg
raise SystemExit(1)
def target_path(src_dir, dst_dir, path):
rel = os.path.relpath(path, src_dir)
return os.path.join(dst_dir, rel)
def stats(s, size, dur):
print >> sys.stderr, s, '%.2fMb/sec' % ((float(size) / dur) / 1048576)
def read_phase(to_copy):
buffered = 0
buffers = []
while to_copy and buffered < MAX_BUF:
src, dst, start = to_copy.pop()
with open(src, 'rb') as fp:
fp.seek(start)
buf = fp.read(MAX_BUF - buffered)
if buf:
buffered += len(buf)
buffers.append((src, dst, buf))
to_copy.append((src, dst, start + len(buf)))
return buffered, buffers
def write_phase(buffers):
for src_path, dst_path, buf in buffers:
with file(dst_path, 'ab') as fp:
fp.write(buf)
print 'write', dst_path, len(buf)
def do_copy(to_copy):
start_ts = time.time()
read = 0
read_secs = 0
written = 0
write_secs = 0
while to_copy:
t0 = time.time()
buffered, buffers = read_phase(to_copy)
read_secs += time.time() - t0
read += buffered
stats('Read', read, read_secs)
t0 = time.time()
write_phase(buffers)
write_secs += time.time() - t0
written += buffered
stats('Write', written, write_secs)
stats('Throughput', written, time.time() - start_ts)
def main():
if len(sys.argv) != 3:
die('Usage: prog src_dir dst_dir')
src_dir, dst_dir = sys.argv[1:]
if not os.path.isdir(src_dir):
die('src dir must be dir')
to_copy = []
for dirpath, dirnames, filenames in os.walk(src_dir):
tgt = target_path(src_dir, dst_dir, dirpath)
if not os.path.exists(tgt):
os.makedirs(tgt)
elif not os.path.isdir(tgt):
print 'gah!', tgt
for filename in filenames:
src_path = os.path.join(dirpath, filename)
dst_path = target_path(src_dir, dst_dir, src_path)
if os.path.exists(dst_path) and \
os.path.getsize(src_path) == os.path.getsize(dst_path):
print 'skip', src_path
else:
to_copy.append((src_path, dst_path, 0))
print 'going to copy', len(to_copy), 'files'
to_copy.reverse()
do_copy(to_copy)
if __name__ == '__main__':
main()
| Python | 0.000001 | |
1dae50ff55de5a2a2b55d7c54465f6e9e35b91ad | Change directories | scripts/CAI_Calculator.py | scripts/CAI_Calculator.py | #!/usr/bin/env python2.7
'''
Created on 18/11/2013
@author: suu13
'''
from collections import Counter
from Bio.SeqUtils import CodonUsage
from Bio import SeqIO
import argparse
import re
from random import sample
from Bio.Data.CodonTable import standard_dna_table
from os import remove
def codon_count(sequence):
codon_list=[sequence[i:i+3].upper() for i in range(0,len(sequence),3)]
return Counter(codon_list)
def Nucleic_Acid_Detect(strg, search=re.compile(r'[^a|t|g|c|A|T|G|C]').search):
return not bool(search(strg))
def Exception_Fixer(file_argument):
mRNA_dict={}
for mRNA in SeqIO.parse(file_argument,"fasta"):
if(len(str(mRNA.seq))%3 == 0) and Nucleic_Acid_Detect(str(mRNA.seq))==True:
#print len(str(mRNA.seq))
mRNA_dict[str(mRNA.description)]=str(mRNA.seq)
output_tmp_file_name=''.join((sample('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',8)+list(".txt")))
with open(output_tmp_file_name,"w") as tmp:
for seq_record in mRNA_dict:
tmp.write('>' + seq_record +'\n' + mRNA_dict[seq_record] +'\n')
return output_tmp_file_name
def CAI_print(fasta_file,CDS_CAI_Index):
FASTA_iterator_obj=SeqIO.parse(fasta_file,"fasta")
for seq_record in FASTA_iterator_obj:
print "%s\t%f" % (str(seq_record.description),CDS_CAI_Index.cai_for_gene(str(seq_record.seq).lower()))
def main():
    """Entry point.

    With -codoncount: print raw codon counts for the mRNA file and exit.
    Otherwise: build a CAI index from -mRNAfasta and print per-sequence CAI,
    either for -othersfasta (if given) or for the training set itself.
    On any failure the input is run through Exception_Fixer (drops sequences
    whose length is not a multiple of 3) and the whole flow is retried.
    """
    if args.codoncount is True:
        Codons=Counter()
        for sequence in SeqIO.parse(args.mRNAfasta,"fasta"):
            Codons=Codons + codon_count(str(sequence.seq))
        for key,value in Codons.iteritems():
            try:
                print "%s\t%s\t%d" % (key,standard_dna_table.forward_table[key.upper()],value)
            except:
                # NOTE(review): bare except -- a KeyError here means a stop
                # codon, but any other error is also silently printed as 'Stop'.
                print "%s\t%s\t%d" % (key,'Stop',value)
        return
    try:
        CDS_CAI_Index=CodonUsage.CodonAdaptationIndex() #init CAI object
        CDS_CAI_Index.generate_index(args.mRNAfasta) #read mRNA file and create CAI index
        #CDS_CAI_Index.print_index()
        if args.othersfasta is not None:
            try:
                CAI_print(args.othersfasta,CDS_CAI_Index)
            except TypeError:
                # Retry after cleaning sequences with bad codon counts.
                output_tmp_file_name=Exception_Fixer(args.othersfasta)
                CAI_print(output_tmp_file_name,CDS_CAI_Index)
                #print "Exception in othersfasta file which probably have wrong codons or codon numbers..."
                #raise
            except:
                # NOTE(review): bare except hides the real error.
                print "Unexpected error"
        else:
            #CDS_CAI_Index.print_index()
            print "\nCodon\tAA\tFrequency"
            for key,value in CDS_CAI_Index.codon_count.iteritems():
                try:
                    print "%s\t%s\t%d" % (key,standard_dna_table.forward_table[key.upper()],value)
                except:
                    print "%s\t%s\t%d" % (key,'Stop',value)
            CAI_print(args.mRNAfasta,CDS_CAI_Index)
    except:
        # Fallback path: fix the training file itself and repeat the flow.
        # NOTE(review): this duplicates the whole happy path above; a helper
        # taking the fasta path would remove the duplication.
        output_tmp_file_name=Exception_Fixer(args.mRNAfasta)
        CDS_CAI_Index=CodonUsage.CodonAdaptationIndex() #init CAI object
        CDS_CAI_Index.generate_index(output_tmp_file_name) #read mRNA file and create CAI index
        if args.othersfasta is not None:
            try:
                CAI_print(args.othersfasta,CDS_CAI_Index)
            except TypeError:
                output_tmp_file_name=Exception_Fixer(args.othersfasta)
                CAI_print(output_tmp_file_name,CDS_CAI_Index)
                #print "Exception in othersfasta file which probably have wrong codons or codon numbers..."
                #raise
            except:
                print "Unexpected error"
        else:
            CDS_CAI_Index.print_index()
            print "\nCodon\tAA\tFrequency"
            for key,value in CDS_CAI_Index.codon_count.iteritems():
                try:
                    print "%s\t%s\t%d" % (key,standard_dna_table.forward_table[key],value)
                except:
                    print "%s\t%s\t%d" % (key,'Stop',value)
            CAI_print(output_tmp_file_name,CDS_CAI_Index)
            remove(output_tmp_file_name)
if __name__ == '__main__':
    # Command-line interface: -mRNAfasta is the CAI training set (required).
    Argument_Parser=argparse.ArgumentParser(prog="CAI_Calculator.py")
    Argument_Parser.add_argument('-mRNAfasta',type=str,help="Reference mRNAs file to calculate CAI, (training dataset)",required=True)
    Argument_Parser.add_argument('-othersfasta',type=str,help="FASTA of other RNAs")
    Argument_Parser.add_argument('-codoncount',action='store_true',help="Only codon count")
    # `args` is read as a module-level global by main().
    args=Argument_Parser.parse_args()
    main()
    pass
084dd7fa3836f63d322a5bbf9e0289aa49488abb | Add fastagz field to data objects of process upload-fasta-nucl | resolwe_bio/migrations/0011_nucletide_seq.py | resolwe_bio/migrations/0011_nucletide_seq.py | import gzip
import os
import shutil
from django.conf import settings
from django.db import migrations
from resolwe.flow.migration_ops import DataDefaultOperation, ResolweProcessAddField, ResolweProcessRenameField
from resolwe.flow.utils import iterate_fields
# Resolwe output-field schema for the new (decompressed) ``fasta`` field
# added to the ``upload-fasta-nucl`` process below.
FASTA_SCHEMA = {
    'name': 'fasta',
    'label': 'FASTA file',
    'type': 'basic:file:',
}
class DefaultUnzipFasta(DataDefaultOperation):
    """Default for the new ``fasta`` field: the decompressed ``fastagz``.

    For each migrated data object the gzipped FASTA referenced by
    ``output['fastagz']`` is decompressed next to it, and a basic:file
    dict pointing at the decompressed copy is returned.
    """
    def prepare(self, data, from_state):
        # Nothing to precompute; all work happens per-object below.
        pass
    def get_default_for(self, data, from_state):
        """Return default for given data object."""
        fastagz = os.path.join(
            settings.FLOW_EXECUTOR['DATA_DIR'],
            data.location.subpath,
            data.output['fastagz']['file']
        )
        # The fastagz field is expected to always reference a .gz file.
        assert fastagz.endswith('.gz')
        fasta = fastagz[:-3]
        # Decompress.
        with gzip.open(fastagz, 'rb') as infile, open(fasta, 'wb') as outfile:
            shutil.copyfileobj(infile, outfile)
        size = os.path.getsize(fasta)
        return {
            'file': os.path.basename(fasta),
            'size': size,
            'total_size': size,
        }
def recompute_data_size(apps, schema_editor):
    """Recompute size of all data objects of process ``upload-fasta-nucl``."""
    # Use the historical model, as required inside Django migrations.
    Data = apps.get_model("flow", "Data")  # pylint: disable=invalid-name
    for data in Data.objects.filter(process__slug='upload-fasta-nucl'):
        hydrate_size(data)
        data.save()
def hydrate_size(data):
    """Compute size of all Data object outputs and its cumulative size.
    This is a simplified version of the original ``hydrate_size`` function,
    since we need just a subset of it: only ``basic:file:`` fields are
    measured, and the total is stored on ``data.size`` (not saved here).
    """
    def add_file_size(obj):
        """Add file size to the basic:file field."""
        path = os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], data.location.subpath, obj['file'])
        obj['size'] = os.path.getsize(path)
        obj['total_size'] = obj['size']
    data_size = 0
    for field_schema, fields in iterate_fields(data.output, data.process.output_schema):
        name = field_schema['name']
        value = fields[name]
        if 'type' in field_schema:
            if field_schema['type'].startswith('basic:file:'):
                add_file_size(value)
                data_size += value.get('total_size', 0)
    data.size = data_size
class Migration(migrations.Migration):
    """
    Make outputs of ``upload-fasta-nucl`` consistent with ``upload-genome``.
    Process ``upload-genome`` stores compressed output in ``fastagz``
    and uncompressed in ``fasta``. Process ``upload-fasta-nucl`` stores
    compressed output in ``fasta`` output field and does not have a
    field with uncompressed output. Therefore ``fasta`` field is first
    renamed to ``fastagz``. Only then ``fasta`` field is added with
    decompressed content.
    """
    dependencies = [
        ('resolwe_bio', '0010_add_relation_types'),
        ('flow', '0028_add_data_location'),
    ]
    operations = [
        # Step 1: the old compressed output moves to ``fastagz``.
        ResolweProcessRenameField(
            process='upload-fasta-nucl',
            field='output.fasta',
            new_field='fastagz',
        ),
        # Step 2: re-add ``fasta`` holding the decompressed content.
        ResolweProcessAddField(
            process='upload-fasta-nucl',
            field='output.fasta',
            schema=FASTA_SCHEMA,
            default=DefaultUnzipFasta(),
        ),
        # Step 3: object sizes changed, so recompute them.
        migrations.RunPython(recompute_data_size),
    ]
| Python | 0 | |
2df34105a58a05fd1f50f88bc967360b4bd9afc8 | Create LongestIncreasingSubseq_001.py | leetcode/300-Longest-Increasing-Subsequence/LongestIncreasingSubseq_001.py | leetcode/300-Longest-Increasing-Subsequence/LongestIncreasingSubseq_001.py | class Solution(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
if n == 0:
return 0
maxlen = [1 for i in range(n)]
for i in range(1, n):
for j in range(i):
if nums[j] < nums[i] and maxlen[j] + 1 > maxlen[i]:
maxlen[i] = maxlen[j] + 1
return max(maxlen)
| Python | 0.000562 | |
d37f57bc2b6816759a6e7108cef4a03322a622ce | Create generator.py | generator.py | generator.py | #!/usr/bin/python3
import random
import time
import sys
import Being
# print python version- dev purposes
print(sys.version)
# generic dice
def x_dices_n(x, n):
    """Roll x dice with n faces each and return the summed result."""
    return sum(random.randint(1, n) for _ in range(x))
# crude race selector ;)
player = Being.Human()
# roll for stats with generic dice
fresh_stats=[]
for x in range(0, 8):
    fresh_stats.append(x_dices_n(2, 10))
# sorts rolled results, removes lowest result, adds 11 as Shalya'a Favor, sorts again
fresh_stats.sort()
fresh_stats.pop(0)
fresh_stats.append(11)
fresh_stats.sort(reverse=True)
# print list without brackets(stat_listed_String)
stat_listed_String = ' '.join(str(S) for S in fresh_stats)
print('rolled')
print(stat_listed_String)
# raw list for chosen stats
chosen_stats = [0] * 8
# empty list for roll enumeration - to avoid doubled attribution
used_stats=[]
# tuple with stat names
stat_first_names = ('WS', 'BS', 'S', 'T', 'Ag', 'Int', 'WP', 'Fel')
# tuple with second line stat names
stat_second_names = ('A', 'W', 'SB', 'TB', 'M', 'Mag', 'IP', 'FP')
# stats preparation
# value as a string
# Interactive assignment loop: the user maps each rolled value to a stat slot.
for idx, val in enumerate(fresh_stats):
    print('value '+str(val)+' you want to have as ?')
    for Ind, Vart in enumerate(stat_first_names):
        if (used_stats.count(Ind))==1:
            print('*',end='')
        print(Vart,end='\t')
    print('\n')
    for i in range(8):
        print(i,end='\t')
    print('\n')
    while True:
        try:
            index = int(input('? ')) # input stat index
            if (used_stats.count(index)!=0): # check if not assigned already
                # NOTE(review): StatPresentError is never defined; this raise
                # only "works" because the bare except below catches NameError.
                raise StatPresentError() # give one more time if already assigned
            chosen_stats[index]=val # assign value to index
            used_stats.append(index) # notes what is assigned
        except KeyboardInterrupt:
            print('BYE!')
            sys.exit(0)
        except:
            # NOTE(review): bare except also swallows genuine bugs, not just
            # bad user input (ValueError/IndexError).
            print('Provide once more for what do you want to assign value '+str(val))
            continue
        else:
            break
for w in range(0, 60):
    print("*", end='')
print('\n')
print(*stat_first_names, sep='\t')
print(*chosen_stats, sep='\t')
# test purposes
# print(*used_stats, sep='\t')
# print(*fresh_stats, sep='\t')
# increment race base with chosen stats
print('Your character body')
player.body1 = [sum(x) for x in zip(player.base_line_1, chosen_stats)]
# SB / TB are the tens digit of Strength / Toughness.
player.base_line_2[2] = player.body1[2] // 10
player.base_line_2[3] = player.body1[3] // 10
print(*stat_first_names, sep='\t')
print(*player.body1, sep='\t')
print(*stat_second_names, sep='\t')
print(*player.base_line_2, sep='\t')
# save to file
time_string = time.strftime("%Y-%m-%d--%H%M%S")
filename = ('statistics-' + time_string + '.txt')
# NOTE(review): consider `with open(...)` so the file closes on error too.
f = open(filename, 'w')
for S in fresh_stats:
    f.write(str(S))
    f.write('\t')
f.write('\n'+str(sum(fresh_stats)))
f.write('\n')
for i in range(8):
    f.write(str(stat_first_names[i]))
    f.write('\t')
f.write('\n')
for D in chosen_stats:
    f.write(str(D))
    f.write('\t')
f.write('\n')
for A in player.body1:
    f.write(str(A))
    f.write('\t')
f.close()
| Python | 0.000001 | |
c2d26a5942cb22f4510abd6d5ff8c83d6a386810 | make migrations and model updates | masterlist/candidates/migrations/0005_auto_20160725_1759.py | masterlist/candidates/migrations/0005_auto_20160725_1759.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rework Candidate: drop mask/surface-brightness columns and add
    DES follow-up / classification / photometric-redshift fields
    (all new fields are nullable so existing rows migrate cleanly)."""
    dependencies = [
        ('candidates', '0004_auto_20160708_1422'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='candidate',
            name='mask_blue',
        ),
        migrations.RemoveField(
            model_name='candidate',
            name='mask_red',
        ),
        migrations.RemoveField(
            model_name='candidate',
            name='sb_max',
        ),
        migrations.RemoveField(
            model_name='candidate',
            name='sb_min',
        ),
        migrations.AddField(
            model_name='candidate',
            name='data_season',
            field=models.CharField(max_length=64, null=True, verbose_name=b'DES data season'),
        ),
        migrations.AddField(
            model_name='candidate',
            name='dec_field',
            field=models.FloatField(null=True, verbose_name=b'Dec Field'),
        ),
        migrations.AddField(
            model_name='candidate',
            name='followup_date',
            field=models.CharField(max_length=64, null=True, verbose_name=b'followup date'),
        ),
        migrations.AddField(
            model_name='candidate',
            name='followup_facility',
            field=models.CharField(max_length=64, null=True, verbose_name=b'followup facility'),
        ),
        migrations.AddField(
            model_name='candidate',
            name='followup_success',
            field=models.CharField(max_length=64, null=True, verbose_name=b'followup success'),
        ),
        migrations.AddField(
            model_name='candidate',
            name='lens_class',
            field=models.CharField(max_length=64, null=True, verbose_name=b'Lens (Y/N)'),
        ),
        migrations.AddField(
            model_name='candidate',
            name='ra_field',
            field=models.FloatField(null=True, verbose_name=b'RA Field'),
        ),
        migrations.AddField(
            model_name='candidate',
            name='system_type',
            field=models.CharField(max_length=64, null=True, verbose_name=b'Type of Candidate (gal or qso)'),
        ),
        migrations.AddField(
            model_name='candidate',
            name='z_phot_lens',
            field=models.FloatField(null=True, verbose_name=b'Z photo lens'),
        ),
        migrations.AddField(
            model_name='candidate',
            name='z_spec_src',
            # NOTE(review): trailing space in the verbose name below.
            field=models.FloatField(null=True, verbose_name=b'Z spec source '),
        ),
    ]
| Python | 0 | |
94e4d30dbdbcf9765bf731b1bd792d0fcf3f9d4a | Add prettification middleware | maccman/middleware/prettify.py | maccman/middleware/prettify.py | from bs4 import BeautifulSoup
class PrettifyMiddleware(object):
    """Django middleware that pretty-prints successful HTML responses."""

    def process_response(self, request, response):
        """Re-indent the body of 200 text/html responses via BeautifulSoup."""
        if response.status_code == 200 and \
                response["content-type"].startswith("text/html"):
            response.content = BeautifulSoup(response.content).prettify()
        return response
| Python | 0.000001 | |
be530dc2e18ccbeeb3e4396f47d2a527364e6ab1 | Add migration for ADS.added_via | migrations/versions/f8c0bde5d368_match_sqlalchemy_defintion_and_actual_.py | migrations/versions/f8c0bde5d368_match_sqlalchemy_defintion_and_actual_.py | """Match sqlalchemy defintion and actual schema
Revision ID: f8c0bde5d368
Revises: ae904ac154cf
Create Date: 2019-11-19 11:24:40.555110
"""
# revision identifiers, used by Alembic.
revision = 'f8c0bde5d368'
down_revision = 'ae904ac154cf'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Disallow NULL in ADS.added_via, matching the SQLAlchemy model."""
    op.alter_column(
        'ADS',
        'added_via',
        existing_type=postgresql.ENUM('form', 'api', name='via'),
        nullable=False,
    )
def downgrade():
    """Revert ADS.added_via to nullable."""
    op.alter_column(
        'ADS',
        'added_via',
        existing_type=postgresql.ENUM('form', 'api', name='via'),
        nullable=True,
    )
| Python | 0.000001 | |
50415300e3ce1e7cc10782aa4661da14d900d6de | Add code generation tests | benchmarks/regression/benchmarks/codegen.py | benchmarks/regression/benchmarks/codegen.py | from examples.seismic.tti.tti_example import tti_setup
repeat = 3
class TTI(object):
    """Regression benchmark for the seismic TTI example.

    NOTE(review): looks like an asv (airspeed velocity) benchmark class --
    the harness times the ``time_*`` methods and reads the module-level
    ``repeat``; confirm against the benchmark runner configuration.
    """
    # Discretization order handed to tti_setup.
    space_order = 12
    def setup(self):
        # Build the solver once; both timed methods reuse it.
        self.solver = tti_setup(space_order=TTI.space_order)
    def time_forward(self):
        self.solver.op_fwd()
    def time_adjoint(self):
        self.solver.op_adj()
| Python | 0 | |
5dd3424e9d95c12c2fb4c770f527b85b928da705 | create a separate module for decoration/coloring | decorate.py | decorate.py | class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m' | Python | 0 | |
89d08498f7f7e12fa5486eb88f64829621aa27f9 | Add missing migration | src/nodeconductor_saltstack/saltstack/migrations/0005_label_change.py | src/nodeconductor_saltstack/saltstack/migrations/0005_label_change.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Update the human-readable labels of the SaltStack service model.

    NOTE(review): ``verbose_name_plural`` is set to the singular
    'SaltStack service' -- confirm this is intentional.
    """
    dependencies = [
        ('saltstack', '0004_remove_spl_state'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='saltstackservice',
            options={'verbose_name': 'SaltStack service', 'verbose_name_plural': 'SaltStack service'},
        ),
    ]
| Python | 0.0002 | |
8cde7867eb98cc56533ab0156768ad2409e8c65e | Fix bug | user_notification.py | user_notification.py | #!/usr/bin/python
# encoding: utf-8
from datetime import datetime
import string
from django.core.mail import send_mail
from notifications.models import UserNotification
import settings
email_template = u'''${username}您好:
您有${cnt}条新消息,请点击下面的链接查看:
${msg_url}
感谢使用我们的网站!
${site_name}团队
'''
# Daily digest script (intended to run from cron): count today's
# notifications per recipient and email each one a summary link.
today = datetime.now()
site_name = settings.SITE_NAME
subject = u'%s:新消息' % site_name
# Normalize SITE_BASE / SITE_ROOT so the joined URL has single slashes.
site_base = settings.SITE_BASE
if site_base[-1] != '/':
    site_base += '/'
site_root = settings.SITE_ROOT
if site_root[-1] != '/':
    site_root += '/'
url = site_base + site_root + 'home/my/'
notifications = UserNotification.objects.all()
# d maps recipient -> number of notifications created today.
d = {}
for e in notifications:
    # NOTE(review): naive per-field date compare; assumes timestamps and
    # datetime.now() share a timezone -- confirm.
    if today.year != e.timestamp.year or today.month != e.timestamp.month or \
        today.day != e.timestamp.day:
        continue
    if d.has_key(e.to_user):
        d[e.to_user] += 1
    else:
        d[e.to_user] = 1
for k in d.keys():
    to_user = k
    cnt = d[k]
    template = string.Template(email_template)
    content = template.substitute(username=to_user, cnt=cnt, msg_url=url, \
        site_name=site_name)
    send_mail(subject, content, settings.DEFAULT_FROM_EMAIL, [to_user], \
        fail_silently=False)
| #!/usr/bin/python
# encoding: utf-8
from datetime import datetime
import string
from django.core.mail import send_mail
from notifications.models import UserNotification
import settings
email_template = u'''${username}您好:
您有${cnt}条新消息,请点击下面的链接查看:
${msg_url}
感谢使用我们的网站!
${site_name}团队
'''
today = datetime.now()
site_name = settings.SITE_NAME
subject = u'%s:新消息' % site_name
site_base = settings.SITE_BASE
if site_base[-1] != '/':
site_base += '/'
url = site_base + 'home/my/'
notifications = UserNotification.objects.all()
d = {}
for e in notifications:
if today.year != e.timestamp.year or today.month != e.timestamp.month or \
today.day != e.timestamp.day:
continue
if d.has_key(e.to_user):
d[e.to_user] += 1
else:
d[e.to_user] = 1
for k in d.keys():
to_user = k
cnt = d[k]
template = string.Template(email_template)
content = template.substitute(username=to_user, cnt=cnt, msg_url=url, \
site_name=site_name)
send_mail(subject, content, settings.DEFAULT_FROM_EMAIL, [to_user], \
fail_silently=False)
| Python | 0.000001 |
6d8fb7d052dc7341ecd9fb3388b804b82f77fa0f | add example usage | examples/scores.py | examples/scores.py | """Get a list of average scores for each professor in a department."""
import sys
from collections import defaultdict
import penncoursereview as pcr
def prof_scores(dept):
    """Map professor name -> list of instructor-quality ratings (floats)
    across every review in the given department (via penncoursereview)."""
    professor_scores = defaultdict(list)
    dept = pcr.Department(dept)
    for review in dept.reviews.values:
        instructor = review.instructor
        rating = review.ratings.rInstructorQuality
        professor_scores[instructor.name].append(float(rating))
    return professor_scores
def averages(dept):
    """Yield (professor name, mean instructor-quality rating) pairs."""
    professor_scores = prof_scores(dept)
    for prof, scores in professor_scores.iteritems():
        score = sum(scores) / len(scores)
        yield prof, score
def main(dept):
    """Print professors sorted ascending by their average rating."""
    for prof, avg in sorted(averages(dept), key=lambda x: x[1]):
        print "%s %.2f" % (prof, avg)
if __name__ == "__main__":
    if (len(sys.argv) < 2):
        print "usage: scores.py <department>"
    else:
        main(sys.argv[1])
| Python | 0 | |
7dbc289897ecf35f0b709177ac3feacffd8691ca | add a test file | ch_04/testfile.py | ch_04/testfile.py | #this is a test file for eclipse | Python | 0.000001 | |
50194e14a75c3300996f64c415a8593b1243af9f | Add api_helper for testing | src/tests/ggrc/api_helper.py | src/tests/ggrc/api_helper.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc.app import app
from ggrc.services.common import Resource
from ggrc import services
import inspect
import flask
import logging
from sqlalchemy.orm.collections import InstrumentedList
# style: should the class name be all capitals?
class Api():
  """Thin wrapper around the Flask test client for the gGRC REST API.

  Handles login, JSON encode/decode via the gGRC Resource, and the
  ETag / Last-Modified concurrency headers needed by PUT and DELETE.
  """
  def __init__(self):
    self.tc = app.test_client()
    self.tc.get("/login")
    self.resource = Resource()
    # Maps model class name -> service endpoint name.
    self.service_dict = {s.model_class.__name__: s.name
                         for s in services.all_services()}
    self.headers = {'Content-Type': 'application/json',
                    "X-Requested-By": "gGRC"
                    }
    self.user_headers = {}
  def set_user(self, person=None):
    """Re-login as `person`, or anonymously when person is None."""
    if person:
      self.user_headers = {
          "X-ggrc-user": self.resource.as_json({
              "name": person.name,
              "email": person.email,
          })
      }
    else:
      self.user_headers = {}
    self.tc.get("/logout")
    self.tc.get("/login", headers=self.user_headers)
  def get_service(self, obj):
    """Return the service endpoint name for a model class or instance."""
    cls = obj if inspect.isclass(obj) else obj.__class__
    return self.service_dict[cls.__name__]
  def api_link(self, obj, obj_id=None):
    """Build the API URL for obj, optionally addressing a single id."""
    obj_id = "" if obj_id is None else "/" + str(obj_id)
    return "/api/%s%s" % (self.get_service(obj), obj_id)
  def data_to_json(self, response):
    """Attach the decoded JSON body to the response as ``response.json``."""
    try:
      response.json = flask.json.loads(response.data)
    except ValueError:
      # Was a bare except; only JSON decoding is expected to fail here.
      response.json = None
    return response
  def send_request(self, request, obj, data, headers=None, api_link=None):
    """Send `data` (JSON-encoded) to obj's endpoint with auth headers."""
    # Bug fix: `headers` previously defaulted to a shared mutable {} that
    # was mutated below, leaking headers between unrelated calls; copy it.
    headers = dict(headers or {})
    if api_link is None:
      api_link = self.api_link(obj)
    headers.update(self.headers)
    headers.update(self.user_headers)
    json_data = self.resource.as_json(data)
    logging.info("request json" + json_data)
    response = request(api_link, data=json_data, headers=headers.items())
    return self.data_to_json(response)
  def put(self, obj, data):
    """PUT obj, fetching the ETag/Last-Modified headers the API requires."""
    response = self.get(obj, obj.id)
    headers = {
        "If-Match": response.headers.get("Etag"),
        "If-Unmodified-Since": response.headers.get("Last-Modified")
    }
    api_link = self.api_link(obj, obj.id)
    return self.send_request(self.tc.put, obj, data, headers=headers, api_link=api_link)
  def post(self, obj, data):
    """POST data to obj's collection endpoint."""
    return self.send_request(self.tc.post, obj, data)
  def get(self, obj, id):
    """GET a single object; the response carries a decoded ``.json``."""
    return self.data_to_json(self.tc.get(self.api_link(obj, id)))
  def delete(self, obj, id):
    """DELETE obj.

    NOTE(review): the `id` argument is ignored (obj.id is used), kept
    only for signature compatibility with existing callers.
    """
    response = self.get(obj, obj.id)
    headers = {
        "If-Match": response.headers.get("Etag"),
        "If-Unmodified-Since": response.headers.get("Last-Modified")
    }
    headers.update(self.headers)
    api_link = self.api_link(obj, obj.id)
    return self.tc.delete(api_link, headers=headers)
| Python | 0 | |
2b1b1e1d5db7edf4350239b712d2e872e7769d84 | add problem 24 | euler024.py | euler024.py | #!/usr/bin/env python
def nextperm(s):
    '''
    Advance list s in place to its next lexicographic permutation.

    Returns the mutated list, or None when s is already the final
    (non-increasing) permutation.  Algorithm:
    http://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
    '''
    # Rightmost ascent: largest k with s[k] < s[k+1].
    k = next((i for i in range(len(s) - 2, -1, -1) if s[i] < s[i + 1]), None)
    if k is None:
        # Sequence is in descending order: last permutation reached.
        return None
    # Rightmost element beyond k that exceeds s[k].
    l = next(i for i in range(len(s) - 1, k, -1) if s[i] > s[k])
    s[k], s[l] = s[l], s[k]
    # Reverse the suffix to obtain its smallest ordering.
    s[k + 1:] = reversed(s[k + 1:])
    return s
if __name__=="__main__":
debugging = False
s = range(10)
permutations = 10**6-1
for perm in xrange(permutations):
nextperm(s)
if debugging:
print s
print s
| Python | 0.998228 | |
03279bbc6193d3944dcd2542daa65701a1e0eded | Add solution for problem 26 | euler026.py | euler026.py | #!/usr/bin/python
"""
For resolve this, we have to find the maximum
Full Reptend Prime int he given limit. To do that, we need
to check if the 10 is a primitive root of p.
See http://mathworld.wolfram.com/FullReptendPrime.html for details
"""
from sys import exit
def largest_reptend(limit=999):
    """Return the largest full reptend prime p with 7 < p <= limit, or None.

    p is full reptend iff the multiplicative order of 10 modulo p is
    exactly p - 1, i.e. 1/p repeats with the maximal period p - 1.

    Bug fix: the original for-else printed the first p whose inner loop
    finished without a break, which also happens when 10**k % p is NEVER 1
    (p not coprime to 10) -- e.g. it reported 995 instead of 983.
    """
    start = limit if limit % 2 else limit - 1  # even p > 2 can never qualify
    for p in range(start, 7, -2):
        if p % 5 == 0:
            continue  # 10 and p share a factor, so 10**k % p is never 1
        for k in range(1, p):
            if pow(10, k, p) == 1:  # modular pow instead of huge 10**k
                if k == p - 1:
                    return p
                break  # order of 10 is smaller than p - 1
    return None


print(largest_reptend(999))
| Python | 0.000159 | |
8373611a9c5b035953aee208bc65f4be92890314 | add the conversion script | scripts/conversionScripts/toTransformationNode.py | scripts/conversionScripts/toTransformationNode.py | import xml.etree.ElementTree as ET
import xml.dom.minidom as pxml
import os
def convert(tree,fileName=None):
  """
    Converts input files to be compatible with merge request ....
    change the attribute of node <variablesTransformation> from 'model' to 'distribution'
    @ In, tree, xml.etree.ElementTree.ElementTree object, the contents of a RAVEN input file
    @ In, fileName, str, optional, unused (kept for the converter API)
    @ Out, tree, xml.etree.ElementTree.ElementTree object, the modified RAVEN input file
  """
  simulation = tree.getroot()
  if simulation.tag!='Simulation': return tree #this isn't an input file
  # Bug fix: distName was unbound (NameError) when the input has no
  # <MultivariateNormal> node; default to None and only set the new
  # attribute when a distribution name was actually found.
  distName = None
  for distNode in simulation.iter('MultivariateNormal'):
    distName = distNode.get('name')
    break
  for vtNode in simulation.iter('variablesTransformation'):
    if distName is not None:
      vtNode.set('distribution', distName)
    # Drop the obsolete 'model' attribute if present.
    if vtNode.get('model') is not None:
      del vtNode.attrib['model']
  return tree
if __name__=='__main__':
  # Delegate file handling/CLI parsing to the shared converter driver.
  import convert_utils
  import sys
  convert_utils.standardMain(sys.argv,convert)
| Python | 0.000001 | |
55a35c642b64a6bdb8314b9470c1f7fedb16478f | print results | tensorflow/python/keras/utils/np_utils.py | tensorflow/python/keras/utils/np_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.to_categorical')
def to_categorical(y, num_classes=None, dtype='float32'):
  """One-hot encodes a class vector (integers) into a binary class matrix.

  E.g. for use with categorical_crossentropy.

  Arguments:
      y: class vector to be converted into a matrix
          (integers from 0 to num_classes).
      num_classes: total number of classes.
      dtype: The data type expected by the input. Default: `'float32'`.

  Returns:
      A binary matrix representation of the input. The classes axis is
      placed last.
  """
  y = np.array(y, dtype='int')
  input_shape = y.shape
  # Squeeze a trailing length-1 axis so column vectors one-hot cleanly.
  if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
    input_shape = tuple(input_shape[:-1])
  flat = y.ravel()
  if not num_classes:
    num_classes = np.max(flat) + 1
  # Row i of the identity matrix is the one-hot vector for class i.
  one_hot = np.eye(num_classes, dtype=dtype)[flat]
  return np.reshape(one_hot, input_shape + (num_classes,))
@keras_export('keras.utils.normalize')
def normalize(x, axis=-1, order=2):
  """Normalizes a Numpy array along the given axis.

  Arguments:
      x: Numpy array to normalize.
      axis: axis along which to normalize.
      order: Normalization order (e.g. 2 for L2 norm).

  Returns:
      A normalized copy of the array.
  """
  norms = np.atleast_1d(np.linalg.norm(x, order, axis))
  # All-zero slices would divide by zero; treat their norm as 1 instead.
  norms = np.where(norms == 0, 1, norms)
  return x / np.expand_dims(norms, axis)
| # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.to_categorical')
def to_categorical(y, num_classes=None, dtype='float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
Usage Example:
>>> y = [0, 1, 2, 3]
>>> tf.keras.utils.to_categorical(y, num_classes=4)
array([...], dtype=float32)
Arguments:
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input. Default: `'float32'`.
Returns:
A binary matrix representation of the input. The classes axis is placed
last.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
@keras_export('keras.utils.normalize')
def normalize(x, axis=-1, order=2):
"""Normalizes a Numpy array.
Arguments:
x: Numpy array to normalize.
axis: axis along which to normalize.
order: Normalization order (e.g. 2 for L2 norm).
Returns:
A normalized copy of the array.
"""
l2 = np.atleast_1d(np.linalg.norm(x, order, axis))
l2[l2 == 0] = 1
return x / np.expand_dims(l2, axis)
| Python | 0.000004 |
ae972cd7fe6856a1265981810ea1d03fc5efcf54 | write test for django admin | tests/organisations/test_admin.py | tests/organisations/test_admin.py | import pytest
from django.urls import reverse
@pytest.mark.django_db
def test_organisation_admin_form(client, organisation,
                                 admin, user_factory,
                                 group_factory):
    """Admin add-form for organisations renders and accepts a POST."""
    client.login(username=admin, password='password')
    url = reverse('admin:meinberlin_organisations_organisation_add')
    response = client.get(url)
    assert response.status_code == 200
    data = {'name': 'My Organisation'}
    response = client.post(url, data)
    # NOTE(review): deliberate failing placeholder -- the POST response is
    # never checked; replace with a real assertion on the redirect/creation.
    assert 1 == 2
| Python | 0.000001 | |
cce3b017f36de8fb8682971e13201c0143c524cf | add indexes to make deleting faster | aeromancer/db/alembic/versions/a3d002d161a_add_indexes.py | aeromancer/db/alembic/versions/a3d002d161a_add_indexes.py | """add indexes
Revision ID: a3d002d161a
Revises: 22e0aa22ab8e
Create Date: 2014-11-24 14:24:29.824147
"""
# revision identifiers, used by Alembic.
revision = 'a3d002d161a'
down_revision = '22e0aa22ab8e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Index the FK columns so cascading deletes no longer full-scan.
    op.create_index('file_project_idx', 'file', ['project_id'])
    op.create_index('line_file_idx', 'line', ['file_id'])
def downgrade():
    # Drop the indexes in reverse order of creation.
    op.drop_index('line_file_idx', 'line')
    op.drop_index('file_project_idx', 'file')
| Python | 0.000001 | |
234c03381209d860d7b6ff29263f927736822c1e | Add shellFlags.py (not yet integrated) | js/shellFlags.py | js/shellFlags.py | import random
import os
import subprocess
def memoize(f, cache={}):
    '''Function decorator that caches function results.

    The shared default ``cache`` is intentional: entries are keyed by the
    wrapped function, so distinct functions never collide.
    Adapted from http://code.activestate.com/recipes/325205-cache-decorator-in-python-24/#c9
    '''
    def wrapper(*args, **kwargs):
        key = (f, tuple(args), frozenset(kwargs.items()))
        try:
            return cache[key]
        except KeyError:
            cache[key] = f(*args, **kwargs)
            return cache[key]
    return wrapper
# This (or something like it) could move to inspectShell.py, where
# it would replace exitCodeDbgOptOrJsShellXpcshell.
@memoize
def shellSupportsFlag(shell, flag):
    """Return True iff the js shell at `shell` accepts `flag`.

    Probes by running `shell flag -e 42` with output discarded; exit
    code 0 means the flag is supported.  Results are memoized, so each
    (shell, flag) pair spawns at most one subprocess.
    """
    with open(os.devnull, 'w') as devnull:
        retCode = subprocess.call([shell, flag, "-e", "42"], stdout=devnull, stderr=devnull)
        # Sanity check: any other exit code suggests a crash, not a
        # mere usage error.
        assert 0 <= retCode <= 3
        return (retCode == 0)
def chance(p):
    """Return True with probability p (where 0 <= p <= 1)."""
    return p > random.random()
def randomFlagSet(shell):
    '''
    Returns a random list of command-line flags appropriate for the given shell.
    Only works for spidermonkey js shell. Does not work for xpcshell.
    '''
    args = []
    # Independently decide whether each JIT/type-inference mode is enabled.
    jaeger = chance(.7)
    ion = shellSupportsFlag(shell, "--ion") and chance(.7)
    infer = chance(.7)
    if shellSupportsFlag(shell, "--no-ion"):
        # New js shell defaults jaeger, ion, and infer to on! See bug 724751.
        if not jaeger:
            args.append("--no-jm")
        if not ion:
            args.append("--no-ion")
        if not infer:
            args.append("--no-ti")
    else:
        # Old shells (and xpcshell?) default jaeger, ion, and infer to off.
        if jaeger:
            args.append("-m")
        if ion:
            args.append("--ion")
        if infer:
            args.append("-n")
    # Extra tuning flags, only meaningful when the matching JIT is on.
    if jaeger:
        if chance(.4):
            args.append("--always-mjit") # aka -a
        if chance(.2):
            args.append("--debugjit") # aka -d
        if chance(.2):
            args.append("--execute=mjitChunkLimit(" + str(random.randint(5, 100)) + ")")
    if ion:
        if chance(.4):
            args.append("--ion-eager")
        if chance(.2):
            args.append("--ion-gvn=" + random.choice(["off", "pessimistic", "optimistic"]))
        if chance(.2):
            args.append("--ion-regalloc=" + random.choice(["greedy", "lsra"]))
        if chance(.2):
            args.append("--ion-licm=off")
        if chance(.2):
            args.append("--ion-range-analysis=off")
        if chance(.2):
            args.append("--ion-inlining=off")
        if chance(.2):
            args.append("--ion-osr=off")
    #if chance(.05):
    #    args.append("--execute=verifybarriers()")
    if chance(.05):
        args.append("--dump-bytecode") # aka -D
    # GC zeal: mode 1 or 2, a random frequency, and compartmental on/off.
    if shellSupportsFlag(shell, "--gc-zeal=0") and chance(.05):
        args.append("--gc-zeal=" + random.choice(["1", "2"]) + "," + str(random.randint(1, 100)) + "," + random.choice(["0", "1"]))
    return args
def basicFlagSets(shell):
    '''
    Returns a list of flag combinations covering the basic JIT on/off
    matrix for the given js shell binary (new- vs old-style flags).
    '''
    # Bug fix: this previously called the undefined name `shellSupports`,
    # which raised NameError; the helper is `shellSupportsFlag`.
    if shellSupportsFlag(shell, "--no-ion"):
        # From https://bugzilla.mozilla.org/attachment.cgi?id=616725
        return [
            [],
            ['--no-jm'],
            ['--ion-gvn=off', '--ion-licm=off'],
            ['--no-ion', '--no-jm', '--no-ti'],
            ['--no-ion', '--no-ti'],
            ['--no-ion', '--no-ti', '-a', '-d'],
            ['--no-ion', '--no-jm'],
            ['--no-ion'],
            ['--no-ion', '-a'],
            ['--no-ion', '-a', '-d'],
            ['--no-ion', '-d']
        ]
    else:
        # ,m,am,amd,n,mn,amn,amdn,mdn
        # NOTE(review): 'a'/'d' below lack the leading dash used elsewhere
        # ('-a'/'-d'); confirm against the old shell's expected flags.
        return [
            [],
            ['-m'],
            ['-m', 'a'],
            ['-m', 'a', 'd'],
            ['-n'],
            ['-m', '-n'],
            ['-m', '-n', 'a'],
            ['-m', '-n', 'a', 'd'],
            ['-m', '-n', 'd']
        ]
# Consider adding a function (for compareJIT reduction) that takes a flag set
# and returns all its (meaningful) subsets.
def testRandomFlags():
    """Smoke test: print 100 random flag sets for the shell in argv[1]."""
    import sys
    for i in range(100):
        print ' '.join(randomFlagSet(sys.argv[1]))
if __name__ == "__main__":
    testRandomFlags()
| Python | 0 | |
2283ffa381c3e4ff1c5dee639d3cb7c3d09e4bff | format data with portion of cluster center | smoking/integrate_baseline/integrate_with_attr.py | smoking/integrate_baseline/integrate_with_attr.py | import csv
from numpy import *
def data_import(url):
    """Load SMOKE samples from a CSV file.

    Expects a header row; only rows whose second column equals 'SMOKE'
    are kept.  Columns 3 and 2 are read as floats (presumably lon/lat --
    confirm against the data file's header).

    Fixes: the 'rU' open mode (removed in Python 3.11) is replaced by
    plain 'r', and the unused _x/_y locals are dropped.

    @param url: path to the CSV file.
    @return: (points, point_to_id) where points is an (n, 2) float array
        and point_to_id maps each row index in points to the sample id
        from the first CSV column.
    """
    with open(url, 'r') as f_in:
        raw_data = list(csv.reader(f_in))
    points = []
    point_to_id = {}
    for row in raw_data[1:]:  # skip the header row
        if row[1] == 'SMOKE':
            point_to_id[len(points)] = row[0]
            points.append([float(row[3]), float(row[2])])
    return asarray(points), point_to_id
import time
import random
import matplotlib.pyplot as plt
from sklearn.cluster import MeanShift, estimate_bandwidth, KMeans
import json
from collections import Counter
def meaning_shift(_point):
    """Cluster _point with sklearn MeanShift and return the cluster count.

    Bandwidth is estimated from the data (quantile=0.5).  Labels and
    centers are computed but only the number of clusters is returned;
    `cluster_centers` is currently unused beyond fitting.
    """
    bandwidth = estimate_bandwidth(_point, quantile=0.5)
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
    ms.fit(_point)
    labels = ms.labels_
    cluster_centers = ms.cluster_centers_
    labels_unique = unique(labels)
    n_clusters = len(labels_unique)
    print("number of estimated clusters : %d" % n_clusters)
    return n_clusters
def get_data(URL, filed):
    """Read a CSV file into a list of per-row dicts.

    When `filed` is True, also return the list of header field names.
    """
    with open(URL) as csv_file:
        reader = csv.DictReader(csv_file)
        attributes = reader.fieldnames
        rows = list(reader)
    return (rows, attributes) if filed == True else rows
def merge_attribute(data1, data2, attributes):
    """Join demographic columns from data2 onto data1, matching rows by
    subject id (data1['SUBJECT'] vs data2['subject']).

    Mutates data1's rows and extends `attributes` with the new column
    names; returns both for convenience.
    """
    extra = ('EDUCATE', 'GENDER', 'AGE', 'JOB', 'LOCTIME')
    for row in data1:
        for obj in data2:
            if int(float(obj['subject'])) == int(row['SUBJECT']):
                for name in extra:
                    row[name] = obj[name]
    for name in extra:
        attributes.append(name)
    return data1, attributes
def kmeans_plot(_point,n_clusters_,point_to_id):
    """Cluster the points with k-means, then summarise the demographics of
    the subjects in each cluster and write the summary to
    ../temp_data/cluster_count.json.

    _point      -- sequence of [x, y] coordinates
    n_clusters_ -- number of clusters to fit
    point_to_id -- maps point index -> subject id (numeric string)
    """
    # print "points:",_point
    # print len(_point)
    k_means = KMeans(init='k-means++', n_clusters=n_clusters_)
    k_means.fit(asarray(_point))
    k_means_labels = k_means.labels_
    # print "label: ",k_means_labels
    # Group subject ids by the cluster label of their points.
    distinct = dict()
    for index,data in enumerate(k_means_labels):
        distinct[data] = distinct.get(data,[])
        distinct[data].append(int(point_to_id[index]))
    # print len(k_means_labels)
    for index in distinct:
        print list(unique(distinct[index]))
        # distinct[index] = len(unique(distinct[index]))
    print distinct
    k_means_cluster_centers = k_means.cluster_centers_
    print "centers:",k_means_cluster_centers,len(k_means_cluster_centers)
    k_means_labels_unique = unique(k_means_labels)  # NOTE(review): unused
    # Per-subject demographic attributes, read from a hard-coded path.
    user_info = get_data('../temp_data/base_line_attr.csv',False)
    result = []
    for index in distinct:
        temp = dict()
        temp['cluster_id'] = int(index)
        temp['cluster_center'] = list(k_means_cluster_centers[index])
        # Collect each demographic column for the de-duplicated subjects
        # that fall in this cluster.
        educatte = []
        gender = []
        age = []
        job = []
        loctime = []
        print list(unique(distinct[index]))
        for subject in list(unique(distinct[index])):
            # print int(row['SUBJECT'])
            for row in user_info:
                # print row['SUBJECT'],subject,type(row['SUBJECT']),type(subject)
                if int(row['SUBJECT']) == subject:
                    # print "get"
                    educatte.append(row['EDUCATE'])
                    gender.append(row['GENDER'])
                    age.append(row['AGE'])
                    job.append(row['JOB'])
                    loctime.append(row['LOCTIME'])
                    break
        # print educatte
        # Convert the raw value lists into {value: fraction} maps.
        temp['education'] = count_ratio(educatte)
        temp['gender'] = count_ratio(gender)
        temp['age'] = count_ratio(age)
        temp['job'] = count_ratio(job)
        temp['locatime'] = count_ratio(loctime)  # NOTE(review): key misspelt ('locatime')
        result.append(temp)
    print result
    # NOTE(review): 'wr' is not a standard mode string; Python 2 treats it
    # as 'w', while Python 3 rejects it -- should probably just be 'w'.
    with open('../temp_data/cluster_count.json','wr') as f_out:
        f_out.write(json.dumps(result))
def count_ratio(data):
    """Return {str(value): fraction of occurrences} for the items in *data*.

    An empty input yields an empty dict.
    """
    total = len(data)
    return {str(value): float(count) / total
            for value, count in Counter(data).items()}
# with open('random_center.json','wr') as f_out:
# f_out.write(json.dumps({'center':k_means_cluster_centers.tolist()}))
# plot the result of k-means
# fig = plt.figure()
# r = lambda: random.randint(0,255)
# colors = ['#%02X%02X%02X' % (r(),r(),r()) for countnums in range(22)]
#
# X = asarray(_point)
# ax = fig.add_subplot(111)
#
# for k, col in zip(range(n_clusters_), colors):
# my_members = k_means_labels == k
# cluster_center = k_means_cluster_centers[k]
# ax.plot(X[my_members, 0], X[my_members, 1], 'w',
# markerfacecolor=col, marker='.',markersize=8)
# ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
# markeredgecolor='k')
#
# ax.set_title('Clustering of Random')
# plt.show()
# Script entry point: load SMOKE points, estimate the cluster count with
# mean shift, then run the k-means demographic summary.
if __name__=='__main__':
    points, point_to_id =data_import('../data/SmokingData.csv')
    # print point_to_id
    kmeans_plot(points,meaning_shift(points),point_to_id)
| Python | 0 | |
61c2823b5da460001ca02db6b028fc770d204e32 | Add initial test case | api_tests.py | api_tests.py | from flask import Flask, g
import unittest
import json
from simplekv.fs import FilesystemStore
from flaskext.kvsession import KVSessionExtension
from api import app, db
from common.database import Database
class APITest(unittest.TestCase):
    """Functional tests for the JSON API against a freshly reset database."""

    def setUp(self):
        # Rebind the module-level ``db`` so helpers in this module use the
        # test configuration loaded below.
        global db
        store = FilesystemStore('session')
        KVSessionExtension(store, app)
        # Load the debug config
        app.config.from_pyfile('../config.defaults.py')
        app.config.from_pyfile('../config_debug.py')
        app.secret_key = app.config['SECRET_KEY']
        db = Database(app.config)
        self._setup_database()
        app.testing = True
        self.app = app.test_client(use_cookies=True)
        self.csrf = ''

    def _setup_database(self):
        """Setup the database by clearing it and loading the schema."""
        con = db.get_connection()
        cur = con.cursor()
        # Use a context manager so the schema file handle is not leaked.
        with open('schema.sql', 'r') as schema_file:
            cur.execute(schema_file.read())
        con.commit()
        db.put_away_connection(con)

    def test_1_api_base(self):
        rv = self.app.get('/api/')
        data = json.loads(rv.data)
        # ``==`` instead of ``is``: integer identity comparisons only work
        # by accident of CPython's small-int caching.
        assert data['status']['code'] == 0
        assert data['csrf_token']
        # NOTE(review): unittest builds a fresh instance per test, so this
        # token is not actually shared with any later tests.
        self.csrf = data['csrf_token']
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python | 0.00002 | |
72db299a3974b05f511420da5e5861f3bead0065 | Create solution_1.py | problem301/Python/solution_1.py | problem301/Python/solution_1.py | #!/usr/bin/env python
# coding=utf-8
def nim():
    """Run the 28-step pair-rewriting recurrence and return the final count.

    Each pass scans the map two entries at a time and rewrites:
    [0, 0] -> [0, 0, 0, 1] (count += 3) and [0, 1] -> [0, 0] (count += 2).
    """
    pair_map = [0, 1]
    count = 3
    for _ in range(28):
        rewritten = []
        for i in range(0, len(pair_map), 2):
            pair = pair_map[i:i + 2]
            if pair == [0, 0]:
                rewritten += [0, 0, 0, 1]
                count += 3
            elif pair == [0, 1]:
                rewritten += [0, 0]
                count += 2
        pair_map = rewritten
    return count
# Print the computed answer when executed as a script.
if __name__ == "__main__":
    print(nim())
| Python | 0.005726 | |
d8a6d199828acae3c54dfcf5529d70c149f0cbd5 | add a test (#532) | plenum/test/primary_selection/test_recover_after_demoted.py | plenum/test/primary_selection/test_recover_after_demoted.py | from plenum.common.constants import ALIAS, SERVICES
from plenum.test.helper import sdk_send_random_and_check, waitForViewChange
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.pool_transactions.helper import updateNodeData, \
buildPoolClientAndWallet
from plenum.test.view_change.helper import ensure_view_change_by_primary_restart
from stp_core.common.log import getlogger
logger = getlogger()
def demote_primary_node(looper,
                        initial_pool_of_nodes,
                        pool_of_nodes,
                        poolTxnStewardNames,
                        poolTxnData,
                        tdirWithClientPoolTxns):
    """Demote the current master primary through its steward and return
    the remaining pool (the given pool minus the demoted node)."""
    demoted = [n for n in pool_of_nodes if n.has_master_primary][0]
    # Steward names are positionally aligned with the initial pool.
    steward = poolTxnStewardNames[initial_pool_of_nodes.index(demoted)]
    seed = poolTxnData["seeds"][steward].encode()
    client, wallet = buildPoolClientAndWallet((steward, seed),
                                              tdirWithClientPoolTxns)
    looper.add(client)
    looper.run(client.ensureConnectedToNodes())
    # An empty SERVICES list in the node transaction demotes the node.
    updateNodeData(looper, client, wallet, demoted,
                   {ALIAS: demoted.name, SERVICES: []})
    return list(set(pool_of_nodes) - {demoted})
def test_restart_primaries_then_demote(
        looper, txnPoolNodeSet,
        tconf, tdir, allPluginsPath,
        sdk_pool_handle, sdk_wallet_steward,
        poolTxnStewardNames,
        poolTxnData,
        tdirWithClientPoolTxns):
    """
    Force two view changes by restarting the master primary twice, then
    demote the resulting primary. After each step the pool must still be
    able to order transactions; at the end all nodes must hold the same
    ledger data.
    """
    logger.info("1. Restart Node1")
    pool_of_nodes = ensure_view_change_by_primary_restart(looper,
                                                          txnPoolNodeSet,
                                                          tconf,
                                                          tdir,
                                                          allPluginsPath,
                                                          customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 1)
    logger.info("2. Restart Node2")
    pool_of_nodes = ensure_view_change_by_primary_restart(looper,
                                                          pool_of_nodes,
                                                          tconf,
                                                          tdir,
                                                          allPluginsPath,
                                                          customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 1)
    logger.info("3. Demote Node3")
    # demote the node
    pool_of_nodes = demote_primary_node(looper,
                                        txnPoolNodeSet,
                                        pool_of_nodes,
                                        poolTxnStewardNames,
                                        poolTxnData,
                                        tdirWithClientPoolTxns)
    # make sure view changed
    # (two restarts plus the demotion are expected to leave the pool at view 3)
    waitForViewChange(looper, pool_of_nodes, expectedViewNo=3)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 10)
    ensure_all_nodes_have_same_data(looper, nodes=pool_of_nodes)
| Python | 0 | |
53bf5c12b77e19d54e3ab50ade8840843cca9649 | add sql group_by unit tests | siuba/tests/test_verb_group_by.py | siuba/tests/test_verb_group_by.py | """
Note: this test file was heavily influenced by its dbplyr counterpart.
https://github.com/tidyverse/dbplyr/blob/master/tests/testthat/test-verb-group_by.R
"""
from siuba import _, group_by, ungroup, summarize
from siuba.dply.vector import row_number, n
import pytest
from .helpers import assert_equal_query, data_frame, backend_notimpl
from string import ascii_lowercase
# Fixture data: three rows; grouping column g has groups 'a' (2 rows) and 'b' (1).
DATA = data_frame(x = [1,2,3], y = [9,8,7], g = ['a', 'a', 'b'])
# One backend-loaded copy of DATA per module; the parametrized `backend`
# fixture decides whether this is a SQL table or a local DataFrame.
@pytest.fixture(scope = "module")
def df(backend):
    return backend.load_df(DATA)
def test_group_by_no_add(df):
    # Grouping variables are recorded in the order they were given.
    gdf = group_by(df, _.x, _.y)
    assert gdf.group_by == ("x", "y")
def test_group_by_override(df):
    # A second group_by without add=True replaces the previous grouping.
    gdf = df >> group_by(_.x, _.y) >> group_by(_.g)
    assert gdf.group_by == ("g",)
def test_group_by_add(df):
    # add=True appends to the existing grouping instead of replacing it.
    gdf = group_by(df, _.x) >> group_by(_.y, add = True)
    assert gdf.group_by == ("x", "y")
def test_group_by_ungroup(df):
    # ungroup() clears all grouping variables.
    q1 = df >> group_by(_.g)
    assert q1.group_by == ("g",)
    q2 = q1 >> ungroup()
    assert q2.group_by == tuple()
# Placeholder with an intentionally failing body; kept skipped until join
# behaviour has been tested/validated.
@pytest.mark.skip("TODO: need to test / validate joins first")
def test_group_by_before_joins(df):
    assert False
# Skipped pending issue #52: group_by with a keyword expression should
# create the new column (like a mutate) and then group by it.
@pytest.mark.skip("TODO: (#52)")
def test_group_by_performs_mutate(df):
    assert_equal_query(
            df,
            group_by(z = _.x + _.y) >> summarize(n = n(_)),
            data_frame(z = 10, n = 4)
            )
| Python | 0.000001 | |
c0d0496eb2675ba2dbd5dbaa9d4b4c701409308f | Allow IHaskellPrelude.hs to not be formatting checked | verify_formatting.py | verify_formatting.py | #!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import subprocess
def hindent(contents):
    """Run *contents* through hindent (gibiansky style) and return the result.

    NOTE(review): uses a fixed scratch file (.tmp3) in the current
    directory, so concurrent runs would race; requires the `hindent`
    executable on PATH.
    """
    with open(".tmp3", "w") as f:
        f.write(contents)
    with open(".tmp3", "r") as f:
        output = subprocess.check_output(["hindent", "--style", "gibiansky"],
                                         stdin=f)
    return output.decode('utf-8')
def diff(src1, src2):
    """Return `diff` output comparing two strings ('' when they match).

    A single trailing newline on either input is stripped first, so
    inputs differing only by a final newline compare equal.
    """
    # str.endswith is safe for empty strings, unlike indexing src[-1],
    # which raised IndexError when a file's contents were empty.
    if src1.endswith("\n"):
        src1 = src1[:-1]
    if src2.endswith("\n"):
        src2 = src2[:-1]
    with open(".tmp1", "w") as f1:
        f1.write(src1)
    with open(".tmp2", "w") as f2:
        f2.write(src2)
    try:
        output = subprocess.check_output(["diff", ".tmp1", ".tmp2"])
        return output.decode('utf-8')
    except subprocess.CalledProcessError as e:
        # diff exits with status 1 when the files differ; its captured
        # stdout still holds the diff text we want.
        return e.output.decode('utf-8')
# Verify that we're in the right directory
try:
    open("ihaskell.cabal", "r").close()
except:
    # NOTE(review): bare except, and execution continues after printing
    # the message instead of exiting -- the checks below still run.
    print(sys.argv[0], "must be run from the ihaskell directory",
          file=sys.stderr)
# Find all the source files
sources = []
for source_dir in ["src", "ipython-kernel", "ihaskell-display"]:
    for root, dirnames, filenames in os.walk(source_dir):
        # Skip cabal dist directories
        if "dist" in root:
            continue
        for filename in filenames:
            # Take Haskell files, but ignore the Cabal Setup.hs
            # Also ignore IHaskellPrelude.hs, it uses CPP in weird places
            ignored_files = ["Setup.hs", "IHaskellPrelude.hs"]
            if filename.endswith(".hs") and filename not in ignored_files:
                sources.append(os.path.join(root, filename))
# Format every source file and keep (original, formatted) pairs.
hindent_outputs = {}
for source_file in sources:
    print("Formatting file", source_file)
    with open(source_file, "r") as f:
        original_source = f.read()
    formatted_source = hindent(original_source)
    hindent_outputs[source_file] = (original_source, formatted_source)
# A non-empty diff means the file is not formatted per the style guide.
diffs = {filename: diff(original, formatted)
         for (filename, (original, formatted)) in hindent_outputs.items()}
incorrect_formatting = False
# NOTE(review): the loop variable shadows the diff() function above.
for filename, diff in diffs.items():
    if diff:
        incorrect_formatting = True
        print('Incorrect formatting in', filename)
        print('=' * 10)
        print(diff)
if incorrect_formatting:
    sys.exit(1)
| #!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import subprocess
def hindent(contents):
    """Run *contents* through hindent (gibiansky style) and return the result.

    NOTE(review): uses a fixed scratch file (.tmp3) in the current
    directory, so concurrent runs would race; requires the `hindent`
    executable on PATH.
    """
    with open(".tmp3", "w") as f:
        f.write(contents)
    with open(".tmp3", "r") as f:
        output = subprocess.check_output(["hindent", "--style", "gibiansky"],
                                         stdin=f)
    return output.decode('utf-8')
def diff(src1, src2):
    """Return `diff` output comparing two strings ('' when they match).

    A single trailing newline on either input is stripped first, so
    inputs differing only by a final newline compare equal.
    """
    # str.endswith is safe for empty strings, unlike indexing src[-1],
    # which raised IndexError when a file's contents were empty.
    if src1.endswith("\n"):
        src1 = src1[:-1]
    if src2.endswith("\n"):
        src2 = src2[:-1]
    with open(".tmp1", "w") as f1:
        f1.write(src1)
    with open(".tmp2", "w") as f2:
        f2.write(src2)
    try:
        output = subprocess.check_output(["diff", ".tmp1", ".tmp2"])
        return output.decode('utf-8')
    except subprocess.CalledProcessError as e:
        # diff exits with status 1 when the files differ; its captured
        # stdout still holds the diff text we want.
        return e.output.decode('utf-8')
# Verify that we're in the right directory
try:
    open("ihaskell.cabal", "r").close()
except:
    # NOTE(review): bare except, and execution continues after printing
    # the message instead of exiting -- the checks below still run.
    print(sys.argv[0], "must be run from the ihaskell directory",
          file=sys.stderr)
# Find all the source files
sources = []
for source_dir in ["src", "ipython-kernel", "ihaskell-display"]:
    for root, dirnames, filenames in os.walk(source_dir):
        # Skip cabal dist directories
        if "dist" in root:
            continue
        for filename in filenames:
            # Take Haskell files, but ignore the Cabal Setup.hs
            if filename.endswith(".hs") and filename != "Setup.hs":
                sources.append(os.path.join(root, filename))
# Format every source file and keep (original, formatted) pairs.
hindent_outputs = {}
for source_file in sources:
    print("Formatting file", source_file)
    with open(source_file, "r") as f:
        original_source = f.read()
    formatted_source = hindent(original_source)
    hindent_outputs[source_file] = (original_source, formatted_source)
# A non-empty diff means the file is not formatted per the style guide.
diffs = {filename: diff(original, formatted)
         for (filename, (original, formatted)) in hindent_outputs.items()}
incorrect_formatting = False
# NOTE(review): the loop variable shadows the diff() function above.
for filename, diff in diffs.items():
    if diff:
        incorrect_formatting = True
        print('Incorrect formatting in', filename)
        print('=' * 10)
        print(diff)
if incorrect_formatting:
    sys.exit(1)
| Python | 0 |
930a8b1a7c980183df5469627a734033ca39a444 | Add functional tests for create_image | shade/tests/functional/test_image.py | shade/tests/functional/test_image.py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_compute
----------------------------------
Functional tests for `shade` image methods.
"""
import tempfile
import uuid
from shade import openstack_cloud
from shade.tests import base
from shade.tests.functional.util import pick_image
class TestCompute(base.TestCase):
def setUp(self):
super(TestCompute, self).setUp()
# Shell should have OS-* envvars from openrc, typically loaded by job
self.cloud = openstack_cloud()
self.image = pick_image(self.cloud.nova_client.images.list())
def test_create_image(self):
test_image = tempfile.NamedTemporaryFile(delete=False)
test_image.write('\0' * 1024 * 1024)
test_image.close()
image_name = 'test-image-%s' % uuid.uuid4()
try:
self.cloud.create_image(name=image_name,
filename=test_image.name,
disk_format='raw',
container_format='bare',
wait=True)
finally:
self.cloud.delete_image(image_name, wait=True)
| Python | 0.000012 | |
59ef02377c41041fd8010231f2c86d1aba072c0f | Complete recur sol | lc0105_construct_binary_tree_from_preorder_and_inorder_traversal.py | lc0105_construct_binary_tree_from_preorder_and_inorder_traversal.py | """Leetcode 105. Construct Binary Tree from Preorder and Inorder Traversal
Medium
URL: https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/
Given preorder and inorder traversal of a tree, construct the binary tree.
Note: You may assume that duplicates do not exist in the tree.
For example, given
preorder = [3,9,20,15,7]
inorder = [9,3,15,20,7]
Return the following binary tree:
3
/ \
9 20
/ \
15 7
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class SolutionRecur(object):
def _build(self, pre_start, pre_end, in_start, in_end,
inorder_d, preorder, inorder):
if pre_start > pre_end or in_start > in_end:
return None
# Preorder's first is root.
root = TreeNode(preorder[pre_start])
# Get root's pos in inorder.
in_root_pos = inorder_d[root.val]
# Compute the number of left from root.
n_left = in_root_pos - in_start
# Build binary trees for root's left and right.
root.left = self._build(pre_start + 1, pre_start + n_left,
in_start, in_root_pos - 1,
inorder_d, preorder, inorder)
root.right = self._build(pre_start + n_left + 1, pre_end,
in_root_pos + 1, in_end,
inorder_d, preorder, inorder)
return root
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
# Create dict for inorder value->index.
inorder_d = {v: i for (i, v) in enumerate(inorder)}
# Build binary tree by recursion.
return self._build(0, len(preorder) - 1, 0, len(inorder) - 1,
inorder_d, preorder, inorder)
def main():
# Ans:
# 3
# / \
# 9 20
# / \
# 15 7
preorder = [3, 9, 20, 15, 7]
inorder = [9, 3, 15, 20, 7]
root = SolutionRecur().buildTree(preorder, inorder)
print root.val
print root.left.val
print root.right.val
print root.right.left.val
print root.right.right.val
if __name__ == '__main__':
main()
| Python | 0.000007 | |
9c045f7667e1bdc6c9137c3877292907f4623774 | Add a management command to check if URNs are present in the database | make_a_plea/management/commands/check_urns_in_db.py | make_a_plea/management/commands/check_urns_in_db.py | import csv
from django.core.management.base import BaseCommand
from apps.plea.models import DataValidation, Case
from apps.plea.standardisers import standardise_urn, format_for_region
class Command(BaseCommand):
help = "Build weekly aggregate stats"
def add_arguments(self, parser):
parser.add_argument('csv_file', nargs='+')
def handle(self, *args, **options):
with open(options['csv_file'][0]) as csvfile:
total_matched, total_missed, matched, missed = 0, 0, 0, 0
for row in csvfile.readlines():
if not row.strip():
continue
elif row.startswith("#"):
if matched > 0 or missed > 0:
print "----------------\nMatched {}\nMissed {}\n\n".format(matched, missed)
total_matched += matched
total_missed += missed
matched = 0
missed = 0
print row
else:
urn = standardise_urn(row)
if Case.objects.filter(urn__iexact=urn).exists():
matched += 1
else:
missed += 1
print "{} - failed".format(urn)
print "----------------\nTotal:\nMatched {}\nMissed {}".format(total_matched, total_missed) | Python | 0 | |
4759cf1b058d1a1b5999882a8b44f84ad89a8a9a | Add tests file | arangodb/tests.py | arangodb/tests.py | # -*- coding: utf-8 -*-
| Python | 0.000001 | |
64b572a4e1e8359d781591e22439fb432c5860b6 | Create click_location.py | click_location.py | click_location.py | from PIL import Image
from pylab import *
im = array(Image.open('img.jpg'))
show()
while(1):
imshow(im)
print "Please click 3 points"
x = ginput(1)
print 'you clicked:',x
| Python | 0.000001 | |
607a73317e0497ee206bf8381f7cfa9fe46a1609 | add xml-row-to-vector script | src/4_train_models/vectorize_data.py | src/4_train_models/vectorize_data.py | #!/usr/bin/python3
from bs4 import BeautifulSoup
from tqdm import tqdm
import numpy as np
import pickle
#http://stackoverflow.com/a/27518377/2230446
def get_num_lines(filename):
f = open(filename, "rb")
num_lines = 0
buf_size = 1024 * 1024
read_f = f.raw.read
buf = read_f(buf_size)
while buf:
num_lines += buf.count(b"\n")
buf = read_f(buf_size)
return num_lines
def load_tag_index_map(filename):
print("loading tag index map")
tag_index_map = pickle.load(open(filename, "rb"))
return tag_index_map
# map list of tags to boolean vector
def vectorize(tags, tag_index_map):
vector = np.zeros(len(tag_index_map))
for tag in tags:
if tag in tag_index_map.keys():
index = tag_index_map[tag]
vector[index] = True
return vector
# convert a single line of the xml file to an input vector and output value
def line_to_x_y(line, tag_index_map):
soup = BeautifulSoup(line, "lxml")
post = soup.find("post")
if post is not None:
tags = post["tags"].strip().split(" ")
# print(tags)
x = vectorize(tags, tag_index_map)
y = score = int(post["score"])
return x, y
print("~~~ERROR~~~")
print("line:", line)
print("~~~ERROR~~~")
# convert entire xml file into list of input vectors and list of output values
def file_to_xs_ys(filename, tag_index_map):
num_lines = get_num_lines(filename)
num_dimensions = len(tag_index_map)
xs = np.zeros((num_lines, num_dimensions), dtype=bool)
ys = np.zeros((num_lines,1))
with open(filename, "r") as f:
for i, line in tqdm(enumerate(f), total=num_lines):
x, y = line_to_x_y(line, tag_index_map)
xs[i] = x
ys[i] = y
return xs, ys
def main():
tag_index_map = load_tag_index_map("../../res/tag_index_map.p")
# print(tag_index_map)
filename = "../../res/head_safebooru.xml"
# filename = "../../res/sample_safebooru.xml"
xs, ys = file_to_xs_ys(filename, tag_index_map)
print(xs[0], ys[0])
print(xs[1], ys[1])
if __name__ == "__main__":
main()
| Python | 0.000002 | |
d6e9971ceefc69f0eefc7440cc5e7035e7dcc05d | Add the middleware for reporting errors to gcloud. | contentcuration/contentcuration/middleware/ErrorReportingMiddleware.py | contentcuration/contentcuration/middleware/ErrorReportingMiddleware.py | from google.cloud import error_reporting
class ErrorReportingMiddleware(object):
def __init__(self, *args, **kwargs):
self.client = error_reporting.Client()
def process_exception(self, request, exception):
self.client.report_exception()
| Python | 0 | |
e3ab7c126f808864f0458b52f36518e485f546ca | Add a session class to help tie everything together in a convenient way. | source/harmony/session.py | source/harmony/session.py | # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import os
from harmony.schema.collection import Collection
from harmony.schema.collector import FilesystemCollector
from harmony.schema.processor import MixinProcessor, ValidateProcessor
from harmony.schema.validator import Validator
class Session(object):
'''A configuration of the various components in a standard way.'''
def __init__(self, collector=None, processors=None, validator_class=None):
'''Initialise session.
*collector* is used to collect schemas for use in the session and
should conform to the :py:class:`~harmony.schema.collector.Collector`
interface. Defaults to a
:py:class:`~harmony.schema.collector.FileSystemCollector` using the
environment variable :envvar:`HARMONY_SCHEMA_PATH` to discover schemas.
*processors* specifies a list of
:py:class:`~harmony.schema.processor.Processor` instances that will
post-process any discovered schemas. If not specified will default to
[:py:class:`~harmony.schema.processor.ValidateProcessor`,
:py:class:`~harmony.schema.processor.MixinProcessor`].
*validator_class* should be the class to use for validation of schemas
and instances. Defaults to
:py:class:`harmony.schema.validator.Validator`.
'''
self.schemas = Collection()
self.collector = collector
if self.collector is None:
paths = os.environ.get('HARMONY_SCHEMA_PATH', '').split(os.pathsep)
self.collector = FilesystemCollector(paths)
self.validator_class = validator_class
if self.validator_class is None:
self.validator_class = Validator
self.processors = processors
if self.processors is None:
self.processors = [
ValidateProcessor(self.validator_class), MixinProcessor()
]
self.refresh()
def refresh(self):
'''Discover schemas and add to local collection.
.. note::
Collection will be processed with self.processors.
'''
self.schemas.clear()
for schema in self.collector.collect():
self.schemas.add(schema)
for processor in self.processors:
processor.process(self.schemas)
| Python | 0 | |
c240f7bcd94b2fe6ead8568f6f6f5a69c1853b3a | Add a shelve/unshelve scenario | tempest/scenario/test_shelve_instance.py | tempest/scenario/test_shelve_instance.py | # Copyright 2014 Scality
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest import config
from tempest.openstack.common import log
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
class TestShelveInstance(manager.ScenarioTest):
"""
This test shelves then unshelves a Nova instance
The following is the scenario outline:
* boot a instance and create a timestamp file in it
* shelve the instance
* unshelve the instance
* check the existence of the timestamp file in the unshelved instance
"""
def _write_timestamp(self, server_or_ip):
ssh_client = self.get_remote_client(server_or_ip)
ssh_client.exec_command('date > /tmp/timestamp; sync')
self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
def _check_timestamp(self, server_or_ip):
ssh_client = self.get_remote_client(server_or_ip)
got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
self.assertEqual(self.timestamp, got_timestamp)
def _shelve_then_unshelve_server(self, server):
self.servers_client.shelve_server(server['id'])
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
self.servers_client.wait_for_server_status(
server['id'], 'SHELVED_OFFLOADED', extra_timeout=offload_time)
else:
self.servers_client.wait_for_server_status(server['id'], 'SHELVED')
self.servers_client.shelve_offload_server(server['id'])
self.servers_client.wait_for_server_status(server['id'],
'SHELVED_OFFLOADED')
self.servers_client.unshelve_server(server['id'])
self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.services('compute', 'network', 'image')
def test_shelve_instance(self):
self.keypair = self.create_keypair()
self.security_group = self._create_security_group()
create_kwargs = {
'key_name': self.keypair['name'],
'security_groups': [self.security_group]
}
server = self.create_server(image=CONF.compute.image_ref,
create_kwargs=create_kwargs)
if CONF.compute.use_floatingip_for_ssh:
_, floating_ip = self.floating_ips_client.create_floating_ip()
self.addCleanup(self.delete_wrapper,
self.floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], server['id'])
self._write_timestamp(floating_ip['ip'])
else:
self._write_timestamp(server)
# Prevent bug #1257594 from coming back
# Unshelve used to boot the instance with the original image, not
# with the instance snapshot
self._shelve_then_unshelve_server(server)
if CONF.compute.use_floatingip_for_ssh:
self._check_timestamp(floating_ip['ip'])
else:
self._check_timestamp(server)
| Python | 0.000001 | |
c2036cd7629b93bfc12069eaf174f2427d47e769 | add another test | tests/monitoring/test_check_mesos_duplicate_frameworks.py | tests/monitoring/test_check_mesos_duplicate_frameworks.py | # Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
from paasta_tools.monitoring.check_mesos_duplicate_frameworks import check_mesos_no_duplicate_frameworks
def test_check_mesos_no_duplicate_frameworks_ok(capfd):
with mock.patch(
'paasta_tools.monitoring.check_mesos_duplicate_frameworks.parse_args', autospec=True,
) as mock_parse_args, mock.patch(
'paasta_tools.monitoring.check_mesos_duplicate_frameworks.get_mesos_master', autospec=True,
) as mock_get_mesos_master:
mock_opts = mock.MagicMock()
mock_opts.check = 'marathon,chronos'
mock_parse_args.return_value = mock_opts
mock_master = mock.MagicMock()
mock_master.state = {
'frameworks': [
{'name': 'marathon'},
{'name': 'chronos'},
{'name': 'foobar'},
{'name': 'foobar'},
],
}
mock_get_mesos_master.return_value = mock_master
with pytest.raises(SystemExit) as error:
check_mesos_no_duplicate_frameworks()
out, err = capfd.readouterr()
assert "OK" in out
assert "marathon" in out
assert "chronos" in out
assert "foobar" not in out
assert error.value.code == 0
def test_check_mesos_no_duplicate_frameworks_critical(capfd):
with mock.patch(
'paasta_tools.monitoring.check_mesos_duplicate_frameworks.parse_args', autospec=True,
) as mock_parse_args, mock.patch(
'paasta_tools.monitoring.check_mesos_duplicate_frameworks.get_mesos_master', autospec=True,
) as mock_get_mesos_master:
mock_opts = mock.MagicMock()
mock_opts.check = 'marathon,chronos'
mock_parse_args.return_value = mock_opts
mock_master = mock.MagicMock()
mock_master.state = {
'frameworks': [
{'name': 'marathon'},
{'name': 'marathon'},
{'name': 'chronos'},
{'name': 'foobar'},
{'name': 'foobar'},
],
}
mock_get_mesos_master.return_value = mock_master
with pytest.raises(SystemExit) as error:
check_mesos_no_duplicate_frameworks()
out, err = capfd.readouterr()
assert "CRITICAL" in out
assert "marathon" in out
assert "chronos" in out
assert "foobar" not in out
assert error.value.code == 2
| Python | 0.000004 | |
0b81997dd12f775fc9f814c19fb62ef35bde998e | Add ceres library | autoconf/ceres.py | autoconf/ceres.py | from _external import *
from pthread import *
from amd import *
from gomp import *
from lapack import *
from suitesparse import *
from glog import *
ceres = LibWithHeaderChecker('ceres', 'ceres/ceres.h', 'c++', name='ceres', dependencies = [gomp,lapack,suitesparse,amd,pthread,glog],)
| Python | 0.000001 | |
681f73490fd7d333883134a417477492744ce22a | Add project permissions | src/python/expedient/clearinghouse/project/permissions.py | src/python/expedient/clearinghouse/project/permissions.py | '''
Created on Aug 3, 2010
@author: jnaous
'''
from expedient.common.permissions.shortcuts import create_permission
from expedient.clearinghouse.permissionmgmt.utils import \
request_permission_wrapper
create_permission(
"can_create_project",
description=\
"Owners of this permission can create projects in Expedient.",
view=request_permission_wrapper,
)
create_permission(
"can_edit_project",
description=\
"Owners of this permission can edit basic project properties.",
view=request_permission_wrapper,
)
create_permission(
"can_delete_project",
description=\
"Owners of this permission can edit basic project properties.",
view=request_permission_wrapper,
)
create_permission(
"can_view_project",
description=\
"Owners of this permission can view the project. Without "
"other permissions, they are non-functional members.",
view=request_permission_wrapper,
)
create_permission(
"can_add_members",
description=\
"Owners of this permission can add members to "
"the project and assign to them roles.",
view=request_permission_wrapper,
)
create_permission(
"can_remove_members",
description=\
"Owners of this permission can remove members from "
"the project.",
view=request_permission_wrapper,
)
create_permission(
"can_create_slices",
description=\
"Owners of this permission can create new slices.",
view=request_permission_wrapper,
)
create_permission(
"can_add_aggregates",
description=\
"Owners of this permission can add aggregates "
"to the project.",
view=request_permission_wrapper,
)
create_permission(
"can_remove_aggregates",
description=\
"Owners of this permission can remove aggregates "
"from the project.",
view=request_permission_wrapper,
)
| Python | 0.000001 | |
293b3a0e463a5a215b49c59d03a0cf280428c6c4 | Add bootstrap script for buildout | bootstrap.py | bootstrap.py | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
options, args = parser.parse_args()
######################################################################
# load/install setuptools
to_reload = False
try:
import pkg_resources
import setuptools
except ImportError:
ez = {}
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# XXX use a more permanent ez_setup.py URL when available.
exec(urlopen('https://bitbucket.org/pypa/setuptools/raw/0.7.2/ez_setup.py'
).read(), ez)
setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
if to_reload:
reload(pkg_resources)
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s",
repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| Python | 0.000001 | |
bd9fce88c235ea6be032a1d15a31bf41df14a444 | Fix missing migration | djangocms_blog/migrations/0033_auto_20180226_1410.py | djangocms_blog/migrations/0033_auto_20180226_1410.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_blog', '0032_auto_20180109_0023'),
]
operations = [
migrations.AlterField(
model_name='blogcategorytranslation',
name='meta_description',
field=models.TextField(blank=True, verbose_name='category meta description', default=''),
),
]
| Python | 0.999773 | |
c0e03b41fbd95fd5c556205d96fd5bd935b55b0e | make wav and noisy slices | make_tfrecords.py | make_tfrecords.py | from __future__ import print_function
import tensorflow as tf
import numpy as np
from collections import namedtuple, OrderedDict
from subprocess import call
import cPickle as pickle
import scipy.io.wavfile as wavfile
import gzip
import h5py
import argparse
import codecs
import timeit
import struct
import toml
import re
import sys
import os
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def slice_signal(signal, window_size, stride=0.5):
""" Return windows of the given signal by sweeping in stride fractions
of window
"""
assert signal.ndim == 1, signal.ndim
n_samples = signal.shape[0]
offset = int(window_size * stride)
slices = []
for beg_i, end_i in zip(range(0, n_samples, offset),
range(offset, n_samples + offset, offset)):
slices.append(signal[beg_i:end_i])
return np.array(slices)
def read_and_slice(filename, wav_canvas_size, stride=0.5):
fm, wav_data = wavfile.read(filename)
if fm != 16000:
raise ValueError('Sampling rate is expected to be 16kHz!')
signals = slice_signal(wav_data, wav_canvas_size, stride)
return signals
def encoder_proc(wav_filename, noisy_path, out_file, wav_canvas_size):
""" Read and slice the wav and noisy files and write to TFRecords.
out_file: TFRecordWriter.
"""
ppath, wav_fullname = os.path.split(wav_filename)
noisy_filename = os.path.join(noisy_path, wav_fullname)
wav_signals = read_and_slice(wav_filename, wav_canvas_size)
noisy_signals = read_and_slice(noisy_filename, wav_canvas_size)
assert wav_signals.shape == noisy_signals.shape, noisy_signals.shape
for (wav, noisy) in zip(wav_signals, noisy_signals):
wav_raw = wav.tostring()
noisy_raw = noisy.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'wav_raw': _bytes_feature(wav_raw),
'noisy_raw': _bytes_feature(noisy_raw)}))
out_file.write(example.SerializeToString())
def main(opts):
if not os.path.exists(opts.save_path):
# make save path if it does not exist
os.makedirs(opts.save_path)
# set up the output filepath
out_filepath = os.path.join(opts.save_path, opts.out_file)
if os.path.splitext(out_filepath)[1] != '.tfrecords':
# if wrong extension or no extension appended, put .tfrecords
out_filepath += '.tfrecords'
else:
out_filename, ext = os.path.splitext(out_filepath)
out_filepath = out_filename + ext
# check if out_file exists and if force flag is set
if os.path.exists(out_filepath) and not opts.force_gen:
raise ValueError('ERROR: {} already exists. Set force flag (--force-gen) to '
'overwrite. Skipping this speaker.'.format(out_filepath))
elif os.path.exists(out_filepath) and opts.force_gen:
print('Will overwrite previously existing tfrecords')
os.unlink(out_filepath)
with open(opts.cfg) as cfh:
# read the configuration description
cfg_desc = toml.loads(cfh.read())
beg_enc_t = timeit.default_timer()
out_file = tf.python_io.TFRecordWriter(out_filepath)
# process the acoustic and textual data now
for dset_i, (dset, dset_desc) in enumerate(cfg_desc.iteritems()):
print('-' * 50)
wav_dir = spk_desc['clean']
wav_files = [os.path.join(wav_dir, wav) for wav in
os.listdir(wav_dir) if wav.endswith('.wav')]
nfiles = len(wav_files)
for m, wav_file in enumerate(wav_files):
print('Processing wav file {}/{} {}{}'.format(m + 1,
nfiles,
wav_file,
' ' * 10),
end='\r')
sys.stdout.flush()
encoder_proc(wav_file, noisy_dir, out_file, 2 ** 14)
out_file.close()
end_enc_t = timeit.default_timer() - beg_enc_t
print('')
print('*' * 50)
print('Total processing and writing time: {} s'.format(end_enc_t))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert the set of txt and '
'wavs to TFRecords')
parser.add_argument('--cfg', type=str, default='e2e_maker.cfg',
help='File containing the description of datasets '
'to extract the info to make the TFRecords.')
parser.add_argument('--save_path', type=str, default='data/',
help='Path to save the dataset')
parser.add_argument('--out_file', type=str, default='segan.tfrecords',
help='Output filename')
parser.add_argument('--force-gen', dest='force_gen', action='store_true',
help='Flag to force overwriting existing dataset.')
parser.set_defaults(force_gen=False)
opts = parser.parse_args()
main(opts)
| Python | 0.000001 | |
07ef4a6440b59ed6ce207d3442f7ba950a8f9ec8 | Python script to compute cost. | compute_cost.py | compute_cost.py | """compute_cost.py:
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar Singh"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import sys
import os
import random
import datetime
__fmt__ = '%Y-%m-%d'
def computeCost( currentDate, lastDate, nAWS ):
ndays = ( lastDate - currentDate ).days
maxAWS = 5
nyears = ndays / 365.0
if ndays < 365.0 or nAWS > maxAWS:
return int( 100 * 20.0 )
cost = 3 * nyears
if nAWS > 2:
cost += 3 * (maxAWS - nAWS) - (20.0/nAWS) * ( nyears - 1)
return int( 100 * max( 0, cost ))
def random_date(start, end):
"""
This function will return a random datetime between two datetime
objects.
"""
delta = end - start
int_delta = (delta.days * 24.0 * 60 * 60) + delta.seconds
random_second = randrange(int_delta)
return start + datetime.timedelta(seconds=random_second)
def test( ):
import pylab
# Generate random test data.
start = datetime.datetime.strptime( '2017-03-18', __fmt__ )
end = datetime.datetime.strptime( '2021-03-18', __fmt__ )
for naws in range( 0, 5 ):
xval, yval = [ ], [ ]
for i in range( 5* 54 ):
date = start + datetime.timedelta( days = i * 7 )
xval.append( (date - start).days / 365.0 )
yval.append( computeCost( start, date, naws ) )
pylab.xlabel( 'Year' )
pylab.ylabel( 'Cost' )
pylab.plot( xval, yval, alpha = 0.7, label = '%s' % naws )
pylab.legend( )
pylab.savefig( "%s.png" % sys.argv[0] )
if __name__ == '__main__':
test()
| Python | 0.999849 | |
7a0bbdb2395ca1e8579e0f2cc6ccd43807c51161 | Create 6kyu_alpha_to_numeric_and_numeric_to_alpha.py | Solutions/6kyu/6kyu_alpha_to_numeric_and_numeric_to_alpha.py | Solutions/6kyu/6kyu_alpha_to_numeric_and_numeric_to_alpha.py | import re
def AlphaNum_NumAlpha(string):
return ''.join(swap(s) for s in re.findall('\d{1,2}|[a-z]', string))
def swap(s):
return chr(int(s)+96) if s.isdigit() else str(ord(s)-96)
| Python | 0.999268 | |
20c51dbcd2d90bfa234efa5027254a4915995edd | add nmap_hosts migration | alembic/versions/13b7c3d4c802_create_nmap_hosts_table.py | alembic/versions/13b7c3d4c802_create_nmap_hosts_table.py | """create nmap_hosts table
Revision ID: 13b7c3d4c802
Revises: ecd5f49567a6
Create Date: 2017-07-21 08:19:17.849112
"""
from sqlalchemy.dialects import postgresql
from alembic import op
import sqlalchemy as sa
import datetime
def _get_date():
return datetime.datetime.now()
# revision identifiers, used by Alembic.
revision = '13b7c3d4c802'
down_revision = 'ecd5f49567a6'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('nmap_hosts',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('ip_addr', postgresql.INET, unique=True, nullable=False),
sa.Column('created_at', sa.TIMESTAMP(timezone=False), default=_get_date))
def downgrade():
op.drop_table('nmap_hosts')
| Python | 0 | |
941985a561d0bdce1a8aba2e57fc60f90b6164fb | Add jrun main module so "python jrun" works | jrun/__main__.py | jrun/__main__.py | import jrun
if __name__ == '__main__':
jrun.jrun_main()
| Python | 0.000036 | |
b27b3089f393a84c3d004e8d89be43165862be1d | add match matrix | matrix_match.py | matrix_match.py | import random
w1 = int(raw_input(">>> w1: "))
h1 = int(raw_input(">>> h1: "))
w2 = int(raw_input(">>> w2: "))
h2 = int(raw_input(">>> h2: "))
r1 = int(raw_input(">>> r1: "))
r2 = int(raw_input(">>> r2: "))
# w1 = 20
# h1 = 20
# w2 = 3
# h2 = 3
matrix = [[random.randint(r1, r2) for x in range(w1)] for x in range(h1)]
pattern = [[random.randint(r1, r2) for x in range(w2)] for x in range(h2)]
def matchMatrix(matrix1, matrix2):
print 'Match Matrix start:\n '
results = []
temp = []
for x in matrix2:
for y in x:
temp.append(y)
indexOfX = 0
for x in matrix1:
if indexOfX >= (h1-h2+1):
break
indexOfY = 0
for y in x:
if indexOfY >= (w1-w2+1):
break
count = 0
for z in matrix2:
subMatrix = matrix[indexOfX+count]
count+=1
size = len(z)
subX = subMatrix[indexOfY:indexOfY+size]
if z != subX:
break
if count == h2:
results.append((indexOfX, indexOfY))
indexOfY+=1
indexOfX+=1
return results
for x in pattern:
print x
for x in matrix:
print x
print 'Ans:\n%s' % (matchMatrix(matrix, pattern)) | Python | 0.000002 | |
40f92e6293bb13ee1462b932be15f5f11ceeee74 | Add initial implementation of TempType. | compiler/infer.py | compiler/infer.py | """
# ----------------------------------------------------------------------
# infer.py
#
# Type inference for Llama
# http://courses.softlab.ntua.gr/compilers/2012a/llama2012.pdf
#
# Authors: Nick Korasidis <renelvon@gmail.com>
# Dimitris Koutsoukos <dim.kou.shmmy@gmail.com>
# ----------------------------------------------------------------------
"""
class TempType:
"""A temporary type used during inference."""
_next_free = 1 # Next free papaki.
@classmethod
def _get_next_tag(cls):
cls._next_free += 1
return cls._next_free
def __init__(self, node, spec_type=None):
"""
Construct a new temporary type for node `node`.
The user may optionally supply a type for this node;
such a specification is not binding but will improve
error reporting.
"""
self._node = node
self._spec_type = spec_type
self._inferred_type = None
self._tag = self._get_next_tag()
def write_back(self):
self._node.type = self._inferred_type
# TODO: Validate the type before returning.
| Python | 0 | |
a2e27feff324d5aed7220a520df651f688cd1829 | Add migration | bluebottle/assignments/migrations/0002_auto_20190529_1755.py | bluebottle/assignments/migrations/0002_auto_20190529_1755.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-05-29 15:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('assignments', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='assignment',
old_name='end',
new_name='end_time',
),
]
| Python | 0.000002 | |
9ca926d052edc754ca3b6f3663b1c00887b2965a | add migration with blank projects.Tag | brasilcomvc/projects/migrations/0004_tag_may_be_blank.py | brasilcomvc/projects/migrations/0004_tag_may_be_blank.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0003_project_tags'),
]
operations = [
migrations.AlterField(
model_name='project',
name='tags',
field=models.ManyToManyField(to='projects.Tag', blank=True),
preserve_default=True,
),
]
| Python | 0.000001 | |
77ce1e2606132a5a04bc8c1b86c14f6f590e458d | test script added for assessment_lookups | doc/dlkit-gstudio-impl/assessment_lookup.py | doc/dlkit-gstudio-impl/assessment_lookup.py | from dlkit_runtime import PROXY_SESSION, RUNTIME
from dlkit_gstudio.gstudio_user_proxy import GStudioRequest
req_obj = GStudioRequest(id=1)
condition = PROXY_SESSION.get_proxy_condition()
condition.set_http_request(req_obj)
proxy = PROXY_SESSION.get_proxy(condition)
assessment_service_mgr = RUNTIME.get_service_manager('ASSESSMENT', proxy=proxy)
all_banks = assessment_service_mgr.get_banks()
all_banks.available()
# ======
from dlkit_runtime import PROXY_SESSION, RUNTIME
from dlkit_gstudio.gstudio_user_proxy import GStudioRequest
condition = PROXY_SESSION.get_proxy_condition()
proxy = PROXY_SESSION.get_proxy(condition)
assessment_service_mgr = RUNTIME.get_service_manager('ASSESSMENT', proxy=proxy)
all_banks = assessment_service_mgr.get_banks()
all_banks.available()
from dlkit.primordium.id.primitives import Id
bank = assessment_service_mgr.get_bank(Id('assessment.Bank%3A57c00fbded849b11f52fc8ec%40ODL.MIT.EDU'))
bank.get_display_name().text
# bank = all_banks.next()
assessment_items = bank.get_assessments()
assessment_items.available()
a = assessment_items.next()
offerings = bank.get_assessments_offered_for_assessment(a.get_id())
# Error:
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/assessment/objects.pyc in next(self)
# 1190
# 1191 def next(self):
# -> 1192 return self._get_next_object(Assessment)
# 1193
# 1194 next_assessment = property(fget=get_next_assessment)
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/osid/objects.pyc in _get_next_object(self, object_class)
# 2454 raise
# 2455 if isinstance(next_object, dict):
# -> 2456 next_object = object_class(osid_object_map=next_object, runtime=self._runtime, proxy=self._proxy)
# 2457 elif isinstance(next_object, basestring) and object_class == Id:
# 2458 next_object = Id(next_object)
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/assessment/objects.pyc in __init__(self, **kwargs)
# 827
# 828 def __init__(self, **kwargs):
# --> 829 osid_objects.OsidObject.__init__(self, object_name='ASSESSMENT', **kwargs)
# 830 self._catalog_name = 'bank'
# 831
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/osid/objects.pyc in __init__(self, osid_object_map, runtime, **kwargs)
# 114 osid_markers.Extensible.__init__(self, runtime=runtime, **kwargs)
# 115 self._my_map = osid_object_map
# --> 116 self._load_records(osid_object_map['recordTypeIds'])
# 117
# 118 def get_object_map(self, obj_map=None):
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/osid/markers.pyc in _load_records(self, record_type_idstrs)
# 174 """Load all records from given record_type_idstrs."""
# 175 for record_type_idstr in record_type_idstrs:
# --> 176 self._init_record(record_type_idstr)
# 177
# 178 def _init_records(self, record_types):
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/osid/markers.pyc in _init_record(self, record_type_idstr)
# 189 import importlib
# 190 record_type_data = self._record_type_data_sets[Id(record_type_idstr).get_identifier()]
# --> 191 module = importlib.import_module(record_type_data['module_path'])
# 192 record = getattr(module, record_type_data['object_record_class_name'], None)
# 193 # only add recognized records ... so apps don't break
# /usr/lib/python2.7/importlib/__init__.pyc in import_module(name, package)
# 35 level += 1
# 36 name = _resolve_name(name[level:], package, level)
# ---> 37 __import__(name)
# 38 return sys.modules[name]
# ImportError: No module named records.osid.object_records
| Python | 0 | |
a6d3ae8b27f6e97e7e5b4388a20836f25953c26d | Add example config file | config-example.py | config-example.py | """
Minimal config file for kahvibot. Just define values as normal Python code.
"""
# put your bot token here as a string
bot_token = ""
# the tg username of the bot's admin.
admin_username = ""
# if a message contains any of these words, the bot responds
trigger_words = [
"kahvi",
"\u2615", # coffee emoji
"tsufe",
"kahavi",
#"sima", # wappu mode
]
| Python | 0.000001 | |
bcda14f8258daaf3475dd9d3ca3eb7b25aa0496c | Add py-voluptuous (#13457) | var/spack/repos/builtin/packages/py-voluptuous/package.py | var/spack/repos/builtin/packages/py-voluptuous/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyVoluptuous(PythonPackage):
"""Voluptous, despite the name, is a Python data validation library."""
homepage = "https://github.com/alecthomas/voluptuous"
url = "https://github.com/alecthomas/voluptuous/archive/0.11.5.tar.gz"
version('0.11.5', sha256='01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4')
depends_on('py-setuptools', type='build')
| Python | 0 | |
93000ab88c489f720d0f7e6a8921dc69342d61f1 | Add migration | webapp/apps/dynamic/migrations/0012_auto_20160616_1908.py | webapp/apps/dynamic/migrations/0012_auto_20160616_1908.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dynamic', '0011_auto_20160614_1902'),
]
operations = [
migrations.RenameField(
model_name='dynamicbehaviorsaveinputs',
old_name='BE_CG_per',
new_name='BE_cg',
),
]
| Python | 0.000002 | |
662ad845a0ce729d8d8b72121a4c7c6f22e3eaa2 | support for phonetic similarity added | src/indicnlp/script/phonetic_sim.py | src/indicnlp/script/phonetic_sim.py | # Copyright Anoop Kunchukuttan 2014 - present
#
# This file is part of Indic NLP Library.
#
# Indic NLP Library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Indic NLP Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indic NLP Library. If not, see <http://www.gnu.org/licenses/>.
#
from indicnlp import loader
from indicnlp import langinfo
from indicnlp.script.indic_scripts import *
import numpy as np
import gzip
import pandas as pd
import codecs,sys
def equal(v1,v2):
return 0.0 if np.sum( xor_vectors(v1, v2)) > 0 else 1.0
def dice(v1,v2):
dotprod=2*float(np.dot( v1, v2.T ))
return dotprod/float(len(v1)+len(v2))
def jaccard(v1,v2):
dotprod=float(np.dot( v1, v2.T ))
return dotprod/float(len(v1)+len(v2)-dotprod)
def cosine(v1,v2):
dotprod=float(np.dot( v1, v2.T ))
norm1=float(np.dot( v1, v1.T ))
norm2=float(np.dot( v2, v2.T ))
return ((dotprod)/(np.sqrt(norm1*norm2)+0.00001))
def dotprod(v1,v2):
return float(np.dot( v1, v2.T ))
def sim1(v1,v2,base=5.0):
return np.power(base,dotprod(v1,v2))
def softmax(v1,v2):
return sim1(v1,v2,np.e)
def create_similarity_matrix(sim_func,slang,tlang,normalize=True):
dim=langinfo.COORDINATED_RANGE_END_INCLUSIVE-langinfo.COORDINATED_RANGE_START_INCLUSIVE+1
sim_mat=np.zeros((dim,dim))
for offset1 in xrange(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1):
v1=get_phonetic_feature_vector(offset_to_char(offset1,slang),slang)
for offset2 in xrange(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1):
v2=get_phonetic_feature_vector(offset_to_char(offset2,tlang),tlang)
sim_mat[offset1,offset2]=sim_func(v1,v2)
if normalize:
sums=np.sum(sim_mat, axis=1)
sim_mat=(sim_mat.transpose()/sums).transpose()
return sim_mat
| Python | 0 | |
30ea7b5c77acc0af8826e3aef6155f9d329ed419 | Create getCpuUsage2.py | mesosmetrics/getCpuUsage2.py | mesosmetrics/getCpuUsage2.py | import urllib
import json
import time
import sys
if __name__ == '__main__':
agent=sys.argv[1]
url = "http://" + agent + ":5051/monitor/statistics"
executors = {}
response = urllib.urlopen(url)
data = json.loads(response.read())
for itm in data:
executor = {}
id = itm["executor_id"]
executor["name"] = itm["executor_name"]
a = {}
a["cpu_system"] = itm["statistics"]["cpus_system_time_secs"]
a["cpu_user"] = itm["statistics"]["cpus_user_time_secs"]
a["ts"] = itm["statistics"]["timestamp"]
executor["a"] = a
executors[id] = executor
time.sleep(5)
response = urllib.urlopen(url)
data = json.loads(response.read())
for itm in data:
id = itm["executor_id"]
b = {}
b["cpu_system"] = itm["statistics"]["cpus_system_time_secs"]
b["cpu_user"] = itm["statistics"]["cpus_user_time_secs"]
b["ts"] = itm["statistics"]["timestamp"]
executors[id]["b"] = b
for id,itm in executors.items():
cpus_total_usage = ((itm["b"]["cpu_system"]-itm["a"]["cpu_system"]) + \
(itm["b"]["cpu_user"]-itm["a"]["cpu_user"])) / \
(itm["b"]["ts"]-itm["a"]["ts"])
print(str(id) + " : " + str(cpus_total_usage))
| Python | 0 | |
93a41a7d406e5f7c264865d96c0f85b1181e5cb0 | add basic test | tests/utils_tests/extension_tests/test_forward.py | tests/utils_tests/extension_tests/test_forward.py | import mock
import numpy as np
import unittest
import chainer
from chainer import testing
from chainercv.utils import forward
@testing.parameterize(*testing.product({
'in_shapes': [((3, 4),), ((3, 4), (5,))],
'out_shapes': [((3, 4),), ((3, 4), (5,))],
'variable': [True, False],
}))
class TestForward(unittest.TestCase):
def setUp(self):
self.xp = np
self.mocked_model = mock.MagicMock()
self.mocked_model.xp = self.xp
self.inputs = tuple(np.empty(shape) for shape in self.in_shapes)
if len(self.inputs) == 1:
self.inputs = self.inputs[0]
self.outputs = tuple(
self.xp.array(np.empty(shape)) for shape in self.out_shapes)
if self.variable:
self.outputs = tuple(
chainer.Variable(output) for output in self.outputs)
if len(self.outputs) == 1:
self.outputs = self.outputs[0]
def _check_inputs(self, inputs):
if isinstance(self.inputs, tuple):
orig_inputs = self.inputs
else:
orig_inputs = self.inputs,
for orig, in_ in zip(orig_inputs, inputs):
self.assertIsInstance(in_, chainer.Variable)
self.assertEqual(chainer.cuda.get_array_module(in_.data), self.xp)
in_ = chainer.cuda.to_cpu(in_.data)
np.testing.assert_equal(in_, orig)
def _check_outputs(self, outputs):
if len(outputs) == 1:
outputs = outputs,
for orig, out in zip(self.outputs, outputs):
self.assertIsInstance(out, np.ndarray)
if self.variable:
orig = orig.data
orig = chainer.cuda.to_cpu(orig)
np.testing.assert_equal(out, orig)
def test_forward(self):
def _call(*inputs):
self._check_inputs(inputs)
return self.outputs
self.mocked_model.side_effect = _call
outputs = forward(self.mocked_model, self.inputs)
self._check_outputs(outputs)
testing.run_module(__name__, __file__)
| Python | 0.000022 | |
4802b8fe149ed72303bbb0f1f924275dbc004b5a | Use the handy interruptible pool from emcee | Examples/interruptible_pool.py | Examples/interruptible_pool.py | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2010-2013 Daniel Foreman-Mackey & contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Python's multiprocessing.Pool class doesn't interact well with
``KeyboardInterrupt`` signals, as documented in places such as:
* `<http://stackoverflow.com/questions/1408356/>`_
* `<http://stackoverflow.com/questions/11312525/>`_
* `<http://noswap.com/blog/python-multiprocessing-keyboardinterrupt>`_
Various workarounds have been shared. Here, we adapt the one proposed in the
last link above, by John Reese, and shared as
* `<https://github.com/jreese/multiprocessing-keyboardinterrupt/>`_
Our version is a drop-in replacement for multiprocessing.Pool ... as long as
the map() method is the only one that needs to be interrupt-friendly.
Contributed by Peter K. G. Williams <peter@newton.cx>.
*Added in version 2.1.0*
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["InterruptiblePool"]
import signal
import functools
from multiprocessing.pool import Pool
from multiprocessing import TimeoutError
def _initializer_wrapper(actual_initializer, *rest):
"""
We ignore SIGINT. It's up to our parent to kill us in the typical
condition of this arising from ``^C`` on a terminal. If someone is
manually killing us with that signal, well... nothing will happen.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if actual_initializer is not None:
actual_initializer(*rest)
class InterruptiblePool(Pool):
"""
A modified version of :class:`multiprocessing.pool.Pool` that has better
behavior with regard to ``KeyboardInterrupts`` in the :func:`map` method.
:param processes: (optional)
The number of worker processes to use; defaults to the number of CPUs.
:param initializer: (optional)
Either ``None``, or a callable that will be invoked by each worker
process when it starts.
:param initargs: (optional)
Arguments for *initializer*; it will be called as
``initializer(*initargs)``.
:param kwargs: (optional)
Extra arguments. Python 2.7 supports a ``maxtasksperchild`` parameter.
"""
wait_timeout = 3600
def __init__(self, processes=None, initializer=None, initargs=(),
**kwargs):
new_initializer = functools.partial(_initializer_wrapper, initializer)
super(InterruptiblePool, self).__init__(processes, new_initializer,
initargs, **kwargs)
def map(self, func, iterable, chunksize=None):
"""
Equivalent of ``map()`` built-in, without swallowing
``KeyboardInterrupt``.
:param func:
The function to apply to the items.
:param iterable:
An iterable of items that will have `func` applied to them.
"""
# The key magic is that we must call r.get() with a timeout, because
# a Condition.wait() without a timeout swallows KeyboardInterrupts.
r = self.map_async(func, iterable, chunksize)
while True:
try:
return r.get(self.wait_timeout)
except TimeoutError:
pass
except KeyboardInterrupt:
self.terminate()
self.join()
raise
# Other exceptions propagate up.
| Python | 0 | |
4cc1c75356ac97632345c1900d45ac74521079cd | Find an average | ch03_03_p.py | ch03_03_p.py | summation = 0
input_number = float(input())
number_of_input = 0
while input_number != -1:
number_of_input += 1
summation += input_number
input_number = float(input())
if 0 == number_of_input:
print("No Data")
else:
print(summation / number_of_input)
| Python | 0.999994 | |
133da92ed69aafc6c0a8d4466cf3b0266c5edc68 | Add migration for change in profile model. | userprofile/migrations/0006_auto_20180309_2215.py | userprofile/migrations/0006_auto_20180309_2215.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-09 22:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0005_auto_20171121_1923'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default=None, upload_to='profilepictures'),
),
]
| Python | 0 | |
4bce7685c39e7efbb674407184d0bf436cbdaec0 | Create ftxproxy.py | ftxproxy.py | ftxproxy.py | #!/usr/bin/python
# This is a simple port-forward / proxy, written using only the default python
# library. If you want to make a suggestion or fix something you can contact-me
# at voorloop_at_gmail.com
# Distributed over IDC(I Don't Care) license
import socket
import select
import time
import sys
# Tuning knobs: increasing buffer_size and decreasing delay improve speed and
# bandwidth, but extreme values can make the proxy unstable.
buffer_size = 4096  # bytes requested per recv() call
delay = 0.0001      # seconds slept between select() polls
forward_to = ('10.11.10.18', 8989)  # (host, port) of the upstream server
class Forward:
    """Creates the upstream (server-side) socket for one proxied connection."""
    def __init__(self):
        self.forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def start(self, host, port):
        """Connect to (host, port).

        Returns the connected socket on success, or False when the
        connection could not be established (the error is printed).
        """
        try:
            self.forward.connect((host, port))
            return self.forward
        except Exception as e:  # BUG FIX: 'except Exception, e' is Python-2-only syntax
            print(e)
            # Don't leak the socket descriptor when the connect fails.
            self.forward.close()
            return False
class TheServer:
    """select()-based TCP proxy: accepts clients on a listening socket and
    relays bytes both ways between each client and a fresh upstream socket."""
    # NOTE(review): these are *class* attributes shared by all instances --
    # fine for the single-server usage below, but worth knowing.
    input_list = []
    channel = {}
    def __init__(self, host, port):
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server.bind((host, port))
        self.server.listen(200)
    def main_loop(self):
        """Poll all known sockets forever, dispatching accept/close/relay."""
        self.input_list.append(self.server)
        while 1:
            time.sleep(delay)
            ss = select.select
            inputready, outputready, exceptready = ss(self.input_list, [], [])
            for self.s in inputready:
                if self.s == self.server:
                    # Readability on the listening socket means a new client.
                    self.on_accept()
                    break
                self.data = self.s.recv(buffer_size)
                if len(self.data) == 0:
                    # Zero-length read: the peer closed the connection.
                    self.on_close()
                    break
                else:
                    self.on_recv()
    def on_accept(self):
        """Accept a client and pair it with a new upstream connection."""
        forward = Forward().start(forward_to[0], forward_to[1])
        clientsock, clientaddr = self.server.accept()
        if forward:
            print clientaddr, "has connected"
            self.input_list.append(clientsock)
            self.input_list.append(forward)
            # channel maps each socket to its counterpart, in both directions.
            self.channel[clientsock] = forward
            self.channel[forward] = clientsock
        else:
            print "Can't establish connection with remote server.",
            print "Closing connection with client side", clientaddr
            clientsock.close()
    def on_close(self):
        """Tear down both ends of the socket pair containing self.s."""
        print self.s.getpeername(), "has disconnected"
        #remove objects from input_list
        self.input_list.remove(self.s)
        self.input_list.remove(self.channel[self.s])
        out = self.channel[self.s]
        # close the connection with client
        self.channel[out].close()  # equivalent to do self.s.close()
        # close the connection with remote server
        self.channel[self.s].close()
        # delete both objects from channel dict
        del self.channel[out]
        del self.channel[self.s]
    def on_recv(self):
        """Relay the just-received chunk to the paired socket."""
        data = self.data
        # here we can parse and/or modify the data before send forward
        print data
        self.channel[self.s].send(data)
if __name__ == '__main__':
    # Listen on all interfaces, port 8002; Ctrl-C exits with status 1.
    server = TheServer('', 8002)
    try:
        server.main_loop()
    except KeyboardInterrupt:
        print "Ctrl C - Stopping server"
        sys.exit(1)
| Python | 0 | |
350f7056e895dd8ddee756779ae50522f099f998 | Add tests for the oauth2 decorators | yithlibraryserver/oauth2/tests/test_decorators.py | yithlibraryserver/oauth2/tests/test_decorators.py | # Yith Library Server is a password storage server.
# Copyright (C) 2014 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
from bson.tz_util import utc
from pyramid.httpexceptions import HTTPUnauthorized
from yithlibraryserver import testing
from yithlibraryserver.oauth2.decorators import (
protected,
protected_method,
)
@protected(['scope1'])
def view_function(request):
    # Fixture view: reachable only when the request carries an access code
    # granting the 'scope1' scope (enforced by the decorator under test).
    return 'response'
class ViewClass(object):
    """Fixture view class whose method is guarded by ``protected_method``."""
    def __init__(self, request):
        self.request = request
    @protected_method(['scope1'])
    def view_method(self):
        # Reachable only with an access code granting 'scope1'.
        return 'response'
class DecoratorsTests(testing.TestCase):
    """Exercise the ``protected`` and ``protected_method`` OAuth2 decorators
    with access codes whose scope does / does not match the required one."""
    clean_collections = ('access_codes', 'users')
    def setUp(self):
        super(DecoratorsTests, self).setUp()
        # Freeze "now" so the access code created below (which expires at
        # 09:00 that same day) is always still valid while the test runs.
        os.environ['YITH_FAKE_DATETIME'] = '2014-2-23-08-00-00'
        self.user_id = self.db.users.insert({
            'username': 'user1',
        })
    def tearDown(self):
        del os.environ['YITH_FAKE_DATETIME']
        super(DecoratorsTests, self).tearDown()
    def _create_access_code(self, scope):
        """Store a Bearer access token '1234' for self.user_id with *scope*."""
        expiration = datetime.datetime(2014, 2, 23, 9, 0, tzinfo=utc)
        self.db.access_codes.insert({
            'access_token': '1234',
            'type': 'Bearer',
            'expiration': expiration,
            'user_id': self.user_id,
            'scope': scope,
            'client_id': 'client1',
        })
    def _make_request(self):
        """Build a request presenting the Bearer token created above.

        Extracted to replace the four identical inline copies the tests
        previously carried."""
        return testing.FakeRequest(headers={
            'Authorization': 'Bearer 1234',
        }, db=self.db)
    def test_protected_bad_scope(self):
        self._create_access_code('scope2')
        request = self._make_request()
        self.assertRaises(HTTPUnauthorized, view_function, request)
    def test_protected(self):
        self._create_access_code('scope1')
        request = self._make_request()
        self.assertEqual(view_function(request), 'response')
        self.assertEqual(request.user['username'], 'user1')
    def test_protected_method_bad_scope(self):
        self._create_access_code('scope2')
        request = self._make_request()
        view_object = ViewClass(request)
        self.assertRaises(HTTPUnauthorized, view_object.view_method)
    def test_protected_method(self):
        self._create_access_code('scope1')
        request = self._make_request()
        view_object = ViewClass(request)
        self.assertEqual(view_object.view_method(), 'response')
        self.assertEqual(request.user['username'], 'user1')
| Python | 0 | |
f93ae9f59dcbb834b93fa3a57d89d84c4520baa0 | Create collector.py | collector.py | collector.py | import os
from subprocess import Popen, PIPE
import tarfile
import json
import shutil
import re
def move(file, origin, destination):
    """Move *file* (a single path string, or a list of them) from *origin*
    into *destination*.

    A leading './' is removed from the source name; './', 'bin/' and
    'dist/' fragments are removed from the destination name.
    """
    if not file:
        return
    if not isinstance(file, basestring):
        # A list of manifest entries: recurse over each one.
        for entry in file:
            move(entry, origin, destination)
        return
    dest_name = re.sub('(\./|bin/|dist/)', '', file)
    src_name = re.sub('(\./)', '', file)
    shutil.move('%s/%s' % (origin, src_name),
                '%s/%s' % (destination, dest_name))
def load_json(conf):
    """Parse the JSON file at path *conf* and return the decoded object."""
    with open(conf) as handle:
        return json.load(handle)
def check_skip(text, skips):
    """Return True when any entry of *skips* occurs as a substring of *text*."""
    return any(word in text for word in skips)
# Read the CDN configuration: target package names and version skip-words.
config = load_json('cdn-config.json')
print 'Rebuild CDN collection.'
for target in config['targets']:
    print 'Collect %s libraries.' % target
    # List all published versions of the package via `bower info`.
    proc = Popen(["bower info %s" % target], stdout=PIPE, shell=True)
    start = False
    for line in proc.communicate()[0].splitlines():
        if not start:
            # Skip bower's preamble until the version list begins.
            if 'Available versions:' in line:
                start = True
            continue
        if 'You can request' in line:
            # End of the version list.
            break
        if check_skip(line, config['skipWords']):
            continue
        version = line.strip()[2:]
        print 'Version found %s - %s.' % (target, version)
        if not os.path.isdir(target):
            os.mkdir(target)
        directory = "%s/%s" % (target, version)
        if os.path.isdir(directory):
            if os.listdir(directory):
                print 'Skip version, directory already exists %s/%s' % (target, version)
                continue
        else:
            os.mkdir("%s/%s" % (target, version))
        proc_info = Popen(["bower info %s#%s" % (target, version)], stdout=PIPE, shell=True)
        link = None
        info = proc_info.communicate()[0]
        # The bower output is almost-JSON: quote the bare keys and swap the
        # single quotes so json.loads() can parse it.
        info = info[info.find('{'):info.rfind('}') + 1].replace(': ', '": ')
        for i, match in enumerate(re.finditer('( [A-za-z]+":)', info)):
            pos = match.start() + 1 + i
            info = info[:pos] + '"' + info[pos:]
        info = info.replace('\'', '"')
        info = json.loads(info)
        if info['homepage']:
            # Download the release tarball for this version from the homepage
            # (assumed to be a GitHub repository -- TODO confirm).
            wget_cmd = 'wget --directory-prefix="%(target)s/%(version)s" "%(link)s/archive/%(version)s.tar.gz"' % {
                'target': target,
                'version': version,
                'link': info['homepage']
            }
            print wget_cmd
            proc_download = Popen([wget_cmd], stdout=PIPE, shell=True)
            print proc_download.communicate()[0]
            # Unpack the tarball, then keep only the package's "main" files.
            archive = "%s/%s" % (directory, os.listdir(directory)[0])
            tfile = tarfile.open(archive, 'r:gz')
            tfile.extractall(directory)
            os.remove(archive)
            location = "%s/%s" % (directory, os.listdir(directory)[0])
            move(info.get('main', info.get('scripts')), location, directory)
            shutil.rmtree(location)
        else:
            print 'Download link for version not found.'
            print info
| Python | 0.000001 | |
ec8f6fdba200fcb4816e170c1517899f1c03db04 | added vowels | vowels.py | vowels.py | # Copyright © 2014 Bart Massey
# Print the hex value of the set of vowels.
# Build a 26-bit mask with one bit per letter of the alphabet, setting the
# bits that correspond to the vowels, and print it in hexadecimal.
n = 0
for vowel in 'aeiou':
    n |= 1 << (ord(vowel) - ord('a'))
print(format(n, "x"))
| Python | 0.999665 | |
0f1475eddf3f9237a1f746784b090a4f65d96226 | add import script for Swindon | polling_stations/apps/data_collection/management/commands/import_swindon.py | polling_stations/apps/data_collection/management/commands/import_swindon.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Import polling stations and addresses for Swindon (GSS E06000030)
    from a tab-separated Xpress Democracy Club export, for the 2017-06-08
    parliamentary election."""
    council_id = 'E06000030'
    addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (11).tsv'
    stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (11).tsv'
    elections = ['parl.2017-06-08']
    csv_delimiter = '\t'
| Python | 0 | |
9366fe261b2f13f81678851fd5ae4a0035a811c7 | Add new package: py-walinuxagent (#18961) | var/spack/repos/builtin/packages/py-walinuxagent/package.py | var/spack/repos/builtin/packages/py-walinuxagent/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyWalinuxagent(PythonPackage):
    """Microsoft Azure Linux Guest Agent."""
    homepage = "https://github.com/Azure/WALinuxAgent"
    url      = "https://github.com/Azure/WALinuxAgent/archive/pre-v2.2.52.tar.gz"
    version('2.2.52', sha256='02c26af75827bd7042aa2285c78dee86ddb25a6a8f6bb0a85679a2df9ba56a3a')
    version('2.2.50', sha256='3b2b99552e3b35dfcbb4cabf476d0113d701eb23d2e0e61f35f0fa33cabde0a1')
    depends_on('python@2.6:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-pyasn1', type=('build', 'run'))
    # py-distro only on 3.8+ -- presumably because
    # platform.linux_distribution() was removed there; TODO confirm.
    depends_on('py-distro', type=('build', 'run'), when='^python@3.8:')
| Python | 0 | |
ce647d22a2a65ea40d259b064a6b4f611ca669af | Add test codes for wheels | wheels.py | wheels.py | #!/usr/bin/python2
#coding=utf-8
import RPi.GPIO as GPIO
import time
'''
2 L298N control 4 Motors.
SOC Control GPIO
Front Motor: (Left) 15-ENDA, 31-forward,33-backward
(Right)29-ENDB, 35-forward,37-backward
Rear Motor: (Left) 18-ENDB, 38-forward,40-backward
(Right)25-ENDA, 36-forward,32-backward
This is temporary test codes, need define a wheels class.
'''
# GPIOs should move a common file to define.
# (ENABLE, FORWARD, BACKWARD) BOARD pin numbers for each of the four motors,
# wired through two L298N drivers (see the module docstring above).  This
# replaces the twelve near-identical hand-written pin-toggling functions.
FRONT_LEFT = (15, 31, 33)
FRONT_RIGHT = (29, 35, 37)
REAR_LEFT = (18, 38, 40)
REAR_RIGHT = (25, 36, 32)
_MOTORS = (FRONT_LEFT, FRONT_RIGHT, REAR_LEFT, REAR_RIGHT)
TURN_TIME = 0.3  # seconds each turn manoeuvre is applied (was hard-coded)


def _run(motor, forward=True):
    """Enable one motor and drive it forward (default) or backward."""
    enable, fwd, back = motor
    GPIO.output(enable, GPIO.HIGH)
    GPIO.output(fwd, GPIO.HIGH if forward else GPIO.LOW)
    GPIO.output(back, GPIO.LOW if forward else GPIO.HIGH)


def init():
    """Select BOARD pin numbering and configure all motor pins as outputs."""
    GPIO.setmode(GPIO.BOARD)
    for motor in _MOTORS:
        for pin in motor:
            GPIO.setup(pin, GPIO.OUT)


def reset():
    """Stop everything by driving every motor pin low."""
    for motor in _MOTORS:
        for pin in motor:
            GPIO.output(pin, GPIO.LOW)


def front_left_forward():
    _run(FRONT_LEFT)


def front_right_forward():
    _run(FRONT_RIGHT)


def rear_left_forward():
    _run(REAR_LEFT)


def rear_right_forward():
    _run(REAR_RIGHT)


def front_left_back():
    _run(FRONT_LEFT, forward=False)


def front_right_back():
    _run(FRONT_RIGHT, forward=False)


def rear_left_back():
    _run(REAR_LEFT, forward=False)


def rear_right_back():
    _run(REAR_RIGHT, forward=False)


def forward():
    """Drive all four wheels forward."""
    reset()
    front_left_forward()
    front_right_forward()
    rear_left_forward()
    rear_right_forward()


def back():
    """Drive all four wheels backward."""
    reset()
    front_left_back()
    front_right_back()
    rear_left_back()
    rear_right_back()


def front_left_turn():
    """Turn left while moving forward: drive only the right-side wheels."""
    reset()
    front_right_forward()
    rear_right_forward()
    time.sleep(TURN_TIME)
    reset()


def front_right_turn():
    """Turn right while moving forward: drive only the left-side wheels."""
    reset()
    front_left_forward()
    rear_left_forward()
    time.sleep(TURN_TIME)
    reset()


def rear_left_turn():
    """Turn left while reversing: drive the left-side wheels backward."""
    reset()
    rear_left_back()
    front_left_back()
    time.sleep(TURN_TIME)
    reset()


def rear_right_turn():
    """Turn right while reversing: drive the right-side wheels backward."""
    reset()
    rear_right_back()
    front_right_back()
    time.sleep(TURN_TIME)
    reset()


def stop():
    """Stop all motors."""
    reset()


if __name__ == "__main__":
    # Smoke test: drive forward for two seconds, then stop.
    init()
    reset()
    forward()
    time.sleep(2)
    stop()
    # Always release the GPIO pins on exit:
GPIO.cleanup() | Python | 0 | |
f3db6608c2b4afeb214c3f1b94e0175609ad0b88 | Add migration file for event slug changes | cs4teachers/events/migrations/0018_auto_20170706_0803.py | cs4teachers/events/migrations/0018_auto_20170706_0803.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-06 08:03
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Regenerate the AutoSlugField definitions for the slug columns of
    Event, Location, Resource, Session and ThirdPartyEvent (Session slugs
    are unique per parent event)."""
    dependencies = [
        ('events', '0017_auto_20170705_0952'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),
        ),
        migrations.AlterField(
            model_name='location',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),
        ),
        migrations.AlterField(
            model_name='resource',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),
        ),
        migrations.AlterField(
            model_name='session',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique_with=['event__slug']),
        ),
        migrations.AlterField(
            model_name='thirdpartyevent',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),
        ),
    ]
| Python | 0 | |
7d10648275e991fda42c6dccbce340d37d442115 | fix test on test_cell | src/test/test_cell.py | src/test/test_cell.py | import unittest, os
import numpy as np
from particles import PointDipoleList
from molecules import Cluster, Atom
from use_generator import Generator
import dstruct
# Fixture input files, resolved relative to this test module's directory.
FILE_XYZ = os.path.join( os.path.dirname(__file__), 'pna_waters.xyz' )
FILE_MOL = os.path.join( os.path.dirname(__file__), 'tip3p44_10qm.mol' )
FILE_PDB = os.path.join( os.path.dirname(__file__), 'tip3p0.pdb' )
POTSTRING = """AU
6 1 22 1
1 0.000000 0.000000 0.000000 -0.66229 0.00000 0.00000 0.34276 4.10574 0.00000 0.00000 4.79229 0.00000 4.01912 0.00000 0.00000 -3.33162 0.00000 0.00000 0.00000 0.00000 -0.32216 0.00000 0.79137
1 1.430429 0.000000 1.107157 0.33114 -0.16617 0.00000 -0.11629 1.53802 0.00000 1.19765 0.90661 0.00000 1.37138 -4.52137 0.00000 -5.08061 -1.35494 0.00000 -4.83365 0.00000 -0.46317 0.00000 -3.47921
1 -1.430429 0.000000 1.107157 0.33114 0.16617 0.00000 -0.11629 1.53802 0.00000 -1.19765 0.90661 0.00000 1.37138 4.52137 0.00000 -5.08061 1.35494 0.00000 4.83365 0.00000 -0.46317 0.00000 -3.47921
2 15.000000 15.000000 15.000000 -0.66229 0.00000 0.00000 0.34276 4.10574 0.00000 0.00000 4.79229 0.00000 4.01912 0.00000 0.00000 -3.33162 0.00000 0.00000 0.00000 0.00000 -0.32216 0.00000 0.79137
2 16.430429 15.000000 16.107157 0.33114 -0.16617 0.00000 -0.11629 1.53802 0.00000 1.19765 0.90661 0.00000 1.37138 -4.52137 0.00000 -5.08061 -1.35494 0.00000 -4.83365 0.00000 -0.46317 0.00000 -3.47921
2 13.569571 15.000000 16.107157 0.33114 0.16617 0.00000 -0.11629 1.53802 0.00000 -1.19765 0.90661 0.00000 1.37138 4.52137 0.00000 -5.08061 1.35494 0.00000 4.83365 0.00000 -0.46317 0.00000 -3.47921"""
from dstruct import Cell
class CellTest( unittest.TestCase ):
    """Unit tests for the spatial binning container ``Cell`` in dstruct."""
    def setUp(self):
        pass
    def test_init(self):
        """len(Cell) reflects how many cutoff-sized bins span the x range."""
        # Range of width 1.0 with cutoff 0.4 -> 3 bins.
        c = Cell( my_min = map(float, [0, 0, 0]),
                my_max = map(float, [1, 1, 1] ),
                my_cutoff = 0.4)
        assert len(c) == 3
        # Cutoff larger than the whole range -> a single bin.
        c = Cell( my_min = map(float, [-10, 0, 0]),
                my_max = map(float, [0, 1, 1] ),
                my_cutoff = 12)
        assert len(c) == 1
        # Range of width 15 with cutoff 4.9 -> 4 bins.
        c = Cell( my_min = map(float, [-5, 0, 0]),
                my_max = map(float, [10, 1, 1] ),
                my_cutoff = 4.9)
        assert len(c) == 4
    def test_add(self):
        """Added atoms land in the bin matching their coordinates."""
        c = Cell( my_cutoff = 2.9 )
        a1 = Atom( element = 'H', x = 3 )
        a2 = Atom( element = 'H', x = 3, y = 3 )
        a3 = Atom( element = 'H', x = 3, y = 3, z= 3 )
        c.add( a1 )
        c.add( a2 )
        c.add( a3 )
        assert a1 in c[1][0][0]
        assert a2 in c[1][1][0]
        assert a3 in c[1][1][1]
    def test_update(self):
        """update() re-bins atoms after their coordinates change."""
        c = Cell( my_cutoff = 3 )
        a = Atom( z = 5 )
        c.add( a )
        assert a in c[0][0][1]
        a.z = 0
        c = c.update()
        assert a in c[0][0][0]
    def test_get_closest(self):
        """get_closest(at) returns every atom except *at* itself found in the
        3x3x3 block of bins surrounding *at*."""
        cell = Cell.from_xyz( FILE_XYZ )
        #ensure at1 exists
        for at in cell:
            at1 = at
        x, y, z = cell.get_index( at1 )
        ats = 0
        tmp = []
        # Manually collect the unique atoms of the 27 neighbouring bins and
        # compare the count (minus at1) with get_closest().
        for i in range( x-1, x+2 ):
            for j in range( y-1, y+2 ):
                for k in range( z-1, z+2 ):
                    try:
                        for at in cell[i][j][k]:
                            if at in tmp:
                                continue
                            tmp.append(at)
                    except IndexError:
                        pass
        assert len(tmp) -1 == len(cell.get_closest( at1 ))
    def test_from_PointDipoleList(self, ):
        """A Cell can be constructed directly from a PointDipoleList."""
        _str = POTSTRING
        pdl = PointDipoleList.from_string( _str )
        cell = dstruct.Cell.from_PointDipoleList( pdl, co = 5 )
        assert isinstance( cell, dstruct.Cell )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python | 0.000002 | |
d03254dabaac466edd697de38c3433475828bd4f | Add tests for has_changes | tests/functions/test_has_changes.py | tests/functions/test_has_changes.py | import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import has_changes
class TestHasChanges(object):
    """Tests for sqlalchemy_utils.has_changes on a minimal mapped class."""
    def setup_method(self, method):
        # Build a fresh declarative base and model per test so no mapper
        # state leaks between tests.
        Base = declarative_base()
        class Article(Base):
            __tablename__ = 'article_translation'
            id = sa.Column(sa.Integer, primary_key=True)
            title = sa.Column(sa.String(100))
        self.Article = Article
    def test_without_changed_attr(self):
        # A freshly constructed object has no pending change on 'title'.
        article = self.Article()
        assert not has_changes(article, 'title')
    def test_with_changed_attr(self):
        # Setting the attribute in the constructor marks it as changed.
        article = self.Article(title='Some title')
        assert has_changes(article, 'title')
| Python | 0.000001 | |
795cd6e190a1cc4d416c5524399780e586dc6c45 | Add better kitt script | kitt/kitt.py | kitt/kitt.py | from microbit import *
display.scroll("I am the Knight Industries 2000")
# Display geometry/brightness limits: x runs 0..MAX_ROWS on the 5x5 matrix,
# pixel brightness runs MIN_BRIGHTNESS..MAX_BRIGHTNESS.
MAX_ROWS = 4
MAX_BRIGHTNESS = 9
MIN_BRIGHTNESS = 2
def scan(reverse=False):
    """One Knight-Rider sweep along the middle LED row: a full-brightness
    head pixel followed by a dimming tail, left-to-right (right-to-left
    when *reverse* is True)."""
    for i in range(0, 9):
        brightness = MAX_BRIGHTNESS
        # Pixels to draw this frame: head first, then the trailing tail.
        row_range = range(0, i+1) if not reverse else range(i, -1, -1)
        counter = 0
        for j in row_range:
            x = i - j if not reverse else MAX_ROWS - j
            # First two pixels fade from full brightness; the rest of the
            # tail is clamped so it never drops below MIN_BRIGHTNESS.
            light_level = max(MIN_BRIGHTNESS, brightness) if counter >= 2 else MAX_BRIGHTNESS - counter
            print (x, light_level)
            if x <= MAX_ROWS and x >= 0:
                display.set_pixel(x, 2, light_level)
            counter += 1
            #if i >= 2:
            brightness -= 1
        print("-")
        if i < 8:
            sleep(100)
    # After the sweep, dim the whole row back to the resting level.
    for x in range(0, MAX_ROWS+1):
        display.set_pixel(x, 2, MIN_BRIGHTNESS)
# Bounce the scanner back and forth forever.
while True:
    scan()
    scan(reverse=True)
| Python | 0 | |
a6ac5055a1867259ab17997a076299731e57c45b | Add Android extractor | strings2pot/extractors/android.py | strings2pot/extractors/android.py | # -*- coding: utf-8 -*-
import re
import xml.etree.ElementTree as ET
class AndroidExtractor:
    """Extracts <string> resources from an Android strings.xml file and
    appends gettext (POT) entries for them to a destination file."""
    def __init__(self, source_file, destination_file, context_id_generator):
        # context_id_generator maps a message id string to its msgctxt value.
        self.source_file = source_file
        self.destination_file = destination_file
        self._create_context_id = context_id_generator
def parse_string(self, string):
s = string.replace("\\'", "'")
s = s.replace("\"", "\\\"")
s = s.replace("\\n", "\n")
s = re.sub(r'%\d\$s', '%s', s)
s = re.sub(r'%\d\$d', '%d', s)
if "\n" in s:
s = s.replace("\n", "\\n\n")
parts = s.split("\n")
new_parts = ["\"\""]
for line in parts:
new_parts.append("\"%s\"" % line)
s = "\n".join(new_parts)
else:
s = "\"%s\"" % s
return s
    def run(self):
        """Append one POT entry per <string> element of the source XML to
        the destination file (opened in append mode)."""
        with open(self.destination_file, 'a') as pot:
            root = ET.parse(self.source_file)
            counter = 3  # rough source-line counter used in the '#:' reference
            for el in root.findall('./string'):
                parsed_string = self.parse_string(el.text)
                message_id = parsed_string[1:len(parsed_string)-1]  # strip the surrounding quotes
                counter += 1
                content = "\n#: %s:%d\nmsgctxt \"%s\"\nmsgid %s\nmsgstr \"\"\n" % (
                    self.source_file,
                    counter,
                    self._create_context_id(message_id), # was el.attrib.get('name')
                    parsed_string )
pot.write(content) | Python | 0.000001 | |
c711f62ef96d67a6e42e3bbe10c0b3cd64a23444 | add moviepy - text_hineinzoomen | moviepy/text_hineinzoomen.py | moviepy/text_hineinzoomen.py | #!/usr/bin/env python
# Create a video showing a piece of text being zoomed in (the text grows)
# Settings
text = 'Text' # text to render
textgroesse = 150 # final text size in pixels
textfarbe_r = 0 # text colour, red component
textfarbe_g = 0 # text colour, green component
textfarbe_b = 0 # text colour, blue component
schrift = 'FreeSans' # font family
winkel = 0 # rotation angle
hgfarbe_r = 1 # background colour, red component
hgfarbe_g = 1 # background colour, green component
hgfarbe_b = 1 # background colour, blue component
videobreite = 1280 # video width in pixels
videohoehe = 720 # video height in pixels
videolaenge = 5 # video length in seconds
videodatei = 'text.ogv' # output video file
frames = 25 # frames per second
# Modul moviepy importieren
from moviepy.editor import *
# Modul gizeh importieren
import gizeh
# Frame-generator function; t is the time (in seconds) of the frame being rendered
def create_frame(t):
    """Render the frame at time ``t``: the text size grows linearly with
    time, reaching ``textgroesse`` pixels after ``videolaenge`` seconds, so
    the text appears to zoom in."""
    surface = gizeh.Surface(videobreite, videohoehe,
                            bg_color=(hgfarbe_r, hgfarbe_g, hgfarbe_b))
    zoomed_size = t*(textgroesse/videolaenge)
    label = gizeh.text(text, fontfamily=schrift, fontsize=zoomed_size,
                       fill=(textfarbe_r, textfarbe_g, textfarbe_b),
                       xy=(videobreite/2, videohoehe/2), angle=winkel)
    label.draw(surface)
    return surface.get_npimage()
# Build the clip from the frame generator...
video = VideoClip(create_frame, duration=videolaenge)
# ...and render it to disk.
video.write_videofile(videodatei, fps=frames)
# Hilfe fuer moviepy: https://zulko.github.io/moviepy/index.html
# Hilfe fuer gizeh: https://github.com/Zulko/gizeh
# text_hineinzoomen.py
# Lizenz: http://creativecommons.org/publicdomain/zero/1.0/
# Author: openscreencast.de
| Python | 0.000001 | |
c61b1595709b6acd26cf7c43e7858e3ad5cb588f | Add missing module. | csvkit/headers.py | csvkit/headers.py | #!/usr/bin/env python
def make_default_headers(n):
    """Generate *n* placeholder column names ("column1" .. "columnN") for
    files that lack a header row."""
    return [u'column%i' % index for index in range(1, n + 1)]
| Python | 0 | |
1ad62b8fcffd88cc5aecb01418650e09aaa7ffad | Add deck_test.py, it's only a script for test when programming. | deck_test.py | deck_test.py | import os
import json
from hearthstone.deckstrings import Deck
from hearthstone.enums import FormatType
from hearthstone.cardxml import load
from hearthstone.enums import Locale,Rarity
from collection import Collection
# Create a deck from a deckstring
deck = Deck()
deck.heroes = [7] # Garrosh Hellscream
deck.format = FormatType.FT_WILD
# Nonsense cards, but the deckstring doesn't validate.
deck.cards = [(1, 3), (2, 3), (3, 3), (4, 3)] # id, count pairs
print(deck.as_deckstring) # "AAEBAQcAAAQBAwIDAwMEAw=="
# Import a deck from a deckstring
deck = Deck.from_deckstring("AAEBAf0ECMAB5gT7BPsFigbYE5KsAv2uAgucArsClQONBKsEtAThBJYF7Ae8CImsAgA=")
print (deck.cards)
# load card database from CardDefs.xml and use it to initialize DBF database
# (db maps ids to cards; db_dbf re-keys the same cards by numeric dbf_id).
db, xml = load(os.path.join("hsdata","CardDefs.xml"), locale="zhCN")
db_dbf={}
for card in db:
    #print (card)
    db_dbf[db[card].dbf_id] = db[card]
#print (db)
# Print "<count> x(<cost>) <name> <rarity>" for every card in the deck.
for cardPair in deck.cards:
#    print (cardPair[0])
    card = db_dbf[cardPair[0]]
    print (cardPair[1],"x(", card.cost,")", card.name, card.rarity)
#print (type(deck.cards))
#col = Collection()
#for cardPair in deck.cards:
#    col.add(cardPair)
#col.output()
#col.writeToFiles("mycards.csv")
# Load the user's card collection from disk and show it.
col2 = Collection()
col2.loadFromFile("mycards.csv")
col2.output()
#col2.limitTo(1)
#col2.output()
#col3 = Collection()
#col3.initFromDeckStringFile("initdeck")
#col3.output()
def calculateLacksFromJSONFile(path, collection, db_dbf):
    """For every crawled deck record in *path* (one JSON object per line),
    report which cards *collection* is missing and the dust needed to
    craft them.  Returns a list of dicts, one per usable deck."""
    results = []
    with open(path, "rt") as handle:
        for raw_line in handle.readlines():
            record = json.loads(raw_line)['result']
            deck = Deck.from_deckstring(record['deckstring'])
            if len(deck.cards) <= 0:
                # A connection problem while crawling can leave an empty
                # deck in the file; just skip such records.
                continue
            print (record)
            print (deck.cards)
            entry = {}
            entry["name"] = record['title']
            entry["date"] = record['date']
            entry["type"] = record['type']
            entry["deck"] = deck
            entry["lacked"], entry["alreadyHave"] = collection.calculateLacks(deck.cards)
            _, entry["dust"] = calcArcaneDust(entry["lacked"], db_dbf)
            entry["power"] = 1  # placeholder: deck strength is not computed yet
            results.append(entry)
    return results
def calcArcaneDust(cards, db_dbf):
    """Return (dust_gained, dust_needed) for a list of (dbf_id, count) pairs.

    dust_gained is what disenchanting the cards yields; dust_needed is what
    crafting them costs.  Rarities outside the four craftable tiers (e.g.
    basic cards) contribute nothing.  Note: the count element of each pair
    is not used -- every entry is valued exactly once.
    """
    # (disenchant yield, crafting cost) per craftable rarity.
    dust_table = {
        Rarity.COMMON: (5, 40),
        Rarity.RARE: (20, 100),
        Rarity.EPIC: (100, 400),
        Rarity.LEGENDARY: (400, 1600),
    }
    gained = 0
    needed = 0
    for pair in cards:
        yield_value, cost = dust_table.get(db_dbf[pair[0]].rarity, (0, 0))
        gained += yield_value
        needed += cost
    return gained, needed
#print (calculateLacksFromFile("deck1.txt", col2, db_dbf))
# Sanity check: dump each crawled record, then run the lack analysis on
# the same file against the loaded collection.
with open ('t3.json', 'r') as f:
    for line in f.readlines():
        data = json.loads(line)['result']
        print (data)
print (calculateLacksFromJSONFile('t3.json', col2, db_dbf))
| Python | 0 | |
9b50da16238d2f816199c8fb8a20ec558edf5d46 | Create oie_compress.py | oie_compress.py | oie_compress.py | # 1.0 much be paid on insurance claim
# 1.0 much is paid
# 1.0 much is paid on insurance claim
# 1.0 much be paid
# -----------------------------------------------------
# 1.0 channel joining two bodies
# 1.0 channel joining two larger bodies of water
# 1.0 channel joining two larger bodies
# 1.0 channel joining two bodies of water
# 1.0 narrow channel joining two bodies of water
# 1.0 narrow channel joining two larger bodies
# 1.0 narrow channel joining two larger bodies of water
# 1.0 narrow channel joining two bodies
import argparse
# Command-line interface: --oie is the input triplet file, --o the (not yet
# used) output file for the compressed triplets.
parser = argparse.ArgumentParser()
parser.add_argument("--oie", help="Input file containing openIE triplets to be compresses.", required=True)
parser.add_argument("--o", help="Output file for compressed openIE triplets.")
args = parser.parse_args()

with open(args.oie) as f:
    # Each line is "<confidence>\t<subject>\t<relation>\t<object>"; keep the
    # fields after the confidence score.
    # BUG FIX: the original passed str.strip().split("\t")[1:] to map(),
    # calling .strip() on the str *class* itself -> TypeError at runtime.
    triplets = [line.strip().split("\t")[1:] for line in f.readlines()]

if len(triplets) < 3:
    print("No triplets in file %s" % args.oie)
    exit()

for c in xrange(3):
    # Gather column c (subject / relation / object) across all triplets.
    # Placeholder for the planned compression step; the original discarded
    # this list as well.
    column = [row[c] for row in triplets]
| Python | 0.000002 | |
aad116d8bd35eee22d07edaff4cd8ddf80ea80be | Create 2.3Identify_SeasonalValues.py | UseCases_files/3Figures_Python/2.3Identify_SeasonalValues.py | UseCases_files/3Figures_Python/2.3Identify_SeasonalValues.py | # Use Case 2.3Identify_SeasonalValues
# plot Seasonal data for multiple scenarios
# Adel Abdallah
# October 30, 2017
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
from random import randint
import pandas as pd
## read the input data from GitHub csv file which is a direct query output
# 3.3Identify_SeasonalValues.csv
# Query output produced by use case 2.3 (seasonal values per scenario).
df = pd.read_csv("https://raw.githubusercontent.com/WamdamProject/WaMDaM_UseCases/master/UseCases_files/2Results_CSV/2.3Identify_SeasonalValues.csv")

# One curve per distinct value in the "ScenarioName" column; each curve
# plots season name vs. seasonal numeric value.
column_name = "ScenarioName"
subsets = df.groupby(column_name)

data = []

# Manual legend/line styling for each scenario so it can be edited easily.
subsets_settings = {
    'Bear Wet Year Model': {
        'dash': 'solid',
        'mode': 'lines+markers',
        'width': '3',
        'legend_index': 0,
        'legend_name': 'Wet Year Model',
        'color': 'rgb(41, 10, 216)'
    },
    'Bear Normal Year Model': {
        'dash': 'solid',
        'width': '3',
        'mode': 'lines+markers',
        'legend_index': 1,
        'legend_name': 'Normal Year Model',
        'color': 'rgb(38, 77, 255)'
    },
    'Bear Dry Year Model': {
        'dash': 'solid',
        'mode': 'lines+markers',
        'width': '3',
        'legend_index': 2,
        'legend_name': 'Dry Year Model',
        'color': 'rgb(63, 160, 255)'
    },
}

# Map legend_name back to the original subset key; needed when sorting the
# traces below, because a trace only carries its legend name.
# (PORTABILITY FIX: .iteritems() and the bare print statement below were
# Python-2-only; .items()/print() behave identically on Python 2.)
subsets_names = {y['legend_name']: x for x, y in subsets_settings.items()}

for subset in subsets.groups.keys():
    print(subset)
    dt = subsets.get_group(name=subset)
    # NOTE(review): x uses the *whole* frame's SeasonName while y uses only
    # this subset's values -- looks suspicious, kept as-is; confirm intent.
    s = go.Scatter(
        x=df.SeasonName,
        y=dt['SeasonNumericValue'],
        name=subsets_settings[subset]['legend_name'],
        line=dict(
            color=subsets_settings[subset]['color'],
            width=subsets_settings[subset]['width'],
            dash=subsets_settings[subset]['dash']
        ),
        marker=dict(size=10),
        opacity=0.8
    )
    data.append(s)

# Legend order follows trace order, so sort the traces by legend_index.
data.sort(key=lambda x: subsets_settings[subsets_names[x['name']]]['legend_index'])

layout = dict(
    yaxis=dict(
        title="Cumulative flow <br> (acre-feet/month)",
        tickformat=',',
        showline=True,
        dtick='5000',
        ticks='outside',
        ticklen=10
    ),
    xaxis=dict(
        ticks='inside',
        ticklen=25
    ),
    legend=dict(
        x=0.6, y=0.5,
        bordercolor='#00000',
        borderwidth=2
    ),
    width=1100,
    height=800,
    margin=go.Margin(l=210, b=100),
    font=dict(size=28)
)

# Assemble the figure and render it offline (opens in a browser window).
fig = dict(data=data, layout=layout)
plotly.offline.plot(fig, filename = "2.3Identify_SeasonalValues")
| Python | 0.000005 | |
302f98844487d894252d3dc3f4d30940fbcbd9e1 | Allow pex to be invoked using runpy (python -m pex). (#637) | pex/__main__.py | pex/__main__.py | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from pex.bin import pex
# Run the pex CLI when executed as `python -m pex`.  The original used the
# terse `__name__ == '__main__' and pex.main()` short-circuit, which reads
# as a no-effect expression statement; the explicit guard is equivalent.
if __name__ == '__main__':
    pex.main()
| Python | 0.000065 | |
51398136622566b36a077e80841d10957d0108c3 | add liveview sample code | playLiveview.py | playLiveview.py | #!/usr/bin/env python
from sonyAPI2 import API2
import cv2
import urllib2
import numpy as np
import time
import struct
# Connect to the Sony camera API and start the liveview stream; the camera
# answers with the URL of the multipart liveview feed.
api = API2()
# api.update_url()
api.update_api_list()
try:
    result = api.do('startLiveview')
    url = result['result'][0]
except KeyError:
    # Unexpected response shape: dump it for debugging.
    # NOTE(review): execution continues after this, so `url` may be
    # undefined at the urlopen() call below -- confirm whether this
    # should abort instead.
    print result
f = urllib2.urlopen(url)
#method 1
# Parse the liveview stream by scanning a rolling buffer for the '$5hy'
# payload-start marker.  From the offsets used below, each marker is
# apparently preceded by an 8-byte common header and followed by a payload
# header inside a 128-byte block -- TODO confirm against the Sony
# liveview protocol documentation.
buff = ''
chunk_size = 8192
for i in xrange(300):
    if len(buff) < chunk_size:
        time_s = time.time()
        buff = buff + f.read(chunk_size)
        # print "Download Speed %f KB/s"%(chunk_size/1000/(time.time() - time_s))
    time_s = time.time()
    # ''.join(buff) is a no-op for a str buffer; find() locates the marker.
    start_code = ''.join(buff).find('$5hy')
    # print "LCS time cost", time.time() - time_s
    if start_code < 0:
        # Marker not present: keep only a short tail (it may contain the
        # beginning of a split marker) and refill on the next iteration.
        buff = buff[-12:]
        print "skip", len(buff)-12
    elif start_code < 8:
        # Marker found, but its 8-byte common header is truncated; drop it.
        buff = buff[8:]
        print "skip a header"
    else:
        if start_code > len(buff) - 129:
            # Not enough buffered data for the full header block; read more.
            buff = buff + f.read(chunk_size)
        start_byte = ord(buff[start_code - 8])
        payload_type = ord(buff[start_code - 7])
        sequence_num, = struct.unpack('>I', buff[start_code - 6:start_code - 4].rjust(4,'\0'))
        time_stamp, = struct.unpack('>I', buff[start_code - 4:start_code].rjust(4,'\0'))
        payload_size, = struct.unpack('>I', buff[start_code+4:start_code+7].rjust(4,'\0'))
        padding_size = ord(buff[start_code+8])
        print "StartByte:%d\t sequenceNum:%d\t timeStamp:%d\t Type:%d\t Payload:%d\t Padding:%d\t"%(
            start_byte,sequence_num,time_stamp,payload_type,payload_size,padding_size)
        buff = buff[start_code+128:]
        if payload_type == 1:
            if payload_size + padding_size > len(buff):
                time_s = time.time()
                download_size = payload_size+padding_size-len(buff)
                buff = buff + f.read(download_size)
                # print "Download Speed %f KB/s"%(download_size/1000/(time.time() - time_s))
            img_data = buff[:payload_size]
            buff = buff[payload_size:]
            time_s = time.time()
            # Payload type 1 is a JPEG frame: decode and display it.
            d = np.asarray(bytearray(img_data), dtype='uint8')
            img = cv2.imdecode(d,cv2.IMREAD_COLOR)
            cv2.imshow('postview',img)
            cv2.waitKey(10)
            # print "Decode time cost", time.time() - time_s
#method 2
def checkbyte(f):
if f.read(4) == '$5hy':
return
state = 0
i = 1
while 1:
i+=1
if state == 0 :
if f.read(1) == '$':
state = 1
else:
state = 0
if state == 1 :
if f.read(1) == '5':
state = 2
else:
state = 0
if state == 2 :
if f.read(1) == 'h':
state = 3
else:
state = 0
if state == 3 :
if f.read(1) == 'y':
state = 4
else:
state = 0
if state == 4 :
print 'skip', i
return
for i in xrange(300):
buff = f.read(8)
start_byte ord(buff[0])
payload_type, = struct.unpack('>I',buff[1].rjust(4,'\0'))
sequence_num, = struct.unpack('>I',buff[2:4].rjust(4,'\0'))
time_stamp, = struct.unpack('>I',buff[4:8])
#payload header
checkbyte(f)
buff = f.read(124)
payload_size, = struct.unpack('>I',buff[0:3].rjust(4,'\0'))
padding_size= ord(buff[3])
print "StartByte:%d\t sequenceNum:%d\t timeStamp:%d\t Type:%d\t Payload:%d\t Padding:%d\t"%(
start_byte,sequence_num,time_stamp,payload_type,payload_size,padding_size)
d = f.read(payload_size)
if padding_size > 0:
f.read(padding_size)
if payload_type == 1:
# Type = 0x01
d = np.asarray(bytearray(d), dtype='uint8')
img = cv2.imdecode(d,cv2.IMREAD_COLOR)
cv2.imshow('postview',img)
cv2.waitKey(1)
print api.do('stopLiveview')
| Python | 0 | |
8f506c20ccad47ee6f2454a419145b1b2b48adba | Create bold-words-in-string.py | Python/bold-words-in-string.py | Python/bold-words-in-string.py | # Time: O(n * l), n is the length of S, l is the average length of words
# Space: O(t), t is the size of trie
class Solution(object):
def boldWords(self, words, S):
"""
:type words: List[str]
:type S: str
:rtype: str
"""
_trie = lambda: collections.defaultdict(_trie)
trie = _trie()
for i, word in enumerate(words):
reduce(dict.__getitem__, word, trie)["_end"] = i
lookup = [False] * len(S)
for i in xrange(len(S)):
curr = trie
k = -1
for j in xrange(i, len(S)):
if S[j] not in curr:
break
curr = curr[S[j]]
if "_end" in curr:
k = j
for j in xrange(i, k+1):
lookup[j] = True
result = []
for i in xrange(len(S)):
if lookup[i] and (i == 0 or not lookup[i-1]):
result.append("<b>")
result.append(S[i])
if lookup[i] and (i == len(S)-1 or not lookup[i+1]):
result.append("</b>");
return "".join(result)
| Python | 0.999987 | |
ea0c49745ef454779e711acd3a2f6bc40ce9cf74 | add multichannel | multichannel-imdb.py | multichannel-imdb.py | import logging
from keras.layers.recurrent import LSTM, GRU
from keras.models import Sequential, model_from_json, Graph
from keras.layers.core import Dense, Dropout, MaxoutDense, Activation
from keras.layers.advanced_activations import PReLU
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras.optimizers import SGD
from cnn.layers.convolutions import *
from cnn.layers.embeddings import *
import numpy as np
import cPickle as pickle
LOGGER_PREFIX = ' %s'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def log(msg, logger=logger):
logger.info(LOGGER_PREFIX % msg)
if __name__ == '__main__':
WV_FILE = './data/wv/IMDB-GloVe-300dim-glovebox.pkl'
WV_FILE_GLOBAL = './data/wv/glove.42B.300d.120000-glovebox.pkl'
MODEL_FILE = './test-model.h5'
# -- load in all the data
train, test = {}, {}
log('Loading training data')
train['text4imdb'] = np.load('IMDB_train_glove_X.npy')
train['text4global'] = np.load('IMDB_train_global_glove_X.npy')
train['labels'] = np.load('IMDB_train_glove_y.npy')
log('Shuffling training data')
shuff = range(train['text4imdb'].shape[0])
np.random.shuffle(shuff)
for k in train.keys():
train[k] = train[k][shuff]
if 'lab' not in k:
train[k] = train[k].reshape(train[k].shape[0], -1)
# -- flatten across paragraph dimension, will later be reconstructed in the embedding
# weights = 1.0 * (train['text'] > 0)
del shuff
log('Loading testing data')
# -- testing data
test['text4imdb'] = np.load('IMDB_test_glove_X.npy')
test['text4imdb'] = test['text4imdb'].reshape(test['text4imdb'].shape[0], -1)
test['text4global'] = np.load('IMDB_test_global_glove_X.npy')
test['text4global'] = test['text4global'].reshape(test['text4global'].shape[0], -1)
test['labels'] = np.load('IMDB_test_glove_y.npy')
log('Loading IMDB trained word vectors')
gb = pickle.load(open(WV_FILE, 'rb'))
log('Loading pretrained word vectors')
gb_global = pickle.load(open(WV_FILE_GLOBAL, 'rb'))
WV_PARAMS = {
'imdb_vectors' :
{
'input_name' : 'imdb_input',
'vocab_size' : gb.W.shape[0],
'init' : gb.W,
'fixed' : False
},
'glove_vectors' :
{
'input_name' : 'glove_input',
'vocab_size' : gb_global.W.shape[0],
'init' : gb_global.W,
'fixed' : False
}
}
NGRAMS = [4, 5, 6]
NFILTERS = 10
SENTENCE_LENGTH = 30
PARAGRAPH_LENGTH = 30
log('Making graph model')
graph = Graph()
log('Making embedding')
for name, params in WV_PARAMS.iteritems():
# -- add each word vector channel
graph.add_input(params['input_name'], (-1, ), dtype='int')
# -- create the embedding!
graph.add_node(make_embedding(wv_size=300, **params), name=name, input=params['input_name'])
# -- reshape to 5D tensor
graph.add_node(Reshape((-1, SENTENCE_LENGTH, len(WV_PARAMS), 300)), name='reshape', inputs=WV_PARAMS.keys(), merge_mode='concat')
# -- permut
graph.add_node(Permute(dims=(1, 3, 2, 4)), name='embedding', input='reshape')
log('Adding convolved n-grams')
# for n in [4, 5]:
for n in NGRAMS:
graph.add_node(
TimeDistributedConvolution2D(NFILTERS, n, WV_PARAMS['glove_vectors']['init'].shape[1], activation='relu'),
name='conv{}gram'.format(n), input='embedding')
graph.add_node(
TimeDistributedMaxPooling2D(pool_size=(SENTENCE_LENGTH - n + 1, 1)),
name='maxpool{}gram'.format(n), input='conv{}gram'.format(n))
graph.add_node(
Dropout(0.7),
name='dropout{}gram'.format(n), input='maxpool{}gram'.format(n))
graph.add_node(
TimeDistributedFlatten(),
name='flatten{}gram'.format(n), input='dropout{}gram'.format(n))
log('Adding bi-directional GRU')
graph.add_node(GRU(25), name='gru_forwards', inputs=['flatten{}gram'.format(n) for n in NGRAMS], concat_axis=-1)
graph.add_node(GRU(25, go_backwards=True), name='gru_backwards', inputs=['flatten{}gram'.format(n) for n in NGRAMS], concat_axis=-1)
# graph.add_node(GRU(16), name='gru', input='flatten4gram')
graph.add_node(Dropout(0.5), name='gru_dropout', inputs=['gru_forwards', 'gru_backwards'])
graph.add_node(Dense(1, activation='sigmoid'), name='probability', input='gru_dropout')
graph.add_output(name='prediction', input='probability')
log('Compiling model (Veuillez patienter)...')
sgd = SGD(lr=0.01, momentum=0.8, decay=0.0001, nesterov=True)
# graph.compile(sgd, {'prediction': 'binary_crossentropy'})
graph.compile('rmsprop', {'prediction': 'binary_crossentropy'})
log('Fitting! Hit CTRL-C to stop early...')
try:
history = graph.fit(
{
'imdb_input' : train['text4imdb'],
'glove_input' : train['text4global'],
'prediction': train['labels']
},
validation_split=0.35, batch_size=16, nb_epoch=100,
verbose=True, # -- for logging purposes
# sample_weight = {'prediction' : weights},
callbacks =
[
EarlyStopping(verbose=True, patience=30, monitor='val_loss'),
ModelCheckpoint(MODEL_FILE, monitor='val_loss', verbose=True, save_best_only=True)
]
)
except KeyboardInterrupt:
log('Training stopped early!')
log('Loading best weights...')
graph.load_weights(MODEL_FILE)
log('getting predictions on the test set')
yhat = graph.predict({'imdb_input' : test['text4imdb'], 'glove_input' : test['text4global'], }, verbose=True, batch_size=50)
acc = ((yhat['prediction'].ravel() > 0.5) == (test['labels'] > 0.5)).mean()
log('Test set accuracy of {}%.'.format(acc * 100.0))
log('Test set error of {}%. Exiting...'.format((1 - acc) * 100.0))
| Python | 0.998781 | |
2623e5e18907c1ca13661e1f468368fb17bc50d9 | add preproc tests | py/desispec/test/test_preproc.py | py/desispec/test/test_preproc.py | import unittest
import numpy as np
from desispec.preproc import preproc, _parse_sec_keyword
class TestPreProc(unittest.TestCase):
def setUp(self):
hdr = dict()
hdr['CAMERA'] = 'b0'
hdr['DATE-OBS'] = '2018-09-23T08:17:03.988'
hdr['CCDSEC'] = '[1:200,1:150]'
hdr['BIASSECA'] = '[1:20,1:80]'
hdr['DATASECA'] = '[21:110,1:80]'
hdr['CCDSECA'] = '[1:90,1:80]'
hdr['BIASSECB'] = '[221:240,1:80]'
hdr['DATASECB'] = '[111:220,1:80]'
hdr['CCDSECB'] = '[91:200,1:80]'
hdr['BIASSECC'] = '[1:20,81:150]'
hdr['DATASECC'] = '[21:110,81:150]'
hdr['CCDSECC'] = '[1:90,81:150]'
hdr['BIASSECD'] = '[221:240,81:150]'
hdr['DATASECD'] = '[111:220,81:150]'
hdr['CCDSECD'] = '[91:200,81:150]'
self.header = hdr
self.ny = 150
self.nx = 200
self.noverscan = 20
self.rawimage = np.zeros((self.ny, self.nx+2*self.noverscan))
self.offset = dict(A=100.0, B=100.5, C=50.3, D=200.4)
self.gain = dict(A=1.0, B=1.5, C=0.8, D=1.2)
self.rdnoise = dict(A=2.0, B=2.2, C=2.4, D=2.6)
self.quad = dict(
A = np.s_[0:80, 0:90], B = np.s_[0:80, 90:200],
C = np.s_[80:150, 0:90], D = np.s_[80:150, 90:200],
)
for amp in ('A', 'B', 'C', 'D'):
self.header['GAIN'+amp] = self.gain[amp]
self.header['RDNOISE'+amp] = self.rdnoise[amp]
xy = _parse_sec_keyword(hdr['BIASSEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
self.rawimage[xy] += self.offset[amp]
self.rawimage[xy] += np.random.normal(scale=self.rdnoise[amp], size=shape)/self.gain[amp]
xy = _parse_sec_keyword(hdr['DATASEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
self.rawimage[xy] += self.offset[amp]
self.rawimage[xy] += np.random.normal(scale=self.rdnoise[amp], size=shape)/self.gain[amp]
#- Confirm that all regions were correctly offset
assert not np.any(self.rawimage == 0.0)
def test_preproc(self):
image = preproc(self.rawimage, self.header)
self.assertEqual(image.pix.shape, (self.ny, self.nx))
self.assertTrue(np.all(image.ivar <= 1/image.readnoise**2))
for amp in ('A', 'B', 'C', 'D'):
pix = image.pix[self.quad[amp]]
rdnoise = np.median(image.readnoise[self.quad[amp]])
self.assertAlmostEqual(np.median(pix), 0.0, delta=0.2)
self.assertAlmostEqual(np.std(pix), self.rdnoise[amp], delta=0.2)
self.assertAlmostEqual(rdnoise, self.rdnoise[amp], delta=0.2)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
228f4325aa5f1c8b616f45462280b4a7cb0792dd | Add test for empty files to csvjoin | tests/test_utilities/test_csvjoin.py | tests/test_utilities/test_csvjoin.py | #!/usr/bin/env python
import sys
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from csvkit.utilities.csvjoin import CSVJoin, launch_new_instance
from tests.utils import CSVKitTestCase, EmptyFileTests
class TestCSVJoin(CSVKitTestCase, EmptyFileTests):
Utility = CSVJoin
default_args = ['examples/dummy.csv', '-']
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/join_a.csv', 'examples/join_b.csv']):
launch_new_instance()
def test_sequential(self):
output = self.get_output_as_io(['examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_inner(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 3)
def test_left(self):
output = self.get_output_as_io(['-c', 'a', '--left', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 5)
def test_right(self):
output = self.get_output_as_io(['-c', 'a', '--right', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_outer(self):
output = self.get_output_as_io(['-c', 'a', '--outer', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 6)
def test_left_short_columns(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a_short.csv', 'examples/join_b.csv'])
with open('examples/join_short.csv') as f:
self.assertEqual(output.readlines(), f.readlines())
| #!/usr/bin/env python
import sys
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from csvkit.utilities.csvjoin import CSVJoin, launch_new_instance
from tests.utils import CSVKitTestCase
class TestCSVJoin(CSVKitTestCase):
Utility = CSVJoin
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/join_a.csv', 'examples/join_b.csv']):
launch_new_instance()
def test_sequential(self):
output = self.get_output_as_io(['examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_inner(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 3)
def test_left(self):
output = self.get_output_as_io(['-c', 'a', '--left', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 5)
def test_right(self):
output = self.get_output_as_io(['-c', 'a', '--right', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_outer(self):
output = self.get_output_as_io(['-c', 'a', '--outer', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 6)
def test_left_short_columns(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a_short.csv', 'examples/join_b.csv'])
with open('examples/join_short.csv') as f:
self.assertEqual(output.readlines(), f.readlines())
| Python | 0 |
dbb127a6fbadfa17f5faad45e8d7ebb6b943a77d | add basic test for vamp_spectral_centroid | tests/test_vamp_spectral_centroid.py | tests/test_vamp_spectral_centroid.py | #! /usr/bin/env python
import unittest
from unit_timeside import TestRunner
from timeside.plugins.decoder.aubio import AubioDecoder as FileDecoder
from timeside.core import get_processor
from timeside.core.tools.test_samples import samples
class TestVampSpectralCentroid(unittest.TestCase):
proc_id = 'vamp_spectral_centroid'
def setUp(self):
self.analyzer = get_processor(self.proc_id)()
def testOnC4Scale(self):
"runs on C4 scale"
self.source = samples["C4_scale.wav"]
def testOnSweep(self):
"runs on sweep"
self.source = samples["sweep.wav"]
def tearDown(self):
decoder = FileDecoder(self.source)
(decoder | self.analyzer).run()
results = self.analyzer.results
result = results.get_result_by_id(self.proc_id)
duration = result.audio_metadata.duration
data_duration = result.data_object.time[-1]
self.assertAlmostEqual (duration, data_duration, 1)
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
| Python | 0.000001 | |
86c67f321ec4ee7c254fde4a7f942a83d5e35016 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/00734af980b920f9f963badf85fbeb12d576fde5. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "00734af980b920f9f963badf85fbeb12d576fde5"
TFRT_SHA256 = "0c136cdfb87ae3663c162ad807c57983a8119fa7097fb589c4a7d04b98d09d3d"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "d1caeb8bdba1851194baf06c28ea09b5b67e5623"
TFRT_SHA256 = "e480ad7451b9e3ce45da61d7107953a4d55789bf6087442fd000a1ecb7c6604e"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| Python | 0 |
8cf5b328d7596a9b74490b7dfd4a1b8aa1577b55 | Merge remote-tracking branch 'origin' into AC-9512 | accelerator/migrations/0110_remove_bucket_list_program_role_20220707_1001.py | accelerator/migrations/0110_remove_bucket_list_program_role_20220707_1001.py | from django.db import migrations
def remove_bucket_list_program_roles(apps, schema_editor):
BucketState = apps.get_model('accelerator', 'BucketState')
ProgramRole = apps.get_model('accelerator', 'ProgramRole')
ProgramRoleGrant = apps.get_model('accelerator', 'ProgramRoleGrant')
NodePublishedFor = apps.get_model('accelerator', 'NodePublishedFor')
program_role_ids = BucketState.objects.values_list('program_role_id',
flat=True)
NodePublishedFor.objects.filter(
published_for_id__in=program_role_ids).delete()
ProgramRoleGrant.objects.filter(
program_role_id__in=program_role_ids).delete()
BucketState.objects.all().delete()
ProgramRole.objects.filter(pk__in=program_role_ids).delete()
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0109_remove_interest_fields_20220705_0425'),
]
operations = [
migrations.RunPython(remove_bucket_list_program_roles,
migrations.RunPython.noop)
]
| Python | 0 | |
5945fe5c527b3f5cb2ed104eccdf9266dc702eb1 | add second order constraint | toppra/constraint/can_linear_second_order.py | toppra/constraint/can_linear_second_order.py | from .canonical_linear import CanonicalLinearConstraint
from .constraint import DiscretizationType
import numpy as np
class CanonicalLinearSecondOrderConstraint(CanonicalLinearConstraint):
""" A class to represent Canonical Linear Generalized Second-order constraints.
Parameters
----------
inv_dyn: (array, array, array) -> array
The "inverse dynamics" function that receives joint position, velocity and
acceleration as inputs and ouputs the "joint torque". See notes for more
details.
cnst_coeffs: (array) -> array, array
The coefficient functions of the constraints. See notes for more details.
Notes
-----
A constraint of this kind can be represented by the following formula
.. math::
A(q) \ddot q + \dot q^\\top B(q) \dot q + C(q) = w,
where w is a vector that satisfies the polyhedral constraint
.. math::
F(q) w \\leq g(q).
To evaluate the constraint parameters, multiple calls to inv_dyn, cnst_F and cnst_g
are made. Specifically one can write the second-order equation as follows
.. math::
A(q) p'(s) \ddot s + [A(q) p''(s) + p'(s)^\\top B(q) p'(s)] + C(q) = w,
To evaluate the coefficients a(s), b(s), c(s), inv_dyn is called repeatedly with
appropriate arguments.
"""
def __init__(self, inv_dyn, cnst_F, cnst_g, discretization_scheme=DiscretizationType.Collocation):
super(CanonicalLinearSecondOrderConstraint, self).__init__()
self.discretization_type = discretization_scheme
self.inv_dyn = inv_dyn
self.cnst_F = cnst_F
self.cnst_g = cnst_g
self._format_string = " Generalized Second-order constraint"
self.discretization_type = discretization_scheme
def compute_constraint_params(self, path, gridpoints):
v_zero = np.zeros(path.get_dof())
p = path.eval(gridpoints)
ps = path.evald(gridpoints)
pss = path.evaldd(gridpoints)
F = np.array(map(self.cnst_F, p))
g = np.array(map(self.cnst_g, p))
c = np.array(
map(lambda p_: self.inv_dyn(p_, v_zero, v_zero), p)
)
a = np.array(
map(lambda p_, ps_: self.inv_dyn(p_, v_zero, ps_), zip(p, ps))
) - c
b = np.array(
map(lambda p_, ps_, pss_: self.inv_dyn(p_, ps_, pss_), zip(p, ps, pss))
) - c
return a, b, c, F, g, None, None
| Python | 0.009653 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.