commit
stringlengths
40
40
subject
stringlengths
1
1.49k
old_file
stringlengths
4
311
new_file
stringlengths
4
311
new_contents
stringlengths
1
29.8k
old_contents
stringlengths
0
9.9k
lang
stringclasses
3 values
proba
float64
0
1
7e0e196c049d41cfee1eb3d4432967e62c6f37ed
Add Unit Tests for Blacklists V2
designate/tests/test_api/test_v2/test_blacklists.py
designate/tests/test_api/test_v2/test_blacklists.py
# Copyright 2014 Rackspace Hosting # All rights reserved # # Author: Betsy Luzader <betsy.luzader@rackspace.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from designate.tests.test_api.test_v2 import ApiV2TestCase class ApiV2BlacklistsTest(ApiV2TestCase): def setUp(self): super(ApiV2BlacklistsTest, self).setUp() def test_get_blacklists(self): # Set the policy file as this is an admin-only API self.policy({'find_blacklists': '@'}) response = self.client.get('/blacklists/') # Check the headers are what we expect self.assertEqual(200, response.status_int) self.assertEqual('application/json', response.content_type) # Check the body structure is what we expect self.assertIn('blacklists', response.json) self.assertIn('links', response.json) self.assertIn('self', response.json['links']) # Test with 0 blacklists self.assertEqual(0, len(response.json['blacklists'])) # Test with 1 blacklist self.create_blacklist(fixture=0) response = self.client.get('/blacklists/') self.assertIn('blacklists', response.json) self.assertEqual(1, len(response.json['blacklists'])) # test with 2 blacklists self.create_blacklist(fixture=1) response = self.client.get('/blacklists/') self.assertIn('blacklists', response.json) self.assertEqual(2, len(response.json['blacklists'])) def test_get_blacklist(self): blacklist = self.create_blacklist(fixture=0) # Set the policy file as this is an admin-only API self.policy({'find_blacklist': '@'}) response = self.client.get('/blacklists/%s' % blacklist['id'], 
headers=[('Accept', 'application/json')]) # Verify the headers self.assertEqual(200, response.status_int) self.assertEqual('application/json', response.content_type) # Verify the body structure self.assertIn('blacklist', response.json) self.assertIn('links', response.json['blacklist']) self.assertIn('self', response.json['blacklist']['links']) # Verify the returned values self.assertIn('id', response.json['blacklist']) self.assertIn('created_at', response.json['blacklist']) self.assertIsNone(response.json['blacklist']['updated_at']) self.assertEqual(self.get_blacklist_fixture(0)['pattern'], response.json['blacklist']['pattern']) def test_create_blacklist(self): self.policy({'create_blacklist': '@'}) fixture = self.get_blacklist_fixture(0) response = self.client.post_json('/blacklists/', {'blacklist': fixture}) # Verify the headers self.assertEqual(201, response.status_int) self.assertEqual('application/json', response.content_type) # Verify the body structure self.assertIn('blacklist', response.json) self.assertIn('links', response.json['blacklist']) self.assertIn('self', response.json['blacklist']['links']) # Verify the returned values self.assertIn('id', response.json['blacklist']) self.assertIn('created_at', response.json['blacklist']) self.assertIsNone(response.json['blacklist']['updated_at']) self.assertEqual(fixture['pattern'], response.json['blacklist']['pattern']) def test_delete_blacklist(self): blacklist = self.create_blacklist(fixture=0) self.policy({'delete_blacklist': '@'}) self.client.delete('/blacklists/%s' % blacklist['id'], status=204) def test_update_blacklist(self): blacklist = self.create_blacklist(fixture=0) self.policy({'update_blacklist': '@'}) # Prepare the update body body = {'blacklist': {'description': 'prefix-%s' % blacklist['description']}} response = self.client.patch_json('/blacklists/%s' % blacklist['id'], body, status=200) # Verify the headers self.assertEqual(200, response.status_int) self.assertEqual('application/json', 
response.content_type) # Verify the body structure self.assertIn('blacklist', response.json) self.assertIn('links', response.json['blacklist']) self.assertIn('self', response.json['blacklist']['links']) # Verify the returned values self.assertIn('id', response.json['blacklist']) self.assertIsNotNone(response.json['blacklist']['updated_at']) self.assertEqual('prefix-%s' % blacklist['description'], response.json['blacklist']['description'])
Python
0.000005
67596d081059a004e5f7ab15f7972773fdf2f15e
ADD PyGrid SetupService message tests
tests/syft/grid/messages/setup_msg_test.py
tests/syft/grid/messages/setup_msg_test.py
# syft absolute import syft as sy from syft.core.io.address import Address from syft.grid.messages.setup_messages import CreateInitialSetUpMessage from syft.grid.messages.setup_messages import CreateInitialSetUpResponse from syft.grid.messages.setup_messages import GetSetUpMessage from syft.grid.messages.setup_messages import GetSetUpResponse def test_create_initial_setup_message_serde() -> None: bob_vm = sy.VirtualMachine(name="Bob") target = Address(name="Alice") request_content = { "settings": { "cloud-admin-token" : "d84we35ad3a1d59a84sd9", "cloud-credentials": "<cloud-credentials.pem>", "infra": { "autoscaling": True, "triggers": { "memory": "50", "vCPU": "80" } }, } } msg = CreateInitialSetUpMessage( address=target, content= request_content, reply_to=bob_vm.address, ) blob = msg.serialize() msg2 = sy.deserialize(blob=blob) assert msg.id == msg2.id assert msg.address == target assert msg.content == msg2.content assert msg == msg2 def test_create_initial_setup_response_serde() -> None: target = Address(name="Alice") request_content = {"msg": "Initial setup registered successfully!"} msg = CreateInitialSetUpResponse( address=target, success=True, content= request_content, ) blob = msg.serialize() msg2 = sy.deserialize(blob=blob) assert msg.id == msg2.id assert msg.address == target assert msg.content == msg2.content assert msg == msg2 def test_get_initial_setup_message_serde() -> None: bob_vm = sy.VirtualMachine(name="Bob") target = Address(name="Alice") request_content = {} msg = GetSetUpMessage( address=target, content=request_content, reply_to=bob_vm.address, ) blob = msg.serialize() msg2 = sy.deserialize(blob=blob) assert msg.id == msg2.id assert msg.address == target assert msg.content == msg2.content assert msg == msg2 def test_delete_worker_response_serde() -> None: target = Address(name="Alice") content = { "settings": { "cloud-admin-token" : "d84we35ad3a1d59a84sd9", "cloud-credentials": "<cloud-credentials.pem>", "infra": { "autoscaling": True, 
"triggers": { "memory": "50", "vCPU": "80" } }, } } msg = GetSetUpResponse( success=True, address=target, content=content, ) blob = msg.serialize() msg2 = sy.deserialize(blob=blob) assert msg.id == msg2.id assert msg.address == target assert msg.content == msg2.content assert msg == msg2
Python
0
08b52fab8c561834996bd23a5a7654bfac7ea75e
Fix MR comments (NC-1168)
nodeconductor/cost_tracking/filters.py
nodeconductor/cost_tracking/filters.py
from __future__ import unicode_literals from django.contrib.contenttypes.models import ContentType from django.db.models import Q import django_filters from rest_framework import filters from nodeconductor.core import filters as core_filters from nodeconductor.cost_tracking import models, serializers from nodeconductor.structure import models as structure_models, SupportedServices from nodeconductor.structure.models import Resource class PriceEstimateFilter(django_filters.FilterSet): is_manually_input = django_filters.BooleanFilter() class Meta: model = models.PriceEstimate fields = [ 'is_manually_input', ] class PriceEstimateScopeFilterBackend(core_filters.GenericKeyFilterBackend): def get_related_models(self): return models.PriceEstimate.get_estimated_models() def get_field_name(self): return 'scope' class AdditionalPriceEstimateFilterBackend(filters.BaseFilterBackend): def filter_queryset(self, request, queryset, view): if 'date' in request.query_params: date_serializer = serializers.PriceEstimateDateFilterSerializer( data={'date_list': request.query_params.getlist('date')}) date_serializer.is_valid(raise_exception=True) query = Q() for year, month in date_serializer.validated_data['date_list']: query |= Q(year=year, month=month) queryset = queryset.filter(query) # Filter by date range date_range_serializer = serializers.PriceEstimateDateRangeFilterSerializer(data=request.query_params) date_range_serializer.is_valid(raise_exception=True) if 'start' in date_range_serializer.validated_data: year, month = date_range_serializer.validated_data['start'] queryset = queryset.filter(Q(year__gt=year) | Q(year=year, month__gte=month)) if 'end' in date_range_serializer.validated_data: year, month = date_range_serializer.validated_data['end'] queryset = queryset.filter(Q(year__lt=year) | Q(year=year, month__lte=month)) # Filter by customer if 'customer' in request.query_params: customer_uuid = request.query_params['customer'] qs = Q() for model in 
models.PriceEstimate.get_estimated_models(): content_type = ContentType.objects.get_for_model(model) if model == structure_models.Customer: query = {'uuid': customer_uuid} else: query = {model.Permissions.customer_path + '__uuid': customer_uuid} ids = model.objects.filter(**query).values_list('pk', flat=True) qs |= Q(content_type=content_type, object_id__in=ids) queryset = queryset.filter(qs) return queryset class PriceListItemServiceFilterBackend(core_filters.GenericKeyFilterBackend): def get_related_models(self): return structure_models.Service.get_all_models() def get_field_name(self): return 'service' class DefaultPriceListItemFilter(django_filters.FilterSet): resource_type = core_filters.ContentTypeFilter( name='resource_content_type', models=Resource.get_all_models()) class Meta: model = models.DefaultPriceListItem fields = [ 'key', 'item_type', 'resource_type', ]
from __future__ import unicode_literals from django.contrib.contenttypes.models import ContentType from django.db.models import Q import django_filters from rest_framework import filters from nodeconductor.core import filters as core_filters from nodeconductor.cost_tracking import models, serializers from nodeconductor.structure import models as structure_models, SupportedServices class PriceEstimateFilter(django_filters.FilterSet): is_manually_input = django_filters.BooleanFilter() class Meta: model = models.PriceEstimate fields = [ 'is_manually_input', ] class PriceEstimateScopeFilterBackend(core_filters.GenericKeyFilterBackend): def get_related_models(self): return models.PriceEstimate.get_estimated_models() def get_field_name(self): return 'scope' class AdditionalPriceEstimateFilterBackend(filters.BaseFilterBackend): def filter_queryset(self, request, queryset, view): if 'date' in request.query_params: date_serializer = serializers.PriceEstimateDateFilterSerializer( data={'date_list': request.query_params.getlist('date')}) date_serializer.is_valid(raise_exception=True) query = Q() for year, month in date_serializer.validated_data['date_list']: query |= Q(year=year, month=month) queryset = queryset.filter(query) # Filter by date range date_range_serializer = serializers.PriceEstimateDateRangeFilterSerializer(data=request.query_params) date_range_serializer.is_valid(raise_exception=True) if 'start' in date_range_serializer.validated_data: year, month = date_range_serializer.validated_data['start'] queryset = queryset.filter(Q(year__gt=year) | Q(year=year, month__gte=month)) if 'end' in date_range_serializer.validated_data: year, month = date_range_serializer.validated_data['end'] queryset = queryset.filter(Q(year__lt=year) | Q(year=year, month__lte=month)) # Filter by customer if 'customer' in request.query_params: customer_uuid = request.query_params['customer'] qs = Q() for model in models.PriceEstimate.get_estimated_models(): content_type = 
ContentType.objects.get_for_model(model) if model == structure_models.Customer: query = {'uuid': customer_uuid} else: query = {model.Permissions.customer_path + '__uuid': customer_uuid} ids = model.objects.filter(**query).values_list('pk', flat=True) qs |= Q(content_type=content_type, object_id__in=ids) queryset = queryset.filter(qs) return queryset class PriceListItemServiceFilterBackend(core_filters.GenericKeyFilterBackend): def get_related_models(self): return structure_models.Service.get_all_models() def get_field_name(self): return 'service' class DefaultPriceListItemFilter(django_filters.FilterSet): resource_type = core_filters.ContentTypeFilter( name='resource_content_type', models=SupportedServices.get_resource_models().values()) class Meta: model = models.DefaultPriceListItem fields = [ 'key', 'item_type', 'resource_type', ]
Python
0
cb707785cb165e8570aa8201d2e71b5ed2c2f3bd
Use new rebot model
utest/webcontent/spec/data/create_jsdata_for_specs.py
utest/webcontent/spec/data/create_jsdata_for_specs.py
#!/usr/bin/env python import fileinput from os.path import join, dirname, abspath import sys import os from robot.result.datamodel import DatamodelVisitor BASEDIR = dirname(abspath(__file__)) OUTPUT = join(BASEDIR, 'output.xml') sys.path.insert(0, join(BASEDIR, '..', '..', '..', '..', 'src')) import robot from robot.reporting.outputparser import OutputParser from robot.reporting.jsondatamodel import SeparatingWriter, DataModelWriter def run_robot(testdata, loglevel='INFO'): robot.run(testdata, log='NONE', report='NONE', tagstatlink=['force:http://google.com:<kuukkeli&gt;', 'i*:http://%1/:Title of i%1'], tagdoc=['test:this_is_*my_bold*_test', 'IX:*Combined* & escaped <&lt; tag doc'], tagstatcombine=['fooANDi*:zap', 'i?:IX'], critical=[], noncritical=[], outputdir=BASEDIR, loglevel=loglevel) def create_jsdata(outxml, target, split_log): result = robot.result.builders.ResultFromXML(outxml) visitor = DatamodelVisitor(result, split_log=split_log) model = DataModelWriter(visitor.datamodel, visitor._context.split_results) model.set_settings({'logURL': 'log.html', 'reportURL': 'report.html', 'background': {'fail': 'DeepPink'}}) with open(target, 'w') as output: model.write_to(output) for index, (keywords, strings) in enumerate(model._split_results): writer = SeparatingWriter(output, '') writer.dump_json('window.outputKeywords%d = ' % index, keywords) writer.dump_json('window.outputStrings%d = ' % index, strings) def replace_all(file,searchExp,replaceExp): for line in fileinput.input(file, inplace=1): if searchExp in line: line = line.replace(searchExp,replaceExp) sys.stdout.write(line) def create(input, target, targetName, loglevel='INFO', split_log=False): input = join(BASEDIR, input) target = join(BASEDIR, target) run_robot(input, loglevel) create_jsdata(OUTPUT, target, split_log) replace_all(target, 'window.output', 'window.' 
+ targetName) if __name__ == '__main__': create('Suite.txt', 'Suite.js', 'suiteOutput') create('SetupsAndTeardowns.txt', 'SetupsAndTeardowns.js', 'setupsAndTeardownsOutput') create('Messages.txt', 'Messages.js', 'messagesOutput') create('teardownFailure', 'TeardownFailure.js', 'teardownFailureOutput') create(join('teardownFailure', 'PassingFailing.txt'), 'PassingFailing.js', 'passingFailingOutput') create('TestsAndKeywords.txt', 'TestsAndKeywords.js', 'testsAndKeywordsOutput') create('.', 'allData.js', 'allDataOutput') create('.', 'splitting.js', 'splittingOutput', split_log=True) os.remove(OUTPUT)
#!/usr/bin/env python import fileinput from os.path import join, dirname, abspath import sys import os BASEDIR = dirname(abspath(__file__)) OUTPUT = join(BASEDIR, 'output.xml') sys.path.insert(0, join(BASEDIR, '..', '..', '..', '..', 'src')) import robot from robot.reporting.outputparser import OutputParser from robot.reporting.jsondatamodel import SeparatingWriter def run_robot(testdata, loglevel='INFO'): robot.run(testdata, log='NONE', report='NONE', tagstatlink=['force:http://google.com:<kuukkeli&gt;', 'i*:http://%1/:Title of i%1'], tagdoc=['test:this_is_*my_bold*_test', 'IX:*Combined* & escaped <&lt; tag doc'], tagstatcombine=['fooANDi*:zap', 'i?:IX'], critical=[], noncritical=[], outputdir=BASEDIR, loglevel=loglevel) def create_jsdata(outxml, target, split_log): model = OutputParser(split_log=split_log).parse(outxml) model.set_settings({'logURL': 'log.html', 'reportURL': 'report.html', 'background': {'fail': 'DeepPink'}}) with open(target, 'w') as output: model.write_to(output) for index, (keywords, strings) in enumerate(model._split_results): writer = SeparatingWriter(output, '') writer.dump_json('window.outputKeywords%d = ' % index, keywords) writer.dump_json('window.outputStrings%d = ' % index, strings) def replace_all(file,searchExp,replaceExp): for line in fileinput.input(file, inplace=1): if searchExp in line: line = line.replace(searchExp,replaceExp) sys.stdout.write(line) def create(input, target, targetName, loglevel='INFO', split_log=False): input = join(BASEDIR, input) target = join(BASEDIR, target) run_robot(input, loglevel) create_jsdata(OUTPUT, target, split_log) replace_all(target, 'window.output', 'window.' 
+ targetName) if __name__ == '__main__': create('Suite.txt', 'Suite.js', 'suiteOutput') create('SetupsAndTeardowns.txt', 'SetupsAndTeardowns.js', 'setupsAndTeardownsOutput') create('Messages.txt', 'Messages.js', 'messagesOutput') create('teardownFailure', 'TeardownFailure.js', 'teardownFailureOutput') create(join('teardownFailure', 'PassingFailing.txt'), 'PassingFailing.js', 'passingFailingOutput') create('TestsAndKeywords.txt', 'TestsAndKeywords.js', 'testsAndKeywordsOutput') create('.', 'allData.js', 'allDataOutput') create('.', 'splitting.js', 'splittingOutput', split_log=True) os.remove(OUTPUT)
Python
0
3608c4d3b559ba7fa1bd9629231e98196681caa4
add package py-gdbgui (#7715)
var/spack/repos/builtin/packages/py-gdbgui/package.py
var/spack/repos/builtin/packages/py-gdbgui/package.py
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class PyGdbgui(PythonPackage): """gdbgui is a modern, free, browser-based frontend to gdb""" homepage = "https://gdbgui.com" url = "https://pypi.io/packages/source/g/gdbgui/gdbgui-0.11.2.1.tar.gz" version('0.11.2.1', 'c15dd3f60fe372b2e93f705c7ee75f51') depends_on('py-setuptools', type=('build', 'run')) depends_on('py-flask@0.12.2:', type=('build', 'run')) depends_on('py-flask-compress@1.4.0:', type=('build', 'run')) depends_on('py-flask-socketio@2.9.3:', type=('build', 'run')) depends_on('py-gevent@1.2.2:', type=('build', 'run')) depends_on('py-pygdbmi@0.8.2.0:', type=('build', 'run')) depends_on('py-pygments@2.2.0:', type=('build', 'run')) depends_on('gdb', type='run')
Python
0
478072e8350d03655364ea9147bbe21bafabbcce
Add tests for template tags
geotrek/feedback/tests/test_template_tags.py
geotrek/feedback/tests/test_template_tags.py
from datetime import datetime from django.test import TestCase from geotrek.authent.tests.factories import UserFactory, UserProfileFactory from geotrek.feedback.templatetags.feedback_tags import ( predefined_emails, resolved_intervention_info, status_ids_and_colors) from geotrek.feedback.tests.factories import (PredefinedEmailFactory, ReportStatusFactory) from geotrek.maintenance.tests.factories import ReportInterventionFactory class TestFeedbackTemplateTags(TestCase): @classmethod def setUpTestData(cls): cls.user1 = UserFactory(username="CCCC") UserProfileFactory.create(user=cls.user1, extended_username="Communauté des Communes des Communautés Communataires") cls.user2 = UserFactory(username="Kurt") UserProfileFactory.create(user=cls.user2) solved_status = ReportStatusFactory(identifier='solved_intervention', color="#448654") cls.intervention_solved_1 = ReportInterventionFactory(date=datetime(year=1997, month=4, day=4).date()) cls.report_1 = cls.intervention_solved_1.target cls.report_1.status = solved_status cls.report_1.assigned_user = cls.user1 cls.report_1.save() cls.intervention_solved_2 = ReportInterventionFactory(date=datetime(year=1997, month=5, day=4).date()) cls.report_2 = cls.intervention_solved_2.target cls.report_2.status = solved_status cls.report_2.assigned_user = cls.user2 cls.report_2.save() cls.email1 = PredefinedEmailFactory() cls.email2 = PredefinedEmailFactory() def test_resolved_intervention_username(self): self.assertEqual( "{\"date\": \"04/04/1997\", \"username\": \"Communaut\\u00e9 des Communes des Communaut\\u00e9s Communataires\"}", resolved_intervention_info(self.report_1) ) self.assertEqual( "{\"date\": \"04/05/1997\", \"username\": \"Kurt\"}", resolved_intervention_info(self.report_2) ) def test_status_ids_and_colors(self): self.assertEqual( "{\"1\": {\"id\": \"solved_intervention\", \"color\": \"#448654\"}, \"2\": {\"id\": \"ID 1\", \"color\": \"#444444\"}, \"3\": {\"id\": \"ID 2\", \"color\": \"#444444\"}}", status_ids_and_colors() 
) def test_predefined_emails(self): self.assertEqual( "{\"1\": {\"label\": \"Predefined Email 0\", \"text\": \"Some email body content 0\"}, \"2\": {\"label\": \"Predefined Email 1\", \"text\": \"Some email body content 1\"}}", predefined_emails() )
Python
0
8355cb358d14589a194926d37beeb5af7af2a591
Increase event image url limit from 200
falmer/events/migrations/0012_auto_20170905_1208.py
falmer/events/migrations/0012_auto_20170905_1208.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-09-05 11:08 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('events', '0011_auto_20170905_1028'), ] operations = [ migrations.AlterField( model_name='mslevent', name='image_url', field=models.URLField(max_length=2000), ), migrations.AlterField( model_name='mslevent', name='url', field=models.URLField(max_length=2000), ), ]
Python
0.000114
a16b4401f37f08d8cb5e1f9ec1b7d4a3221360ab
Add test case for regular extrusion
test/test_regular_extrusion.py
test/test_regular_extrusion.py
# -*- coding: utf-8 -*- """Creates regular cube mesh by extrusion. """ import pygmsh from helpers import compute_volume def test(): x = 5 y = 4 z = 3 x_layers = 10 y_layers = 5 z_layers = 3 geom = pygmsh.built_in.Geometry() p = geom.add_point([0, 0, 0], 1) _, l, _ = geom.extrude(p, [x, 0, 0], num_layers=x_layers) _, s, _ = geom.extrude(l, [0, y, 0], num_layers=y_layers) geom.extrude(s, [0, 0, z], num_layers=z_layers) points, cells, _, _, _ = pygmsh.generate_mesh(geom) ref_vol = x * y * z assert abs(compute_volume(points, cells) - ref_vol) < 1.0e-2 * ref_vol # Each grid-cell from layered extrusion will result in 6 tetrahedrons. ref_tetras = 6 * x_layers * y_layers * z_layers assert len(cells["tetra"]) == ref_tetras return points, cells if __name__ == "__main__": import meshio meshio.write_points_cells("cube.vtu", *test())
Python
0
5c7b70061d6e96619c6e3a40c87aaec39b408fdf
Enable subscription admin area
subscription/admin.py
subscription/admin.py
from django.contrib import admin from subscription.models import MessageSet, Message, Subscription admin.site.register(MessageSet) admin.site.register(Message) admin.site.register(Subscription)
Python
0
78552d3de73174dd0ffdf9a58009281013dcf640
make interface consistent for add_sample
rlkit/data_management/replay_buffer.py
rlkit/data_management/replay_buffer.py
import abc class ReplayBuffer(object, metaclass=abc.ABCMeta): """ A class used to save and replay data. """ @abc.abstractmethod def add_sample(self, observation, action, reward, terminal, next_observation, **kwargs): """ Add a transition tuple. """ pass @abc.abstractmethod def terminate_episode(self): """ Let the replay buffer know that the episode has terminated in case some special book-keeping has to happen. :return: """ pass @abc.abstractmethod def num_steps_can_sample(self, **kwargs): """ :return: # of unique items that can be sampled. """ pass def add_path(self, path): """ Add a path to the replay buffer. This default implementation naively goes through every step, but you may want to optimize this. NOTE: You should NOT call "terminate_episode" after calling add_path. It's assumed that this function handles the episode termination. :param path: Dict like one outputted by rlkit.samplers.util.rollout """ for i, ( obs, action, reward, next_obs, terminal, agent_info, env_info ) in enumerate(zip( path["observations"], path["actions"], path["rewards"], path["next_observations"], path["terminals"], path["agent_infos"], path["env_infos"], )): self.add_sample( obs, action, reward, next_obs, terminal, agent_info=agent_info, env_info=env_info, ) self.terminate_episode() @abc.abstractmethod def random_batch(self, batch_size): """ Return a batch of size `batch_size`. :param batch_size: :return: """ pass
import abc class ReplayBuffer(object, metaclass=abc.ABCMeta): """ A class used to save and replay data. """ @abc.abstractmethod def add_sample(self, observation, action, reward, next_observation, terminal, **kwargs): """ Add a transition tuple. """ pass @abc.abstractmethod def terminate_episode(self): """ Let the replay buffer know that the episode has terminated in case some special book-keeping has to happen. :return: """ pass @abc.abstractmethod def num_steps_can_sample(self, **kwargs): """ :return: # of unique items that can be sampled. """ pass def add_path(self, path): """ Add a path to the replay buffer. This default implementation naively goes through every step, but you may want to optimize this. NOTE: You should NOT call "terminate_episode" after calling add_path. It's assumed that this function handles the episode termination. :param path: Dict like one outputted by rlkit.samplers.util.rollout """ for i, ( obs, action, reward, next_obs, terminal, agent_info, env_info ) in enumerate(zip( path["observations"], path["actions"], path["rewards"], path["next_observations"], path["terminals"], path["agent_infos"], path["env_infos"], )): self.add_sample( obs, action, reward, next_obs, terminal, agent_info=agent_info, env_info=env_info, ) self.terminate_episode() @abc.abstractmethod def random_batch(self, batch_size): """ Return a batch of size `batch_size`. :param batch_size: :return: """ pass
Python
0
7bd6f3e7751deecfc3cd555fc071d722c856802c
Implement division using built in library function
chips/compiler/builtins.py
chips/compiler/builtins.py
#!/usr/bin/env python """Support Library for builtin Functionality""" __author__ = "Jon Dawson" __copyright__ = "Copyright (C) 2013, Jonathan P Dawson" __version__ = "0.1" builtins=""" unsigned unsigned_divide_xxxx(unsigned dividend, unsigned divisor){ unsigned denom = divisor; unsigned bit = 1; unsigned quotient = 0; if( denom > dividend ) return 0; if( denom == dividend ) return 1; while(denom <= dividend){ denom <<= 1; bit <<= 1; } denom >>= 1; bit >>= 1; while(bit){ if(dividend >= denom){ dividend -= denom; quotient |= bit; } bit >>= 1; denom >>= 1; } return quotient; } int divide_xxxx(int dividend, int divisor){ unsigned udividend, udivisor, uquotient; unsigned dividend_sign, divisor_sign, quotient_sign; dividend_sign = dividend & 0x8000u; divisor_sign = divisor & 0x8000u; quotient_sign = dividend_sign ^ divisor_sign; udividend = dividend_sign ? -dividend : dividend; udivisor = divisor_sign ? -divisor : divisor; uquotient = unsigned_divide_xxxx(udividend, udivisor); return quotient_sign ? -uquotient : uquotient; } long unsigned long_unsigned_divide_xxxx(long unsigned dividend, long unsigned divisor){ long unsigned denom = divisor; long unsigned bit = 1; long unsigned quotient = 0; if( denom > dividend ) return 0; if( denom == dividend ) return 1; while(denom <= dividend){ denom <<= 1; bit <<= 1; } denom >>= 1; bit >>= 1; while(bit){ if(dividend >= denom){ dividend -= denom; quotient |= bit; } bit >>= 1; denom >>= 1; } return quotient; } long int long_divide_xxxx(long int dividend, long int divisor){ long unsigned udividend, udivisor, uquotient; long unsigned dividend_sign, divisor_sign, quotient_sign; dividend_sign = dividend & 0x80000000ul; divisor_sign = divisor & 0x80000000ul; quotient_sign = dividend_sign ^ divisor_sign; udividend = dividend_sign ? -dividend : dividend; udivisor = divisor_sign ? -divisor : divisor; uquotient = long_unsigned_divide_xxxx(udividend, udivisor); return quotient_sign ? -uquotient : uquotient; } """
Python
0.000001
6eb49ae8dcf33a7d7be9ed3c8208bc83a9a46757
Create python_wiki_one.py
ckOLDab/python_wiki_one.py
ckOLDab/python_wiki_one.py
import time import BaseHTTPServer HOST_NAME = '127.0.0.1' # !!!REMEMBER TO CHANGE THIS!!! PORT_NUMBER = 80 # Maybe set this to 9000. class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler): def do_HEAD(s): s.send_response(200) s.send_header("Content-type", "text/html") s.end_headers() def do_GET(s): """Respond to a GET request.""" s.send_response(200) s.send_header("Content-type", "text/html") s.end_headers() s.wfile.write("<html><head><title>Title goes here.</title></head>") s.wfile.write("<body><p>This is a test.</p>") # If someone went to "http://something.somewhere.net/foo/bar/", # then s.path equals "/foo/bar/". s.wfile.write("<p>You accessed path: %s</p>" % s.path) s.wfile.write("</body></html>") if __name__ == '__main__': server_class = BaseHTTPServer.HTTPServer httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler) print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER) try: httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close() print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER)
Python
0.999848
4fd9984e4187a64906ca9e0b3a7ccf01486c156d
Create Fred14_LeftArmServos.py
home/Ray.Edgley/fred/fred01/Fred14_LeftArmServos.py
home/Ray.Edgley/fred/fred01/Fred14_LeftArmServos.py
######################################################### # Fred14 Setup the Left Arm Servos ######################################################### # We will be using the following services: # Servo Service ######################################################### # I Fred's Left Arm, we have the Bicep Rotator, the Elbow, # the Wrist and the five fingures. # You know it's just like the Right Arm # Lets start with the Bicep Rotator. # All service are created using the Runtime Service. leftBicep = Runtime.createAndStart("leftBicep", "Servo") # Next we need to attach ther servo Service to a Controller Service, in this case it will be the head # Adafruit16ChServoDriver. We also need to tell the Servo Service which pin on the controller # the servo is connected to, in this case pin 15 leftBicep.attach(leftArm,15) # Now we tell the Servo Service about our servos limits, in some cases if the servo goes to far, things will break leftBicep.setMinMax(0,180) # This allows you to map the input to the Servo service to an actual servo position output leftBicep.map(0,180,1,180) # there is a rest command that can be issued to the servo, # when that happens, this is the position that the servo will go to leftBicep.setRest(90) # if your servo run backwards, then set this to true in order to reverse it. leftBicep.setInverted(False) # degrees per second rotational velocity, setting -1 will set the speed to the servo's default leftBicep.setVelocity(60) # this allows the Servo Sevice to turn off the motor when it has reached the target position. # the major advantage to this is the servos will use less power and have a lower chance of buring out. leftBicep.setAutoDisable(True) # Ok now that we have fully defined the headX servo lets make sure it is in the rest position. leftBicep.rest() # commands not used here but will be in other parts on the program are the following: # leftBicep.moveTo(x) where x is the position you want move to. 
# leftBicep.moveToBlockig(x) as above except execution of the program will pause until the position is reached. # leftBicep.disable() will turn off the servo without unloading the service. # leftBicep.enable() the oposite of disable will turn the servo back on after being disabled. # disable and enable are not required if setAutoDisable is set to True # For each servo that we have, we need to create a Servo Service, so this will be a process # of repeating what we did above for each servo just using a diferent object name. leftElbow = Runtime.createAndStart("leftElbow", "Servo") leftElbow.attach(leftArm,14) leftElbow.setMinMax(0,180) leftElbow.map(0,180,1,180) leftElbow.setRest(90) leftElbow.setInverted(False) leftElbow.setVelocity(60) leftElbow.setAutoDisable(True) leftElbow.rest() leftWrist = Runtime.createAndStart("leftWrist", "Servo") leftWrist.attach(leftArm,0) leftWrist.setMinMax(0,180) leftWrist.map(0,180,1,180) leftWrist.setRest(90) leftWrist.setInverted(False) leftWrist.setVelocity(60) leftWrist.setAutoDisable(True) leftWrist.rest() leftThumb = Runtime.createAndStart("leftThumb", "Servo") leftThumb.attach(leftArm,1) leftThumb.setMinMax(0,180) leftThumb.map(0,180,1,180) leftThumb.setRest(90) leftThumb.setInverted(False) leftThumb.setVelocity(60) leftThumb.setAutoDisable(True) leftThumb.rest() leftIndex = Runtime.createAndStart("leftIndex", "Servo") leftIndex.attach(leftArm,2) leftIndex.setMinMax(0,180) leftIndex.map(0,180,1,180) leftIndex.setRest(90) leftIndex.setInverted(False) leftIndex.setVelocity(60) leftIndex.setAutoDisable(True) leftIndex.rest() leftMajure = Runtime.createAndStart("leftMajure", "Servo") leftMajure.attach(leftArm,3) leftMajure.setMinMax(0,180) leftMajure.map(0,180,1,180) leftMajure.setRest(90) leftMajure.setInverted(False) leftMajure.setVelocity(60) leftMajure.setAutoDisable(True) leftMajure.rest() leftRing = Runtime.createAndStart("leftRing", "Servo") leftRing.attach(leftArm,4) leftRing.setMinMax(0,180) leftRing.map(0,180,1,180) 
leftRing.setRest(90) leftRing.setInverted(False) leftRing.setVelocity(60) leftRing.setAutoDisable(True) leftRing.rest() leftLittle = Runtime.createAndStart("leftLittle", "Servo") leftLittle.attach(leftArm,5) leftLittle.setMinMax(0,180) leftLittle.map(0,180,1,180) leftLittle.setRest(90) leftLittle.setInverted(False) leftLittle.setVelocity(60) leftLittle.setAutoDisable(True) leftLittle.rest()
Python
0
53cdd6e7bcb37567382f3b3688b6a55f1b2968be
Add test_binaryclassifier
tests/test_binaryclassifier.py
tests/test_binaryclassifier.py
import numpy as np from sklearn import svm, datasets from darwin.pipeline import ClassificationPipeline def test_binary_classification_with_classification_pipeline(): # generate the dataset n_samples=100 n_features=20 x, y = datasets.make_gaussian_quantiles(mean=None, cov=1.0, n_samples=n_samples, n_features=n_features, n_classes=2, shuffle=True, random_state=1) # another way to generate the data # x, y = datasets.make_hastie_10_2(n_samples=10, random_state=1) # -- test with darwin classifier_name='linsvm' cvmethod='10' n_feats = x.shape[1] pipe = ClassificationPipeline(n_feats=n_feats, clfmethod=classifier_name, cvmethod=cvmethod) results, metrics = pipe.cross_validation(x, y) assert(results is not None)
Python
0.000015
bb80025f3ed8169a2558e9c5c6bc4db5a862d7ae
Integrate LLVM at llvm/llvm-project@529a3d87a799
third_party/llvm/workspace.bzl
third_party/llvm/workspace.bzl
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "529a3d87a799a2cba29bc1d0f426a00d5bb4c88f" LLVM_SHA256 = "3d2ca52bd36ad3904f2f5d0e43935b0e82f3d1ac137e0a89025141e36735944f" tf_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], build_file = "//third_party/llvm:llvm.BUILD", patch_file = [ "//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved "//third_party/llvm:build.patch", "//third_party/llvm:macos_build_fix.patch", ], link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"}, )
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "6139626d738fd03b968e07698f5cd26924e3cd65" LLVM_SHA256 = "b9581ac44a7d9dd3a8470497cddb63c387ab6520d82d077fb609bc29b4e7b887" tf_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], build_file = "//third_party/llvm:llvm.BUILD", patch_file = [ "//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved "//third_party/llvm:build.patch", "//third_party/llvm:macos_build_fix.patch", ], link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"}, )
Python
0.000001
fee6e923b27947721ba1ea4ce5005f54eabba421
Integrate LLVM at llvm/llvm-project@4504e1134c91
third_party/llvm/workspace.bzl
third_party/llvm/workspace.bzl
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "4504e1134c9118f3c322685f8a90129e09bab92c" LLVM_SHA256 = "2b1d7a96ff37600cae12d2ed51b9f0554b1bbc6511ffe51ac7525928b29bab44" tf_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], link_files = { "//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD", "//third_party/mlir:BUILD": "mlir/BUILD", "//third_party/mlir:build_defs.bzl": "mlir/build_defs.bzl", "//third_party/mlir:linalggen.bzl": "mlir/linalggen.bzl", "//third_party/mlir:tblgen.bzl": "mlir/tblgen.bzl", "//third_party/mlir:test.BUILD": "mlir/test/BUILD", }, )
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "0ad1d9fdf22dad41312e02b8bc990bf58ce1744c" LLVM_SHA256 = "517db6d771cf24d9f0aea6d4fdd59591347c7eb9d86ef58521fe8cb929fbe82b" tf_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], link_files = { "//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD", "//third_party/mlir:BUILD": "mlir/BUILD", "//third_party/mlir:build_defs.bzl": "mlir/build_defs.bzl", "//third_party/mlir:linalggen.bzl": "mlir/linalggen.bzl", "//third_party/mlir:tblgen.bzl": "mlir/tblgen.bzl", "//third_party/mlir:test.BUILD": "mlir/test/BUILD", }, )
Python
0.000003
a868b0d057b34dbd487a1e3d2b08d5489651b3ff
Integrate LLVM at llvm/llvm-project@fe611b1da84b
third_party/llvm/workspace.bzl
third_party/llvm/workspace.bzl
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "fe611b1da84b9442c093739394d336af9e99c1a1" LLVM_SHA256 = "52edc892b020736f4c53e52b63687ee7caab93c90a0062b4026f4d49fc18266f" tf_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], build_file = "//third_party/llvm:BUILD.bazel", )
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "bd7ece4e063e7afd08cbaa311878c09aadf5ec21" LLVM_SHA256 = "e9390dfa94c1143f35437bea8a011b030194e047bc3df45e2627cff88f83d2ed" tf_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], build_file = "//third_party/llvm:BUILD.bazel", )
Python
0.000006
ba7bae1d4b193eaf29b827ac6e48be5afc114613
Rename file and fix up paths in code.
kaggle-classification/keras-trainer/model.py
kaggle-classification/keras-trainer/model.py
""" Classifiers for the Toxic Comment Classification Kaggle challenge, https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import pandas as pd import os import tensorflow as tf from keras.callbacks import EarlyStopping from keras.callbacks import ModelCheckpoint from keras.layers import Conv1D from keras.layers import Dense from keras.layers import Dropout from keras.layers import Embedding from keras.layers import Flatten from keras.layers import Input from keras.layers import MaxPooling1D from keras.layers import Activation from keras.layers import Concatenate from keras.models import load_model from keras.models import Model from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from os.path import expanduser from sklearn import metrics DEFAULT_HPARAMS = tf.contrib.training.HParams( learning_rate=0.00005, dropout_rate=0.3, batch_size=128, epochs=20, max_sequence_length=250, embedding_dim=100) LABELS = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'] class AttentionToxModel(): """Toxicity model using CNN + Attention""" def __init__(self, model_path, embeddings_path, hparams=DEFAULT_HPARAMS): self.model_path = model_path self.embeddings_path = embeddings_path self.hparams = hparams print("Setting up tokenizer...") self.tokenizer = self._setup_tokenizer() print("Setting up embedding matrix...") self.embedding_matrix = self._setup_embedding_matrix() print("Loading model...") self._load_model() def train(self, train): model = self._build_model() train_comment = self._prep_texts(train['comment_text']) train_labels = [train[label] for label in LABELS] callbacks = [ ModelCheckpoint( self.model_path, save_best_only=True, verbose=True), EarlyStopping( monitor='val_loss', mode='auto') ] model.fit( x=train_comment, y=train_labels, 
batch_size=self.hparams.batch_size, epochs=self.hparams.epochs, #validation_data=(validation_comment, validation_labels), validation_split=0.1, callbacks=callbacks) self._load_model() def predict(self, texts): data = self._prep_texts(texts) return self.model.predict(data) def score_auc(self, data): predictions = self.predict(data['comment_text']) scores = [] for idx, label in enumerate(LABELS): labels = np.array(data['toxic']) score = metrics.roc_auc_score(labels, predictions[idx].flatten()) scores.append(score) print("{} has AUC {}".format(label, score)) print("Avg AUC {}".format(np.mean(scores))) def _prep_texts(self, texts): return pad_sequences(self.tokenizer.texts_to_sequences(texts), maxlen=self.hparams.max_sequence_length) def _load_model(self): try: self.model = load_model(self.model_path) print('Model loaded from: {}'.format(self.model_path)) except IOError: print('Could not load model at: {}'.format(self.model_path)) def _setup_tokenizer(self): words = [] with open(self.embeddings_path) as f: for line in f: words.append(line.split()[0]) # TODO(jjtan): configure OOV token tokenizer = Tokenizer() tokenizer.fit_on_texts(words) return tokenizer def _setup_embedding_matrix(self): embedding_matrix = np.zeros((len(self.tokenizer.word_index) + 1, self.hparams.embedding_dim)) with open(self.embeddings_path) as f: for line in f: values = line.split() word = values[0] if word in self.tokenizer.word_index: word_idx = self.tokenizer.word_index[word] word_embedding = np.asarray(values[1:], dtype='float32') embedding_matrix[word_idx] = word_embedding return embedding_matrix def _build_model(self): I = Input(shape=(self.hparams.max_sequence_length,), dtype='float32') E = Embedding( len(self.tokenizer.word_index) + 1, self.hparams.embedding_dim, weights=[self.embedding_matrix], input_length=self.hparams.max_sequence_length, trainable=False)(I) X5 = Conv1D(128, 5, activation='relu', padding='same')(E) X5 = MaxPooling1D(250, padding='same')(X5) X4 = Conv1D(128, 4, 
activation='relu', padding='same')(E) X4 = MaxPooling1D(250, padding='same')(X4) X3 = Conv1D(128, 3, activation='relu', padding='same')(E) X3 = MaxPooling1D(250, padding='same')(X3) X = Concatenate(axis=-1)([X5, X4, X3]) X = Flatten()(X) X = Dropout(self.hparams.dropout_rate)(X) X = Dense(128, activation='relu')(X) X = Dropout(self.hparams.dropout_rate)(X) toxic_out = Dense(1, activation='sigmoid', name='toxic')(X) severe_toxic_out = Dense(1, activation='sigmoid', name='severe_toxic')(X) obscene_out = Dense(1, activation='sigmoid', name='obscene')(X) threat_out = Dense(1, activation='sigmoid', name='threat')(X) insult_out = Dense(1, activation='sigmoid', name='insult')(X) identity_hate_out = Dense(1, activation='sigmoid', name='identity_hate')(X) model = Model(inputs=I, outputs=[toxic_out, severe_toxic_out, obscene_out, threat_out, insult_out, identity_hate_out]) model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) print(model.summary()) return model if __name__ == "__main__": data_dir = os.path.expanduser('~/data') model_file = os.path.join(data_dir, 'keras_kaggle_model.h5') embeddings_file = os.path.join(data_dir, 'glove.6B/glove.6B.100d.txt') train_file = os.path.join(data_dir, 'kaggle_train.csv') validation_file = os.path.join(data_dir, 'kaggle_validation.csv') test_file = os.path.join(data_dir, 'kaggle_test.csv') model = AttentionToxModel(model_path=model_file, embeddings_path=embeddings_file) train = pd.read_csv(train_file) validation = pd.read_csv(validation_file) train_and_validation = pd.concat([train, validation]) model.train(train_and_validation) test_data = pd.read_csv(test_file) predictions = model.predict(test_data['comment_text']) model.score_auc(test_data) model.predict(['This sentence is benign'])
Python
0
d8d87a0cdcce8c8ec1770aa722a17a40d25409e6
Add migration script to migrate all "extra" entries to lists
scripts/migration/migrate_registration_extra_again.py
scripts/migration/migrate_registration_extra_again.py
""" Changes existing question.extra on all registrations and draft registrations to a list. Required for multiple files attached to a question. """ import sys import logging from modularodm import Q from website.app import init_app from scripts import utils as scripts_utils from website.models import Node, DraftRegistration from framework.transactions.context import TokuTransaction logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def migrate_extras(queryset, dry=True): migrated = [] errored = set() model_name = 'Node' for obj in queryset: # 1 transaction per obj, to prevent locking errors with TokuTransaction(): changed = False if isinstance(obj, DraftRegistration): meta = [obj.registration_metadata] model_name = 'DraftRegistration' if obj.registered_node: # Skip over drafts that have been completed continue else: meta = obj.registered_meta.values() model_name = 'Node' if not meta: continue for data in meta: for question, answer in data.items(): if isinstance(answer.get('extra'), dict): if not answer.get('extra'): logger.info('Migrating extra for question {!r} on {} {}'.format(question, model_name, obj._id)) answer['extra'] = [] changed = True else: # We don't expect to get here logger.error('Found non-empty "extra" on {} {} for question {!r}'.format(model_name, obj._id, question)) errored.add(obj) for value in answer.values(): if isinstance(value, dict): for k, v in value.items(): if isinstance(v, dict) and isinstance(v.get('extra'), dict): if not v.get('extra'): logger.info('Migrating {}/extra for question {} on {} {}'.format(k, question, model_name, obj._id)) v['extra'] = [] changed = True else: # We don't expect to get here logger.error('Found non-empty "{}/extra" on {} {} for question {}'.format(k, model_name, obj._id, question)) errored.add(obj) if changed: migrated.append(obj._id) if model_name == 'DraftRegistration': # Prevent datetime_updated from being updated on save obj._fields['datetime_updated']._auto_now = False if not dry: changed 
= obj.save() if model_name == 'DraftRegistration': assert changed == {'registration_metadata'}, 'Expected only registration_metadata to change. Got: {}'.format(changed) return migrated, errored def migrate(dry=True): registrations = Node.find( Q('is_registration', 'eq', True) & Q('registered_meta', 'ne', None) ) regs_migrated, reg_errored = migrate_extras(registrations, dry=dry) drafts = DraftRegistration.find(Q('registration_metadata', 'ne', {})) drafts_migrated, drafts_errored = migrate_extras(drafts, dry=dry) logger.info('Migrated registered_meta for {} registrations'.format(len(regs_migrated))) if reg_errored: logger.error('{} errored: {}'.format(len(reg_errored), reg_errored)) logger.info('Migrated registered_meta for {} draft registrations'.format(len(drafts_migrated))) if drafts_errored: logger.error('{} errored: {}'.format(len(drafts_errored), drafts_errored)) if __name__ == '__main__': dry_run = '--dry' in sys.argv if not dry_run: scripts_utils.add_file_logger(logger, __file__) init_app(set_backends=True, routes=False) migrate(dry=dry_run)
Python
0.000001
1ee8ad86132771f8124c7a3a3321b582dd6b816d
add migration
servicerating/migrations/0002_servicerating_dedupe.py
servicerating/migrations/0002_servicerating_dedupe.py
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import DataMigration from django.db import models class Migration(DataMigration): def forwards(self, orm): crontab = orm['djcelery.CrontabSchedule']( month_of_year="*", day_of_week="*", hour="5", minute="0", day_of_month="*" ) crontab.save() task = orm['djcelery.PeriodicTask']( task="servicerating.tasks.ensure_one_servicerating", name="Ensure Clean Servicerating", args="[]", enabled=True, crontab=crontab, kwargs="{}", description="" ) task.save() def backwards(self, orm): pass models = { u'servicerating.contact': { 'Meta': {'object_name': 'Contact'}, 'conversation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts'", 'to': u"orm['servicerating.Conversation']"}), 'created_at': ('servicerating.models.AutoNewDateTimeField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '43'}), 'msisdn': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('servicerating.models.AutoDateTimeField', [], {'blank': 'True'}), 'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, u'servicerating.conversation': { 'Meta': {'object_name': 'Conversation'}, 'created_at': ('servicerating.models.AutoNewDateTimeField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '43'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'updated_at': ('servicerating.models.AutoDateTimeField', [], {'blank': 'True'}), 'user_account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'conversations'", 'to': u"orm['servicerating.UserAccount']"}) }, u'servicerating.extra': { 'Meta': 
{'object_name': 'Extra'}, 'contact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extras'", 'to': u"orm['servicerating.Contact']"}), 'created_at': ('servicerating.models.AutoNewDateTimeField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'updated_at': ('servicerating.models.AutoDateTimeField', [], {'blank': 'True'}), 'value': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, u'servicerating.response': { 'Meta': {'object_name': 'Response'}, 'contact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contact_responses'", 'to': u"orm['servicerating.Contact']"}), 'created_at': ('servicerating.models.AutoNewDateTimeField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'updated_at': ('servicerating.models.AutoDateTimeField', [], {'blank': 'True'}), 'value': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, u'servicerating.useraccount': { 'Meta': {'object_name': 'UserAccount'}, 'created_at': ('servicerating.models.AutoNewDateTimeField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '43'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'updated_at': ('servicerating.models.AutoDateTimeField', [], {'blank': 'True'}) } } complete_apps = ['servicerating'] symmetrical = True
Python
0.000001
6b01692fcdaf0dc01bb32eefcf883200d2864f60
make paths work independent of cursor
statscraper/scrapers/PXWebScraper.py
statscraper/scrapers/PXWebScraper.py
# encoding: utf-8 """ A wrapper around the PX-Web API. As implementations and versions vary, this is best used as a base class, for more specific scrapers to extend. If used directly, an API endpoint must be set: scraper = PXWeb(base_url="http://api.example.com/") # ...or: scraper = PXWeb() scraper.base_url = "http://api.example.com/" """ try: from simplejson.scanner import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import requests from statscraper import (BaseScraper, Collection, Dataset, Dimension, InvalidData) class PXWeb(BaseScraper): base_url = None # API endpoint @BaseScraper.on("init") def _get_args(self, *args, **kwargs): """ Store `base_url`, if given on init. This is convenient when the PXWeb scraper is used directly by an end user. """ if "base_url" in kwargs and kwargs["base_url"]: self.base_url = kwargs["base_url"] def _api_path(self, item): """Get the API path for the current cursor position.""" if self.base_url is None: raise NotImplementedError("base_url not set") path = "/".join([x.blob["id"] for x in item.path]) return "/".join([self.base_url, path]) def _fetch_itemslist(self, item): data = requests.get(self._api_path(item)).json() for d in data: if d["type"] == "l": yield Collection(d["text"], blob=d) else: yield Dataset(d["text"], blob=d) def _fetch_dimensions(self, dataset): data = requests.get(self._api_path(dataset)).json() try: for d in data["variables"]: yield Dimension(d["code"], label=d["text"], allowed_values=d["values"]) except KeyError: yield None def _fetch_data(self, dataset, query): if query is None: query = {} body = { 'query': [{ 'code': key, 'selection': { 'filter': "item", # value can be a list or a value 'values': value if isinstance(value, list) else [value] } } for key, value in query.iteritems()], 'response': { 'format': "json" } } try: raw = requests.post(self._api_path(dataset), json=body) data = raw.json() except JSONDecodeError: raise InvalidData("No valid response from PX Web. 
Check your query for spelling errors.") return data["data"]
# encoding: utf-8 """ A wrapper around the PX-Web API. As implementations and versions vary, this is best used as a base class, for more specific scrapers to extend. If used directly, an API endpoint must be set: scraper = PXWeb(base_url="http://api.example.com/") # ...or: scraper = PXWeb() scraper.base_url = "http://api.example.com/" """ try: from simplejson.scanner import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import requests from statscraper import (BaseScraper, Collection, Dataset, Dimension, InvalidData) class PXWeb(BaseScraper): base_url = None # API endpoint @BaseScraper.on("init") def _get_args(self, *args, **kwargs): """ Store `base_url`, if given on init. This is convenient when the PXWeb scraper is used directly by an end user. """ if "base_url" in kwargs and kwargs["base_url"]: self.base_url = kwargs["base_url"] @property def _api_path(self): """Get the API path for the current cursor position.""" if self.base_url is None: raise NotImplementedError("base_url not set") path = "/".join([x.blob["id"] for x in self.current_item.path]) return "/".join([self.base_url, path]) def _fetch_itemslist(self, item): data = requests.get(self._api_path).json() for d in data: if d["type"] == "l": yield Collection(d["text"], blob=d) else: yield Dataset(d["text"], blob=d) def _fetch_dimensions(self, dataset): data = requests.get(self._api_path).json() try: for d in data["variables"]: yield Dimension(d["code"], label=d["text"], allowed_values=d["values"]) except KeyError: yield None def _fetch_data(self, dataset, query): if query is None: query = {} body = { 'query': [{ 'code': key, 'selection': { 'filter': "item", # value can be a list or a value 'values': value if isinstance(value, list) else [value] } } for key, value in query.iteritems()], 'response': { 'format': "json" } } try: raw = requests.post(self._api_path, json=body) data = raw.json() except JSONDecodeError: raise InvalidData("No valid response from PX Web. 
Check your query for spelling errors.") return data["data"]
Python
0.000003
a973c3b5d1683cd27f8b88c4c38daa934dae75c1
refactor storeRegiser
usermanage/views/storeRegister.py
usermanage/views/storeRegister.py
from django.shortcuts import render, redirect from django.http import HttpResponseRedirect from django.contrib.auth import login, authenticate, logout from django.contrib.auth.models import User, Group from django.contrib.auth.decorators import login_required, user_passes_test, permission_required from django.contrib.auth.forms import UserCreationForm from customermanage.models import Coupon, Wallet from storemanage.models import Ticket # Create your views here. from usermanage import models def storeRegister(request): if request.user.is_authenticated: return redirect('index:index') if request.method == 'GET': return render(request,'usermanage/register-store.html') data = request.POST # check user already exits if User.objects.filter(username=data['username']).exists(): return render(request,'usermanage/register-store.html', { 'user_error' : True, }) user = User.objects.create_user(data['username'], password = data['password']) g = Group.objects.get(name='store') g.user_set.add(user) user.save() g.save() storeprofile = models.Store(user = user, store_name=data['storename'], profile_image_url=data['profile_image_url']) storeprofile.save() return redirect_after_login(user)
Python
0.99987
9316bc07c77e2f51332a40bf430cef117f4d89e1
Add script to check for Dockerfile coverage
util/check_dockerfile_coverage.py
util/check_dockerfile_coverage.py
import yaml import os import pathlib2 import itertools import argparse import logging import sys TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR") CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml") LOGGER = logging.getLogger(__name__) def check_coverage(containers): # open config file containing container weights config_file_path = pathlib2.Path(CONFIG_FILE_PATH) with (config_file_path.open(mode='r')) as file: try: config = yaml.load(file) except yaml.YAMLError, exc: LOGGER.error("error in configuration file: %s" % str(exc)) sys.exit(1) # get container weights weights = config.get("weights") # convert all containers in config file to a list of tuples (<container>, <weight>) weights_list = [x.items() for x in weights] weights_list = list(itertools.chain.from_iterable(weights_list)) # performs intersection between weighted containers and input containers used_containers = [x for x in weights_list if x[0] in containers] # determine which Dockerfiles are not covered; i.e. the set difference of the Dockerfiles to build minus the Dockerfile # available to be built is non-empty uncovered = set(containers) - set([x[0] for x in used_containers]) # exit with error code if uncovered Dockerfiles exist if uncovered: LOGGER.error("The following Dockerfiles are not described in the parsefiles_config.yml file: {}. 
Please see the following documentation on how to add Dockerfile ranks to the configuration file: {}".format(uncovered, "https://github.com/edx/configuration/blob/master/util/README.md")) sys.exit(1) def arg_parse(): parser = argparse.ArgumentParser(description = 'Given a list of containers as input and a number of shards, ' 'finds an approximation of the optimal distribution of the containers over the shards, provided a set of hard-coded weights ' 'in parsefiles_config.yml.') parser.add_argument('containers', help = "the Dockerfiles that need to be built as the result of some commit change and whose coverage is checked") return parser.parse_args() if __name__ == '__main__': args = arg_parse() # configure logging logging.basicConfig() containers = [] for word in args.containers.split(): containers.append(word) check_coverage(containers)
Python
0
c78c82987feb4dbe91b750ab90a3d163fc1340c6
Add datatables filter mixin
rhinocloud/contrib/jquery/views.py
rhinocloud/contrib/jquery/views.py
from django.views import generic from django.db.models import Q from django.core.exceptions import ImproperlyConfigured class DataTablesServerDataMixin(object): query_class = Q initial_query_kwargs = {} searchable_fields = () def get_searchable_fields(self): if self.searchable_fields is not None: fields = self.searchable_fields else: raise ImproperlyConfigured('Provide searchable_fields or override get_searchable_fields().') return fields def get_query_class(self): if self.query_class: qc = self.query_class else: raise ImproperlyConfigured('Provide query_class or override get_query_class().') return qc def get_initial_query_kwargs(self): if self.initial_query_kwargs is not None: kwargs = self.initial_query_kwargs else: raise ImproperlyConfigured('Provide initial_query_kwargs or override get_initial_query_kwargs().') return kwargs def get_initial_query(self): return self.get_query_class()(**self.get_initial_query_kwargs()) def get_searchterm_query(self, field, value): return self.get_query_class()(**{'%s__contains' % field: value}) def get_queryset(self, **kwargs): queryset = super(DataTablesServerDataMixin, self).get_queryset(**kwargs) iSortingCols = int(self.request.GET.get('iSortingCols', -1)) sSearch = self.request.GET.get('sSearch', None) if sSearch is not None: query = self.get_initial_query() for field in self.get_searchable_fields(): query.add(self.get_searchterm_query(field, sSearch), Q.OR) queryset = queryset.filter(query) ordering = [] for i in range(iSortingCols): sSortDir = self.request.GET['sSortDir_%s' % i] iSortingCols = int(self.request.GET['iSortCol_%s' % i]) ordering.append('%s%s' % (sSortDir == 'asc' and '-' or '', self.get_searchable_fields()[iSortingCols])) queryset = queryset.order_by(*ordering) return queryset
Python
0
4aeec42ca745db546b66f0a708f1c13f806164f8
Add commmand to re-assign person cases to facilities
custom/enikshay/management/commands/reassign_from_facility.py
custom/enikshay/management/commands/reassign_from_facility.py
from __future__ import absolute_import, print_function import csv import datetime import six from django.core.management.base import BaseCommand from casexml.apps.case.util import get_all_changes_to_case_property from corehq.apps.hqcase.utils import bulk_update_cases from corehq.util.log import with_progress_bar from custom.enikshay.case_utils import ( get_all_episode_ids, iter_all_active_person_episode_cases, ) from dimagi.utils.chunked import chunked MJK = 'df661f7aaf384e9c98d88beeedb83050' ALERT_INDIA = 'af50474dd6b747b29a2934b7b0359bdf' class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('domain') parser.add_argument('--commit', action='store_true') def handle(self, domain, **options): commit = options['commit'] filename = "reassign_from_facility-{}.csv".format(datetime.datetime.now().strftime('%Y-%m-%d_%H.%M.%S')) columns = ['case_id', 'facility_assigned_to', 'owner_id', 'last_owner_id_changed', 'last_facility_assigned_to_changed', 'note'] case_ids = get_all_episode_ids(domain) cases = iter_all_active_person_episode_cases(domain, case_ids, sector='private') bad_cases = [] to_update = [] for person, _ in with_progress_bar(cases, length=len(case_ids)): facility_assigned_to = person.get_case_property('facility_assigned_to') owner_id = person.owner_id if facility_assigned_to == owner_id: continue if not facility_assigned_to and owner_id in [MJK, ALERT_INDIA]: # cases with a blank facility and owned by MJK or Alert-India are known about already continue owner_id_changes = sorted(get_all_changes_to_case_property(person, 'owner_id'), key=lambda c: c.modified_on, reverse=True) facility_id_changes = sorted(get_all_changes_to_case_property(person, 'facility_assigned_to'), key=lambda c: c.modified_on, reverse=True) case_dict = { 'case_id': person.case_id, 'facility_assigned_to': facility_assigned_to, 'owner_id': owner_id, } try: case_dict['last_owner_id_changed'] = owner_id_changes[0].modified_on case_dict['last_facility_assigned_to_changed'] 
= facility_id_changes[0].modified_on if owner_id_changes[0].modified_on < facility_id_changes[0].modified_on: case_dict['note'] = 'updated' to_update.append((person.case_id, {"owner_id": facility_assigned_to}, False)) else: case_dict['note'] = 'not updated' except IndexError as e: case_dict['last_owner_id_changed'] = None case_dict['last_facility_assigned_to_changed'] = None case_dict['note'] = 'no changes found: {}'.format(six.text_type(e)) bad_cases.append(case_dict) if commit: print("Updating: ", len(to_update), " cases") for update in chunked(to_update, 100): bulk_update_cases(domain, update, self.__module__) else: print("Would have updated: ", len(to_update), " cases") with open(filename, 'w') as f: writer = csv.DictWriter(f, fieldnames=columns) writer.writeheader() for case in bad_cases: writer.writerow(case)
Python
0
5e9a66fb07b8071a8ac3d2130c2796a7cda03739
Create ConfigurationPIN.py
ConfigurationPIN.py
ConfigurationPIN.py
# -*- coding: utf8 -*- # Imports necessaires import re import sys import os import random name = os.popen('whoami').read() cutName = name.split("\n") name = cutName[0] sys.path.append('~/AndroidViewClient/src/') from com.dtmilano.android.viewclient import ViewClient from com.dtmilano.android.adb.adbclient import AdbClient device, serialno = ViewClient.connectToDeviceOrExit() if not device: raise Exception("Connection avec l'appareil impossible.") #retour à la page d'accueuil du téléphone pour commencer les manips (on appuie sur la touche de retour a l'accueuil du téléphone) device.press('KEYCODE_HOME') #Lancement de paramètres package = 'com.android.settings' activity = '.Settings' componentName = package + "/" + activity device.startActivity(componentName) #Creation d'un objet ViewClient vc = ViewClient(device=device, serialno=serialno) # on défile l'écran pour faire apparaître “Sécurité” device.drag((100,600),(100,100),1,20) vc.sleep(0.01) #pause pour charger l'affichage vc.dump() #dump récupère l'arbre de l'IHM dans ViewClient # On clique sur l'onglet "Sécurité" security = vc.findViewWithText(u"Ecran de verrouillage") # utf-8 security.touch() # on declenche l'appui vc.sleep(0.01) #pause vc.dump() #On clique sur l'onglet "Verrouillage de l'écran" mode = vc.findViewWithText(u"Glissement") if mode is not None: mode.touch() #On "clique" ici en touchant l'écran. 
vc.sleep(0.02) vc.dump() #Setup du code pin pin = vc.findViewWithText('Code PIN') pin.touch()# On clique sur le bouton "PIN" vc.sleep(0.02) vc.dump() # On choisit la zone de saisie de texte # choix par id car l'Edit Text n'a pas de nom password = vc.findViewByIdOrRaise("id/no_id/18") #Creation d'un code PIN aleatoire entre 1000 & 9999 rand = str(random.randint(1000,9999)) print rand # affichage console du code PIN #Creation/ouverture d'un fichier txt fichier = open("/home/"+name+"/AVCData/CodesPIN.txt", "a") #Inscription du PIN dans ce fichier txt fichier.write("PIN"+serialno+" : "+rand+"\n") fichier.close() password.type(rand) #écriture du mot de passe vc.sleep(0.01) vc.dump() ctn = vc.findViewWithText('Continuer') ctn.touch() # appui sur Continuer vc.sleep(0.01) vc.dump() password = vc.findViewByIdOrRaise("id/no_id/18") password.type(rand) # 2ème saisie du mot de passe vc.sleep(0.01) vc.dump() ok = vc.findViewWithText('OK') ok.touch() # confirmation else: print "Ce Smartphone déjà sécurisé par code PIN"
Python
0.000001
805708048f493ca538a9e0b8d9d40ae1d4baf2c3
Add a tool to reproduce HTTP KeepAlive races in OpenStack gate jobs.
keepalive-race/keep-alive-race.py
keepalive-race/keep-alive-race.py
#!/usr/bin/python3 """ This script demonstrates a race condition with HTTP/1.1 keepalive """ import decimal import json import subprocess import time import threading import requests requests.packages.urllib3.disable_warnings() CREDS = json.loads(subprocess.check_output( "openstack --os-cloud devstack token issue -f json".split(), ).decode()) URL = 'https://10.0.1.44:8774/v2/%s/servers/detail' % (CREDS['project_id']) def decimal_range(x, y, jump): x = decimal.Decimal(x) y = decimal.Decimal(y) jump = decimal.Decimal(jump) while x < y: yield float(x) x += jump def get(exit): for delay in decimal_range(4.95, 4.96, 0.005): session = requests.Session() if exit.is_set(): return for i in range(10): if exit.is_set(): return time.sleep(delay) headers = { 'User-Agent': 'timeout-race/%s' % i, 'X-Auth-Token': CREDS['id'] } try: session.get(URL, verify=False, headers=headers) except Exception as e: print(e) exit.set() threads = [] exit = threading.Event() for i in range(50): threads.append(threading.Thread(target=get,args=(exit,))) for thread in threads: thread.start() for thread in threads: thread.join()
Python
0.000029
e18c1d6a62d8a896e853f1bd16d4ea5bf6f67401
add statistics script
statistics.py
statistics.py
#!/usr/bin/env python """ Takes the database from the output of integrate.py (e.g. after the disambiguated inventors have been merged into the database) and computes statistics on top of it """ import uuid from datetime import datetime from lib import alchemy from collections import Counter, defaultdict def compute_future_citation_rank(): """ Ranks each patent by number of future citations in a given year Returns nested dictionary: years[YEAR][PATENT_ID] = number of times PATENT_ID was cited in YEAR """ citations = (c for c in alchemy.session.query(alchemy.USPatentCitation).yield_per(1)) years = defaultdict(Counter) print "Counting citations...", datetime.now() for cit in citations: if cit.date: year = cit.date.year patid = cit.patent_id years[year][patid] += 1 print "Finished counting citations", datetime.now() return years def insert_future_citation_rank(years): """ Accepts as input the dictionary returned from compute_future_citation_rank: years[YEAR][PATENT_ID] = number of times PATENT_ID was cited in YEAR Inserts rows into the correct table: """ # remove old rows to make way for new rankings deleted = alchemy.session.query(alchemy.FutureCitationRank).delete() print 'Removed {0} rows from FutureCitationRank'.format(deleted) print 'Inserting records in order...', datetime.now() for year in years.iterkeys(): rank = 0 prev_num_cits = float('inf') commit_counter = 0 for i, record in enumerate(years[year].most_common()): if record[1] < prev_num_cits: prev_num_cits = record[1] rank += 1 row = {'uuid': str(uuid.uuid1()), 'patent_id': record[0], 'num_citations': record[1], 'citation_year': year, 'rank': rank} dbrow = alchemy.FutureCitationRank(**row) alchemy.session.merge(dbrow) if (i+1) % 1000 == 0: alchemy.commit() alchemy.commit() print 'Finished inserting records', datetime.now() def compute_inventor_rank(): """ Ranks each inventor by number of granted patents in a given year Returns nested dictionary: years[YEAR][INVENTOR_ID] = number of patents granted in YEAR to 
INVENTOR_ID """ patents = (p for p in alchemy.session.query(alchemy.Patent).yield_per(1)) years = defaultdict(Counter) print 'Counting granted patents...', datetime.now() for pat in patents: year = pat.date.year inventors = pat.inventors for inventor in inventors: years[year][inventor.id] += 1 print 'Finished counting', datetime.now() return years def insert_inventor_rank(years): """ Accepts as input the dictionary returned from compute_inventor_rank: years[YEAR][INVENTOR_ID] = number of patents granted in YEAR to INVENTOR_ID Inserts rows into the correct table: """ deleted = alchemy.session.query(alchemy.InventorRank).delete() print 'removed {0} rows'.format(deleted) print 'Inserting records in order...', datetime.now() for year in years.iterkeys(): rank = 0 prev_num_cits = float('inf') commit_counter = 0 for i, record in enumerate(years[year].most_common()): if record[1] < prev_num_cits: prev_num_cits = record[1] rank += 1 row = {'uuid': str(uuid.uuid1()), 'inventor_id': record[0], 'num_patents': record[1], 'patent_year': year, 'rank': rank} dbrow = alchemy.InventorRank(**row) alchemy.session.merge(dbrow) if (i+1) % 1000 == 0: alchemy.commit() alchemy.commit() print 'Finished inserting records', datetime.now() if __name__=='__main__': years = compute_future_citation_rank() insert_future_citation_rank(years) years = compute_inventor_rank() insert_inventor_rank(years)
Python
0.000001
64314d580fac2cf0e0434527437b106a94e8ded0
Add setup.py build for kivy-launcher reboot
testapps/testlauncherreboot_setup/sdl2.py
testapps/testlauncherreboot_setup/sdl2.py
''' Clone Python implementation of Kivy Launcher from kivy/kivy-launcher repo, install deps specified in the OPTIONS['apk']['requirements'] and put it to a dist named OPTIONS['apk']['dist-name']. Tested with P4A Dockerfile at 5fc5241e01fbbc2b23b3749f53ab48f22239f4fc, kivy-launcher at ad5c5c6e886a310bf6dd187e992df972864d1148 on Windows 8.1 with Docker for Windows and running on Samsung Galaxy Note 9, Android 8.1. docker run \ --interactive \ --tty \ -v "/c/Users/.../python-for-android/testapps":/home/user/testapps \ -v ".../python-for-android/pythonforandroid":/home/user/pythonforandroid \ p4a sh -c '\ . venv/bin/activate \ && cd testapps/testlauncherreboot_setup \ && python sdl2.py apk \ --sdk-dir $ANDROID_SDK_HOME \ --ndk-dir $ANDROID_NDK_HOME' ''' # pylint: disable=import-error,no-name-in-module from subprocess import Popen from distutils.core import setup from os import listdir from os.path import join, dirname, abspath, exists from pprint import pprint from setuptools import find_packages ROOT = dirname(abspath(__file__)) LAUNCHER = join(ROOT, 'launcherapp') if not exists(LAUNCHER): PROC = Popen([ 'git', 'clone', 'https://github.com/kivy/kivy-launcher', LAUNCHER ]) PROC.communicate() assert PROC.returncode == 0, PROC.returncode pprint(listdir(LAUNCHER)) pprint(listdir(ROOT)) OPTIONS = { 'apk': { 'debug': None, 'bootstrap': 'sdl2', 'requirements': ( 'python3,sdl2,kivy,android,pyjnius,plyer' ), # 'sqlite3,docutils,pygments,' # 'cymunk,lxml,pil,openssl,pyopenssl,' # 'twisted,audiostream,ffmpeg,numpy' 'android-api': 27, 'ndk-api': 21, 'dist-name': 'bdisttest_python3launcher_sdl2_googlendk', 'name': 'TestLauncherPy3-sdl2', 'package': 'org.kivy.testlauncherpy3_sdl2_googlendk', 'ndk-version': '10.3.2', 'arch': 'armeabi-v7a', 'permissions': [ 'ACCESS_COARSE_LOCATION', 'ACCESS_FINE_LOCATION', 'BLUETOOTH', 'BODY_SENSORS', 'CAMERA', 'INTERNET', 'NFC', 'READ_EXTERNAL_STORAGE', 'RECORD_AUDIO', 'USE_FINGERPRINT', 'VIBRATE', 'WAKE_LOCK', 'WRITE_EXTERNAL_STORAGE' ] } } 
PACKAGE_DATA = { 'launcherapp': [ '*.py', '*.png', '*.ttf', '*.eot', '*.svg', '*.woff', ], 'launcherapp/art': [ '*.py', '*.png', '*.ttf', '*.eot', '*.svg', '*.woff', ], 'launcherapp/art/fontello': [ '*.py', '*.png', '*.ttf', '*.eot', '*.svg', '*.woff', ], 'launcherapp/data': [ '*.py', '*.png', '*.ttf', '*.eot', '*.svg', '*.woff', ], 'launcherapp/launcher': [ '*.py', '*.png', '*.ttf', '*.eot', '*.svg', '*.woff', ] } PACKAGES = find_packages() print('packages are', PACKAGES) setup( name='testlauncherpy3_sdl2_googlendk', version='1.0', description='p4a sdl2.py apk', author='Peter Badida', author_email='keyweeusr@gmail.com', packages=find_packages(), options=OPTIONS, package_data=PACKAGE_DATA )
Python
0
e11de6b814da4e5ade9fadaa035d6141ab3c113f
add test that features can be read and written ok
test/test_features.py
test/test_features.py
#!/usr/bin/python import unittest import RMF class GenericTest(unittest.TestCase): def _create(self, path): fh= RMF.create_rmf_file(path) rt= fh.get_root_node() reps=[rt.add_child("rep"+str(i), RMF.REPRESENTATION) for i in range(0,5)] sf= RMF.ScoreFactory(fh) fn= rt.add_child("feature", RMF.FEATURE) sd= sf.get(fn) sd.set_score(10.0) sd.set_representation(reps) def _test(self, path): fh= RMF.open_rmf_file_read_only(path) rt= fh.get_root_node() ch= rt.get_children() fn= ch[-1] reps= ch[:-1] sf= RMF.ScoreConstFactory(fh) sd= sf.get(fn) print sd.get_score() print reps print sd.get_representation() self.assert_(sd.get_score()==10) self.assert_(sd.get_representation() == reps) def test_multiparent(self): """Test that feature nodes work right""" for suffix in RMF.suffixes: path=RMF._get_temporary_file_path("alias2."+suffix) print path self._create(path) self._test(path) if __name__ == '__main__': unittest.main()
Python
0
48da7ceb86387d3cb6fd53f50110232813123ecc
Add tests for ansible roster virtual
tests/pytests/unit/roster/test_ansible.py
tests/pytests/unit/roster/test_ansible.py
import pytest import salt.roster.ansible as ansible from tests.support.mock import patch @pytest.mark.xfail @pytest.mark.parametrize( "which_value", [False, None], ) def test_virtual_returns_False_if_ansible_inventory_doesnt_exist(which_value): with patch("salt.utils.path.which", autospec=True, return_value=which_value): assert ansible.__virtual__() == (False, "Install `ansible` to use inventory")
Python
0
f3e3ab4fea1d367578adffbefd072616beaee65e
Create word_a10n.py
word_a10n.py
word_a10n.py
#Kunal Gautam #Codewars : @Kunalpod #Problem name: Word a10n (abbreviation) #Problem level: 6 kyu import re def abbreviate(s): words = re.findall('[A-Za-z][A-Za-z][A-Za-z][A-Za-z]+', s) for word in words: s = s.replace(word, word[0] + str(len(word) - 2) + word[-1]) return s
Python
0.999953
ccb2c0c2c2fac64b88248819b00feddeaafd252c
Create evernote_test.py
evernote_test.py
evernote_test.py
from evernote import * NOTE1 = """<note> <guid>6BA8DC47-EB38-40D9-BE32-5D5DD82E9EC7</guid> <created>2013-07-12T19:22:13Z</created> <tag>poetry</tag> <tag>whitman</tag> <content> Gliding o'er all, through all, Through Nature, Time, and Space, As a ship on the waters advancing, The voyage of the soul - not life alone, Death, many deaths I'll sing. And some other words for indexing. </content> </note>""" NOTE2 = """<note> <guid>450E1369-9D9D-4168-8969-2A4DCC8DDEC4</guid> <created>2014-04-29T08:37:16Z</created> <tag>poetry</tag> <tag>byron</tag> <content> Famed for their civil and domestic quarrels See heartless Henry lies by headless Charles; Between them stands another sceptred thing, It lives, it reigns - "aye, every inch a king." Charles to his people, Henry to his wife, In him the double tyrant starts to life: Justice and Death have mixed their dust in vain. The royal Vampires join and rise again. What now can tombs avail, since these disgorge The blood and dirt of both to mould a George! 
</content> </note>""" def test(): note1, note2 = parse_note(NOTE1), parse_note(NOTE2) assert note1['guid'] == '6BA8DC47-EB38-40D9-BE32-5D5DD82E9EC7' assert 'poetry' in note2['tag'] assert 'for' in set.intersection(note1['content'], note2['content']) trie1 = tree() trie_delete('blah', 'blah', trie1) # just make sure no error thrown assert get_word('none', trie1) is NULL_GUIDS assert find_trie('none', trie1) == tree() assert 'n' in trie1 and 'e' in find_trie('non', trie1) trie_put('to', 'to', trie1) trie_put('toes', 'toes', trie1) assert 'to' in get_word('to', trie1) and 'toes' not in get_word('to', trie1) assert 'to' in get_prefix('to', trie1) and 'toes' in get_prefix('to', trie1) trie_delete('to', 'to', trie1) assert 'to' not in get_prefix('to', trie1) and 'toes' in get_prefix('to', trie1) trie_put('toes', 'toes2', trie1) assert 'toes' in get_word('toes', trie1) and 'toes2' in get_word('toes', trie1) trie2 = tree() words = 'aaa aab aac aaa abb abc acc acb'.split() pivot = 'aac' for w in words: trie_put(w, w, trie2) gte = get_gte(pivot, trie2) assert 'aac' in gte and 'abb' in gte and 'acb' in gte and 'aab' not in gte gte2 = get_gte('aaaa', trie2) assert 'aac' in gte2 and 'aaa' not in gte2 and 'aaaa' not in gte2 create(note1) create(note2) content = indexes['content'] assert '6BA8DC47-EB38-40D9-BE32-5D5DD82E9EC7' in get_word('for', content) assert '450E1369-9D9D-4168-8969-2A4DCC8DDEC4' in get_word('for', content) query = 'civil fo*' queries = (parse_query(qs) for qs in query.split()) intersection = search_intersection(queries) assert '450E1369-9D9D-4168-8969-2A4DCC8DDEC4' in intersection assert '6BA8DC47-EB38-40D9-BE32-5D5DD82E9EC7' not in intersection queries2 = (parse_query(qs) for qs in 'tag:poetry'.split()) intersection2 = search_intersection(queries2) assert '450E1369-9D9D-4168-8969-2A4DCC8DDEC4' in intersection2 assert '6BA8DC47-EB38-40D9-BE32-5D5DD82E9EC7' in intersection2 queries3 = (parse_query(qs) for qs in 'tag:blah'.split()) assert 
len(search_intersection(queries3)) == 0 queries4 = (parse_query(qs) for qs in 'tag:Poetry'.split()) assert len(search_intersection(queries4)) == 2 return 'tests pass!' if __name__ == '__main__': print test()
Python
0.000001
8dd3207298e7d81f5d4abdfa62604d5849d132fd
Add Python hello client
example/hello.py
example/hello.py
#!/usr/bin/env python import dbus bus = dbus.SessionBus() remote_object = bus.get_object("org.za.hem.DBus", "/Root") dbus_interface = "org.designfu.SampleInterface" iface = dbus.Interface(remote_object, dbus_interface) hello_reply_list = remote_object.HelloWorld(17, "Hello from example-client.py!", dbus_interface=dbus_interface) print (hello_reply_list) remote_object.HelloWorld(1, {"php":"Rasmus Lerdorf",\ "perl":"Larry Wall",\ "python":"Guido van Rossum"}) hello_reply_tuple = iface.GetTuple() print str(hello_reply_tuple) hello_reply_dict = iface.GetDict() print str(hello_reply_dict)
Python
0.000206
5836eb513b244a21e33e111cd3c3d6f33530aeae
Add a simple widget that sets up basic layout for common controls.
source/harmony/ui/widget/simple.py
source/harmony/ui/widget/simple.py
# :coding: utf-8 # :copyright: Copyright (c) 2013 Martin Pengelly-Phillips # :license: See LICENSE.txt. from PySide import QtGui from .base import Widget class Simple(Widget): '''Simple widget that wraps a single control.''' def _construct(self): '''Construct widget.''' super(Simple, self)._construct() self.setLayout(QtGui.QHBoxLayout()) self.layout().addWidget(self._requiredIndicator) self.layout().addWidget(self._titleLabel) self._prefix = QtGui.QFrame() self._prefix.setLayout(QtGui.QHBoxLayout()) self._prefix.layout().addWidget(self._requiredIndicator) self._prefix.layout().addWidget(self._titleLabel) self.layout().addWidget(self._prefix, stretch=0) self._control = self._constructControl() self.layout().addWidget(self._control, stretch=1) self.layout().addWidget(self._errorIndicator, stretch=0) def _constructControl(self): '''Return the control widget. Subclasses should override this to return an appropriate control widget. ''' raise NotImplementedError() def value(self): '''Return current value.''' raise NotImplementedError() def setValue(self, value): '''Set current *value*.''' raise NotImplementedError()
Python
0
6b5850d70a1c52b617fc7daeefa57023f6ef63c7
Add some paramaterized tests for skolemization and de-skolemization
test/test_graph/test_skolemization.py
test/test_graph/test_skolemization.py
import logging import re from test.testutils import GraphHelper from typing import Pattern, Union import pytest from rdflib import Graph from rdflib.namespace import Namespace from rdflib.term import BNode, Literal, URIRef EG = Namespace("http://example.com/") base_triples = { (EG.subject, EG.predicate, EG.object0), (EG.subject, EG.predicate, EG.object1), } @pytest.mark.parametrize( ["node", "expected_uri"], [ (URIRef("http://example.com"), None), (Literal("some string in here ..."), None), (BNode("GMeng4V7"), "http://rdlib.net/.well-known/genid/rdflib/GMeng4V7"), ( BNode(), re.compile("^" + re.escape("http://rdlib.net/.well-known/genid/rdflib/")), ), ], ) def test_skolemization( node: Union[BNode, URIRef, Literal], expected_uri: Union[Pattern[str], str, None] ) -> None: g = Graph() for triple in base_triples: g.add(triple) g.add((EG.scheck, EG.pcheck, node)) assert len(g) == 3 dsg = g.skolemize() if expected_uri is None: GraphHelper.assert_sets_equals(g, dsg) else: assert len(dsg) == len(g) iset = GraphHelper.triple_or_quad_set(dsg) logging.debug("iset = %s", iset) assert iset.issuperset(base_triples) check_triples = list(dsg.triples((EG.scheck, EG.pcheck, None))) assert len(check_triples) == 1 sbnode = check_triples[0][2] logging.debug("sbnode = %s, sbnode_value = %s", sbnode, f"{sbnode}") assert isinstance(sbnode, URIRef) if isinstance(expected_uri, str): assert expected_uri == f"{sbnode}" else: assert expected_uri.match(f"{sbnode}") is not None @pytest.mark.parametrize( ["iri", "expected_bnode_value"], [ ("http://example.com", None), ("http://example.com/not/.well-known/genid/1", None), ("http://rdlib.net/not/.well-known/genid/1", None), ("http://example.com/.well-known/genid/1", re.compile("^N")), ("http://rdlib.net/.well-known/genid/rdflib/GMeng4V7", "GMeng4V7"), ], ) def test_deskolemization( iri: str, expected_bnode_value: Union[str, None, Pattern[str]] ) -> None: g = Graph() for triple in base_triples: g.add(triple) g.add((EG.scheck, EG.pcheck, 
URIRef(iri))) assert len(g) == 3 dsg = g.de_skolemize() if expected_bnode_value is None: GraphHelper.assert_sets_equals(g, dsg) else: assert len(dsg) == len(g) iset = GraphHelper.triple_or_quad_set(dsg) logging.debug("iset = %s", iset) assert iset.issuperset(base_triples) check_triples = list(dsg.triples((EG.scheck, EG.pcheck, None))) assert len(check_triples) == 1 bnode = check_triples[0][2] logging.debug("bnode = %s, bnode_value = %s", bnode, f"{bnode}") assert isinstance(bnode, BNode) if isinstance(expected_bnode_value, str): assert expected_bnode_value == f"{bnode}" else: assert expected_bnode_value.match(f"{bnode}") is not None
Python
0
a35be4a666c26ec13d61ee30639d9c3894d129e1
add py script for profiling time
scripts/profile-time.py
scripts/profile-time.py
import sys import timeit import logging import argparse import subprocess import tqdm import tabulate import capa.main import capa.perf import capa.rules import capa.engine import capa.helpers import capa.features import capa.features.common import capa.features.freeze logger = logging.getLogger("capa.profile") def main(argv=None): if argv is None: argv = sys.argv[1:] label = subprocess.run( "git show --pretty=oneline --abbrev-commit | head -n 1", shell=True, capture_output=True, text=True ).stdout.strip() is_dirty = ( subprocess.run( "git status | grep 'modified: ' | grep -v 'rules' | grep -v 'tests/data'", shell=True, capture_output=True, text=True, ).stdout != "" ) if is_dirty: label += " (dirty)" parser = argparse.ArgumentParser(description="Profile capa performance") capa.main.install_common_args(parser, wanted={"format", "sample", "signatures", "rules"}) parser.add_argument("--number", type=int, default=3, help="batch size of profile collection") parser.add_argument("--repeat", type=int, default=30, help="batch count of profile collection") parser.add_argument("--label", type=str, default=label, help="description of the profile collection") args = parser.parse_args(args=argv) capa.main.handle_common_args(args) try: taste = capa.helpers.get_file_taste(args.sample) except IOError as e: logger.error("%s", str(e)) return -1 try: with capa.main.timing("load rules"): rules = capa.rules.RuleSet(capa.main.get_rules(args.rules, disable_progress=True)) except (IOError) as e: logger.error("%s", str(e)) return -1 try: sig_paths = capa.main.get_signatures(args.signatures) except (IOError) as e: logger.error("%s", str(e)) return -1 if (args.format == "freeze") or (args.format == "auto" and capa.features.freeze.is_freeze(taste)): with open(args.sample, "rb") as f: extractor = capa.features.freeze.load(f.read()) else: extractor = capa.main.get_extractor( args.sample, args.format, capa.main.BACKEND_VIV, sig_paths, should_save_workspace=False ) with tqdm.tqdm(total=args.number 
* args.repeat) as pbar: def do_iteration(): capa.perf.reset() capa.main.find_capabilities(rules, extractor, disable_progress=True) pbar.update(1) samples = timeit.repeat(do_iteration, number=args.number, repeat=args.repeat) logger.debug("perf: find capabilities: min: %0.2fs" % (min(samples) / float(args.number))) logger.debug("perf: find capabilities: avg: %0.2fs" % (sum(samples) / float(args.repeat) / float(args.number))) logger.debug("perf: find capabilities: max: %0.2fs" % (max(samples) / float(args.number))) for (counter, count) in capa.perf.counters.most_common(): logger.debug("perf: counter: {:}: {:,}".format(counter, count)) print( tabulate.tabulate( [ ( args.label, "{:,}".format(capa.perf.counters["evaluate.feature"]), "%0.2fs" % (sum(samples) / float(args.repeat) / float(args.number)), "%0.2fs" % (min(samples) / float(args.number)), "%0.2fs" % (max(samples) / float(args.number)), ) ], headers=["label", "count(evaluations)", "avg(time)", "min(time)", "max(time)"], tablefmt="github", ) ) return 0 if __name__ == "__main__": sys.exit(main())
Python
0
8628ab8cbcb185826e97af9148ec7d07861e29e7
Add setup_pfiles.py to setup parameter files for CIAO tools
scripts/setup_pfiles.py
scripts/setup_pfiles.py
# Copyright (c) 2017 Weitian LI <liweitianux@live.com> # MIT license # # Weitian LI # 2017-02-06 """ Prepare the CIAO parameter files and setup the PFILES environment variable to keep the pfiles locally, in order to avoid the conflicts between multiple instance of the same CIAO tools. """ import os import subprocess import shutil def setup_pfiles(tools): """ Copy the parameter files of the specified tools to the current working directory, and setup the ``PFILES`` environment variable. Parameters ---------- tools : list[str] Name list of the tools to be set up """ for tool in tools: pfile = subprocess.check_output([ "paccess", tool ]).decode("utf-8").strip() subprocess.check_call(["punlearn", tool]) shutil.copy(pfile, ".") # Setup the ``PFILES`` environment variable os.environ["PFILES"] = "./:" + os.environ["PFILES"]
Python
0
6c8966e0e299c12e95d41009a8dde7519946b432
add db level constraint for one active subscription per domain
corehq/apps/accounting/migrations/0006_unique_active_domain_subscription.py
corehq/apps/accounting/migrations/0006_unique_active_domain_subscription.py
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-04-22 17:18 from __future__ import unicode_literals from django.db import migrations from corehq.sql_db.operations import HqRunSQL class Migration(migrations.Migration): dependencies = [ ('accounting', '0005_automatic_downgrade_adjustment_method'), ] operations = [ HqRunSQL( """ CREATE UNIQUE INDEX accounting_subscription_active_subscriber ON accounting_subscription(subscriber_id) WHERE (is_active = TRUE and is_hidden_to_ops = FALSE); """, reverse_sql= """ DROP INDEX accounting_subscription_active_subscriber; """, ) ]
Python
0
05c6336c3d3f34994b9597396e86bfd8d20ded4c
implement basic Sentiment Analyzer Dataset: Sentiment140 Classifier: Naive Bayes
nltk/sentiment/sentiment_analyzer.py
nltk/sentiment/sentiment_analyzer.py
from __future__ import print_function from nltk.tokenize import word_tokenize from nltk.probability import FreqDist from nltk.classify.util import apply_features, accuracy from nltk.classify.naivebayes import NaiveBayesClassifier import pdb import csv import sys import time import io import pickle import random # To improve flexibility we could later abstract from tweets to generic documents class SentimentAnalyzer(object): def parse_labeled_set(self, filename, max_entries=None): '''Parse training file and output train and test sets in (text, label) format''' start = time.time() labeled_tweets = [] with io.open(filename, 'rt', encoding='utf-8', errors='replace') as csvfile: # with io.open(filename, 'rt', encoding='latin-1') as csvfile: # with io.open(filename, 'rt', encoding='utf-8') as csvfile: reader = csv.reader(csvfile) i = 0 # for label, text, score in reader: for row in reader: if max_entries and reader.line_num == max_entries: break # print(row[0], row[5]) sys.stderr.write("Loaded %d sentences\r" % (reader.line_num)) i += 1 tokenized_tweet = [w.lower() for w in word_tokenize(row[5])] # We are creating a list of training tokenized tweets # tokenized_tweet = [w.lower() for w in word_tokenize(text)] # We are creating a list of training tokenized tweets # print(row[1]) labeled_tweets.append((tokenized_tweet, row[0])) end = time.time() # print("time: ", end - start) print("Loaded {} sentences".format(i+1)) return labeled_tweets def get_all_words(self, tweets): # all_words = FreqDist(word.lower() for word,sent in tweets) all_words = [] for words, sentiment in tweets: all_words.extend(words) return all_words def get_word_features(self, words): # This method could be put outside the class, and the word_features variable # can be made more generic (e.g. a list of feature lists for bigrams, trigrams, etc.) 
self.word_features = FreqDist(word.lower() for word in words) # print(word_features.most_common(5)) # print(list(word_features)[:5]) # With NLTK 3 this would not output a sorted result return [w for w,f in self.word_features.most_common()] def extract_features(self, tweet): # Questo metodo utilizza word_features che viene creato fuori. Devo renderlo # piu modulare # tokenized_tweet = word_tokenize(tweet) # The tweet is already a tokenized list features = {} for word in self.word_features: features['contains({})'.format(word)] = word in set(tweet) return features def classify_nb(self, training_set, test_set, load_file=None, save_file=None): print("Training NaiveBayesClassifier") # pdb.set_trace() if load_file: nb_classifier = load_classifier(load_file) else: nb_classifier = NaiveBayesClassifier.train(training_set) # classifier2 = NaiveBayesClassifier.train(training_set2) # print("fine NaiveBayesClassifier.train(training_set)") # pdb.set_trace() print("Accuracy: ", accuracy(nb_classifier, test_set)) if save_file: save_classifier(nb_classifier, save_file) def save_classifier(classifier, filename): print("Saving", filename) with io.open(filename, 'wb') as storage_file: classifier = pickle.dump(classifier, storage_file) def load_classifier(filename): print("Loading", filename) with io.open(filename, 'rb') as storage_file: classifier = pickle.load(storage_file) return classifier def shuffle_csv(source_csv, output_csv): # This method is temporary. It can be used to overcome the limitations of a # training set whose rows are sorted by label, in case we want to use only its # first n rows. 
print("Shuffling", source_csv) with open(source_csv,'r', encoding='latin-1') as source: data = [(random.random(), line) for line in source] data.sort() with open(output_csv,'w') as target: for _, line in data: target.write(line) def main(): # Sentiment140 training set can be found at: http://help.sentiment140.com/for-students # sentiment140_train = '../../../sentiment140/training.1600000.processed.noemoticon.csv' sentiment140_train_shuffled = '../../../sentiment140/shuffled_training_ppp.csv' # training_tweets = parse_labeled_set("sentiment140/training.1600000.processed.noemoticon.csv", max_entries=20000) # training_tweets = parse_labeled_set("sentiment140/shuffled_training_ppp.csv", max_entries=2) sa = SentimentAnalyzer() # shuffle_csv('sentiment140/training.1600000.processed.noemoticon.csv', 'sentiment140/shuffled_training_ppp.csv') training_tweets = sa.parse_labeled_set(sentiment140_train_shuffled, max_entries=20000) testing_tweets = sa.parse_labeled_set("sentiment140/testdata.manual.2009.06.14.csv") all_words = sa.get_all_words(training_tweets) # global word_features # We declare word_features as global so that extract_features() can use it. 
sa.get_word_features(all_words) # sa.word_features = sa.get_word_features(all_words) # print(word_features) training_set = apply_features(sa.extract_features, training_tweets) # training_set2 = apply_features(extract_features, training_tweets2) test_set = apply_features(sa.extract_features, testing_tweets) # Aggiunto ora # pdb.set_trace() # classify_nb(training_tweets, testing_tweets) # classify_nb(training_set, test_set, load_file='nb_classifier.pickle') # classify_nb(training_set, test_set, save_file='stored_classifiers/nb_classifier_train-2.pickle') # classify_nb(training_set, test_set, save_file='nb_classifier_train-20000.pickle') start = time.time() sa.classify_nb(training_set, test_set, load_file='nb_classifier_train-20000.pickle') end = time.time() tot_time = end - start # print(tot_time) mins = int(tot_time / 60) secs = int(round(tot_time % 60)) # in Python 2.x round() will return a float, so we also convert it to int print('{} mins and {} secs'.format(mins, secs)) if __name__ == '__main__': main()
Python
0.999989
faaa206923b99f4d986a32ddfd854b234377e988
Add ena2fasta script
bin/ena2fasta.py
bin/ena2fasta.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Copyright [2009-2020] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import click from Bio import SeqIO @click.command() @click.argument('filename', type=click.File('r')) @click.argument('output', type=click.File('w')) def main(filename, output): """ Convert a ENA EMBL file into a fasta file suitable for ribotyper analysis. """ SeqIO.convert(filename, "embl", output, "fasta") if __name__ == '__main__': main()
Python
0
c913d1fc3ac24b2491783dc9c33f401180c7b4b0
test delivering with failures, refactor tests
tests/test_postman.py
tests/test_postman.py
from pytest import fixture from mock import Mock, call from mailthon.postman import Postman from mailthon.envelope import Envelope from mailthon.enclosure import PlainText from mailthon.headers import sender, to, subject @fixture def smtp(): smtp = Mock() smtp.return_value = smtp smtp.noop.return_value = (250, 'ok') smtp.sendmail.return_value = {} return smtp @fixture def envelope(): env = Envelope( headers=[sender('Me <me@mail.com>'), to('him@mail.com'), subject('subject')], enclosure=[PlainText('Hi!')], ) env.string = Mock(return_value='--email--') return env class TestPostman: host = 'smtp.mail.com' port = 587 @fixture def postman(self, smtp): postman = Postman(self.host, self.port) postman.transport = smtp return postman def test_connection(self, postman): with postman.connection() as conn: assert conn.mock_calls == [ call(self.host, self.port), call.ehlo(), ] def test_options(self, postman): postman.options = dict(timeout=0) with postman.connection() as conn: expected = call(self.host, self.port, timeout=0) assert conn.mock_calls[0] == expected def test_deliver(self, postman, envelope): with postman.connection() as conn: r = postman.deliver(conn, envelope) sendmail = call.sendmail( envelope.sender.encode(), [k.encode() for k in envelope.receivers], envelope.string(), ) noop = call.noop() conn.assert_has_calls([sendmail, noop], any_order=True) assert r.ok def test_deliver_with_failures(self, smtp, postman, envelope): smtp.sendmail.return_value = { 'addr': (255, 'something-bad'), } with postman.connection() as conn: r = postman.deliver(conn, envelope) assert not r.rejected['addr'].ok assert not r.ok def test_send(self, postman, smtp, envelope): postman.deliver = Mock() postman.send(envelope) assert postman.deliver.mock_calls == [ call(smtp, envelope) ] def test_use(self, postman): postman.use(lambda conn: conn.login('username', 'password')) with postman.connection() as smtp: assert smtp.login.mock_calls == [call('username', 'password')]
from pytest import fixture from mock import Mock, call from mailthon.postman import Postman from mailthon.envelope import Envelope from mailthon.enclosure import PlainText @fixture def smtp(): smtp = Mock() smtp.return_value = smtp smtp.noop.return_value = (250, 'ok') smtp.sendmail.return_value = {} return smtp @fixture def envelope(): env = Envelope( headers={ 'From': 'Me <me@mail.com>', 'To': 'him@mail.com', 'Subject': 'subject', }, enclosure=[ PlainText('Hi!'), ], ) env.string = Mock(return_value='--email--') return env class TestPostman: host = 'smtp.mail.com' port = 587 @fixture def postman(self, smtp): postman = Postman(self.host, self.port) postman.transport = smtp return postman def test_connection(self, postman): with postman.connection() as conn: assert conn.mock_calls == [ call(self.host, self.port), call.ehlo(), ] def test_options(self, postman): postman.options = dict(timeout=0) with postman.connection() as conn: expected = call(self.host, self.port, timeout=0) assert conn.mock_calls[0] == expected def test_deliver(self, postman, envelope): with postman.connection() as conn: r = postman.deliver(conn, envelope) calls = [ call.sendmail(envelope.sender.encode(), [k.encode() for k in envelope.receivers], envelope.string()), call.noop(), ] conn.assert_has_calls(calls, any_order=True) assert r.ok def test_send(self, postman, smtp, envelope): postman.deliver = Mock(return_value=1) assert postman.send(envelope) == 1 assert postman.deliver.mock_calls == [ call(smtp, envelope) ] def test_use(self, postman): postman.use(lambda conn: conn.login('username', 'password')) with postman.connection() as smtp: assert smtp.login.mock_calls == [call('username', 'password')]
Python
0.000001
c6af972ca6dfd6396b3f16e2e218263faffe16ab
Add run_mraic.py
bin/run_mraic.py
bin/run_mraic.py
#!/usr/bin/env python """ Name: run_mraic.py Author: Michael G. Harvey Date: 5 July 2013 Description: Run mraic.pl (Nylanderb 2004) on a folder of alignments in phylip/phyml format. Usage: python run_mraic.py mraic_dir in_dir out_dir python run_mraic.py /Users/michaelharvey/Applications/mraic /Users/michaelharvey/Desktop/pic/beast/deep_UCEs/77_loci_phylip ~/Desktop/mraic_out python run_mraic.py /Users/michaelharvey/Applications/mraic /Users/michaelharvey/Desktop/pic/beast/shallow_UCEs/Xm/orthologs/phylip ~/Desktop/mraic_UCE_shallow_out """ import os import sys import argparse def get_args(): parser = argparse.ArgumentParser( description="""Program description""") parser.add_argument( "mraic_dir", type=str, help="""The directory for mraic.pl""" ) parser.add_argument( "in_dir", type=str, help="""The output directory""" ) parser.add_argument( "out_dir", type=str, help="""The output directory""" ) return parser.parse_args() def main(): args = get_args() outfile = open("{0}/mraic_out.txt".format(args.out_dir), 'wb') files = list() prefiles = os.listdir("{0}".format(args.in_dir)) for prefile in prefiles: # Remove hidden files if not prefile.startswith('.'): files.append(prefile) os.chdir("{0}".format(args.mraic_dir)) for file in files: os.system("perl mraic.pl {0}/{1}".format(args.in_dir, file)) infile = open("{0}/{1}.MrAIC.txt".format(args.in_dir, file), 'r') for line in infile: if line.startswith("Minimum AICc model:"): parts = line.split() outfile.write("{0}\t{1}\n".format(file, parts[3])) infile.close() outfile.flush() outfile.close() if __name__ == '__main__': main()
Python
0.000005
2e43441e43942b2f24a89209cfdc56652523901d
Create test_pir.py
test/test_pir.py
test/test_pir.py
from ... if __name__ == "__main__": try : pir = PIR() pir.start() while 1 : print(pir.result()) except KeyboardInterrupt : pir.stop() pir.cleanup()
Python
0.000051
ed2548ca027b4fd062a10ddf2ce359d9115f40a4
add a __main__.py to nuitka works
borg/__main__.py
borg/__main__.py
from borg.archiver import main main()
Python
0.000037
8195278aa5044371c8fa7963be15169209e1b92a
Add new test to call neutron API and check results in dragonflow db.
dragonflow/tests/fullstack/test_neutron_api.py
dragonflow/tests/fullstack/test_neutron_api.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from dragonflow.common import common_params from dragonflow.common import exceptions as df_exceptions from neutron.common import config as common_config from neutron.tests import base from neutronclient.neutron import client import os_client_config from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import importutils cfg.CONF.register_opts(common_params.df_opts, 'df') def get_cloud_config(cloud='devstack-admin'): return os_client_config.OpenStackConfig().get_one_cloud(cloud=cloud) def credentials(cloud='devstack-admin'): """Retrieves credentials to run functional tests""" return get_cloud_config(cloud=cloud).get_auth_args() class TestNeutronAPIandDB(base.BaseTestCase): def setUp(self): super(TestNeutronAPIandDB, self).setUp() creds = credentials() tenant_name = creds['project_name'] auth_url = creds['auth_url'] + "/v2.0" self.neutron = client.Client('2.0', username=creds['username'], password=creds['password'], auth_url=auth_url, tenant_name=tenant_name) self.neutron.format = 'json' common_config.init(['--config-file', '/etc/neutron/neutron.conf']) db_driver_class = importutils.import_class(cfg.CONF.df.nb_db_class) self.db_driver = db_driver_class() self.db_driver.initialize(db_ip=cfg.CONF.df.remote_db_ip, db_port=cfg.CONF.df.remote_db_port) def test_create_network(self): test_network = 'mynetwork1' network = {'name': test_network, 'admin_state_up': True} network = 
self.neutron.create_network({'network': network}) if not network or not network['network']: self.fail("Failed to create network using neutron API") network_id = network['network']['id'] table = 'lswitch' try: value = self.db_driver.get_key(table, network_id) except df_exceptions.DBKeyNotFound: self.fail("Failed to create network using neutron API") return value2 = jsonutils.loads(value) if 'external_ids' in value2: if (value2['external_ids']['neutron:network_name'] == test_network): self.neutron.delete_network(network_id) return self.fail("Failed to find newly created network in Dragonflow DB")
Python
0.000006
fefe47a03337d072e47b439a940d7b6eeef56b93
Add OpenTracing example
example/opentracing.py
example/opentracing.py
# encoding=utf-8 import opentracing import instana import time # Loop continuously with a 2 second sleep to generate traces while True: entry_span = opentracing.tracer.start_span('universe') entry_span.set_tag('http.method', 'GET') entry_span.set_tag('http.url', '/users') entry_span.set_tag('span.kind', 'entry') intermediate_span = opentracing.tracer.start_span('nebula', child_of=entry_span) intermediate_span.finish() db_span = opentracing.tracer.start_span('black-hole', child_of=entry_span) db_span.set_tag('db.instance', 'users') db_span.set_tag('db.statement', 'SELECT * FROM user_table') db_span.set_tag('db.type', 'mysql') db_span.set_tag('db.user', 'mysql_login') db_span.set_tag('span.kind', 'exit') db_span.finish() intermediate_span = opentracing.tracer.start_span('space-dust', child_of=entry_span) intermediate_span.log_kv({'message': 'All seems ok'}) intermediate_span.finish() entry_span.set_tag('http.status_code', 200) entry_span.finish() time.sleep(2)
Python
0
f3448d0b37fdac8976a2cc8e4604a6cb2ea7a4ed
add vsmlib.utils which should have been part of commit 6238f235
vsmlib/utils.py
vsmlib/utils.py
def parse_signal_num_file(filename): signal_to_num = {} vsi_version = -1 try: with open(filename) as signal_to_num_file: lines = signal_to_num_file.readlines() for line in lines: line_stripped = line.strip() if vsi_version < 0: try: vsi_version = float(line_stripped) except ValueError as err: print("failed to parse VSI file version number from " \ "line: {}: {}".format(line, err), file=sys.stderr) exit(1) else: try: signal, signum_str = line_stripped.split(" ") signal = signal.strip() signum = int(signum_str.strip()) signal_to_num[signal] = signum except ValueError as err: print("malformed signal number file line: line: {}: " \ "{}".format(line, err), file=sys.stderr) exit(1) except Exception as file_err: print("failed to open signal number file: {}".format(file_err), file=sys.stderr) exit(1) return signal_to_num, vsi_version
Python
0
8bd66387ba5cd50dc0b545dc7b627792ed601faa
Add test
tests/context.py
tests/context.py
import os import sys sys.path.insert(0, os.path.abspath('..')) import gaend
Python
0.000005
59e546ae5afe22aab967e5376c8799e29ccbd86a
Add the basic version of my file comparison script
directoryFileContentCmp.py
directoryFileContentCmp.py
#! /usr/env/python import os import hashlib import sys bufsize = 65536 # Path1 = '/Users/kirkchambers/Desktop' # Path2 = '/Users/kirkchambers/DataSets' def generate_file_digests_for(path): path_set = set() for item in os.walk(path): (directory, _subdirectories, files) = item for file in files: if (file[0] == '.'): continue else: fqFilename = os.path.join(directory, file) path_set.add(generate_file_digest(fqFilename, file)) return path_set def generate_file_digest(fqFilename, shortFilename): hasher = hashlib.md5() with open(fqFilename, 'rb') as filestream: fileBuffer = filestream.read(bufsize) while len(fileBuffer) > 0: hasher.update(fileBuffer) fileBuffer = filestream.read(bufsize) # return "Filename:{file}\nHash:{hash}\nSize:{size}\n".format(file=fqFilename, hash=hasher.hexdigest(), size=os.path.getsize(fqFilename)) return (hasher.hexdigest(), fqFilename, os.path.getsize(fqFilename)) def usage(): print "file_list.py directory1 directory2" print "Prints out the files present in directory1 which are NOT present in directory2" if __name__ == "__main__": try: (_command, Path1, Path2) = sys.argv except: usage() exit(1) path_set_1 = generate_file_digests_for(Path1) path_set_2 = generate_file_digests_for(Path2) # union = path_set_1 | path_set_2 set_1_exclusives = path_set_1 - path_set_2 # set_2_exclusives = path_set_2 - path_set_1 # print "length of 1: {}".format(len(path_set_1)) # print "length of 2: {}".format(len(path_set_2)) # print "length of union: {}".format(len(union)) # print "length of set1 uniqs: {}".format(len(set_1_exclusives)) # print "length of set2 uniqs: {}".format(len(set_2_exclusives)) print "Files present in {path1} and not in {path2}:".format(path1=Path1, path2=Path2) for item in set_1_exclusives: print item[1]
Python
0
f4709d2cadfcb5fde3b5a19664e6e316632bd6f7
Add smile_followers module
smile_followers/tools/mail_followers.py
smile_followers/tools/mail_followers.py
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2014 Toyota Industrial Equipment SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import SUPERUSER_ID def _get_args(self, args, kwargs): if hasattr(self, 'env'): cr, uid, context = self.env.args ids = self.ids vals = args[0] else: cr, uid = args[:2] if isinstance(args[2], dict): ids, vals = [], args[2] index = 2 else: ids, vals = args[2:4] index = 3 context = {} if index + 1 < len(args): context = args[index + 1] context = context or kwargs.get('context') or {} return cr, uid, ids, vals, context def _special_wrapper(self, method, fields, *args, **kwargs): # Remove followers linked to old partner cr, uid, ids, vals, context = _get_args(self, args, kwargs) follower_obj = self.pool['mail.followers'] for field in fields: if vals.get(field) and ids: follower_ids = follower_obj.search(cr, uid, [ ('res_model', '=', self._name), ('res_id', 'in', ids), ('partner_id.parent_id', 'in', [getattr(r, field).id for r in self]) ], context) follower_obj.browse(cr, uid, follower_ids, context).unlink() res = method(self, *args, **kwargs) # Add followers linked to new partner for field in fields: field_to_recompute = field in 
self.pool.pure_function_fields if not field_to_recompute: for expr in self._fields[field].depends: if expr.split('.')[0] in vals: field_to_recompute = True if field in vals or field_to_recompute: if hasattr(res, 'ids'): ids = res.ids records = self.pool[self._name].browse(cr, uid, ids, context) notification_filter = lambda c: self._name in [m.model for m in c.notification_model_ids] for record in records: for contact in getattr(record, field).child_ids.filtered(notification_filter): follower_obj.create(cr, SUPERUSER_ID, { 'res_model': self._name, 'res_id': record.id, 'partner_id': contact.id, }, context) return res def add_followers(fields=['partner_id']): def decorator(create_or_write): def add_followers_wrapper(self, *args, **kwargs): return _special_wrapper(self, create_or_write, fields, *args, **kwargs) return add_followers_wrapper return decorator def _add_followers(fields=['partner_id']): def add_followers_wrapper(self, *args, **kwargs): return _special_wrapper(self, add_followers_wrapper.origin, fields, *args, **kwargs) return add_followers_wrapper def AddFollowers(fields=['partner_id']): def decorator(original_class): def _register_hook(self, cr): model_obj = self.pool.get(self._name) for method_name in ('create', 'write'): method = getattr(model_obj, method_name) if method.__name__ != 'add_followers_wrapper': model_obj._patch_method(method_name, _add_followers(fields)) return super(original_class, self)._register_hook(cr) original_class._register_hook = _register_hook return original_class return decorator
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2014 Toyota Industrial Equipment SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import api from openerp.tools.func import wraps def AddFollowers(fields=['partner_id']): def decorator(original_class): original_class.create = api.model(add_followers(fields)(original_class.create)) original_class.write = api.multi(add_followers(fields)(original_class.write)) return original_class return decorator def add_followers(fields=['partner_id']): def decorator(create_or_write): @wraps(create_or_write) def wrapper(self, vals): follower_obj = self.env['mail.followers'] for field in fields: # Remove followers linked to old partner if vals.get(field) and self.ids: follower_obj.search([ ('res_model', '=', self._name), ('res_id', 'in', self.ids), ('partner_id.parent_id', 'in', [getattr(r, field).id for r in self]) ]).unlink() res = create_or_write(self, vals) for field in fields: # Add followers linked to new partner if vals.get(field): record_ids = self.ids or [res.id] notification_filter = lambda c: self._name in [m.model for m in c.notification_model_ids] for contact in 
self.env['res.partner'].browse(vals[field]).child_ids.filtered(notification_filter): for record_id in record_ids: follower_obj.sudo().create({ 'res_model': self._name, 'res_id': record_id, 'partner_id': contact.id, }) return res return wrapper return decorator
Python
0.000001
345758259d9ee80826758373c3970db1c28a870b
Bump development version
djangocms_blog/__init__.py
djangocms_blog/__init__.py
__version__ = '0.3.a3'
__version__ = '0.3.a2'
Python
0
4af2a6a62e4be78bd20550c3ae5089c51b4fec62
add separate function for pagination
students/utils.py
students/utils.py
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger def paginate(objects, size, request, context, var_name='object_list'): """Paginate objects provided by view. This function takes: * list of elements; * number of objects per page; * request object to get url parameters from; * context to set new variables into; * var_name - variable name for list of objects. It returns updated context object. """ # apply pagination paginator = Paginator(objects, size) # try to get page number from request page = request.GET.get('page', '1') try: object_list = paginator.page(page) except PageNotAnInteger: # if page is not an integer, deliver first page object_list = paginator.page(1) except EmptyPage: # if page is out of range (e.g. 9999), # deliver last page of results object_list = paginator.page(paginator.num_pages) # set variables into context context[var_name] = object_list context['is_paginated'] = object_list.has_other_pages() context['page_obj'] = object_list context['paginator'] = paginator return context
Python
0.000007
0ee1150e1f8f0c8cae7b906c4d349b8867bbe0b4
Add dmrg example
examples/dmrg/30-dmrg_casscf_nevpt2_for_Cr2.py
examples/dmrg/30-dmrg_casscf_nevpt2_for_Cr2.py
#!/usr/bin/env python import numpy from pyscf import gto from pyscf import scf from pyscf import mcscf from pyscf.mrpt.nevpt2 import sc_nevpt from pyscf.dmrgscf.dmrgci import DMRGSCF # # This calculation requires about 10 GB memory per processor. # b = 1.5 mol = gto.Mole() mol.verbose = 5 mol.output = 'cr2-%3.2f.out' % b mol.max_memory = 70000 mol.atom = [ ['Cr',( 0.000000, 0.000000, -b/2)], ['Cr',( 0.000000, 0.000000, b/2)], ] mol.basis = {'Cr': 'ccpvdz-dk'} mol.symmetry = True mol.build() m = scf.sfx2c1e(scf.RHF(mol)) m.conv_tol = 1e-9 m.chkfile = 'hf_chk-%s'%b m.level_shift = 0.5 m.kernel() dm = m.make_rdm1() m.level_shift = 0 m.scf(dm) mc = DMRGSCF(m, 20, 28) # 20o, 28e mc.fcisolver.maxM = 1000 mc.fcisolver.tol = 1e-6 mc.chkfile = 'mc_chk_18o-%s'%b cas_occ = {'A1g':4, 'A1u':4, 'E1ux':2, 'E1uy':2, 'E1gx':2, 'E1gy':2, 'E2ux':1, 'E2uy':1, 'E2gx':1, 'E2gy':1} mo = mc.sort_mo_by_irrep(cas_occ) mc.kernel(mo) # # DMRG-NEVPT2 # sc_nevpt(mc)
Python
0
1830c24988fccd7069bb4f9d4c66940ce623425f
add execute apcupsd cgi sample
execute_apcupsd_cgi.py
execute_apcupsd_cgi.py
from http.server import CGIHTTPRequestHandler, test import os def main(): # http://stackoverflow.com/questions/11419572/how-to-set-the-documentroot-while-using-pythons-httpserver os.chdir(r"C:\apcupsd") # ディレクトリ名の前の`/`を付け忘れると正常に動作しない CGIHTTPRequestHandler.cgi_directories = ["/cgi"] test(HandlerClass=CGIHTTPRequestHandler, port=8080) if __name__ == "__main__": main()
Python
0
c3afc6c28530c3dfc3bd57d9a1841a60bf92ba4f
Fix bug which caused page cyclers to always clear cache before load.
tools/perf/benchmarks/netsim_top25.py
tools/perf/benchmarks/netsim_top25.py
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry import test from perf_tools import page_cycler class NetsimTop25(test.Test): """Measures load time of the top 25 sites under simulated cable network.""" test = page_cycler.PageCycler page_set = 'tools/perf/page_sets/top_25.json' options = { 'extra_wpr_args': [ '--shaping_type=proxy', '--net=cable' ], 'pageset_repeat': '5', } def __init__(self): super(NetsimTop25, self).__init__() self.test.clear_cache_before_each_run = True
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry import test from perf_tools import page_cycler class NetsimTop25(test.Test): """Measures load time of the top 25 sites under simulated cable network.""" test = page_cycler.PageCycler test.clear_cache_before_each_run = True page_set = 'tools/perf/page_sets/top_25.json' options = { 'extra_wpr_args': [ '--shaping_type=proxy', '--net=cable' ], 'pageset_repeat': '5', }
Python
0.000008
00a99f80f555ae23aeefa8b8fe284f74c28f4252
add update example
scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir_update.py
scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir_update.py
import stl_path from trex_stl_lib.api import * import imp import time import json from pprint import pprint import argparse # IMIX test # it maps the ports to sides # then it load a predefind profile 'IMIX' # and attach it to both sides and inject # at a certain rate for some time # finally it checks that all packets arrived def imix_test (server): # create client c = STLClient(server = server) passed = True try: # connect to server c.connect() # take all the ports c.reset() dir_0 = [0] dir_1 = [1] print "Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1) # load IMIX profile profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'imix.py') profile1 = STLProfile.load_py(profile_file, direction=0) profile2 = STLProfile.load_py(profile_file, direction=1) stream1 = profile1.get_streams() stream2 = profile2.get_streams() # add both streams to ports c.add_streams(stream1, ports = dir_0) c.add_streams(stream2, ports = dir_1) # clear the stats before injecting c.clear_stats() c.start(ports = (dir_0 + dir_1), mult = "100kpps", total = True) while True: for rate in range(200,1000,10): # choose rate and start traffic for 10 seconds on 5 mpps #mult = "30%" my_mult = ("%dkpps"%rate) print "Injecting {0} <--> {1} on total rate of '{2}' ".format(dir_0, dir_1, my_mult) c.clear_stats() c.update(ports = (dir_0 + dir_1), mult = my_mult) time.sleep(2); # block until done #c.wait_on_traffic(ports = (dir_0 + dir_1)) # read the stats after the test stats = c.get_stats() # use this for debug info on all the stats pprint(stats) # sum dir 0 dir_0_opackets = sum([stats[i]["opackets"] for i in dir_0]) dir_0_ipackets = sum([stats[i]["ipackets"] for i in dir_0]) # sum dir 1 dir_1_opackets = sum([stats[i]["opackets"] for i in dir_1]) dir_1_ipackets = sum([stats[i]["ipackets"] for i in dir_1]) lost_0 = dir_0_opackets - dir_1_ipackets lost_1 = dir_1_opackets - dir_0_ipackets print "\nPackets injected from {0}: {1:,}".format(dir_0, dir_0_opackets) print "Packets injected from {0}: 
{1:,}".format(dir_1, dir_1_opackets) print "\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_1, lost_0) print "packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_0, lost_1) if (lost_0 <= 0) and (lost_1 <= 0): # less or equal because we might have incoming arps etc. passed = True else: passed = False except STLError as e: passed = False print e finally: c.disconnect() if passed: print "\nTest has passed :-)\n" else: print "\nTest has failed :-(\n" parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic") parser.add_argument('-s', '--server', dest='server', help='Remote trex address', default='127.0.0.1', type = str) args = parser.parse_args() # run the tests imix_test(args.server)
Python
0
b5c21a5eeb8894ae93290c4c78fa23e5207bc0b3
Create Flaskapp.wsgi
Flaskapp.wsgi
Flaskapp.wsgi
#!/usr/bin/python import sys import logging logging.basicConfig(stream=sys.stderr) sys.path.insert(0,"/var/www/FlaskApp/") from FlaskApp import app as application application.secret_key = 'Add your secret key'
Python
0.000206
3c2f3baa1a76d386d5604c0c1dc8d4f3a33b11ad
Create Helloworld.py
Helloworld.py
Helloworld.py
print('Hello World!')
Python
0.000446
13e4fbdf81c9012ff82081d6030191be226d716c
Add script for verifying commit message format.
tools/verifygitlog.py
tools/verifygitlog.py
#!/usr/bin/env python3 import re import subprocess import sys verbosity = 0 # Show what's going on, 0 1 or 2. suggestions = 1 # Set to 0 to not include lengthy suggestions in error messages. def verbose(*args): if verbosity: print(*args) def very_verbose(*args): if verbosity > 1: print(*args) def git_log(pretty_format, *args): # Delete pretty argument from user args so it doesn't interfere with what we do. args = ["git", "log"] + [arg for arg in args if "--pretty" not in args] args.append("--pretty=format:" + pretty_format) very_verbose("git_log", *args) # Generator yielding each output line. for line in subprocess.Popen(args, stdout=subprocess.PIPE).stdout: yield line.decode().rstrip("\r\n") def verify(sha): verbose("verify", sha) errors = [] warnings = [] def error_text(err): return "commit " + sha + ": " + err def error(err): errors.append(error_text(err)) def warning(err): warnings.append(error_text(err)) # Author and committer email. for line in git_log("%ae%n%ce", sha, "-n1"): very_verbose("email", line) if "noreply" in line: error("Unwanted email address: " + line) # Message body. raw_body = list(git_log("%B", sha, "-n1")) if not raw_body: error("Message is empty") return errors, warnings # Subject line. subject_line = raw_body[0] very_verbose("subject_line", subject_line) if not re.match(r"^[^!]+: [A-Z]+.+ .+\.$", subject_line): error("Subject line should contain ': ' and end in '.': " + subject_line) if len(subject_line) >= 73: error("Subject line should be 72 or less characters: " + subject_line) # Second one divides subject and body. if len(raw_body) > 1 and raw_body[1]: error("Second message line should be empty: " + raw_body[1]) # Message body lines. 
for line in raw_body[2:]: if len(line) >= 76: error("Message lines should be 75 or less characters: " + line) if not raw_body[-1].startswith("Signed-off-by: ") or "@" not in raw_body[-1]: warning("Message should be signed-off") return errors, warnings def run(args): verbose("run", *args) has_errors = False has_warnings = False for sha in git_log("%h", *args): errors, warnings = verify(sha) has_errors |= any(errors) has_warnings |= any(warnings) for err in errors: print("error:", err) for err in warnings: print("warning:", err) if has_errors or has_warnings: if suggestions: print("See https://github.com/micropython/micropython/blob/master/CODECONVENTIONS.md") else: print("ok") if has_errors: sys.exit(1) def show_help(): print("usage: verifygitlog.py [-v -n -h] ...") print("-v : increase verbosity, can be speficied multiple times") print("-n : do not print multi-line suggestions") print("-h : print this help message and exit") print("... : arguments passed to git log to retrieve commits to verify") print(" see https://www.git-scm.com/docs/git-log") print(" passing no arguments at all will verify all commits") print("examples:") print("verifygitlog.py -n10 # Check last 10 commits") print("verifygitlog.py -v master..HEAD # Check commits since master") if __name__ == "__main__": args = sys.argv[1:] verbosity = args.count("-v") suggestions = args.count("-n") == 0 if "-h" in args: show_help() else: args = [arg for arg in args if arg not in ["-v", "-n", "-h"]] run(args)
Python
0.000153
ee614036b45e9f10f680cef56a5eaa2d86c424fb
Create cybercrimeatmtracker.py
plugins/feeds/public/cybercrimeatmtracker.py
plugins/feeds/public/cybercrimeatmtracker.py
import re import logging from dateutil import parser from datetime import datetime, timedelta from core.observables import Hash from core.feed import Feed from core.errors import ObservableValidationError class CybercrimeAtmTracker(Feed): default_values = { 'frequency': timedelta(hours=1), 'name': 'CybercrimeAtmTracker', 'source': 'http://atm.cybercrime-tracker.net/rss.php', 'description': 'CyberCrime ATM Tracker - Latest 40 CnC URLS', } def update(self): for item in self.update_xml( 'item', ['title', 'link', 'pubDate', 'description']): self.analyze(item) def analyze(self, item): observable_sample = item['title'] context_sample = {} context_sample['description'] = 'ATM sample' context_sample['date_added'] = parser.parse(item['pubDate']) context_sample['source'] = self.name family = False if ' - ' in observable_sample: family, observable_sample = observable_sample.split(' - ') try: sample = Hash.get_or_create(value=observable_sample) sample.add_context(context_sample) sample.add_source('feed') sample_tags = ['atm'] if family: sample_tags.append(family) sample.tag(sample_tags) except ObservableValidationError as e: logging.error(e) return
Python
0.000156
ece838042acd75ba7edde833856ac02e4efe9977
Create PPTconnect.py
PPTconnect.py
PPTconnect.py
from TwitterAPI import TwitterAPI import win32com.client from MSO import * # Open PowerPoint Application = win32com.client.Dispatch("PowerPoint.Application") # Add a presentation Presentation = Application.Presentations.Add() # Go to http://dev.twitter.com and create an app. # The consumer key and secret will be generated for you after consumer_key = "e1WhbINIG0betPfLmm16g" consumer_secret = "JVU8Rhrq9QANJX8rybNhWhEKhqMrU4yqC7yvU2Gxh0" # After the step above, you will be redirected to your app's page. # Create an access token under the the "Your access token" section access_token_key = "14888261-5JLox5DCiHe7iQRPdJaTb93syK9W8DqZotMy8V5OF" access_token_secret ="Ws1dUSp5eApbtPggPtOn276t5fM1LgnHiFyVWaylbKsKP" # Create a Twitter client twitter = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret) ##for tweet in results.get_iterator(): ## print (tweet['id'], tweet['text']) def draw_tweet(Base, item, pos): y = 40 + (pos % 4) * 120 image = Base.Shapes.AddPicture( # To get the larger resolution image, just remove _normal from the URL item['user']['profile_image_url'].replace('_normal', ''), LinkToFile=True, SaveWithDocument=False, Left=20, Top=y, Width=100, Height=100) try: status = item['text'].encode('cp1252') except UnicodeEncodeError: status = item['text'] text = Base.Shapes.AddShape(1, 130, y, 460, 100) text.Fill.ForeColor.ObjectThemeColor = 2 text.Fill.ForeColor.Brightness = +0.95 text.Line.Visible = False text.TextFrame.TextRange.Text = status text.TextFrame.TextRange.Font.Color.ObjectThemeColor = 3 text.TextFrame.TextRange.ParagraphFormat.Alignment = 1 user = Base.Shapes.AddShape(9, 600, y, 100, 100) user.Fill.ForeColor.ObjectThemeColor = 4 user.Line.Visible = False user.TextFrame.TextRange.Text = '@' + item['user']['screen_name'] Base = Presentation.Slides.Add(1, 12) #query = {'q' : 'Top Chef', 'lang' : 'es', 'count': 100} results = twitter.request('statuses/filter', {'track': 'blue'}) ##for tweet in results.get_iterator(): ## print 
(tweet['id'], tweet['text']) for pos, item in enumerate(results.get_iterator()): draw_tweet(Base, item, pos) if pos > 20: break
Python
0
65029a09af9dcafc156a5a0632a63e3cf4b6c50d
add benchmark to compare to lasagne
benchmarks/lag_task_lasgne.py
benchmarks/lag_task_lasgne.py
from __future__ import division, absolute_import from __future__ import print_function, unicode_literals import numpy as np import theano import theano.tensor as T import lasagne fX = theano.config.floatX # ################################## config ################################## N_TRAIN = 1000 LAG = 10 LENGTH = 50 HIDDEN_STATE_SIZE = 10 BATCH_SIZE = 64 # ############################### prepare data ############################### def binary_toy_data(lag=1, length=20): inputs = np.random.randint(0, 2, length).astype(fX) outputs = np.array(lag * [0] + list(inputs), dtype=fX)[:length] return inputs, outputs def minibatch(lag, length, batch_size): inputs = [] outputs = [] for _ in range(batch_size): i, o = binary_toy_data(lag, length) inputs.append(i) outputs.append(o) return np.array(inputs)[..., np.newaxis], np.array(outputs)[..., np.newaxis] # ############################## prepare model ############################## l = lasagne.layers.InputLayer(shape=(None, None, 1)) l = lasagne.layers.LSTMLayer(l, num_units=HIDDEN_STATE_SIZE, grad_clipping=1, learn_init=True) l = lasagne.layers.ReshapeLayer(l, shape=(-1, HIDDEN_STATE_SIZE)) l = lasagne.layers.DenseLayer(l, num_units=1, nonlinearity=lasagne.nonlinearities.sigmoid) in_var = T.tensor3() targets = T.tensor3() outputs = lasagne.layers.get_output(l, in_var).reshape(in_var.shape) loss = T.mean((targets - outputs) ** 2) all_params = lasagne.layers.get_all_params(l) updates = lasagne.updates.adam(loss, all_params) train_fn = theano.function([in_var, targets], [loss], updates=updates) valid_fn = theano.function([in_var], [outputs]) # ################################# training ################################# print("Starting training...") import time st = time.time() for i in range(N_TRAIN): inputs, outputs = minibatch(lag=LAG, length=LENGTH, batch_size=BATCH_SIZE) loss = train_fn(inputs, outputs)[0] print(loss) print("total_time: %s" % (time.time() - st)) inputs, outputs = minibatch(lag=LAG, length=LENGTH, 
batch_size=BATCH_SIZE) pred = valid_fn(inputs)[0] pred_accuracies = (np.round(pred) == outputs).mean(axis=0)[LAG:] print(pred_accuracies) print(pred_accuracies.mean())
Python
0
33abec38e82e132a6e192d5ae0535b84d8aa47f4
add import script for Poole
polling_stations/apps/data_collection/management/commands/import_poole.py
polling_stations/apps/data_collection/management/commands/import_poole.py
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter class Command(BaseXpressDemocracyClubCsvImporter): council_id = 'E06000029' addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.CSV' stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.CSV' elections = ['parl.2017-06-08']
Python
0
b6b92e278202c27b124909aa5352726799d8d162
add stack with max python solution
08-stack-n-queue/8.1-stack-with-max/python/stackMax.py
08-stack-n-queue/8.1-stack-with-max/python/stackMax.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Fri Aug 18 10:08:25 2017 @author: LiuQianKevin """ class Stack: class cache: def __init__(self, _max = -float('inf'), count = 0): self.max = _max; self.count = count; def __init__(self): self._element = []; self._maxCache =[]; def push(self, x): #update elemetn self._element.append(x); #update cache #if x larger than maxchache[-1], or maxcheche empty, add if(not self._maxCache or x > self._maxCache[-1].max): self._maxCache.append(self.cache(x, 1)); #if x equal to maxcache[-1].max, cout += 1 elif(x == self._maxCache[-1].max): self._maxCache[-1].count += 1; #if x larger than maxchache[-1].max, do nothing def pop(self): #update element result = self._element.pop(); #update cache #if result < maxCache[-1].max, no update #if result == ---------------, cout -= 1, if cout == 0, pop it if(result == self.max()): self._maxCache[-1].count -= 1; if(self._maxCache[-1].count == 0): self._maxCache.pop(); return result; def empty(self): return not self._element; def max(self): return self._maxCache[-1].max; def main(): s = Stack() s.push(1) s.push(2) assert s.max() == 2 print(s.max()) # 2 print(s.pop()) # 2 assert s.max() == 1 print(s.max()) # 1 s.push(3) s.push(2) assert s.max() == 3 print(s.max()) # 3 s.pop() assert s.max() == 3 print(s.max()) # 3 s.pop() assert s.max() == 1 print(s.max()) # 1 s.pop() try: s.max() s.pop() s.pop() s.pop() s.pop() except IndexError as e: print(e) if __name__ == '__main__': main()
Python
0
f78f74d836d2eca1cafe3b6401b5c8d13e6d139b
Fix type1/type2
geotrek/tourism/migrations/0004_auto_20190328_1339.py
geotrek/tourism/migrations/0004_auto_20190328_1339.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.14 on 2019-03-28 12:39 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('tourism', '0003_auto_20190306_1417'), ] operations = [ migrations.AlterField( model_name='touristiccontent', name='type1', field=models.ManyToManyField(blank=True, db_table=b't_r_contenu_touristique_type1', related_name='contents1', to='tourism.TouristicContentType1', verbose_name='Type 1'), ), migrations.AlterField( model_name='touristiccontent', name='type2', field=models.ManyToManyField(blank=True, db_table=b't_r_contenu_touristique_type2', related_name='contents2', to='tourism.TouristicContentType2', verbose_name='Type 2'), ), ]
Python
0.999983
ef751116c597dfbd12eac849dd0cb3ca02eb6315
Add SettingsFromCategoryModel that lists settings in a certain category
UM/Qt/Bindings/SettingsFromCategoryModel.py
UM/Qt/Bindings/SettingsFromCategoryModel.py
from PyQt5.QtCore import Qt, QCoreApplication, pyqtSlot from UM.Qt.ListModel import ListModel from UM.Settings.Setting import Setting from UM.Resources import Resources from UM.Application import Application class SettingsFromCategoryModel(ListModel): NameRole = Qt.UserRole + 1 TypeRole = Qt.UserRole + 2 ValueRole = Qt.UserRole + 3 ValidRole = Qt.UserRole + 4 KeyRole = Qt.UserRole + 5 OptionsRole = Qt.UserRole + 6 UnitRole = Qt.UserRole + 7 DescriptionRole = Qt.UserRole + 8 def __init__(self, category, parent = None): super().__init__(parent) self._category = category self._updateSettings() self.addRoleName(self.NameRole, "name") self.addRoleName(self.TypeRole,"type") self.addRoleName(self.ValueRole,"value") self.addRoleName(self.ValidRole,"valid") self.addRoleName(self.KeyRole,"key") self.addRoleName(self.OptionsRole,"options") self.addRoleName(self.UnitRole,"unit") self.addRoleName(self.DescriptionRole, "description") ## Triggred by setting if it has a conditional activation #def handleActiveChanged(self, key): #temp_setting = self._machine_settings.getSettingByKey(key) #if temp_setting is not None: #index = self._find(self.items,"key",temp_setting.getKey()) #if index != -1: #self.setProperty(index, 'disabled', (temp_setting.checkAllChildrenVisible() or not temp_setting.isActive())) #self.setProperty(index, 'visibility', (temp_setting.isVisible() and temp_setting.isActive())) #for child_setting in temp_setting.getAllChildren(): #index = self._find(self.items,"key",child_setting.getKey()) #if index != -1: #self.setProperty(index, 'disabled', (child_setting.checkAllChildrenVisible() or not child_setting.isActive())) #self.setProperty(index, 'visibility', (child_setting.isVisible() and child_setting.isActive())) @pyqtSlot(int, str, str) ## Notification that setting has changed. 
def setSettingValue(self, index, key, value): if self._category.getSettingByKey(key) is not None: self._category.getSettingByKey(key).setValue(value) self.setProperty(index,'valid', self.isValid(key)) @pyqtSlot(str,result=int) ## Check if the entered value of the setting is valid (warning/error) # \returns error key. def isValid(self,key): if self._category.getSettingByKey(key) is not None: return self._category.getSettingByKey(key).validate() return 5 ## Create model for combo box (used by enum type setting) # \param options List of strings # \return ListModel with "text":value pairs def createOptionsModel(self, options): model = ListModel() model.addRoleName(self.NameRole,"text") for option in options: model.appendItem({"text":str(option)}) return model @pyqtSlot(str,bool) ## Set the visibility of a setting. # Note that this might or might not effect the disabled property aswel! # \param key Key of the setting that is affected # \param visibility Visibility of the setting. def setVisibility(self, key, visibility): setting = self._machine_settings.getSettingByKey(key) if setting is not None: setting.setVisible(visibility) for index in range(0,len(self.items)): temp_setting = self._machine_settings.getSettingByKey(self.items[index]["key"]) if temp_setting is not None: self.setProperty(index, 'disabled', temp_setting.checkAllChildrenVisible()) self.setProperty(index, 'visibility', temp_setting.isVisible()) self.setProperty(index, 'value', temp_setting.getValue()) # Convenience function that finds the index in a list of dicts based on key value pair def _find(self,lst, key, value): for i, dic in enumerate(lst): if dic[key] == value: return i return -1 def _updateSettings(self): for setting in self._category.getAllSettings(): if setting.isVisible() and setting.isActive(): self.appendItem({ "name": setting.getLabel(), "description": setting.getDescription(), "type": setting.getType(), "value": setting.getValue(), "valid": setting.validate(), "key": setting.getKey(), 
"options": self.createOptionsModel(setting.getOptions()), "unit": setting.getUnit() }) #setting.visibleChanged.connect(self._onSettingVisibleChanged) #setting.activeChanged.connect(self.handleActiveChanged)
Python
0
f29a0845bc0983e18ce6484543b206dfb3091818
Add easier way to import cv2
vision/opencv.py
vision/opencv.py
import sys sys.path.append('lib/opencv/build/lib') import cv2
Python
0.000003
2848955e59b5106ffe48c4ebfa05095a6be460e5
Add visual script
visual/visual.py
visual/visual.py
#!/usr/bin/env python3 import re import numpy as np import matplotlib.pyplot as plt import unittest class Parser: '''Wta log parser''' def __init__(self): '''Open log file''' self._pattern = re.compile( r'^ra=((?:[0-9a-fA-F]{2}:){5}(?:[0-9a-fA-F]{2}))[ \t]+' + r'ta=((?:[0-9a-fA-F]{2}:){5}(?:[0-9a-fA-F]{2}))[ \t]+' + r'tsf=([0-9]+)[ \t]+' + r'seq=([0-9]+)[ \t]+' + r'rssi=(-[0-9]+)$') def _match(self, line, ra, ta): match = self._pattern.match(line) if not match: return None if ra == match.group(1) and ta == match.group(2): return (match.group(1), match.group(2), int(match.group(3)), int(match.group(5))) def getRecords(self, path, ra, ta): f = open(path) records = [] for line in self.f.lines(): r = _match(line, ra, ta) if r: records.append(r) return records class ParserTest(unittest.TestCase): '''Parser's unit test class''' def test_match(self): line = "ra=00:4b:69:6e:73:30 ta=c8:93:46:a3:8e:74 tsf=1473507516 seq=28769 rssi=-60" ra = "00:4b:69:6e:73:30" ta = "c8:93:46:a3:8e:74" tsf = 1473507516 rssi = -60 p = Parser() r = p._match(line, ra, ta) self.assertTrue(r is not None) self.assertEqual(r[0], ra) self.assertEqual(r[1], ta) self.assertEqual(r[2], tsf) self.assertEqual(r[3], rssi) def main(): pass if __name__ == "__main__": main()
Python
0.000001
b5cc83a705eaa22872d304b92c7b6e57b5581604
Add unit-test for "readbytes_multiple"
puresnmp/test/test/test_helpers.py
puresnmp/test/test/test_helpers.py
''' Tests for unit-test helpers ''' from textwrap import dedent from binascii import hexlify import puresnmp.test as th from io import StringIO def test_readbytes_multiple(): data = StringIO(dedent( '''\ # # This is a comment # 30 2d 02 01 01 04 07 70 72 69 76 61 74 65 a2 1f 0-.....private.. ---- 30 2d 02 01 01 04 07 70 72 69 76 61 74 65 a2 1f 0-.....private.. ---- 30 2e 02 01 01 04 07 70 72 69 76 61 74 65 a2 20 0......private. ''' )) expected = [ b'\x30\x2d\x02\x01\x01\x04\x07\x70\x72\x69\x76\x61\x74\x65\xa2\x1f', b'\x30\x2d\x02\x01\x01\x04\x07\x70\x72\x69\x76\x61\x74\x65\xa2\x1f', b'\x30\x2e\x02\x01\x01\x04\x07\x70\x72\x69\x76\x61\x74\x65\xa2\x20', ] result = list(th.readbytes_multiple(data)) assert result == expected
Python
0.000002
f434e45b58bfa7001d21d1920a65903f941df833
Add __main__.py so that the package can be executed by `python -m jiebarpc` [ciskip]
jiebarpc/__main__.py
jiebarpc/__main__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import sys import argparse from jiebarpc import JiebaRPCServer, JiebaRPCDispatcher def main(host, port, processnum=1): server = JiebaRPCServer(JiebaRPCDispatcher(processnum)) server.listen(host, port) server.start() return 0 if __name__ == '__main__': parser = argparse.ArgumentParser( 'python -m jiebarpc', description='Run jiebarpc server' ) parser.add_argument('-n', '--processnum', type=int, default=1, help='How many processes to use.') parser.add_argument('address', help='Server listen address like localhost:8888',) ns = parser.parse_args() address = ns.address.split(':') host = address[0] port = int(address[1]) sys.exit(main(host, port, ns.processnum))
Python
0.000001
5d297710416ebaea3a79e1ded0604d53178c493a
add python solution for Project Euler problem 1
python_challenges/project_euler/problem_1.py
python_challenges/project_euler/problem_1.py
__author__ = 'tilmannbruckhaus' def divisible_by_3_or_5(i): divisible = i % 3 == 0 or i % 5 == 0 # print("natural number:", i, "is divisible:", divisible) return divisible def sum_of_multiples_of_3_or_5(limit): # If we list all the natural numbers below 10 that are multiples of 3 or 5, # we get 3, 5, 6 and 9. The sum of these multiples is 23. # Find the sum of all the multiples of 3 or 5 below 1000. multi_sum = 0 for limit in range(limit): if divisible_by_3_or_5(limit): multi_sum += limit return multi_sum for test_limit in [10, 1000]: print sum_of_multiples_of_3_or_5(test_limit)
Python
0.998958
1db14473edff479f97703fb68cb1aa8d65c25023
Add Python benchmark
lib/node_modules/@stdlib/math/base/special/exp/benchmark/python/benchmark.py
lib/node_modules/@stdlib/math/base/special/exp/benchmark/python/benchmark.py
#!/usr/bin/env python """Benchmark exp.""" import timeit name = "exp" repeats = 3 iterations = 1000000 def print_version(): """Print the TAP version.""" print("TAP version 13") def print_summary(total, passing): """Print the benchmark summary. # Arguments * `total`: total number of tests * `passing`: number of passing tests """ print("#") print("1.." + str(total)) # TAP plan print("# total " + str(total)) print("# pass " + str(passing)) print("#") print("# ok") def print_results(elapsed): """Print benchmark results. # Arguments * `elapsed`: elapsed time (in seconds) # Examples ``` python python> print_results(0.131009101868) ``` """ rate = iterations / elapsed print(" ---") print(" iterations: " + str(iterations)) print(" elapsed: " + str(elapsed)) print(" rate: " + str(rate)) print(" ...") def benchmark(): """Run the benchmark and print benchmark results.""" setup = "from math import exp; from random import random;" stmt = "y = exp(100.0*random() - 50.0)" t = timeit.Timer(stmt, setup=setup) print_version() for i in xrange(3): print("# python::" + name) elapsed = t.timeit(number=iterations) print_results(elapsed) print("ok " + str(i+1) + " benchmark finished") print_summary(repeats, repeats) def main(): """Run the benchmark.""" benchmark() if __name__ == "__main__": main()
Python
0.000138
d1f4e257b449b6993e0cdc87055113018b6efabb
Create promoter_bin.py
code_collection/promoter_bin.py
code_collection/promoter_bin.py
import sys peak=[] with open(sys.argv[1],'r') as f: for line in f: line=line.strip('\n').split('\t') peak.append(int(line[3])) f.close() num=int(len(peak)/100.0) bin=[] for i in range(99): bin.append(str(i+1)+'\t'+str(sum(peak[num*i:num*(i+1)])/(num*1.0))+'\n') bin.append('100'+'\t'+str(sum(peak[num*99:])/(num*1.0))+'\n') with open('bin.txt','w') as f: f.writelines(bin) f.close
Python
0.000002
93a3b7d61877e9350ea2b32ade918755fc874bb8
Create run_test.py
recipes/django-environ/run_test.py
recipes/django-environ/run_test.py
import django from django.conf import settings settings.configure(INSTALLED_APPS=['environ', 'django.contrib.contenttypes', 'django.contrib.auth']) django.setup() import environ
Python
0.000004
19186f44b1ed4c4b60ffc1ef796fa0894b25da68
Add garage.partdefs.sockets
py/garage/garage/partdefs/sockets.py
py/garage/garage/partdefs/sockets.py
from garage import parameters from garage import parts from garage import sockets PARTS = parts.Parts(sockets.__name__) PARTS.patch_getaddrinfo = parts.AUTO PARAMS = parameters.define_namespace(sockets.__name__, 'socket utils') PARAMS.patch_getaddrinfo = parameters.create( False, 'enable patching getaddrinfo for caching query results') @parts.define_maker def make() -> PARTS.patch_getaddrinfo: if PARAMS.patch_getaddrinfo.get(): sockets.patch_getaddrinfo()
Python
0.000051
a25e2c0e5d8466e449641fb4c1c74a6e9872da75
add connection module for pyRpc
python/proto/pyRpc/tcp_connection.py
python/proto/pyRpc/tcp_connection.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- # # Copyright (c) 2016 ASMlover. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list ofconditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materialsprovided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
import asyncore import socket import const from pyRpc.logger import LoggerMgr class TcpConnection(asyncore.dispatcher): ST_INIT = 0 ST_ESTABLISHED = 1 ST_DISCONNECTED = 2 def __init__(self, fd, peername): super(TcpConnection, self).__init__(fd) self.logger = LoggerMgr.getLogger('pyRpc.TcpConnection') self.peername = peername self.writebuf = '' self.readbuf_len = const.RPC_READBUF_LEN self.status = TcpConnection.ST_INIT if fd: self.status = TcpConnection.ST_DISCONNECTED self.set_socket_option() self.rpc_channel = None def set_socket_option(self): self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) def set_rpc_channel(self, rpc_channel): self.rpc_channel = rpc_channel def get_rpc_channel(self): return self.rpc_channel def is_established(self): return self.status == TcpConnection.ST_ESTABLISHED def set_readbuf_len(self, readbuf_len): self.readbuf_len = readbuf_len def get_peername(self): return self.peername def writable(self): return len(self.writebuf) > 0 def write_data(self, data): self.writebuf += data def disconnect(self): if self.status == TcpConnection.ST_DISCONNECTED: return if self.rpc_channel: self.rpc_channel.on_disconnected() self.rpc_channel = None if self.socket: super(TcpConnection, self).close() self.status = TcpConnection.ST_DISCONNECTED def handle_read(self): self.logger.debug('TcpConnection.handle_read') data = self.recv(self.readbuf_len) if data: if not self.rpc_channel: return self.rpc_channel.on_read(data) def handle_write(self): self.logger.debug('TcpConnection.handle_write') if self.writebuf: size = self.send(self.writebuf) self.writebuf = self.writebuf[size:] def handle_close(self): self.logger.debug('TcpConnection.handle_close') super(TcpConnection, self).handle_close() self.disconnect() def handle_error(self): self.logger.debug('TcpConnection.handle_error') super(TcpConnection, self).handle_error() self.disconnect() def handle_expt(self): self.logger.debug('TcpConnection.handle_expt') super(TcpConnection, self).handle_expt() 
self.disconnect()
Python
0.000001
b7d23a337ad121a032a8aa2c395c3705bad12b28
add migration to grandfather in all existing plans to have Case Sharing via Groups and Child Cases privileges
corehq/apps/accounting/migrations/0043_grandfather_case_privs.py
corehq/apps/accounting/migrations/0043_grandfather_case_privs.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.21 on 2019-07-23 16:43 from __future__ import unicode_literals from __future__ import absolute_import from django.core.management import call_command from django.db import migrations from corehq.apps.hqadmin.management.commands.cchq_prbac_bootstrap import ( cchq_prbac_bootstrap, ) from corehq.privileges import ( CASE_SHARING_GROUPS, CHILD_CASES, ) def _grandfather_case_privs(apps, schema_editor): call_command( 'cchq_prbac_grandfather_privs', CASE_SHARING_GROUPS, CHILD_CASES, noinput=True, ) class Migration(migrations.Migration): dependencies = [ ('accounting', '0042_domain_user_history__unique__and__nonnullable'), ] operations = [ migrations.RunPython(cchq_prbac_bootstrap), migrations.RunPython(_grandfather_case_privs), ]
Python
0
8bc0c88ef8436d066d6746c0fde96b3b01408d4e
Add examples for geospatial types for the manual (#618)
tests/integration/examples/test_geospatial_types_example.py
tests/integration/examples/test_geospatial_types_example.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) "Neo4j" # Neo4j Sweden AB [http://neo4j.com] # # This file is part of Neo4j. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest # python -m pytest tests/integration/examples/test_geospatial_types_example.py -s -v def _echo(tx, x): return tx.run("RETURN $x AS fieldName", x=x).single() def test_cartesian_point(driver): # tag::geospatial-types-cartesian-import[] from neo4j.spatial import CartesianPoint # end::geospatial-types-cartesian-import[] # tag::geospatial-types-cartesian[] # Creating a 2D point in Cartesian space point2d = CartesianPoint((1, 5.1)) # Or in 3D point3d = CartesianPoint((1, -2., 3.1)) # end::geospatial-types-cartesian[] # storing points for later assertions in_point2d = point2d in_point3d = point3d with driver.session() as session: record_with_2d_point = session.read_transaction(_echo, point2d) record_with_3d_point = session.read_transaction(_echo, point3d) # tag::geospatial-types-cartesian[] # Reading a 2D point from a record point2d = record_with_2d_point.get("fieldName") # type: CartesianPoint str(point2d) # POINT(1.0 5.1) point2d.x # 1.0 point2d.y # 5.1 # point2d.z raises AttributeError point2d.srid # 7203 len(point2d) # 2 # Reading a 3D point from a record point3d = record_with_3d_point.get("fieldName") # type: CartesianPoint str(point3d) # POINT(1.0 -2.0 3.1) point3d.x # 1.0 point3d.y # -2.0 point3d.z # 3.1 point3d.srid # 9157 len(point2d) # 3 # end::geospatial-types-cartesian[] 
assert str(point2d) == "POINT(1.0 5.1)" assert isinstance(point2d.x, float) and point2d.x == 1.0 assert isinstance(point2d.y, float) and point2d.y == 5.1 with pytest.raises(AttributeError): point2d.z assert point2d.srid == 7203 assert len(point2d) == 2 assert point2d == in_point2d assert str(point3d) == "POINT(1.0 -2.0 3.1)" assert isinstance(point3d.x, float) and point3d.x == 1.0 assert isinstance(point3d.y, float) and point3d.y == -2.0 assert isinstance(point3d.z, float) and point3d.z == 3.1 assert point3d.srid == 9157 assert len(point3d) == 3 assert point3d == in_point3d def test_wgs84_point(driver): # tag::geospatial-types-wgs84-import[] from neo4j.spatial import WGS84Point # end::geospatial-types-wgs84-import[] # tag::geospatial-types-wgs84[] # Creating a 2D point in WSG84 space point2d = WGS84Point((1, 5.1)) # Or in 3D point3d = WGS84Point((1, -2., 3.1)) # end::geospatial-types-wgs84[] # storing points for later assertions in_point2d = point2d in_point3d = point3d with driver.session() as session: record_with_2d_point = session.read_transaction(_echo, point2d) record_with_3d_point = session.read_transaction(_echo, point3d) # tag::geospatial-types-wgs84[] # Reading a 2D point from a record point2d = record_with_2d_point.get("fieldName") # type: WGS84Point str(point2d) # POINT(1.0 5.1) point2d.longitude # 1.0 (point2d.x is an alias for longitude) point2d.latitude # 5.1 (point2d.y is an alias for latitude) # point2d.height raises AttributeError (same with point2d.z) point2d.srid # 4326 len(point2d) # 2 # Reading a 3D point from a record point3d = record_with_3d_point.get("fieldName") # type: WGS84Point str(point3d) # POINT(1.0 -2.0 3.1) point3d.longitude # 1.0 (point3d.x is an alias for longitude) point3d.latitude # -2.0 (point3d.y is an alias for latitude) point3d.height # 3.1 (point3d.z is an alias for height) point3d.srid # 4979 len(point2d) # 3 # end::geospatial-types-wgs84[] assert str(point2d) == "POINT(1.0 5.1)" assert isinstance(point2d.longitude, float) 
and point2d.longitude == 1.0 assert isinstance(point2d.x, float) and point2d.x == 1.0 assert isinstance(point2d.latitude, float) and point2d.latitude == 5.1 assert isinstance(point2d.y, float) and point2d.y == 5.1 with pytest.raises(AttributeError): point2d.height with pytest.raises(AttributeError): point2d.z assert point2d.srid == 4326 assert len(point2d) == 2 assert point2d == in_point2d assert str(point3d) == "POINT(1.0 -2.0 3.1)" assert isinstance(point3d.longitude, float) and point3d.longitude == 1.0 assert isinstance(point3d.x, float) and point3d.x == 1.0 assert isinstance(point3d.latitude, float) and point3d.latitude == -2.0 assert isinstance(point3d.y, float) and point3d.y == -2.0 assert isinstance(point3d.height, float) and point3d.height == 3.1 assert isinstance(point3d.z, float) and point3d.z == 3.1 assert point3d.srid == 4979 assert len(point3d) == 3 assert point3d == in_point3d
Python
0
304826205804e3972968b16fbf9bb9021eaf9acd
add FieldOfStudyHierarchy class
scholarly_citation_finder/apps/core/migrations/0015_fieldofstudyhierarchy.py
scholarly_citation_finder/apps/core/migrations/0015_fieldofstudyhierarchy.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-03-11 13:19 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('core', '0014_publicationreference_source'), ] operations = [ migrations.CreateModel( name='FieldOfStudyHierarchy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('child_level', models.SmallIntegerField()), ('parent_level', models.SmallIntegerField()), ('confidence', models.FloatField()), ('child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fieldofstudyhierarchy_child', to='core.FieldOfStudy')), ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fieldofstudyhierarchy_parent', to='core.FieldOfStudy')), ], ), ]
Python
0.000108
ce4bcc19e61518273e054553494288364ab4f677
Add lc085_maximal_rectangle.py
lc085_maximal_rectangle.py
lc085_maximal_rectangle.py
"""Leetcode 85. Maximal Rectangle Hard URL: https://leetcode.com/problems/maximal-rectangle/ Given a 2D binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area. Example: Input: [ ["1","0","1","0","0"], ["1","0","1","1","1"], ["1","1","1","1","1"], ["1","0","0","1","0"] ] Output: 6 """ class Solution(object): def maximalRectangle(self, matrix): """ :type matrix: List[List[str]] :rtype: int """ pass def main(): pass if __name__ == '__main__': main()
Python
0.002349
5d8af7dec1806e7f897a89d1a54ff5f2dc5bfec0
Add 'merge-json.py' script to make the final annotations file.
bin/merge-json.py
bin/merge-json.py
#!/usr/bin/env python from collections import Mapping import json import sys filename1 = sys.argv[1] filename2 = sys.argv[2] json_data1=open(filename1).read() dictA = json.loads(json_data1) json_data2=open(filename2).read() dictB = json.loads(json_data2) merged_dict = {key: value for (key, value) in (dictA.items() + dictB.items())} # string dump of the merged dict print json.dumps(merged_dict)
Python
0
eda01dc886cde85ee9ee84d54fa0d5c5a11a776e
Disable failing android tests on cros.
tools/telemetry/telemetry/core/platform/android_platform_backend_unittest.py
tools/telemetry/telemetry/core/platform/android_platform_backend_unittest.py
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import os import unittest from telemetry import test from telemetry.core import bitmap from telemetry.core import util from telemetry.core.platform import android_platform_backend from telemetry.unittest import system_stub class MockAdbCommands(object): def __init__(self, mock_content): self.mock_content = mock_content def CanAccessProtectedFileContents(self): return True # pylint: disable=W0613 def GetProtectedFileContents(self, file_name, log_result): return self.mock_content def PushIfNeeded(self, host_binary, device_path): pass def RunShellCommand(self, command): return [] class AndroidPlatformBackendTest(unittest.TestCase): def setUp(self): self._stubs = system_stub.Override(android_platform_backend, ['perf_control', 'thermal_throttle']) def tearDown(self): self._stubs.Restore() @test.Disabled('chromeos') def testGetCpuStats(self): proc_stat_content = [ '7702 (.android.chrome) S 167 167 0 0 -1 1077936448 ' '3247 0 0 0 4 1 0 0 20 0 9 0 5603962 337379328 5867 ' '4294967295 1074458624 1074463824 3197495984 3197494152 ' '1074767676 0 4612 0 38136 4294967295 0 0 17 0 0 0 0 0 0 ' '1074470376 1074470912 1102155776'] adb_valid_proc_content = MockAdbCommands(proc_stat_content) backend = android_platform_backend.AndroidPlatformBackend( adb_valid_proc_content, False) cpu_stats = backend.GetCpuStats('7702') self.assertEquals(cpu_stats, {'CpuProcessTime': 5.0}) @test.Disabled('chromeos') def testGetCpuStatsInvalidPID(self): # Mock an empty /proc/pid/stat. 
adb_empty_proc_stat = MockAdbCommands([]) backend = android_platform_backend.AndroidPlatformBackend( adb_empty_proc_stat, False) cpu_stats = backend.GetCpuStats('7702') self.assertEquals(cpu_stats, {}) @test.Disabled def testFramesFromMp4(self): mock_adb = MockAdbCommands([]) backend = android_platform_backend.AndroidPlatformBackend(mock_adb, False) try: backend.InstallApplication('avconv') finally: if not backend.CanLaunchApplication('avconv'): logging.warning('Test not supported on this platform') return # pylint: disable=W0150 vid = os.path.join(util.GetUnittestDataDir(), 'vid.mp4') expected_timestamps = [ 0, 763, 783, 940, 1715, 1732, 1842, 1926, ] # pylint: disable=W0212 for i, timestamp_bitmap in enumerate(backend._FramesFromMp4(vid)): timestamp, bmp = timestamp_bitmap self.assertEquals(timestamp, expected_timestamps[i]) expected_bitmap = bitmap.Bitmap.FromPngFile(os.path.join( util.GetUnittestDataDir(), 'frame%d.png' % i)) self.assertTrue(expected_bitmap.IsEqual(bmp))
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import os import unittest from telemetry import test from telemetry.core import bitmap from telemetry.core import util from telemetry.core.platform import android_platform_backend from telemetry.unittest import system_stub class MockAdbCommands(object): def __init__(self, mock_content): self.mock_content = mock_content def CanAccessProtectedFileContents(self): return True # pylint: disable=W0613 def GetProtectedFileContents(self, file_name, log_result): return self.mock_content def PushIfNeeded(self, host_binary, device_path): pass def RunShellCommand(self, command): return [] class AndroidPlatformBackendTest(unittest.TestCase): def setUp(self): self._stubs = system_stub.Override(android_platform_backend, ['perf_control', 'thermal_throttle']) def tearDown(self): self._stubs.Restore() def testGetCpuStats(self): proc_stat_content = [ '7702 (.android.chrome) S 167 167 0 0 -1 1077936448 ' '3247 0 0 0 4 1 0 0 20 0 9 0 5603962 337379328 5867 ' '4294967295 1074458624 1074463824 3197495984 3197494152 ' '1074767676 0 4612 0 38136 4294967295 0 0 17 0 0 0 0 0 0 ' '1074470376 1074470912 1102155776'] adb_valid_proc_content = MockAdbCommands(proc_stat_content) backend = android_platform_backend.AndroidPlatformBackend( adb_valid_proc_content, False) cpu_stats = backend.GetCpuStats('7702') self.assertEquals(cpu_stats, {'CpuProcessTime': 5.0}) def testGetCpuStatsInvalidPID(self): # Mock an empty /proc/pid/stat. 
adb_empty_proc_stat = MockAdbCommands([]) backend = android_platform_backend.AndroidPlatformBackend( adb_empty_proc_stat, False) cpu_stats = backend.GetCpuStats('7702') self.assertEquals(cpu_stats, {}) @test.Disabled def testFramesFromMp4(self): mock_adb = MockAdbCommands([]) backend = android_platform_backend.AndroidPlatformBackend(mock_adb, False) try: backend.InstallApplication('avconv') finally: if not backend.CanLaunchApplication('avconv'): logging.warning('Test not supported on this platform') return # pylint: disable=W0150 vid = os.path.join(util.GetUnittestDataDir(), 'vid.mp4') expected_timestamps = [ 0, 763, 783, 940, 1715, 1732, 1842, 1926, ] # pylint: disable=W0212 for i, timestamp_bitmap in enumerate(backend._FramesFromMp4(vid)): timestamp, bmp = timestamp_bitmap self.assertEquals(timestamp, expected_timestamps[i]) expected_bitmap = bitmap.Bitmap.FromPngFile(os.path.join( util.GetUnittestDataDir(), 'frame%d.png' % i)) self.assertTrue(expected_bitmap.IsEqual(bmp))
Python
0.990071
7f319b9f84e441cbe893fd2cc68ecd77cfcfd987
create perl-file-which package (#6800)
var/spack/repos/builtin/packages/perl-file-which/package.py
var/spack/repos/builtin/packages/perl-file-which/package.py
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class PerlFileWhich(PerlPackage): """Perl implementation of the which utility as an API""" homepage = "http://cpansearch.perl.org/src/PLICEASE/File-Which-1.22/lib/File/Which.pm" url = "http://search.cpan.org/CPAN/authors/id/P/PL/PLICEASE/File-Which-1.22.tar.gz" version('1.22', 'face60fafd220dc83fa581ef6f96d480')
Python
0
a2b4389db17759086c4cd804b6cbfb1b658d547e
Create equal_sides_of_an_array.py
equal_sides_of_an_array.py
equal_sides_of_an_array.py
#Kunal Gautam #Codewars : @Kunalpod #Problem name: Equal Sides Of An Array #Problem level: 6 kyu def find_even_index(arr): if not sum(arr[1:]): return 0 if not sum(arr[:len(arr)-1]): return len(arr)-1 for i in range(1, len(arr)-1): if sum(arr[:i])==sum(arr[i+1:]): return i return -1
Python
0.998288
d2978eae5b502cc5bc4b020044b88f02522f90cd
Add jobs.utils module
virtool/jobs/utils.py
virtool/jobs/utils.py
def is_running_or_waiting(document): latest_state = document["status"][-1]["state"] return latest_state != "waiting" and latest_state != "running"
Python
0.000001
3bae93629c81cc33e565912e4b9bafeff536ec22
Create hostgroup_info.py
examples/hostgroup_info.py
examples/hostgroup_info.py
def queryHostGroupInfo(): """ "query host group info" """ if lsf.lsb_init("queryHostGroupInfo") > 0: return -1; strArr = lsf.new_stringArray(2); lsf.stringArray_setitem(strArr, 0, "hg1"); lsf.stringArray_setitem(strArr, 1, "hg2"); for hgroupInfo in lsf.get_hostgroup_info_by_name(strArr,2): if hgroupInfo != None: print 'hgroup name = %s' % hgroupInfo.group; print 'hgroup list = %s' % hgroupInfo.memberList; else: print 'hgroupInfo is null' return -1; return 0; if __name__ == '__main__': queryHostGroupInfo();
Python
0.000001
986b20363cc84be1822588dd7cc935fca7ef7f48
add test for get_genofile_samplelist in marker_regression/run_mapping.py
wqflask/tests/wqflask/marker_regression/test_run_mapping.py
wqflask/tests/wqflask/marker_regression/test_run_mapping.py
import unittest from unittest import mock from wqflask.marker_regression.run_mapping import get_genofile_samplelist class AttributeSetter: def __init__(self,obj): for k,v in obj.items(): setattr(self,k,v) class MockDataSetGroup(AttributeSetter): def get_genofiles(self): return [{"location":"~/genofiles/g1_file","sample_list":["S1","S2","S3","S4"]}] class TestRunMapping(unittest.TestCase): def setUp(self): self.group=MockDataSetGroup({"genofile":"~/genofiles/g1_file"}) self.dataset=AttributeSetter({"group":self.group}) def tearDown(self): self.dataset=AttributeSetter({"group":{"location":"~/genofiles/g1_file"}}) def test_get_genofile_samplelist(self): #location true and sample list true results_1=get_genofile_samplelist(self.dataset) self.assertEqual(results_1,["S1","S2","S3","S4"]) #return empty array self.group.genofile="~/genofiles/g2_file" result_2=get_genofile_samplelist(self.dataset) self.assertEqual(result_2,[])
Python
0
fe145fd87db777d9eeb361688d502b1b3ec4b2e1
Add a new Model-View-Projection matrix tool.
Transformation.py
Transformation.py
# -*- coding:utf-8 -*- # *************************************************************************** # Transformation.py # ------------------- # update : 2013-11-13 # copyright : (C) 2013 by Michaël Roy # email : microygh@gmail.com # *************************************************************************** # *************************************************************************** # * * # * This program is free software; you can redistribute it and/or modify * # * it under the terms of the GNU General Public License as published by * # * the Free Software Foundation; either version 2 of the License, or * # * (at your option) any later version. * # * * # *************************************************************************** # # External dependencies # from numpy import *
Python
0
c2089b3ed549d89942f57075d0b6d573d980bc30
make app load in worker in uwsgi.ini, pass db configuration dynamically to docker image as env variable
app/config.py
app/config.py
from datetime import timedelta class Config(object): DEBUG = False TESTING = False SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{}:{}@{}/{}' APP_NAME = '{} Server' SECRET_KEY = '{}' JWT_EXPIRATION_DELTA = timedelta(days=30) JWT_AUTH_URL_RULE = '/api/v1/auth' SECURITY_REGISTERABLE = True SECURITY_RECOVERABLE = True SECURITY_TRACKABLE = True SECURITY_PASSWORD_HASH = 'sha512_crypt' SECURITY_PASSWORD_SALT = '{}' SQLALCHEMY_TRACK_MODIFICATIONS = False class ProductionConfig(Config): APP_NAME = '{} Production Server' DEBUG = False class DevelopmentConfig(Config): DEBUG = True MAIL_SUPPRESS_SEND = False class TestingConfig(Config): TESTING = True
Python
0
f9874e059df50dc81803fcfdfd1045cc09624894
Add functional regressions tests for server_group_members OverQuota
nova/tests/functional/regressions/test_bug_1780373.py
nova/tests/functional/regressions/test_bug_1780373.py
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit.image import fake as fake_image from nova.tests.unit import policy_fixture from nova.virt import fake as fake_virt class TestMultiCreateServerGroupMemberOverQuota( test.TestCase, integrated_helpers.InstanceHelperMixin): """This tests a regression introduced in the Pike release. Starting in the Pike release, quotas are no longer tracked using usages and reservations tables but instead perform a resource counting operation at the point of resource creation. When creating multiple servers in the same request that belong in the same server group, the [quota]/server_group_members config option is checked to determine if those servers can belong in the same group based on quota. However, the quota check for server_group_members only counts existing group members based on live instances in the cell database(s). But the actual instance record isn't created in the cell database until *after* the server_group_members quota check happens. Because of this, it is possible to bypass the server_group_members quota check when creating multiple servers in the same request. 
""" def setUp(self): super(TestMultiCreateServerGroupMemberOverQuota, self).setUp() self.flags(server_group_members=2, group='quota') self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(nova_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) self.api = api_fixture.api self.api.microversion = '2.37' # so we can specify networks='none' fake_image.stub_out_image_service(self) self.addCleanup(fake_image.FakeImageService_reset) group = {'name': 'test group', 'policies': ['soft-anti-affinity']} self.created_group = self.api.post_server_groups(group) def test_multi_create_server_group_members_over_quota(self): """Recreate scenario for the bug where we create an anti-affinity server group and then create 3 servers in the group using a multi-create POST /servers request. """ # TODO(mriedem): We don't need a compute service when the bug is fixed # because we won't be able to get past nova-api validation. self.start_service('conductor') self.start_service('scheduler') fake_virt.set_nodes(['host1']) self.addCleanup(fake_virt.restore_nodes) self.start_service('compute', host='host1') server_req = self._build_minimal_create_server_request( self.api, 'test_multi_create_server_group_members_over_quota', image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID, networks='none') server_req['min_count'] = 3 server_req['return_reservation_id'] = True hints = {'group': self.created_group['id']} # FIXME(mriedem): When bug 1780373 is fixed this should result in a # 403 error response and 0 members in the group. reservation_id = self.api.post_server( {'server': server_req, 'os:scheduler_hints': hints})['reservation_id'] # Assert that three servers were created regardless of the # [quota]/server_group_members=2 quota limit. 
servers = self.api.get_servers( detail=False, search_opts={'reservation_id': reservation_id}) self.assertEqual(3, len(servers)) group = self.api.api_get( '/os-server-groups/%s' % self.created_group['id']).body['server_group'] self.assertEqual(3, len(group['members'])) def test_concurrent_request_server_group_members_over_quota(self): """Recreate scenario for the bug where we create 3 servers in the same group but in separate requests. The NoopConductorFixture is used to ensure the instances are not created in the nova cell database which means the quota check will have to rely on counting group members using build requests from the API DB. """ # These aren't really concurrent requests, but we can simulate that # by using NoopConductorFixture. self.useFixture(nova_fixtures.NoopConductorFixture()) for x in range(3): server_req = self._build_minimal_create_server_request( self.api, 'test_concurrent_request_%s' % x, image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID, networks='none') hints = {'group': self.created_group['id']} # FIXME(mriedem): When bug 1780373 is fixed this should result in a # 403 error response on the 3rd create server request. self.api.post_server( {'server': server_req, 'os:scheduler_hints': hints}) # Assert that three servers were created regardless of the # [quota]/server_group_members=2 quota limit. servers = self.api.get_servers(detail=False) # FIXME(mriedem): When the bug is fixed, there should only be 2 servers # created and 2 members in the group. self.assertEqual(3, len(servers)) group = self.api.api_get( '/os-server-groups/%s' % self.created_group['id']).body['server_group'] self.assertEqual(3, len(group['members']))
Python
0.000031
2f0700093141643bd66e99d271f9e74087e148e6
Add Message model migration file.
core/migrations/0002_message.py
core/migrations/0002_message.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-08-05 19:19 from __future__ import unicode_literals import django.contrib.postgres.fields from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ] operations = [ migrations.CreateModel( name='Message', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('goes_id', models.CharField(max_length=8)), ('goes_channel', models.PositiveSmallIntegerField()), ('goes_spacecraft', models.CharField(choices=[('E', 'East'), ('W', 'West')], default='E', max_length=1)), ('arrival_time', models.DateTimeField()), ('failure_code', models.CharField(max_length=1)), ('signal_strength', models.PositiveSmallIntegerField()), ('frequency_offset', models.CharField(max_length=2)), ('modulation_index', models.CharField(choices=[('N', 'Normal (60 degrees +/- 5)'), ('L', 'Low (50 degrees)'), ('H', 'High (70 degrees)')], default='N', max_length=1)), ('data_quality', models.CharField(choices=[('N', 'Normal (error rate < 10^-6)'), ('F', 'Fair (10^-6 < error rate < 10^-4)'), ('P', 'Poor (error rate > 10^-4)')], default='N', max_length=1)), ('data_source', models.CharField(choices=[('LE', 'Cincinnati East; USACE LRD Cincinnati'), ('d1', 'NIFC West Boise ID - Unit 1; NIFC Boise'), ('d2', 'NIFC West Boise ID - Unit 2; NIFC Boise'), ('OW', 'Omaha West; USACE NWO'), ('RE', 'Rock Island East; USACE MVR'), ('RW', 'Rock Island West; USACE MVR'), ('SF', 'West Palm Beach East; SFWMD'), ('UB', 'Ucom Backup @ WCDA; NOAA Wallops CDA'), ('UP', 'Ucom Primary @ WCDA; NOAA Wallops CDA'), ('XE', 'Sioux Falls, East; USGS EROS'), ('XW', 'Sioux Falls, West; USGS EROS'), ('XL', 'Sioux Falls, LRIT; USGS EROS'), ('RL', 'Reston, LRIT; Reston, Virginia')], max_length=2)), ('recorded_message_length', 
models.PositiveSmallIntegerField()), ('values', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)), ('message_text', models.TextField()), ('station', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Station')), ], ), ]
Python
0
30c5b785863df0269c7abbbc5000d83df4f815c2
Predict some data similar to problem on assignment
outlier_detection/svm_classification_with_synthetic_data.py
outlier_detection/svm_classification_with_synthetic_data.py
import numpy as np from matplotlib import pyplot as plt import matplotlib.font_manager from sklearn import svm def main(): tests = 20 # Generate train data X = (np.random.randn(120, 2) * np.array([0.08, 0.02]) + np.array([0.3, 0.6])) X_train = X[:-tests] X_test = X[-tests:] X_outliers = np.copy(X_test) X_outliers = (X_outliers + np.random.uniform(low=-0.1, high=0.1, size=(tests, 2))) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1) clf.fit(X_train) y_pred_train = clf.predict(X_train) y_pred_test = clf.predict(X_test) y_pred_outliers = clf.predict(X_outliers) print(y_pred_test) print(y_pred_outliers) s = 40 plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s) plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s) plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s) plt.axis('equal') plt.show() if __name__ == '__main__': main()
Python
0.999856
68dbfedf90fb9e6c922971deaeccad148a258a70
Add tests for PyEcore extension (EClass/EModelElement tests)
tests/test_dynamic_ecore_extension.py
tests/test_dynamic_ecore_extension.py
import pytest
from pyecore.ecore import *
import pyecore.ecore as ecore
from ordered_set import OrderedSet


def test__EModelElement_extension():
    # A dynamic EClass inheriting from EModelElement gains eAnnotations.
    clazz = EClass('A', superclass=(EModelElement.eClass))
    instance = clazz()
    assert instance.eAnnotations == OrderedSet()

    annot = EAnnotation(source='testAnnot')
    annot.details['test'] = 'value'
    instance.eAnnotations.append(annot)

    assert len(instance.eAnnotations) == 1
    assert instance.getEAnnotation('testAnnot') is annot
    assert instance.getEAnnotation('testAnnot').details['test'] == 'value'


def test__EClass_extension():
    # Subclassing EClass itself yields a metaclass whose instances are
    # EClasses, and whose instances' instances are plain EObjects.
    SuperEClass = EClass('SuperEClass', superclass=(EClass.eClass,))
    A = SuperEClass(name='A')
    assert isinstance(A, EClass)

    obj = A()
    assert isinstance(obj, EObject)
    assert obj.eClass is A


def test__EClass_modification():
    # Attaching a typed EAttribute to EClass makes it available (with
    # its default) on every EClass instance, and value-checked on set.
    EClass.new_feature = EAttribute('new_feature', EInt)
    A = EClass('A')
    assert A.new_feature == 0

    A.new_feature = 5
    assert A.new_feature == 5

    with pytest.raises(BadValueError):
        A.new_feature = 'a'
Python
0
40431228c8535f325b005bb52485cae87a8be714
Add test module for napalm_acl
tests/unit/modules/test_napalm_acl.py
tests/unit/modules/test_napalm_acl.py
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Anthony Shaw <anthonyshaw@apache.org>`
'''

# Import Python Libs
from __future__ import absolute_import

# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
    MagicMock,
    NO_MOCK,
    NO_MOCK_REASON
)

import tests.support.napalm as napalm_test_support
import salt.modules.napalm_acl as napalm_acl  # NOQA


@skipIf(NO_MOCK, NO_MOCK_REASON)
class NapalmAclModuleTestCase(TestCase, LoaderModuleMockMixin):
    """Unit tests for the napalm_acl execution module."""

    def setup_loader_modules(self):
        # Fake NAPALM proxy/device configuration returned by config.option.
        device_config = {
            'test': {
                'driver': 'test',
                'key': '2orgk34kgk34g'
            }
        }
        # Stubbed __salt__ dunder wired to the shared napalm test helpers.
        salt_dunder = {
            'config.option': MagicMock(return_value=device_config),
            'file.file_exists': napalm_test_support.true,
            'file.join': napalm_test_support.join,
            'file.get_managed': napalm_test_support.get_managed_file,
            'random.hash': napalm_test_support.random_hash
        }
        return {napalm_acl: {'__salt__': salt_dunder}}

    def test_load_term_config(self):
        result = napalm_acl.load_term_config("test_filter", "test_term")
        assert result['out'] is napalm_test_support.TEST_TERM_CONFIG
Python
0
f987b39bb43301c735f30169010832665953efe6
Add a sample permission plugin for illustrating the check on realm resources, related to #6211.
sample-plugins/public_wiki_policy.py
sample-plugins/public_wiki_policy.py
from fnmatch import fnmatchcase

from trac.config import Option
from trac.core import *
from trac.perm import IPermissionPolicy


class PublicWikiPolicy(Component):
    """Sample permission policy plugin illustrating how to check
    permission on realms.

    Don't forget to integrate that plugin in the appropriate place in
    the list of permission policies:
    {{{
    [trac]
    permission_policies = PublicWikiPolicy, DefaultPermissionPolicy
    }}}

    Then you can configure which pages you want to make public:
    {{{
    [public_wiki]
    view = Public*
    modify = PublicSandbox/*
    }}}

    """

    implements(IPermissionPolicy)

    view = Option('public_wiki', 'view', 'Public*',
        """Case-sensitive glob pattern used for granting view permission on
        all Wiki pages matching it.""")

    modify = Option('public_wiki', 'modify', 'Public*',
        """Case-sensitive glob pattern used for granting modify permissions
        on all Wiki pages matching it.""")

    def check_permission(self, action, username, resource, perm):
        # Coarse-grained legacy check: no resource given, the realm is
        # encoded in the action name itself. This policy ''may'' grant
        # permissions on some wiki pages, so say yes for WIKI_* actions.
        if not resource:
            return True if action.startswith('WIKI_') else None

        # Fine-grained check: only the wiki realm concerns this policy.
        if resource.realm != 'wiki':
            return None

        # Realm-level (no id): this policy ''may'' grant permissions on
        # some wiki pages.
        if not resource.id:
            return True

        # Page-level: grant when the page name matches the configured
        # glob for the requested kind of access (think 'VIEW' here).
        pattern = self.view if action == 'WIKI_VIEW' else self.modify
        return True if fnmatchcase(resource.id, pattern) else None
Python
0
784cd71fe24b1f5ce57a1982186dabc768892883
Fix discount calculation logic
saleor/product/models/discounts.py
saleor/product/models/discounts.py
from __future__ import unicode_literals

from django.conf import settings
from django.db import models
from django.utils.translation import pgettext_lazy
from django.utils.encoding import python_2_unicode_compatible
from django_prices.models import PriceField
from prices import FixedDiscount


class NotApplicable(ValueError):
    # Raised when a discount cannot be applied to a given product.
    pass


@python_2_unicode_compatible
class FixedProductDiscount(models.Model):
    """Flat-amount discount applicable to a fixed set of products."""

    name = models.CharField(max_length=255)
    products = models.ManyToManyField('Product', blank=True)
    discount = PriceField(pgettext_lazy('Discount field', 'discount value'),
                          currency=settings.DEFAULT_CURRENCY,
                          max_digits=12, decimal_places=2)

    class Meta:
        app_label = 'product'

    def __repr__(self):
        return 'FixedProductDiscount(name=%r, discount=%r)' % (
            str(self.discount), self.name)

    def __str__(self):
        return self.name

    def modifier_for_product(self, variant):
        """Return a FixedDiscount for *variant*, or raise NotApplicable.

        Accepts either a ProductVariant or a product-like object; in
        both cases the owning product must be in ``self.products`` and
        the discount must not exceed the per-item price.
        """
        # Local import to avoid a circular dependency at module load.
        from ...product.models import ProductVariant
        if isinstance(variant, ProductVariant):
            product_pk = variant.product.pk
            reference_price = variant.get_price_per_item()
        else:
            product_pk = variant.pk
            reference_price = variant.get_price_per_item(variant)
        if not self.products.filter(pk=product_pk).exists():
            raise NotApplicable('Discount not applicable for this product')
        if self.discount > reference_price:
            raise NotApplicable('Discount too high for this product')
        return FixedDiscount(self.discount, name=self.name)


def get_product_discounts(variant, discounts, **kwargs):
    # Yield the modifiers of every discount that applies to *variant*,
    # silently skipping those that do not.
    for discount in discounts:
        try:
            yield discount.modifier_for_product(variant, **kwargs)
        except NotApplicable:
            pass
from __future__ import unicode_literals

from django.conf import settings
from django.db import models
from django.utils.translation import pgettext_lazy
from django.utils.encoding import python_2_unicode_compatible
from django_prices.models import PriceField
from prices import FixedDiscount


class NotApplicable(ValueError):
    # Raised when a discount cannot be applied to a given product.
    pass


@python_2_unicode_compatible
class FixedProductDiscount(models.Model):
    """Flat-amount discount applicable to a fixed set of products."""

    name = models.CharField(max_length=255)
    products = models.ManyToManyField('Product', blank=True)
    discount = PriceField(pgettext_lazy('Discount field', 'discount value'),
                          currency=settings.DEFAULT_CURRENCY,
                          max_digits=12, decimal_places=2)

    class Meta:
        app_label = 'product'

    def __repr__(self):
        return 'FixedProductDiscount(name=%r, discount=%r)' % (
            str(self.discount), self.name)

    def __str__(self):
        return self.name

    def modifier_for_product(self, variant):
        """Return a FixedDiscount for *variant*, or raise NotApplicable."""
        covered = self.products.filter(pk=variant.product.pk).exists()
        if not covered:
            raise NotApplicable('Discount not applicable for this product')
        undiscounted = variant.get_price(discounted=False)
        if self.discount > undiscounted:
            raise NotApplicable('Discount too high for this product')
        return FixedDiscount(self.discount, name=self.name)


def get_product_discounts(variant, discounts, **kwargs):
    # Yield the modifiers of every discount that applies to *variant*,
    # silently skipping those that do not.
    for discount in discounts:
        try:
            yield discount.modifier_for_product(variant, **kwargs)
        except NotApplicable:
            pass
Python
0.000106
c35fa6caa631b03c3b5b3c9ea5bf494254ed9b1f
add script for usb backend to receive tracing data
scripts/tracing/trace_capture_usb.py
scripts/tracing/trace_capture_usb.py
#!/usr/bin/env python3
#
# Copyright (c) 2019 Intel Corporation.
#
# SPDX-License-Identifier: Apache-2.0

"""
Script to capture tracing data with USB backend.
"""

import usb.core
import usb.util

import argparse
import sys


def parse_args():
    """Parse command-line options into the module-level ``args``."""
    global args
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-v", "--vendor_id", required=True,
                        help="usb device vendor id")
    parser.add_argument("-p", "--product_id", required=True,
                        help="usb device product id")
    parser.add_argument("-o", "--output", default='channel0_0',
                        required=False, help="tracing data output file")
    args = parser.parse_args()


def _parse_usb_id(value):
    """Parse a USB id given as decimal ('4660') or hexadecimal ('1234')."""
    if value.isdecimal():
        return int(value)
    return int(value, 16)


def main():
    """Open the tracing USB device, enable tracing and stream data to a file.

    Runs until interrupted; KeyboardInterrupt is handled at the entry
    point below so captured data is reported as saved.
    """
    parse_args()

    vendor_id = _parse_usb_id(args.vendor_id)
    product_id = _parse_usb_id(args.product_id)
    output_file = args.output

    try:
        usb_device = usb.core.find(idVendor=vendor_id, idProduct=product_id)
    except Exception as e:
        sys.exit("{}".format(e))

    if usb_device is None:
        sys.exit("No device found, check vendor_id and product_id")

    # Detach the kernel driver (if any) so this process can claim
    # interface 0.
    if usb_device.is_kernel_driver_active(0):
        try:
            usb_device.detach_kernel_driver(0)
        except usb.core.USBError as e:
            sys.exit("{}".format(e))

    # set the active configuration. With no arguments, the first
    # configuration will be the active one
    try:
        usb_device.set_configuration()
    except usb.core.USBError as e:
        sys.exit("{}".format(e))

    configuration = usb_device[0]
    interface = configuration[(0, 0)]

    # match the only IN endpoint
    read_endpoint = usb.util.find_descriptor(
        interface,
        custom_match=lambda e:
            usb.util.endpoint_direction(e.bEndpointAddress) ==
            usb.util.ENDPOINT_IN)

    # match the only OUT endpoint
    write_endpoint = usb.util.find_descriptor(
        interface,
        custom_match=lambda e:
            usb.util.endpoint_direction(e.bEndpointAddress) ==
            usb.util.ENDPOINT_OUT)

    # Fail with a clear message instead of an AttributeError below.
    if read_endpoint is None or write_endpoint is None:
        sys.exit("Required IN/OUT endpoints not found on interface 0")

    usb.util.claim_interface(usb_device, interface)

    # enable device tracing
    write_endpoint.write('enable')

    # try to read to avoid garbage mixed to useful stream data
    buff = usb.util.create_buffer(8192)
    read_endpoint.read(buff, 10000)

    with open(output_file, "wb") as file_desc:
        while True:
            buff = usb.util.create_buffer(8192)
            length = read_endpoint.read(buff, 100000)
            # Write the received chunk in one call; bytes(buff[:length])
            # is byte-for-byte identical to writing
            # chr(b).encode('latin1') per byte, but far faster.
            file_desc.write(bytes(buff[:length]))

    usb.util.release_interface(usb_device, interface)


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print('Data capture interrupted, data saved into {}'.format(args.output))
        sys.exit(0)
Python
0.000001
964d01fd9a730d02aac85740bce0ef9dace6517b
add migrations
molo/core/migrations/0054_merged_cms_models.py
molo/core/migrations/0054_merged_cms_models.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.12 on 2017-02-21 12:13 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import modelcluster.fields class Migration(migrations.Migration): dependencies = [ ('wagtailcore', '0032_add_bulk_delete_page_permission'), ('core', '0053_add_next_and_recommended_functionality'), ] operations = [ migrations.CreateModel( name='Languages', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='SiteLanguageRelation', fields=[ ('sitelanguage_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.SiteLanguage')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('language_setting', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='languages', to='core.Languages')), ], options={ 'ordering': ['sort_order'], 'abstract': False, }, bases=('core.sitelanguage', models.Model), ), ]
Python
0.000001
17b4efb401d36060f51e07da5ace83c008d421c5
Create table charge_observation.
problem/charge_state/alembic/versions/2154afa58ba0_create_table_charge_observation.py
problem/charge_state/alembic/versions/2154afa58ba0_create_table_charge_observation.py
"""Create table charge_observation.

Revision ID: 2154afa58ba0
Revises:
Create Date: 2020-01-05 12:18:25.331846

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '2154afa58ba0'
down_revision = None  # root of the migration chain
branch_labels = None
depends_on = None


def upgrade():
    """Create the charge_observation table (id PK + optional stamp)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('charge_observation',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('stamp', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    """Drop the charge_observation table, undoing upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('charge_observation')
    # ### end Alembic commands ###
Python
0