commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
42c496c78fe2dcc06df65641ba1df33c02e41533
|
Revert "updates"
|
cvlib_example.py
|
cvlib_example.py
|
Python
| 0
|
@@ -0,0 +1,326 @@
+%22%22%22Example using cvlib.%22%22%22%0Aimport cvlib as cv%0Afrom cvlib.object_detection import draw_bbox%0Aimport cv2%0A%0Aimg = image = cv2.imread(%22assets/sv.jpg%22)%0Abbox, label, conf = cv.detect_common_objects(img, model=%22largess%22)%0Aprint(label)%0A%0Aoutput_image = draw_bbox(img, bbox, label, conf)%0Acv2.imwrite(%22cvlib-example-out.jpg%22, output_image)%0A
|
|
5831dee7a6c14c85933658610ae991fbc0af9442
|
Add basic tests for stream.me plugin (#391)
|
tests/test_plugin_streamme.py
|
tests/test_plugin_streamme.py
|
Python
| 0
|
@@ -0,0 +1,631 @@
+import unittest%0A%0Afrom streamlink.plugins.streamme import StreamMe%0A%0A%0Aclass TestPluginStreamMe(unittest.TestCase):%0A def test_can_handle_url(self):%0A # should match%0A self.assertTrue(StreamMe.can_handle_url(%22http://www.stream.me/nameofstream%22))%0A%0A # shouldn't match%0A self.assertFalse(StreamMe.can_handle_url(%22http://www.livestream.me/nameofstream%22))%0A self.assertFalse(StreamMe.can_handle_url(%22http://www.streamme.com/nameofstream%22))%0A self.assertFalse(StreamMe.can_handle_url(%22http://www.streamme.me/nameofstream%22))%0A self.assertFalse(StreamMe.can_handle_url(%22http://www.youtube.com/%22))%0A
|
|
2b9909004a761047fd935ad51b06102032dbe30a
|
Create __init__.py
|
src/nupic/research/frameworks/htm/temporal_memory/__init__.py
|
src/nupic/research/frameworks/htm/temporal_memory/__init__.py
|
Python
| 0.000429
|
@@ -0,0 +1,1209 @@
+# ----------------------------------------------------------------------%0A# Numenta Platform for Intelligent Computing (NuPIC)%0A# Copyright (C) 2022, Numenta, Inc. Unless you have an agreement%0A# with Numenta, Inc., for a separate license for this software code, the%0A# following terms and conditions apply:%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero Public License version 3 as%0A# published by the Free Software Foundation.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.%0A# See the GNU Affero Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero Public License%0A# along with this program. If not, see http://www.gnu.org/licenses.%0A#%0A# http://numenta.org/licenses/%0A# ----------------------------------------------------------------------%0A%0Afrom .temporal_memory_apical_tiebreak import TemporalMemoryApicalTiebreak%0Afrom .sequence_memory_apical_tiebreak import SequenceMemoryApicalTiebreak%0A#from temporal_memory.pair_memory_apical_tiebreak import PairMemoryApicalTiebreak%0A%0A%0A
|
|
4ebfc2e6ffb21fd55ef1fc4f1fd836153b2da545
|
Add tests for all exceptions
|
tests/unit/test_exceptions.py
|
tests/unit/test_exceptions.py
|
Python
| 0
|
@@ -0,0 +1,2015 @@
+# coding: utf-8%0A%0Aimport pytest%0Aimport responses%0Aimport kiteconnect.exceptions as ex%0A%0A%0A@responses.activate%0Adef test_wrong_json_response(kiteconnect):%0A responses.add(%0A responses.GET,%0A %22%25s%25s%22 %25 (kiteconnect.root, kiteconnect._routes%5B%22portfolio.positions%22%5D),%0A body=%22%7Ba:b%7D%22,%0A content_type=%22application/json%22%0A )%0A with pytest.raises(ex.DataException) as exc:%0A positions = kiteconnect.positions()%0A assert exc.message == %22Couldn't parse the JSON response %22%5C%0A %22received from the server: %7Ba:b%7D%22%0A%0A%0A@responses.activate%0Adef test_wrong_content_type(kiteconnect):%0A rdf_data = %22%3Crdf:Description rdf:about=''%3E%3Crdfs:label%3Ezerodha%3C/rdfs:label%3E%3C/rdf:Description%22%0A responses.add(%0A responses.GET,%0A %22%25s%25s%22 %25 (kiteconnect.root, kiteconnect._routes%5B%22portfolio.positions%22%5D),%0A body=rdf_data,%0A content_type=%22application/rdf+xml%22%0A )%0A with pytest.raises(ex.DataException) as exc:%0A positions = kiteconnect.positions()%0A assert exc.message == %22Unknown Content-Type (%7Bcontent_type%7D) with response: (%7Bcontent%7D)%22.format(%0A content_type='application/rdf+xml',%0A content=rdf_data%0A )%0A%0A%0A@pytest.mark.parametrize(%22error_type,message%22, %5B%0A ('PermissionException', 'oops! permission issue'),%0A ('OrderException', 'oops! 
cannot place order'),%0A ('InputException', 'missing or invalid params'),%0A ('NetworkException', 'oopsy doopsy network issues damn!'),%0A ('CustomException', 'this is an exception i just created')%0A%5D)%0A@responses.activate%0Adef test_native_exceptions(error_type, message, kiteconnect):%0A responses.add(%0A responses.GET,%0A %22%25s%25s%22 %25 (kiteconnect.root, kiteconnect._routes%5B%22portfolio.positions%22%5D),%0A body='%7B%22error_type%22: %22%25s%22, %22message%22: %22%25s%22%7D' %25 (error_type, message),%0A content_type=%22application/json%22%0A )%0A with pytest.raises(getattr(ex, error_type, ex.GeneralException)) as exc:%0A positions = kiteconnect.positions()%0A assert exc.message == message%0A
|
|
cec436eba6174fbf52dc7908e1d5218cd9bea1e7
|
add tests around internal classes of Vcf class
|
tests/vcf_tests/file_tests.py
|
tests/vcf_tests/file_tests.py
|
Python
| 0
|
@@ -0,0 +1,1234 @@
+from unittest import TestCase, main%0Afrom svtools.vcf.file import Vcf%0A%0Aclass Test_Format(TestCase):%0A def test_init(self):%0A f = Vcf.Format('GT', 1, 'String', '%22Genotype%22')%0A self.assertEqual(f.id, 'GT')%0A self.assertEqual(f.number, '1')%0A self.assertEqual(f.type, 'String')%0A self.assertEqual(f.desc, 'Genotype')%0A self.assertEqual(f.hstring, '##FORMAT=%3CID=GT,Number=1,Type=String,Description=%22Genotype%22%3E')%0A%0Aclass Test_Info(TestCase):%0A def test_init(self):%0A i = Vcf.Info('NS', 1, 'Integer', '%22Number of Samples With Data%22')%0A self.assertEqual(i.id, 'NS')%0A self.assertEqual(i.number, '1')%0A self.assertEqual(i.type, 'Integer')%0A self.assertEqual(i.desc, 'Number of Samples With Data')%0A self.assertEqual(i.hstring, '##INFO=%3CID=NS,Number=1,Type=Integer,Description=%22Number of Samples With Data%22%3E')%0A%0Aclass Test_Alt(TestCase):%0A def test_init(self):%0A a = Vcf.Alt('DEL:ME:ALU', '%22Deletion of ALU element%22')%0A self.assertEqual(a.id, 'DEL:ME:ALU')%0A self.assertEqual(a.desc, 'Deletion of ALU element')%0A self.assertEqual(a.hstring, '##ALT=%3CID=DEL:ME:ALU,Description=%22Deletion of ALU element%22%3E')%0A%0Aif __name__ == %22__main__%22:%0A main()%0A%0A
|
|
f131cd221b2ce6fc144b2aa9882cb0ad1b116675
|
Add (failing) tests for the dashboard
|
tests/views/test_dashboard.py
|
tests/views/test_dashboard.py
|
Python
| 0.000001
|
@@ -0,0 +1,1802 @@
+#!/usr/bin/env python2.5%0A#%0A# Copyright 2011 the Melange authors.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%22%22%22Tests for dashboard view.%0A%22%22%22%0A%0A__authors__ = %5B%0A '%22Sverre Rabbelier%22 %3Csverre@rabbelier.nl%3E',%0A %5D%0A%0A%0Aimport httplib%0A%0Afrom tests.profile_utils import GSoCProfileHelper%0Afrom tests.test_utils import DjangoTestCase%0A%0A# TODO: perhaps we should move this out?%0Afrom soc.modules.seeder.logic.seeder import logic as seeder_logic%0A%0A%0Aclass DashboardTest(DjangoTestCase):%0A %22%22%22Tests dashboard page.%0A %22%22%22%0A%0A def setUp(self):%0A from soc.modules.gsoc.models.program import GSoCProgram%0A self.gsoc = seeder_logic.seed(GSoCProgram)%0A self.data = GSoCProfileHelper(self.gsoc)%0A%0A def assertDashboardTemplatesUsed(self, response):%0A %22%22%22Asserts that all the templates from the dashboard were used.%0A %22%22%22%0A self.assertGSoCTemplatesUsed(response)%0A self.assertTemplateUsed(response, 'v2/modules/gsoc/dashboard/base.html')%0A%0A def testDasbhoardNoRole(self):%0A url = '/gsoc/dashboard/' + self.gsoc.key().name()%0A response = self.client.get(url)%0A self.assertDashboardTemplatesUsed(response)%0A%0A def testDashboardWithProfile(self):%0A self.data.createProfile()%0A url = '/gsoc/dashboard/' + self.gsoc.key().name()%0A response = self.client.get(url)%0A self.assertDashboardTemplatesUsed(response)%0A
|
|
920870a310a3c32b851dfe5927aad48d7b86b0c0
|
Update model verbose names
|
project/bhs/migrations/0008_auto_20190514_0904.py
|
project/bhs/migrations/0008_auto_20190514_0904.py
|
Python
| 0.000001
|
@@ -0,0 +1,776 @@
+# Generated by Django 2.1.8 on 2019-05-14 16:04%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('bhs', '0007_auto_20190513_1349'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelOptions(%0A name='group',%0A options=%7B'ordering': %5B'tree_sort'%5D, 'verbose_name_plural': 'Groups'%7D,%0A ),%0A migrations.AlterModelOptions(%0A name='member',%0A options=%7B'verbose_name_plural': 'Members'%7D,%0A ),%0A migrations.AlterModelOptions(%0A name='officer',%0A options=%7B'verbose_name_plural': 'Officers'%7D,%0A ),%0A migrations.AlterModelOptions(%0A name='person',%0A options=%7B'verbose_name_plural': 'Persons'%7D,%0A ),%0A %5D%0A
|
|
226c2f36b9cc8257ce99bd15648be4aba2ccb606
|
Move utility functions for checking passported benefits into separate module
|
cla_public/apps/checker/utils.py
|
cla_public/apps/checker/utils.py
|
Python
| 0
|
@@ -0,0 +1,261 @@
+from cla_public.apps.checker.constants import PASSPORTED_BENEFITS, %5C%0A NASS_BENEFITS%0A%0A%0Adef passported(benefits):%0A return bool(set(benefits).intersection(PASSPORTED_BENEFITS))%0A%0A%0Adef nass(benefits):%0A return bool(set(benefits).intersection(NASS_BENEFITS))%0A
|
|
6185e7bbc12c3bc9aba1efcfd53275cc109f2e91
|
Add a snippet.
|
python/pyqt/pyqt5/widget_QPainter_draw_polygon.py
|
python/pyqt/pyqt5/widget_QPainter_draw_polygon.py
|
Python
| 0.000002
|
@@ -0,0 +1,1677 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A# See https://www.youtube.com/watch?v=96FBrNR3XOY%0A%0Aimport sys%0Afrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QLabel, QVBoxLayout%0Afrom PyQt5.QtGui import QPainter, QBrush, QColor, QPen, QPolygon%0Afrom PyQt5.QtCore import Qt, QPoint%0A%0A%0Aclass MyPaintWidget(QWidget):%0A%0A def __init__(self):%0A super().__init__()%0A%0A # Set window background color%0A self.setAutoFillBackground(True)%0A%0A palette = self.palette()%0A palette.setColor(self.backgroundRole(), Qt.white)%0A%0A self.setPalette(palette)%0A%0A def paintEvent(self, event):%0A qp = QPainter(self)%0A%0A qp.setRenderHint(QPainter.Antialiasing) # %3C- Set anti-aliasing See https://wiki.python.org/moin/PyQt/Painting%2520and%2520clipping%2520demonstration%0A%0A qp.setPen(QPen(Qt.black, 5, Qt.SolidLine))%0A%0A qp.setBrush(QBrush(Qt.red, Qt.SolidPattern))%0A #qp.setBrush(QBrush(Qt.red, Qt.DiagCrossPattern))%0A%0A points = QPolygon(%5B%0A QPoint(10, 10),%0A QPoint(20, 100),%0A QPoint(100, 50),%0A QPoint(150, 10),%0A QPoint(100, 100)%0A %5D)%0A%0A qp.drawPolygon(points)%0A%0A%0Aif __name__ == '__main__':%0A app = QApplication(sys.argv)%0A%0A widget = MyPaintWidget()%0A widget.show()%0A%0A # The mainloop of the application. The event handling starts from this point.%0A # The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.%0A exit_code = app.exec_()%0A%0A # The sys.exit() method ensures a clean exit.%0A # The environment will be informed, how the application ended.%0A sys.exit(exit_code)%0A
|
|
d56b94ef5acaafcaf11ebcb4ccb5b61390448974
|
Update loading to use results.AddValue(...)
|
tools/perf/metrics/loading.py
|
tools/perf/metrics/loading.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import Metric
class LoadingMetric(Metric):
"""A metric for page loading time based entirely on window.performance"""
def Start(self, page, tab):
raise NotImplementedError()
def Stop(self, page, tab):
raise NotImplementedError()
def AddResults(self, tab, results):
load_timings = tab.EvaluateJavaScript('window.performance.timing')
# NavigationStart relative markers in milliseconds.
load_start = (
float(load_timings['loadEventStart']) - load_timings['navigationStart'])
results.Add('load_start', 'ms', load_start)
dom_content_loaded_start = (
float(load_timings['domContentLoadedEventStart']) -
load_timings['navigationStart'])
results.Add('dom_content_loaded_start', 'ms', dom_content_loaded_start)
fetch_start = (
float(load_timings['fetchStart']) - load_timings['navigationStart'])
results.Add('fetch_start', 'ms', fetch_start, data_type='unimportant')
request_start = (
float(load_timings['requestStart']) - load_timings['navigationStart'])
results.Add('request_start', 'ms', request_start, data_type='unimportant')
# Phase measurements in milliseconds.
domain_lookup_duration = (
float(load_timings['domainLookupEnd']) -
load_timings['domainLookupStart'])
results.Add('domain_lookup_duration', 'ms', domain_lookup_duration,
data_type='unimportant')
connect_duration = (
float(load_timings['connectEnd']) - load_timings['connectStart'])
results.Add('connect_duration', 'ms', connect_duration,
data_type='unimportant')
request_duration = (
float(load_timings['responseStart']) - load_timings['requestStart'])
results.Add('request_duration', 'ms', request_duration,
data_type='unimportant')
response_duration = (
float(load_timings['responseEnd']) - load_timings['responseStart'])
results.Add('response_duration', 'ms', response_duration,
data_type='unimportant')
|
Python
| 0.000001
|
@@ -182,16 +182,51 @@
t Metric
+%0Afrom telemetry.value import scalar
%0A%0Aclass
@@ -729,17 +729,72 @@
ults.Add
-(
+Value(scalar.ScalarValue(%0A results.current_page,
'load_st
@@ -812,24 +812,25 @@
load_start)
+)
%0A%0A dom_co
@@ -960,25 +960,80 @@
results.Add
-(
+Value(scalar.ScalarValue(%0A results.current_page,
'dom_content
@@ -1049,24 +1049,32 @@
tart', 'ms',
+%0A
dom_content
@@ -1087,16 +1087,17 @@
d_start)
+)
%0A%0A fe
@@ -1202,17 +1202,72 @@
ults.Add
-(
+Value(scalar.ScalarValue(%0A results.current_page,
'fetch_s
@@ -1287,38 +1287,33 @@
fetch_start,
- data_type='un
+%0A
important')%0A
@@ -1301,33 +1301,39 @@
important
-'
+=False)
)%0A%0A request_s
@@ -1427,33 +1427,88 @@
%0A results.Add
-(
+Value(scalar.ScalarValue(%0A results.current_page,
'request_start',
@@ -1528,30 +1528,25 @@
t_start,
- data_type='un
+%0A
importan
@@ -1542,25 +1542,31 @@
important
-'
+=False)
)%0A%0A # Pha
@@ -1736,17 +1736,72 @@
ults.Add
-(
+Value(scalar.ScalarValue(%0A results.current_page,
'domain_
@@ -1823,16 +1823,24 @@
', 'ms',
+%0A
domain_
@@ -1859,38 +1859,9 @@
ion,
-%0A data_type='un
+
impo
@@ -1861,25 +1861,31 @@
n, important
-'
+=False)
)%0A%0A conne
@@ -1989,17 +1989,72 @@
ults.Add
-(
+Value(scalar.ScalarValue(%0A results.current_page,
'connect
@@ -2097,37 +2097,16 @@
- data_type='un
importan
@@ -2098,33 +2098,39 @@
important
-'
+=False)
)%0A%0A request_d
@@ -2233,17 +2233,72 @@
ults.Add
-(
+Value(scalar.ScalarValue(%0A results.current_page,
'request
@@ -2341,37 +2341,16 @@
- data_type='un
importan
@@ -2350,17 +2350,23 @@
mportant
-'
+=False)
)%0A%0A r
@@ -2477,17 +2477,72 @@
ults.Add
-(
+Value(scalar.ScalarValue(%0A results.current_page,
'respons
@@ -2591,29 +2591,8 @@
- data_type='un
impo
@@ -2596,11 +2596,17 @@
mportant
-'
+=False)
)%0A
|
7d917f7cbf6f00eec93d97adcdb545ae55c5b345
|
Add core actions file
|
opps/core/actions.py
|
opps/core/actions.py
|
Python
| 0.000001
|
@@ -0,0 +1,1442 @@
+# coding: utf-8%0Aimport csv%0A%0Afrom django.http import HttpResponse%0Afrom django.utils.translation import ugettext_lazy as _%0Afrom django.utils import timezone%0A%0A%0Adef export_to_csv(modeladmin, request, queryset):%0A %22%22%22Exporting queryset results and filter into CSV%22%22%22%0A # limit only staff and super_user accounts%0A if request.user.is_staff or request.user.is_superuser:%0A if queryset.count() %3E 0:%0A # generate response and filename%0A response = HttpResponse(mimetype=%22text/csv%22)%0A today = timezone.now().strftime(%22%25Y-%25M-%25d_%25H:%25M:%25S%22)%0A filename = %22%7B%7D-%7B%7D.csv%22.format(queryset.model, today)%0A response%5B%22Content-Disposition%22%5D = ('attachment; filename=%22%25s%22' %25%0A filename)%0A writer = csv.writer(response)%0A%0A # Get column name%0A columns = %5Bfield.name for field in queryset%5B0%5D._meta.fields%5D%0A writer.writerow(columns)%0A%0A # Write data%0A for obj in queryset:%0A fields = map(lambda x: _generate_value(obj, x), columns)%0A writer.writerow(fields)%0A%0A return response%0A%0Aexport_to_csv.short_description = _(u%22Export results in CSV%22)%0A%0A%0Adef _generate_value(obj, column):%0A %22%22%22Get fields value and convert to ASCIC for string type%22%22%22%0A row = getattr(obj, column)%0A if isinstance(row, basestring):%0A row = row.encode('ascii', 'ignore')%0A return row%0A
|
|
65245e2ef91952f3d9383f520f8e875b8a2a2648
|
add translate type
|
modularodm/fields/BooleanField.py
|
modularodm/fields/BooleanField.py
|
from . import Field
from ..validators import validate_boolean
class BooleanField(Field):
# default = False
validate = validate_boolean
def __init__(self, *args, **kwargs):
super(BooleanField, self).__init__(*args, **kwargs)
|
Python
| 0.000017
|
@@ -137,16 +137,42 @@
_boolean
+%0A translate_type = bool
%0A%0A de
|
ef2bad889159941b344808cb88179135d3908f19
|
Add missing file
|
GroupedPurchaseOrder/api.py
|
GroupedPurchaseOrder/api.py
|
Python
| 0.000006
|
@@ -0,0 +1,2143 @@
+####################################################################################################%0A# %0A# GroupedPurchaseOrder - A Django Application.%0A# Copyright (C) 2014 Fabrice Salvaire%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as%0A# published by the Free Software Foundation, either version 3 of the%0A# License, or (at your option) any later version.%0A# %0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A# %0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A# %0A####################################################################################################%0A%0A####################################################################################################%0A%0Afrom tastypie.authentication import SessionAuthentication, ApiKeyAuthentication%0Afrom tastypie.resources import ModelResource%0Afrom tastypie.authorization import DjangoAuthorization, ReadOnlyAuthorization%0A%0A####################################################################################################%0A%0Afrom .models import Order%0A%0A####################################################################################################%0A%0Aclass OrderResource(ModelResource):%0A%0A class Meta:%0A queryset = Order.objects.all()%0A resource_name = 'order'%0A # authentication = ApiKeyAuthentication()%0A authentication = SessionAuthentication()%0A authorization = ReadOnlyAuthorization()%0A%0A ##############################################%0A%0A def dehydrate(self, bundle):%0A%0A bundle.data%5B'name'%5D = bundle.obj.name()%0A return 
bundle%0A%0A####################################################################################################%0A# %0A# End%0A# %0A####################################################################################################%0A
|
|
7b118afcc96edf335d5c259adc3e2065648ace6f
|
update site URL
|
conf.py
|
conf.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# Data about this site
BLOG_AUTHOR = "chronodekar"
BLOG_TITLE = "Note To Self"
SITE_URL = "https://note2self-abrahamvarricatt.github.io/"
BLOG_EMAIL = "no@email.here"
BLOG_DESCRIPTION = "Snippets of information"
# Multi-lingual settings (Not used - keeping them blank)
DEFAULT_LANG = "en"
TRANSLATIONS = {
DEFAULT_LANG: "",
}
# Sidebar/Navigation
NAVIGATION_LINKS = {
DEFAULT_LANG: (
("/archive.html", "Archives"),
("/categories/index.html", "Tags"),
("/rss.xml", "RSS feed"),
),
}
# Theme settings
THEME = "bootstrap3"
THEME_COLOR = '#5670d4'
# POST/PAGE tuples
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.rst", "stories", "story.tmpl"),
("stories/*.html", "stories", "story.tmpl"),
)
TIMEZONE = "UTC+5:30"
# Mapping language with file extension
COMPILERS = {
"rest": ('.rst', '.txt'),
"html": ('.html', '.htm'),
}
WRITE_TAG_CLOUD = True
POSTS_SECTIONS = True
CATEGORY_ALLOW_HIERARCHIES = False
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
FRONT_INDEX_HEADER = {
DEFAULT_LANG: '',
}
REDIRECTIONS = []
GITHUB_COMMIT_SOURCE = True
OUTPUT_FOLDER = 'output'
IMAGE_FOLDERS = {'images': 'images'}
GLOBAL_CONTEXT = {}
GLOBAL_CONTEXT_FILLER = []
COMMENT_SYSTEM = ""
COMMENT_SYSTEM_ID = ""
LICENSE = ""
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by<a href="https://getnikola.com" rel="nofollow">Nikola</a> {license}'
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE,
}
)
}
|
Python
| 0
|
@@ -166,17 +166,16 @@
= %22http
-s
://note2
@@ -182,35 +182,22 @@
self
--
+.
abraham
-varricatt.github.io
+-v.com
/%22%0AB
|
b2dd561322f6f277f470eae425028412a209da93
|
Add Repository class and module
|
morenines/repository.py
|
morenines/repository.py
|
Python
| 0
|
@@ -0,0 +1,1015 @@
+import os%0A%0Afrom morenines import output%0Afrom morenines import util%0A%0Afrom morenines.index import Index%0Afrom morenines.ignores import Ignores%0A%0A%0ANAMES = %7B%0A 'repo_dir': '.morenines',%0A 'index': 'index',%0A 'ignore': 'ignore',%0A%7D%0A%0A%0Aclass Repository(object):%0A def __init__(self):%0A self.path = None%0A self.index = None%0A self.ignores = None%0A%0A def open(self, path):%0A repo_dir_path = find_repo(path)%0A%0A if not repo_dir_path:%0A output.error(%22Cannot find repository in '%7B%7D' or any parent dir%22.format(path))%0A util.abort()%0A%0A self.path = repo_dir_path%0A%0A self.index = Index.read(os.path.join(self.path, NAMES%5B'index'%5D))%0A%0A self.ignores = Ignores.read(os.path.join(self.path, NAMES%5B'ignore'%5D))%0A%0A%0Adef find_repo(start_path):%0A if start_path == '/':%0A return None%0A%0A path = os.path.join(start_path, NAMES%5B'repo_dir'%5D)%0A%0A if os.path.isdir(path):%0A return path%0A%0A parent = os.path.split(start_path)%5B0%5D%0A%0A return find_repo(parent)%0A
|
|
8273d67aaf74f4f05aa9c9fa86f710a937c708d4
|
Test that submitting twice works
|
mysite/profile/tests.py
|
mysite/profile/tests.py
|
import django.test
from search.models import Project
import twill
from twill import commands as tc
from twill.shell import TwillCommandLoop
from django.test import TestCase
from django.core.servers.basehttp import AdminMediaHandler
from django.core.handlers.wsgi import WSGIHandler
from StringIO import StringIO
# FIXME: Later look into http://stackoverflow.com/questions/343622/how-do-i-submit-a-form-given-only-the-html-source
# Functions you'll need:
def twill_setup():
app = AdminMediaHandler(WSGIHandler())
twill.add_wsgi_intercept("127.0.0.1", 8080, lambda: app)
def twill_teardown():
twill.remove_wsgi_intercept('127.0.0.1', 8080)
def make_twill_url(url):
# modify this
return url.replace("http://openhatch.org/",
"http://127.0.0.1:8080/")
def twill_quiet():
# suppress normal output of twill.. You don't want to
# call this if you want an interactive session
twill.set_output(StringIO())
class ProfileTests(django.test.TestCase):
def setUp(self):
twill_setup()
def tearDown(self):
twill_teardown()
def testSlash(self):
response = self.client.get('/profile/')
def testAddContribution(self):
url = 'http://openhatch.org/profile/'
tc.go(make_twill_url(url))
tc.fv('add_contrib', 'project', 'Babel')
tc.fv('add_contrib', 'contrib_text', 'msgctxt support')
tc.fv('add_contrib', 'url', 'http://babel.edgewall.org/ticket/54')
tc.submit()
# Assert that we are not in some weird GET place with
# CGI args
tc.url(r'^[^?]*$')
tc.find('Babel')
# Verify that leaving and coming back has it still
# there
tc.go(make_twill_url(url))
tc.find('Babel')
|
Python
| 0
|
@@ -1624,16 +1624,224 @@
'Babel')
+%0A tc.fv('add_contrib', 'project', 'Baber')%0A tc.fv('add_contrib', 'contrib_text', 'msgctxt support')%0A tc.fv('add_contrib', 'url', 'http://babel.edgewall.org/ticket/54')%0A tc.submit()
%0A%0A
@@ -1969,10 +1969,35 @@
Babel')%0A
+ tc.find('Baber')%0A
%0A%0A
|
22b851b9c787f6075d1d555dfdf98aed1ef9ef35
|
Add bite_size_tag_name field to search.models.Bug.
|
mysite/search/models.py
|
mysite/search/models.py
|
from django.db import models
from django.core.files.base import ContentFile
from django.core.files.images import get_image_dimensions
from django.conf import settings
import datetime
import StringIO
import Image
import uuid
from django.db.models import Q
from mysite.customs import ohloh
def get_image_data_scaled(image_data, width):
# scale it
image_fd = StringIO.StringIO(image_data)
im = Image.open(image_fd)
image_fd.seek(0)
w, h = get_image_dimensions(image_fd)
new_w = width
new_h = (h * 1.0 / w) * width
smaller = im.resize((new_w, new_h),
Image.ANTIALIAS)
# "Save" it to memory
new_image_fd = StringIO.StringIO()
smaller.save(new_image_fd, format='PNG')
new_image_fd.seek(0)
# pull data out
image_data = new_image_fd.getvalue()
return image_data
# Create your models here.
class Project(models.Model):
@staticmethod
def generate_random_icon_path(instance, filename):
# MEDIA_ROOT is prefixed automatically.
return 'images/icons/projects/%s.png' % uuid.uuid4().hex
name = models.CharField(max_length=200, unique = True)
language = models.CharField(max_length=200)
# FIXME: Replace this with 'icon'
icon_url = models.URLField(max_length=200)
# In case we need it
# dont_use_ohloh_icon = models.BooleanField(default=False)
icon = models.ImageField(
upload_to=lambda a,b: Project.generate_random_icon_path(a, b),
null=True,
default=None)
date_icon_was_fetched_from_ohloh = models.DateTimeField(null=True, default=None)
icon_smaller_for_badge = models.ImageField(
upload_to=lambda a,b: Project.generate_random_icon_path(a,b),
null=True,
default=None)
icon_for_search_result = models.ImageField(
upload_to=lambda a,b: Project.generate_random_icon_path(a,b),
null=True,
default=None)
def populate_icon_from_ohloh(self):
oh = ohloh.get_ohloh()
try:
icon_data = oh.get_icon_for_project(self.name)
self.date_icon_was_fetched_from_ohloh = datetime.datetime.utcnow()
except ValueError:
self.date_icon_was_fetched_from_ohloh = datetime.datetime.utcnow()
return None
# if you want to scale, use get_image_data_scaled(icon_data)
self.icon.save('', ContentFile(icon_data))
# Since we are saving an icon, also update our scaled-down version of
# that icon for the badge.
self.update_scaled_icons_from_self_icon()
def get_url_of_icon_or_generic(self):
if self.icon:
return self.icon.url
else:
return settings.MEDIA_URL + 'no-project-icon.png'
def get_url_of_badge_size_icon_or_generic(self):
if self.icon_smaller_for_badge:
return self.icon_smaller_for_badge.url
else:
return settings.MEDIA_URL + 'no-project-icon-w=40.png'
def get_url_of_search_result_icon_or_generic(self):
if self.icon_for_search_result:
return self.icon_for_search_result.url
else:
return settings.MEDIA_URL + 'no-project-icon-w=20.png'
def update_scaled_icons_from_self_icon(self):
'''This method should be called when you update the Project.icon attribute.
Side-effect: Saves a scaled-down version of that icon in the
Project.icon_smaller_for_badge field.'''
# First of all, do nothing if self.icon is a false value.
if not self.icon:
return
# Okay, now we must have some normal-sized icon.
normal_sized_icon_data = self.icon.file.read()
# Scale it down to badge size, which
# happens to be width=40
badge_icon_data = get_image_data_scaled(normal_sized_icon_data, 40)
self.icon_smaller_for_badge.save('', ContentFile(badge_icon_data))
# Scale normal-sized icon down to a size that fits in the search results--20px by 20px
search_result_icon_data = get_image_data_scaled(normal_sized_icon_data, 20)
self.icon_for_search_result.save('', ContentFile(search_result_icon_data))
def get_contributors(self):
"""Return a list of Person objects who are contributors to
this Project."""
from mysite.profile.models import PortfolioEntry
# What portfolio entries point to this project?
pf_entries = PortfolioEntry.objects.filter(
Q(project=self), Q(is_deleted=False),
Q(is_published=True) )
# List the owners of those portfolio entries.
return [pf_entry.person for pf_entry in pf_entries]
def get_n_other_contributors_than(self, n, person):
# FIXME: Use the method above.
from mysite.profile.models import PortfolioEntry
pf_entries = list(PortfolioEntry.objects.filter(Q(project=self),
~Q(person=person),
Q(is_deleted=False),
Q(is_published=True),
))
import random
random.shuffle(pf_entries)
pf_entries = pf_entries[:n] # Slicing the pf entries has the same effect as
# slicing the list of people.
other_contributors = [p.person for p in pf_entries]
return other_contributors
def __unicode__(self):
return "<Project name='%s' language='%s'>" % (self.name, self.language)
def populate_icon_on_project_creation(instance, created, *args, **kwargs):
if created and not instance.icon:
instance.populate_icon_from_ohloh()
models.signals.post_save.connect(populate_icon_on_project_creation, Project)
# An easy way to find
class OpenBugsManager(models.Manager):
def get_query_set(self):
return super(OpenBugsManager, self).get_query_set().filter(
looks_closed=False)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
canonical_bug_link = models.URLField(max_length=200)
good_for_newcomers = models.BooleanField(default=False)
looks_closed = models.BooleanField(default=False)
all_bugs = models.Manager()
open_ones = OpenBugsManager()
def __unicode__(self):
return "<Bug title='%s' project='%s' project__language='%s' description='%s...'>" % (self.title, self.project.name, self.project.language, self.description[:50])
# vim: set ai ts=4 nu:
|
Python
| 0
|
@@ -6568,16 +6568,74 @@
t=False)
+%0A bize_size_tag_name = models.CharField(max_length=50)
%0A%0A al
|
292b3c99fc294f9855bd7eb26b0089a04a5f93b5
|
Create match_x_y_repetitions.py
|
regex/repetitions/python3/match_x_y_repetitions.py
|
regex/repetitions/python3/match_x_y_repetitions.py
|
Python
| 0.000195
|
@@ -0,0 +1,69 @@
+Regex_Pattern = r'%5E%5Cd%7B1,2%7D%5Ba-zA-z%5D%7B3,%7D%5C.%7B0,3%7D$'%09# Do not delete 'r'.%0A
|
|
f3c8d092b67ad16bdd0937651ef34e3d84b15e2b
|
Add coverage for composer's send_email function (#2174)
|
webapp/tests/test_composer.py
|
webapp/tests/test_composer.py
|
Python
| 0
|
@@ -0,0 +1,1381 @@
+import mock%0A%0Afrom urllib3.response import HTTPResponse%0Afrom graphite.util import BytesIO%0A%0Afrom .base import TestCase%0Atry:%0A from django.urls import reverse%0Aexcept ImportError: # Django %3C 1.10%0A from django.core.urlresolvers import reverse%0A%0A%0Aclass ComposerTest(TestCase):%0A @mock.patch('six.moves.http_client.HTTPConnection.request')%0A @mock.patch('six.moves.http_client.HTTPConnection.getresponse')%0A @mock.patch('graphite.composer.views.SMTP')%0A @mock.patch('django.conf.settings.SMTP_SERVER', 'localhost')%0A def test_send_email(self, mock_smtp, http_response, http_request):%0A url = reverse('composer_send_email')%0A request = %7B %22to%22: %22noreply@localhost%22,%0A %22url%22: 'https://localhost:8000/render?target=sumSeries(a.b.c.d)&title=Test&width=500&from=-55minutes&until=now&height=400'%7D%0A%0A response = self.client.get(reverse('render'), %7B'target': 'test'%7D)%0A self.assertEqual(response%5B'Content-Type'%5D, 'image/png')%0A data = response.content%0A responseObject = HTTPResponse(body=BytesIO(data), status=200, preload_content=False)%0A http_request.return_value = responseObject%0A http_response.return_value = responseObject%0A%0A instance = mock_smtp.return_value%0A instance.sendmail.return_value = %7B%7D%0A%0A response = self.client.get(url, request)%0A self.assertEqual(response.content, b'OK')%0A
|
|
c39c8955782c3015f30a9ef7f8e8783ac105ae70
|
add harvester for dailyssrn:
|
scrapi/harvesters/dailyssrn.py
|
scrapi/harvesters/dailyssrn.py
|
Python
| 0
|
@@ -0,0 +1,1880 @@
+from __future__ import unicode_literals%0A%0Afrom dateutil.parser import parse%0A# from datetime import date, timedelta%0A%0Aimport furl%0Afrom lxml import etree%0A%0Afrom scrapi import requests%0A# from scrapi import settings%0Afrom scrapi.base import XMLHarvester%0Afrom scrapi.linter import RawDocument%0Afrom scrapi.util import copy_to_unicode%0Afrom scrapi.base.helpers import compose, single_result%0A%0A%0Aclass DailyssrnHarvester(XMLHarvester):%0A short_name = 'dailyssrn'%0A long_name = 'RSS Feed from the Social Science Research Network'%0A url = 'http://papers.ssrn.com/'%0A%0A schema = %7B%0A %22description%22: ('//description/node()', compose(lambda x: x.strip(), single_result)),%0A %22title%22: ('//title/node()', compose(lambda x: x.strip(), single_result)),%0A %22providerUpdatedDateTime%22: ('//pubDate/node()', compose(lambda x: x.isoformat(), parse, lambda x: x.strip(), single_result)),%0A %22contributors%22: '//contributors/node()',%0A %22uris%22: %7B%0A %22canonicalUri%22: ('//link/node()', compose(lambda x: x.strip(), single_result)),%0A %7D%0A %7D%0A%0A def harvest(self, start_date=None, end_date=None):%0A%0A url = 'http://dailyssrn.com/rss/rss-all-2.0.xml'%0A%0A data = requests.get(url)%0A doc = etree.XML(data.content)%0A%0A records = doc.xpath('channel/item')%0A%0A xml_list = %5B%5D%0A for record in records:%0A # import ipdb; ipdb.set_trace()%0A doc_id = parse_id_from_url(record.xpath('link/node()'))%0A record = etree.tostring(record)%0A xml_list.append(RawDocument(%7B%0A 'doc': record,%0A 'source': self.short_name,%0A 'docID': copy_to_unicode(doc_id),%0A 'filetype': 'xml'%0A %7D))%0A%0A return xml_list%0A%0A%0Adef parse_id_from_url(url):%0A # import ipdb; ipdb.set_trace()%0A parsed_url = furl.furl(url%5B0%5D)%0A return parsed_url.args%5B'abstract_id'%5D%0A
|
|
118627a170e3ba3cf8863b3bb0cdaf9b5e0441ff
|
Create monte_carlo_sliced_doughnut.py
|
monte_carlo_sliced_doughnut.py
|
monte_carlo_sliced_doughnut.py
|
Python
| 0.000314
|
@@ -0,0 +1,1535 @@
+#center of mass of a sliced doughnut%0A%0Afrom random import random%0Afrom math import pi,sin,cos,atan %0A%0AM=100000000 #number of samples%0Ay_samples = 0 #samples which have been in correct y range%0Ax_samples = 0 #samples which have been in correct x range%0A%0A#do something random%0Adef rand_r():%0A return random()%0A%0Adef rand_theta():%0A return pi*random() - pi/2%0A%0Adef rand_phi():%0A return 2*pi*random()%0A%0A%0Adef x_is_in(r, theta, phi): #check that x value is in correct domain%0A x = (3+r*cos(phi))*cos(theta)%0A if x%3E=1:%0A return True%0A else:%0A return False%0A%0Adef y_is_in(r, theta, phi): #check that y value is in correct domain %0A y = (3+r*cos(phi))*sin(theta)%0A if y%3E=-3:%0A return True%0A else:%0A return False%0A%0A#main function. N is the number of evaluations to make (at random points) withing the sliced doughnut%0Adef monte_carlo(N):%0A i=0%0A actual_sample_size = 0%0A x=y=0 %0A while i%3C=N:%0A r = rand_r()%0A theta = rand_theta()%0A phi = rand_phi()%0A if x_is_in(r, theta, phi) and y_is_in(r, theta, phi):%0A #print (3+r*cos(phi))*cos(theta), (3+r*cos(phi))*sin(theta), r*sin(phi) #temporary: to plot coordinates%0A x += (3+r*cos(phi))*cos(theta)%0A y += (3+r*cos(phi))*sin(theta) %0A actual_sample_size += 1%0A i +=1%0A print 'center of mass in x: ',x/float(actual_sample_size)%0A print 'center of mass in y: ',y/float(actual_sample_size)%0A print 'number of sample points: ',float(actual_sample_size)%0Amonte_carlo(M)%0A
|
|
ee29d0b8a6fd2995441904452d90068bb0fe9af8
|
add explicit mk_rd/mk_wr
|
pclib/ifcs/MemMsg.py
|
pclib/ifcs/MemMsg.py
|
#=========================================================================
# MemMsg
#=========================================================================
# Contains memory request and response messages.
from pymtl import *
import math
#-------------------------------------------------------------------------
# MemReqMsg
#-------------------------------------------------------------------------
# Memory request messages can either be for a read or write. Read
# requests include an address and the number of bytes to read, while
# write requests include an address, the number of bytes to write, and
# the actual data to write.
#
# Message Format:
#
# 1b addr_nbits calc data_nbits
# +------+-----------+------+-----------+
# | type | addr | len | data |
# +------+-----------+------+-----------+
#
# The message type is parameterized by the number of address and data
# bits. Note that the size of the length field is caclulated from the
# number of bits in the data field, and that the length field is
# expressed in _bytes_. If the value of the length field is zero, then
# the read or write should be for the full width of the data field.
#
# For example, if the address size is 32 bits and the data size is also
# 32 bits, then the message format is as follows:
#
# 66 66 65 34 33 32 31 0
# +------+-----------+------+-----------+
# | type | addr | len | data |
# +------+-----------+------+-----------+
#
# The length field is two bits. A length value of one means read or write
# a single byte, a length value of two means read or write two bytes, and
# so on. A length value of zero means read or write all four bytes. Note
# that not all memories will necessarily support any alignment and/or any
# value for the length field.
class MemReqMsg( BitStructDefinition ):
  """Bit-struct for a memory request: | type | addr | len | data |.

  Parameterized by address and data width.  The len field is a byte
  count; a value of zero encodes a full-data-width access (see the
  module-level comment for the exact layout).
  """
  # Request type encodings.
  TYPE_READ = 0
  TYPE_WRITE = 1
  def __init__( s, addr_nbits, data_nbits ):
    # The length field only needs to count bytes up to data_nbits/8,
    # since zero encodes a full-width access.
    s.type_nbits = 1
    s.addr_nbits = addr_nbits
    s.len_nbits = int( math.ceil( math.log( data_nbits/8, 2) ) )
    s.data_nbits = data_nbits
    s.type_ = BitField( s.type_nbits )
    s.addr = BitField( s.addr_nbits )
    s.len = BitField( s.len_nbits )
    s.data = BitField( s.data_nbits )
  def mk_msg( s, type_, addr, len_, data ):
    """Construct a request message with every field populated."""
    msg = s()
    msg.type_ = type_
    msg.addr = addr
    msg.len = len_
    msg.data = data
    return msg
  def __str__( s ):
    # Reads render blanks where write data would go, so read and write
    # lines stay column-aligned in line traces.
    if s.type_ == MemReqMsg.TYPE_READ:
      return "rd:{}:{}".format( s.addr, ' '*(s.data.nbits/4) )
    elif s.type_ == MemReqMsg.TYPE_WRITE:
      return "wr:{}:{}".format( s.addr, s.data )
#-------------------------------------------------------------------------
# MemReqMsg
#-------------------------------------------------------------------------
# Memory response messages can either be for a read or write. Read
# responses include the actual data and the number of bytes, while write
# responses currently include nothing other than the type.
#
# Message Format:
#
# 1b calc data_nbits
# +------+------+-----------+
# | type | len | data |
# +------+------+-----------+
#
# The message type is parameterized by the number of address and data
# bits. Note that the size of the length field is caclulated from the
# number of bits in the data field, and that the length field is
# expressed in _bytes_. If the value of the length field is zero, then
# the read or write should be for the full width of the data field.
#
# For example, if the address size is 32 bits and the data size is also
# 32 bits, then the message format is as follows:
#
# 34 34 33 32 31 0
# +------+------+-----------+
# | type | len | data |
# +------+------+-----------+
#
# The length field is two bits. A length value of one means a single byte
# of read data is valid, a length value of two means two bytes of read
# data is valid, and so on. A length value of zero means all four bytes
# of the read data is valid. Note that not all memories will necessarily
# support any alignment and/or any value for the length field.
class MemRespMsg( BitStructDefinition ):
  """Bit-struct for a memory response: | type | len | data |.

  Parameterized by data width.  The len field is a byte count; a value
  of zero means all bytes of the read data are valid.
  """
  # Response type encodings (mirror MemReqMsg).
  TYPE_READ = 0
  TYPE_WRITE = 1
  def __init__( s, data_nbits ):
    # Length field sized to count bytes up to data_nbits/8; zero encodes
    # a full-width response.
    s.type_nbits = 1
    s.len_nbits = int( math.ceil( math.log( data_nbits/8, 2 ) ) )
    s.data_nbits = data_nbits
    s.type_ = BitField( s.type_nbits )
    s.len = BitField( s.len_nbits )
    s.data = BitField( s.data_nbits )
  def mk_msg( s, type_, len_, data ):
    """Construct a response message with every field populated."""
    msg = s()
    msg.type_ = type_
    msg.len = len_
    msg.data = data
    return msg
  # What exactly is this method for? -cbatten
  # NOTE(review): appears to reinterpret a raw bit pattern *msg* as a
  # MemRespMsg by assigning it to .value -- confirm against callers.
  def unpck( s, msg ):
    resp = s()
    resp.value = msg
    return resp
  def __str__( s ):
    # Writes render blanks where read data would go, keeping trace
    # output column-aligned.
    if s.type_ == MemRespMsg.TYPE_READ:
      return "rd:{}".format( s.data )
    elif s.type_ == MemRespMsg.TYPE_WRITE:
      return "wr:{}".format( ' '*(s.data.nbits/4) )
#-------------------------------------------------------------------------
# MemMsg
#-------------------------------------------------------------------------
# Single class that contains both the memory request and response types.
# This simplifies parameterizing models both both message types since (1)
# we can specifcy the address and data nbits in a single step, and (2) we
# can pass a single object into the parameterized model.
class MemMsg( object ):
  """Bundle of the request and response message types for one memory port.

  Lets models be parameterized with a single object (and one pair of
  addr/data widths) instead of separate request/response types.
  """
  def __init__( s, addr_nbits, data_nbits ):
    s.req = MemReqMsg ( addr_nbits, data_nbits )
    s.resp = MemRespMsg( data_nbits )
|
Python
| 0
|
@@ -2389,16 +2389,354 @@
rn msg%0A%0A
+ def mk_rd( s, addr, len_ ):%0A%0A msg = s()%0A msg.type_ = MemReqMsg.TYPE_READ%0A msg.addr = addr%0A msg.len = len_%0A msg.data = 0%0A%0A return msg%0A%0A def mk_wr( s, addr, len_, data ):%0A%0A msg = s()%0A msg.type_ = MemReqMsg.TYPE_WRITE%0A msg.addr = addr%0A msg.len = len_%0A msg.data = data%0A%0A return msg%0A%0A
def __
|
e74f78a663a8467e19d071d8e68ef11689c0b7ec
|
Add replay.py
|
perf/tests/replay.py
|
perf/tests/replay.py
|
Python
| 0.000001
|
@@ -0,0 +1,2238 @@
+from __future__ import division, print_function, absolute_import%0A%0Aimport os%0Aimport perf%0Aimport tempfile%0A%0A%0Adef get_raw_values(filename, run_id):%0A bench = perf.Benchmark.load(filename)%0A run = bench.get_runs()%5Brun_id%5D%0A inner_loops = run._get_inner_loops()%0A raw_values = %5Bvalue * (loops * inner_loops)%0A for loops, value in run.warmups%5D%0A total_loops = run.get_total_loops()%0A raw_values.extend(value * total_loops for value in run.values)%0A return (run, raw_values)%0A%0A%0Aclass Replay(object):%0A def __init__(self, runner, filename):%0A self.runner = runner%0A self.args = runner.args%0A self.filename = filename%0A self.value_id = 0%0A self.init()%0A%0A def init(self):%0A args = runner.args%0A if args.worker:%0A self.read_session()%0A%0A run, self.raw_values = get_raw_values(self.filename, self.run_id)%0A args.loops = run._get_loops()%0A # FIXME: handle inner_loops%0A self.run_id += 1%0A self.write_session()%0A else:%0A args.session_filename = tempfile.mktemp()%0A self.run_id = self.args.first_run - 1%0A self.write_session()%0A%0A def read_session(self):%0A filename = self.args.session_filename%0A with open(filename, %22r%22) as fp:%0A line = fp.readline()%0A self.run_id = int(line.rstrip())%0A%0A def write_session(self):%0A filename = self.args.session_filename%0A with open(filename, %22w%22) as fp:%0A print(self.run_id, file=fp)%0A fp.flush()%0A%0A def time_func(self, loops):%0A raw_value = self.raw_values%5Bself.value_id%5D%0A self.value_id += 1%0A return raw_value%0A%0A%0Adef add_cmdline_args(cmd, args):%0A cmd.append(args.filename)%0A if args.session_filename:%0A cmd.extend(('--session-filename', args.session_filename))%0A%0A%0Arunner = perf.Runner(add_cmdline_args=add_cmdline_args)%0Arunner.argparser.add_argument('filename')%0Arunner.argparser.add_argument('--session-filename')%0Arunner.argparser.add_argument('--first-run', type=int, default=1)%0A%0Aargs = runner.parse_args()%0Areplay = Replay(runner, 
args.filename)%0Arunner.bench_time_func('bench', replay.time_func)%0Aif not args.worker:%0A os.unlink(args.session_filename)%0A
|
|
e8561caeb3c95633e99f540965d33a67046df3e5
|
Add __init__ module for the `msgpack_rpc` subpackage
|
neovim/msgpack_rpc/__init__.py
|
neovim/msgpack_rpc/__init__.py
|
Python
| 0.000009
|
@@ -0,0 +1,1224 @@
+%22%22%22Msgpack-rpc subpackage.%0A%0AThis package implements a msgpack-rpc client. While it was designed for%0Ahandling some Nvim particularities(server-%3Eclient requests for example), the%0Acode here should work with other msgpack-rpc servers.%0A%22%22%22%0Afrom .async_session import AsyncSession%0Afrom .event_loop import EventLoop%0Afrom .msgpack_stream import MsgpackStream%0Afrom .session import Session%0A%0A%0A__all__ = ('tcp_session', 'socket_session', 'stdio_session', 'spawn_session')%0A%0A%0Adef session(transport_type='stdio', *args, **kwargs):%0A loop = EventLoop(transport_type, *args, **kwargs)%0A msgpack_stream = MsgpackStream(loop)%0A async_session = AsyncSession(msgpack_stream)%0A session = Session(async_session)%0A return session%0A%0A%0Adef tcp_session(address, port=7450):%0A %22%22%22Create a msgpack-rpc session from a tcp address/port.%22%22%22%0A return session('tcp', address, port)%0A%0A%0Adef socket_session(path):%0A %22%22%22Create a msgpack-rpc session from a unix domain socket.%22%22%22%0A return session('socket', path)%0A%0A%0Adef stdio_session():%0A %22%22%22Create a msgpack-rpc session from stdin/stdout.%22%22%22%0A return session('stdio')%0A%0A%0Adef spawn_session(argv):%0A %22%22%22Create a msgpack-rpc session from a new Nvim instance.%22%22%22%0A return session('spawn', argv)%0A
|
|
92cb3088d63ff6fc511c01d1d151f1f1857df496
|
create smiles tokenizer unit test
|
deepchem/feat/tests/test_smiles_tokenizer.py
|
deepchem/feat/tests/test_smiles_tokenizer.py
|
Python
| 0
|
@@ -0,0 +1,824 @@
+# Requriments - transformers, tokenizers%0A%0Afrom unittest import TestCase%0Afrom deepchem.feat.smiles_tokenizer import SmilesTokenizer%0Afrom transformers import RobertaForMaskedLM%0A%0A%0Aclass TestSmilesTokenizer(TestCase):%0A %22%22%22Tests the SmilesTokenizer to load the USPTO vocab file and a ChemBERTa Masked LM model with pre-trained weights..%22%22%22%0A%0A%0A def test_featurize(self):%0A from rdkit import Chem%0A smiles = %5B%22Cn1c(=O)c2c(ncn2C)n(C)c1=O%22, %22CC(=O)N1CN(C(C)=O)C(O)C1O%22%5D%0A mols = %5BChem.MolFromSmiles(smile) for smile in smiles%5D%0A featurizer = dc.feat.one_hot.OneHotFeaturizer(dc.feat.one_hot.zinc_charset)%0A one_hots = featurizer.featurize(mols)%0A untransformed = featurizer.untransform(one_hots)%0A assert len(smiles) == len(untransformed)%0A for i in range(len(smiles)):%0A assert smiles%5Bi%5D == untransformed%5Bi%5D%5B0%5D%0A
|
|
97d455da87d3175c1d5cf2ce3091f26184cf4a10
|
Add heos discoverable (#250)
|
netdisco/discoverables/heos.py
|
netdisco/discoverables/heos.py
|
Python
| 0
|
@@ -0,0 +1,296 @@
+%22%22%22Discover Heos devices.%22%22%22%0Afrom . import SSDPDiscoverable%0A%0A%0Aclass Discoverable(SSDPDiscoverable):%0A %22%22%22Add support for discovering DLNA services.%22%22%22%0A%0A def get_entries(self):%0A %22%22%22Get all the HEOS devices.%22%22%22%0A return self.find_by_st(%22urn:schemas-denon-com:device:ACT-Denon:1%22)%0A
|
|
7e02831897baae9206fd2c0d53fc1c313c780074
|
Test case fixed
|
erpnext/accounts/doctype/payment_request/test_payment_request.py
|
erpnext/accounts/doctype/payment_request/test_payment_request.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
from erpnext.accounts.doctype.payment_request.payment_request import make_payment_request
from erpnext.accounts.doctype.sales_invoice.test_sales_invoice import create_sales_invoice
# test_records = frappe.get_test_records('Payment Request')
test_dependencies = ["Currency Exchange", "Journal Entry", "Contact", "Address"]
payment_gateway = {
"doctype": "Payment Gateway",
"gateway": "_Test Gateway"
}
payment_method = [
{
"doctype": "Payment Gateway Account",
"is_default": 1,
"gateway": "_Test Gateway",
"payment_account": "_Test Bank - _TC",
"currency": "INR"
},
{
"doctype": "Payment Gateway Account",
"gateway": "_Test Gateway",
"payment_account": "_Test Bank - _TC",
"currency": "USD"
}
]
class TestPaymentRequest(unittest.TestCase):
	"""Integration tests for Payment Request creation, linking and payment."""
	def setUp(self):
		# Ensure the test gateway and its gateway accounts (INR and USD)
		# exist; the inserts are guarded so reruns are idempotent.
		if not frappe.db.get_value("Payment Gateway", payment_gateway["gateway"], "name"):
			frappe.get_doc(payment_gateway).insert(ignore_permissions=True)
		for method in payment_method:
			if not frappe.db.get_value("Payment Gateway Account", {"gateway": method["gateway"],
				"currency": method["currency"]}, "name"):
				frappe.get_doc(method).insert(ignore_permissions=True)
	def test_payment_request_linkings(self):
		# A Payment Request should record its reference doctype/name and
		# inherit the currency of the document it was made from.
		SO_INR = make_sales_order(currency="INR")
		pr = make_payment_request(dt="Sales Order", dn=SO_INR.name, recipient_id="saurabh@erpnext.com")
		self.assertEquals(pr.reference_doctype, "Sales Order")
		self.assertEquals(pr.reference_name, SO_INR.name)
		self.assertEquals(pr.currency, "INR")
		SI_USD = create_sales_invoice(currency="USD", conversion_rate=50)
		pr = make_payment_request(dt="Sales Invoice", dn=SI_USD.name, recipient_id="saurabh@erpnext.com")
		self.assertEquals(pr.reference_doctype, "Sales Invoice")
		self.assertEquals(pr.reference_name, SI_USD.name)
		self.assertEquals(pr.currency, "USD")
	def test_payment_entry(self):
		# Marking a submitted Payment Request as paid should create a
		# Journal Entry and advance the Sales Order by the debited amount.
		SO_INR = make_sales_order(currency="INR")
		pr = make_payment_request(dt="Sales Order", dn=SO_INR.name, recipient_id="saurabh@erpnext.com",
			mute_email=1, submit_doc=1)
		jv = pr.set_as_paid()
		SO_INR = frappe.get_doc("Sales Order", SO_INR.name)
		self.assertEquals(SO_INR.advance_paid, jv.total_debit)
		# NOTE(review): saving this second request is expected to fail
		# validation -- presumably a gateway/currency mismatch; confirm.
		SI_USD = create_sales_invoice(customer="_Test Customer USD", debit_to="_Test Receivable USD - _TC",
			currency="USD", conversion_rate=50)
		pr = make_payment_request(dt="Sales Invoice", dn=SI_USD.name, recipient_id="saurabh@erpnext.com",
			mute_email=1, return_doc=1, payemnt_gateway="_Test Gateway - USD")
		self.assertRaises(frappe.ValidationError, pr.save)
|
Python
| 0.000001
|
@@ -937,32 +937,36 @@
t%22: %22_Test Bank
+USD
- _TC%22,%0A%09%09%22curre
|
b90e3b0bce680154d7fea8ed071f740db963c402
|
fix bug 1458641: fix reports-clean crontabber app
|
alembic/versions/e70541df7ed7_bug_1458641_fix_reports_clean.py
|
alembic/versions/e70541df7ed7_bug_1458641_fix_reports_clean.py
|
Python
| 0
|
@@ -0,0 +1,488 @@
+%22%22%22bug 1458641 fix reports clean crontabber app%0A%0ARevision ID: e70541df7ed7%0ARevises: 3474e98b321f%0ACreate Date: 2018-05-02 18:20:19.064954%0A%0A%22%22%22%0A%0Afrom alembic import op%0Afrom socorro.lib.migrations import load_stored_proc%0A%0A%0A# revision identifiers, used by Alembic.%0Arevision = 'e70541df7ed7'%0Adown_revision = '3474e98b321f'%0A%0A%0Adef upgrade():%0A # Note: This should have been done in migration 3474e98b321f.%0A load_stored_proc(op, %5B'001_update_reports_clean.sql'%5D)%0A%0A%0Adef downgrade():%0A pass%0A
|
|
307816d3b6cb5e57f50f80e10eccd0a701c698a9
|
Fix syntax errors.
|
pymatgen/io/abinitio/__init__.py
|
pymatgen/io/abinitio/__init__.py
|
from .calculations import *
from .eos import *
from .pseudos import *
from .netcdf import *
from .events import *
from .task import *
from .workflow import *
|
Python
| 0.000017
|
@@ -1,32 +1,4 @@
-from .calculations import *%0A
from
@@ -89,16 +89,17 @@
om .task
+s
import
@@ -114,16 +114,45 @@
workflow
+s import *%0Afrom .calculations
import
|
d4e6f44c5257afd02883bd739cf6b3953e398857
|
Create auth.py
|
photonix/photos/auth.py
|
photonix/photos/auth.py
|
Python
| 0.000001
|
@@ -0,0 +1,1032 @@
+import os%0A%0Afrom django.contrib.auth import get_library_model%0A%0Aimport graphene%0Afrom graphene_django.types import DjangoObjectType%0Aimport graphql_jwt%0A%0ALibrary = get_library_model()%0A%0Aclass Mutation(graphene.ObjectType):%0A token_auth = graphql_jwt.ObtainJSONWebToken.Field()%0A verify_token = graphql_jwt.Verify.Field()%0A refresh_token = graphql_jwt.Refresh.Field()%0A revoke_token = graphql_jwt.Revoke.Field()%0A%0Aclass UserType(DjangoObjectType):%0A class Meta:%0A model = Library%0A%0Aclass Environment(graphene.ObjectType):%0A demo = graphene.Boolean()%0A first_run = graphene.Boolean()%0A%0Aclass Query(graphene.ObjectType):%0A profile = graphene.Field(UserType)%0A environment = graphene.Field(Environment)%0A%0A def resolve_profile(self, info):%0A user = info.context.user%0A if user.is_anonymous:%0A raise Exception('Not logged in')%0A return user%0A%0A def resolve_environment(self, info):%0A return %7B%0A 'demo': os.environ.get('DEMO', False),%0A 'first_run': False,%0A %7D%0A
|
|
2e084358f3ff38c40ba443b5fadf549bded82eb7
|
FIX typo in example. My bad.
|
examples/decomposition/plot_faces_decomposition.py
|
examples/decomposition/plot_faces_decomposition.py
|
"""
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print __doc__
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD
import logging
from time import time
from numpy.random import RandomState
import pylab as pl
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print "Dataset consists of %d faces" % n_samples
###############################################################################
def plot_gallery(title, images):
    """Plot a gallery of component images on an n_row x n_col grid.

    Each entry of *images* is a flat array reshaped to *image_shape*.
    Color limits are symmetric around zero so positive and negative
    component weights are visually comparable.
    """
    pl.figure(figsize=(2. * n_col, 2.26 * n_row))
    pl.suptitle(title, size=16)
    for i, comp in enumerate(images):
        pl.subplot(n_row, n_col, i + 1)
        # Symmetric color scale centered on zero.
        vmax = max(comp.max(), -comp.min())
        pl.imshow(comp.reshape(image_shape), cmap=pl.cm.gray,
                  interpolation='nearest',
                  vmin=-vmax, vmax=vmax)
        pl.xticks(())
        pl.yticks(())
    pl.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True, False),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False, False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True,
max_iter=10),
True, True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, chunk_size=3,
random_state=rng),
True, False),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_atoms=15, alpha=0.1,
n_iter=50, chunk_size=3,
random_state=rng),
True, False),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_cluster=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True, False)
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center, transpose in estimators:
print "Extracting the top %d %s..." % (n_components, name)
t0 = time()
data = faces
if center:
data = faces_centered
if transpose:
data = data.T
estimator.fit(data)
train_time = (time() - t0)
print "done in %0.3fs" % train_time
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if transpose:
components_ = components_.T
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
pl.show()
|
Python
| 0.000001
|
@@ -3151,16 +3151,17 @@
_cluster
+s
=n_compo
|
ed05755f1f5213cdd95203f51d0097bfbd91e6e1
|
Create FindtheDifference_BitandHash.py
|
leetcode/389-Find-the-Difference/FindtheDifference_BitandHash.py
|
leetcode/389-Find-the-Difference/FindtheDifference_BitandHash.py
|
Python
| 0.000001
|
@@ -0,0 +1,391 @@
+class Solution(object):%0A def findTheDifference(self, s, t):%0A %22%22%22%0A :type s: str%0A :type t: str%0A :rtype: str%0A %22%22%22%0A chrs = %7B%7D%0A res = 0%0A for w in t:%0A if w not in chrs:%0A chrs%5Bw%5D = 1 %3C%3C (ord(w) - 97)%0A res += chrs%5Bw%5D%0A for w in s:%0A res -= chrs%5Bw%5D%0A return chr(len(bin(res)) + 94)%0A
|
|
fddcad6de86d8d1dbd37c549b0d4258260c13a3a
|
Read in sample text file 'proteinGroups.txt'
|
proteinGroups.py
|
proteinGroups.py
|
Python
| 0.000001
|
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Thu Oct 8 20:48:41 2015%0A%0A@author: student%0A%22%22%22%0A%0Aimport pandas as pd%0A#import numpy as np%0A%0A# read in file%0Adf = pd.read_table('/Users/student/Desktop/pubs/Sample text files/proteinGroups.txt', index_col=0)%0A#print df.dtypes%0Aprint df%5B'Intensity'%5D
|
|
558d45ed0b9c3d375daa81383125e0c4664df7af
|
Add adwaita-icon-theme
|
packages/adwaita-icon-theme.py
|
packages/adwaita-icon-theme.py
|
Python
| 0
|
@@ -0,0 +1,85 @@
+GnomeXzPackage ('adwaita-icon-theme', version_major = '3.16', version_minor = '2.1')%0A
|
|
af2a0a851be91931f96a7e9d44a1e8c460d70918
|
Migrate creation date to new casebooks
|
web/main/migrations/0052_migrate_casebook_dates.py
|
web/main/migrations/0052_migrate_casebook_dates.py
|
Python
| 0.000002
|
@@ -0,0 +1,658 @@
+# Generated by Django 2.2.10 on 2020-04-14 11:38%0A%0Afrom django.db import migrations%0Afrom main.models import Casebook%0A%0Adef copy_old_dates(app, schema):%0A update_cbs = %5B%5D%0A for casebook in Casebook.objects.select_related('old_casebook').all():%0A if casebook.old_casebook:%0A casebook.created_at = casebook.old_casebook.created_at%0A update_cbs.append(casebook)%0A Casebook.objects.bulk_update(update_cbs, %5B'created_at'%5D)%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('main', '0051_auto_20200407_1714'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(copy_old_dates, migrations.RunPython.noop)%0A %5D%0A
|
|
a62b95b4764ab0cab3d14f98798319913c83c044
|
fix name display
|
quora/pyquora.py
|
quora/pyquora.py
|
import requests
from bs4 import BeautifulSoup
import feedparser
import re
### Configuration ###
POSSIBLE_FEED_KEYS = ['link', 'id', 'published', 'title', 'summary']
### Enumerated Types ###
def enum(*sequential, **named):
    """Build a simple enumerated type.

    Positional names are numbered 0..len-1; keyword arguments supply
    explicit values.  The returned class also carries a
    ``reverse_mapping`` dict from value back to name.
    """
    enums = dict(zip(sequential, range(len(sequential))), **named)
    # Use .items() rather than the Python-2-only .iteritems() so this
    # works on both Python 2 and 3; behavior is identical.
    enums['reverse_mapping'] = {value: key for key, value in enums.items()}
    return type('Enum', (), enums)

ACTIVITY_ITEM_TYPES = enum(UPVOTE=1, USER_FOLLOW=2, QUESTION_FOLLOW=3, ANSWER=4, QUESTION=5, REVIEW_REQUEST=6)
####################################################################
# Helpers
####################################################################
def try_cast(s):
    """Return *s* converted to an int when possible, otherwise unchanged."""
    try:
        value = int(s)
    except ValueError:
        # Not a plain integer string; hand it back as-is.
        return s
    return value
def get_count(element):
    # *element* is expected to be a BeautifulSoup tag containing a
    # 'profile-tab-count' span (e.g. "1,234"); strip the thousands
    # separators and cast to int where possible.
    return try_cast(element.find('span', class_='profile-tab-count').string.replace(',', ''))
def get_count_for_user_href(soup, user, suffix):
    # Locate the profile link '/<user>/<suffix>' (e.g. '/alice/answers')
    # and read the counter attached to it.
    return get_count(soup.find('a', class_='link_label', href='/' + user + '/' + suffix))
def build_feed_item(item):
    """Extract the recognised feed fields from a feedparser entry.

    Returns a plain dict containing only the POSSIBLE_FEED_KEYS that are
    actually present on *item*.
    """
    # The original bound the result to a name shadowing the builtin
    # ``dict``; a comprehension avoids both the shadowing and the loop.
    return {key: item[key] for key in POSSIBLE_FEED_KEYS if key in item}
def check_activity_type(description):
    """Classify an activity feed entry from its HTML description.

    Returns one of ACTIVITY_ITEM_TYPES, or None when the marker div is
    absent.  Anything unrecognised is assumed to be a user follow.
    """
    soup = BeautifulSoup(description)
    tag = soup.find('div', style="color: #666666;")
    if tag is None:
        return None
    # First matching phrase wins; order matches the original elif chain.
    phrase_to_type = [
        ('voted up this', ACTIVITY_ITEM_TYPES.UPVOTE),
        ('followed a question', ACTIVITY_ITEM_TYPES.QUESTION_FOLLOW),
        ('added this answer', ACTIVITY_ITEM_TYPES.ANSWER),
        ('added a question', ACTIVITY_ITEM_TYPES.QUESTION),
        ('requested reviews.', ACTIVITY_ITEM_TYPES.REVIEW_REQUEST),
    ]
    for phrase, activity_type in phrase_to_type:
        if phrase in tag.string:
            return activity_type
    return ACTIVITY_ITEM_TYPES.USER_FOLLOW
def is_new_ui(soup):
    # The presence of a 'ProfileTabs' div is what distinguishes the
    # redesigned profile page markup from the old one.
    return soup.find('div', attrs={'class': 'ProfileTabs'}) is not None
####################################################################
# API
####################################################################
class Quora:
    """Scrapes public quora.com profile pages and per-user RSS feeds."""
    @staticmethod
    def get_user_stats(user):
        """Return a dict of profile counters scraped from the user's page."""
        soup = BeautifulSoup(requests.get('http://www.quora.com/' + user).text)
        data_stats = []
        err = None
        # Collect the digits from each profile_count span; positional
        # order of the spans determines which counter is which (see the
        # indices in the dict below).
        for item in soup.findAll('span', attrs={'class' : 'profile_count'}):
            m = re.findall('\d', str(item))
            element = ''.join(m)
            data_stats.append(element)
        # NOTE(review): blogs/topics are not exposed by this markup,
        # hence the None placeholders.
        user_dict = {'answers' : try_cast(data_stats[1]),
                'blogs' : err,
                'edits' : try_cast(data_stats[5]),
                'followers' : try_cast(data_stats[3]),
                'following' : try_cast(data_stats[4]),
                'name' : user,
                'posts' : try_cast(data_stats[2]),
                'questions' : try_cast(data_stats[0]),
                'topics' : err,
                'username' : user }
        return user_dict
    @staticmethod
    def get_user_activity(user):
        """Return the user's raw RSS activity as a dict of feed items."""
        f = feedparser.parse('http://www.quora.com/' + user + '/rss')
        dict = {
            'username': user,
            'last_updated': f.feed.updated
        }
        for entry in f.entries:
            if 'activity' not in dict.keys():
                dict['activity'] = []
            dict['activity'].append(build_feed_item(entry))
        return dict
    @staticmethod
    def get_activity(user):
        """Return the user's RSS activity sorted into an Activity object."""
        f = feedparser.parse('http://www.quora.com/' + user + '/rss')
        activity = Activity()
        # Route each feed entry into the matching Activity bucket;
        # entries of unknown type are dropped.
        for entry in f.entries:
            type = check_activity_type(entry['description'])
            if type is not None:
                if type == ACTIVITY_ITEM_TYPES.UPVOTE:
                    activity.upvotes.append(build_feed_item(entry))
                elif type == ACTIVITY_ITEM_TYPES.USER_FOLLOW:
                    activity.user_follows.append(build_feed_item(entry))
                elif type == ACTIVITY_ITEM_TYPES.QUESTION_FOLLOW:
                    activity.question_follows.append(build_feed_item(entry))
                elif type == ACTIVITY_ITEM_TYPES.ANSWER:
                    activity.answers.append(build_feed_item(entry))
                elif type == ACTIVITY_ITEM_TYPES.QUESTION:
                    activity.questions.append(build_feed_item(entry))
                elif type == ACTIVITY_ITEM_TYPES.REVIEW_REQUEST:
                    activity.review_requests.append(build_feed_item(entry))
        return activity
    @staticmethod
    def get_activity_keys():
        """Return the feed-item keys that build_feed_item may populate."""
        return POSSIBLE_FEED_KEYS
class Activity:
    """Container for a user's categorised activity feed items.

    Each attribute holds a list of feed-item dicts (see build_feed_item).
    """
    def __init__(self, upvotes=None, user_follows=None, question_follows=None,
                 answers=None, questions=None, review_requests=None):
        # The original used mutable default arguments ([]), which Python
        # evaluates once and shares across every instance created with
        # the defaults -- so one Activity's appends leaked into all
        # others.  None sentinels give each instance fresh lists.
        self.upvotes = upvotes if upvotes is not None else []
        self.user_follows = user_follows if user_follows is not None else []
        self.question_follows = question_follows if question_follows is not None else []
        self.answers = answers if answers is not None else []
        self.questions = questions if questions is not None else []
        self.review_requests = review_requests if review_requests is not None else []
|
Python
| 0.000001
|
@@ -777,16 +777,74 @@
turn s%0A%0A
+def render_name(user):%0A return user.replace('-', ' ')%0A%0A
def get_
@@ -2918,20 +2918,33 @@
:
+render_name(
user
+)
,%0A
|
7e53ec5de1d094eafa6a0bba6efcdaf845d5a7b8
|
Create 0007.py
|
pylyria/0007/0007.py
|
pylyria/0007/0007.py
|
Python
| 0.00013
|
@@ -0,0 +1,1275 @@
+#! /usr/bin/env python%0A# -*- coding: utf-8 -*-%0A# vim:fenc=utf-8%0A# Copyright By PyLyria%0A# CreateTime: 2016-03-04 19:36:40%0Aimport os%0A%0Adef get_path(root = os.curdir):%0A root += os.sep%0A for path, dirs, files in os.walk(root):%0A for file_name in files:%0A yield path, file_name%0A%0Adef get_lines(file_name):%0A with open(file_name,'rt',encoding='utf-8') as f:%0A for line in f:%0A yield line.strip()%0A%0Aif __name__ == '__main__':%0A paths = get_path()%0A format = ('.py', '.c', '.cpp', '.sql')%0A annotation = ('#', '//', '--', '/*')%0A code_statistics = %7B%7D%0A%0A for path, file_name in paths:%0A if file_name.endswith(format):%0A code_statistics%5Bfile_name%5D = %7B%7D%0A lines = get_lines(path + os.sep + file_name)%0A for line in lines:%0A if len(line) ==0:%0A code_statistics%5Bfile_name%5D%5B'EmptyLine'%5D = code_statistics%5Bfile_name%5D.get('EmptyLine', 0) + 1%0A elif line.startswith(annotation):%0A code_statistics%5Bfile_name%5D%5B'AnnotationLine'%5D = code_statistics%5Bfile_name%5D.get('AnnotationLine', 0) + 1%0A else:%0A code_statistics%5Bfile_name%5D%5B'CodeLine'%5D = code_statistics%5Bfile_name%5D.get('CodeLine', 0) + 1%0A%0A print(code_statistics)%0A
|
|
0b4c873ad2d0923e88fbee5b52435ff1ee68d03c
|
Create PedidoVer.py
|
backend/Models/Grau/PedidoVer.py
|
backend/Models/Grau/PedidoVer.py
|
Python
| 0
|
@@ -0,0 +1,320 @@
+from Framework.Pedido import Pedido%0Afrom Framework.ErroNoHTTP import ErroNoHTTP%0A%0Aclass PedidoVer(Pedido):%0A%0A%09def __init__(self,variaveis_do_ambiente):%0A%09%09super(PedidoVer, self).__init__(variaveis_do_ambiente)%0A%09%09try:%0A%09%09%09self.id = self.corpo%5B'id'%5D%09%09%09%0A%09%09except:%0A%09%09%09raise ErroNoHTTP(400)%0A%09%09%0A%09def getId(self):%0A%09%09return self.id%0A
|
|
d72f6c0d989c3f40d460b1ce5d45b7ebf27ec295
|
create tests for kornia
|
tests/test_kornia.py
|
tests/test_kornia.py
|
Python
| 0
|
@@ -0,0 +1,508 @@
+import unittest%0A%0Aimport cv2%0Aimport torch%0Aimport kornia%0A%0Aclass TestOpenCV(unittest.TestCase):%0A def test_imread_opencv(self):%0A img = cv2.imread('/input/tests/data/dot.png')%0A img_t = kornia.image_to_tensor(img)%0A%0A self.assertEqual(img.shape, (1, 1, 3))%0A self.assertEqual(img_t.shape, (3, 1, 1))%0A%0A def test_grayscale_torch(self):%0A img_rgb = torch.rand(2, 3, 4, 5)%0A img_gray = kornia.rgb_to_grayscale(img_rgb)%0A%0A self.assertEqual(img_gray.shape, (2, 1, 4, 5))%0A
|
|
e3d0681cf2e449b06abebabb7e8726079997eb01
|
Add reflex game
|
reflex/reflex.py
|
reflex/reflex.py
|
Python
| 0.000021
|
@@ -0,0 +1,2228 @@
+from microbit import *%0Aimport random%0A%0Ascore = 0%0Apixel = None%0Afade_step = 300%0Aclicked = False%0AMAX_PAUSE = 3000%0A%0A%0Adef get_rand_coord(limit=4):%0A return random.randint(0, limit)%0A%0A%0Adef get_rand_side():%0A return random.choice(%5B-1, 1%5D)%0A%0A%0Adef handle_correct_click(i):%0A global score, clicked%0A clicked = True%0A score += (i+1)*10%0A display.set_pixel(pixel%5B0%5D, pixel%5B1%5D, 0)%0A sleep(1000)%0A%0Adisplay.scroll(%22REFLEX%22)%0A%0Adisplay.scroll('Press any button to play', wait=False, loop=True)%0Awhile True:%0A if button_a.is_pressed() or button_b.is_pressed():%0A print(%22start playing%22)%0A break%0A%0Adisplay.clear()%0Awhile True:%0A%0A for y in range(0, 5):%0A display.set_pixel(2, y, 5)%0A%0A for r in range(1, 16):%0A%0A print(%22ROUND %25d%22 %25 r)%0A wait_time = random.random()*MAX_PAUSE%0A print (%22WAIT %25d%22, wait_time)%0A%0A start_time = running_time()%0A diff = 0%0A while diff %3C= wait_time:%0A%0A new_time = running_time()%0A diff = new_time - start_time%0A%0A y = get_rand_coord()%0A x = (get_rand_side() * (get_rand_coord(1)+1)) + 2%0A%0A pixel = (x, y)%0A%0A print(pixel)%0A%0A clicked = False%0A for i in range(9, -1, -1):%0A display.set_pixel(pixel%5B0%5D, pixel%5B1%5D, i)%0A%0A start_time = running_time()%0A diff = 0%0A while diff %3C= fade_step and not clicked:%0A if x %3C 2:%0A%0A if button_a.is_pressed():%0A handle_correct_click(i)%0A break%0A elif button_b.is_pressed():%0A score -= 10%0A%0A elif x %3E 2:%0A%0A if button_b.is_pressed():%0A handle_correct_click(i)%0A break%0A elif button_a.is_pressed():%0A score -= 10%0A%0A new_time = running_time()%0A diff = new_time - start_time%0A%0A pixel = None%0A%0A if r %25 5 == 0:%0A%0A fade_step -= 25%0A print (%22%25d: %25d%22 %25 (r, fade_step))%0A%0A display.scroll(%22Score: %25d%22 %25 score, wait=False, loop=True)%0A%0A while True:%0A if button_a.is_pressed() or button_b.is_pressed():%0A display.clear()%0A break%0A
|
|
998e7ac87ef6e96bb5d421860683f87e8b373428
|
Create view.py
|
blobstore-python/src/app/view.py
|
blobstore-python/src/app/view.py
|
Python
| 0
|
@@ -0,0 +1 @@
+%0A
|
|
9c22d354da4c09d2e98b657d334e7594df1042d7
|
Create q2.py
|
work/q2.py
|
work/q2.py
|
Python
| 0.000114
|
@@ -0,0 +1,257 @@
+def union(arr1, arr2):%0A result = %5B%5D%0A for i in range(1, len(arr1)):%0A result.append(arr1%5Bi%5D)%0A result.append(arr2%5Bi%5D)%0A return result%0A%0Adef create_array():%0A return %5Bx for x in range(0,100)%5D%0A%0Aprint(union(create_array(), create_array()))%0A
|
|
c100a97b4a2edb838fa5fd19f80909a86addf761
|
Version 3.2.1
|
tvrenamr/__init__.py
|
tvrenamr/__init__.py
|
__version__ = (3, 2, 0)
def get_version():
return '.'.join(map(str, __version__))
|
Python
| 0.000001
|
@@ -18,9 +18,9 @@
2,
-0
+1
)%0A%0Ad
|
4e1f87bf7805d20e52015b8c283181e4035de54b
|
Create _init_.py
|
luowang/tools/tree-tagger-windows-3.2/TreeTagger/cmd/_init_.py
|
luowang/tools/tree-tagger-windows-3.2/TreeTagger/cmd/_init_.py
|
Python
| 0.000145
|
@@ -0,0 +1 @@
+%0A
|
|
40ef5b1a6347d54eeb043c64f36286768b41dc3e
|
Add lldbToolBox.py scaffolding in ./utils for adding lldb python helpers to use when debugging swift.
|
utils/lldbToolBox.py
|
utils/lldbToolBox.py
|
Python
| 0.00001
|
@@ -0,0 +1,994 @@
+%22%22%22%0ALLDB Helpers for working with the swift compiler.%0A%0ALoad into LLDB with 'command script import /path/to/lldbToolBox.py'%0A%0AThis will also import LLVM data formatters as well, assuming that llvm is next%0Ato the swift checkout.%0A%22%22%22%0A%0Aimport os%0A%0AREPO_BASE = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir,%0A os.pardir))%0ASWIFT_REPO = os.path.join(REPO_BASE, %22swift%22)%0ALLVM_REPO = os.path.join(REPO_BASE, %22llvm%22)%0ALLVM_DATAFORMATTER_PATH = os.path.join(LLVM_REPO, %22utils%22,%0A %22lldbDataFormatters.py%22)%0A%0A%0Adef import_llvm_dataformatters(debugger):%0A if not os.access(LLVM_DATAFORMATTER_PATH, os.F_OK):%0A print(%22WARNING! Could not find LLVM data formatters!%22)%0A return%0A cmd = 'command script import %7B%7D'.format(LLVM_DATAFORMATTER_PATH)%0A debugger.HandleCommand(cmd)%0A print(%22Loaded LLVM data formatters.%22)%0A%0A%0Adef __lldb_init_module(debugger, internal_dict):%0A import_llvm_dataformatters(debugger)%0A
|
|
e0db4982016a724c368feafbe4182016dc0fa67d
|
Create mongo_to_csv.py
|
mongo_to_csv.py
|
mongo_to_csv.py
|
Python
| 0.000362
|
@@ -0,0 +1,2132 @@
+import unicodecsv%0Aimport sys%0Afrom pymongo import MongoClient%0A%0A# call this with 3 arguments: 1) mongodb uri 2) collection nam e3) output filename%0A%0Aclass generic_converter:%0A%0A def __init__(self):%0A self.header_dict = %7B%7D%0A%0A def retrieve_headers(self, test_dict, name_var):%0A for element in test_dict:%0A if isinstance(test_dict%5Belement%5D, dict):%0A self.retrieve_headers(test_dict%5Belement%5D, name_var +%0A '%7C%7C' + element)%0A else:%0A self.header_dict%5Bname_var + '%7C%7C' + element%5D = test_dict%5Belement%5D%0A%0A def converter_main(self, csv_writer):%0A mongo_uri_or_db_name = sys.argv%5B1%5D%0A if mongo_uri_or_db_name.startswith(%22mongodb://%22): # mongodb uri given%0A client = MongoClient(mongo_uri_or_db_name)%0A db = client%5Bmongo_uri_or_db_name.split(%22/%22)%5B-1%5D%5D%0A else: # database name given%0A client = MongoClient()%0A db = client%5Bmongo_uri_or_db_name%5D%0A collection_obj = db%5Bsys.argv%5B2%5D%5D%0A cursor_records = collection_obj.find()%0A header_list = %5B%5D%0A%0A for cursor in cursor_records:%0A self.retrieve_headers(cursor, '')%0A for item_label in self.header_dict:%0A if item_label not in header_list:%0A header_list.append(item_label)%0A self.header_dict = %7B%7D%0A csv_writer.writerow(header_list)%0A%0A cursor_records = collection_obj.find()%0A for cursor in cursor_records:%0A row_to_push = %5B%5D%0A self.header_dict = %7B%7D%0A self.retrieve_headers(cursor, '')%0A for item_label in header_list:%0A if item_label in self.header_dict:%0A row_to_push.append(self.header_dict%5Bitem_label%5D)%0A else:%0A row_to_push.append('')%0A csv_writer.writerow(row_to_push)%0A%0A%0Adef main():%0A f_write = open(sys.argv%5B3%5D, 'wb')%0A csv_writer = unicodecsv.writer(f_write, delimiter=',', quotechar='%22')%0A converter_object = generic_converter()%0A converter_object.converter_main(csv_writer)%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
f5c8f8d819143b4a49064847a6eb1a7813a3f06b
|
Create solution.py
|
hackerrank/algorithms/sorting/easy/closest_numbers/py/solution.py
|
hackerrank/algorithms/sorting/easy/closest_numbers/py/solution.py
|
Python
| 0.000018
|
@@ -0,0 +1,516 @@
+#!/bin/python%0A%0Asize = int(raw_input())%0Avalues = sorted(%5Bint(value) for value in raw_input().split()%5D%5B:size%5D)%0Adifferences = sorted(%5B(values%5Bi - 1%5D, values%5Bi%5D) for i in range(1, len(values))%5D, key = lambda x : abs(x%5B0%5D - x%5B1%5D))%0Ai = 1%0Awhile (i %3C len(differences) %0A and abs(differences%5Bi%5D%5B0%5D - differences%5Bi%5D%5B1%5D) == abs(differences%5Bi - 1%5D%5B0%5D - differences%5Bi - 1%5D%5B1%5D)):%0A i += 1%0AsmallestDifferences = differences%5B:i%5D%0Aprint %22 %22.join(%22 %22.join(str(value) for value in difference) for difference in smallestDifferences)%0A
|
|
823bf93a9d931ed106ac4ed83f0448215c38580a
|
Create network_auth.py
|
network_auth.py
|
network_auth.py
|
Python
| 0.000003
|
@@ -0,0 +1,1321 @@
+#!/usr/bin/python%0A# Authenticates against a LAN using HTTP Basic Auth%0A%0Aimport sys%0A%0Aif len(sys.argv) != 4:%0A print (%22Invalid arguments%22)%0A print (%22Proper syntax is: %22 + sys.argv%5B0%5D + %22 %5Burl%5D %5Busername%5D %5Bpassword%5D%22)%0A sys.exit(1)%0A%0Aimport requests%0Aimport requests.exceptions%0A%0Aauth_target = sys.argv%5B1%5D%0Ausername = sys.argv%5B2%5D%0Apassword = sys.argv%5B3%5D%0A%0Aprint (%22Checking connection to: %22 + auth_target)%0A%0Atry:%0A auth_check = requests.get(auth_target)%0Aexcept requests.exceptions.ConnectionError as e:%0A print (e)%0A sys.exit(2)%0Aexcept requests.exceptions.Timeout as e:%0A print (e)%0A sys.exit(3)%0Aexcept Exception as e:%0A print (e)%0A sys.exit(98)%0A%0Acheck_status = auth_check.status_code%0Aprint (%22Response status code is: %22 + str(check_status))%0A%0Aif check_status == 200:%0A print (%22You are already authenticated%22)%0A sys.exit(0)%0A%0Aprint (%22Authenticating...%22)%0A%0Atry:%0A do_auth = requests.get(auth_target, auth=(username, password))%0Aexcept requests.exceptions.ConnectionError as e:%0A print (e)%0A sys.exit(4)%0Aexcept requests.exceptions.Timeout as e:%0A print (e)%0A sys.exit(5)%0Aexcept Exception as e:%0A print (e)%0A sys.exit(99)%0A%0Aauth_status = do_auth.status_code%0Aif auth_status == 200:%0A print (%22Authentication successful%22)%0Aelse:%0A print (%22Authentication failed with response code: %22 + str(auth_status))%0A%0A
|
|
1b440f67d3150d066014bddeaa718eee8216f602
|
add SWSFinance test.
|
restclients/tests.py
|
restclients/tests.py
|
from django.utils import unittest
#from restclients.test.sws.compatible import SWSTest
from restclients.test.sws.term import SWSTestTerm
from restclients.test.sws.err404.dao import SWSTestDAO404
from restclients.test.sws.err500.dao import SWSTestDAO500
from restclients.test.sws.invalid_dao import SWSTestInvalidDAO
from restclients.test.sws.file_implementation.dao import SWSTestFileDAO
from restclients.test.sws.schedule_data import SWSTestScheduleData
from restclients.test.sws.enrollment import SWSTestEnrollments
from restclients.test.sws.section import SWSTestSectionData
from restclients.test.sws.section_status import SWSTestSectionStatusData
from restclients.test.sws.independent_study import SWSIndependentStudy
from restclients.test.sws.instructor_no_regid import SWSMissingRegid
from restclients.test.sws.registrations import SWSTestRegistrations
from restclients.test.sws.campus import SWSTestCampus
from restclients.test.sws.college import SWSTestCollege
from restclients.test.sws.department import SWSTestDepartment
from restclients.test.sws.curriculum import SWSTestCurriculum
from restclients.test.sws.graderoster import SWSTestGradeRoster
from restclients.test.pws.person import PWSTestPersonData
from restclients.test.pws.entity import PWSTestEntityData
from restclients.test.pws.idcard import TestIdCardPhoto
from restclients.test.pws.err404.dao import PWSTestDAO404
from restclients.test.pws.err404.pws import PWSTest404
from restclients.test.pws.err500.dao import PWSTestDAO500
from restclients.test.pws.err500.pws import PWSTest500
from restclients.test.pws.invalid_dao import PWSTestInvalidDAO
from restclients.test.pws.file_implementation.dao import PWSTestFileDAO
from restclients.test.gws.group import GWSGroupBasics
from restclients.test.gws.course_group import GWSCourseGroupBasics
from restclients.test.gws.search import GWSGroupSearch
from restclients.test.cache.none import NoCacheTest
from restclients.test.cache.time import TimeCacheTest
from restclients.test.cache.etag import ETagCacheTest
from restclients.test.book.by_schedule import BookstoreScheduleTest
from restclients.test.amazon_sqs.queues import SQSQueue
from restclients.test.sms.send import SMS
from restclients.test.sms.invalid_phone_number import SMSInvalidNumbers
from restclients.test.nws.subscription import NWSTestSubscription
from restclients.test.nws.channel import NWSTestChannel
from restclients.test.nws.endpoint import NWSTestEndpoint
from restclients.test.nws.message import NWSTestMessage
from restclients.test.nws.person import NWSTestPerson
from restclients.test.canvas.enrollments import CanvasTestEnrollment
from restclients.test.canvas.accounts import CanvasTestAccounts
from restclients.test.canvas.admins import CanvasTestAdmins
from restclients.test.canvas.roles import CanvasTestRoles
from restclients.test.canvas.courses import CanvasTestCourses
from restclients.test.canvas.sections import CanvasTestSections
from restclients.test.canvas.bad_sis_ids import CanvasBadSISIDs
from restclients.test.canvas.terms import CanvasTestTerms
from restclients.test.canvas.users import CanvasTestUsers
from restclients.test.canvas.submissions import CanvasTestSubmissions
from restclients.test.canvas.assignments import CanvasTestAssignments
from restclients.test.canvas.quizzes import CanvasTestQuizzes
from restclients.test.catalyst.gradebook import CatalystTestGradebook
from restclients.test.trumba.accounts import TrumbaTestAccounts
from restclients.test.trumba.calendars import TrumbaTestCalendars
from restclients.test.gws.trumba_group import TestGwsTrumbaGroup
from restclients.test.thread import ThreadsTest
from restclients.test.view import ViewTest
from restclients.test.dao_implementation.mock import TestMock
from restclients.test.library.mylibinfo import MyLibInfoTest
from restclients.test.sws.notice import SWSNotice
|
Python
| 0.00005
|
@@ -32,60 +32,224 @@
st%0A%0A
-#from restclients.test.sws.compatible import SWSTest
+from restclients.test.library.mylibinfo import MyLibInfoTest%0Afrom restclients.test.sws.compatible import SWSTest%0Afrom restclients.test.sws.financial import SWSFinance%0Afrom restclients.test.sws.notice import SWSNotice
%0Afro
@@ -3908,116 +3908,5 @@
ck%0A%0A
-from restclients.test.library.mylibinfo import MyLibInfoTest%0A%0Afrom restclients.test.sws.notice import SWSNotice
%0A
|
0cad5e1673069d0fb8f2abb4eb6b062e3461fb70
|
Add fortran ABI mismatch test for scipy.linalg.
|
scipy/linalg/tests/test_build.py
|
scipy/linalg/tests/test_build.py
|
Python
| 0
|
@@ -0,0 +1,1652 @@
+from subprocess import call, PIPE, Popen%0Aimport sys%0Aimport re%0A%0Aimport numpy as np%0Afrom numpy.testing import TestCase, dec%0A%0Afrom scipy.linalg import flapack%0A%0A# XXX: this is copied from numpy trunk. Can be removed when we will depend on%0A# numpy 1.3%0Aclass FindDependenciesLdd:%0A def __init__(self):%0A self.cmd = %5B'ldd'%5D%0A%0A try:%0A st = call(self.cmd, stdout=PIPE, stderr=PIPE)%0A except OSError:%0A raise RuntimeError(%22command %25s cannot be run%22 %25 self.cmd)%0A%0A def get_dependencies(self, file):%0A p = Popen(self.cmd + %5Bfile%5D, stdout=PIPE, stderr=PIPE)%0A stdout, stderr = p.communicate()%0A if not (p.returncode == 0):%0A raise RuntimeError(%22Failed to check dependencies for %25s%22 %25 libfile)%0A%0A return stdout%0A%0A def grep_dependencies(self, file, deps):%0A stdout = self.get_dependencies(file)%0A%0A rdeps = dict(%5B(dep, re.compile(dep)) for dep in deps%5D)%0A founds = %5B%5D%0A for l in stdout.splitlines():%0A for k, v in rdeps.items():%0A if v.search(l):%0A founds.append(k)%0A%0A return founds%0A%0Aclass TestF77Mismatch(TestCase):%0A @dec.skipif(not(sys.platform%5B:5%5D == 'linux'),%0A %22Skipping fortran compiler mismatch on non Linux platform%22)%0A def test_lapack(self):%0A f = FindDependenciesLdd()%0A deps = f.grep_dependencies(flapack.__file__,%0A %5B'libg2c', 'libgfortran'%5D)%0A self.failIf(len(deps) %3E 1,%0A%22%22%22Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to%0Acause random crashes and wrong results. See numpy INSTALL.txt for more%0Ainformation.%22%22%22)%0A
|
|
096a1d94c2f54246d51954b59fc5c3fdb28154b2
|
add persistence strategy enum
|
keen/__init__.py
|
keen/__init__.py
|
__author__ = 'dkador'
|
Python
| 0.000002
|
@@ -15,8 +15,398 @@
dkador'%0A
+%0Aclass PersistenceStrategy:%0A %22%22%22%0A An enum that defines the persistence strategy used by the KeenClient.%0A Currently supported: DIRECT, which means any time add_event is called the%0A client will call out directly to Keen, or REDIS, which means add_event%0A will simply add the event to a defined Redis instance which can be%0A cleared later.%0A %22%22%22%0A DIRECT = 0,%0A REDIS = 1
|
46d7ce6a8ce93eb439617cb942d3a7e923b2ed7a
|
hello world
|
example/hello.py
|
example/hello.py
|
Python
| 0.999981
|
@@ -0,0 +1,22 @@
+print(%22Hello World!%22)%0A
|
|
295dc1e11563350181558001366275369df90639
|
Add a sysutil module
|
sahgutils/sysutil.py
|
sahgutils/sysutil.py
|
Python
| 0
|
@@ -0,0 +1,987 @@
+# System utility functions%0Afrom subprocess import Popen, PIPE%0A%0Adef exec_command(cmd_args):%0A %22%22%22Execute a shell command in a subprocess%0A%0A Convenience wrapper around subprocess to execute a shell command%0A and pass back stdout, stderr, and the return code. This function%0A waits for the subprocess to complete, before returning.%0A%0A Usage example:%0A %3E%3E%3E stdout, stderr, retcode = exec_command(%5B'ls', '-lhot'%5D)%0A%0A Parameters%0A ----------%0A cmd_args : list of strings%0A The args to pass to subprocess. The first arg is the program%0A name.%0A%0A Returns%0A -------%0A stdout : string%0A The contents of stdout produced by the shell command%0A stderr : string%0A The contents of stderr produced by the shell command%0A retcode : int%0A The return code produced by the shell command%0A%0A %22%22%22%0A proc = Popen(cmd_args, stdout=PIPE, stderr=PIPE)%0A stdout, stderr = proc.communicate()%0A proc.wait()%0A%0A return stdout, stderr, proc.returncode%0A
|
|
74034ccc6d1b7436c81520fb287330b852d54c62
|
Create a.py
|
a.py
|
a.py
|
Python
| 0.000489
|
@@ -0,0 +1,1578 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A#%0A# Simple Bot to reply to Telegram messages. This is built on the API wrapper, see%0A# echobot2.py to see the same example built on the telegram.ext bot framework.%0A# This program is dedicated to the public domain under the CC0 license.%0Aimport logging%0Aimport telegram%0Afrom telegram.error import NetworkError, Unauthorized%0Afrom time import sleep%0A%0A%0Aupdate_id = None%0A%0Adef main():%0A global update_id%0A # Telegram Bot Authorization Token%0A bot = telegram.Bot('277679081:AAGk3IXlId9PKUn3n_5wrfrUIR_mgsUVCeE')%0A%0A # get the first pending update_id, this is so we can skip over it in case%0A # we get an %22Unauthorized%22 exception.%0A try:%0A update_id = bot.getUpdates()%5B0%5D.update_id%0A except IndexError:%0A update_id = None%0A%0A logging.basicConfig(format='%25(asctime)s - %25(name)s - %25(levelname)s - %25(message)s')%0A%0A while True:%0A try:%0A echo(bot)%0A except NetworkError:%0A sleep(1)%0A except Unauthorized:%0A # The user has removed or blocked the bot.%0A update_id += 1%0A%0A%0Adef echo(bot):%0A global update_id%0A # Request updates after the last update_id%0A for update in bot.getUpdates(offset=update_id, timeout=10):%0A # chat_id is required to reply to any message%0A chat_id = update.message.chat_id%0A update_id = update.update_id + 1%0A%0A if update.message: # your bot can receive updates without messages%0A # Reply to the message%0A update.message.reply_text(update.message.text)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
e2dbee01734a981e8fcbbdca7d7d96f0506f929b
|
Create b.py
|
b.py
|
b.py
|
Python
| 0.000018
|
@@ -0,0 +1,7 @@
+b = 43%0A
|
|
ef7b6fb0bbe0c0d263a8c28ccaed1365f50f0ad9
|
Solve Knowit2019/07
|
knowit2019/07.py
|
knowit2019/07.py
|
Python
| 0.99801
|
@@ -0,0 +1,463 @@
+def zee_special_divison_operator(exp_r, x):%0A for y_d in range(2, 27644437):%0A b = y_d * x%0A r = b %25 27644437%0A%0A if exp_r == r:%0A break%0A%0A return y_d%0A%0A%0Adef test_special():%0A assert 13825167 == zee_special_divison_operator(5897, 2)%0A assert 9216778 == zee_special_divison_operator(5897, 3)%0A assert 20734802 == zee_special_divison_operator(5897, 4)%0A%0A%0Aif __name__ == '__main__':%0A print(zee_special_divison_operator(5897, 7))
|
|
849321eb5a34518afa85e0e5643c1a8f30aad4dc
|
remove encoding
|
petl/io/xlsx.py
|
petl/io/xlsx.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import locale
from petl.util.base import Table
def fromxlsx(filename, sheet=None, range_string=None, row_offset=0,
column_offset=0, **kwargs):
"""
Extract a table from a sheet in an Excel .xlsx file.
N.B., the sheet name is case sensitive.
The `sheet` argument can be omitted, in which case the first sheet in
the workbook is used by default.
The `range_string` argument can be used to provide a range string
specifying a range of cells to extract.
The `row_offset` and `column_offset` arguments can be used to
specify offsets.
Any other keyword arguments are passed through to
:func:`openpyxl.load_workbook()`.
"""
return XLSXView(filename, sheet=sheet, range_string=range_string,
row_offset=row_offset, column_offset=column_offset,
**kwargs)
class XLSXView(Table):
def __init__(self, filename, sheet=None, range_string=None,
row_offset=0, column_offset=0, **kwargs):
self.filename = filename
self.sheet = sheet
self.range_string = range_string
self.row_offset = row_offset
self.column_offset = column_offset
self.kwargs = kwargs
def __iter__(self):
import openpyxl
wb = openpyxl.load_workbook(filename=self.filename,
read_only=True, **self.kwargs)
if self.sheet is None:
ws = wb.get_sheet_by_name(wb.get_sheet_names()[0])
elif isinstance(self.sheet, int):
ws = wb.get_sheet_by_name(wb.get_sheet_names()[self.sheet])
else:
ws = wb.get_sheet_by_name(str(self.sheet))
for row in ws.iter_rows(range_string=self.range_string,
row_offset=self.row_offset,
column_offset=self.column_offset):
yield tuple(cell.value for cell in row)
try:
wb._archive.close()
except AttributeError as e:
# just here in case openpyxl stops exposing an _archive property.
pass
def toxlsx(tbl, filename, sheet=None, encoding=None):
"""
Write a table to a new Excel .xlsx file.
"""
import openpyxl
if encoding is None:
encoding = locale.getpreferredencoding()
wb = openpyxl.Workbook(write_only=True, encoding=encoding)
ws = wb.create_sheet(title=sheet)
for row in tbl:
ws.append(row)
wb.save(filename)
Table.toxlsx = toxlsx
|
Python
| 0.9998
|
@@ -2443,27 +2443,8 @@
True
-, encoding=encoding
)%0A
|
54bc1400a90408a77751b9fda275ec963a2db0bc
|
lowercase SettlementCode
|
source_data/etl_tasks/refresh_master.py
|
source_data/etl_tasks/refresh_master.py
|
import sys, os
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import IntegrityError
from datapoints.models import Indicator, DataPoint, Region, Campaign, Office, Source
from source_data.models import VCMSummaryNew,VCMSettlement,ProcessStatus
from django.contrib.auth.models import User
from dateutil import parser
from decimal import InvalidOperation
import pprint as pp
import pandas as pd
import csv
class VcmEtl(object):
def __init__(self,request_guid):
print 'initializing VCM ETL Object'
self.request_guid = request_guid
self.column_to_indicator_map = self.build_indicator_map()
self.non_indicator_fields = ['submissiondate','deviceid','simserial',\
'phonenumber','dateofreport','date_implement','settlementcode',\
'meta_instanceid','key', 'id','process_status_id','request_guid',\
'created_at']
def build_indicator_map(self):
self.inds = Indicator.objects.all()
self.source_columns = VCMSummaryNew._meta.get_all_field_names()
# map columns to indicators #
column_to_indicator_map = {}
for col in self.source_columns:
if col in [ind.name for ind in self.inds]:
column_to_indicator_map[col] = Indicator.objects.get(name=col).id
return column_to_indicator_map
def ingest_vcm_datapoints(self):
to_process = pd.DataFrame(list(VCMSummaryNew.objects.filter(process_status__status_text='TO_PROCESS').values()))
print 'ROWS TO PROCESS: ' + str(len(to_process))
column_list = to_process.columns.tolist()
for i, row in enumerate(to_process.values):
print 'processing row: ' + str(i)
row_dict = {}
for row_i,cell in enumerate(row):
row_dict[column_list[row_i]] = cell
row_process_status = self.process_row(row_dict)
process_status = ProcessStatus.objects.get(status_text=row_process_status)
row_obj = VCMSummaryNew.objects.get(id=row_dict['id'])
row_obj.process_status = process_status
row_obj.save()
def process_row(self,row_dict):
print row_dict['SettlementCode']
try:
sett_code = row_dict['SettlementCode'].replace('.0','')
region_id = Region.objects.get(settlement_code=sett_code).id
except TypeError:
return 'VCM_SUMMARY_NO_SETT_CODE'
## Settlement Is Null
except ValueError:
return 'VCM_SUMMARY_NO_CAMPAIGN'
except ObjectDoesNotExist:
return 'VCM_SUMMARY_NO_SETT_CODE'
try:
date_impl = parser.parse(row_dict['date_implement'])
campaign_id = Campaign.objects.get(start_date=date_impl).id
except TypeError:
return 'VCM_SUMMARY_NO_CAMPAIGN'
except ValueError:
return 'VCM_SUMMARY_NO_CAMPAIGN'
except ObjectDoesNotExist:
return 'VCM_SUMMARY_NO_CAMPAIGN'
all_cell_status = []
# process all cells in the row that represnet an indicator value
for column_name,cell_value, in row_dict.iteritems():
if column_name not in self.non_indicator_fields:
source_guid = row_dict['KEY'] + '_' + column_name
cell_status = self.process_cell(region_id,campaign_id,column_name,cell_value,source_guid)
all_cell_status.append(cell_status)
# HANDLE DUPE ENTRIES BETTER #
if 'ALREADY_EXISTS' in all_cell_status:
return 'ALREADY_EXISTS'
else:
return 'SUCESS_INSERT'
def process_cell(self,region_id,campaign_id,column_name,cell_value,src_key):
if cell_value == "nan":
return
if column_name in self.non_indicator_fields:
return
cleaned_cell_value = self.clean_cell_value(cell_value)
indicator_id = Indicator.objects.get(name = column_name).id
source_id = Source.objects.get(source_name = 'odk').id
try:
dp = DataPoint.objects.get_or_create(
indicator_id = indicator_id, \
region_id = region_id, \
campaign_id = campaign_id, \
value = cleaned_cell_value, \
source_id = source_id, \
source_guid = src_key, \
changed_by_id = User.objects.get(source_name='odk')
)
except IntegrityError as e:
return 'ALREADY_EXISTS'
# NEED TO HANDLE DUPE DATA POINTS BETTER #
return 'SUCESS_INSERT'
def clean_cell_value(self,cell_value):
# cell_value = cell_value.lower()
if cell_value == 'yes':
cleaned = 1
elif cell_value == 'no':
cleaned = 0
else:
cleaned = cell_value
return cleaned
##########################
#### META DATA INGEST ####
##########################
def ingest_indicators(self):
v = VCMSummaryNew()
all_fields = v._meta.fields
indicators = []
for f in all_fields:
if f.name not in self.non_indicator_fields:
indicators.append(f.name)
for i in indicators:
try:
created = Indicator.objects.create(name = i,description = i, \
is_reported = 1)
except IntegrityError:
pass
def ingest_regions(self):
to_process = VCMSettlement.objects.filter(process_status__status_text='TO_PROCESS')
for row in to_process:
print row
try:
created = Region.objects.create(
full_name = row.settlementname ,\
settlement_code = row.settlementcode ,\
office = Office.objects.get(name='Nigeria') ,\
latitude = row.settlementgps_latitude ,\
longitude = row.settlementgps_longitude ,\
source = Source.objects.get(source_name='odk') ,\
source_guid = row.key
)
row.process_status=ProcessStatus.objects.get(status_text='SUCESS_INSERT')
row.save()
except IntegrityError:
# THIS SHOULD BE AN UPDATE SO THAT NEWER REGIONS ARE INSERTED #
# AND THE OLD ONES ARE BROUGTH UP FOR REVIEW #
# THIS SHOULD ALSO BE ABSTRACTED TO WORK FOR ALL' MASTER' OBJECTS
row.process_status=ProcessStatus.objects.get(status_text='ALREADY_EXISTS')
row.save()
def ingest_campaigns(self):
all_data = VCMSummaryNew.objects.all()
all_campaigns = []
# Ensure the Office ID is in there
try:
ng_office_id = Office.objects.get(name='Nigeria')
except ObjectDoesNotExist:
ng_office = Office.objects.create(name='Nigeria')
ng_office_id = ng_office.id
for row in all_data:
if row.date_implement == 'nan':
return
try:
created = Campaign.objects.create(
name = 'Nigeria Starting:' + row.date_implement, \
office = ng_office_id, \
start_date = parser.parse(row.date_implement), \
end_date = parser.parse(row.date_implement)
)
except IntegrityError:
pass
|
Python
| 0.999852
|
@@ -2190,50 +2190,8 @@
):%0A%0A
- print row_dict%5B'SettlementCode'%5D%0A%0A
@@ -2233,17 +2233,17 @@
w_dict%5B'
-S
+s
ettlemen
@@ -2243,17 +2243,17 @@
ttlement
-C
+c
ode'%5D.re
@@ -3254,11 +3254,11 @@
ct%5B'
-KEY
+key
'%5D +
|
e61dbf66d6f73e4999a5ff9f732a8df0637fdbf2
|
Add an example of SQLalchemy model
|
server/models.py
|
server/models.py
|
Python
| 0.000051
|
@@ -0,0 +1,477 @@
+from flask.ext.sqlalchemy import SQLAlchemy%0A%0Aapp.config%5B'SQLALCHEMY_DATABASE_URI'%5D = 'sqlite:////tmp/test.db'%0Adb = SQLAlchemy(app)%0A%0Aclass User(db.Model):%0A id = db.Column(db.Integer, primary_key=True)%0A username = db.Column(db.String(80), unique=True)%0A email = db.Column(db.String(120), unique=True)%0A%0A def __init__(self, username, email):%0A self.username = username%0A self.email = email%0A%0A def __repr__(self):%0A return '%3CUser %25r%3E' %25 self.username%0A
|
|
b6fb4cadb9ac1506fef3a230ee7ec983daa64922
|
Remove tail
|
judge/templatetags/markdown/lazy_load.py
|
judge/templatetags/markdown/lazy_load.py
|
from copy import deepcopy
from django.contrib.staticfiles.templatetags.staticfiles import static
from lxml import html
def lazy_load(tree):
blank = static('blank.gif')
for img in tree.xpath('.//img'):
src = img.get('src')
if src.startswith('data'):
continue
noscript = html.Element('noscript')
noscript.append(deepcopy(img))
img.addprevious(noscript)
img.set('data-src', src)
img.set('src', blank)
img.set('class', img.get('class') + ' unveil' if img.get('class') else 'unveil')
|
Python
| 0.001071
|
@@ -334,16 +334,68 @@
cript')%0A
+ copy = deepcopy(img)%0A copy.tail = ''%0A
@@ -410,29 +410,20 @@
.append(
-deepcopy(img)
+copy
)%0A
|
eb145b78d4c84a29ee77fbe77142dee6f97f67dd
|
put urls and getter in its own file
|
filemail/urls.py
|
filemail/urls.py
|
Python
| 0
|
@@ -0,0 +1,966 @@
+import os%0Afrom errors import FMConfigError%0A%0A%0Abase_url = 'https://www.filemail.com'%0A%0Aapi_urls = %7B%0A 'login': 'api/authentication/login',%0A 'logout': 'api/authentication/logout',%0A 'init': 'api/transfer/initialize',%0A 'get': 'api/transfer/get',%0A 'complete': 'api/transfer/complete',%0A 'forward': 'api/transfer/forward',%0A 'share': 'api/transfer/share',%0A 'cancel': 'api/transfer/cancel',%0A 'delete': 'api/transfer/delete',%0A 'zip': 'api/transfer/zip',%0A 'file_rename': 'api/transfer/file/rename',%0A 'file_delete': 'api/transfer/file/delete',%0A 'update': 'api/transfer/update',%0A 'sent_get': 'api/transfer/sent/get',%0A 'received_get': 'api/transfer/received/get',%0A 'user_get': 'api/user/get',%0A 'user_update': 'api/user/update'%0A %7D%0A%0A%0Adef getURL(action):%0A if action in api_urls:%0A url = os.path.join(base_url, api_urls%5Baction%5D)%0A return url%0A%0A raise FMConfigError('You passed an invalid action: %7B%7D'.format(action))%0A
|
|
beb549ba090a1a72761a7e81feb3edcbf85ca543
|
Add files via upload
|
first_attempt.py
|
first_attempt.py
|
Python
| 0
|
@@ -0,0 +1,21 @@
+print(%22Hello world%22)%0A
|
|
747fa222d1e382ced363ced9d2565f384769316c
|
add button listener
|
button-listen.py
|
button-listen.py
|
Python
| 0.000001
|
@@ -0,0 +1,633 @@
+#!/usr/bin/env python%0A%0Aimport sys%0Afrom time import time, sleep%0Aimport RPi.GPIO as GPIO%0A%0A%0Adef main(argv=sys.argv):%0A channel = int(argv%5B1%5D)%0A GPIO.setmode(GPIO.BCM)%0A%0A try:%0A GPIO.setup(channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)%0A%0A ts = 0%0A%0A while True:%0A GPIO.wait_for_edge(channel, GPIO.BOTH)%0A%0A if time() - ts %3C= 0.2:%0A continue%0A %0A sleep(0.01)%0A print %22%7B%7D:%7B%7D%22.format(channel, GPIO.input(channel))%0A sys.stdout.flush()%0A ts = time()%0A finally:%0A GPIO.cleanup()%0A%0A%0Aif __name__ == %22__main__%22:%0A sys.exit(main())%0A
|
|
8dc7f657e816ab9becbabcf032e62d088f2b6b3c
|
Add network visualization tool
|
viz.py
|
viz.py
|
Python
| 0
|
@@ -0,0 +1,1286 @@
+import os%0Aimport json%0Aimport hashlib%0A%0Adef get_data_path():%0A if not 'OPENSHIFT_DATA_DIR' in os.environ:%0A return '../data/data.json'%0A else:%0A return os.path.join(os.environ%5B'OPENSHIFT_DATA_DIR'%5D, 'data.json')%0A%0Adef get_data():%0A if not os.path.isfile(get_data_path()):%0A with open(get_data_path(), 'w') as fh:%0A fh.write(%22%7B%7D%22)%0A data = json.load(open(get_data_path(), 'r'))%0A return data%0A%0Adef weird_hash(data):%0A hashed = hashlib.md5()%0A hashed.update(data)%0A digest = hashed.hexdigest()%0A uppercase_offset = ord('A') - ord('0')%0A for x in range(ord('0'), ord('9')):%0A digest = digest.replace(chr(x), chr(x + uppercase_offset))%0A return digest%0A%0Aout = 'graph main %7B%5Cn'%0Adot_usernames = ''%0Adot_relations = ''%0A%0Adata = get_data()%0A%0Afor k in data:%0A user = data%5Bk%5D%0A username = user%5B'username'%5D%0A dot_usernames += weird_hash(k) + '%5Blabel=%22' + weird_hash(k)%5B:5%5D + '%22%5D' + '%5Cn'%0A if not 'friends' in user:%0A continue%0A for friend in user%5B'friends'%5D:%0A if not (weird_hash(friend) + '--' +%0A weird_hash(k) + '%5Cn') in dot_relations:%0A dot_relations += weird_hash(k) + '--' + %5C%0A weird_hash(friend) + '%5Cn'%0Aout += dot_usernames%0Aout += dot_relations%0Aout += '%7D'%0A%0Aprint(out)%0A
|
|
0f3815ed22c4e25d311f36e0d9be9c5b38bd32bd
|
Create the basic structure for the topic handler.
|
handler/topic.py
|
handler/topic.py
|
Python
| 0
|
@@ -0,0 +1,284 @@
+class IndexHandler(BaseHandler):%0A%0Aclass ViewHandler(BaseHandler):%0A%0Aclass CreateHandler(BaseHandler):%0A%0Aclass EditHandler(BaseHandler):%0A%0Aclass FavoriteHandler(BaseHandler):%0A%0Aclass CancelFavoriteHandler(BaseHandler):%0A%0Aclass VoteHandler(BaseHandler):%0A%0Aclass ReplyEditHandler(BaseHandler):
|
|
b48a17f45bbb9a2202c8c3fcb377037b92961f0b
|
Create na.py
|
na.py
|
na.py
|
Python
| 0.000005
|
@@ -0,0 +1,8 @@
+hjghjgj%0A
|
|
00f2a9ae8a7deaa8a0cb49b9f1ce5b9b6a41f654
|
handle None for timestamps
|
src/robot/result/configurer.py
|
src/robot/result/configurer.py
|
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import utils
class SuiteConfigurer(object):
def __init__(self, name=None, doc=None, metadata=None, set_tags=None,
include_tags=None, exclude_tags=None, include_suites=None,
include_tests=None, remove_keywords=None, log_level=None,
critical=None, noncritical=None, starttime=None,
endtime=None):
self.name = name
self.doc = doc
self.metadata = metadata
self.set_tags = set_tags or []
self.critical_tags = critical
self.non_critical_tags = noncritical
self.include_tags = include_tags
self.exclude_tags = exclude_tags
self.include_suites = include_suites
self.include_tests = include_tests
self.remove_keywords = remove_keywords
self.log_level = log_level
self.starttime = self._get_time(starttime)
self.endtime = self._get_time(endtime)
@property
def add_tags(self):
return [t for t in self.set_tags if not t.startswith('-')]
@property
def remove_tags(self):
return [t[1:] for t in self.set_tags if t.startswith('-')]
def configure(self, suite):
self._set_suite_attributes(suite)
suite.filter(self.include_suites, self.include_tests,
self.include_tags, self.exclude_tags)
suite.set_tags(self.add_tags, self.remove_tags)
suite.remove_keywords(self.remove_keywords)
suite.filter_messages(self.log_level)
suite.set_criticality(self.critical_tags, self.non_critical_tags)
def _set_suite_attributes(self, suite):
if self.name:
suite.name = self.name
if self.doc:
suite.doc = self.doc
if self.metadata:
suite.metadata.update(self.metadata)
if self.starttime:
suite.starttime = self.starttime
if self.endtime:
suite.endtime = self.endtime
def _get_time(self, timestamp):
if utils.eq(timestamp, 'N/A'):
return None
try:
secs = utils.timestamp_to_secs(timestamp, seps=list(' :.-_'),
millis=True)
except ValueError:
return None
return utils.secs_to_timestamp(secs, millis=True)
|
Python
| 0.000387
|
@@ -2589,16 +2589,33 @@
if
+not timestamp or
utils.eq
|
48a9b87dd86d600cdab4224c84aa5ce0685b775c
|
Add fetch data file
|
python/fetch.py
|
python/fetch.py
|
Python
| 0.000001
|
@@ -0,0 +1,2089 @@
+#!/usr/bin/env python%0A%0Aimport time%0Aimport json%0Aimport requests%0A%0Aheaders = %7B%0A %22Host%22: %22xgs15.c.bytro.com%22,%0A %22User-Agent%22: %22Mozilla/5.0 (X11; Linux x86_64; rv:56.0) Gecko/20100101 Firefox/56.0%22,%0A %22Accept%22: %22text/plain, */*; q=0.01%22,%0A %22Accept-Language%22: %22en-US,en;q=0.5%22,%0A %22Accept-Encoding%22: %22gzip, deflate, br%22,%0A %22Content-Type%22: %22application/x-www-form-urlencoded; charset=UTF-8%22,%0A %22Content-Length%22: %22387%22,%0A %22Origin%22: %22https://www.supremacy1914.com%22,%0A %22DNT%22: %221%22,%0A %22Connection%22: %22keep-alive%22,%0A %7D%0A%0ApayloadSample = %7B%0A %22@c%22: %22ultshared.action.UltUpdateGameStateAction%22,%0A %22gameID%22: %222117045%22,%0A %22playerID%22: 0,%0A %22userAuth%22: %22787925a25d0c072c3eaff5c1eff52829475fd506%22,%0A %22tstamp%22: int(time.time())%0A %7D%0A%0Aurl = 'https://xgs15.c.bytro.com/'%0A%0Adef print_json(jsonText):%0A print(json.dumps(jsonText, sort_keys=True, indent=4))%0A%0Adef get_day():%0A payload = payloadSample%0A payload%5B%22stateType%22%5D = 12%0A payload%5B%22option%22%5D = 30%0A%0A r = requests.post(url, headers=headers, json=payload)%0A%0A response = json.loads(r.text)%0A result = response%5B%22result%22%5D%0A return result%5B%22dayOfGame%22%5D%0A%0Adef get_score(day):%0A payload = payloadSample%0A payload%5B%22stateType%22%5D = 2%0A payload%5B%22option%22%5D = day%0A%0A r = requests.post(url, headers=headers, json=payload)%0A%0A text = json.loads(r.text)%0A return text%5B%22result%22%5D%5B%22ranking%22%5D%5B%22ranking%22%5D%0A%0A%0Adef write_results():%0A resultsFile = open(%22results.csv%22,%22w%22)%0A%0A for day in range(0, get_day()):%0A day += 1;%0A%0A print(%22day: %22 + str(day))%0A result = get_score(day)%0A result.pop(0)%0A%0A formatedResult = str();%0A for player in result:%0A formatedResult += str(player) + %22,%22%0A%0A resultsFile.write(formatedResult + %22%5Cn%22);%0A%0A resultsFile.close()%0A%0Adef get_players():%0A payload = 
payloadSample%0A payload%5B%22stateType%22%5D = 1%0A%0A r = requests.post(url, headers=headers, json=payload)%0A%0A text = json.loads(r.text)%0A print_json(text%5B%22result%22%5D%5B%22players%22%5D)%0A%0A%0Aget_players()%0A#write_results()%0A%0Aprint(%22%5Cndone!%22)%0A
|
|
3c7e8f08699fa6d2b004f86e6bdb0bc4792ae8c2
|
Create regex.py
|
python/regex.py
|
python/regex.py
|
Python
| 0.000212
|
@@ -0,0 +1,126 @@
+# re.IGNORECASE can be used for allowing user to type arbitrary cased texts.%0AQUIT_NO_CASE = re.compile('quit', re.IGNORECASE)%0A
|
|
b01c602f156b5a72db1ea4f27989aa5b1afdada8
|
ADD Cleaning before each test
|
src/behavior/features/terrain.py
|
src/behavior/features/terrain.py
|
Python
| 0
|
@@ -0,0 +1,277 @@
+from lettuce import *%0Aimport requests%0A%0ATARGET_URL='http://localhost:8080'%0A%0AtenantList = %5B %22511%22, %22615%22, %22634%22, %22515%22 %5D%0A%0A@before.each_scenario%0Adef cleanContext(feature):%0A for tenant in tenantList:%0A url = TARGET_URL + '/pap/v1/' + tenant%0A r = requests.delete(url)%0A%0A %0A
|
|
5f4263b6968c839bd67a60f4a2ffd89f8b373193
|
Update __init__.py
|
tendrl/provisioning/objects/definition/__init__.py
|
tendrl/provisioning/objects/definition/__init__.py
|
import pkg_resources
from ruamel import yaml
from tendrl.commons import objects
class Definition(objects.BaseObject):
internal = True
def __init__(self, *args, **kwargs):
self._defs = True
super(Definition, self).__init__(*args, **kwargs)
self.data = pkg_resources.resource_string(__name__,
"provisioning.yaml")
self._parsed_defs = yaml.safe_load(self.data)
self.value = '_NS/provisioning/definitions'
def get_parsed_defs(self):
self._parsed_defs = yaml.safe_load(self.data)
return self._parsed_defs
|
Python
| 0.000072
|
@@ -531,16 +531,92 @@
(self):%0A
+ if self._parsed_defs:%0A return self._parsed_defs%0A %0A
|
f784228170557643bc5cb1efc61ea38b45796210
|
Add flask application
|
app.py
|
app.py
|
Python
| 0.000001
|
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-%0Afrom flask import Flask%0A%0Aapp = Flask(__name__)%0A%0A%0A@app.route('/')%0Adef main():%0A return 'hello'%0A%0A%0Aif __name__ == %22__main__%22:%0A app.run()%0A
|
|
ef67bf3d8a418399fca676502a87ccb7d3914ed1
|
Add module with common potentials, with force versions for some
|
Lib/potentials.py
|
Lib/potentials.py
|
Python
| 0
|
@@ -0,0 +1,1501 @@
+import numpy as np%0Aimport utils%0A%0Adef LJ(r_0, U_0):%0A '''%0A Lennard-Jones with minimum at (r_0, -U_0).%0A '''%0A r_0_6 = r_0 ** 6%0A def func(r_sq):%0A six_term = r_0_6 / r_sq ** 3%0A return U_0 * (six_term ** 2 - 2.0 * six_term)%0A return func%0A%0Adef step(r_0, U_0):%0A '''%0A Potential Well at r with U(r %3C r_0) = 0, U(r %3E r_0) = U_0.%0A '''%0A def func(r_sq):%0A return np.where(r_sq %3C r_0 ** 2, U_0, 0.0)%0A return func%0A%0Adef inv_sq(k):%0A '''%0A Inverse-square law, U(r) = -k / r.%0A '''%0A def func(r_sq):%0A return -k / np.sqrt(r_sq)%0A return func%0A%0Adef harm_osc(k):%0A '''%0A Harmonic oscillator, U(r) = k * (r ** 2) / 2.0.%0A '''%0A def func(r_sq):%0A return 0.5 * k * r_sq ** 2%0A return func%0A%0Adef harm_osc_F(k):%0A '''%0A Harmonic oscillator, F(r) = -k * r.%0A '''%0A def func(r):%0A return -k * r%0A return func%0A%0Adef logistic(r_0, U_0, k):%0A ''' Logistic approximation to step function. '''%0A def func(r_sq):%0A return 0.5 * U_0 * (1.0 + np.tanh(k * (np.sqrt(r_sq) - r_0)))%0A return func%0A%0Adef logistic_F(r_0, U_0, k):%0A def func(r):%0A r_sq = utils.vector_mag(r)%0A return -U_0 * utils.vector_unit_nonull(r) * (1.0 - np.square(np.tanh(k * (np.sqrt(r_sq) - r_0))))%5B:, np.newaxis%5D%0A return func%0A%0Adef anis_wrap(func_iso):%0A '''%0A Wrap an isotropic potential in an anisotropic envelope%0A '''%0A def func_anis(r_sq, theta):%0A return func_iso(r_sq) * (0.5 + np.cos(0.5*theta) ** 2)%0A return func_anis%0A
|
|
1ece8c8640214d69a224f94f1b1ac93ec53d7699
|
Add image processing system (dummy)
|
chunsabot/modules/images.py
|
chunsabot/modules/images.py
|
Python
| 0
|
@@ -0,0 +1,201 @@
+from chunsabot.botlogic import brain%0A%0A@brain.route(%22@image%22)%0Adef add_image_description(msg, extras):%0A attachment = extras%5B'attachment'%5D%0A if not attachment:%0A return None%0A%0A return %22asdf%22%0A
|
|
84be951a9160e9998f3ed702542cee7274081091
|
Create __init__.py
|
spectrum/__init__.py
|
spectrum/__init__.py
|
Python
| 0.000429
|
@@ -0,0 +1 @@
+%0A
|
|
254f4ef03ad50c2227850e8d67b45697a211a42d
|
Update command.py
|
src/command.py
|
src/command.py
|
# -*- coding: utf-8 -*-
import math
def parse_command(commands, game_stats):
"""
Parse a command from a player and run it.
Parameters
----------
command : command from the player (str).
game_stats : stat of the game (dic).
Return
------
game_stats : game stat after the command execution (dic).
Version
-------
specification v1. Nicolas Van Bossuyt (10/2/2017)
implementation v1. Nicolas Van Bossuyt (10/2/2017)
"""
commands = commands.split(' ')
for cmd in commands:
sub_cmd = cmd.split(':')
ship_name = sub_cmd[0]
ship_action = sub_cmd[1]
if ship_action == 'slower' or ship_action == 'faster':
game_stats = command_change_speed(ship_name, ship_action, game_stats)
elif ship_action == 'left' or ship_action == 'right':
game_stats = command_rotate(ship_name, ship_action, game_stats)
else:
ship_action = ship_action.split('-')
coordinate = (int(ship_action[0]), int(ship_action[1]))
game_stats = command_attack(ship_name, coordinate, game_stats)
return game_stats
def command_change_speed(ship, change, game_stats):
"""
Increase the speed of a ship.
Parameters
----------
ship : name of the ship to Increase the speed (str).
change : the way to change the speed <"slower"|"faster"> (str).
game_stats : stats of the game (dic).
Returns
-------
game_stats : the game after the command execution (dic)
Version
-------
specification : Nicolas Van Bossuyt (v1. 09/02/2017)
implementation : Bayron Mahy (v1. 10/02/2017)
"""
type = game_stats['ship'][ship]['type']
# Make the ship move faster.
if change == 'faster' and gamestats['ship'][ship]['speed'] < gamestats['model_ship'][type]['max_speed']:
game_stats['ship'][ship]['speed']+=1
# make the ship move slower.
elif change == 'slower' and gamestats['ship'][ship]['speed'] > 0:
game_stats['ship'][ship]['speed']-=1
# show a message when is a invalide change.
else:
print 'you cannot make that change on the speed of this ship'
return game_stats
def command_rotate(ship, direction, game_stats):
"""
Rotate the ship.
Parameters
----------
ship : name of the ship to Increase the speed.
direction : the direction to rotate the ship <"left"|"right">(str)
game_stats : stats of the game (dic).
Returns
-------
new_game_stats : the game after the command execution.
Version
-------
specification v1. Nicolas Van Bossuyt (10/2/2017)
implementation v1. Nicolas Van Bossuyt (10/2/2017)
"""
def rotate_vector_2D(vector, radian):
"""
Rotate a vector in a 2D space by a specified angle in radian.
Parameters
----------
vector : 2D vector ton rotate (tuple(int,int)).
radian : angle appli to the 2D vector (float).
return
------
vector : rotate vector 2d (tuple(int,int)).
Version
-------
specification v1. Nicolas Van Bossuyt (10/2/2017)
implementation v1. Nicolas Van Bossuyt (10/2/2017)
"""
new_vector = (.0,.0)
# Here is were the magic append.
new_vector[0] = vector[0] * math.cos(radian) - vector[1] * math.sin(radian)
new_vector[1] = vector[0] * math.sin(radian) + vector[1] * math.cos(radian)
return new_vector
if direction == 'left':
gamestats['ship'][ship]['direction'] = rotate_vector_2D(gamestats['ship'][ship]['direction'], -math.pi / 4)
elif direction == 'right':
gamestats['ship'][ship]['direction'] = rotate_vector_2D(gamestats['ship'][ship]['direction'], math.pi / 4)
return game_stats
def command_attack(ship, coordinate, game_stats):
"""
Rotate the ship.
Parameters
----------
ship : name of the ship to Increase the speed.
coordinate : coordinate of the tile to attack (tuple(int,int)).
game_stats : stats of the game (dic).
Returns
-------
new_game_stats : the game after the command execution.
"""
board_widts=game_stats['board_size'][0]
board_lenght=game_stats['board_size'][1]
damages=game_stats ['ship'][ship]['damages']
ship_abscissa=game_stats['ship'][ship]['position'][0]
ship_orderly=game_stats['ship'][ship]['position'][1]
distance=(coordinate[0]-ship_abscissa ) + (coordinate[1]-ship_orderly )
if distance<=game_stats ['ship'][ship]['range'] :
for element in game_stats['board'][coordinate] :
game_stats['ship'][element]['heal_point']-=damages
if game_stats['ship'][element]['heal_point']<=0:
game_stats['board'][coordinate].remove(element)
return new_game_stats
raise NotImplementedError
|
Python
| 0.000002
|
@@ -4235,16 +4235,18 @@
ze'%5D%5B1%5D%0A
+%09%0A
%09damages
@@ -4283,16 +4283,16 @@
mages'%5D%0A
-
%09ship_ab
@@ -4393,16 +4393,18 @@
on'%5D%5B1%5D%0A
+%09%0A
%09distanc
@@ -4468,16 +4468,18 @@
derly )%0A
+%09%0A
%09if dist
@@ -4521,16 +4521,19 @@
nge'%5D :%0A
+%09%09%0A
%09%09for el
@@ -4575,16 +4575,20 @@
nate%5D :%0A
+%09%09%09%0A
%09%09%09game_
@@ -4633,16 +4633,20 @@
damages%0A
+%09%09%09%0A
%09%09%09if ga
|
92fde42097c4e0abbf5a7835a72f58f52c9b8499
|
Create example.py
|
Python/example.py
|
Python/example.py
|
Python
| 0.000001
|
@@ -0,0 +1,28 @@
+#This is an example script.%0A
|
|
3a56b89aacad2f948bf85b78c0834edf7c8d8d01
|
Add missing file.
|
renamer/util.py
|
renamer/util.py
|
Python
| 0.000001
|
@@ -0,0 +1,1410 @@
+import re%0A%0A%0Aclass ConditionalReplacer(object):%0A def __init__(self, cond, regex, repl):%0A super(ConditionalReplacer, self).__init__()%0A%0A self.cond = re.compile(cond)%0A self.regex = re.compile(regex)%0A self.repl = repl%0A%0A @classmethod%0A def fromString(cls, s):%0A return cls(*s.strip().split('%5Ct'))%0A%0A def replace(self, input, predInput):%0A if self.cond.search(predInput) is None:%0A return input%0A return self.regex.sub(self.repl, input, int(not self.globalReplace))%0A%0A%0Aclass Replacer(ConditionalReplacer):%0A def __init__(self, regex, replace):%0A super(Replacer, self).__init__(r'.*', regex, replace)%0A%0A def replace(self, input, predInput):%0A return super(Replacer, self).replace(input, input)%0A%0A%0Aclass Replacement(object):%0A def __init__(self, replacers):%0A super(Replacement, self).__init__()%0A self.replacers = replacers%0A%0A @classmethod%0A def fromFile(cls, fd, replacerType=Replacer):%0A replacers = %5B%5D%0A if fd is not None:%0A replacers = %5BreplacerType.fromString(line) for line in fd%5D%0A return cls(replacers)%0A%0A def add(self, replacer):%0A self.replacers.append(replacer)%0A%0A def replace(self, input, predInput=None):%0A if predInput is None:%0A predInput = input%0A%0A for r in self.replacers:%0A input = r.replace(input, predInput)%0A%0A return input%0A
|
|
066673aea6887d9272646d8bac8f99c69387e61d
|
add management command to check the status of a bounced email
|
corehq/util/management/commands/check_bounced_email.py
|
corehq/util/management/commands/check_bounced_email.py
|
Python
| 0
|
@@ -0,0 +1,3086 @@
+from django.core.management.base import BaseCommand%0A%0Afrom corehq.util.models import (%0A BouncedEmail,%0A PermanentBounceMeta,%0A ComplaintBounceMeta,%0A)%0A%0A%0Aclass Command(BaseCommand):%0A help = %22Check on the bounced status of an email%22%0A%0A def add_arguments(self, parser):%0A parser.add_argument('bounced_email')%0A parser.add_argument(%0A '--show-details',%0A action='store_true',%0A default=False,%0A help='Show extra details of bounced messages',%0A )%0A%0A def handle(self, bounced_email, **options):%0A is_bounced = BouncedEmail.objects.filter(email=bounced_email).exists()%0A show_details = options%5B'show_details'%5D%0A%0A if not is_bounced:%0A self.stdout.write(f'%5Cn%7Bbounced_email%7D is NOT bouncing. '%0A f'All clear!%5Cn%5Cn')%0A return%0A%0A self.stdout.write('%5Cn%5Cn')%0A self.stdout.write('*' * 200)%0A self.stdout.write(f'! YES, %7Bbounced_email%7D is marked as bounced%5Cn')%0A self.stdout.write('*' * 200)%0A self.stdout.write('%5Cn')%0A%0A permanent_bounces = PermanentBounceMeta.objects.filter(%0A bounced_email__email=bounced_email).all()%0A%0A if permanent_bounces:%0A self.stdout.write('The following Permanent Bounce '%0A 'records were found:')%0A self.stdout.write('%5CnSub-Type%5CtSNS Timestamp'%0A '%5Ct%5Ct%5CtCreated on HQ%5Ct%5Ct%5CtReason')%0A self.stdout.write('.' 
* 200)%0A for record in permanent_bounces:%0A self.stdout.write(f'%7Brecord.sub_type%7D'%0A f'%5Ct%5Ct%7Brecord.timestamp%7D'%0A f'%5Ct%7Brecord.created%7D'%0A f'%5Ct%7Brecord.reason%7D')%0A if show_details:%0A for key, val in record.headers:%0A self.stdout.write(f'%5Ct%5Ct%7Bkey%7D:%5Ct%7Bval%7D')%0A self.stdout.write(f'%5Ct%5Ctdestination:%5Ct%7Brecord.destination%7D')%0A self.stdout.write('%5Cn%5Cn')%0A%0A complaints = ComplaintBounceMeta.objects.filter(%0A bounced_email__email=bounced_email).all()%0A%0A if complaints:%0A self.stdout.write('The following Complaint '%0A 'records were found:')%0A self.stdout.write('%5CnSNS Timestamp'%0A '%5Ct%5Ct%5CtCreated on HQ'%0A '%5Ct%5Ct%5CtFeedback Type'%0A '%5Ct%5CtSub-Type'%0A '%5CtDestination')%0A self.stdout.write('.' * 200)%0A for record in complaints:%0A self.stdout.write(f'%7Brecord.timestamp%7D'%0A f'%5Ct%7Brecord.created%7D'%0A f'%5Ct%7Brecord.feedback_type%7D'%0A f'%5Ct%7Brecord.sub_type%7D'%0A f'%5Ct%7Brecord.destination%7D')%0A if show_details:%0A for key, val in record.headers:%0A self.stdout.write(f'%5Ct%5Ct%7Bkey%7D:%5Ct%7Bval%7D')%0A self.stdout.write('%5Cn%5Cn')%0A
|
|
fea6011cf14e87492d511db3ed9415f5938929bf
|
add ex8
|
ex8.py
|
ex8.py
|
Python
| 0.99848
|
@@ -0,0 +1,353 @@
+formatter = %22%25r %25r %25r %25r%22%0A%0Aprint formatter %25(1, 2, 3, 4)%0Aprint formatter %25 (%22one%22, %22two%22, %22three%22, %22four%22)%0Aprint formatter %25(True, False, False, True)%0Aprint formatter %25(formatter, formatter, formatter,formatter)%0Aprint formatter %25 (%0A %22I had this thing.%22,%0A %22That you could type up right%22,%0A %22But it did't sing.%22,%0A %22So I said goodnight.%22%0A )%0A%0A
|
|
2ea014495f559072c5ecfac0b1117979793cf042
|
Create ruuvitag-web.py
|
ruuvitag-web.py
|
ruuvitag-web.py
|
Python
| 0
|
@@ -0,0 +1,1773 @@
+#!/usr/bin/python3%0A%0Afrom flask import Flask, render_template%0Afrom datetime import datetime, timedelta%0Aimport sqlite3%0Aimport json%0Aimport random%0A%0Aapp = Flask(__name__)%0A%0Adef randomRGB():%0A r, g, b = %5Brandom.randint(0,255) for i in range(3)%5D%0A return r, g, b, 1%0A%0A%0A@app.route('/')%0Adef index():%0A%09conn = sqlite3.connect(%22ruuvitag.db%22)%0A%09conn.row_factory = sqlite3.Row%0A%0A%09# set hom many days you want to see in charts%0A%09N = 30 # show charts for 30 days%0A%09%0A%09date_N_days_ago = str(datetime.now() - timedelta(days=N))%0A%09tags = conn.execute(%22SELECT DISTINCT mac, name FROM sensors WHERE timestamp %3E '%22+date_N_days_ago+%22' ORDER BY name, timestamp DESC%22)%0A%0A%09sensors = %5B'temperature', 'humidity', 'pressure'%5D%0A%0A%09sList = %7B%7D%0A%09datasets = %7B%7D%0A%09for sensor in sensors:%0A%09%09datasets%5Bsensor%5D = %5B%5D%0A%0A%09for tag in tags:%0A%09%09if tag%5B'name'%5D:%0A%09%09%09sList%5B'timestamp'%5D = %5B%5D%0A%09%09%09for sensor in sensors:%0A%09%09%09%09sList%5Bsensor%5D = %5B%5D%0A%0A%09%09%09sData = conn.execute(%22SELECT timestamp, temperature, humidity, pressure FROM sensors WHERE mac = '%22+tag%5B'mac'%5D+%22' AND timestamp %3E '%22+date_N_days_ago+%22' ORDER BY timestamp%22)%0A%09%09%09for sRow in sData:%0A%09%09%09%09sList%5B'timestamp'%5D.append(str(sRow%5B'timestamp'%5D)%5B:-3%5D) # remove seconds from timestamp%0A%09%09%09%09for sensor in sensors:%0A%09%09%09%09%09sList%5Bsensor%5D.append(sRow%5Bsensor%5D)%0A%0A%09%09%09color = randomRGB()%0A%09%09%09%0A%09%09%09dataset = %22%22%22%7B%7B%0A%09%09%09%09label: '%7B%7D',%0A%09%09%09%09borderColor: 'rgba%7B%7D',%0A%09%09%09%09fill: false,%0A%09%09 lineTension: 0.2,%0A%09%09%09%09data: %7B%7D%0A%09%09%09%7D%7D%22%22%22%0A%09%09%09for sensor in sensors:%0A%09%09%09%09datasets%5Bsensor%5D.append(dataset.format(tag%5B'name'%5D, color, sList%5Bsensor%5D))%0A%0A%09conn.close()%0A%09return render_template('ruuvitag.html', time = sList%5B'timestamp'%5D, 
temperature = datasets%5B'temperature'%5D, humidity = datasets%5B'humidity'%5D, pressure = datasets%5B'pressure'%5D)%0A%0Aif __name__ == '__main__':%0A app.run(debug=True, host='0.0.0.0', port=int('80'))%0A
|
|
2becf3b5223da8dc8d312462ae84f32ec3aff129
|
Create hw1.py
|
hw1.py
|
hw1.py
|
Python
| 0.000015
|
@@ -0,0 +1,2386 @@
+# Name: Yicheng Liang%0A# Computing ID: yl9jv%0A%0Aimport math%0A%0Ak = raw_input(%22Please enter the value for k: %22)%0Awhile (not k.isdigit()):%0A k = raw_input(%22Please enter a number for k: %22)%0Ak = int(k)%0A %0Am = raw_input(%22Please enter the value for M: %22)%0Awhile (not m.isdigit()):%0A m = raw_input(%22Please enter a number for M: %22)%0Am = int(m)%0A%0Aitems = %5B%5D%0A%0Afilename = raw_input(%22Please enter the file name: %22)%0Af = open(filename, 'r')%0Awhile (m %3E 0):%0A line = f.readline()%0A if line == %22%22:%0A break%0A else:%0A m -= 1%0A temp = line.split()%0A items.append((temp%5B0%5D, float(temp%5B1%5D), float(temp%5B2%5D)))%0A%0Anew_item = raw_input(%22Please enter the values for x and y: %22)%0Atemp = new_item.split()%0Ax = float(temp%5B0%5D)%0Ay = float(temp%5B1%5D)%0A%0Awhile (x != 1.0 and y != 1.0):%0A neighbors = %5B%5D%0A%0A for item in items:%0A distance = math.sqrt(math.pow((x - item%5B1%5D), 2) + math.pow((y - item%5B2%5D), 2))%0A neighbors.append((item%5B0%5D, item%5B1%5D, item%5B2%5D, distance))%0A %0A neighbors = sorted(neighbors, key=lambda a:a%5B3%5D)%0A counter = 0%0A cat1 = %22%22%0A cat2 = %22%22%0A num_cat1 = 0%0A num_cat2 = 0%0A cat1_distance = 0%0A cat2_distance = 0%0A print %22(1) nearest neighbors: %22%0A k_NN = %5B%5D%0A while counter != k:%0A print neighbors%5Bcounter%5D%0A if cat1 == %22%22:%0A cat1 = neighbors%5Bcounter%5D%5B0%5D%0A num_cat1 += 1%0A cat1_distance += neighbors%5Bcounter%5D%5B3%5D%0A elif (cat1 != neighbors%5Bcounter%5D%5B0%5D and cat2 == %22%22):%0A cat2 = neighbors%5Bcounter%5D%5B0%5D%0A num_cat2 += 1%0A cat2_distance += neighbors%5Bcounter%5D%5B3%5D%0A elif cat1 == neighbors%5Bcounter%5D%5B0%5D:%0A num_cat1 += 1%0A cat1_distance += neighbors%5Bcounter%5D%5B3%5D%0A elif cat2 == neighbors%5Bcounter%5D%5B0%5D:%0A num_cat2 += 1%0A cat2_distance += neighbors%5Bcounter%5D%5B3%5D%0A counter += 1%0A k_NN.append(neighbors%5Bcounter%5D)%0A print ''%0A %0A if num_cat1 %3C num_cat2:%0A print %22(2)point 
classified as: %22 + cat2%0A else:%0A print %22(2)point classified as: %22 + cat1%0A print ''%0A %0A print %22(3) average distance from %22 + cat1 + %22 is: %22 + str(cat1_distance / num_cat1)%0A print %22 average distance from %22 + cat2 + %22 is: %22 + str(cat2_distance / num_cat2)%0A print %22%22%0A print %22%22%0A %0A new_item = raw_input(%22Please enter the values for x and y: %22)%0A temp = new_item.split()%0A x = float(temp%5B0%5D)%0A y = float(temp%5B1%5D)%0A
|
|
6274ee8d776c829998dfaa56cb419d1263242a48
|
Add topological sorting in Python
|
Algorithms/Sort_Algorithms/Topological_Sort/TopologicalSort.py
|
Algorithms/Sort_Algorithms/Topological_Sort/TopologicalSort.py
|
Python
| 0.000001
|
@@ -0,0 +1,935 @@
+'''%0ATopological sort.%0ATaken from : %0Ahttp://stackoverflow.com/questions/15038876/topological-sort-python%0A'''%0A%0Afrom collections import defaultdict%0Afrom itertools import takewhile, count%0A%0Adef sort_topologically(graph):%0A levels_by_name = %7B%7D%0A names_by_level = defaultdict(set)%0A%0A def walk_depth_first(name):%0A if name in levels_by_name:%0A return levels_by_name%5Bname%5D%0A children = graph.get(name, None)%0A level = 0 if not children else (1 + max(walk_depth_first(lname) for lname in children))%0A levels_by_name%5Bname%5D = level%0A names_by_level%5Blevel%5D.add(name)%0A return level%0A%0A for name in graph:%0A walk_depth_first(name)%0A%0A return list(takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in count())))%0A%0A%0Agraph = %7B%0A 1: %5B2, 3%5D,%0A 2: %5B4, 5, 6%5D,%0A 3: %5B4,6%5D,%0A 4: %5B5,6%5D,%0A 5: %5B6%5D,%0A 6: %5B%5D%0A %7D%0A%0Aprint(sort_topologically(graph))
|
|
df52febb14761d741a20dcdc1cbfd5ea8cd7e07b
|
add my bing script as an example
|
bingaling.aclark.py
|
bingaling.aclark.py
|
Python
| 0
|
@@ -0,0 +1,751 @@
+#!/usr/bin/python%0A%0Aimport re%0A%0Aimport baseformat%0Aimport bingaling%0A%0Abingcheck_restr = r'(%5Ba4%5D%5Bc%5D%5Bl1%5D%5Ba4%5D%5Br%5D%5Bk%5D)%7C(%5Ba4%5D%5Bl1%5D%5Bi1%5D)'%0Abingcheck_full = re.compile(r'(('+'%5Cx04%5Cx65'+r')%7C('+'%5Cx04%5Cx63'r')%7C(%5B%5E%5Cw%5C-'+'%5Cx04'+r'%5D)%7C(%5E)%7C(%5Ct))(' + bingcheck_restr + r')((%5B%5E%5Cw%5C-%5D)%7C($))', re.IGNORECASE)%0A%0Adef bingcheck(line):%0A r = baseformat.splitter.match(line)%0A%0A if r:%0A sub = r.group(3) + '%5Ct' + r.group(4) + '%5Ct' + re.sub(bingcheck_full, '%5C%5C1%5Cx16%5C%5C7%5Cx16%5C%5C11', r.group(5));%0A%0A if r.group(1):%0A sub = r.group(1) + sub%0A else:%0A sub = re.sub(bingcheck_full, '%5Cx16%5C%5C7%5Cx16', line);%0A%0A if sub != line:%0A bingaling.sendmail(line)%0A%0A return sub%0A%0Adef main():%0A bingaling.bingpipe(bingcheck)%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
d95732ce90c5c9ac571ffc78b45eaa4424a11038
|
Create nested_scrape.py
|
nested_scrape.py
|
nested_scrape.py
|
Python
| 0.000001
|
@@ -0,0 +1,1013 @@
+%22%22%22 PROJECT SCRAPER %22%22%22%09%09%0A%0Afrom bs4 import BeautifulSoup%0Aimport urllib2%0A%0A%0A%0Adef scraper(url, outer_tag, outer_attr, outer_attr_name, inner_tag, inner_attr, inner_attr_name):%0A%0A%09''' BEAUTIFUL SOUP INIT '''%0A%09web=urllib2.urlopen(url)%0A%09soup=BeautifulSoup(web,'html.parser',from_encoding='utf-8')%0A%0A%09''' ***** CONTENT LIST ***** '''%0A%09content=%5B%5D%0A%09%0A%0A%09for outer_ in soup.find_all(outer_tag,%7Bouter_attr : outer_attr_name%7D):%0A%09%09for inner_ in outer_.find_all( inner_tag,%7Binner_attr : inner_attr_name%7D):%0A%09%09%09content.append(inner_.text.encode('utf-8'))%0A%09print content%0A%0Adef decode_prams():%0A%09pass%0A%0Aif __name__=='__main__':%0A%09print 'url'%0A%09url=raw_input()%0A%09print 'outer tag name'%0A%09outer_tag=raw_input()%0A%09print ' outer attr'%0A%09outer_attr=raw_input()%0A%09print 'outer attr name'%0A%09outer_attr_name=raw_input()%0A%09print 'inner tag'%0A%09inner_tag=raw_input()%0A%09print 'inner attr'%0A%09inner_attr=raw_input()%0A%09print 'inner attr name'%0A%09inner_attr_name=raw_input()%0A%09scraper(url, outer_tag, outer_attr, outer_attr_name, inner_tag, inner_attr, inner_attr_name)%0A
|
|
147a0815f21f807ac6a3e1c39c820e2b8364ad02
|
Tweak style.
|
script/libuv.py
|
script/libuv.py
|
#!/usr/bin/env python
# Downloads and compiles libuv.
from __future__ import print_function
import os
import os.path
import platform
import shutil
import subprocess
import sys
LIB_UV_VERSION = "v1.6.1"
LIB_UV_DIR = "build/libuv"
def ensure_dir(dir):
"""Creates dir if not already there."""
if os.path.isdir(dir):
return
os.makedirs(dir)
def remove_dir(dir):
"""Recursively removes dir."""
if platform.system() == "Windows":
# rmtree gives up on readonly files on Windows
# rd doesn't like paths with forward slashes
subprocess.check_call(['cmd', '/c', 'rd', '/s', '/q', dir.replace('/', '\\')])
else:
shutil.rmtree(LIB_UV_DIR)
def download_libuv():
"""Clones libuv into build/libuv and checks out the right version."""
# Delete it if already there so we ensure we get the correct version if the
# version number in this script changes.
if os.path.isdir(LIB_UV_DIR):
print("Cleaning output directory...")
remove_dir(LIB_UV_DIR)
ensure_dir("build")
print("Cloning libuv...")
run([
"git", "clone", "--quiet", "--depth=1",
"https://github.com/libuv/libuv.git",
LIB_UV_DIR
])
print("Getting tags...")
run([
"git", "fetch", "--quiet", "--depth=1", "--tags"
], cwd=LIB_UV_DIR)
print("Checking out libuv " + LIB_UV_VERSION + "...")
run([
"git", "checkout", "--quiet", LIB_UV_VERSION
], cwd=LIB_UV_DIR)
# TODO: Pin gyp to a known-good commit. Update a previously downloaded gyp
# if it doesn't match that commit.
print("Downloading gyp...")
run([
"git", "clone", "--quiet", "--depth=1",
"https://chromium.googlesource.com/external/gyp.git",
LIB_UV_DIR + "/build/gyp"
])
def build_libuv_mac():
# Create the XCode project.
run([
"python", LIB_UV_DIR + "/gyp_uv.py", "-f", "xcode"
])
# Compile it.
# TODO: Support debug builds too.
run([
"xcodebuild",
# Build a 32-bit + 64-bit universal binary:
"ARCHS=i386 x86_64", "ONLY_ACTIVE_ARCH=NO",
"-project", LIB_UV_DIR + "/uv.xcodeproj",
"-configuration", "Release",
"-target", "All"
])
def build_libuv_linux():
run(["python", "gyp_uv.py", "-f", "make"], cwd=LIB_UV_DIR)
run(["make", "-C", "out", "BUILDTYPE=Release"], cwd=LIB_UV_DIR)
def build_libuv_windows():
run(["cmd", "/c", "vcbuild.bat", "release"], cwd=LIB_UV_DIR)
def build_libuv():
if platform.system() == "Darwin":
build_libuv_mac()
elif platform.system() == "Linux":
build_libuv_linux()
elif platform.system() == "Windows":
build_libuv_windows()
else:
print("Unsupported platform: " + platform.system())
sys.exit(1)
def run(args, cwd=None):
"""Spawn a process to invoke [args] and mute its output."""
subprocess.check_output(args, cwd=cwd, stderr=subprocess.STDOUT)
def main():
download_libuv()
build_libuv()
main()
|
Python
| 0
|
@@ -402,18 +402,16 @@
dir.%22%22%22%0A
-
%0A if pl
@@ -566,16 +566,25 @@
ck_call(
+%0A
%5B'cmd',
|
d2ed4e7a0d8edafa250044e8b9ecf319c14b85e0
|
add pkc.py
|
pkc.py
|
pkc.py
|
Python
| 0.000308
|
@@ -0,0 +1,1043 @@
+class PkcError(BaseException):%0A pass%0A%0Aclass PkcTypeError(TypeError, PkcError):%0A pass%0A%0Aclass PkcCertificateError(ValueError, PkcError):%0A pass%0A%0Aclass PkcPublickeyError(ValueError, PkcError):%0A pass%0A%0Adef pkc_extract_publickey_from_certificate(certificate):%0A if type(certificate) is bytes:%0A return _pkc_extract_publickey_from_certificate(certificate)%0A else:%0A raise PkcTypeError%0A%0Adef pkc_compress_publickey(publickey):%0A if type(publickey) is bytes:%0A return _pkc_compress_publickey(publickey)%0A else:%0A raise PkcTypeError%0A%0Adef pkc_verify_signature(publickey, message, signatures):%0A if (type(publickey) is bytes and type(message) is bytes%0A and type(signatures) is bytes):%0A return _pkc_verify_signature(publickey, message, signatures)%0A else:%0A raise PkcTypeError%0A%0Adef _pkc_extract_publickey_from_certificate(certificate_bytes):%0A pass%0A%0Adef _pkc_compress_publickey(publickey_bytes):%0A pass%0A%0Adef _pkc_verify_signature(publickey_bytes, message_bytes, signatures_bytes):%0A pass%0A
|
|
3641160e055128c0d799926229959fef33ffa26e
|
use our own django style, width increased to flow nicely with default toolbar
|
ckeditor/widgets.py
|
ckeditor/widgets.py
|
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.utils.encoding import force_unicode
from django.utils import simplejson
from django.core.exceptions import ImproperlyConfigured
from django.forms.util import flatatt
json_encode = simplejson.JSONEncoder().encode
DEFAULT_CONFIG = {
'skin': 'v2',
'toolbar': 'Full',
'height': 291,
'width': 618,
'filebrowserWindowWidth': 940,
'filebrowserWindowHeight': 747,
}
class CKEditorWidget(forms.Textarea):
"""
Widget providing CKEditor for Rich Text Editing.
Supports direct image uploads and embed.
"""
class Media:
try:
js = (
settings.CKEDITOR_MEDIA_PREFIX + 'ckeditor/ckeditor.js',
)
except AttributeError:
raise ImproperlyConfigured("django-ckeditor requires \
CKEDITOR_MEDIA_PREFIX setting. This setting specifies a \
URL prefix to the ckeditor JS and CSS media (not \
uploaded media). Make sure to use a trailing slash: \
CKEDITOR_MEDIA_PREFIX = '/media/ckeditor/'")
def __init__(self, config_name='default', *args, **kwargs):
super(CKEditorWidget, self).__init__(*args, **kwargs)
# Setup config from defaults.
self.config = DEFAULT_CONFIG
# Try to get valid config from settings.
configs = getattr(settings, 'CKEDITOR_CONFIGS', None)
if configs != None:
if isinstance(configs, dict):
# Make sure the config_name exists.
if configs.has_key(config_name):
config = configs[config_name]
# Make sure the configuration is a dictionary.
if not isinstance(config, dict):
raise ImproperlyConfigured('CKEDITOR_CONFIGS["%s"] \
setting must be a dictionary type.' % \
config_name)
# Override defaults with settings config.
self.config.update(config)
else:
raise ImproperlyConfigured("No configuration named '%s' \
found in your CKEDITOR_CONFIGS setting." % \
config_name)
else:
raise ImproperlyConfigured('CKEDITOR_CONFIGS setting must be a\
dictionary type.')
def render(self, name, value, attrs={}):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
self.config['filebrowserUploadUrl'] = reverse('ckeditor_upload')
self.config['filebrowserBrowseUrl'] = reverse('ckeditor_browse')
return mark_safe(render_to_string('ckeditor/widget.html', {
'final_attrs': flatatt(final_attrs),
'value': conditional_escape(force_unicode(value)),
'id': final_attrs['id'],
'config': json_encode(self.config)
})
)
|
Python
| 0
|
@@ -506,10 +506,14 @@
': '
-v2
+django
',%0A
@@ -570,11 +570,11 @@
h':
-618
+835
,%0A
|
b716ae64ec574d741386b1dfc18c76e9bddec9a0
|
add closure example
|
closure.py
|
closure.py
|
Python
| 0.000001
|
@@ -0,0 +1,1154 @@
+%22%22%22%0A%25%25closure cell magic for running the cell in a function,%0Areducing pollution of the namespace%0A%0A%25%25forget does the same thing, but explicitly deletes new names,%0Arather than wrapping the cell in a function.%0A%22%22%22%0A%0Afrom IPython.utils.text import indent%0A%0Adef closure(line, cell):%0A %22%22%22run the cell in a function, generating a closure%0A %0A avoids affecting the user's namespace%0A %22%22%22%0A ip = get_ipython()%0A func_name = %22_closure_magic_f%22%0A block = '%5Cn'.join(%5B%0A %22def %25s():%22 %25 func_name,%0A indent(cell),%0A %22%25s()%22 %25 func_name%0A %5D)%0A ip.run_cell(block)%0A ip.user_ns.pop(func_name, None)%0A%0Adef forget(line, cell):%0A %22%22%22cleanup any new variables defined in the cell%0A %0A avoids UnboundLocals that might show up in %25%25closure%0A %0A changes to existing variables are not affected%0A %22%22%22%0A ip = get_ipython()%0A before = set(ip.user_ns.keys())%0A ip.run_cell(cell)%0A after = set(ip.user_ns.keys())%0A for key in after.difference(before):%0A ip.user_ns.pop(key)%0A%0Adef load_ipython_extension(ip):%0A mm = ip.magics_manager%0A mm.register_function(closure, 'cell')%0A mm.register_function(forget, 'cell')%0A%0A
|
|
7d28f97fb16684c58cf9e55bcca213e853741ca4
|
Create rmq.py
|
rmq.py
|
rmq.py
|
Python
| 0.000002
|
@@ -0,0 +1,950 @@
+#!/usr/local/bin/python3%0Afrom sys import stdin%0Afrom math import ceil, log%0Afrom decimal import Decimal as d%0A%0Aclass RMQ(object):%0A def __init__(self, numbers):%0A self.e = %5B%5D%0A n = len(numbers)%0A %0A if (n & (n-1))!=0:%0A x = ceil(log(n, 2))%0A nn = 2**x;%0A %0A while n != nn:%0A numbers.append(d('Infinity'))%0A %0A self.size = len() %0A %0A def build(self):%0A idx = self.size - 1%0A while idx %3E 1:%0A self.e%5Bidx//2%5D = min(self.e%5Bidx%5D, self.e%5Bidx+1%5D)%0A idx -= 2%0A %0A def min(self, left, right):%0A pass%0A %0A def set(self, origin, value):%0A pass%0A %0A %0Aif __name__ == '__main__':%0A %0A f = open('input.txt', 'r')%0A n, m = map(int, f.readline().split())%0A numbers = list(map(int, f.readline().split()))%0A # print(n, m)%0A rmq = RMQ(numbers)%0A %0A # print(numbers)%0A %0A for i in range(0, m):%0A c, x, y = f.readline().split()%0A %0A if c == 'Min':%0A rmq.min(x, y)%0A elif c == 'Set':%0A rmq.set(x, y)%0A
|
|
72678c437f1b1110fb8a14c78dcdd4c3c8b64157
|
Add initial version of bot script
|
rtm.py
|
rtm.py
|
Python
| 0
|
@@ -0,0 +1,840 @@
+import time%0Afrom slackclient import SlackClient%0A%0Atoken = 'kekmao'%0Asc = SlackClient(token)%0Ateam_join_event = 'team_join'%0A%0A%0Adef send_welcome_message(user):%0A user_id = user%5B'id'%5D%0A response = sc.api_call('im.open', user=user_id)%0A try:%0A dm_channel_id = response%5B'channel'%5D%5B'id'%5D%0A except (KeyError, ValueError):%0A print('Shite happened')%0A return%0A sc.rtm_send_message(dm_channel_id, 'welcome to devup')%0A%0A%0Adef main():%0A if sc.rtm_connect():%0A while True:%0A for event in sc.rtm_read():%0A if event.get('type') == team_join_event and (%0A event%5B'user'%5D%5B'is_bot'%5D is False):%0A send_welcome_message(user=event%5B'user'%5D)%0A time.sleep(1)%0A else:%0A print (%22Connection Failed, invalid token?%22)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
4c499d366429f68ff29c7a2f93553b06f3697405
|
Add missing oslo/__init__.py
|
oslo/__init__.py
|
oslo/__init__.py
|
Python
| 0.000007
|
@@ -0,0 +1,629 @@
+# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0A__import__('pkg_resources').declare_namespace(__name__)%0A
|
|
c18bbb7cb752e6a4421b98c100f682f3c7c45882
|
Fix bug 1254581 - Exclude file upload view from zone middleware.
|
kuma/wiki/middleware.py
|
kuma/wiki/middleware.py
|
from django.http import HttpResponseRedirect
from django.shortcuts import render
from kuma.core.utils import urlparams
from .exceptions import ReadOnlyException
from .jobs import DocumentZoneURLRemapsJob
class ReadOnlyMiddleware(object):
"""
Renders a 403.html page with a flag for a specific message.
"""
def process_exception(self, request, exception):
if isinstance(exception, ReadOnlyException):
context = {'reason': exception.args[0]}
return render(request, '403.html', context, status=403)
return None
class DocumentZoneMiddleware(object):
"""
For document zones with specified URL roots, this middleware modifies the
incoming path_info to point at the internal wiki path
"""
def process_request(self, request):
# https://bugzil.la/1189222
# Don't redirect POST $subscribe requests to GET zone url
if request.method == 'POST' and '$subscribe' in request.path:
return None
remaps = DocumentZoneURLRemapsJob().get(request.LANGUAGE_CODE)
for original_path, new_path in remaps:
if (
request.path_info == original_path or
request.path_info.startswith(u''.join([original_path, '/']))
):
# Is this a request for the "original" wiki path? Redirect to
# new URL root, if so.
new_path = request.path_info.replace(original_path,
new_path,
1)
new_path = '/%s%s' % (request.LANGUAGE_CODE, new_path)
query = request.GET.copy()
if 'lang' in query:
query.pop('lang')
new_path = urlparams(new_path, query_dict=query)
return HttpResponseRedirect(new_path)
elif request.path_info.startswith(new_path):
# Is this a request for the relocated wiki path? If so, rewrite
# the path as a request for the proper wiki view.
request.path_info = request.path_info.replace(new_path,
original_path,
1)
break
|
Python
| 0
|
@@ -906,16 +906,17 @@
if
+(
request.
@@ -935,17 +935,34 @@
OST' and
-
+%0A (
'$subscr
@@ -981,16 +981,46 @@
est.path
+ or '$files' in request.path))
:%0A
|
5ee021af46f7b6420b5edeac38f5f34f675fa625
|
create basic crawler
|
crawler.py
|
crawler.py
|
Python
| 0.000005
|
@@ -0,0 +1,2475 @@
+# -*- coding:utf-8 -*-%0A%0Afrom urllib import request, parse, error%0Afrom time import sleep%0Aimport re, os%0A%0Astart_tid = '2507213' # change initial url at here%0ASEXINSEX_URLS_PREFIX = 'http://www.sexinsex.net/forum/'%0Aencoding = 'gbk'%0Apath = os.path.abspath('.')%0Asleeptime = 0%0A%0Adef generate_url(tid,pid):%0A return ''.join(%5BSEXINSEX_URLS_PREFIX, 'thread-', str(tid), '-', str(pid), '-1.html'%5D)%0A%0Adef download(url):%0A r = request.Request(url)%0A r.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A')%0A with request.urlopen(r) as f:%0A with open('%25s/html/%25s'%25(path,parse.quote_plus(url)), 'w') as s:%0A s.write(f.read().decode(encoding=encoding,errors='ignore'))%0A%0Aclass thread():%0A%0A def __init__(self,tid):%0A self.tid = tid%0A self.extracted_pids = %5B%5D%0A self.url = generate_url(self.tid, 1)%0A%0A # extract all pages in a thread, used to extract contents not in the first page%0A def extract_pids(self):%0A with open('%25s/html/%25s'%25(path,parse.quote_plus(self.url)), 'r') as f:%0A p = re.compile(r'''%3Cdiv class=%22pages%22%3E.*%3Ca href=%22.*%22 class=%22last%22%3E(.*)%3C/a%3E.*%3C/div%3E''')%0A for line in f.readlines():%0A n = p.search(line)%0A if n:%0A self.extracted_pids = list(range(1,int(n.group(1))+1))%0A break%0A%0Aclass page():%0A%0A def __init__(self,tid,pid):%0A self.tid = tid%0A self.pid = pid%0A self.extracted_tids = %5B%5D%0A self.url = generate_url(self.tid,self.pid)%0A%0A # extract all refered tids in this page%0A def extract_tids(self):%0A with open('%25s/html/%25s'%25(path,parse.quote_plus(self.url)), 'r') as f:%0A p = re.compile(r'''%3Ca href=%22thread-(%5Cd*)-%5Cd*-%5Cd*.html%22''')%0A for line in f.readlines():%0A n = p.search(line)%0A if n:%0A self.extracted_tids.append(n.group(1))%0A%0Adef main():%0A download(generate_url(start_tid,1))%0A start_page = page(start_tid,1)%0A start_page.extract_tids()%0A%0A 
print('Extracted tids:')%0A print(start_page.extracted_tids)%0A%0A for tid in start_page.extracted_tids:%0A try:%0A if sleeptime:%0A print('sleeping...')%0A sleep(sleeptime)%0A print('downloading:',tid)%0A download(generate_url(tid,1))%0A except error.HTTPError:%0A print('HTTPError')%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
5f3a665e4611ae8faf82fcfb2804a0fd9aa84d2b
|
Create majority_number_iii.py
|
lintcode/majority_number_iii/py/majority_number_iii.py
|
lintcode/majority_number_iii/py/majority_number_iii.py
|
Python
| 0.999146
|
@@ -0,0 +1,458 @@
+class Solution:%0A %22%22%22%0A @param nums: A list of integers%0A @param k: As described%0A @return: The majority number%0A %22%22%22%0A def majorityNumber(self, nums, k):%0A import collections%0A %0A ratio = 1.0 / k * len(nums)%0A counter = collections.Counter(nums)%0A %0A for num in counter:%0A count = counter%5Bnum%5D%0A %0A if count %3E ratio:%0A return num%0A %0A return None%0A
|
|
8be862467344b9cf45b567008f10face0ed3ebf3
|
Create zhconvert.py for Alpha 1.0.3
|
packages/zhconvert.py
|
packages/zhconvert.py
|
Python
| 0
|
@@ -0,0 +1,611 @@
+import requests%0A%0A_url = 'http://opencc.byvoid.com/convert/'%0A%0Adef toTraditional(text):%0A # if len(text) %3E 100:%0A req = requests.post(_url, data=%7B'text':text,'config':'s2t.json','precise':'0'%7D)%0A return req.text%0A # else:%0A # result = ''%0A # for segment in %5Btext%5Bi:i+1000%5D for i in range(0, len(text), 1000)%5D:%0A # req = requests.post(_url, data=%7B'text':segment,'config':'s2t.json','precise':'0'%7D)%0A # result += req.text%0A # return result%0A%0Adef toSimplified(text):%0A req = requests.post(_url, data=%7B'text':text,'config':'t2s.json','precise':'0'%7D)%0A return req.text%0A
|
|
22f550dd3499d7d063501a2940a716d42362f6bc
|
Add missing file.
|
migrations/versions/0031_add_manage_team_permission.py
|
migrations/versions/0031_add_manage_team_permission.py
|
Python
| 0.000001
|
@@ -0,0 +1,1485 @@
+%22%22%22empty message%0A%0ARevision ID: 0031_add_manage_team_permission%0ARevises: 0030_add_template_permission%0ACreate Date: 2016-02-26 10:33:20.536362%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '0031_add_manage_team_permission'%0Adown_revision = '0030_add_template_permission'%0Aimport uuid%0Afrom datetime import datetime%0Afrom alembic import op%0Aimport sqlalchemy as sa%0Afrom sqlalchemy.dialects import postgresql%0A%0Adef upgrade():%0A ### commands auto generated by Alembic - please adjust! ###%0A conn = op.get_bind()%0A user_services = conn.execute(%22SELECT * FROM user_to_service%22).fetchall()%0A for entry in user_services:%0A id_ = uuid.uuid4()%0A created_at = datetime.now().isoformat().replace('T', ' ')%0A conn.execute((%0A %22INSERT INTO permissions (id, user_id, service_id, permission, created_at)%22%0A %22 VALUES ('%7B%7D', '%7B%7D', '%7B%7D', 'manage_team', '%7B%7D')%22).format(id_, entry%5B0%5D, entry%5B1%5D, created_at))%0A conn.execute((%0A %22INSERT INTO permissions (id, user_id, service_id, permission, created_at)%22%0A %22 VALUES ('%7B%7D', '%7B%7D', '%7B%7D', 'view_activity', '%7B%7D')%22).format(id_, entry%5B0%5D, entry%5B1%5D, created_at))%0A ### end Alembic commands ###%0A%0A%0Adef downgrade():%0A ### commands auto generated by Alembic - please adjust! ###%0A conn = op.get_bind()%0A conn.execute(%22DELETE FROM permissions where permission='manage_team'%22)%0A conn.execute(%22DELETE FROM permissions where permission='view_activity'%22)%0A%0A ### end Alembic commands ###
|
|
950fa531697ec82a56a742241d941e0573559ffd
|
fix merge conflict. plugin_parser now handles templates if not core or modes
|
plugin_parser.py
|
plugin_parser.py
|
#!/usr/bin/env python2.7
import ConfigParser
import os
import shutil
import sys
def add_plugins(plugin_url):
# !! TODO keep track of changes so that they can be removed later on
try:
os.system("git config --global http.sslVerify false")
os.system("cd /tmp && git clone "+plugin_url)
if ".git" in plugin_url:
plugin_url = plugin_url.split(".git")[0]
plugin_name = plugin_url.split("/")[-1]
subdirs = [x[0] for x in os.walk("/tmp/"+plugin_name)]
check_modes = True
for subdir in subdirs:
try:
if subdir.startswith("/tmp/"+plugin_name+"/collectors/"):
recdir = subdir.split("/tmp/"+plugin_name+"/collectors/")[1]
# only go one level deep, and copy recursively below that
if not "/" in recdir:
dest = "/var/lib/docker/data/collectors/"+recdir
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(subdir, dest)
elif subdir.startswith("/tmp/"+plugin_name+"/plugins/"):
recdir = subdir.split("/tmp/"+plugin_name+"/plugins/")[1]
# only go one level deep, and copy recursively below that
if not "/" in recdir:
dest = "/var/lib/docker/data/plugins/"+recdir
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(subdir, dest)
elif subdir.startswith("/tmp/"+plugin_name+"/visualization/"):
recdir = subdir.split("/tmp/"+plugin_name+"/visualization/")[1]
# only go one level deep, and copy recursively below that
if not "/" in recdir:
dest = "/var/lib/docker/data/visualization/"+recdir
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(subdir, dest)
elif subdir == "/tmp/"+plugin_name+"/visualization":
# only files, not dirs
dest = "/var/lib/docker/data/visualization/"
for (dirpath, dirnames, filenames) in os.walk(subdir):
for filename in filenames:
shutil.copyfile(subdir+"/"+filename, dest+filename)
elif subdir == "/tmp/"+plugin_name+"/templates":
# only files, not dirs
dest = "/var/lib/docker/data/templates/"
for (dirpath, dirnames, filenames) in os.walk(subdir):
for filename in filenames:
if filename == "modes.template":
check_modes = False
shutil.copyfile(subdir+"/"+filename, dest+filename)
if filename == "core.template":
read_config = ConfigParser.RawConfigParser()
read_config.read('/var/lib/docker/data/templates/core.template')
write_config = ConfigParser.RawConfigParser()
write_config.read(subdir+"/"+filename)
write_sections = write_config.sections()
for section in write_sections:
if read_config.has_section(section):
read_config.remove_section(section)
read_config.add_section(section)
recdir = "/tmp/"+plugin_name+"/core/"+section
dest1 = "/var/lib/docker/data/core/"+section
if os.path.exists(dest1):
shutil.rmtree(dest1)
shutil.copytree(recdir, dest1)
with open('/var/lib/docker/data/templates/core.template', 'w') as configfile:
read_config.write(configfile)
except:
pass
# update modes.template if it wasn't copied up to include new plugins
if check_modes:
files = [x[2] for x in os.walk("/var/lib/docker/data/templates")][0]
config = ConfigParser.RawConfigParser()
config.read('/var/lib/docker/data/templates/modes.template')
plugin_array = config.options("plugins")
plugins = {}
for f in files:
f_name = f.split(".template")[0]
if f_name != "README.md" and not f_name in plugin_array and f_name != "modes":
config.set("plugins", f_name, "all")
with open('/var/lib/docker/data/templates/modes.template', 'w') as configfile:
config.write(configfile)
shutil.rmtree("/tmp/"+plugin_name)
except:
pass
def remove_plugins(plugin_url):
try:
# !! TODO
os.system("ls")
except:
pass
if __name__ == "__main__":
if len(sys.argv) == 3:
if sys.argv[1] == "add_plugins":
add_plugins(sys.argv[2])
elif sys.argv[1] == "remove_plugins":
remove_plugins(sys.argv[2])
else:
print "invalid plugin type to parse"
else:
print "not enough arguments"
|
Python
| 0
|
@@ -2970,32 +2970,34 @@
+el
if filename == %22
@@ -4189,32 +4189,150 @@
ite(configfile)%0A
+ else:%0A shutil.copyfile(subdir+%22/%22+filename, dest+filename)%0A
exce
|
f9d399fb9fa923c68581279085566ba479349903
|
test for api export endpoint
|
onadata/apps/api/tests/viewsets/test_export_viewset.py
|
onadata/apps/api/tests/viewsets/test_export_viewset.py
|
Python
| 0.000001
|
@@ -0,0 +1,2507 @@
+import os%0A%0Afrom django.test import RequestFactory%0A%0Afrom onadata.apps.api.viewsets.export_viewset import ExportViewSet%0Afrom onadata.apps.main.tests.test_base import TestBase%0A%0A%0Aclass TestDataViewSet(TestBase):%0A%0A def setUp(self):%0A super(self.__class__, self).setUp()%0A self._create_user_and_login()%0A self._publish_transportation_form()%0A self.factory = RequestFactory()%0A self.extra = %7B%0A 'HTTP_AUTHORIZATION': 'Token %25s' %25 self.user.auth_token%7D%0A%0A def _filename_from_disposition(self, content_disposition):%0A filename_pos = content_disposition.index('filename=')%0A self.assertTrue(filename_pos != -1)%0A return content_disposition%5Bfilename_pos + len('filename='):%5D%0A%0A def test_form_list(self):%0A view = ExportViewSet.as_view(%7B%0A 'get': 'list',%0A %7D)%0A data = %7B%0A 'owner': 'http://testserver/api/v1/users/bob',%0A 'public': False,%0A 'public_data': False,%0A 'description': u'',%0A 'downloadable': True,%0A 'is_crowd_form': False,%0A 'allows_sms': False,%0A 'encrypted': False,%0A 'sms_id_string': u'transportation_2011_07_25',%0A 'id_string': u'transportation_2011_07_25',%0A 'title': u'transportation_2011_07_25',%0A 'bamboo_dataset': u''%0A %7D%0A request = self.factory.get('/', **self.extra)%0A response = view(request)%0A self.assertEqual(response.status_code, 200)%0A self.assertDictContainsSubset(data, response.data%5B0%5D)%0A%0A def test_form_get(self):%0A self._make_submissions()%0A view = ExportViewSet.as_view(%7B%0A 'get': 'retrieve'%0A %7D)%0A formid = self.xform.pk%0A request = self.factory.get('/', **self.extra)%0A response = view(request)%0A self.assertEqual(response.status_code, 400)%0A self.assertEqual(response.data,%0A %7B'detail': 'Expected URL keyword argument %60owner%60.'%7D)%0A%0A # csv%0A request = self.factory.get('/', **self.extra)%0A response = view(request, owner='bob', pk=formid, format='csv')%0A self.assertEqual(response.status_code, 200)%0A headers = dict(response.items())%0A content_disposition = 
headers%5B'Content-Disposition'%5D%0A filename = self._filename_from_disposition(content_disposition)%0A basename, ext = os.path.splitext(filename)%0A self.assertEqual(headers%5B'Content-Type'%5D, 'application/csv')%0A self.assertEqual(ext, '.csv')%0A
|
|
890f2d61db6925eb9baba74421fecd1aba205c96
|
922. Sort Array By Parity II
|
LeetCode/SortArrayByParity2.py
|
LeetCode/SortArrayByParity2.py
|
Python
| 0.999988
|
@@ -0,0 +1,1013 @@
+%22%22%22%0Agiven half of them are even and half are odd and internal order between odd and even indexes doesn't matter,%0Awe can scan odd and even indexes until we find a pair that need to be swapped and swap that.%0AUnder the assumptions everything can be put in place with swaps like that.%0A%22%22%22%0A%0Aclass Solution:%0A # in-place implementation - original list is modified%0A # note: this looks like a double loop, but due to the next_odd_index update it's actually linear%0A def sortArrayByParityII(self, nums: List%5Bint%5D) -%3E List%5Bint%5D:%0A next_odd_index = 1%0A for i in range(0, len(nums), 2):%0A if nums%5Bi%5D %25 2 == 1: # odd number in even index%0A for j in range(next_odd_index, len(nums), 2):%0A if nums%5Bj%5D %25 2 == 0: # even number in odd index - swap them%0A temp = nums%5Bi%5D%0A nums%5Bi%5D = nums%5Bj%5D%0A nums%5Bj%5D = temp%0A next_odd_index = j + 2%0A break%0A return nums%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.