Dataset schema:

  column        type           min  max
  commit        stringlengths  40   40
  subject       stringlengths  1    3.25k
  old_file      stringlengths  4    311
  new_file      stringlengths  4    311
  old_contents  stringlengths  0    26.3k
  lang          stringclasses  3 values
  proba         float64        0    1
  diff          stringlengths  0    7.82k
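In the raw dataset the diff field is stored percent-encoded (%0A for newlines, %22 for double quotes, %5B/%5D for brackets, %25 for a literal percent sign, and so on). A minimal sketch of reversing that encoding with Python's standard library, assuming a record is available as a plain dict shaped like the rows below (the literal here is illustrative and truncated, not a full record):

    from urllib.parse import unquote

    # Illustrative, truncated record shaped like the rows in this preview.
    record = {
        "commit": "fe11a1b638b1779e51da87eaa30f1f12b2d0911c",
        "subject": "Add a module for known data models: CDS, ECMWF, etc.",
        "new_file": "cf2cdm/datamodels.py",
        "diff": "@@ -0,0 +1,1991 @@ +#%0A# Copyright 2017-2018 ...",
    }

    # unquote() reverses the percent-encoding, e.g. %0A -> newline, %22 -> ".
    print(unquote(record["diff"]))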
fe11a1b638b1779e51da87eaa30f1f12b2d0911c
Add a module for known data models: CDS, ECMWF, etc.
cf2cdm/datamodels.py
cf2cdm/datamodels.py
Python
0
@@ -0,0 +1,1991 @@ +#
# Copyright 2017-2018 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
#   Alessandro Amici - B-Open - https://bopen.eu
#

from __future__ import absolute_import, division, print_function, unicode_literals


CDS = {
    'latitude': {
        'out_name': 'lat',
        'stored_direction': 'increasing',
    },
    'longitude': {
        'out_name': 'lon',
        'stored_direction': 'increasing',
    },
    'isobaricInhPa': {
        'out_name': 'plev',
        'units': 'Pa',
        'stored_direction': 'decreasing',
    },
    'isobaricInPa': {
        'out_name': 'plev',
        'units': 'Pa',
        'stored_direction': 'decreasing',
    },
    'number': {
        'out_name': 'realization',
        'stored_direction': 'increasing',
    },
    'time': {
        'out_name': 'forecast_reference_time',
        'stored_direction': 'increasing',
    },
    'valid_time': {
        'out_name': 'time',
        'stored_direction': 'increasing',
    },
    'leadtime': {
        'out_name': 'forecast_period',
        'stored_direction': 'increasing',
    },
}


ECMWF = {
    'isobaricInhPa': {
        'out_name': 'level',
        'units': 'hPa',
        'stored_direction': 'decreasing',
    },
    'isobaricInPa': {
        'out_name': 'level',
        'units': 'hPa',
        'stored_direction': 'decreasing',
    },
    'hybrid': {
        'out_name': 'level',
        'stored_direction': 'increasing',
    },
}
b6303a18051f5bc050c141b72935b9a87c752a59
move into separate module
utils/rms_paths.py
utils/rms_paths.py
Python
0.000001
@@ -0,0 +1,951 @@ +import pathlib
from collections import namedtuple


def get_paths(prj_root):
    """
    Maps path structure into a namedtuple.
    :return:dict: namedtuple paths
    """
    root_dir = pathlib.Path(prj_root).absolute().parent
    RMSPaths = namedtuple("RMSPaths", "root logs warnings commands journals com_warnings com_qc db xml_exp xml_imp")
    path_map = RMSPaths(root=root_dir,
                        logs=root_dir / "logs",
                        warnings=root_dir / "warnings",
                        commands=root_dir / "commands",
                        journals=root_dir / "journals",
                        com_warnings=root_dir / "commands" / "warnings",
                        com_qc=root_dir / "commands" / "qc",
                        db=root_dir / "db",
                        xml_exp=root_dir / "db" / "xml_export",
                        xml_imp=root_dir / "db" / "xml_import",
                        )
    return path_map
1a1ee9eff1f04d6e40c9288e15dc3fad7515c2b8
Make a unittest for CArray class
tests/cupy_tests/core_tests/test_carray.py
tests/cupy_tests/core_tests/test_carray.py
Python
0.000011
@@ -0,0 +1,1519 @@ +import unittest

import cupy
from cupy import testing


class TestCArray(unittest.TestCase):

    def test_size(self):
        x = cupy.arange(3).astype('i')
        y = cupy.ElementwiseKernel(
            'raw int32 x', 'int32 y', 'y = x.size()', 'test_carray_size',
        )(x, size=1)
        self.assertEqual(int(y[0]), 3)

    def test_shape(self):
        x = cupy.arange(6).reshape((2, 3)).astype('i')
        y = cupy.ElementwiseKernel(
            'raw int32 x', 'int32 y', 'y = x.shape()[i]', 'test_carray_shape',
        )(x, size=2)
        testing.assert_array_equal(y, (2, 3))

    def test_strides(self):
        x = cupy.arange(6).reshape((2, 3)).astype('i')
        y = cupy.ElementwiseKernel(
            'raw int32 x', 'int32 y', 'y = x.strides()[i]',
            'test_carray_strides',
        )(x, size=2)
        testing.assert_array_equal(y, (12, 4))

    def test_getitem_int(self):
        x = cupy.arange(24).reshape((2, 3, 4)).astype('i')
        y = cupy.empty_like(x)
        y = cupy.ElementwiseKernel(
            'raw T x', 'int32 y', 'y = x[i]', 'test_carray_getitem_int',
        )(x, y)
        testing.assert_array_equal(y, x)

    def test_getitem_idx(self):
        x = cupy.arange(24).reshape((2, 3, 4)).astype('i')
        y = cupy.empty_like(x)
        y = cupy.ElementwiseKernel(
            'raw T x', 'int32 y',
            'int idx[] = {i / 12, i / 4 % 3, i % 4}; y = x[idx]',
            'test_carray_getitem_idx',
        )(x, y)
        testing.assert_array_equal(y, x)
2cf5f7baf115511c9dd8a8a0333a9b579455b9a3
Add file for rule's symbol tests
tests/rules_tests/FromSymbolComputeTest.py
tests/rules_tests/FromSymbolComputeTest.py
Python
0
@@ -0,0 +1,264 @@ +#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy

"""

from unittest import main, TestCase
from grammpy import Rule


class FromSymbolComputeTest(TestCase):
    pass


if __name__ == '__main__':
    main()
3f63b647d9f1e3c0ea8c83a869db8fc0028127c5
Create 05.FibonacciNumbers.py
TechnologiesFundamentals/ProgrammingFundamentals/MethodsAndDebugging-Excercises/05.FibonacciNumbers.py
TechnologiesFundamentals/ProgrammingFundamentals/MethodsAndDebugging-Excercises/05.FibonacciNumbers.py
Python
0.000008
@@ -0,0 +1,204 @@ +def fibonacci(n):
    if n == 0 or n == 1:
        return 1
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)

if __name__ == '__main__':
    number = int(input())
    print(fibonacci(number))
f5a0227042b64c6e8a8d85b9e0bc9cf437dc11b8
resolve migration conflict
ovp_users/migrations/0012_merge_20170112_2144.py
ovp_users/migrations/0012_merge_20170112_2144.py
Python
0.000008
@@ -0,0 +1,342 @@ +# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-01-12 21:44
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('ovp_users', '0011_auto_20170111_1633'),
        ('ovp_users', '0011_auto_20170112_1417'),
    ]

    operations = [
    ]
3247ff26cbe63d875a493382c3f567f112de8b58
Add maintenance script to fix missing menu links
bin/maintenance/update_menus.py
bin/maintenance/update_menus.py
Python
0
@@ -0,0 +1,1197 @@ +from __future__ import division

from collections import Counter

from indico.core.db import DBMgr
from indico.util.console import conferenceHolderIterator, success
from MaKaC.conference import ConferenceHolder
from MaKaC.webinterface.displayMgr import ConfDisplayMgrRegistery


def update_menus(dbi):
    links = ('collaboration', 'downloadETicket')
    ch = ConferenceHolder()
    cdmr = ConfDisplayMgrRegistery()
    counter = Counter()

    for __, event in conferenceHolderIterator(ch, deepness='event'):
        menu = cdmr.getDisplayMgr(event).getMenu()
        must_update = False
        for linkname in links:
            if menu.getLinkByName(linkname) is None:
                counter[linkname] += 1
                must_update = True
        if must_update:
            menu.updateSystemLink()
            counter['updated'] += 1
            if counter['updated'] % 100:
                dbi.commit()

    for linkname in links:
        print "{} links missing: {}".format(linkname, counter[linkname])
    success("Event menus updated: {}".format(counter['updated']))


if __name__ == '__main__':
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    update_menus(dbi)
    dbi.endRequest()
1324ae9a6ba9d57841df3f7b729036120eee3d47
delete easyui
exercise/file/somecript.py
exercise/file/somecript.py
Python
0.000001
@@ -0,0 +1,86 @@ +import sys
text = sys.stdin.read()
words = text.split()
wordcount = len(words)
print()
3e23bf784a75de688811a4655e29bd2ae9bc163a
condense utility
topicmodel/interpret/condense_doctopics.py
topicmodel/interpret/condense_doctopics.py
Python
0.999754
@@ -0,0 +1,1459 @@ +# condense_doctopics.py

# The doctopics file produced by MALLET can be very
# bulky. This script condenses it by keeping only the
# rows needed to evaluate our preregistered hypotheses.

import sys, csv, os

def getdoc(anid):
    '''
    Gets the docid part of a character id
    '''

    if '|' in anid:
        thedoc = anid.split('|')[0]
    else:
        print('error', anid)
        thedoc = anid

    return thedoc

# MAIN starts here

args = sys.argv

doctopic_path = args[1]
outpath = doctopic_path.replace('_doctopics.txt', '_vols.tsv')
print(outpath)

if os.path.isfile(outpath):
    print(outpath, ' already exists')
    user = input('Ok to overwrite (y for yes): ')
    if user != 'y':
        sys.exit(0)

significant_vols = set()

with open('../../evaluation/hypotheses.tsv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f, delimiter = '\t')
    for row in reader:
        ids = [row['firstsim'], row['secondsim'], row['distractor']]
        for anid in ids:
            docid = getdoc(anid)
            significant_vols.add(docid)

outlines = []

with open(doctopic_path, encoding = 'utf-8') as f:
    for line in f:
        fields = line.strip().split('\t')
        charid = fields[1]
        docid = getdoc(charid)
        if docid not in significant_vols:
            continue
        else:
            outlines.append(line)

with open(outpath, mode = 'w', encoding = 'utf-8') as f:
    for line in outlines:
        f.write(line)
b333deae9db90a193d274bf235dca8d6dddff5c2
Add script for getting milestone contributors (#4396)
docs/_bin/get-milestone-contributors.py
docs/_bin/get-milestone-contributors.py
Python
0
@@ -0,0 +1,1156 @@ +#!/usr/bin/env python3

import json
import sys

import requests

# tested with python 3.6 and requests 2.13.0

if len(sys.argv) != 2:
    sys.stderr.write('usage: program <milestone-number>\n')
    sys.stderr.write('Provide the github milestone number, not name. (e.g., 19 instead of 0.10.1)\n')
    sys.exit(1)

milestone_num = sys.argv[1]

done = False
page_counter = 1
contributors = set()

# Get all users who created a closed issue or merged PR for a given milestone
while not done:
    resp = requests.get("https://api.github.com/repos/druid-io/druid/issues?milestone=%s&state=closed&page=%s" % (milestone_num, page_counter))
    pagination_link = resp.headers["Link"]

    # last page doesn't have a "next"
    if "rel=\"next\"" not in pagination_link:
        done = True
    else:
        page_counter += 1

    issues = json.loads(resp.text)
    for issue in issues:
        contributor_name = issue["user"]["login"]
        contributors.add(contributor_name)

# doesn't work as-is for python2, the contributor names are "unicode" instead of "str" in python2
contributors = sorted(contributors, key=str.lower)
for contributor_name in contributors:
    print("@%s" % contributor_name)
28377ff5ac680cb5c97997f01b6300debe1abd80
add missing migration. fix #118
waliki/migrations/0006_auto_20170326_2008.py
waliki/migrations/0006_auto_20170326_2008.py
Python
0.000706
@@ -0,0 +1,520 @@ +# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-26 20:08
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('waliki', '0005_auto_20141124_0020'),
    ]

    operations = [
        migrations.AlterField(
            model_name='redirect',
            name='status_code',
            field=models.IntegerField(choices=[(302, '302 Found'), (301, '301 Moved Permanently')], default=302),
        ),
    ]
648c51c41b51fe7919798de755ab1f04f40502a7
Add Simple File Example
examples/simple_file_instance.py
examples/simple_file_instance.py
Python
0
@@ -0,0 +1,609 @@ +#!/usr/bin/env python
"""Creates the CybOX content for CybOX_Simple_File_Instance.xml
"""

from cybox.common import Hash
from cybox.core import Observable, Observables
from cybox.objects.file_object import File

def main():
    h = Hash("a7a0390e99406f8975a1895860f55f2f")

    f = File()
    f.file_name = "bad_file24.exe"
    f.file_path = "AppData\Mozilla"
    f.file_extension = ".exe"
    f.size_in_bytes = 3282
    f.add_hash(h)

    o = Observable(f)
    o.description = "This observable specifies a specific file observation."

    print Observables(o).to_xml()

if __name__ == "__main__":
    main()
47e7d9fc28214e69ed9b0a36104feed80422a5b7
Create molecule.py
molecule.py
molecule.py
Python
0.000493
@@ -0,0 +1,1689 @@ +import numpy as np




def grab_iter_dual(i, bond_hash, mol_used, body_hash=None):
    s = [i]
    r = []
    while s:
        v = s.pop()
        if not mol_used[v]:
            r.append(v)
            mol_used[v] = True
            # for w in bond_hash[v]:
            #     s.append(w)
            s.extend(bond_hash[v])
            if not body_hash:
                continue
            for w in body_hash.get(v):
                s.append(w)
                for x in bond_hash[w]:
                    s.append(x)
    return r

def bond_hash_dualdirect(bond, natoms):
    """
    :param bond: bond data in hoomdxml format (name, id1, id2)
    :param natoms: total number of particles
    :return: hash table of with value in {bondname1: [idxes], bondname2:[idxes]...} for each particle (in dual direct)
    """
    bond_hash_nn = {}
    print('Building bond hash...')
    if not isinstance(bond, np.ndarray):
        return {}
    for i in range(natoms):
        bond_hash_nn[i] = []
    for b in bond:
        idx = b[1]
        jdx = b[2]
        bond_hash_nn[idx].append(jdx)
        bond_hash_nn[jdx].append(idx)
    print('Done.')
    return bond_hash_nn

def molecules(bond, natoms):
    bond_hash = bond_hash_dualdirect(bond, natoms)
    mol_used = {}
    for i in range(natoms):
        mol_used[i] = False
    _ret, ml = [], []
    for i in range(natoms):
        mol = grab_iter_dual(i, bond_hash, mol_used)
        if len(mol) > 1:
            _ret.append(mol)
            ml.append(len(mol))
    ret = np.zeros((len(_ret), max(ml)), dtype=np.int64) - 1
    for i, mol in enumerate(_ret):
        ret[i][:ml[i]] = _ret[i]
    return ret, np.array(ml, dtype=np.int64), bond_hash
7cdeb30d5beefbed8d44e3b8eb5384ff05a1f09e
change logg buff, but no save
class4/class4_ex2.py
class4/class4_ex2.py
Python
0
@@ -0,0 +1,1853 @@ +#!/usr/bin/env python

import paramiko
import time
from getpass import getpass


def prevent_paging(remote_conn):

    ''' stop pagination '''
    remote_conn.send("\n")
    remote_conn.send("term len 0\n")
    time.sleep(1)

    ''' clear output buffer '''
    output = remote_conn.recv(1000)
    return output


def close_connection(remote_conn):

    ''' close SSH connection '''
    remote_conn.close()


def start_config_mode(remote_conn):

    ''' get into configuration mode on Cisco gear '''
    remote_conn.send("\n")
    remote_conn.send("config t\n")
    time.sleep(1)


def exit_config_mode(remote_conn):

    ''' leave config mode '''
    remote_conn.send("\n")
    remote_conn.send("end\n")



if __name__ == '__main__':


    ''' set static variables '''

    device = '184.105.247.71'
    username = 'pyclass'
    password = getpass()


    ''' initialize variables '''
    remote_conn_pre = paramiko.SSHClient()
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())


    ''' connect to device '''
    remote_conn_pre.connect(device, username=username, password=password, look_for_keys=False, allow_agent=False)
    remote_conn = remote_conn_pre.invoke_shell()


    ''' go into configuration mode '''
    start_config_mode(remote_conn)


    ''' send config change commands to device '''
    remote_conn.send("\n")
    remote_conn.send("logging buffered 99999\n")
    time.sleep(1)


    ''' exit configuration mode '''
    exit_config_mode(remote_conn)


    ''' disable paging using function '''
    prevent_paging(remote_conn)


    ''' send command to device and print results '''
    remote_conn.send("\n")
    remote_conn.send("sho run | inc buffered\n")
    time.sleep(1)
    output = remote_conn.recv(50000)
    print output


    ''' close connection using function '''
    close_connection(remote_conn)
dfb4b3ab679a5c8767bd7571da0fdd40850d2d84
Add __main__.py
crox/__main__.py
crox/__main__.py
Python
0.000019
@@ -0,0 +1,262 @@ +#!/usr/bin/env python
from __future__ import division, print_function, absolute_import

import os
import sys
sys.path = [os.path.join(os.path.abspath(os.path.dirname(__file__)), "..")] + sys.path
from crox.core import main

if __name__ == '__main__':
    main()
2a81e39a843e31181af455a89ad2b200b7d2f024
Add migrations for session changes
director/sessions_/migrations/0005_auto_20160316_2124.py
director/sessions_/migrations/0005_auto_20160316_2124.py
Python
0
@@ -0,0 +1,1693 @@ +# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-16 21:24
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sessions_', '0004_auto_20160124_0931'),
    ]

    operations = [
        migrations.AddField(
            model_name='session',
            name='network',
            field=models.IntegerField(blank=True, default=-1, help_text='Network limit for this session. -1 = unlimited', null=True),
        ),
        migrations.AddField(
            model_name='sessiontype',
            name='lifetime',
            field=models.IntegerField(default=-1, help_text='Minutes before the session is terminated. -1 = infinite.'),
        ),
        migrations.AddField(
            model_name='sessiontype',
            name='number',
            field=models.IntegerField(default=-1, help_text='Maximum number of this session type that can be running at one time. -1 = infinite.'),
        ),
        migrations.AddField(
            model_name='sessiontype',
            name='rank',
            field=models.IntegerField(default=1, help_text='Nominal rank of session type (higher ~ more powerful)'),
        ),
        migrations.AlterField(
            model_name='sessiontype',
            name='network',
            field=models.FloatField(default=0, help_text='Gigabytes (GB) of network transfer allocated to the session.'),
        ),
        migrations.AlterField(
            model_name='sessiontype',
            name='timeout',
            field=models.IntegerField(default=60, help_text='Minutes of inactivity before the session is terminated'),
        ),
    ]
92c7efdfd0a1a70081d6dd5e3c318432124bc87d
Add disk caching so we don't have to always wait for closure
util/builder.py
util/builder.py
import os
import subprocess
import zipfile
from zipfile import ZipFile
from StringIO import StringIO

# Helper class for generating custom builds of rainbow
class RainbowBuilder(object):
    def __init__(self, js_path, closure_path):
        self.js_path = js_path
        self.closure_path = closure_path
        self.js_files_to_include = []
        self.file_name = ""

    def getPathForLanguage(self, language):
        return os.path.join(self.js_path, 'language/' + language + '.js')

    def getRainbowPath(self):
        return os.path.join(self.js_path, 'rainbow.js')

    def verifyPaths(self):
        if not os.path.exists(self.js_path):
            raise Exception('directory does not exist at: %s' % self.js_path)

        if not os.path.exists(self.closure_path):
            raise Exception('closure compiler does not exist at: %s' % self.closure_path)

    def getZipForLanguages(self, languages, path=None):
        self.verifyPaths()

        # strip out any duplicates
        languages = list(set(languages))

        write_to = StringIO() if path is None else path
        zip_file = ZipFile(write_to, 'w')
        zip_file.write(self.getRainbowPath(), 'rainbow.js', zipfile.ZIP_DEFLATED)
        for language in languages:
            zip_file.write(self.getPathForLanguage(language), os.path.join('language', language + '.js'), zipfile.ZIP_DEFLATED)
        zip_file.close()

        return write_to

    def getFileForLanguages(self, languages):
        self.verifyPaths()

        # strip out any duplicates
        languages = list(set(languages))

        self.js_files_to_include = [self.getRainbowPath()]
        for language in languages:
            path = self.getPathForLanguage(language)
            if not os.path.exists(path):
                continue
            self.js_files_to_include.append(path)

        proc = subprocess.Popen(['java', '-jar', self.closure_path, '--compilation_level', 'ADVANCED_OPTIMIZATIONS'] + self.js_files_to_include, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, err = proc.communicate()

        self.file_name = 'rainbow' + ('-custom' if len(languages) else '') + '.min.js'

        lines = output.splitlines()
        comments = lines[0:4]
        version = comments[1].replace(' @version ', '')
        url = comments[2].replace(' @url ', '')
        new_comment = '/* Rainbow v' + version + ' ' + url
        if len(languages):
            new_comment += ' | included languages: ' + ', '.join(languages)
        new_comment += ' */'

        output = new_comment + '\n' + '\n'.join(lines[4:])

        return output
Python
0
@@ -32,24 +32,49 @@ ort zipfile
+import hashlib
import re
 from zipfile
@@ -124,62 +124,8 @@ IO

-# Helper class for generating custom builds of rainbow 
cla
@@ -713,30 +713,30 @@ not os.path.
-exists
+isfile
(self.closur
@@ -1392,16 +1392,445 @@ ite_to

+    def openFile(self, path):
        file = open(path, "r")
        content = file.read()
        file.close()
        return content

    def writeFile(self, path, content):
        file = open(path, "w")
        file.write(content)
        file.close()

    def getVersion(self):
        contents = self.openFile(self.getRainbowPath())
        match = re.search(r'@version\s(.*)\s+?', contents)
        return match.group(1)

 def
@@ -1856,32 +1856,48 @@ (self, languages
+, cache_dir=None
):
        self.
@@ -2265,32 +2265,497 @@
-proc = subprocess.Popen(
+self.file_name = 'rainbow' + ('-custom' if len(languages) else '') + '.min.js'

        if not os.path.isdir(cache_dir):
            cache_dir = None

        if cache_dir is not None:
            version = self.getVersion()
            cache_key = hashlib.md5(''.join(self.js_files_to_include)).hexdigest()
            cache_path = os.path.join(cache_dir, version, cache_key)

            if os.path.isfile(cache_path):
                return self.openFile(cache_path)

        command = ['ja
@@ -2861,16 +2861,56 @@ _include
+
        proc = subprocess.Popen(command
, stdout
@@ -2997,96 +2997,8 @@ ()

-        self.file_name = 'rainbow' + ('-custom' if len(languages) else '') + '.min.js'
-
@@ -3417,16 +3417,230 @@ s[4:])

+        if cache_dir is not None:
            save_to = os.path.join(cache_dir, version)
            if not os.path.isdir(save_to):
                os.mkdir(save_to)
            self.writeFile(cache_path, output)

43d2045611320bbe78c1167e6505135425bf9499
Add customfilters.py to theme
customfilters.py
customfilters.py
Python
0
@@ -0,0 +1,2256 @@ +#!/usr/bin/env python
import bs4

def menu_filter(pelican_pages, direct_templates):
    """
    Jinja filter for Pelican page object list

    Structures pages into a three-level menu that can be parsed by Jinja2
    templating. Reads page metadata of the form:
    :menu: <parent>, <name>, <weight>; <parent2>, <name2>, <weight2>; ...
    where the top-level menu items have a parent name 'top'.
    """
    page_list = []
    menu = []

    # Pull menu metadata from Pelican page object list
    for page in pelican_pages:
        if hasattr(page, 'menu'):

            # Split into list of menu locations for each page
            menu_data = page.menu.split(';')

            # Record each menu location per page
            for item in menu_data:
                temp_data = item.split(',')
                temp_dict = {
                    'parent': temp_data[0].strip(),
                    'name': temp_data[1].strip(),
                    'weight': int(temp_data[2]),
                    'link': "/{0}".format(page.slug),
                    'children': [],
                }

                #Add each menu location to a page list
                page_list.append(temp_dict)

    # Add the direct templates before sorting
    for item in direct_templates:
        page_list.append(item.copy())

    # Sort the page list by weight
    page_list = sorted(page_list, key=lambda k: k['weight'])

    # Find top-level menu items and place in menu
    for item in page_list:
        if item['parent'] == 'top':
            menu.append(item.copy())

    # For each top-menu item, find its children
    for parent in menu:
        for page in page_list:
            if page['parent'] == parent['name']:
                parent['children'].append(page.copy())

    # For each second-level menu item, find its children
    for parent in menu:
        for child in parent['children']:
            for page in page_list:
                if page['parent'] == child['name']:
                    child['children'].append(page.copy())

    return menu

def close_html_tags(html_string):
    """Closes any html tags in html_string that have been opened but have not
    been closed.
    """
    soup = bs4.BeautifulSoup(html_string, "html.parser")
    return soup
9968aad924fee0c80b895bc24e1452f5c525ee9e
add invite sender
invite.py
invite.py
Python
0.000001
@@ -0,0 +1,369 @@ +from auth import init_manager_for_invite
import sys

def main():
    m = init_manager_for_invite()
    res = m.send_invite(sys.argv[1],
                        email_template='templates/emailinvite.html',
                        host='http://localhost:8088')
    if res:
        print('Success')
    else:
        print('Fail')

if __name__ == "__main__":
    main()
e16043547bb43476f56195d5652881ebeb684e57
Add new create_realm management command.
zerver/management/commands/create_realm.py
zerver/management/commands/create_realm.py
Python
0
@@ -0,0 +1,2219 @@ +import argparse
from typing import Any

from django.core.management.base import CommandError

from zerver.lib.actions import do_create_realm, do_create_user
from zerver.lib.management import ZulipBaseCommand
from zerver.models import UserProfile


class Command(ZulipBaseCommand):
    help = """\
Create a new Zulip organization (realm) via the command line.

We recommend `./manage.py generate_realm_creation_link` for most
users, for several reasons:

* Has a more user-friendly web flow for account creation.
* Manages passwords in a more natural way.
* Automatically logs the user in during account creation.

This management command is available as an alternative for situations
where one wants to script the realm creation process.

Since every Zulip realm must have an owner, this command creates the
initial organization owner user for the new realm, using the same
workflow as `./manage.py create_user`.
"""

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        parser.add_argument("realm_name", help="Name for the new organization")
        parser.add_argument(
            "--string-id",
            help="Subdomain for the new organization. Empty if root domain.",
            default="",
        )
        self.add_create_user_args(parser)

    def handle(self, *args: Any, **options: str) -> None:
        realm_name = options["realm_name"]
        string_id = options["string_id"]

        create_user_params = self.get_create_user_params(options)

        try:
            realm = do_create_realm(string_id=string_id, name=realm_name)
        except AssertionError as e:
            raise CommandError(str(e))

        do_create_user(
            create_user_params.email,
            create_user_params.password,
            realm,
            create_user_params.full_name,
            # Explicitly set tos_version=None. For servers that
            # have configured Terms of Service, this means that
            # users created via this mechanism will be prompted to
            # accept the Terms of Service on first login.
            role=UserProfile.ROLE_REALM_OWNER,
            realm_creation=True,
            tos_version=None,
            acting_user=None,
        )
059a29aede4fb1b4db914131b0aad9ca581201fe
Create followers.py
followers.py
followers.py
Python
0
@@ -0,0 +1,645 @@ +from twython import Twython
import datetime
print datetime.datetime.now()
players = ["fabiofogna",
           "richardgasquet1",
           "JohnIsner",
           "keinishikori",
           "andy_murray",
           "milosraonic",
           "GrigorDimitrov",
           "delpotrojuan",
           "DavidFerrer87",
           "tomasberdych",
           "stanwawrinka",
           "RafaelNadal",
           "rogerfederer",
           "DjokerNole"]
APIKEY = "REGISTER_AN_API_KEY"
APISEC = "GRAB_YOUR_API_SECRET"
twitter = Twython(APIKEY,APISEC)
for p in players:
    followers = twitter.show_user(screen_name = p)
    print (p,followers['followers_count'])
0c775a1bb685ff5a77f7f4fb3bbde58d0f1f4334
add missing catalogo app migration
indicarprocess/catalogo/migrations/0002_catalogorapideye.py
indicarprocess/catalogo/migrations/0002_catalogorapideye.py
Python
0
@@ -0,0 +1,1029 @@ +# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.contrib.gis.db.models.fields


class Migration(migrations.Migration):

    dependencies = [
        ('catalogo', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CatalogoRapidEye',
            fields=[
                ('gid', models.AutoField(serialize=False, primary_key=True)),
                ('image', models.CharField(max_length=80, unique=True)),
                ('path', models.CharField(max_length=120)),
                ('tms', models.CharField(max_length=254)),
                ('quicklook', models.CharField(max_length=150)),
                ('data', models.DateField()),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4674, null=True, blank=True)),
                ('nuvens', models.FloatField()),
            ],
            options={
                'db_table': 'catalogo_rapideye',
            },
        ),
    ]
c994cfda927353c195048b7c5ac42ed557d8ac0a
Create setup.py
swift/setup.py
swift/setup.py
Python
0.000001
@@ -0,0 +1 @@ +
ffa99e9ae77a2b651e29dcdbf6abf7a2c9c142a6
Create DP_Coin_Change_Problem_ctci.py
DP_Coin_Change_Problem_ctci.py
DP_Coin_Change_Problem_ctci.py
Python
0.000003
@@ -0,0 +1,881 @@ +#!/bin/python
'''Given a number of dollars, N, and a list of dollar values for C = {C0,C1,C2,...CM} distinct coins, M, find
and print the number of different ways you can make change for N dollars if each coin is available in an infinite quantity.'''

import sys

def make_change(coins, n):
    result = dfs(coins,n)
    return result


def dfs(coins,n):
    array = [0 for x in range(n+1)]
    array[0] = 1
    matrix = [array[:] for x in range(len(coins) + 1)]
    for i in range(1,len(coins) + 1):
        for j in range(1,n+1):
            if j >= coins[i-1]:
                matrix[i][j] = matrix[i][j- coins[i-1]] + matrix[i-1][j]
            else :
                matrix[i][j] = matrix[i-1][j]
    return matrix[len(coins)][n]

n,m = raw_input().strip().split(' ')
n,m = [int(n),int(m)]
coins = map(int,raw_input().strip().split(' '))
print make_change(coins, n)
67ee018391df9682e5fe96dd0beba687517f1bf1
Create autoclearbuf.py
HexChat/autoclearbuf.py
HexChat/autoclearbuf.py
Python
0.000001
@@ -0,0 +1,963 @@ +import hexchat

__module_name__ = 'autoclearbuf'
__module_author__ = 'fladd & TingPing'
__module_version__ = '1.0'
__module_description__ = 'Auto clear buffer of closed queries with znc'

# TODO:
# Don't run on non-znc networks
# Actually check for channel type (currently crashes)

recently_cleared = []

def privmsg_cb(word, word_eol, userdata):
	# ZNC helpfully tells us what we just did.. so lets hide that spam
	if word[0] == ':*status!znc@znc.in' and word_eol[4].startswith('buffers matching'):
		cleared = word[6][1:-1] # [nick]
		if cleared in recently_cleared:
			recently_cleared.remove(cleared)
			return hexchat.EAT_ALL

def close_cb(word, word_eol, userdata):
	name = hexchat.get_info('channel')

	# Ignore ZNC queries and channels
	if name[0] != '*' and name[0] != '#':
		recently_cleared.append(name)
		hexchat.command('znc clearbuffer {}'.format(name))

hexchat.hook_print('Close Context', close_cb)
hexchat.hook_server('PRIVMSG', privmsg_cb)
fdc1145b91175673552d21abbfb7ba41c034c426
Add blaze css
wdom/themes/blaze.py
wdom/themes/blaze.py
Python
0.000169
@@ -0,0 +1,1775 @@ +#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from wdom.tag import NewTagClass as NewTag
from wdom.tag import *


css_files = [
    '//cdn.jsdelivr.net/blazecss/latest/blaze.min.css',
]

Button = NewTag('Button', bases=Button, class_='button')
DefaultButton = NewTag('DefaultButton', 'button', Button, class_='button--default')
PrimaryButton = NewTag('PrimaryButton', 'button', Button, class_='button--primary')
SuccessButton = NewTag('SuccessButton', 'button', Button, class_='button--success')
InfoButton = NewTag('InfoButton', 'button', Button, class_='button--primary')
WarningButton = NewTag('WarningButton', 'button', Button, class_='button--secondary')
DangerButton = NewTag('DangerButton', 'button', Button, class_='button--error')
LinkButton = NewTag('LinkButton', 'button', Button)

Input = NewTag('Input', 'input', Input, class_='field')
TextInput = NewTag('TextInput', 'input', TextInput, class_='field')
Textarea = NewTag('Textarea', 'textarea', Textarea, class_='field')
Select = NewTag('Select', 'select', Select, class_='choice')

Ul = NewTag('Ul', 'ul', Ul, class_='list')
Ol = NewTag('Ol', 'ol', Ol, class_='list--ordered')
Li = NewTag('Li', 'li', Li, class_='list__item')

Table = NewTag('Table', 'table', Table, class_='table')
Tr = NewTag('Tr', 'tr', Tr, class_='table__row')
Th = NewTag('Th', 'th', Th, class_='table__cell')
Td = NewTag('Td', 'td', Td, class_='table__cell')

H1 = NewTag('H1', 'div', H1, class_='heading heading--super')
H2 = NewTag('H2', 'div', H2, class_='heading heading--xlarge')
H3 = NewTag('H3', 'div', H3, class_='heading heading--large')
H4 = NewTag('H4', 'div', H4, class_='heading heading--medium')
H5 = NewTag('H5', 'div', H5, class_='heading heading--small')
H6 = NewTag('H6', 'div', H6, class_='heading heading--xsmall')
0c44e2527004bbb3000c2f3cbe06648ff04f0c92
Fix python 2.6 'zero length field name in format' error
stripe/error.py
stripe/error.py
# Exceptions
class StripeError(Exception):

    def __init__(self, message=None, http_body=None, http_status=None,
                 json_body=None, headers=None):
        super(StripeError, self).__init__(message)

        if http_body and hasattr(http_body, 'decode'):
            try:
                http_body = http_body.decode('utf-8')
            except:
                http_body = ('<Could not decode body as utf-8. '
                             'Please report to support@stripe.com>')

        self.http_body = http_body

        self.http_status = http_status
        self.json_body = json_body
        self.headers = headers or {}

        self.request_id = self.headers.get('request-id', None)

    def __str__(self):
        msg = super(StripeError, self).__str__()
        if self.request_id is not None:
            return "Request {}: {}".format(self.request_id, msg)
        else:
            return msg


class APIError(StripeError):
    pass


class APIConnectionError(StripeError):
    pass


class CardError(StripeError):

    def __init__(self, message, param, code, http_body=None,
                 http_status=None, json_body=None, headers=None):
        super(CardError, self).__init__(
            message, http_body, http_status, json_body, headers)
        self.param = param
        self.code = code


class InvalidRequestError(StripeError):

    def __init__(self, message, param, http_body=None,
                 http_status=None, json_body=None, headers=None):
        super(InvalidRequestError, self).__init__(
            message, http_body, http_status, json_body, headers)
        self.param = param


class AuthenticationError(StripeError):
    pass
Python
0.001793
@@ -844,12 +844,14 @@ st { +0 }: { +1 }".f
ece35f891ffd976a7ecfd191e9fbad1e416650d2
TEST added for phantom
dipy/sims/tests/test_phantom.py
dipy/sims/tests/test_phantom.py
Python
0
@@ -0,0 +1,1353 @@ +import numpy as np
import nose
import nibabel as nib
from nose.tools import assert_true, assert_false, assert_equal, assert_almost_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.core.geometry import vec2vec_rotmat
from dipy.data import get_data
from dipy.viz import fvtk
from dipy.reconst.dti import Tensor
from dipy.sims.phantom import orbital_phantom

def test_phantom():

    def f(t):
        x=np.sin(t)
        y=np.cos(t)
        z=np.linspace(-1,1,len(x))
        return x,y,z

    fimg,fbvals,fbvecs=get_data('small_64D')
    bvals=np.load(fbvals)
    bvecs=np.load(fbvecs)
    bvecs[np.isnan(bvecs)]=0

    N=50 #timepoints

    vol=orbital_phantom(bvals=bvals,
                        bvecs=bvecs,
                        func=f,
                        t=np.linspace(0,2*np.pi,N),
                        datashape=(10,10,10,len(bvals)),
                        origin=(5,5,5),
                        scale=(3,3,3),
                        angles=np.linspace(0,2*np.pi,16),
                        radii=np.linspace(0.2,2,6))

    ten=Tensor(vol,bvals,bvecs)
    FA=ten.fa()
    FA[np.isnan(FA)]=0

    assert_equal(np.round(FA.max()*1000),707)


if __name__ == "__main__":
    test_phantom()
4f2cd5d857c00c5b2f4d4aed5fff42a00a003cac
add directory
MellPlayer/directory.py
MellPlayer/directory.py
Python
0.000001
@@ -0,0 +1,259 @@ +#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Netease Music Player

Created on 2017-02-23
@author: Mellcap
'''

import os

DIRECTORY = '~/.MellPlayer'

def create_directory(directory):
    if not os.path.exists(directory):
        os.makedirs(directory)
2d9122adc0e12b00f29bca321979dc2ecc428ddc
Create proxy_scan.py
proxy_scan.py
proxy_scan.py
Python
0.000002
@@ -0,0 +1,797 @@ +from burp import IBurpExtender
from burp import IHttpListener
from burp import IScannerListener
from java.net import URL
from java.io import File
import datetime
import time

class BurpExtender(IBurpExtender, IHttpListener, IScannerListener):
    def registerExtenderCallbacks(self, callbacks):
        self._callbacks = callbacks
	self._callbacks.setExtensionName("Proxy History Tester")
	httpReqResp = callbacks.getProxyHistory()
	print "There are %d items in the list" % httpReqResp.__len__()

	for item in httpReqResp:
            #print item.getRequest().tostring()
            print item.getHttpService().getHost()
            print item.getHttpService().getPort()
            self._callbacks.doActiveScan(item.getHttpService().getHost(),item.getHttpService().getPort(),0,item.getRequest())
19c0183e6e23195aef479553c018718565925585
Add new regex based features
nala/features/regex.py
nala/features/regex.py
Python
0
@@ -0,0 +1,1873 @@ +import re

from nala.features import FeatureGenerator


class RegexNLFeatureGenerator(FeatureGenerator):
    def __init__(self):
        self.patterns = [
            re.compile('[g]\.[0-9]+_[0-9]+(del)[0-9]+'),
            re.compile('deletion of( (the|a))?.* region'),
            re.compile('deletion of( (the|a))?( \d+(bp|base pairs?|a\.a\.|amino acids?|nucleotides?)?)? [\w\-\.]+'),
            re.compile('\d+(-| )?(bp|base pairs?|a\.a\.|amino acids?|nucleotides?).*deletion'),
            re.compile('[\w\-\.]+ deletion'),
            re.compile('(c|carboxyl?|cooh|n|amino|nh2|amine)(-| )(terminus|terminal)( (tail|end))?'),
            re.compile('exons? \d+(( ?(and|or|-) ?\d+))?')
        ]

    def generate(self, dataset):
        """
        :type dataset: nala.structures.data.Dataset
        """
        for part in dataset.parts():
            matches = {}

            for index, pattern in enumerate(self.patterns):
                matches[index] = []
                for match in pattern.finditer(part.text):
                    matches[index].append((match.start(), match.end()))

            for sentence in part.sentences:
                for token in sentence:
                    for match_index, match in matches.items():
                        name = 'regex_nl_{}'.format(match_index)
                        value = 'O'
                        for start, end in match:
                            if start == token.start:
                                value = 'B'
                                break
                            elif start < token.start < token.end < end:
                                value = 'I'
                                break
                            elif token.end == end:
                                value = 'E'
                                break

                        token.features[name] = value
0259d9a361fa49966977f958b8222f977616713f
Add SRIM energy histogram vacancy analysis script (#31)
analysis/SRIMVacEnergyCount.py
analysis/SRIMVacEnergyCount.py
Python
0
@@ -0,0 +1,905 @@ +#!/usr/bin/python

#
# Tool to parse a SRIM COLLISION.txt file and produce an output comparable
# to TrimVacEnergyCount (output.type = 'vaccount')
#

import fileinput
import math
import re

recoil = re.compile('^\xdb')

# read file header
header = [''] * 4
for i in range(4) :
    header[i] = fileinput.input()

# parse rest of the file
vac = []
for line in fileinput.input() :
    # detect recoils
    if recoil.match(line) :
        field = line.split()

        # vacancy
        if field[7] == '1' :
            x = int(float(field[4]))
            E = int(math.log10(float(field[3])))
            E = max(0, E);

            if E not in vac :
                vac += [[0]] * (1 + E - len(vac))

            try:
                vac[E][x] += 1
            except IndexError:
                vac[E] += [0] * (1 + x - len(vac[E]))
                vac[E][x] = 1

# output histogram
for E in range(len(vac)) :
    for x in range(len(vac[E])) :
        print "%d %d %d" % (E, x, vac[E][x])
    print
620e568b59d8e811a6457be251dfa7d5bf0f8a3d
implement stack using the linked list
utils/stack.py
utils/stack.py
Python
0.000001
@@ -0,0 +1,958 @@ +try:
    from .linkedlist import LinkedList, Element
except ModuleNotFoundError as e:
    from linkedlist import LinkedList, Element


class Stack(object):
    def __init__(self, top=None):
        self.ll = LinkedList(top)

    def push(self, new_element):
        """Push (add) a new element onto the top of the stack"""
        self.ll.insert_first(new_element)

    def pop(self):
        """Pop (remove) the first element off the top of the stack and return it"""
        return self.ll.delete_first()


print(__name__)
if __name__ == '__main__':
    # Test cases
    # Set up some Elements
    e1 = Element(1)
    e2 = Element(2)
    e3 = Element(3)
    e4 = Element(4)

    # Start setting up a Stack
    stack = Stack(e1)

    # Test stack functionality
    stack.push(e2)
    stack.push(e3)
    print(stack.pop().value)
    print(stack.pop().value)
    print(stack.pop().value)
    print(stack.pop())
    stack.push(e4)
    print(stack.pop().value)
a94964156d1bfbcab1fd711514d375d8fba0eaf5
sum plus one
problems/sum-of-array-plus-one/sum-of-array-plus-one.py
problems/sum-of-array-plus-one/sum-of-array-plus-one.py
Python
0.99844
@@ -0,0 +1,91 @@ +def sum_plus_one(arr):
    return sum(arr) + len(arr)

print sum_plus_one([1, 2, 3, 4]) # 14
fd2cc81feab4b24b276c8f4a0a8efc16cacef60b
Add template for lab 09 class A
Lab/09/Template_09_A.py
Lab/09/Template_09_A.py
Python
0.000001
@@ -0,0 +1,1155 @@ +
class Bangunan:
    def __init__(self, nama, lama_sewa, harga_sewa):
        self.nama = nama
        self.lama_sewa = lama_sewa
        self.harga_sewa = harga_sewa

    def getHargaSewa(self):
        return self.harga_sewa

class Restoran(Object):
    def __init__(self, nama, lama_sewa):
        Bangunan.__init__(self, nama, lama_sewa, 30000000)

# Silahkan ditambahkan class-class lainnya atau jika ingin memodifikasi


daftar_bangunan = None

while True:
    masukan = input().split()

    if(masukan[0] == "BANGUN"):
		# dapatkan nilai ini dari masukan_split sesuai indexnya (lihat format input)
		nama = None
		jenis_bangunan = None

		# lakukan selection untuk menentukan tipe Pegawai
		if(jenis_bangunan == "HOTEL"):
			bangunan = Hotel(nama) #instansiasi objek
		elif(jenis_bangunan == "RESTORAN"):
			bangunan = None
		elif(jenis_bangunan == "RUMAHSAKIT"):
			bangunan = None

		# masukan bangunan yang sudah dibuat ke dalam dictionary
		# cetak pesan sesuai format

	elif(masukan[0] == "INFO"):

	elif(masukan[0] == "JUALMAKANAN"):

	elif(masukan[0] == "TERIMATAMU")

	elif(masukan[0] == "OBATIPASIEN"):

	elif(masukan[0] == "HITUNGUANG"):
5bf67ac445da7b69dd4f883b8d4ed89bd17f8274
add urlinfo with basic youtube parsing
modules/urlinfo.py
modules/urlinfo.py
Python
0
@@ -0,0 +1,1920 @@ +from twisted.web.client import getPage
from twisted.internet.defer import inlineCallbacks
from core.Uusipuu import UusipuuModule

import re
import lxml.html

class Module(UusipuuModule):

    def startup(self):
        self.log('urlinfo.py loaded')

    def privmsg(self, user, target, msg):
        if target != self.channel:
            return

        urls = self.parse_urls(msg)
        if not len(urls):
            return

        re_youtube = re.compile(
            '^(https?\:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$')
        for url in urls:
            if re_youtube.match(url):
                d = getPage(url)
                d.addCallback(self.show_youtube)

    def show_youtube(self, output):
        if output is None or not len(output):
            print('Received empty youtube data!')
            return
        data = self.parse_youtube(output)
        print(data['title'])

        self.chanmsg('%s' % (data['title'],))

    def parse_youtube(self, output):
        foo = lxml.html.fromstring(output)
        title = None

        for result in foo.iterfind('.//meta'):
            prop = result.get('property')
            if prop is None:
                continue
            if prop != 'og:title':
                continue
            title = result.get('content')
            break

        if not title:
            return None

        return {
            'title': title,
        }

    def parse_urls(self, s):
        # TODO: http://www.google.com/asdasd)
        re_url = re.compile(
            '(https?:\/\/(?:www\.|(?!www))[^\s\.]+\.[^\s]{2,}|www\.[^\s]+\.[^\s]{2,})')
        matches = re_url.findall(s)
        ret = []
        for match in matches:
            if match is None:
                continue
            if not match.startswith('http'):
                ret.append('http://' + match)
            else:
                ret.append(match)
        return ret
e70ec3a60d36a6d525230b6864a516ac9bf9c255
importing __init__ in counting
theshitfolder/Jake/__init__.py_misconfigure/src/counting.py
theshitfolder/Jake/__init__.py_misconfigure/src/counting.py
Python
0.999645
@@ -0,0 +1,606 @@ +import __init__

#Log Here
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

#import timeit
#TODO add a shit ton more threads
#just counts files were indexing
def counting():
	root=os.path.expanduser('~')#change this before development
	#print "start"
	i=1
	#start = timeit.default_timer()
	for dir_name, sub_dirs, files in os.walk(root):
		#print i
		try:
			i = i+1#change to i+=1 later
			logger.info("Counted to file " + str(i))
		except BaseException:
			logger.debug("An Error Occured, Continuing")
			continue
	return i #returns the count
	#stop = timeit.default_timer()
fdb2dc8b54c5d7194639457444c32c20d5e2bfca
Create launch.py
launch.py
launch.py
Python
0.000001
@@ -0,0 +1,189 @@ +#!/usr/bin/env python

from __future__ import print_function
from __future__ import division
import pygecko
import opencvutils as cvu

def run():
    pass

if __name__ == '__main__':
    run()
cee5313906b2ee7e4fb01fc772e2afc6c4de1072
Add simple lauch script without configuration options
launch.py
launch.py
Python
0
@@ -0,0 +1,695 @@ +from twisted.application import internet, service
from twisted.names import dns
from twisted.names import server
from openvpnzone import OpenVpnStatusAuthority, extract_status_file_path


def createOpenvpn2DnsService():
    zones = [OpenVpnStatusAuthority(extract_status_file_path('server.conf'))]

    f = server.DNSServerFactory(zones, None, None, 100)
    p = dns.DNSDatagramProtocol(f)
    f.noisy = 0

    m = service.MultiService()
    for (klass, arg) in [(internet.TCPServer, f), (internet.UDPServer, p)]:
        s = klass(53535, arg)
        s.setServiceParent(m)
    return m

application = service.Application("OpenVPN2DNS")

createOpenvpn2DnsService().setServiceParent(application)
281e328711b9724027eb6b64939bf9795fe86ac4
Create linter.py
linter.py
linter.py
Python
0.000002
@@ -0,0 +1,782 @@ +#!/usr/bin/python

import yaml, sys, getopt, os.path

def main(argv):
    try:
        opts, args = getopt.getopt(argv,"hi:")
    except getopt.GetoptError:
        print 'linter.py -i <inputfile.yml>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'linter.py -i <inputfile.yml>'
            sys.exit()
        elif opt == '-i':
            if os.path.isfile(arg):
                stream = open(arg, 'r')
                try:
                    yaml.safe_load(stream)
                    sys.exit()
                except yaml.scanner.ScannerError:
                    sys.exit(1)
            else:
                print "Input file is missing or not readable"
                sys.exit(1)

if __name__ == "__main__":
    main(sys.argv[1:])
5d7574728290fd1afba39769bb933b12b6044ee9
Create massinvandring_streamer.py
massinvandring_streamer.py
massinvandring_streamer.py
Python
0.000001
@@ -0,0 +1,776 @@ +# the MassinvandringStreamer is a subclass of TwythonStreamer
from twython import TwythonStreamer

# the MassinvandringStreamer class will use the streaming api to find tweets containing the word 'massinvandring'
# This class could technically be used to reply to all kinds of tweets.
class MassinvandringStreamer(TwythonStreamer):
    # this function will be called when a tweet is received
    def on_success(self, data):
        # generate a reply
        print("should generate a reply; not implemented yet though")

    # when an error is caught
    def on_error(self, status_code, data):
        print("STREAMING API ERROR!")
        print("Status code:")
        print(status_code)
        print("Other data:")
        print(data)
        print("END OF ERROR MESSAGE")
ced30f90907909090c0da0e468c855f400d9da92
Add shallow tests for spin-1/2 general drudge
tests/spin_one_half_gen_test.py
tests/spin_one_half_gen_test.py
Python
0
@@ -0,0 +1,509 @@ +"""Tests for the general model with explicit one-half spin."""

import pytest

from drudge import UP, DOWN, SpinOneHalfGenDrudge


@pytest.fixture(scope='module')
def dr(spark_ctx):
    """The fixture with a general spin one-half drudge."""
    return SpinOneHalfGenDrudge(spark_ctx)


def test_spin_one_half_general_drudge_has_properties(dr):
    """Test the basic properties of the drudge."""

    assert dr.spin_vals == [UP, DOWN]
    assert dr.orig_ham.n_terms == 2 + 4
    assert dr.ham.n_terms == 2 + 3
251e88398541124555b0c87edf83a59c4ea0347a
add testing framework for new announcer
tests/test_announcer_2_chair.py
tests/test_announcer_2_chair.py
Python
0
@@ -0,0 +1,320 @@ +import unittest
from source.announcer_2_chair import *

class AnnouncerTestCase(unittest.TestCase):

    def setUp(self):
        pass

    def test_announcer_is_a_class(self):
        pass

    def test_announcer_has_a_show_method(self):
        pass

    def test_announcer_has_an_ask_human_method(self):
        pass
733dc300dff354312fdfa7588bcd7636117ac0c7
Create SpatialFieldRetrieve.py
SpatialFieldRetrieve.py
SpatialFieldRetrieve.py
Python
0
@@ -0,0 +1,2563 @@ +#-------------------------------------------------------------------------------
# Name:        Spatial Field Retrieval
# Purpose:     Retrieve a field from the source dataset and use it to populate
#              the target field. Honors selections.
# Author:      Andy Bradford
#
# Created:     25/02/2016
# Copyright:   (c) andy.bradford 2016
#-------------------------------------------------------------------------------

import arcpy
from arcpy import env
env.overwriteOutput = True

#parameters

#Layer to be calculated
InLayer = arcpy.GetParameterAsText(0)
#InField: Layer which will receive final data
InField = arcpy.GetParameterAsText(1)
#SourceLayer: Layer which contributes data.
SourceLayer = arcpy.GetParameterAsText(2)
#SourceField: source field
SourceField = arcpy.GetParameterAsText(3)
#SpatShip = spatial relationship - same as Spatial Join tool
SpatShip = arcpy.GetParameterAsText(4)
#MergeRule: How to handle one-to-many relationships
MergeRule = arcpy.GetParameterAsText(5)
#SearchDist: search distance
SearchDist = arcpy.GetParameterAsText(6)

#Create field map a la forrestchev
#thanks to forrestchev on GIS StackExchange
#this field mapping code sets up the Spatial Join code later
#to create an output with only the Target_FID and the source field.
ScratchFMS = arcpy.FieldMappings()
ScratchFMS.addTable(SourceLayer)
SourceIndex = ScratchFMS.findFieldMapIndex(SourceField)
SourceFM = ScratchFMS.getFieldMap(SourceIndex)
ScratchFMS = arcpy.FieldMappings()
SourceFM.addInputField(SourceLayer, SourceField)
SourceFM.mergeRule = MergeRule
ScratchFMS.addFieldMap(SourceFM)




#spatial join to scratch features
arcpy.SpatialJoin_analysis(InLayer, SourceLayer, "ScratchSJ", "JOIN_ONE_TO_ONE",
                           "KEEP_ALL", ScratchFMS, SpatShip, SearchDist)
arcpy.AddMessage("Spatial Join completed.")

#create dictionary object for join purposes.
#the key will be the Target FID, and the value is the target field value.
JoinDict = {}
with arcpy.da.SearchCursor("ScratchSJ", ("TARGET_FID", SourceField)) as cursor:
    for row in cursor:
        fid = row[0]
        val = row[1]
        JoinDict[fid] = val

arcpy.AddMessage("Dictionary created.")

#Update cursor, hinges on dictionary
with arcpy.da.UpdateCursor(InLayer, ("OID@", InField)) as cursor:
    #reach into dictionary using FID values
    for row in cursor:
        #Search for dictionary item with feature's FID as key
        val = JoinDict[row[0]]
        row[1] = str(val)
        cursor.updateRow(row)

#delete ScratchSJ file.
arcpy.Delete_management("ScratchSJ")
4791122d34cbf4eaf6bc118c5e7e78346dee7010
add cost_ensemble
costcla/models/cost_ensemble.py
costcla/models/cost_ensemble.py
Python
0.001376
@@ -0,0 +1,18 @@ +__author__ = 'al'
abd6fab2000d8af016a0251ab9fb912c359a77ed
add atom for eLisp
compiler/eLisp2/eLisp/atom.py
compiler/eLisp2/eLisp/atom.py
Python
0.000021
@@ -0,0 +1,2850 @@ +#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list ofconditions and the following disclaimer.
#
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materialsprovided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from error import UnimplementedFunctionError
from interface import Eval, Egal
from seq import Sequence, List

class Atom(Eval, Egal):
    def __init__(self, data):
        self.data = data
        self.hint = 'atom'

    def __eq__(self, rhs):
        if isinstance(rhs, Atom):
            return self.data == rhs.data
        else:
            return False

class Symbol(Atom):
    def __init__(self, symbol):
        super(Symbol, self).__init__(symbol)

    def __repr__(self):
        return self.data

    def __hash__(self):
        return hash(self.data)

    def eval(self, env, args=None):
        return env.get(self.data)

TRUE = Symbol('#t')
FALSE = List()

class String(Atom, Sequence):
    def __init__(self, str):
        Atom.__init__(self, str)

    def __repr__(self):
        return repr(self.data)

    def eval(self, env, args=None):
        return self

    def cons(self, e):
        if e.__class__ != self.__class__ and \
           e.__class__ != Symbol.__class__:
            raise UnimplementedFunctionError(
                'Cannot cons a string and a ',
                e.__class__.__name__)
        return String(e.data + self.data)

    def car(self):
        """
        `car` is roughly the same as `first` in linear eLisp
        """
        return Symbol(self.data[0])

    def cdr(self):
        """
        `cdr` is roughly the same as 'rest' in linear eLisp
        """
        return String(self.data[1:])
59f9e552d16e7d4dca73b1232c0804d4ef3154a7
Add functioning code for training sequence
training_sequence_classifier.py
training_sequence_classifier.py
Python
0.000011
@@ -0,0 +1,1677 @@ +import tensorflow as tf%0Aimport numpy as np%0A%0Atf.set_random_seed(5)%0A%0An_steps = 28%0An_inputs = 28%0An_neurons = 150%0An_outputs = 10%0A%0Alearning_rate = 0.001%0A%0AX = tf.placeholder(tf.float32, %5BNone, n_steps, n_inputs%5D)%0Ay = tf.placeholder(tf.int32, %5BNone%5D)%0A%0Awith tf.variable_scope('rnn', initializer=tf.contrib.layers.variance_scaling_initializer()):%0A basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)%0A outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)%0A%0Alogits = tf.layers.dense(states, n_outputs)%0Ax_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)%0Aloss = tf.reduce_mean(x_entropy)%0Aoptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)%0Atraining_op = optimizer.minimize(loss)%0Acorrect = tf.nn.in_top_k(logits, y, 1)%0Aaccuracy = tf.reduce_mean(tf.cast(correct, tf.float32))%0A%0Ainit = tf.global_variables_initializer()%0A%0Afrom tensorflow.examples.tutorials.mnist import input_data%0Amnist = input_data.read_data_sets('/tmp/data/')%0AX_test = mnist.test.images.reshape((-1, n_steps, n_inputs))%0Ay_test = mnist.test.labels%0A%0An_epochs = 100%0Abatch_size = 150%0A%0Awith tf.Session() as sess:%0A init.run()%0A for epoch in range(n_epochs):%0A for k in range(mnist.train.num_examples // batch_size):%0A X_batch, y_batch = mnist.train.next_batch(batch_size)%0A X_batch = X_batch.reshape((-1, n_steps, n_inputs))%0A sess.run(training_op, feed_dict=%7BX: X_batch, y: y_batch%7D)%0A acc_train = accuracy.eval(feed_dict=%7BX: X_batch, y: y_batch%7D)%0A acc_test = accuracy.eval(feed_dict=%7BX: X_test, y: y_test%7D)%0A print(epoch, 'Train acc: ', acc_train, 'Test acc: ', acc_test)
6ea0d957a49f734151605c952768d15183d3a285
Create __init__.py
CyberGuard_v2/secrets/__init__.py
CyberGuard_v2/secrets/__init__.py
Python
0.000429
@@ -0,0 +1 @@ +%0A
9e38386947ba01effcf5908adad264aa77a688e5
Add basic auth module
modernrpc/auth.py
modernrpc/auth.py
Python
0.000001
@@ -0,0 +1,1338 @@ +# coding: utf-8%0A%0A%0Adef user_pass_test(func=None, test_function=None, params=None):%0A%0A def decorated(function):%0A%0A function.modernrpc_auth_check_function = test_function%0A function.modernrpc_auth_check_params = params%0A return function%0A%0A # If @rpc_method is used without any argument nor parenthesis%0A if func is None:%0A def decorator(f):%0A return decorated(f)%0A return decorator%0A%0A # If @rpc_method() is used with parenthesis (with or without arguments)%0A return decorated(func)%0A%0A%0Adef check_user_is_logged(user):%0A if user:%0A return not user.is_anonymous()%0A return False%0A%0A%0Adef check_user_is_admin(user):%0A if user:%0A return user.is_admin()%0A return False%0A%0A%0Adef check_user_has_perm(user, perm):%0A if user:%0A return user.has_perm(perm)%0A return False%0A%0A%0Adef check_user_has_perms(user, perms):%0A if user:%0A return user.has_perms(perms)%0A return False%0A%0A%0Adef login_required(func=None):%0A%0A def decorated(function):%0A return user_pass_test(function, check_user_is_logged)%0A%0A # If @rpc_method is used without any argument nor parenthesis%0A if func is None:%0A def decorator(f):%0A return decorated(f)%0A return decorator%0A%0A # If @rpc_method() is used with parenthesis (with or without arguments)%0A return decorated(func)%0A
2a44794af558563d9cdfc1d0ea9bf072fad41ffa
test soma_workflow working directory
epac/tests/test_swf_wd.py
epac/tests/test_swf_wd.py
Python
0
@@ -0,0 +1,1129 @@ +# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Tue Sep 17 17:07:47 2013%0A%0A@author: jinpeng.li@cea.fr%0A%22%22%22%0A%0Aimport tempfile%0Aimport os%0Afrom soma_workflow.client import Job, Workflow%0Afrom soma_workflow.client import Helper, FileTransfer%0Afrom soma_workflow.client import WorkflowController%0Aimport socket%0Aimport os.path%0A%0A%0Aif __name__ == '__main__':%0A tmp_work_dir_path = tempfile.mkdtemp()%0A cur_work_dir = os.getcwd()%0A test_filepath = u%22./onlytest.txt%22%0A%0A job = Job(command=%5Bu%22touch%22, test_filepath%5D,%0A name=%22epac_job_test%22,%0A working_directory=tmp_work_dir_path)%0A soma_workflow = Workflow(jobs=%5Bjob%5D)%0A%0A resource_id = socket.gethostname()%0A controller = WorkflowController(resource_id, %22%22, %22%22)%0A ## run soma-workflow%0A ## =================%0A wf_id = controller.submit_workflow(workflow=soma_workflow,%0A name=%22epac workflow%22)%0A Helper.wait_workflow(wf_id, controller)%0A if not os.path.isfile(os.path.join(tmp_work_dir_path, test_filepath)):%0A raise ValueError(%22Soma-workflow cannot define working directory%22)%0A else:%0A print %22OK%22%0A
231029d867171ad5ee708c61d8a0aed60127aa9a
Add test for Link object.
cybox/test/objects/link_test.py
cybox/test/objects/link_test.py
Python
0
@@ -0,0 +1,612 @@ +# Copyright (c) 2015, The MITRE Corporation. All rights reserved.%0A# See LICENSE.txt for complete terms.%0A%0Aimport unittest%0A%0Afrom mixbox.vendor.six import u%0A%0Afrom cybox.objects.link_object import Link%0Afrom cybox.objects.uri_object import URI%0Afrom cybox.test.objects import ObjectTestCase%0A%0A%0Aclass TestLink(ObjectTestCase, unittest.TestCase):%0A object_type = %22LinkObjectType%22%0A klass = Link%0A%0A _full_dict = %7B%0A 'value': u(%22http://www.example.com%22),%0A 'type': URI.TYPE_URL,%0A 'url_label': u(%22Click Here!%22),%0A 'xsi:type': object_type,%0A %7D%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A
07b198463951753535217ff1612c2789045c4046
add manage.py
manage.py
manage.py
Python
0.000001
@@ -0,0 +1,296 @@ +#!/usr/bin/env python%0Aimport os%0Aimport sys%0A%0Aif __name__ == %22__main__%22:%0A os.environ.setdefault(%22DJANGO_SETTINGS_MODULE%22,%0A %22seed_staged_based_messaging.settings%22)%0A%0A from django.core.management import execute_from_command_line%0A%0A execute_from_command_line(sys.argv)%0A
2cad12729048dd5dc52b5d612656fe60bb3bd256
Use '/usr/bin/env python' instead of '/usr/bin/python' in manage.py to support running manage.py as an executable in virtualenvs.
manage.py
manage.py
#!/usr/bin/python import os import sys if not os.path.exists('mysite/manage.py'): print "Eek, where is the real manage.py? Quitting." sys.exit(1) execfile('mysite/manage.py', globals(), locals())
Python
0
@@ -4,16 +4,20 @@ usr/bin/ +env python%0Ai
979dbd0ff0fba03847ca96beaf4d68a0f4e5c9eb
Add beam information to 850um file
data/b5_scuba2_850um_addbeam.py
data/b5_scuba2_850um_addbeam.py
Python
0
@@ -0,0 +1,387 @@ +import os%0Afrom astropy.io import fits%0A%0Afile_scuba2_raw='B5_850um_ext_v2_regrid.fits'%0Afile_scuba2_out='B5_850um_ext_v2_regrid_beam.fits'%0A%0Ahdu = fits.open(file_scuba2_raw)%0Ahdr =hdu%5B0%5D.header%0Adata=hdu%5B0%5D.data%0Ahdu.close()%0Ahdr.append(('BMAJ', 14.6/3600.))%0Ahdr.append(('BMIN', 14.6/3600.))%0Ahdr.append(('BPA', 0.0))%0Aos.system('rm -r '+file_scuba2_out)%0Afits.writeto(file_scuba2_out, data, hdr)%0A
d87311d349b3a7a25b23bd03804a27fd29e90b52
add missing file
moban/data_loaders/manager.py
moban/data_loaders/manager.py
Python
0.000001
@@ -0,0 +1,638 @@ +import os%0Afrom lml.plugin import PluginManager%0Afrom moban import constants%0A%0A%0Aclass AnyDataLoader(PluginManager):%0A def __init__(self):%0A super(AnyDataLoader, self).__init__(constants.DATA_LOADER_EXTENSION)%0A%0A def get_data(self, file_name):%0A file_extension = os.path.splitext(file_name)%5B1%5D%0A file_type = file_extension%0A if file_extension.startswith(%22.%22):%0A file_type = file_type%5B1:%5D%0A%0A try:%0A loader_function = self.load_me_now(file_type)%0A except Exception:%0A loader_function = self.load_me_now(constants.DEFAULT_DATA_TYPE)%0A return loader_function(file_name)%0A
5346b024ffc3e4eca25794214a4539cb8a20f08c
add monk file
monk_eproperty.py
monk_eproperty.py
Python
0
@@ -0,0 +1,644 @@ +#!/usr/bin/python%0Aimport monkModule%0Aimport monkTools as tools%0Aimport os%0A%0Adef get_desc():%0A%09return %22E-property simple property interface%22%0A%0Adef create():%0A%09# module name is 'eproperty' and type library.%0A%09myModule = monkModule.Module(__file__, 'eproperty', 'LIBRARY')%0A%09# enable documentation :%0A%09myModule.set_website(%22http://atria-soft.github.io/eproperty/%22)%0A%09myModule.set_website_sources(%22http://github.com/atria-soft/eproperty/%22)%0A%09myModule.set_path(os.path.join(tools.get_current_path(__file__), %22eproperty%22))%0A%09myModule.set_path_general_doc(os.path.join(tools.get_current_path(__file__), %22doc%22))%0A%09# add the current module at the %0A%09return myModule%0A%0A
d0a53dfaec71959728aeecfee755e0bde6e2370e
Make octal numbers python2 and python3 compliant.
neon/ipc/shmem.py
neon/ipc/shmem.py
# ---------------------------------------------------------------------------- # Copyright 2015 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ Shared-memory based IPC for accepting data from third-party applications. """ import numpy as np import posix_ipc as ipc import mmap import struct import logging logger = logging.getLogger(__name__) class Message(object): def __init__(self, suffix, header_format): self.header_size = struct.calcsize(header_format) self.header_format = header_format self.shmem_name = '/neon-shmem-' + suffix self.empty_sem_name = '/neon-empty-sem-' + suffix self.fill_sem_name = '/neon-fill-sem-' + suffix self.mutex_name = '/neon-mutex-' + suffix self.memory = None self.mapfile = None self.empty_sem = None self.fill_sem = None self.mutex = None def create(self, data_size): self.data_size = data_size self.shmem_size = self.header_size + self.data_size self.memory, self.mapfile = self.create_shmem(self.shmem_name, self.shmem_size) self.empty_sem = self.create_sem(self.empty_sem_name, 1) self.fill_sem = self.create_sem(self.fill_sem_name, 0) self.mutex = self.create_sem(self.mutex_name, 1) def destroy(self): self.destroy_shmem(self.memory, self.mapfile) for item in [self.empty_sem, self.fill_sem, self.mutex]: self.destroy_sem(item) def open(self): self.memory, self.mapfile = self.open_shmem(self.shmem_name) self.data_size = self.memory.size self.empty_sem = self.open_sem(self.empty_sem_name) self.fill_sem = self.open_sem(self.fill_sem_name) self.mutex = self.open_sem(self.mutex_name) def close(self): self.close_shmem(self.memory, self.mapfile) for item in [self.empty_sem, self.fill_sem, self.mutex]: self.close_sem(item) def create_sem(self, name, initial_value): try: sem = ipc.Semaphore(name, ipc.O_CREX, 0660, initial_value) except ipc.ExistentialError: logger.warning('Deleting semaphore %s', name) self.destroy_sem(self.open_sem(name)) sem = ipc.Semaphore(name, ipc.O_CREX, 0660, initial_value) return sem def destroy_sem(self, sem): if sem is not None: sem.unlink() def create_shmem(self, name, size): try: memory = ipc.SharedMemory(name, ipc.O_CREX, 0660, size) except ipc.ExistentialError: logger.warning('Deleting shared memory %s', name) self.destroy_shmem(*self.open_shmem(name)) memory = ipc.SharedMemory(name, ipc.O_CREX, 0660, size) mapfile = mmap.mmap(memory.fd, memory.size) return memory, mapfile def destroy_shmem(self, memory, mapfile): self.close_shmem(memory, mapfile) if memory is not None: memory.unlink() def open_sem(self, name): return ipc.Semaphore(name, 0) def close_sem(self, sem): if sem is not None: sem = sem.close() def open_shmem(self, name): memory = ipc.SharedMemory(name, 0) mapfile = mmap.mmap(memory.fd, 0) return memory, mapfile def close_shmem(self, memory, mapfile): if memory is not None: memory.close_fd() if mapfile is not None: mapfile.close() def send(self, data, header): self.empty_sem.acquire() self.mutex.acquire() self.mapfile.seek(0) if len(header) != 0: 
packed_header = struct.pack(self.header_format, *header) self.mapfile.write(packed_header) self.mapfile.write(np.getbuffer(data)) self.mutex.release() self.fill_sem.release() def receive(self): self.fill_sem.acquire() self.mutex.acquire() self.mapfile.seek(0) if self.header_size != 0: packed_header = self.mapfile.read(self.header_size) header = struct.unpack(self.header_format, packed_header) else: header = () buf = self.mapfile.read(self.data_size) data = np.frombuffer(buf, dtype=np.uint8) self.mutex.release() self.empty_sem.release() return data, header class Endpoint(object): def __init__(self, **kwargs): self.req_name = 'req' self.res_name = 'res' self.req_header_format = '' self.res_header_format = '' self.__dict__.update(kwargs) self.request = Message(self.req_name, self.req_header_format) self.response = Message(self.res_name, self.res_header_format) class Server(Endpoint): def __init__(self, **kwargs): super(Server, self).__init__(**kwargs) self.start() def start(self): self.request.create(self.req_size) self.response.create(self.res_size) logger.info('Started shared-memory server') def stop(self): self.request.destroy() self.response.destroy() def send(self, data, header=()): self.response.send(data, header) def receive(self): return self.request.receive() class Client(Endpoint): def __init__(self, **kwargs): super(Client, self).__init__(**kwargs) self.start() def start(self): self.request.open() self.response.open() logger.info('Started shared-memory client') def stop(self): self.request.close() self.response.close() def send(self, data, header=()): self.request.send(data, header) def receive(self): return self.response.receive()
Python
0.000055
@@ -2680,32 +2680,33 @@ e, ipc.O_CREX, 0 +o 660, initial_val @@ -2701,32 +2701,32 @@ initial_value)%0A - except i @@ -2901,24 +2901,25 @@ pc.O_CREX, 0 +o 660, initial @@ -3134,32 +3134,33 @@ e, ipc.O_CREX, 0 +o 660, size)%0A @@ -3300,32 +3300,32 @@ en_shmem(name))%0A - memo @@ -3365,16 +3365,17 @@ _CREX, 0 +o 660, siz
3aa165d9527266d978d943437cb03816c30b8608
add a fit_nh3 test
examples/ammonia_fit_example_wrapper.py
examples/ammonia_fit_example_wrapper.py
Python
0.00001
@@ -0,0 +1,1171 @@ +from __future__ import print_function%0Aimport pyspeckit%0Aimport numpy as np%0Afrom astropy import units as u%0A%0Afrom pyspeckit.spectrum.models import ammonia%0A%0Axarr = np.linspace(-40, 40, 300) * u.km/u.s%0Aoneonemod = ammonia.ammonia(xarr.to(u.GHz, u.doppler_radio(ammonia.freq_dict%5B'oneone'%5D*u.Hz)),)%0Atwotwomod = ammonia.ammonia(xarr.to(u.GHz, u.doppler_radio(ammonia.freq_dict%5B'twotwo'%5D*u.Hz)),)%0A%0Asp11 = pyspeckit.Spectrum(xarr=xarr, data=oneonemod, unit=u.K,%0A xarrkwargs=%7B'refX': ammonia.freq_dict%5B'oneone'%5D*u.Hz%7D)%0Asp22 = pyspeckit.Spectrum(xarr=xarr, data=twotwomod, unit=u.K,%0A xarrkwargs=%7B'refX': ammonia.freq_dict%5B'twotwo'%5D*u.Hz%7D)%0A%0A%0Ainput_dict=%7B'oneone':sp11, 'twotwo':sp22,%7D%0Aspf, specout = pyspeckit.wrappers.fitnh3.fitnh3tkin(input_dict, dobaseline=False)%0Aprint(specout.specfit.modelpars)%0Aprint(specout.specfit.parinfo)%0A%0Aspf2, specout2 = pyspeckit.wrappers.fitnh3.fitnh3tkin(input_dict,%0A dobaseline=True,%0A baselinekwargs=%7B'exclude':%5B-30,30%5D*u.km/u.s%7D)%0Aprint(specout.specfit.modelpars)%0Aprint(specout.specfit.parinfo)%0A
29d8e20e41ab599030cd1027069ba01f569c1627
add terminal highlight utils
highlight.py
highlight.py
Python
0.000001
@@ -0,0 +1,1616 @@ +class bcolors:%0A HEADER = '%5C033%5B95m'%0A OKBLUE = '%5C033%5B94m'%0A OKGREEN = '%5C033%5B92m'%0A WARNING = '%5C033%5B93m'%0A FAIL = '%5C033%5B91m'%0A ENDC = '%5C033%5B0m'%0A BOLD = '%5C033%5B1m'%0A UNDERLINE = '%5C033%5B4m'%0A%0Adef highlight( s, **term_color ):%0A %22%22%22return ANSI color rendered string%0A This will work on unixes including OS X, linux and windows (provided you enable ansi.sys).%0A %22%22%22%0A color_pos = %7B%7D%0A%0A for term, color in term_color.items():%0A start = 0%0A while start %3C len( s ):%0A term_start = s.find( term, start )%0A term_end = term_start + len( term )%0A if term_start == -1:%0A break%0A start = term_start + 1%0A%0A color_pos%5B term_start %5D = color%0A color_pos%5B term_end %5D = bcolors.ENDC%0A%0A if len( color_pos ) == 0:%0A return s %0A%0A segments = %5B%5D%0A last_i = 0%0A for i in range( len( s ) + 1 ):%0A if i in color_pos:%0A segments.append( s%5B last_i : i %5D )%0A segments.append( color_pos%5B i %5D )%0A last_i = i%0A segments.append( s%5B last_i : %5D )%0A %0A return ''.join( segments )%0A %0Aif __name__ == '__main__':%0A s = %22How to print string with color in terminal? %5Cn%22 %5C%0A %22This somewhat depends on what platform you are on. %5Cn%22 %5C%0A %22The most common way to do this is by printing ANSI escape sequences.%22%0A%0A terms_color = %7B%0A 'print string with color': bcolors.WARNING,%0A 'platform': bcolors.OKBLUE,%0A 'ANSI escape sequences': bcolors.FAIL%0A %7D%0A print highlight( s, **terms_color )%0A
23607247006f36034ba29eba0fddbc35c9f407b4
add script for automatic dependency updates
bin/update-web-dependencies.py
bin/update-web-dependencies.py
Python
0
@@ -0,0 +1,1669 @@ +#!/usr/bin/env python%0A%0Aimport sys,re,urllib.request,json,fileinput%0Afrom prettytable import PrettyTable%0A%0Adef replaceAll(file,searchExp,replaceExp):%0A for line in fileinput.input(file, inplace=1):%0A if searchExp in line:%0A line = line.replace(searchExp,replaceExp)%0A sys.stdout.write(line)%0A%0Afilename = sys.argv%5B1%5D;%0Aupdates = PrettyTable(%5B'Dependency', 'Old Version', 'New Version'%5D)%0Afor line in open(filename).read().split(%22%5Cn%22):%0A%09if %22cdnjs%22 in line:%0A%09%09dependency = re.match(%22(?:.+)%5C/ajax%5C/libs%5C/(%5Ba-z%5C-%5C.%5D+)%5C/(%5B0-9a-zA-Z%5C.%5C-%5D+)%5C/%22, line);%0A%09%09if dependency:%0A%09%09%09dep_name = dependency.groups()%5B0%5D;%0A%09%09%09dep_version = dependency.groups()%5B1%5D;%0A%09%09%09with urllib.request.urlopen(%22https://api.cdnjs.com/libraries/%22+dep_name+%22?fields=name,version%22) as url:%0A%09%09%09%09data = json.loads(url.read().decode())%0A%09%09%09%09dep_version_new = data%5B%22version%22%5D%0A%09%09%09%09if dep_version != dep_version_new:%0A%09%09%09%09%09updatedLine = line.replace(dep_version,dep_version_new)%0A%09%09%09%09%09replaceAll(filename, line, updatedLine)%0A%09%09%09%09%09updates.add_row(%5Bdep_name, dep_version, dep_version_new%5D)%0A%0A%09if %22use.fontawesome%22 in line:%0A%09%09dependency = re.match(%22(?:.+)%5C/releases%5C/(%5B0-9a-zA-Z%5C.%5C-%5D+)%5C/%22, line);%0A%09%09if dependency:%0A%09%09%09dep_name = %22fontawesome%22;%0A%09%09%09dep_version = dependency.groups()%5B0%5D;%0A%09%09%09with urllib.request.urlopen(%22https://api.github.com/repos/FortAwesome/Font-Awesome/releases/latest%22) as url:%0A%09%09%09%09data = json.loads(url.read().decode())%0A%09%09%09%09dep_version_new = %22v%22+data%5B%22tag_name%22%5D%0A%09%09%09%09if dep_version != dep_version_new:%0A%09%09%09%09%09updatedLine = line.replace(dep_version,dep_version_new)%0A%09%09%09%09%09replaceAll(filename, line, updatedLine)%0A%09%09%09%09%09updates.add_row(%5Bdep_name, dep_version, dep_version_new%5D)%0A%09%09%0Aprint(updates)
f2f2f8833628058052fae0c5c814e42411f681d2
Add migrations
python/ecep/portal/migrations/0021_auto_20170625_1454.py
python/ecep/portal/migrations/0021_auto_20170625_1454.py
Python
0.000001
@@ -0,0 +1,571 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('portal', '0020_auto_20170525_1921'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='contact',%0A name='email',%0A field=models.EmailField(max_length=254),%0A ),%0A migrations.AlterField(%0A model_name='location',%0A name='email',%0A field=models.EmailField(max_length=254, blank=True),%0A ),%0A %5D%0A
0de4e4d0d81f4b1d0ecf2b7a91b0aa1db84c7e88
Add default locale for currency
oscar/defaults.py
oscar/defaults.py
from django.utils.translation import ugettext_lazy as _ OSCAR_SHOP_NAME = 'Oscar' OSCAR_SHOP_TAGLINE = '' # Basket settings OSCAR_BASKET_COOKIE_LIFETIME = 7 * 24 * 60 * 60 OSCAR_BASKET_COOKIE_OPEN = 'oscar_open_basket' OSCAR_BASKET_COOKIE_SAVED = 'oscar_saved_basket' OSCAR_MAX_BASKET_QUANTITY_THRESHOLD = 10000 # Currency OSCAR_DEFAULT_CURRENCY = 'GBP' # Max number of products to keep on the user's history OSCAR_RECENTLY_VIEWED_PRODUCTS = 20 # Paths OSCAR_IMAGE_FOLDER = 'images/products/%Y/%m/' OSCAR_PROMOTION_FOLDER = 'images/promotions/' # Copy this image from oscar/static/img to your MEDIA_ROOT folder. # It needs to be there so Sorl can resize it. OSCAR_MISSING_IMAGE_URL = 'image_not_found.jpg' OSCAR_UPLOAD_ROOT = '/tmp' # Address settings OSCAR_REQUIRED_ADDRESS_FIELDS = ('first_name', 'last_name', 'line1', 'line4', 'postcode', 'country') # Search settings OSCAR_SEARCH_SUGGEST_LIMIT = 10 # Product list settings OSCAR_PRODUCTS_PER_PAGE = 20 # Checkout OSCAR_ALLOW_ANON_CHECKOUT = False # Partners OSCAR_PARTNER_WRAPPERS = {} # Promotions COUNTDOWN, LIST, SINGLE_PRODUCT, TABBED_BLOCK = ( 'Countdown', 'List', 'SingleProduct', 'TabbedBlock') OSCAR_PROMOTION_MERCHANDISING_BLOCK_TYPES = ( (COUNTDOWN, "Vertical list"), (LIST, "Horizontal list"), (TABBED_BLOCK, "Tabbed block"), (SINGLE_PRODUCT, "Single product"), ) OSCAR_PROMOTION_POSITIONS = (('page', 'Page'), ('right', 'Right-hand sidebar'), ('left', 'Left-hand sidebar')) # Reviews OSCAR_ALLOW_ANON_REVIEWS = True OSCAR_MODERATE_REVIEWS = False # Accounts OSCAR_ACCOUNTS_REDIRECT_URL = 'customer:profile-view' # This enables sending alert notifications/emails # instantly when products get back in stock # by listening to stock record update signals # this might impact performance for large numbers of # stock record updates. # Alternatively, the management command # ``oscar_send_alerts`` can be used to # run periodically, e.g. as a cronjob. In this case # instant alerts should be disabled. OSCAR_EAGER_ALERTS = True # Registration OSCAR_SEND_REGISTRATION_EMAIL = True OSCAR_FROM_EMAIL = 'oscar@example.com' # Offers OSCAR_OFFER_BLACKLIST_PRODUCT = None # Cookies OSCAR_COOKIES_DELETE_ON_LOGOUT = ['oscar_recently_viewed_products', ] # Hidden Oscar features, e.g. 
wishlists or reviews OSCAR_HIDDEN_FEATURES = [] # Menu structure of the dashboard navigation OSCAR_DASHBOARD_NAVIGATION = [ { 'label': _('Dashboard'), 'icon': 'icon-th-list', 'url_name': 'dashboard:index', }, { 'label': _('Catalogue'), 'icon': 'icon-sitemap', 'children': [ { 'label': _('Products'), 'url_name': 'dashboard:catalogue-product-list', }, { 'label': _('Categories'), 'url_name': 'dashboard:catalogue-category-list', }, { 'label': _('Ranges'), 'url_name': 'dashboard:range-list', }, { 'label': _('Low stock alerts'), 'url_name': 'dashboard:stock-alert-list', }, ] }, { 'label': _('Fulfilment'), 'icon': 'icon-shopping-cart', 'children': [ { 'label': _('Order management'), 'url_name': 'dashboard:order-list', }, { 'label': _('Statistics'), 'url_name': 'dashboard:order-stats', }, { 'label': _('Partners'), 'url_name': 'dashboard:partner-list', }, ] }, { 'label': _('Customers'), 'icon': 'icon-group', 'children': [ { 'label': _('Customer management'), 'url_name': 'dashboard:users-index', }, { 'label': _('Stock alert requests'), 'url_name': 'dashboard:user-alert-list', }, ] }, { 'label': _('Offers'), 'icon': 'icon-bullhorn', 'children': [ { 'label': _('Offer management'), 'url_name': 'dashboard:offer-list', }, { 'label': _('Vouchers'), 'url_name': 'dashboard:voucher-list', }, ], }, { 'label': _('Content'), 'icon': 'icon-folder-close', 'children': [ { 'label': _('Content blocks'), 'url_name': 'dashboard:promotion-list', }, { 'label': _('Content blocks by page'), 'url_name': 'dashboard:promotion-list-by-page', }, { 'label': _('Pages'), 'url_name': 'dashboard:page-list', }, { 'label': _('Email templates'), 'url_name': 'dashboard:comms-list', }, { 'label': _('Reviews'), 'url_name': 'dashboard:reviews-list', }, ] }, { 'label': _('Reports'), 'icon': 'icon-bar-chart', 'url_name': 'dashboard:reports-index', }, ] # Search facets OSCAR_SEARCH_FACETS = { 'fields': { # The key for these dicts will be used when passing facet data # to the template. Same for the 'queries' dict below. 'category': { 'name': _('Category'), 'field': 'category' } }, 'queries': { 'price_range': { 'name': _('Price range'), 'field': 'price', 'queries': [ # This is a list of (name, query) tuples where the name will # be displayed on the front-end. (_('0 to 40'), '[0 TO 20]'), (_('20 to 40'), '[20 TO 40]'), (_('40 to 60'), '[40 TO 60]'), (_('60+'), '[60 TO *]'), ] } } } OSCAR_SETTINGS = dict( [(k, v) for k, v in locals().items() if k.startswith('OSCAR_')])
Python
0
@@ -349,16 +349,48 @@ = 'GBP' +%0AOSCAR_CURRENCY_LOCALE = 'en_GB' %0A%0A# Max
4feb6d987f92981542eea2a9501e363277ce4109
Create Tcheck2_clear.py
Tcheck2_clear.py
Tcheck2_clear.py
Python
0.000001
@@ -0,0 +1,2273 @@ +import sys%0Afrom itertools import combinations%0A%0AF_dict = dict()%0AM_dict = dict()%0Acounter = 0%0A%0AfileIn1 = sys.argv%5B1%5D #feb_less_april_clear%0AfileIn2 = sys.argv%5B2%5D #mar_clear.txt %0AfileOut1 = sys.argv%5B3%5D #t_pairs_clear.feb%0AfileOut2 = sys.argv%5B4%5D #t_uniq_pairs_clear.mar%0A%0Afi = open(fileIn1, 'r')%0Anum_lines = sum(1 for line in fi)%0Afi.close()%0Aprint 'Number of lines in training data', num_lines%0A%0AfoF = open(fileOut1, 'w+')%0Afi = open(fileIn1, 'r')%0Afor line in range(0, num_lines): #num_lines):%0A inline = %5Bi for i in fi.readline().rstrip('%5Cn').split('%7C')%5D%0A if len(inline) %3E 3:%0A aidline = inline%5B2:%5D%0A aidline = sorted(set(aidline), reverse = False)%0A while 'NoMATCH' in aidline:%0A aidline.remove('NoMATCH')%0A #print aidline%0A for c in combinations(aidline,2):%0A c = sorted(c, reverse = False)%0A cstr = c%5B0%5D + c%5B1%5D%0A if cstr in F_dict:%0A F_dict%5Bcstr%5D += 1%0A else:%0A F_dict%5Bcstr%5D = 1%0A%0Aprint 'Number of lines packed into a dictionary from first file', len(F_dict)%0Afor key, value in F_dict.iteritems():%0A foF.write(key%5B:6%5D + '%7C' + key%5B-6:%5D + '%7C' + str(value) + '%5Cn')%0A%0AfoF.close()%0Afi.close()%0A%0Afi = open(fileIn2, 'r')%0Anum_lines = sum(1 for line in fi)%0Afi.close()%0Aprint 'Number of rows in test data', num_lines%0A%0AfoR = open(fileOut2, 'w+')%0Afi = open(fileIn2, 'r')%0Afor line in range(0, num_lines): #num_lines):%0A inline = %5Bi for i in fi.readline().rstrip('%5Cn').split('%7C')%5D%0A if len(inline) %3E 3:%0A aidline = inline%5B2:%5D%0A aidline = sorted(set(aidline), reverse = False)%0A while 'NoMATCH' in aidline:%0A aidline.remove('NoMATCH')%0A for c in combinations(aidline,2):%0A #print c%0A c = sorted(c, reverse = False)%0A #print 'sorted', c%0A cstr = c%5B0%5D + c%5B1%5D%0A if cstr not in F_dict:%0A if cstr in M_dict:%0A M_dict%5Bcstr%5D += 1%0A else:%0A M_dict%5Bcstr%5D = 1%0A%0A%0Aprint 'Number of lines packed into a dictionary from first and second file', len(F_dict)%0Afor key, value in M_dict.iteritems():%0A foR.write(key%5B:6%5D + '%7C' + key%5B-6:%5D + '%7C' + str(value) + '%5Cn')%0Aprint 'Number of new, unique pairs in test data: ', len(M_dict)%0A%0AfoR.close()%0Afi.close()%0A%0A
64c21a3e01d50cdc6a719f0e4e48f925d5dd9e03
Add tests for very big AST trees
src/testers/unittests/test_ast_deep.py
src/testers/unittests/test_ast_deep.py
Python
0
@@ -0,0 +1,1271 @@ +import unittest%0Afrom triton import *%0A%0ADEPTH = 10000%0A%0A%0Aclass TestDeep(unittest.TestCase):%0A%0A def setUp(self):%0A %22%22%22Define the arch.%22%22%22%0A self.triton = TritonContext()%0A self.triton.setArchitecture(ARCH.X86_64)%0A self.ctx = self.triton.getAstContext()%0A%0A sym_var = self.ctx.variable(self.triton.convertRegisterToSymbolicVariable(self.triton.registers.rax))%0A%0A add_inst = Instruction()%0A add_inst.setAddress(0x100)%0A add_inst.setOpcode(%22%5Cx48%5Cx01%5Cxc0%22) # add rax, rax%0A%0A sub_inst = Instruction()%0A sub_inst.setOpcode(%22%5Cx48%5Cx29%5CxC0%22) # sub rax, rax%0A%0A for _ in range(DEPTH):%0A self.triton.processing(add_inst)%0A sub_inst.setAddress(add_inst.getAddress() + add_inst.getSize())%0A self.triton.processing(sub_inst)%0A add_inst.setAddress(sub_inst.getAddress() + sub_inst.getSize())%0A%0A self.complex_ast_tree = self.triton.getSymbolicRegister(self.triton.registers.rax).getAst()%0A%0A def test_z3_conversion(self):%0A result = self.triton.simplify(self.complex_ast_tree, True)%0A answer = self.ctx.bv(0, 64)%0A self.assertEqual(str(result), str(answer))%0A%0A def test_duplication(self):%0A s = self.ctx.duplicate(self.complex_ast_tree)%0A
c2f79200689171a49c5bd72e6354ba56ee09a6b6
Upgrade libchromiumcontent to contain printing headers.
script/lib/config.py
script/lib/config.py
#!/usr/bin/env python import platform import sys NODE_VERSION = 'v0.11.13' BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent' LIBCHROMIUMCONTENT_COMMIT = 'bb664e4665851fe923ce904e620ba43d8d010ba5' ARCH = { 'cygwin': '32bit', 'darwin': '64bit', 'linux2': platform.architecture()[0], 'win32': '32bit', }[sys.platform] DIST_ARCH = { '32bit': 'ia32', '64bit': 'x64', }[ARCH] TARGET_PLATFORM = { 'cygwin': 'win32', 'darwin': 'darwin', 'linux2': 'linux', 'win32': 'win32', }[sys.platform]
Python
0
@@ -181,47 +181,47 @@ = ' -bb664e4665851fe923ce904e620ba43d8d010ba +432720d4613e3aac939f127fe55b9d44fea349e 5'%0A%0A
ddf0a5a4438531e4bfa29d8709c1c76d8ca17f59
Add helper module for keyword-only arguments.
volttron/platform/kwonlyargs.py
volttron/platform/kwonlyargs.py
Python
0
@@ -0,0 +1,1438 @@ +'''Support functions for implementing keyword-only arguments.%0A%0AThis module is designed to make it easy to support keyword-only%0Aarguments in Python 2.7 while providing the same kind of exceptions one%0Awould see with Python 3.x.%0A%0ABasic usage:%0A%0A def foo(arg1, *args, **kwargs):%0A # Use required context manager to convert KeyError exceptions%0A # to TypeError with an appropriate message.%0A with required:%0A arg2 = kwargs.pop('arg2')%0A arg3 = kwargs.pop('arg3')%0A # Provide a default to pop for optional arguments%0A arg4 = kwargs.pop('arg4', 'default value')%0A # Include the next line to disallow additional keyword args%0A assertempty(kwargs)%0A'''%0A%0A__all__ = %5B'required', 'assertempty'%5D%0A%0A%0Aclass Required(object):%0A '''Context manager to raise TypeError for missing required kwargs.'''%0A __slots__ = ()%0A @classmethod%0A def __enter__(cls):%0A pass%0A @classmethod%0A def __exit__(cls, exc_type, exc_value, exc_tb):%0A # pylint: disable=bad-context-manager%0A if exc_type is KeyError:%0A raise TypeError(%0A 'missing a required keyword-only argument %25r' %25 exc_value.args)%0A%0Arequired = Required() # pylint: disable=invalid-name%0A%0A%0Adef assertempty(kwargs):%0A '''Raise TypeError if kwargs is not empty.'''%0A if kwargs:%0A name = next(kwargs.iterkeys())%0A raise TypeError('got an unexpected keyword argument %25r' %25 (name,))%0A
99d95d6ed14e912701b1f6ae26779612694590f5
add gdw2 django task tutorial
examples/djangotasks/tasks.py
examples/djangotasks/tasks.py
Python
0.000241
@@ -0,0 +1,2725 @@ +from pyjamas.ui.Label import Label%0Afrom pyjamas.ui.RootPanel import RootPanel%0Afrom pyjamas.ui.VerticalPanel import VerticalPanel%0Afrom pyjamas.ui.TextBox import TextBox%0Afrom pyjamas.ui.ListBox import ListBox%0Afrom pyjamas.ui import KeyboardListener%0A%0Afrom pyjamas.JSONService import JSONProxy%0A%0Aclass TodoApp:%0A    def onModuleLoad(self):%0A        self.remote = DataService()%0A        panel = VerticalPanel()%0A%0A        self.todoTextBox = TextBox()%0A        self.todoTextBox.addKeyboardListener(self)%0A%0A        self.todoList = ListBox()%0A        self.todoList.setVisibleItemCount(7)%0A        self.todoList.setWidth(%22200px%22)%0A        self.todoList.addClickListener(self)%0A%0A        panel.add(Label(%22Add New Todo:%22))%0A        panel.add(self.todoTextBox)%0A        panel.add(Label(%22Click to Remove:%22))%0A        panel.add(self.todoList)%0A%0A        self.status = Label()%0A        panel.add(self.status)%0A%0A        RootPanel().add(panel)%0A%0A%0A%0A    def onKeyUp(self, sender, keyCode, modifiers):%0A        pass%0A%0A    def onKeyDown(self, sender, keyCode, modifiers):%0A        pass%0A%0A    def onKeyPress(self, sender, keyCode, modifiers):%0A        %22%22%22%0A        This function handles the onKeyPress event, and will add the item in the text box to the list when the user presses the enter key. In the future, this method will also handle the auto complete feature.%0A        %22%22%22%0A        if keyCode == KeyboardListener.KEY_ENTER and sender == self.todoTextBox:%0A            id = self.remote.addTask(sender.getText(),self)%0A            sender.setText(%22%22)%0A%0A            if id%3C0:%0A                self.status.setText(%22Server Error or Invalid Response%22)%0A%0A%0A    def onClick(self, sender):%0A        id = self.remote.deleteTask(sender.getValue(sender.getSelectedIndex()),self)%0A        if id%3C0:%0A            self.status.setText(%22Server Error or Invalid Response%22)%0A%0A    def onRemoteResponse(self, response, request_info):%0A        self.status.setText(%22response received%22)%0A        if request_info.method == 'getTasks' or request_info.method == 'addTask' or request_info.method == 'deleteTask':%0A            self.status.setText(self.status.getText() + %22HERE!%22)%0A            self.todoList.clear()%0A            for task in response:%0A                self.todoList.addItem(task%5B0%5D)%0A                self.todoList.setValue(self.todoList.getItemCount()-1,task%5B1%5D)%0A        else:%0A            self.status.setText(self.status.getText() + %22none!%22)%0A%0A    def onRemoteError(self, code, message, request_info):%0A        self.status.setText(%22Server Error or Invalid Response: ERROR %22 + code + %22 - %22 + message)%0A%0Aclass DataService(JSONProxy):%0A    def __init__(self):%0A        JSONProxy.__init__(self, %22/services/%22, %5B%22getTasks%22, %22addTask%22,%22deleteTask%22%5D)%0A%0Aif __name__ == %22__main__%22:%0A    app = TodoApp()%0A    app.onModuleLoad()%0A%0A
cbfb38e904c7bc75c0635d36e896feef6c44fde2
add modbus_thread example
examples/modbus_thread.py
examples/modbus_thread.py
Python
0
@@ -0,0 +1,876 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A# modbus_thread%0A# start a thread for polling a set of registers, display result on console%0A%0Aimport time%0Afrom threading import Thread, Lock%0Afrom pyModbusTCP.client import ModbusClient%0A%0ASERVER_HOST = %22localhost%22%0ASERVER_PORT = 502%0A%0A# set global%0Aregs = %5B%5D%0A%0A# init a thread lock%0Aregs_lock = Lock()%0A%0A# modbus polling thread%0Adef polling_thread():%0A global regs%0A c = ModbusClient(host=SERVER_HOST, port=SERVER_PORT)%0A while True:%0A # keep TCP open%0A if not c.is_open():%0A c.open()%0A reg_list = c.read_holding_registers(0,10)%0A if reg_list:%0A with regs_lock:%0A regs = reg_list%0A time.sleep(1)%0A%0A# start polling thread%0Atp = Thread(target=polling_thread)%0Atp.daemon = True%0Atp.start()%0A%0A# display loop%0Awhile True:%0A with regs_lock:%0A print(regs)%0A time.sleep(1)%0A%0A
7d317f013389f77b09f2b057c2ddd15beca653e4
Fix URL in the pirate bay URL rewriter
flexget/plugins/urlrewrite_piratebay.py
flexget/plugins/urlrewrite_piratebay.py
from __future__ import unicode_literals, division, absolute_import import re import urllib import logging from flexget.plugins.plugin_urlrewriting import UrlRewritingError from flexget.entry import Entry from flexget.plugin import register_plugin, internet, PluginWarning from flexget.utils import requests from flexget.utils.soup import get_soup from flexget.utils.search import torrent_availability, normalize_unicode from flexget import validator log = logging.getLogger('piratebay') TLDS = "com|org|sx|se" CUR_TLD = "sx" URL_MATCH = re.compile("^http://(?:torrent\.)?thepiratebay\.(?:%s)/.*$" % TLDS) URL_SEARCH = re.compile("^http://thepiratebay\.(?:%s)/search/.*$" % TLDS) CATEGORIES = { 'all': 0, 'audio': 100, 'music': 101, 'video': 200, 'movies': 201, 'tv': 205, 'highres movies': 207, 'comics': 602 } SORT = { 'default': 99, # This is piratebay default, not flexget default. 'date': 3, 'size': 5, 'seeds': 7, 'leechers': 9 } class UrlRewritePirateBay(object): """PirateBay urlrewriter.""" def validator(self): root = validator.factory() root.accept('boolean') advanced = root.accept('dict') advanced.accept('choice', key='category').accept_choices(CATEGORIES) advanced.accept('integer', key='category') advanced.accept('choice', key='sort_by').accept_choices(SORT) advanced.accept('boolean', key='sort_reverse') return root # urlrewriter API def url_rewritable(self, task, entry): url = entry['url'] if url.endswith('.torrent'): return False return bool(URL_MATCH.match(url)) # urlrewriter API def url_rewrite(self, task, entry): if not 'url' in entry: log.error("Didn't actually get a URL...") else: log.debug("Got the URL: %s" % entry['url']) if URL_SEARCH.match(entry['url']): # use search results = self.search(entry) if not results: raise UrlRewritingError("No search results found") # TODO: Close matching was taken out of search methods, this may need to be fixed to be more picky entry['url'] = results[0]['url'] else: # parse download page entry['url'] = self.parse_download_page(entry['url']) @internet(log) def parse_download_page(self, url): page = requests.get(url).content try: soup = get_soup(page) tag_div = soup.find('div', attrs={'class': 'download'}) if not tag_div: raise UrlRewritingError('Unable to locate download link from url %s' % url) tag_a = tag_div.find('a') torrent_url = tag_a.get('href') # URL is sometimes missing the schema if torrent_url.startswith('//'): torrent_url = 'http:' + torrent_url return torrent_url except Exception as e: raise UrlRewritingError(e) @internet(log) def search(self, arg_entry, config=None): """ Search for name from piratebay. 
""" if not isinstance(config, dict): config = {} sort = SORT.get(config.get('sort_by', 'seeds')) if config.get('sort_reverse'): sort += 1 if isinstance(config.get('category'), int): category = config['category'] else: category = CATEGORIES.get(config.get('category', 'all')) filter_url = '/0/%d/%d' % (sort, category) entries = set() for search_string in arg_entry.get('search_string', [arg_entry['title']]): query = normalize_unicode(search_string) # TPB search doesn't like dashes query = query.replace('-', ' ') # urllib.quote will crash if the unicode string has non ascii characters, so encode in utf-8 beforehand url = 'http://thepiratebay.%s/search/%s%s' % (CUR_TLD, urllib.quote(query.encode('utf-8')), filter_url) log.debug('Using %s as piratebay search url' % url) page = requests.get(url).content soup = get_soup(page) for link in soup.find_all('a', attrs={'class': 'detLink'}): entry = Entry() entry['title'] = link.contents[0] entry['url'] = 'http://thepiratebay.%s%s' % (CUR_TLD, link.get('href')) tds = link.parent.parent.parent.find_all('td') entry['torrent_seeds'] = int(tds[-2].contents[0]) entry['torrent_leeches'] = int(tds[-1].contents[0]) entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches']) # Parse content_size size = link.find_next(attrs={'class': 'detDesc'}).contents[0] size = re.search('Size ([\.\d]+)\xa0([GMK])iB', size) if size: if size.group(2) == 'G': entry['content_size'] = int(float(size.group(1)) * 1000 ** 3 / 1024 ** 2) elif size.group(2) == 'M': entry['content_size'] = int(float(size.group(1)) * 1000 ** 2 / 1024 ** 2) else: entry['content_size'] = int(float(size.group(1)) * 1000 / 1024 ** 2) entries.add(entry) return sorted(entries, reverse=True, key=lambda x: x.get('search_sort')) register_plugin(UrlRewritePirateBay, 'piratebay', groups=['urlrewriter', 'search'])
Python
0.000762
@@ -563,16 +563,17 @@ :torrent +s %5C.)?thep
081297b75fdcc9415be20e84b8db19a8eae483c9
Create match_smiley_to_cvr.py
examples/match_smiley_to_cvr.py
examples/match_smiley_to_cvr.py
Python
0.000035
@@ -0,0 +1,540 @@ +%22%22%22Print match and missing match between smiley and CVR.%22%22%22%0A%0A%0Afrom __future__ import print_function%0A%0Afrom cvrminer.cvrmongo import CvrMongo%0Afrom cvrminer.smiley import Smiley%0A%0A%0Acvr_mongo = CvrMongo()%0A%0Asmiley = Smiley()%0Acvrs = smiley.all_cvrs()%0A%0An_missing = 0%0An_ok = 0%0Afor cvr in sorted(cvrs):%0A company = cvr_mongo.get_company(cvr)%0A if company:%0A n_ok += 1%0A print('cvr %7B%7D ok'.format(cvr))%0A else:%0A n_missing += 1%0A print('cvr %7B%7D missing'.format(cvr))%0A%0A%0Aprint(%22Missing: %7B%7D; Ok: %7B%7D.%22.format(n_missing, n_ok))%0A
1f2917dd4146c2ddb6c0f5532e4aaa63f19f1a44
Create 1.py
python/problems/1/1.py
python/problems/1/1.py
Python
0.000001
@@ -0,0 +1,116 @@ +sum = 0%0Afor number in range(1000):%0A    if number %25 3 == 0 or number %25 5 == 0:%0A        sum = sum + number%0Aprint(sum)%0A
9641234ac5897ec3f1a5f6cf0b5a822e7b103ae8
Update consecutive-numbers-sum.py
Python/consecutive-numbers-sum.py
Python/consecutive-numbers-sum.py
# Time: O(sqrt(n)) # Space: O(1) # Given a positive integer N, # how many ways can we write it as a sum of # consecutive positive integers? # # Example 1: # # Input: 5 # Output: 2 # Explanation: 5 = 5 = 2 + 3 # Example 2: # # Input: 9 # Output: 3 # Explanation: 9 = 9 = 4 + 5 = 2 + 3 + 4 # Example 3: # # Input: 15 # Output: 4 # Explanation: 15 = 15 = 8 + 7 = 4 + 5 + 6 = 1 + 2 + 3 + 4 + 5 # Note: 1 <= N <= 10 ^ 9. class Solution(object): def consecutiveNumbersSum(self, N): """ :type N: int :rtype: int """ # if prime factorization of N is 2^k * p1^a * p2^b * .. # => result is the number of all odd factors = (a+1) * (b+1) * ... result = 1 while N % 2 == 0: N /= 2 i = 3 while i*i <= N: count = 0 while N % i == 0: N /= i count += 1 result *= count+1 i += 2 if N > 1: result *= 2 return result
Python
0.003216
@@ -534,32 +534,308 @@ int%0A %22%22%22%0A + # x + x+1 + x+2 + ... + x+l-1 = N = 2%5Ek * M%0A # =%3E l*x + (l-1)*l/2 = N%0A # =%3E x = (N -(l-1)*l/2)/l= 2%5Ek * M/l - (l-1)/2 is integer%0A # =%3E l could be 2 or any odd factor of M (excluding M)%0A # =%3E the answer is the number of all odd factors of M%0A # if pri @@ -895,22 +895,22 @@ # =%3E -result +answer is the
5c084bf10cb8feda62ac46939b3508a8c0e6a080
load csv files generic
parsers/data_parser.py
parsers/data_parser.py
Python
0.000001
@@ -0,0 +1,353 @@ +import numpy as np%0Afrom tflearn.data_utils import load_csv%0A%0Adef parse_csv(csv_file):%0A%09features, labels = load_csv(csv_file, target_column=4, columns_to_ignore=None, has_header=True)%0A%09feature_tensor = np.array(features).reshape(len(features%5B0%5D), len(features))%0A%09label_tensor = np.array(labels).reshape(len(labels), 1)%0A%09return feature_tensor, label_tensor
8be9ab8de9558efa6ded7d184a3cdc8dad43e4ff
Add an ajax_aware_render utility.
jsonit/utils.py
jsonit/utils.py
Python
0
@@ -0,0 +1,657 @@ +import os%0A%0Afrom django.http import HttpResponse%0Afrom django.template import RequestContext, loader%0A%0A%0Adef ajax_aware_render(request, template_list, extra_context=None, **kwargs):%0A if isinstance(template_list, basestring):%0A template_list = %5Btemplate_list%5D%0A if request.is_ajax():%0A new_template_list = %5B%5D%0A for name in template_list:%0A new_template_list.append('%25s.ajax.%25s' %25 os.path.splitext(name))%0A new_template_list.append(name)%0A template_list = new_template_list%0A c = RequestContext(request, extra_context)%0A t = loader.select_template(template_list)%0A return HttpResponse(t.render(c), **kwargs)%0A
a21add52424d81a36f5a34d067f70cfb2066636f
Add process module
androtoolbox/process.py
androtoolbox/process.py
Python
0.000001
@@ -0,0 +1,1570 @@ +import attr%0Aimport re%0A%0Afrom .adb import adb%0A%0A%0A@attr.s%0Aclass Process(object):%0A    name = attr.ib()%0A    user = attr.ib()%0A    pid = attr.ib(convert=int)%0A    parent_pid = attr.ib(convert=int)%0A    vsize = attr.ib(convert=int)%0A    rss = attr.ib(convert=int)%0A    wchan = attr.ib()%0A    pc = attr.ib()%0A    state = attr.ib()%0A%0A    @classmethod%0A    def parse_from_line(cls, line):%0A        user, pid, ppid, vsize, rss, wchan, pc, state, name = line.split()%0A        return cls(name, user, pid, ppid, vsize, rss, wchan, pc, state)%0A%0A%0Adef get_running_processes(filter=None):%0A    %22%22%22%0A    Get all running processes, (optionally) matching a filter.%0A%0A    :param filter: An optional regex to filter process names%0A    :type filter: str %7C None%0A    :rtype: list(Process)%0A    %22%22%22%0A    raw_data = adb.shell('ps').splitlines()%5B1:%5D # The first line is the table headers%0A    processes = %5BProcess.parse_from_line(raw_data_line) for raw_data_line in raw_data%5D%0A    if filter:%0A        processes = %5Bp for p in processes if re.search(filter, p.name)%5D%0A    return processes%0A%0A%0Adef pid_of(process):%0A    %22%22%22%0A    Get the PID of a running process%0A%0A    :param process: The process name%0A    %22%22%22%0A    processes = get_running_processes(process)%0A    if len(processes) != 1:%0A        return None%0A    return processes%5B0%5D.pid%0A%0A%0Adef kill(process):%0A    %22%22%22%0A    Kill a running process. If the process%0A%0A    note: Uses su%0A    :param process: The process' name or pid%0A    %22%22%22%0A    try:%0A        pid = int(process)%0A    except ValueError:%0A        pid = pid_of(process)%0A%0A    if pid:%0A        adb.shell('kill -9 %25s' %25 pid, use_su=True)%0A
add720894d1d29eb80ee99986c7e8473ef4f3067
upgrade script for translations works for published items (#1431)
superdesk/data_updates/00015_20181127-105425_archive.py
superdesk/data_updates/00015_20181127-105425_archive.py
Python
0
@@ -0,0 +1,2891 @@ +# -*- coding: utf-8; -*-%0A# This file is part of Superdesk.%0A# For the full copyright and license information, please see the%0A# AUTHORS and LICENSE files distributed with this source code, or%0A# at https://www.sourcefabric.org/superdesk/license%0A#%0A# Author : tomas%0A# Creation: 2018-11-27 10:54%0A%0Afrom superdesk.commands.data_updates import DataUpdate%0Afrom superdesk import get_resource_service%0A%0A# This upgrade script does the same as the previous one 00014_20181114-153727_archive.py%0A# except this works across multiple collections%0A%0A%0Adef get_root_nodes(tree_items):%0A root_nodes = %5B%5D%0A%0A for key in tree_items:%0A node = tree_items%5Bkey%5D%0A if node.parent is None:%0A root_nodes.append(node)%0A%0A return root_nodes%0A%0A%0Adef get_ids_recursive(list_of_nodes, resource):%0A # walks the tree and returns ids of items with a specified resource%0A%0A ids = %5B%5D%0A%0A for node in list_of_nodes:%0A if len(node.children) %3E 0:%0A ids.extend(get_ids_recursive(node.children, resource))%0A%0A if node.resource == resource:%0A ids.append(node.id)%0A%0A return ids%0A%0A%0Aclass TreeNode:%0A def __init__(self, id):%0A self.id = id%0A self.parent = None%0A self.resource = None%0A self.children = %5B%5D%0A%0A%0Aclass DataUpdate(DataUpdate):%0A%0A resource = 'archive' # will use multiple resources, keeping this here so validation passes%0A%0A def forwards(self, mongodb_collection, mongodb_database):%0A tree_items = %7B%7D%0A%0A # %60translated_from%60 can refer to archive%5B'_id'%5D or published%5B'item_id'%5D%0A%0A for resource in %5B'archive', 'published'%5D:%0A collection = mongodb_database%5Bresource%5D%0A%0A # building multiple trees%0A for item in collection.find(%7B'translated_from': %7B'$exists': True%7D%7D):%0A node_id = item%5B'_id'%5D%0A%0A if node_id not in tree_items:%0A tree_items%5Bnode_id%5D = TreeNode(node_id)%0A%0A node = tree_items%5Bnode_id%5D%0A node.resource = resource%0A parent_id = item%5B'translated_from'%5D%0A%0A if parent_id not in tree_items:%0A tree_items%5Bparent_id%5D = TreeNode(parent_id)%0A%0A node.parent = tree_items%5Bparent_id%5D%0A node.parent.children.append(node)%0A%0A # processing trees%0A for root_node in get_root_nodes(tree_items):%0A updates = %7B'translation_id': root_node.id%7D%0A%0A for resource in %5B'archive', 'published'%5D:%0A service = get_resource_service(resource)%0A ids = get_ids_recursive(%5Broot_node%5D, resource)%0A%0A for item_id in ids:%0A item = service.find_one(req=None, _id=item_id)%0A%0A if item is not None:%0A print(service.system_update(item_id, updates, item))%0A%0A def backwards(self, mongodb_collection, mongodb_database):%0A raise NotImplementedError()%0A
56c955b5700eb9e133024c9f51e39af9b065dfb1
Add neopixel rainbow demo.
demos/rainbow.py
demos/rainbow.py
Python
0
@@ -0,0 +1,767 @@ +# Add your Python code here. E.g.%0Afrom microbit import *%0Aimport neopixel%0A%0A%0Anp = neopixel.NeoPixel(pin13, 12)%0A%0Arainbow_raw = %5B%0A (255, 0, 0),%0A (255, 127, 0),%0A (255, 255, 0),%0A (127, 255, 0),%0A (0, 255, 0),%0A (0, 255, 127),%0A (0, 255, 255),%0A (0, 127, 255),%0A (0, 0, 255),%0A (127, 0, 255),%0A (255, 0, 255),%0A (255, 0, 127),%0A%5D%0A%0Adim = lambda r, g, b: (r//20, g//20, b//20)%0A%0Arainbow = %5Bdim(*c) for c in rainbow_raw%5D%0A%0Ashift = 0%0Awhile True:%0A for i in range(6):%0A np%5Bi%5D = rainbow%5B(i + shift) %25 len(rainbow)%5D%0A np%5Bi+6%5D = rainbow%5B(i + shift) %25 len(rainbow)%5D%0A np.show()%0A%0A if abs(accelerometer.get_y()) %3E= 90:%0A shift += 1 if accelerometer.get_y() %3E 0 else -1%0A shift %25= len(rainbow)%0A%0A sleep(100)%0A
255d9b002b820d1c475d2434858fd5ab3c6847cf
add SeriesStim
pliers/stimuli/misc.py
pliers/stimuli/misc.py
Python
0
@@ -0,0 +1,2439 @@ +%22%22%22Miscellaneous Stim classes.%22%22%22%0A%0Aimport numpy as np%0Aimport pandas as pd%0A%0Afrom .base import Stim%0A%0A%0Aclass SeriesStim(Stim):%0A    '''Represents a pandas Series as a pliers Stim.%0A%0A    Args:%0A        data (dict, pd.Series, array-like): A dictionary, pandas Series, or any%0A            other iterable (e.g., list or 1-D numpy array) that can be coerced%0A            to a pandas Series.%0A        filename (str, optional): Path or URL to data file. Must be readable%0A            using pd.read_csv().%0A        onset (float): Optional onset of the SeriesStim (in seconds) with%0A            respect to some more general context or timeline the user wishes%0A            to keep track of.%0A        duration (float): Optional duration of the SeriesStim, in seconds.%0A        order (int): Optional order of stim within some broader context.%0A        url (str): Optional URL to read data from. Must be readable using%0A            pd.read_csv().%0A        column (str): If filename or url is passed, defines the name of the%0A            column in the data source to read in as data.%0A        name (str): Optional name to give the SeriesStim instance. If None%0A            is provided, the name will be derived from the filename if one is%0A            defined. If no filename is defined, name will be an empty string.%0A        pd_args: Optional keyword arguments passed onto pd.read_csv() (e.g., %0A            to control separator, header, etc.).%0A    '''%0A%0A    def __init__(self, data=None, filename=None, onset=None, duration=None,%0A                 order=None, url=None, column=None, name=None, **pd_args):%0A%0A        if data is None:%0A            if filename is None and url is None:%0A                raise ValueError(%22No data provided! One of the data, filename,%22%0A                                 %22 or url arguments must be passed.%22)%0A            source = filename or url%0A            data = pd.read_csv(source, squeeze=True, **pd_args)%0A            if isinstance(data, pd.DataFrame):%0A                if column is None:%0A                    raise ValueError(%22Data source contains more than one %22%0A                                     %22column; please specify which column to %22%0A                                     %22use by passing the 'column' argument.%22)%0A                data = data.loc%5B:, column%5D%0A        %0A        data = pd.Series(data)%0A        self.data = data%0A        super().__init__(filename, onset, duration, order, name)%0A%0A    def save(self, path):%0A        self.data.to_csv(path)%0A
d270330375060d0bd8694bc8a2ea8bdbb3762586
add show_single_event for debugging
show_single_event.py
show_single_event.py
Python
0.000001
@@ -0,0 +1,314 @@ +from deepjets.generate import get_generator_input, generate%0A%0Agen_input = get_generator_input('pythia', 'w.config', random_state=1)%0A%0Afor event in generate(gen_input, 1):%0A print event.jets%0A print event.subjets%0A print event.subjets.shape%0A print event.trimmed_constit%0A print event.trimmed_constit.shape%0A
974ebd337c00a8b4a07991983eea0b9b60e1af08
Add example binary sink
sinks/binary_sink.py
sinks/binary_sink.py
Python
0.000001
@@ -0,0 +1,1015 @@ +import struct%0Aimport sys%0A%0A# Line format. We have:%0A# 8 byte unsigned timestamp%0A# 1 byte metric type%0A# 1 byte value type%0A# 2 byte key length%0A# 8 byte value%0ALINE = struct.Struct(%22%3CQBBHd%22)%0APREFIX_SIZE = 20%0A%0ATYPE_MAP = %7B%0A 1: %22kv%22,%0A 2: %22counter%22,%0A 3: %22timer%22%0A%7D%0AVAL_TYPE_MAP = %7B%0A 0: %22kv%22,%0A 1: %22sum%22,%0A 2: %22sum sq%22,%0A 3: %22mean%22,%0A 4: %22count%22,%0A 5: %22stddev%22,%0A 6: %22min%22,%0A 7: %22max%22,%0A 128: %22percentile%22%0A%7D%0A# Pre-compute all the possible percentiles%0Afor x in xrange(1, 100):%0A VAL_TYPE_MAP%5B128 %7C x%5D = %22P%2502d%22 %25 x%0A%0A%0Adef main():%0A while True:%0A # Read the prefix%0A prefix = sys.stdin.read(20)%0A if not prefix or len(prefix) != 20:%0A return%0A%0A # Unpack the line%0A (ts, type, val_type, key_len, val) = LINE.unpack(prefix)%0A type = TYPE_MAP%5Btype%5D%0A val_type = VAL_TYPE_MAP%5Bval_type%5D%0A%0A # Read the key%0A key = sys.stdin.read(key_len)%0A%0A # Print%0A print ts, type, val_type, key, val%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A%0A
420ebb50cfb5a366b35d058ad6018857b899a19e
Add function approximator to deal with off-switch
hiora_cartpole/offswitch_hfa.py
hiora_cartpole/offswitch_hfa.py
Python
0
@@ -0,0 +1,1014 @@ +# -*- coding: utf-8 -*-%0A%0Afrom __future__ import unicode_literals%0A%0A# HO %E2%80%A6 higher-order%0Aclass SliceHOFeatureVec(object):%0A def __init__(self, slice_i, entries_per_slice, feature_vec):%0A self.feature_vec = feature_vec%0A self.slice = slice(slice_i * entries_per_slice,%0A (slice_i+1) * entries_per_slice)%0A%0A%0A def dot(self, vec):%0A return self.feature_vec.dot(vec%5Bself.slice%5D)%0A%0A%0A def add_to(self, vec):%0A %22%22%22%0A%0A Warning: Modifies vec.%0A %22%22%22%0A self.feature_vec.add_to(vec%5Bself.slice%5D)%0A%0A%0A def alphabounds_diffdot(self, prev, elig):%0A %22%22%22%0A%0A Credits: http://people.cs.umass.edu/~wdabney/papers/alphaBounds.pdf%0A %22%22%22%0A return self.dot(elig) - prev.dot(elig)%0A%0A%0Adef make_feature_vec(feature_vec, n_weights):%0A def feature_vec_inner(state, action):%0A return SliceHOFeatureVec(state%5B0%5D, n_weights,%0A feature_vec(state%5B1%5D, action))%0A%0A return n_weights * 2, feature_vec_inner%0A
d154adc3486cf5671d757d8072a705ed4c67a433
Remove timestamp, fix time
scrapi/consumers/uwashington/consumer.py
scrapi/consumers/uwashington/consumer.py
# University of Washington consumer

from __future__ import unicode_literals

import requests
from datetime import date, timedelta, datetime
from dateutil.parser import *
import time
from lxml import etree

from scrapi.linter import lint
from scrapi.linter.document import RawDocument, NormalizedDocument
from nameparser import HumanName

NAME = u'uwdspace'
TODAY = date.today()
OAI_DC_BASE = 'http://digital.lib.washington.edu/dspace-oai/'
NAMESPACES = {'dc': 'http://purl.org/dc/elements/1.1/',
              'oai_dc': 'http://www.openarchives.org/OAI/2.0/',
              'ns0': 'http://www.openarchives.org/OAI/2.0/'}
DEFAULT = datetime(1970, 01, 01)
DEFAULT_ENCODING = 'utf-8'

record_encoding = None


def consume(days_back=5):
    base_url = OAI_DC_BASE + 'request?verb=ListRecords&metadataPrefix=oai_dc&from='
    start_date = TODAY - timedelta(days_back)
    url = base_url + str(start_date)
    records = get_records(url)

    xml_list = []
    for record in records:
        doc_id = record.xpath('ns0:header/ns0:identifier', namespaces=NAMESPACES)[0].text
        record = etree.tostring(record, encoding=(record_encoding or DEFAULT_ENCODING))
        xml_list.append(RawDocument({
            'doc': record,
            'source': NAME,
            'docID': copy_to_unicode(doc_id),
            'filetype': u'xml'
        }))

    return xml_list


def get_records(url):
    data = requests.get(url)
    record_encoding = data.encoding
    doc = etree.XML(data.content)
    records = doc.xpath('//ns0:record', namespaces=NAMESPACES)
    token = doc.xpath('//ns0:resumptionToken/node()', namespaces=NAMESPACES)

    if len(token) == 1:
        time.sleep(0.5)
        base_url = OAI_DC_BASE + 'request?verb=ListRecords&resumptionToken='
        url = base_url + token[0]
        records += get_records(url)

    return records


def copy_to_unicode(element):
    encoding = record_encoding or DEFAULT_ENCODING
    element = ''.join(element)
    if isinstance(element, unicode):
        return element
    else:
        return unicode(element, encoding=encoding)


def get_contributors(result):
    dctype = (result.xpath('//dc:type/node()', namespaces=NAMESPACES) or [''])[0]
    contributors = result.xpath('//dc:contributor/node()', namespaces=NAMESPACES)
    creators = result.xpath('//dc:creator/node()', namespaces=NAMESPACES)
    if 'hesis' not in dctype and 'issertation' not in dctype:
        all_contributors = contributors + creators
    else:
        all_contributors = creators

    contributor_list = []
    for person in all_contributors:
        name = HumanName(person)
        contributor = {
            'prefix': name.title,
            'given': name.first,
            'middle': name.middle,
            'family': name.last,
            'suffix': name.suffix,
            'email': u'',
            'ORCID': u'',
        }
        contributor_list.append(contributor)
    return contributor_list


def get_tags(result):
    tags = result.xpath('//dc:subject/node()', namespaces=NAMESPACES) or []
    thetags = []
    for tag in tags:
        if ';' in tag:
            moretags = tag.split(';')
            moretags = [word.strip() for word in moretags]
        elif '::' in tag:
            moretags = tag.split('::')
            moretags = [word.strip() for word in moretags]
        else:
            moretags = []
        thetags += moretags
    return [copy_to_unicode(tag.lower()) for tag in tags]


def get_ids(result, doc):
    serviceID = doc.get('docID')
    identifiers = result.xpath('//dc:identifier/node()', namespaces=NAMESPACES)
    url = ''
    doi = ''
    for item in identifiers:
        if 'hdl.handle.net' in item:
            url = item
        if 'doi' in item or 'DOI' in item:
            doi = item
            doi = doi.replace('doi:', '')
            doi = doi.replace('DOI:', '')
            doi = doi.replace('http://dx.doi.org/', '')
            doi = doi.strip(' ')
    if url == '':
        raise Exception('Warning: No url provided!')

    return {'serviceID': serviceID, 'url': copy_to_unicode(url), 'doi': copy_to_unicode(doi)}


def get_properties(result):
    result_type = (result.xpath('//dc:type/node()', namespaces=NAMESPACES) or [''])[0]
    rights = result.xpath('//dc:rights/node()', namespaces=NAMESPACES) or ['']
    if len(rights) > 1:
        copyright = ' '.join(rights)
    else:
        copyright = rights
    publisher = (result.xpath('//dc:publisher/node()', namespaces=NAMESPACES) or [''])[0]
    language = (result.xpath('//dc:language/node()', namespaces=NAMESPACES) or [''])[0]
    identifiers = result.xpath('//dc:identifier/node()', namespaces=NAMESPACES) or []
    ids = []
    for identifier in identifiers:
        if 'http://' not in identifier:
            ids.append(copy_to_unicode(identifier))
    props = {
        'type': copy_to_unicode(result_type),
        'language': copy_to_unicode(language),
        'publisherInfo': {
            'publisher': copy_to_unicode(publisher),
        },
        'identifiers': ids,
    }
    return props


def get_date_created(result):
    dates = result.xpath('//dc:date/node()', namespaces=NAMESPACES)
    date_list = []
    for item in dates:
        a_date = parse(str(item)[:10], yearfirst=True, default=DEFAULT).isoformat()
        date_list.append(a_date)
    min_date = min(date_list)
    return min_date


def get_date_updated(result):
    dateupdated = result.xpath('//ns0:header/ns0:datestamp/node()', namespaces=NAMESPACES)[0]
    date_updated = parse(dateupdated).isoformat()
    return date_updated


def normalize(raw_doc, timestamp):
    result = raw_doc.get('doc')
    try:
        result = etree.XML(result)
    except etree.XMLSyntaxError:
        print('Error in namespaces! Skipping this one...')
        return None

    title = result.xpath('//dc:title/node()', namespaces=NAMESPACES)[0]
    description = (result.xpath('//dc:description/node()', namespaces=NAMESPACES) or [''])[0]

    payload = {
        'title': copy_to_unicode(title),
        'contributors': get_contributors(result),
        'properties': get_properties(result),
        'description': copy_to_unicode(description),
        'tags': get_tags(result),
        'id': get_ids(result, raw_doc),
        'source': NAME,
        'dateUpdated': copy_to_unicode(get_date_updated(result)),
        'dateCreated': copy_to_unicode(get_date_created(result)),
        'timestamp': copy_to_unicode(timestamp),
    }

    # import json
    # print(json.dumps(payload, indent=4))
    return NormalizedDocument(payload)


if __name__ == '__main__':
    print(lint(consume, normalize))
Python
0.005017
@@ -355,29 +355,8 @@ ce'%0A -TODAY = date.today()%0A OAI_ @@ -811,13 +811,20 @@ e = -TODAY +date.today() - t @@ -5586,19 +5586,8 @@ _doc -, timestamp ):%0A @@ -6382,57 +6382,8 @@ )),%0A - 'timestamp': copy_to_unicode(timestamp),%0A
e150f6bc2401d396d9baed52d1cee747ee906141
extract all sentence from BCCWJ/SUW
Script/get_sent_from_BCCWJ_SUW.py
Script/get_sent_from_BCCWJ_SUW.py
Python
0.999999
@@ -0,0 +1,758 @@ +#!usr/bin/python%0A#coding:utf-8%0Aimport glob%0Aimport pickle%0Aimport sys%0A%0A%0Aif __name__ == %22__main__%22:%0A dir_name = '../Orig_Data/' #sys.args%5B1%5D%0A %0A fnames = sorted(glob.glob(dir_name + '*.txt') )%0A for fname in fnames:%0A sentences = ''%0A print fname%0A file_name = fname.split('/')%5B-1%5D%5B:-4%5D%0A try:%0A fin = open(fname).readlines()%0A except:%0A print(fname)%0A continue%0A for line in fin:%0A line = line.split('%5Ct')%0A try:%0A if line%5B9%5D == 'B':%0A sentences += '%5Cn'%0A sentences += line%5B22%5D%0A except:%0A continue%0A pickle.dump(sentences.split('%5Cn'), open('../Orig_Sent/' + file_name + '.pkl', 'w') )%0A
cfe103b4edc5e8366cccb7e34e1a890fe8ad9bfc
unify quotes
letsencrypt/tests/configuration_test.py
letsencrypt/tests/configuration_test.py
"""Tests for letsencrypt.configuration.""" import os import unittest import mock class NamespaceConfigTest(unittest.TestCase): """Tests for letsencrypt.configuration.NamespaceConfig.""" def setUp(self): self.namespace = mock.MagicMock( config_dir='/tmp/config', work_dir='/tmp/foo', foo='bar', server='https://acme-server.org:443/new') from letsencrypt.configuration import NamespaceConfig self.config = NamespaceConfig(self.namespace) def test_proxy_getattr(self): self.assertEqual(self.config.foo, 'bar') self.assertEqual(self.config.work_dir, '/tmp/foo') def test_server_path(self): self.assertEqual(['acme-server.org:443', 'new'], self.config.server_path.split(os.path.sep)) self.namespace.server = ('http://user:pass@acme.server:443' '/p/a/t/h;parameters?query#fragment') self.assertEqual(['user:pass@acme.server:443', 'p', 'a', 't', 'h'], self.config.server_path.split(os.path.sep)) @mock.patch('letsencrypt.configuration.constants') def test_dynamic_dirs(self, constants): constants.ACCOUNTS_DIR = 'acc' constants.BACKUP_DIR = 'backups' constants.CERT_KEY_BACKUP_DIR = 'c/' constants.CERT_DIR = 'certs' constants.IN_PROGRESS_DIR = '../p' constants.KEY_DIR = 'keys' constants.TEMP_CHECKPOINT_DIR = 't' self.assertEqual( self.config.accounts_dir, '/tmp/config/acc/acme-server.org:443/new') self.assertEqual(self.config.backup_dir, '/tmp/foo/backups') self.assertEqual(self.config.cert_dir, '/tmp/config/certs') self.assertEqual( self.config.cert_key_backup, '/tmp/foo/c/acme-server.org:443/new') self.assertEqual(self.config.in_progress_dir, '/tmp/foo/../p') self.assertEqual(self.config.key_dir, '/tmp/config/keys') self.assertEqual(self.config.temp_checkpoint_dir, '/tmp/foo/t') class RenewerConfigurationTest(unittest.TestCase): """Test for letsencrypt.configuration.RenewerConfiguration.""" def setUp(self): self.namespace = mock.MagicMock(config_dir='/tmp/config') from letsencrypt.configuration import RenewerConfiguration self.config = RenewerConfiguration(self.namespace) @mock.patch('letsencrypt.configuration.constants') def test_dynamic_dirs(self, constants): constants.ARCHIVE_DIR = "a" constants.LIVE_DIR = 'l' constants.RENEWAL_CONFIGS_DIR = "renewal_configs" constants.RENEWER_CONFIG_FILENAME = 'r.conf' self.assertEqual(self.config.archive_dir, '/tmp/config/a') self.assertEqual(self.config.live_dir, '/tmp/config/l') self.assertEqual( self.config.renewal_configs_dir, '/tmp/config/renewal_configs') self.assertEqual(self.config.renewer_config_file, '/tmp/config/r.conf') if __name__ == '__main__': unittest.main() # pragma: no cover
Python
0.029793
@@ -2489,11 +2489,11 @@ R = -%22a%22 +'a' %0A @@ -2562,17 +2562,17 @@ S_DIR = -%22 +' renewal_ @@ -2578,17 +2578,17 @@ _configs -%22 +' %0A
2db2727dcccf81c3dca2e86efabd8e40afb223d1
Automate Transfers: Add another pre-transfer script
transfers/pre-transfer/add_metadata.py
transfers/pre-transfer/add_metadata.py
Python
0
@@ -0,0 +1,714 @@ +#!/usr/bin/env python2%0A%0Aimport json%0Aimport os%0Aimport sys%0A%0Adef main(transfer_path):%0A basename = os.path.basename(transfer_path)%0A try:%0A _, dc_id, _ = basename.split('---')%0A except ValueError:%0A return 1%0A metadata = %5B%0A %7B%0A 'parts': 'objects',%0A 'dc.identifier': dc_id,%0A %7D%0A %5D%0A metadata_path = os.path.join(transfer_path, 'metadata')%0A if not os.path.exists(metadata_path):%0A os.makedirs(metadata_path)%0A metadata_path = os.path.join(metadata_path, 'metadata.json')%0A with open(metadata_path, 'w') as f:%0A json.dump(metadata, f)%0A return 0%0A%0Aif __name__ == '__main__':%0A transfer_path = sys.argv%5B1%5D%0A sys.exit(main(transfer_path))%0A
8549a8fe5e71e2e35a4f034549e95fa44f34a9cb
Add exception wrapper to prevent elb snapshot from blowing up.
security_monkey/watchers/ec2/ebs_snapshot.py
security_monkey/watchers/ec2/ebs_snapshot.py
# Copyright 2016 Bridgewater Associates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.ec2.ebs_snapshot
    :platform: Unix

.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <opensource@bwater.com>

"""
from security_monkey.decorators import record_exception, iter_account_region
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey import app

import datetime


def snapshot_name(snapshot):
    name_tag = None
    if snapshot.get('Tags') is not None:
        for tag in snapshot.get('Tags'):
            if tag['Key'] == 'Name':
                name_tag = tag['Value']
                break

    if name_tag is not None:
        return name_tag + ' (' + snapshot.get('SnapshotId') + ')'
    else:
        return snapshot.get('SnapshotId')


class EBSSnapshot(Watcher):
    index = 'ebssnapshot'
    i_am_singular = 'EBS Snapshot'
    i_am_plural = 'EBS Snapshots'

    def __init__(self, accounts=None, debug=False):
        super(EBSSnapshot, self).__init__(accounts=accounts, debug=debug)

        # naive single entry cache
        self.last_session = None
        self.last_session_account = None
        self.last_session_region = None
        self.last_session_datetime = None

    def get_session(self, **kwargs):
        # cache the session for performance,
        # but expect it to be expired if it is over 45 minutes old
        if kwargs['account_name'] == self.last_session_account:
            if kwargs['region'] == self.last_session_region:
                if self.last_session_datetime:
                    if self.last_session_datetime > datetime.datetime.now() - datetime.timedelta(minutes=45):
                        return self.last_session

        from security_monkey.common.sts_connect import connect
        self.last_session = connect(kwargs['account_name'], 'boto3.ec2.client',
                                    region=kwargs['region'],
                                    assumed_role=kwargs['assumed_role'])
        self.last_session_account = kwargs['account_name']
        self.last_session_region = kwargs['region']
        self.last_session_datetime = datetime.datetime.now()
        return self.last_session

    def get_attribute(self, attribute_name, result_key_name, snapshot, **kwargs):
        ec2 = self.get_session(**kwargs)
        attributes = self.wrap_aws_rate_limited_call(
            ec2.describe_snapshot_attribute,
            Attribute=attribute_name,
            SnapshotId=snapshot.get('SnapshotId'),
            DryRun=False)
        return attributes[result_key_name]

    @record_exception()
    def process_snapshot(self, snapshot, **kwargs):
        app.logger.debug("Slurping {index} ({name}) from {account}".format(
            index=EBSSnapshot.i_am_singular,
            name=kwargs['name'],
            account=kwargs['account_name']))
        return {
            'create_volume_permissions': self.get_attribute('createVolumePermission', 'CreateVolumePermissions', snapshot, **kwargs),
            'product_codes': self.get_attribute('productCodes', 'ProductCodes', snapshot, **kwargs),
            'name': snapshot_name(snapshot),
            'snapshot_id': snapshot.get('SnapshotId'),
            'volume_id': snapshot.get('VolumeId'),
            'state': snapshot.get('State'),
            'state_message': snapshot.get('StateMessage'),
            'start_time': str(snapshot.get('StartTime')),
            'progress': snapshot.get('Progress'),
            'ownerId': snapshot.get('OwnerId'),
            'description': snapshot.get('Description'),
            'volume_size': snapshot.get('VolumeSize'),
            'owner_alias': snapshot.get('OwnerAlias'),
            'tags': snapshot.get('Tags', []),
            'encrypted': snapshot.get('Encrypted', False),
            'kms_key_id': snapshot.get('KmsKeyId'),
            'data_encryption_key_id': snapshot.get('DataEncryptionKeyId'),
        }

    @record_exception()
    def describe_snapshots(self, **kwargs):
        ec2 = self.get_session(**kwargs)
        response = self.wrap_aws_rate_limited_call(ec2.describe_snapshots,
                                                   OwnerIds=['self'])
        snapshots = response.get('Snapshots')
        return [snapshot for snapshot in snapshots
                if not self.check_ignore_list(snapshot_name(snapshot))]

    def slurp(self):
        """
        :returns: item_list - list of available EBS snapshots defined by account
        :returns: exception_map - A dict where the keys are a tuple containing the
            location of the exception and the value is the actual exception
        """
        self.prep_for_slurp()

        @iter_account_region(index=self.index, accounts=self.accounts, service_name='ec2')
        def slurp_items(**kwargs):
            item_list = []
            exception_map = {}
            kwargs['exception_map'] = exception_map
            app.logger.debug("Checking {}/{}/{}".format(self.index,
                                                        kwargs['account_name'],
                                                        kwargs['region']))
            snapshots = self.describe_snapshots(**kwargs)

            if snapshots:
                app.logger.debug("Found {} {}.".format(len(snapshots), self.i_am_plural))

                for snapshot in snapshots:
                    kwargs['name'] = snapshot_name(snapshot)
                    config = self.process_snapshot(snapshot, **kwargs)

                    item = EBSSnapshotItem(region=kwargs['region'],
                                           account=kwargs['account_name'],
                                           name=kwargs['name'],
                                           config=config)
                    item_list.append(item)

            return item_list, exception_map
        return slurp_items()


class EBSSnapshotItem(ChangeItem):
    def __init__(self, region=None, account=None, name=None, config={}):
        super(EBSSnapshotItem, self).__init__(
            index=EBSSnapshot.index,
            region=region,
            account=account,
            name=name,
            new_config=config)
Python
0
@@ -2752,24 +2752,48 @@ st_session%0A%0A + @record_exception()%0A def get_
29555289b28e63655e5bb6fa89d163b5e3022827
add supervised loss as separate term
hypergan/losses/supervised.py
hypergan/losses/supervised.py
Python
0
@@ -0,0 +1,808 @@ +import tensorflow as tf%0Afrom hypergan.util.ops import *%0Afrom hypergan.util.hc_tf import *%0Aimport hyperchamber as hc%0A%0Adef config():%0A selector = hc.Selector()%0A selector.set(%22reduce%22, %5Btf.reduce_mean%5D)#reduce_sum, reduce_logexp work%0A%0A selector.set('create', create)%0A selector.set('batch_norm', layer_norm_1)%0A%0A return selector.random_config()%0A%0Adef create(config, gan):%0A batch_norm = config.batch_norm%0A batch_size = gan.config.batch_size%0A%0A num_classes = gan.config.y_dims%0A net = gan.graph.d_real%0A net = linear(net, num_classes, scope=%22d_fc_end%22, stddev=0.003)%0A net = batch_norm(batch_size, name='d_bn_end')(net)%0A%0A d_class_loss = tf.nn.softmax_cross_entropy_with_logits(net,gan.graph.y)%0A%0A gan.graph.d_class_loss=tf.reduce_mean(d_class_loss)%0A%0A return %5Bd_class_loss, None%5D%0A%0A
e1478f694d6ad422a87e03f71a79a8c1b5e77c5c
build for the entire framework
Sketches/AM/KPIFramework/setup.py
Sketches/AM/KPIFramework/setup.py
Python
0
@@ -0,0 +1,1511 @@ +#!/usr/bin/env python%0D%0A#%0D%0A# (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)%0D%0A# All Rights Reserved.%0D%0A#%0D%0A# You may only modify and redistribute this under the terms of any of the%0D%0A# following licenses(2): Mozilla Public License, V1.1, GNU General%0D%0A# Public License, V2.0, GNU Lesser General Public License, V2.1%0D%0A#%0D%0A# (1) Kamaelia Contributors are listed in the AUTHORS file and at%0D%0A# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,%0D%0A# not this notice.%0D%0A# (2) Reproduced in the COPYING file, and at:%0D%0A# http://kamaelia.sourceforge.net/COPYING%0D%0A# Under section 3.5 of the MPL, we are using this text since we deem the MPL%0D%0A# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this%0D%0A# notice is prohibited.%0D%0A#%0D%0A# Please contact us via: kamaelia-list-owner@lists.sourceforge.net%0D%0A# to discuss alternative licensing.%0D%0A# -------------------------------------------------------------------------%0D%0A%0D%0Afrom distutils.core import setup%0D%0A%0D%0Asetup(name = %22KPI%22,%0D%0A version = %220.1.0%22,%0D%0A description = %22KPI Framework for building secure streaming server%22,%0D%0A author = %22Anagha Mudigonda & Kamaelia Contributors%22,%0D%0A author_email = %22anagha_m@users.sourceforge.net%22,%0D%0A url = %22http://kamaelia.sourceforge.net/%22,%0D%0A packages = %5B%22KPI%22,%0D%0A %22KPI.Client%22,%0D%0A %22KPI.Server%22,%0D%0A %22KPI.Crypto%22,%0D%0A %22KPI.DB%22,%0D%0A %22%22%5D,%0D%0A long_description = %22%22%22%0D%0A%22%22%22%0D%0A )%0D%0A
53306793268cb31944d42caf95c275afcbe97e6d
Add migration for creating the Professional Certificate program type
course_discovery/apps/edx_catalog_extensions/migrations/0002_create_professional_certificate_program_type.py
course_discovery/apps/edx_catalog_extensions/migrations/0002_create_professional_certificate_program_type.py
Python
0
@@ -0,0 +1,1125 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.11 on 2016-12-19 19:51%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0APAID_SEAT_TYPES = ('credit', 'professional', 'verified',)%0APROGRAM_TYPE = 'Professional Certificate'%0A%0A%0Adef add_program_type(apps, schema_editor):%0A SeatType = apps.get_model('course_metadata', 'SeatType')%0A ProgramType = apps.get_model('course_metadata', 'ProgramType')%0A%0A seat_types = SeatType.objects.filter(slug__in=PAID_SEAT_TYPES)%0A%0A program_type, __ = ProgramType.objects.update_or_create(name=PROGRAM_TYPE)%0A program_type.applicable_seat_types.clear()%0A program_type.applicable_seat_types.add(*seat_types)%0A program_type.save()%0A%0A%0Adef drop_program_type(apps, schema_editor):%0A ProgramType = apps.get_model('course_metadata', 'ProgramType')%0A ProgramType.objects.filter(name=PROGRAM_TYPE).delete()%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('edx_catalog_extensions', '0001_squashed_0003_create_publish_to_marketing_site_flag'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(add_program_type, drop_program_type)%0A %5D%0A
8b894a02cf1d271b7df65e1c3efcac499100ed51
Add submission form tests
reddit/tests/test_submission.py
reddit/tests/test_submission.py
Python
0
@@ -0,0 +1,1144 @@ +from django.test import TestCase, Client%0Afrom reddit.forms import SubmissionForm%0A%0A%0Aclass TestSubmissionForm(TestCase):%0A def test_full_valid_submission(self):%0A test_data = %7B%0A 'title': 'submission_title',%0A 'url': 'http://example.com',%0A 'text': 'submission text'%0A %7D%0A form = SubmissionForm(data=test_data)%0A self.assertTrue(form.is_valid())%0A%0A def test_minimum_data_required(self):%0A test_data = %7B%0A 'title': 'submission title'%0A %7D%0A form = SubmissionForm(data=test_data)%0A self.assertTrue(form.is_valid())%0A%0A def test_invalid_data(self):%0A test_data = %7B%0A 'title': '.' * 300,%0A 'url': 'notaurl',%0A 'text': '.' * 5001%0A %7D%0A form = SubmissionForm(data=test_data)%0A self.assertEqual(form.errors%5B'title'%5D, %5Bu%22Ensure this value has at most 250 characters (it has 300).%22%5D)%0A self.assertEqual(form.errors%5B'url'%5D, %5Bu%22Enter a valid URL.%22%5D)%0A self.assertEqual(form.errors%5B'text'%5D, %5Bu%22Ensure this value has at most 5000 characters (it has 5001).%22%5D)%0A self.assertFalse(form.is_valid())%0A
d2802eebe9311243aabc5954f26719fa5544b378
Create matchingBrackets.py
GeneralPython/PyDataStructure/matchingBrackets.py
GeneralPython/PyDataStructure/matchingBrackets.py
Python
0.000001
@@ -0,0 +1,958 @@ +# https://www.geeksforgeeks.org/check-for-balanced-parentheses-in-an-expression/%0A%0Adef areParanthesisBalanced(expr):%0A stack = list()%0A%0A for i,chr in enumerate(expr):%0A #print(i, chr)%0A if chr in %5B'(','%7B','%5B'%5D:%0A stack.append(chr)%0A continue %0A %0A # IF current current character is not opening %0A # bracket, then it must be closing. So stack %0A # cannot be empty at this point. %0A if len(stack) == 0 :%0A return False%0A%0A if chr == ')':%0A x = stack.pop()%0A if x != '(':%0A return False%0A elif chr == '%7D':%0A x = stack.pop()%0A if x != '%7B':%0A return False%0A elif chr == '%5D':%0A x = stack.pop()%0A if x != '%5B':%0A return False%0A %0A return len(stack) == 0%0A%0A%0Aif __name__ == '__main__':%0A print(%22#########%22)%0A str__ = %22%7B()%7D%5B%5D%22%0A print(areParanthesisBalanced(str__))%0A
6e9a789aa3113403d6d60ca662605506ce70c4d1
Add empty Resources module.
app/api_v1/resources.py
app/api_v1/resources.py
Python
0
@@ -0,0 +1,72 @@ +%22%22%22This module contains the resources to be served on the endpoints.%22%22%22%0A
b223bc7023cd959c95171fc69466153a744f035a
Add note saying where code was lifted from.
src/new/util/util.py
src/new/util/util.py
####################
import functools
import gzip
import io
import random
import time

try:
    import socks
    NO_SOCKS = False
except ImportError:
    NO_SOCKS = True
import socket
###################
try:
    import urllib2
except ImportError:
    import urllib.request as urllib2
####################


class Util:
    @staticmethod
    def getSourceCode(url, proxy = None, returnRedirctUrl = False, maxRetries=1, waitRetryTime=1):
        """
        Loop to get around server denies for info or minor disconnects.
        """
        if (proxy <> None):
            if (NO_SOCKS):
                raise RuntimeError('socks library required to use proxy (e.g. SocksiPy)')
            proxySettings = proxy.split(':')
            socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, proxySettings[0],
                                  int(proxySettings[1]), True)
            socket.socket = socks.socksocket

        ret = None
        request = urllib2.Request(url, headers={
            'User-agent': """Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.472.14 Safari/534.3""",
            'Accept-encoding': 'gzip'
        })
        while (ret == None):
            try:
                f = urllib2.urlopen(request)
                encoding = f.headers.get('Content-Encoding')
                if encoding == None:
                    ret = f.read()
                else:
                    if encoding.upper() == 'GZIP':
                        compressedstream = io.BytesIO(f.read())
                        gzipper = gzip.GzipFile(fileobj=compressedstream)
                        ret = gzipper.read()
                    else:
                        raise RuntimeError('Unknown HTTP Encoding returned')
            except urllib2.URLError:
                if (maxRetries == 0):
                    break
                else:
                    # random dist. for further protection against anti-leech
                    # idea from wget
                    time.sleep(random.uniform(0.5*waitRetryTime, 1.5*waitRetryTime))
                    maxRetries -= 1

        if returnRedirctUrl:
            return ret, f.geturl()
        else:
            return ret

    @staticmethod
    def memoize(obj):
        cache = obj.cache = {}

        @functools.wraps(obj)
        def memoizer(*args, **kwargs):
            key = str(args) + str(kwargs)
            if key not in cache:
                cache[key] = obj(*args, **kwargs)
            return cache[key]
        return memoizer
Python
0
@@ -2171,16 +2171,118 @@ rn ret%0A%0A + # :SEE: http://wiki.python.org/moin/PythonDecoratorLibrary/#Alternate_memoize_as_nested_functions%0A @sta
faa1c167e6551da738f2039ef9e9373bde50ab41
Add unittest utils.
app/tests/test_utils.py
app/tests/test_utils.py
Python
0
@@ -0,0 +1,477 @@ +import unittest%0Aimport re%0Afrom app.util.utils import *%0A%0A%0Aclass utilsTest(unittest.TestCase):%0A %22%22%22Docstring for decorationsTest. %22%22%22%0A%0A def setUp(self):%0A %22%22%22 decorators: setup%0A %22%22%22%0A pass%0A%0A def tearDown(self):%0A pass%0A%0A def test_is_empty(self):%0A %22%22%22utils, is_empty: Check if an object is empty or contains spaces%0A :returns: TODO%0A %22%22%22%0A self.assertTrue(is_empty(''))%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
d9486bc6180a2dfe38a953eb84184e0410e1cb66
Add a Quartz backend for the null toolkit
enthought/enable/null/quartz.py
enthought/enable/null/quartz.py
Python
0.000001
@@ -0,0 +1,1924 @@ +#------------------------------------------------------------------------------%0A# Copyright (c) 2011, Enthought, Inc.%0A# All rights reserved.%0A#%0A# This software is provided without warranty under the terms of the BSD%0A# license included in enthought/LICENSE.txt and may be redistributed only%0A# under the conditions described in the aforementioned license. The license%0A# is also available online at http://www.enthought.com/licenses/BSD.txt%0A# Thanks for using Enthought open source!%0A#------------------------------------------------------------------------------%0A%0Aimport numpy as np%0A%0Afrom enthought.kiva.fonttools import Font%0Afrom enthought.kiva.quartz import ABCGI%0A%0Aclass NativeScrollBar(object):%0A pass%0A%0Aclass Window(object):%0A pass%0A%0ACompiledPath = ABCGI.CGMutablePath%0A%0Aclass GraphicsContext(ABCGI.CGLayerContext):%0A def __init__(self, size_or_array, window_gc=None, *args, **kwds):%0A gc = window_gc%0A if not gc:%0A # Create a tiny base context to spawn the CGLayerContext from.%0A # We are better off making our Layer from the window gc since%0A # the data formats will match and so it will be faster to draw the%0A # layer.%0A gc = ABCGI.CGBitmapContext((1,1))%0A if isinstance(size_or_array, np.ndarray):%0A # Initialize the layer with an image.%0A image = ABCGI.CGImage(size_or_array)%0A width = image.width%0A height = image.height%0A else:%0A # No initialization.%0A image = None%0A width, height = size_or_array%0A ABCGI.CGLayerContext.__init__(self, gc, (width, height))%0A if image is not None:%0A self.draw_image(image)%0A%0A @classmethod%0A def create_from_gc(klass, gc, size_or_array, *args, **kwds):%0A return klass(size_or_array, gc, *args, **kwds)%0A%0A%0Adef font_metrics_provider():%0A gc = GraphicsContext((1, 1))%0A gc.set_font(Font())%0A return gc%0A
5f9a2fe783891dd5a1f926060fcfa2561150d840
add cleese pipeline runner
reduction/run_pipeline_cleese.py
reduction/run_pipeline_cleese.py
Python
0.000001
@@ -0,0 +1,566 @@ +import make_apex_cubes%0Afrom os.path import join%0A%0Aroot = '/scratch/aginsbur/apex/'%0Arawpath = join(root,'raw/')%0Areducedpath = join(root,'reduced/')%0Amake_apex_cubes.june2013datapath = rawpath%0Amake_apex_cubes.june2013path = join(reducedpath,'june2013/')%0Amake_apex_cubes.h2copath = join(reducedpath, 'h2co_cubes/')%0Amake_apex_cubes.mergepath = join(reducedpath, 'merged_datasets/')%0Amake_apex_cubes.aorawpath = rawpath%0Amake_apex_cubes.aopath = join(reducedpath, '2010_reduced/')%0Amake_apex_cubes.diagplotdir = join(root,'diagnostic_plots/')%0A%0Amake_apex_cubes.do_everything()%0A
a659f0f8f4672933fc36cecfe62c65366c496f07
Add a package for VarDictJava@1.5.1 (#5626)
var/spack/repos/builtin/packages/vardictjava/package.py
var/spack/repos/builtin/packages/vardictjava/package.py
Python
0
@@ -0,0 +1,1938 @@ +##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/llnl/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0Aimport glob%0A%0A%0Aclass Vardictjava(Package):%0A %22%22%22VarDictJava is a variant discovery program written in Java.%0A It is a partial Java port of VarDict variant caller.%22%22%22%0A%0A homepage = %22https://github.com/AstraZeneca-NGS/VarDictJava%22%0A url = %22https://github.com/AstraZeneca-NGS/VarDictJava/releases/download/v1.5.1/VarDict-1.5.1.tar%22%0A%0A version('1.5.1', '8c0387bcc1f7dc696b04e926c48b27e6')%0A%0A depends_on('java@8:', type='run')%0A%0A def install(self, spec, prefix):%0A mkdirp(prefix.bin)%0A install('bin/VarDict', prefix.bin)%0A%0A mkdirp(prefix.lib)%0A files = %5Bx for x in glob.glob(%22lib/*jar%22)%5D%0A for f in files:%0A install(f, prefix.lib)%0A
cc582dd4b435ba06dc140b1ca96b688871e36abb
Add mock python package.
var/spack/repos/builtin.mock/packages/python/package.py
var/spack/repos/builtin.mock/packages/python/package.py
Python
0
@@ -0,0 +1,1871 @@ +##############################################################################%0A# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/llnl/spack%0A# Please also see the LICENSE file for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License (as published by%0A# the Free Software Foundation) version 2.1 dated February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public License%0A# along with this program; if not, write to the Free Software Foundation,%0A# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0Aclass Python(Package):%0A %22%22%22Dummy Python package to demonstrate preferred versions.%22%22%22%0A homepage = %22http://www.python.org%22%0A url = %22http://www.python.org/ftp/python/2.7.8/Python-2.7.8.tgz%22%0A%0A extendable = True%0A%0A version('3.5.1', 'be78e48cdfc1a7ad90efff146dce6cfe')%0A version('3.5.0', 'a56c0c0b45d75a0ec9c6dee933c41c36')%0A version('2.7.11', '6b6076ec9e93f05dd63e47eb9c15728b', preferred=True)%0A version('2.7.10', 'd7547558fd673bd9d38e2108c6b42521')%0A version('2.7.9', '5eebcaa0030dc4061156d3429657fb83')%0A version('2.7.8', 'd4bca0159acb0b44a781292b5231936f')%0A%0A def install(self, spec, prefix):%0A pass%0A%0A
f718f852bcc9be7d7ff57b8a0188499d5b1c9f99
Create pyglatin.py
pyglatin.py
pyglatin.py
Python
0.000012
@@ -0,0 +1,590 @@ +print %22Welcome to the Pig Latin Translator! %5Cn%22%0A%0Apyg = %22ay%22%0Aoriginal = raw_input(%22Enter a word: %22)%0A%0Aif len(original) %3E 0 and original.isalpha():%0A%09word = original.lower()%0A%09first = word%5B0%5D%0A%09if word%5B0%5D != %22a%22 or %22e%22 or %22i%22 or %22o%22 or %22u%22:%0A%09%09new_word = word + first + pyg%0A%09%09new_word = new_word%5B1:%5D%0A%0A%09else:%0A%09%09new_word = word + pyg%0A%09%09new_word = new_word%5B0:%5D%0A%0A%09print %22%5Cn%22%0A%09print new_word%0A%0Aelif len(original) == 0:%0A%09print %22Um, I'm waiting! %5Cn%22%0A%0Aelif original.isalpha() == False:%0A%09print %22No, not l33t. A WORD, please. %5Cn%22%0A%0Aelse:%0A%09print %22Wow, you broke the program. How the hell did you do that?! %5Cn%22%0A
a5edbf04345653b18bdb63ed9bd63625689b0f4c
add some simple unit tests for ADMM
odl/test/solvers/nonsmooth/admm_test.py
odl/test/solvers/nonsmooth/admm_test.py
Python
0
@@ -0,0 +1,2106 @@ +# Copyright 2014-2017 The ODL contributors%0A#%0A# This file is part of ODL.%0A#%0A# This Source Code Form is subject to the terms of the Mozilla Public License,%0A# v. 2.0. If a copy of the MPL was not distributed with this file, You can%0A# obtain one at https://mozilla.org/MPL/2.0/.%0A%0A%22%22%22Unit tests for ADMM.%22%22%22%0A%0Afrom __future__ import division%0Aimport odl%0Afrom odl.solvers import admm_linearized, Callback%0A%0Afrom odl.util.testutils import all_almost_equal, noise_element%0A%0A%0Adef test_admm_lin_input_handling():%0A %22%22%22Test to see that input is handled correctly.%22%22%22%0A%0A space = odl.uniform_discr(0, 1, 10)%0A%0A L = odl.ZeroOperator(space)%0A f = g = odl.solvers.ZeroFunctional(space)%0A%0A # Check that the algorithm runs. With the above operators and functionals,%0A # the algorithm should not modify the initial value.%0A x0 = noise_element(space)%0A x = x0.copy()%0A niter = 3%0A%0A admm_linearized(x, f, g, L, tau=1.0, sigma=1.0, niter=niter)%0A%0A assert x == x0%0A%0A # Check that a provided callback is actually called%0A class CallbackTest(Callback):%0A%0A def __init__(self):%0A self.was_called = False%0A%0A def __call__(self, *args, **kwargs):%0A self.was_called = True%0A%0A callback = CallbackTest()%0A admm_linearized(x, f, g, L, tau=1.0, sigma=1.0, niter=niter,%0A callback=callback)%0A assert callback.was_called%0A%0A%0Adef test_admm_lin_l1():%0A %22%22%22Verify that the correct value is returned for l1 dist optimization.%0A%0A Solves the optimization problem%0A%0A min_x %7C%7Cx - data_1%7C%7C_1 + 0.5 %7C%7Cx - data_2%7C%7C_1%0A%0A which has optimum value data_1 since the first term dominates.%0A %22%22%22%0A space = odl.rn(5)%0A%0A L = odl.IdentityOperator(space)%0A%0A data_1 = odl.util.testutils.noise_element(space)%0A data_2 = odl.util.testutils.noise_element(space)%0A%0A f = odl.solvers.L1Norm(space).translated(data_1)%0A g = 0.5 * odl.solvers.L1Norm(space).translated(data_2)%0A%0A x = space.zero()%0A admm_linearized(x, f, g, L, tau=1.0, sigma=2.0, niter=10)%0A%0A assert all_almost_equal(x, data_1, places=2)%0A%0A%0Aif __name__ == '__main__':%0A odl.util.test_file(__file__)%0A