commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
7ab37e931a836faa78a78f5d8358d845f72cdf49
|
Add low level Gemini serial command script
|
point/gemini_cmd.py
|
point/gemini_cmd.py
|
Python
| 0.000003
|
@@ -0,0 +1,1195 @@
+#!/usr/bin/env python3%0A%0A%22%22%22%0AA simple script for sending raw serial commands to Gemini.%0A%22%22%22%0A%0Aimport time%0Aimport serial%0Aimport readline%0A%0Adef main():%0A%0A ser = serial.Serial('/dev/ttyACM0', baudrate=9600)%0A%0A while True:%0A cmd = input('%3E ')%0A%0A if len(cmd) == 0:%0A continue%0A%0A # losmandy native commands -- add checksum%0A if cmd%5B0%5D == '%3C' or cmd%5B0%5D == '%3E':%0A%0A if ':' not in cmd:%0A print(%22Rejected: Native command must contain a ':' character%22)%0A continue%0A%0A checksum = 0%0A for c in cmd:%0A checksum = checksum %5E ord(c)%0A checksum %25= 128%0A checksum += 64%0A cmd = cmd + chr(checksum) + '#'%0A%0A print('Native command: ' + cmd)%0A%0A # LX200 command format%0A elif cmd%5B0%5D == ':':%0A print('LX200 command: ' + cmd)%0A pass%0A else:%0A print(%22Rejected: Must start with ':', '%3C', or '%3E'%22)%0A continue%0A%0A ser.write(cmd.encode())%0A time.sleep(0.1)%0A reply = ser.read(ser.in_waiting).decode()%0A if len(reply) %3E 0:%0A print('reply: ' + reply)%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
f65789fb705b43b446d1fc4b899074a66685a420
|
add missed file
|
workbench/clients/workbench_client.py
|
workbench/clients/workbench_client.py
|
Python
| 0.000001
|
@@ -0,0 +1,915 @@
+''' This encapsulates some boilerplate workbench client code '''%0Aimport ConfigParser%0Aimport argparse%0Aimport os%0A%0Adef grab_server_args():%0A ''' Grab server info from configuration file '''%0A workbench_conf = ConfigParser.ConfigParser()%0A config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini')%0A workbench_conf.read(config_path)%0A server = workbench_conf.get('workbench', 'server_uri')%0A port = workbench_conf.get('workbench', 'server_port')%0A%0A # Collect args from the command line%0A parser = argparse.ArgumentParser()%0A parser.add_argument('-s', '--server', type=str, default=server, help='location of workbench server')%0A parser.add_argument('-p', '--port', type=int, default=port, help='port used by workbench server')%0A args, unknown = parser.parse_known_args()%0A server = str(args.server)%0A port = str(args.port)%0A%0A return %7B'server':server, 'port':port%7D%0A
|
|
f203136772cfdca96a44a848d646426a42111698
|
Solve 20.
|
020/solution.py
|
020/solution.py
|
Python
| 0.999988
|
@@ -0,0 +1,287 @@
+%22%22%22 Project Euler problem #20. %22%22%22%0A%0Aimport math as mt%0A%0A%0Adef problem():%0A %22%22%22 Solve the problem.%0A%0A Find the sum of the digits in the number 100!%0A%0A Answer: 648%0A%0A %22%22%22%0A num = mt.factorial(100)%0A return sum(map(int, str(num)))%0A%0A%0Aif __name__ == '__main__':%0A print problem()%0A
|
|
fe88269d03915e06cba0d0d228e2f4e78592d172
|
Create 0007_ssoaccesslist.py
|
evewspace/API/migrations/0007_ssoaccesslist.py
|
evewspace/API/migrations/0007_ssoaccesslist.py
|
Python
| 0.000003
|
@@ -0,0 +1,748 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('core', '0001_initial'),%0A ('API', '0006_auto_20161223_1751'),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='SSOAccessList',%0A fields=%5B%0A ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),%0A ('char_id', models.IntegerField(null=True)),%0A ('char_name', models.CharField(max_length=255)),%0A ('corp', models.OneToOneField(related_name='access_list_corp', null=True, to='core.Corporation')),%0A %5D,%0A ),%0A %5D%0A
|
|
97749df49156b652d12104b030e7ec7cc327d123
|
add test for canonical
|
quantecon/tests/test_dle.py
|
quantecon/tests/test_dle.py
|
"""
Tests for dle.py file
"""
import sys
import os
import unittest
import numpy as np
from scipy.linalg import LinAlgError
from numpy.testing import assert_allclose
from numpy import dot
from quantecon.dle import DLE
ATOL = 1e-10
class TestDLE(unittest.TestCase):
    """Tests for the DLE class's transformation into an LQ problem.

    Since LQ control is tested separately, these tests verify the
    transformation that rewrites a DLE economy into a form suitable for
    the LQ solver, along with the steady-state computation.
    """

    def setUp(self):
        """
        Given LQ control is tested we will test the transformation
        to alter the problem into a form suitable to solve using LQ
        """
        # Initial Values
        gam = 0
        gamma = np.array([[gam], [0]])
        phic = np.array([[1], [0]])
        phig = np.array([[0], [1]])
        phi1 = 1e-4
        phii = np.array([[0], [-phi1]])
        deltak = np.array([[.95]])
        thetak = np.array([[1]])
        beta = np.array([[1 / 1.05]])
        ud = np.array([[5, 1, 0], [0, 0, 0]])
        a22 = np.array([[1, 0, 0], [0, 0.8, 0], [0, 0, 0.5]])
        c2 = np.array([[0, 1, 0], [0, 0, 1]]).T
        llambda = np.array([[0]])
        pih = np.array([[1]])
        deltah = np.array([[.9]])
        thetah = np.array([[1]]) - deltah
        ub = np.array([[30, 0, 0]])
        # NOTE(review): x0 is built but never passed to DLE below -- looks
        # like a leftover initial state; confirm before removing.
        x0 = np.array([[5, 150,1,0,0]]).T
        # DLE takes the economy as three tuples: information, technology,
        # and preferences (argument order matters).
        information = (a22, c2, ub, ud)
        technology = (phic, phig, phii, gamma, deltak, thetak)
        preferences = (beta, llambda, pih, deltah, thetah)
        self.dle = DLE(information, technology, preferences)

    def tearDown(self):
        # Drop the DLE instance so each test gets a fresh setUp.
        del self.dle

    def test_transformation_Q(self):
        # Expected Q matrix of the equivalent LQ problem.
        Q_solution = np.array([[5.e-09]])
        assert_allclose(Q_solution, self.dle.Q)

    def test_transformation_R(self):
        # Expected R matrix of the equivalent LQ problem.
        R_solution = np.array([[0., 0., 0., 0., 0.],
                               [0., 0., 0., 0., 0.],
                               [0., 0., 312.5, -12.5, 0.],
                               [0., 0., -12.5, 0.5, 0.],
                               [0., 0., 0., 0., 0.]])
        assert_allclose(R_solution, self.dle.R)

    def test_transformation_A(self):
        # Expected A matrix of the equivalent LQ problem.
        A_solution = np.array([[0.9, 0., 0.5, 0.1, 0.],
                               [0., 0.95, 0., 0., 0.],
                               [0., 0., 1., 0., 0.],
                               [0., 0., 0., 0.8, 0.],
                               [0., 0., 0., 0., 0.5]])
        assert_allclose(A_solution, self.dle.A)

    def test_transformation_B(self):
        # Expected B matrix of the equivalent LQ problem.
        B_solution = np.array([[-0.],
                               [1.],
                               [0.],
                               [0.],
                               [0.]])
        assert_allclose(B_solution, self.dle.B)

    def test_transformation_C(self):
        # Expected C matrix of the equivalent LQ problem.
        C_solution = np.array([[0., 0.],
                               [0., 0.],
                               [0., 0.],
                               [1., 0.],
                               [0., 1.]])
        assert_allclose(C_solution, self.dle.C)

    def test_transformation_W(self):
        # Expected W matrix of the equivalent LQ problem.
        W_solution = np.array([[0., 0., 0., 0., 0.]])
        assert_allclose(W_solution, self.dle.W)

    def test_compute_steadystate(self):
        # Expected steady-state values, keyed by the attribute name that
        # compute_steadystate() sets on the DLE instance.
        solutions = {
            'css' : np.array([[5.]]),
            'sss' : np.array([[5.]]),
            'iss' : np.array([[0.]]),
            'dss' : np.array([[5.], [0.]]),
            'bss' : np.array([[30.]]),
            'kss' : np.array([[0.]]),
            'hss' : np.array([[5.]]),
        }
        self.dle.compute_steadystate()
        for item in solutions.keys():
            # atol guards against tiny numerical noise around exact zeros.
            assert_allclose(self.dle.__dict__[item], solutions[item], atol=ATOL)
if __name__ == '__main__':
    # Build and run the TestDLE suite, reporting verbosely on stderr.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2, stream=sys.stderr)
    runner.run(loader.loadTestsFromTestCase(TestDLE))
|
Python
| 0
|
@@ -3505,16 +3505,373 @@
=ATOL)%0A%0A
+ def test_canonical(self):%0A solutions = %7B%0A 'pihat': np.array(%5B%5B1.%5D%5D),%0A 'llambdahat': np.array(%5B%5B-1.48690584e-19%5D%5D),%0A 'ubhat': np.array(%5B%5B30., -0., -0.%5D%5D)%0A %7D%0A self.dle.canonical()%0A for item in solutions.keys():%0A assert_allclose(self.dle.__dict__%5Bitem%5D, solutions%5Bitem%5D, atol=ATOL)%0A%0A%0A
if __nam
|
5b67f6ddea05cb301a317e500657cb1cd0949bff
|
Create solution.py
|
hackerrank/algorithms/sorting/easy/running_time_of_quicksort/py/solution.py
|
hackerrank/algorithms/sorting/easy/running_time_of_quicksort/py/solution.py
|
Python
| 0.000018
|
@@ -0,0 +1,1578 @@
+#!/bin/python%0A%0Aclass QuickSort(object):%0A def __init__(self, debugMode = False):%0A self._debugMode = debugMode%0A self._swapCount = 0%0A %0A def partition(self, L, lo, hi):%0A if hi - lo %3C 2:%0A return lo%0A i = j = lo%0A v = hi - 1%0A while i %3C v:%0A if L%5Bi%5D %3C L%5Bv%5D:%0A L%5Bi%5D, L%5Bj%5D = L%5Bj%5D, L%5Bi%5D%0A if self._debugMode: self._swapCount += 1%0A j += 1%0A i += 1%0A L%5Bj%5D, L%5Bv%5D = L%5Bv%5D, L%5Bj%5D%0A if self._debugMode: self._swapCount += 1%0A return j%0A %0A def sort(self, L):%0A def helper(L, lo, hi):%0A if hi - lo %3C 2:%0A return lo%0A v = self.partition(L, lo, hi)%0A helper(L, lo, v)%0A helper(L, v + 1, hi)%0A helper(L, 0, len(L))%0A %0Aclass InsertionSort(object):%0A def __init__(self, debugMode = False):%0A self._debugMode = debugMode%0A self._swapCount = 0%0A %0A def sort(self, L):%0A for i in range(len(L)):%0A key = L%5Bi%5D%0A for j in reversed(range(i)):%0A if L%5Bj%5D %3C key:%0A break%0A L%5Bj%5D, L%5Bj + 1%5D = L%5Bj + 1%5D, L%5Bj%5D%0A if self._debugMode: self._swapCount += 1%0A %0Asize = int(raw_input())%0AL = %5Bint(value) for value in raw_input().split()%5D%0A %0AsortingAlgos = InsertionSort(True), QuickSort(True)%0AswapCounts = %5B%5D%0Afor algo in sortingAlgos:%0A LCopy = L%5B:%5D%0A algo.sort(LCopy)%0A swapCounts.append(algo._swapCount)%0Afor i in range(len(swapCounts) - 1):%0A print swapCounts%5Bi%5D - swapCounts%5Bi + 1%5D%0A
|
|
844d94619f2cf221ab5bd550f3136be4d164155b
|
add working dir
|
working_dir/diff.py
|
working_dir/diff.py
|
Python
| 0.000002
|
@@ -0,0 +1,1757 @@
+#! /usr/bin/env python%0A#! coding: utf8%0Aimport os, argparse, re, glob%0A%0Adb_user = 'aleph'%0Adb_pass = 'swbrIcu3Iv4cEhnTzmJL'%0A%0A# parse args%0Aparser = argparse.ArgumentParser(description='')%0Aparser.add_argument('--locale', '-l', default='zhCN',%0A help='Locale to extract, eg. zhCN, default zhCN')%0Aparser.add_argument('--version', %0A help='version string from versions, eg. WOW-21655patch7.0.3_Beta')%0Aargs = vars(parser.parse_args())%0A%0A# generate db_name%0Adb_name = '%25s.%25s_%25s' %25 (%0A re.findall('WOW%5C-(%5B0-9%5D+)patch(%5B0-9.%5D+)_Beta',args%5B'version'%5D)%5B0%5D%5B::-1%5D+(args%5B'locale'%5D,)%0A)%0A%0A# clear caches/trashes%0A#os.system('rm -rf CASCExplorer/CASCConsole/cache/')%0A%0A# Blizzard CDN -%3E local dbcs%0Aprint('%5B*%5D Downloading necessary files...')%0Aos.system('mono CASCConsole.exe %22DBFilesClient*%22 ./ ./ %7Blocale%7D None True %22%7Bversion%7D%22'.format(%0A locale = args%5B'locale'%5D,%0A version = args%5B'version'%5D%0A))%0Aos.system('mv DBFILESCLIENT/ DBFilesClient/')%0A%0A# create db%0Aos.system('mysql --user=%7Bdb_user%7D --password=%7Bdb_pass%7D -e %22drop database if exists %5C%60%7Bdb_name%7D%5C%60%22'.format(%0A db_user = db_user, db_pass = db_pass, db_name = db_name%0A))%0Aos.system('mysql --user=%7Bdb_user%7D --password=%7Bdb_pass%7D -e %22create database %5C%60%7Bdb_name%7D%5C%60%22'.format(%0A db_user = db_user, db_pass = db_pass, db_name = db_name%0A))%0A%0A# dbc2sql%0Afor file in glob.glob('./DBFilesClient/*'):%0A dbc_name = os.path.splitext(os.path.basename(file))%5B0%5D%0A print('%5B*%5D Importing %25s...' %25 dbc_name)%0A os.system('mono DBC%5C Viewer.exe %25s' %25 file)%0A os.system('mysql --user=aleph --password=%25s %25s %3C %25s.sql' %25 (db_pass, db_name, dbc_name))%0A os.system('rm -rf %25s.sql' %25 dbc_name)%0A%0A# diff%0Aos.system('./lcqcmp %7Bdb_user%7D %7Bdb_pass%7D nga.txt'.format(%0A db_user = db_user,%0A db_pass = db_pass,%0A))%0A
|
|
bb0178d0b97f52bb163cf13be3bd763840426f32
|
Add API tests
|
django/artists/tests/test_api.py
|
django/artists/tests/test_api.py
|
Python
| 0
|
@@ -0,0 +1,1936 @@
+import json%0A%0Afrom django.core.urlresolvers import reverse%0Afrom mock import patch%0Afrom rest_framework import status%0Afrom rest_framework.test import APITestCase%0A%0Afrom artists.models import Artist%0Afrom echonest.models import SimilarResponse%0A%0A%0Aclass ArtistTest(APITestCase):%0A%0A @patch('echonest.utils.get_similar_from_api')%0A def test_find_artists(self, get_similar):%0A url = reverse('artist-list')%0A names = %5B%22Mike Doughty%22, %22Jonathan Coulton%22%5D%0A response = %7B%0A 'response': %7B%0A 'status': %7B%0A 'message': 'Success',%0A 'version': '4.2',%0A 'code': 0,%0A %7D,%0A 'artists': %5B%0A %7B'id': 'ARHE4MO1187FB4014D', 'name': 'Mike Doughty'%7D,%0A %7B'id': 'ARW7K0P1187B9B5B47', 'name': 'Barenaked Ladies'%7D,%0A %7B'id': 'ARXSNCN1187B9B06A3', 'name': 'Jonathan Coulton'%7D%0A %5D,%0A %7D,%0A %7D%0A artists = %5BArtist.objects.create(name=n) for n in names%5D%0A get_similar.return_value = SimilarResponse(%0A name=%22They Might Be Giants%22,%0A response=json.dumps(response),%0A )%0A data = %7B'name': %22They Might Be Giants%22%7D%0A response = self.client.get(url, data, format='json')%0A self.assertEqual(response.status_code, status.HTTP_200_OK)%0A self.assertEqual(response.data, %5B%0A %7B'id': a.id, 'name': a.name, 'links': list(a.links.all())%7D%0A for a in artists%0A %5D)%0A%0A def test_get_artist(self):%0A artist = Artist.objects.create(name=%22Brad Sucks%22)%0A url = reverse('artist-detail', args=%5Bartist.id%5D)%0A response = self.client.get(url, format='json')%0A self.assertEqual(response.status_code, status.HTTP_200_OK)%0A self.assertEqual(response.data, %7B%0A 'id': artist.id,%0A 'name': artist.name,%0A 'links': list(artist.links.all()),%0A %7D)%0A
|
|
36beb6ae8bc41e5d131dbbdc65d6716d498375c7
|
add script to diff bonferonni & benjamini-whitney p-value corrections
|
server/diffBHvsBon.py
|
server/diffBHvsBon.py
|
Python
| 0
|
@@ -0,0 +1,2485 @@
+#!/usr/bin/env python2.7%0A%22%22%22%0AdiffBHvsBon.py%0AThis reports differnences between the BenjaminiWhitney-FDR p-value correction%0Avs. the Bonferroni%0A%22%22%22%0A%0Aimport sys, os, csv, traceback, glob%0A%0Adef diffBHvsBon():%0A #basePath = '/Users/swat/data/mcrchopra/first/'%0A #tmpBase = '/Users/swat/tmp/'%0A tmpBase = '/cluster/home/swat/tmp/'%0A basePath = '/cluster/home/swat/data/pancan12/mar17_ranksums/'%0A %0A # Build a list of layer names with index corresponding to the layers%0A layers = glob.glob(basePath + 'layer_*.tab')%0A layerIndex = %5B'empty' for x in range(len(layers))%5D%0A filename = os.path.join(basePath, 'layers.tab')%0A with open(filename, 'r') as fOut:%0A fOut = csv.reader(fOut, delimiter='%5Ct')%0A %0A for i, line in enumerate(fOut.__iter__()):%0A # layer_2044.tab%0A j = int(line%5B1%5D%5B6:-4%5D)%0A layerIndex%5Bj%5D = line%5B0%5D%0A%0A # Save each stats whose two p-value corrections are the same%0A searches =%5B'stats_*'%5D%0A #searches =%5B'stats_*', 'statsL_*'%5D%0A outFiles = %5B%0A 'stats_sameBHvsBon.tab',%0A 'statsL_sameBHvsBon.tab',%0A %5D%0A i = 0;%0A same = 0%0A diff = 0%0A for search in searches:%0A search = basePath + search%0A files = glob.glob(search)%0A with open(tmpBase + outFiles%5Bi%5D, 'w') as fOut:%0A fOut = csv.writer(fOut, delimiter='%5Ct')%0A #fOut.writerow(%5B'#p-value correction', 'layer1', 'layer2'%5D)%0A for file in files:%0A layer1 = layerIndex%5Bint(file%5Blen(basePath) + 6:-4%5D)%5D%0A with open(file, 'r') as f:%0A f = csv.reader(f, delimiter='%5Ct')%0A for i, line in enumerate(f.__iter__()):%0A # stats_*: TP63_expression altered 0.0003403091 0.0005671818 0.0005671818%0A if line%5B2%5D == line%5B3%5D:%0A same += 1%0A #fOut.writerow(%5Bline%5B3%5D, layer1, line%5B0%5D%5D)%0A else:%0A diff += 1%0A %0A fOut.writerow(%5B'same: ' + str(same) + ', diff: ' + str(diff)%5D)%0A i += 1%0A return 0%0A%0Aif __name__ == %22__main__%22 :%0A try:%0A # Get the return code to return%0A # Don't just exit with it because 
sys.exit works by exceptions.%0A return_code = diffBHvsBon()%0A except:%0A traceback.print_exc()%0A # Return a definite number and not some unspecified error code.%0A return_code = 1%0A %0A sys.exit(return_code)%0A
|
|
55fa30c236095006e6f9c970ef668598c4348a96
|
Add microservice plugin for adding static attributes to responses.
|
src/satosa/micro_service/attribute_modifications.py
|
src/satosa/micro_service/attribute_modifications.py
|
Python
| 0
|
@@ -0,0 +1,1099 @@
+import os%0A%0Aimport yaml%0A%0Afrom satosa.internal_data import DataConverter%0Afrom satosa.micro_service.service_base import ResponseMicroService%0A%0A%0Aclass AddStaticAttributes(ResponseMicroService):%0A %22%22%22%0A Add static attributes to the responses.%0A%0A The path to the file describing the mapping (as YAML) of static attributes must be specified%0A with the environment variable 'SATOSA_STATIC_ATTRIBUTES'.%0A %22%22%22%0A def __init__(self, internal_attributes):%0A super(AddStaticAttributes, self).__init__()%0A self.data_converter = DataConverter(internal_attributes)%0A%0A mapping_file = os.environ.get(%22SATOSA_STATIC_ATTRIBUTES%22)%0A if not mapping_file:%0A raise ValueError(%22Could not find file containing mapping of static attributes.%22)%0A%0A with open(mapping_file) as f:%0A self.static_attributes = yaml.safe_load(f)%0A%0A def process(self, context, data):%0A all_attributes = data.get_attributes()%0A all_attributes.update(self.data_converter.to_internal(%22saml%22, self.static_attributes))%0A data.add_attributes(all_attributes)%0A return data%0A
|
|
b72dd1c890491ccfe2de66f89f5adc035e862acb
|
Create HtmlParser.py
|
service/HtmlParser.py
|
service/HtmlParser.py
|
Python
| 0.000002
|
@@ -0,0 +1,291 @@
+#########################################%0A# HtmlParser.py%0A# description: html parser%0A# categories: %5Bdocument%5D%0A# possibly more info @: http://myrobotlab.org/service/HtmlParser%0A#########################################%0A# start the service%0Ahtmlparser = Runtime.start(%22htmlparser%22,%22HtmlParser%22)%0A
|
|
9c8402bdadb4860a3876aa2ab0f94b9ddac8cfd5
|
Add offboard_sample.py
|
script/offboard_sample.py
|
script/offboard_sample.py
|
Python
| 0.000001
|
@@ -0,0 +1,2334 @@
+#!/usr/bin/env python%0A%0Aimport rospy%0A%0Afrom geometry_msgs.msg import PoseStamped%0Afrom mavros_msgs.msg import State%0Afrom mavros_msgs.srv import CommandBool, CommandBoolRequest%0Afrom mavros_msgs.srv import SetMode, SetModeRequest%0A%0Acurrent_state = State()%0Adef state_cb(msg):%0A global current_state%0A current_state = msg%0A%0Adef offboard_node():%0A%0A rospy.init_node(%22offb_node%22)%0A r = rospy.Rate(20)%0A%0A rospy.Subscriber(%22mavros/state%22, State, state_cb)%0A local_pos_pub = rospy.Publisher(%22mavros/setpoint_position/local%22,%0A PoseStamped,%0A queue_size=1000)%0A arming_client = rospy.ServiceProxy(%22mavros/cmd/arming%22, CommandBool)%0A set_mode_client = rospy.ServiceProxy(%22mavros/set_mode%22, SetMode)%0A%0A while not rospy.is_shutdown() and not current_state.connected:%0A r.sleep()%0A%0A pose = PoseStamped()%0A pose.pose.position.x = 0%0A pose.pose.position.y = 0%0A pose.pose.position.z = 2%0A%0A for i in range(100):%0A local_pos_pub.publish(pose)%0A r.sleep()%0A%0A if rospy.is_shutdown():%0A break%0A%0A offb_set_mode = SetModeRequest()%0A offb_set_mode.custom_mode = %22OFFBOARD%22%0A%0A arm_cmd = CommandBoolRequest()%0A arm_cmd.value = True%0A%0A last_request = rospy.Time.now()%0A%0A while not rospy.is_shutdown():%0A if current_state.mode != %22OFFBOARD%22 %5C%0A and (rospy.Time.now() - last_request %3E rospy.Duration(5)):%0A%0A try:%0A offb_set_mode_resp = set_mode_client(offb_set_mode)%0A if offb_set_mode_resp.mode_sent:%0A rospy.loginfo(%22Offboard enabled%22)%0A except rospy.ServiceException as e:%0A rospy.logwarn(e)%0A%0A last_request = rospy.Time.now()%0A%0A else:%0A if not current_state.armed %5C%0A and (rospy.Time.now() - last_request %3E rospy.Duration(5)):%0A%0A try:%0A arm_cmd_resp = arming_client(arm_cmd)%0A if arm_cmd_resp.success:%0A rospy.loginfo(%22Vehicle armed%22)%0A except rospy.ServiceException as e:%0A rospy.logwarn(e)%0A%0A last_request = rospy.Time.now()%0A%0A local_pos_pub.publish(pose)%0A r.sleep()%0A%0Aif 
__name__ == %22__main__%22:%0A try:%0A offboard_node()%0A except rospy.ROSInterruptException:%0A pass
|
|
b883b3066848957376d841cb4ffdf2d5646315c8
|
add quick-testlist.py
|
scripts/quick-testlist.py
|
scripts/quick-testlist.py
|
Python
| 0
|
@@ -0,0 +1,1859 @@
+#!/usr/bin/env python%0A#%0A# Copyright 2015 Intel Corporation%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining a%0A# copy of this software and associated documentation files (the %22Software%22),%0A# to deal in the Software without restriction, including without limitation%0A# the rights to use, copy, modify, merge, publish, distribute, sublicense,%0A# and/or sell copies of the Software, and to permit persons to whom the%0A# Software is furnished to do so, subject to the following conditions:%0A#%0A# The above copyright notice and this permission notice (including the next%0A# paragraph) shall be included in all copies or substantial portions of the%0A# Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL%0A# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING%0A# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS%0A# IN THE SOFTWARE.%0A%0Afrom __future__ import print_function%0Aimport json%0Aimport sys%0A%0Adef filter_results(filename):%0A with open(filename) as data:%0A json_data = json.load(data)%0A%0A for test_name in json_data%5B%22tests%22%5D:%0A if json_data%5B%22tests%22%5D%5Btest_name%5D%5B%22result%22%5D == %22incomplete%22:%0A continue%0A if json_data%5B%22tests%22%5D%5Btest_name%5D%5B%22time%22%5D %3C 60:%0A print(test_name)%0A%0A%0Aif len(sys.argv) %3C 2:%0A print(%22Usage: quick-testlist.py RESULTS%22)%0A print(%22Read piglit results from RESULTS and print the tests that executed%22%0A %22 in under 60 seconds, excluding any incomplete tests. The list can%22%0A %22 be used by the --test-list option of piglit.%22)%0A sys.exit(1)%0A%0Afilter_results(sys.argv%5B1%5D)%0A
|
|
718379eea1e0c58ba76ada08d64512d9f4904c07
|
add new package (#10060)
|
var/spack/repos/builtin/packages/eztrace/package.py
|
var/spack/repos/builtin/packages/eztrace/package.py
|
Python
| 0
|
@@ -0,0 +1,812 @@
+# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Eztrace(AutotoolsPackage):%0A %22%22%22EZTrace is a tool to automatically generate execution traces%0A of HPC applications.%22%22%22%0A%0A homepage = %22http://eztrace.gforge.inria.fr%22%0A url = %22https://gforge.inria.fr/frs/download.php/file/37703/eztrace-1.1-8.tar.gz%22%0A%0A version('1.1-8', sha256='d80d78a25f1eb0e6e60a3e535e3972cd178c6a8663a3d6109105dfa6a880b8ec')%0A%0A depends_on('mpi')%0A%0A # Does not work on Darwin due to MAP_POPULATE%0A conflicts('platform=darwin')%0A%0A def configure_args(self):%0A args = %5B%22--with-mpi=%7B0%7D%22.format(self.spec%5B%22mpi%22%5D.prefix)%5D%0A return args%0A
|
|
cfe9550bfe7d8659c06892af8a32662cb372bea9
|
add new package : sysstat (#13907)
|
var/spack/repos/builtin/packages/sysstat/package.py
|
var/spack/repos/builtin/packages/sysstat/package.py
|
Python
| 0
|
@@ -0,0 +1,1036 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Sysstat(AutotoolsPackage):%0A %22%22%22The sysstat package contains various utilities, common to many%0A commercial Unixes, to monitor system performance and usage activity%0A Sysstat also contains tools you can schedule via cron or systemd to%0A collect and historize performance and activity data.%22%22%22%0A%0A homepage = %22https://github.com/sysstat%22%0A url = %22https://github.com/sysstat/sysstat/archive/v12.1.6.tar.gz%22%0A%0A version('12.2.0', sha256='614ab9fe8e7937a3edb7b2b6760792a3764ea3a7310ac540292dd0e3dfac86a6')%0A version('12.1.7', sha256='293b31ca414915896c639a459f4d03a742b3a472953975394bef907b245b3a9f')%0A version('12.1.6', sha256='50f4cbf023f8b933ed6f1fee0e6d33e508d9dc20355a47f6927e0c6046c6acf6')%0A version('12.1.5', sha256='d0ea36f278fe10c7978be2a383cb8055c1277d60687ac9030ba694a08a80f6ff')%0A
|
|
56592b10e25cd1f2cf8d122df389268ab24b3729
|
refactor and use OOMMF_PATH environment variable to locate oommf.tcl
|
oommfmif/__init__.py
|
oommfmif/__init__.py
|
import subprocess
def get_version():
    """Return the OOMMF version token reported by ``oommf.tcl +version``.

    Runs the hard-coded ``~/git/oommf/oommf/oommf.tcl`` through a shell,
    waits for it to finish, and parses its stderr (where OOMMF reports the
    version as "<path to oommf.tcl> <version>").
    """
    proc = subprocess.Popen("~/git/oommf/oommf/oommf.tcl +version", shell=True,
                            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    proc.wait()
    # Drain both pipes; the version information arrives on stderr.
    out, err = proc.stdout.read(), proc.stderr.read()
    tokens = err.split()[0:2]
    print(tokens)
    script_path, version = tokens
    return version
|
Python
| 0
|
@@ -1,8 +1,18 @@
+import os%0A
import s
@@ -26,103 +26,439 @@
ss%0A%0A
-%0Adef get_version():%0A p = subprocess.Popen(%22~/git/oommf/oommf/oommf.tcl +version%22, shell=True
+# Environment variable OOMMF_PATH should point to the directory which%0A# contains 'oommf.tcl'%0Aoommf_path = os.environ%5B'OOMMF_PATH'%5D%0A%0A%0Adef call_oommf(argstring):%0A %22%22%22Convenience function to call OOMMF: Typicallusage%0A%0A p = call_oommf(%22+version%22)%0A p.wait()%0A stdout, stderr = p.stdout.read(), p.stderr.read()%0A%0A %22%22%22%0A%0A p = subprocess.Popen(os.path.join(oommf_path, 'oommf.tcl') +%0A ' ' + argstring
,%0A
@@ -479,16 +479,28 @@
+ shell=True,
stderr=
@@ -515,16 +515,41 @@
ss.PIPE,
+%0A
stdout=
@@ -573,54 +573,159 @@
-p.wait
+return p%0A%0A%0Adef get_version
()
+:
%0A
-stdout, stderr = p.stdout.read(),
+%22%22%22Return OOMMF version as string, something like 1.2.0.5%22%22%22%0A p = call_oommf('+version')%0A p.wait()%0A stderr =
p.s
@@ -732,26 +732,25 @@
tderr.read()
-%0A%0A
+
# versio
@@ -777,39 +777,8 @@
err%0A
- print(stderr.split()%5B0:2%5D)%0A
|
94f5f630c315bc6951c98cd2a9f4908ce05d59a4
|
Test float precision in json encoding.
|
fedmsg/tests/test_encoding.py
|
fedmsg/tests/test_encoding.py
|
Python
| 0
|
@@ -0,0 +1,434 @@
+import unittest%0Aimport fedmsg.encoding%0A%0Afrom nose.tools import eq_%0A%0A%0Aclass TestEncoding(unittest.TestCase):%0A def test_float_precision(self):%0A %22%22%22 Ensure that float precision is limited to 3 decimal places. %22%22%22%0A msg = dict(some_number=1234.123456)%0A json_str = fedmsg.encoding.dumps(msg)%0A print json_str%0A output = fedmsg.encoding.loads(json_str)%0A eq_(str(output%5B'some_number'%5D), '1234.123')%0A
|
|
dfb4c5422c79fcd413d0d9a028cb5548e2678454
|
Add script for generating test certificates
|
generate_test_certificates.py
|
generate_test_certificates.py
|
Python
| 0
|
@@ -0,0 +1,287 @@
+import trustme%0A%0A# Create a CA%0Aca = trustme.CA()%0A%0A# Issue a cert signed by this CA%0Aserver_cert = ca.issue_cert(u%22www.good.com%22)%0A%0A# Save the PEM-encoded data to a file%0Aca.cert_pem.write_to_path(%22GoodRootCA.pem%22)%0Aserver_cert.private_key_and_cert_chain_pem.write_to_path(%22www.good.com.pem%22)%0A
|
|
f063ebffaa52c3807dac641ae01146235d77210c
|
Remove superfluous comment.
|
src/python/twitter/common/contextutil/__init__.py
|
src/python/twitter/common/contextutil/__init__.py
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'John Sirois, Brian Wickman'
import errno
import os
import shutil
import tarfile
import tempfile
import sys
import zipfile
from contextlib import closing, contextmanager
@contextmanager
def environment_as(**kwargs):
    """
    Update the environment to the supplied values, for example:

    with environment_as(PYTHONPATH = 'foo:bar:baz',
                        PYTHON = '/usr/bin/python2.6'):
      subprocess.Popen(foo).wait()

    A value of None unsets the variable.  On exit every variable is
    restored to its previous value (or unset if it did not exist).
    """
    new_environment = kwargs
    old_environment = {}

    def setenv(key, val):
        # Mutate os.environ rather than calling os.putenv: os.putenv does
        # NOT update os.environ, so in-process reads (including the
        # os.environ.get snapshot below on nested uses) would see stale
        # values.  Assigning to os.environ also updates the real process
        # environment for subprocesses.
        if val is not None:
            os.environ[key] = val
        else:
            os.environ.pop(key, None)

    for key, val in new_environment.items():
        # Snapshot the prior value (None == previously unset) before overwriting.
        old_environment[key] = os.environ.get(key)
        setenv(key, val)
    try:
        yield
    finally:
        # Restore even if the body raised.
        for key, val in old_environment.items():
            setenv(key, val)
@contextmanager
def temporary_dir(root_dir=None, cleanup=True):
    """
    A with-context yielding the path of a freshly created temporary directory.

    Keyword args:
      root_dir [path]: The parent directory in which to create the directory.
      cleanup [True/False]: Whether to remove the directory (and contents) on exit.

    Important note: If you fork inside the context, make sure only one tine
    performs cleanup (e.g., by calling os._exit() in the child).
    """
    dir_path = tempfile.mkdtemp(dir=root_dir)
    try:
        yield dir_path
    finally:
        if cleanup:
            shutil.rmtree(dir_path, ignore_errors=True)
@contextmanager
def temporary_file_path(root_dir=None, cleanup=True):
    """
    A with-context that creates a temporary file and returns its path.

    You may specify the following keyword args:
      root_dir [path]: The parent directory to create the temporary file.
      cleanup [True/False]: Whether or not to clean up the temporary file.

    Important note: If you fork inside the context, make sure only one tine
    performs cleanup (e.g., by calling os._exit() in the child).
    """
    # argh, I would love to use os.fdopen here but then fp.name == '<fdopen>'
    # and that's unacceptable behavior for most cases where I want to use temporary_file
    fh, path = tempfile.mkstemp(dir=root_dir)
    # Only the path is yielded, so close the raw descriptor immediately;
    # callers open the path themselves.
    os.close(fh)
    try:
        yield path
    finally:
        if cleanup:
            try:
                os.unlink(path)
            # Python 2-only except syntax (this module predates Python 3).
            except OSError, e:
                # ENOENT is tolerated: the caller may already have removed
                # the file; any other OS error is re-raised.
                if e.errno == errno.ENOENT:
                    pass
                else:
                    raise e
@contextmanager
def temporary_file(root_dir=None, cleanup=True):
    """
    A with-context that creates a temporary file and yields a writeable
    file object opened on it.

    Keyword args:
      root_dir [path]: The parent directory in which to create the file.
      cleanup [True/False]: Whether to delete the file on exit.

    Important note: If you fork inside the context, make sure only one tine
    performs cleanup (e.g., by calling os._exit() in the child).
    """
    # mkstemp + reopen (instead of os.fdopen) so the yielded object has a
    # real .name attribute rather than '<fdopen>'.
    handle, file_path = tempfile.mkstemp(dir=root_dir)
    os.close(handle)
    # There is a window here in which another process could open the path;
    # the original implementation has the same race, preserved as-is.
    stream = open(file_path, 'w+')
    try:
        yield stream
    finally:
        if not stream.closed:
            stream.close()
        if cleanup:
            os.unlink(file_path)
@contextmanager
def pushd(directory):
    """
    A with-context mimicking shell pushd/popd: chdir into *directory*,
    yield it, and always chdir back to the original working directory,
    even if the body raises.
    """
    previous = os.getcwd()
    os.chdir(directory)
    try:
        yield directory
    finally:
        os.chdir(previous)
@contextmanager
def mutable_sys():
    """
    A with-context that snapshots sys.argv, sys.path, the stdio streams and
    import-machinery state, yields sys for free mutation, and restores
    everything afterwards.  Attributes absent on entry (e.g. __egginsert)
    are deleted again on exit if the body created them.
    """
    SAVED_ATTRIBUTES = [
        'stdin', 'stdout', 'stderr',
        'argv', 'path', 'path_importer_cache', 'path_hooks',
        'modules', '__egginsert'
    ]
    saved = {}
    absent = set()
    for name in SAVED_ATTRIBUTES:
        if hasattr(sys, name):
            saved[name] = getattr(sys, name)
        else:
            absent.add(name)
    try:
        yield sys
    finally:
        for name, value in saved.items():
            setattr(sys, name, value)
        for name in absent:
            if hasattr(sys, name):
                delattr(sys, name)
@contextmanager
def open_zip(path_or_file, *args, **kwargs):
    """
    A with-context over zipfile.ZipFile that guarantees the archive is
    closed on exit.  Positional and keyword args pass straight through
    to zipfile.ZipFile.
    """
    archive = zipfile.ZipFile(path_or_file, *args, **kwargs)
    try:
        yield archive
    finally:
        archive.close()
@contextmanager
def open_tar(path_or_file, *args, **kwargs):
    """
    A with-context for tar files.  Passes through positional and kwargs to tarfile.open.

    If path_or_file is a file, caller must close it separately.
    """
    # Strings are treated as filesystem paths; anything else is assumed to
    # be an already-open file object passed via tarfile's fileobj=.
    # NOTE: `basestring` is a Python 2 builtin -- this module predates
    # Python 3.
    (path, fileobj) = (path_or_file, None) if isinstance(path_or_file, basestring) else (None, path_or_file)
    with closing(tarfile.open(path, *args, fileobj=fileobj, **kwargs)) as tar:
        yield tar
|
Python
| 0
|
@@ -2830,171 +2830,8 @@
%22%22%22%0A
- # argh, I would love to use os.fdopen here but then fp.name == '%3Cfdopen%3E'%0A # and that's unacceptable behavior for most cases where I want to use temporary_file%0A
fh
|
989537e91109becdae96b8484b6cb9a007137d0a
|
FIX m2o on wizard with ondelete
|
account_statement_move_import/wizard/account_statement_move_import_wizard.py
|
account_statement_move_import/wizard/account_statement_move_import_wizard.py
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
class account_statement_move_import_wizard(models.TransientModel):
_name = "account.statement.move.import.wizard"
_description = "account_statement_move_import_wizard"
@api.model
def _get_statement(self):
return self.env['account.bank.statement'].browse(
self._context.get('active_id', False))
from_date = fields.Date(
'From Date',
required=True,
)
to_date = fields.Date(
'To Date',
required=True,
)
statement_id = fields.Many2one(
'account.bank.statement',
'Statement',
default=_get_statement,
required=True,
)
journal_id = fields.Many2one(
'account.journal',
'Journal',
compute='get_journal',
)
journal_account_ids = fields.Many2many(
'account.account',
compute='get_accounts',
string='Journal Accounts'
)
move_line_ids = fields.Many2many(
'account.move.line',
'account_statement_import_move_line_rel',
'line_id', 'move_line_id',
'Journal Items',
domain="[('journal_id', '=', journal_id), "
"('statement_id', '=', False), "
"('exclude_on_statements', '=', False), "
"('account_id', 'in', journal_account_ids[0][2])]"
)
@api.multi
@api.onchange('statement_id')
def onchange_statement(self):
self.from_date = self.statement_id.period_id.date_start
self.to_date = self.statement_id.period_id.date_stop
@api.multi
@api.depends('statement_id')
def get_journal(self):
self.journal_id = self.statement_id.journal_id
@api.onchange('from_date', 'to_date', 'journal_id')
def get_move_lines(self):
move_lines = self.move_line_ids.search([
('journal_id', '=', self.journal_id.id),
('account_id', 'in', self.journal_account_ids.ids),
('statement_id', '=', False),
('exclude_on_statements', '=', False),
('date', '>=', self.from_date),
('date', '<=', self.to_date),
])
self.move_line_ids = move_lines
@api.multi
@api.depends('journal_id')
def get_accounts(self):
self.journal_account_ids = (
self.journal_id.default_credit_account_id +
self.journal_id.default_debit_account_id)
@api.multi
def confirm(self):
self.ensure_one()
statement = self.statement_id
for line in self.move_line_ids:
if line.account_id not in self.journal_account_ids:
raise Warning(_(
'Imported line account must be one of the journals '
'defaults, in this case %s') % (
', '.join(self.journal_account_ids.mapped('name'))))
if line.statement_id:
raise Warning(_(
'Imported line must have "statement_id" == False'))
line_balance = line.debit - line.credit
# if line_balance > 0:
# line_type = 'customer'
# else:
# line_type = 'supplier'
line_vals = {
'statement_id': statement.id,
'name': line.name,
'ref': line.ref,
'amount': line_balance,
'imported': True,
# 'type': line_type,
'partner_id': line.partner_id.id,
# we need journal entry so that id dont suggest a
# reconciliation
'journal_entry_id': line.move_id.id,
}
# create statement line
statement.line_ids.create(line_vals)
# add statement to move lines
line.move_id.line_id.write({'statement_id': statement.id})
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0
|
@@ -940,32 +940,60 @@
required=True,%0A
+ ondelete='cascade',%0A
)%0A jo
|
8b8c6c85d05d0291829f68d3bb02d8a8d22631f7
|
remove unusage import
|
jieba/analyse/textrank.py
|
jieba/analyse/textrank.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import json
import collections
from operator import itemgetter
import jieba.posseg as pseg
class UndirectWeightedGraph:
d = 0.85
def __init__(self):
self.graph = collections.defaultdict(list)
def addEdge(self, start, end, weight):
# use a tuple (start, end, weight) instead of a Edge object
self.graph[start].append((start, end, weight))
self.graph[end].append((end, start, weight))
def rank(self):
ws = collections.defaultdict(float)
outSum = collections.defaultdict(float)
giter = list(self.graph.items()) # these two lines for build stable iteration
giter.sort()
wsdef = 1.0 / (len(self.graph) or 1.0)
for n, out in giter:
ws[n] = wsdef
outSum[n] = sum((e[2] for e in out), 0.0)
for x in range(10): # 10 iters
for n, inedges in giter:
s = 0
for e in inedges:
s += e[2] / outSum[e[1]] * ws[e[1]]
ws[n] = (1 - self.d) + self.d * s
(min_rank, max_rank) = (sys.float_info[0], sys.float_info[3])
for _, w in ws.items():
if w < min_rank:
min_rank = w
elif w > max_rank:
max_rank = w
for n, w in ws.items():
# to unify the weights, don't *100.
ws[n] = (w - min_rank / 10.0) / (max_rank - min_rank / 10.0)
return ws
def textrank(sentence, topK=10, withWeight=False, allowPOS=['ns', 'n', 'vn', 'v']):
"""
Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
if the POS of w is not in this list,it will be filtered.
"""
pos_filt = frozenset(allowPOS)
g = UndirectWeightedGraph()
cm = collections.defaultdict(int)
span = 5
words = list(pseg.cut(sentence))
for i in range(len(words)):
if words[i].flag in pos_filt:
for j in range(i + 1, i + span):
if j >= len(words):
break
if words[j].flag not in pos_filt:
continue
cm[(words[i].word, words[j].word)] += 1
for terms, w in cm.items():
g.addEdge(terms[0], terms[1], w)
nodes_rank = g.rank()
if withWeight:
tags = sorted(nodes_rank.items(), key=itemgetter(1), reverse=True)
else:
tags = sorted(nodes_rank, key=nodes_rank.__getitem__, reverse=True)
if topK:
return tags[:topK]
else:
return tags
if __name__ == '__main__':
s = "此外,公司拟对全资子公司吉林欧亚置业有限公司增资4.3亿元,增资后,吉林欧亚置业注册资本由7000万元增加到5亿元。吉林欧亚置业主要经营范围为房地产开发及百货零售等业务。目前在建吉林欧亚城市商业综合体项目。2013年,实现营业收入0万元,实现净利润-139.13万元。"
for x, w in textrank(s, withWeight=True):
print('%s %s' % (x, w))
|
Python
| 0.000003
|
@@ -95,20 +95,8 @@
sys%0A
-import json%0A
impo
|
89a78e09ee52c27df8cd548839b240984b13d61d
|
add client exception
|
kafka/exception/client.py
|
kafka/exception/client.py
|
Python
| 0.000001
|
@@ -0,0 +1,292 @@
+class FailedPayloadsException(Exception):%0A pass%0A%0A%0Aclass ConnectionError(Exception):%0A pass%0A%0A%0Aclass BufferUnderflowError(Exception):%0A pass%0A%0A%0Aclass ChecksumError(Exception):%0A pass%0A%0A%0Aclass ConsumerFetchSizeTooSmall(Exception):%0A pass%0A%0A%0Aclass ConsumerNoMoreData(Exception):%0A pass%0A
|
|
de71fee0a095ea043c66be92ad8bd6685b7fc74f
|
Fix #7026 adding a new wol parameter (#7144)
|
homeassistant/components/switch/wake_on_lan.py
|
homeassistant/components/switch/wake_on_lan.py
|
"""
Support for wake on lan.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.wake_on_lan/
"""
import logging
import platform
import subprocess as sp
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
from homeassistant.const import (CONF_HOST, CONF_NAME)
REQUIREMENTS = ['wakeonlan==0.2.2']
_LOGGER = logging.getLogger(__name__)
CONF_MAC_ADDRESS = 'mac_address'
CONF_OFF_ACTION = 'turn_off'
DEFAULT_NAME = 'Wake on LAN'
DEFAULT_PING_TIMEOUT = 1
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MAC_ADDRESS): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OFF_ACTION): cv.SCRIPT_SCHEMA,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up a wake on lan switch."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
mac_address = config.get(CONF_MAC_ADDRESS)
off_action = config.get(CONF_OFF_ACTION)
add_devices([WOLSwitch(hass, name, host, mac_address, off_action)])
class WOLSwitch(SwitchDevice):
"""Representation of a wake on lan switch."""
def __init__(self, hass, name, host, mac_address, off_action):
"""Initialize the WOL switch."""
from wakeonlan import wol
self._hass = hass
self._name = name
self._host = host
self._mac_address = mac_address
self._off_script = Script(hass, off_action) if off_action else None
self._state = False
self._wol = wol
self.update()
@property
def should_poll(self):
"""Poll for status regularly."""
return True
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
@property
def name(self):
"""The name of the switch."""
return self._name
def turn_on(self):
"""Turn the device on."""
if self._host:
self._wol.send_magic_packet(self._mac_address,
ip_address=self._host)
else:
self._wol.send_magic_packet(self._mac_address)
def turn_off(self):
"""Turn the device off if an off action is present."""
if self._off_script is not None:
self._off_script.run()
def update(self):
"""Check if device is on and update the state."""
if platform.system().lower() == 'windows':
ping_cmd = ['ping', '-n', '1', '-w',
str(DEFAULT_PING_TIMEOUT * 1000), str(self._host)]
else:
ping_cmd = ['ping', '-c', '1', '-W',
str(DEFAULT_PING_TIMEOUT), str(self._host)]
status = sp.call(ping_cmd, stdout=sp.DEVNULL)
self._state = not bool(status)
|
Python
| 0
|
@@ -611,16 +611,61 @@
urn_off'
+%0ACONF_BROADCAST_ADDRESS = 'broadcast_address'
%0A%0ADEFAUL
@@ -835,32 +835,85 @@
ST): cv.string,%0A
+ vol.Optional(CONF_BROADCAST_ADDRESS): cv.string,%0A
vol.Optional
@@ -1236,16 +1236,75 @@
DDRESS)%0A
+ broadcast_address = config.get(CONF_BROADCAST_ADDRESS)%0A
off_
@@ -1398,27 +1398,73 @@
address,
- off_action
+%0A off_action, broadcast_address
)%5D)%0A%0A%0Acl
@@ -1596,27 +1596,63 @@
address,
- off_action
+%0A off_action, broadcast_address
):%0A
@@ -1839,16 +1839,68 @@
address%0A
+ self._broadcast_address = broadcast_address%0A
@@ -2424,20 +2424,33 @@
f self._
-host
+broadcast_address
:%0A
@@ -2559,20 +2559,33 @@
s=self._
-host
+broadcast_address
)%0A
|
05004f8dc48fe15268bc2d0146e5788f0bdf463e
|
Add missing migration
|
djedi/migrations/0002_auto_20190722_1447.py
|
djedi/migrations/0002_auto_20190722_1447.py
|
Python
| 0
|
@@ -0,0 +1,392 @@
+# Generated by Django 2.2.3 on 2019-07-22 14:47%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('djedi', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='node',%0A name='is_published',%0A field=models.BooleanField(blank=True, default=False),%0A ),%0A %5D%0A
|
|
c1d0ed6b5e4a1be5866ace6eca98fd52495e3f7e
|
add dc= and role= groups
|
plugins/inventory/terraform.py
|
plugins/inventory/terraform.py
|
#!/usr/bin/env python
"""\
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
import json
import os
import sys
parser = argparse.ArgumentParser(__file__, __doc__)
modes = parser.add_mutually_exclusive_group()
modes.add_argument('--list', action='store_true', help='list all variables')
modes.add_argument('--host', help='list variables for a single host')
parser.add_argument('--pretty',
action='store_true',
help='pretty-print output JSON')
parser.add_argument('--nometa',
action='store_true',
help='with --list, exclude hostvars')
def tfstates(root=None):
root = root or os.getcwd()
for dirpath, _, filenames in os.walk(root):
for name in filenames:
if os.path.splitext(name)[-1] == '.tfstate':
yield os.path.join(dirpath, name)
def iterresources(filenames):
for filename in filenames:
with open(filename, 'r') as json_file:
state = json.load(json_file)
for module in state['modules']:
for key, resource in module['resources'].items():
yield key, resource
## READ RESOURCES
PARSERS = {}
def iterhosts(resources):
'''yield host tuples of (name, attributes, groups)'''
for key, resource in resources:
resource_type, name = key.split('.', 1)
try:
parser = PARSERS[resource_type]
except KeyError:
continue
yield parser(resource)
def parses(prefix):
def inner(func):
PARSERS[prefix] = func
return func
return inner
def _parse_prefix(source, prefix):
for compkey, value in source.items():
try:
curprefix, rest = compkey.split('.', 1)
except ValueError:
continue
if curprefix != prefix or rest == '#':
continue
yield rest, value
def parse_attr_list(source, prefix):
size_key = '%s.#' % prefix
try:
size = int(source[size_key])
except KeyError:
return []
attrs = [{} for _ in range(size)]
for compkey, value in _parse_prefix(source, prefix):
nth, key = compkey.split('.', 1)
attrs[int(nth)][key] = value
return attrs
def parse_dict(source, prefix):
return dict(_parse_prefix(source, prefix))
def parse_list(source, prefix):
return [value for _, value in _parse_prefix(source, prefix)]
@parses('google_compute_instance')
def gce_host(resource, tfvars=None):
name = resource['primary']['id']
raw_attrs = resource['primary']['attributes']
groups = []
# network interfaces
interfaces = parse_attr_list(raw_attrs, 'network_interface')
for interface in interfaces:
interface['access_config'] = parse_attr_list(interface,
'access_config')
for key in interface.keys():
if '.' in key:
del interface[key]
# general attrs
attrs = {
'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
'disks': parse_attr_list(raw_attrs, 'disk'),
'machine_type': raw_attrs['machine_type'],
'metadata': parse_dict(raw_attrs, 'metadata'),
'network': parse_attr_list(raw_attrs, 'network'),
'network_interface': interfaces,
'self_link': raw_attrs['self_link'],
'service_account': parse_attr_list(raw_attrs, 'service_account'),
'tags': parse_list(raw_attrs, 'tags'),
'zone': raw_attrs['zone'],
# ansible
'ansible_ssh_port': 22,
'ansible_ssh_user': 'deploy',
}
# attrs specific to microservices-infrastructure
attrs.update({
'consul_dc': attrs['metadata'].get('dc', attrs['zone']),
})
try:
attrs.update({
'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({
'ansible_ssh_host': '',
'publicly_routable': False,
})
# add groups based on attrs
groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
groups.append('gce_machine_type=' + attrs['machine_type'])
groups.extend('gce_metadata_%s=%s' % (key, value)
for (key, value) in attrs['metadata'].items()
if key not in set(['sshKeys']))
groups.extend('gce_tag=' + tag for tag in attrs['tags'])
groups.append('gce_zone=' + attrs['zone'])
if attrs['can_ip_forward']:
groups.append('gce_ip_forward')
if attrs['publicly_routable']:
groups.append('gce_publicly_routable')
return name, attrs, groups
## QUERY TYPES
def query_host(hosts, target):
for name, attrs, _ in hosts:
if name == target:
return attrs
return {}
def query_list(hosts):
groups = defaultdict(dict)
meta = {}
for name, attrs, hostgroups in hosts:
for group in hostgroups:
groups[group].setdefault('hosts', [])
groups[group]['hosts'].append(name)
meta[name] = attrs
groups['_meta'] = {'hostvars': meta}
return groups
def main():
args = parser.parse_args()
if not args.list and not args.host:
print('error: one of --list or --host is required', file=sys.stderr)
print('{}')
return 1
hosts = iterhosts(iterresources(tfstates()))
if args.list:
output = query_list(hosts)
if args.nometa:
del output['_meta']
else:
output = query_host(hosts, args.host)
print(json.dumps(output, indent=4 if args.pretty else None))
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000006
|
@@ -4848,16 +4848,212 @@
able')%0A%0A
+ # groups specific to microservices-infrastructure%0A if 'role' in attrs%5B'metadata'%5D:%0A groups.append('role=' + attrs%5B'metadata'%5D%5B'role'%5D)%0A groups.append('dc=' + attrs%5B'consul_dc'%5D)%0A%0A
retu
|
c201245a01ded92bec91f1f34320e87666330c44
|
add mbtiles command
|
seedsource_core/django/seedsource/management/commands/create_vector_tiles.py
|
seedsource_core/django/seedsource/management/commands/create_vector_tiles.py
|
Python
| 0.00123
|
@@ -0,0 +1,810 @@
+from django.core.management import BaseCommand%0Afrom seedsource_core.django.seedsource.models import SeedZone%0Aimport subprocess%0A%0Aclass Command(BaseCommand):%0A help = 'Facilitates converting of vector data into vector tiles.'%0A%0A def handle(self, *args, **options):%0A def write_out(output):%0A self.stdout.write(output)%0A%0A zones = %5B%5D%0A write_out(self.style.WARNING('Loading data..'))%0A for sz in SeedZone.objects.all():%0A zones.append(sz.polygon.json)%0A write_out(sz.name)%0A%0A with open(%22geojson%22, %22w%22) as f:%0A f.write('%5Cn'.join(zones))%0A%0A write_out(self.style.WARNING('Data loaded%5CnLaunching process to write mbtiles..'))%0A subprocess.Popen(%22tippecanoe -o seedtiles.mbtiles -f -zg --drop-densest-as-needed geojson%22, shell=True)%0A
|
|
08364dae50a68b5d053eadc836c02b51873df250
|
Add dog_cat
|
cnn/dog_cat/dog_cat.py
|
cnn/dog_cat/dog_cat.py
|
Python
| 0.999994
|
@@ -0,0 +1,2067 @@
+from keras.models import Sequential%0D%0Afrom keras.layers import Convolution2D, MaxPooling2D%0D%0Afrom keras.layers import Activation, Dropout, Flatten, Dense%0D%0Afrom keras.preprocessing.image import ImageDataGenerator%0D%0A%0D%0A%0D%0Adef save_history(history, result_file):%0D%0A loss = history.history%5B'loss'%5D%0D%0A acc = history.history%5B'acc'%5D%0D%0A val_loss = history.history%5B'val_loss'%5D%0D%0A val_acc = history.history%5B'val_acc'%5D%0D%0A nb_epoch = len(acc)%0D%0A%0D%0A with open(result_file, %22w%22) as fp:%0D%0A fp.write(%22epoch%5Ctloss%5Ctacc%5Ctval_loss%5Ctval_acc%5Cn%22)%0D%0A for i in range(nb_epoch):%0D%0A fp.write(%22%25d%5Ct%25f%5Ct%25f%5Ct%25f%5Ct%25f%5Cn%22 %25 (i, loss%5Bi%5D, acc%5Bi%5D, val_loss%5Bi%5D, val_acc%5Bi%5D))%0D%0A%0D%0A%0D%0Amodel = Sequential()%0D%0Amodel.add(Convolution2D(32, 3, 3, input_shape=(150, 150, 3)))%0D%0Amodel.add(Activation('relu'))%0D%0Amodel.add(MaxPooling2D(pool_size=(2, 2)))%0D%0A%0D%0Amodel.add(Convolution2D(32, 3, 3))%0D%0Amodel.add(Activation('relu'))%0D%0Amodel.add(MaxPooling2D(pool_size=(2, 2)))%0D%0A%0D%0Amodel.add(Convolution2D(64, 3, 3))%0D%0Amodel.add(Activation('relu'))%0D%0Amodel.add(MaxPooling2D(pool_size=(2, 2)))%0D%0A%0D%0Amodel.add(Flatten())%0D%0Amodel.add(Dense(64))%0D%0Amodel.add(Activation('relu'))%0D%0Amodel.add(Dropout(0.5))%0D%0Amodel.add(Dense(1))%0D%0Amodel.add(Activation('sigmoid'))%0D%0A%0D%0Amodel.compile(loss='binary_crossentropy',%0D%0A optimizer='rmsprop',%0D%0A metrics=%5B'accuracy'%5D)%0D%0A%0D%0Atrain_datagen = ImageDataGenerator(%0D%0A rescale=1./255,%0D%0A shear_range=0.2,%0D%0A zoom_range=0.2,%0D%0A horizontal_flip=True)%0D%0A%0D%0Atest_datagen = ImageDataGenerator(rescale=1./255)%0D%0A%0D%0Atrain_generator = train_datagen.flow_from_directory(%0D%0A 'data/train',%0D%0A target_size=(150, 150),%0D%0A batch_size=32,%0D%0A class_mode='binary')%0D%0A%0D%0Avalidation_generator = test_datagen.flow_from_directory(%0D%0A 'data/validation',%0D%0A target_size=(150, 150),%0D%0A 
batch_size=32,%0D%0A class_mode='binary')%0D%0A%0D%0Ahistory = model.fit_generator(%0D%0A train_generator,%0D%0A samples_per_epoch=2000,%0D%0A nb_epoch=1,%0D%0A validation_data=validation_generator,%0D%0A nb_val_samples=800)%0D%0A%0D%0Amodel.save_weights('first_try.h5')%0D%0Asave_history(history, 'history.txt')%0D%0A
|
|
fa7a24493e6e8029ea2dd7f3bf244b08353c50a3
|
create run commnd
|
manage.py
|
manage.py
|
Python
| 0
|
@@ -0,0 +1,339 @@
+import os%0Afrom flask_script import Manager%0Afrom flask_migrate import Migrate, MigrateCommand%0A%0Afrom app import app, db%0A%0A%0Aapp.config.from_object(os.getenv('APP_SETTINGS', 'config.DevelopmentConfig'))%0A%0Amigrate = Migrate(app, db)%0Amanager = Manager(app)%0A%0Amanager.add_command('db', MigrateCommand)%0A%0A%0Aif __name__ == '__main__':%0A manager.run()%0A
|
|
6bd35a2df0dbeca2668999dafbbd05779911cca7
|
add directory for state
|
src/mugen/state/serialize.py
|
src/mugen/state/serialize.py
|
Python
| 0
|
@@ -0,0 +1,317 @@
+#!/usr/bin/env python%0A%0A# This script reads a specification of a datastructure with fields in it and writes%0A# out a class that contains those fields and a way to serialize/deserialize them%0A# to a stream. This is similar to google's protobuf but using a much simpler%0A# implementation.%0A%0A# TODO: grammar of specification%0A
|
|
d5aae9d0a770cad05c76c30754f5fcc57be5bd9b
|
Solve Fuel Spent in python
|
solutions/uri/1017/1017.py
|
solutions/uri/1017/1017.py
|
Python
| 0.999999
|
@@ -0,0 +1,68 @@
+h = float(input())%0As = float(input())%0A%0Aprint(f%22%7Bh * s / 12.0:.3f%7D%22)%0A
|
|
52f4d72387810994a7106e4fa55c3bfcda798a1c
|
Create __init__.py
|
ENN/__init__.py
|
ENN/__init__.py
|
Python
| 0.000429
|
@@ -0,0 +1 @@
+%0A
|
|
f3f073379b71a13fea4255622c7df19bec02fdd7
|
bump version
|
EMpy/version.py
|
EMpy/version.py
|
__author__ = 'Lorenzo Bolla'
version = '0.1.3'
|
Python
| 0
|
@@ -42,7 +42,7 @@
0.1.
-3
+4
'%0A
|
05e37a58825a6b75ade5ffdd25e887f9c9a7409c
|
Add net/ip.py containing python function wrapping /sbin/ip
|
net/ip.py
|
net/ip.py
|
Python
| 0.000015
|
@@ -0,0 +1,2757 @@
+#!/usr/bin/env python%0A# Copyright (c) 2012 Citrix Systems, Inc.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License as published%0A# by the Free Software Foundation; version 2.1 only. with the special%0A# exception on linking described in file LICENSE.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Lesser General Public License for more details.%0A%0A%22%22%22%0APython function using /sbin/ip for convenience%0A%22%22%22%0A%0A__version__ = %221.0.0%22%0A__author__ = %22Andrew Cooper%22%0A%0Afrom subprocess import Popen, PIPE%0A%0Afrom xcp.logger import LOG%0A%0Adef ip_link_set_name(src_name, dst_name):%0A %22%22%22%0A Rename network interface src_name to dst_name using%0A %22ip link set $src_name name $dst_name%22%0A %22%22%22%0A%0A LOG.debug(%22Attempting rename %25s -%3E %25s%22 %25 (src_name, dst_name))%0A%0A # Is the interface currently up?%0A link_show = Popen(%5B%22/sbin/ip%22, %22link%22, %22show%22, src_name%5D, stdout = PIPE)%0A stdout, _ = link_show.communicate()%0A%0A if link_show.returncode != 0:%0A LOG.error(%22performing %5C%22ip link show %25s%5C%22 returned %25d - skipping%22%0A %25 (src_name, link_show.returncode))%0A return%0A%0A # Does the string %22UP%22 appear?%0A isup = 'UP' in (stdout.split(%22%3C%22, 1)%5B1%5D.split(%22%3E%22, 1)%5B0%5D.split(','))%0A%0A # If it is up, bring it down for the rename%0A if isup:%0A link_down = Popen(%5B%22/sbin/ip%22, %22link%22, %22set%22, src_name, %22down%22%5D)%0A link_down.wait()%0A%0A if link_down.returncode != 0:%0A LOG.error(%22Unable to bring link %25s down. 
(Exit %25d)%22%0A %25 (src_name, link_down.returncode))%0A return%0A%0A # Perform the rename%0A link_rename = Popen(%5B%22/sbin/ip%22, %22link%22, %22set%22, src_name, %22name%22, dst_name%5D)%0A link_rename.wait()%0A%0A if link_rename.returncode != 0:%0A LOG.error(%22Unable to rename link %25s to %25s. (Exit %25d)%22%0A %25 (src_name, dst_name, link_rename.returncode))%0A return%0A%0A # if the device was up before, bring it back up%0A if isup:%0A%0A # Performace note: if we are doing an intermediate rename to%0A # move a device sideways, we shouldnt bring it back until it has%0A # its final name. However, i cant think of a non-hacky way of doing%0A # this with the current implementation%0A%0A link_up = Popen(%5B%22/sbin/ip%22, %22link%22, %22set%22, dst_name, %22up%22%5D)%0A link_up.wait()%0A%0A if link_up.returncode != 0:%0A LOG.error(%22Unable to bring link %25s back up. (Exit %25d)%22%0A %25 (src_name, link_down.returncode))%0A return%0A%0A LOG.info(%22Succesfully renamed link %25s to %25s%22 %25 (src_name, dst_name))%0A%0A
|
|
2b57a443807de26c9e71c97fd029e3d8416db597
|
Add feature usage shuffler
|
SessionTools/feature_usage_shuffler.py
|
SessionTools/feature_usage_shuffler.py
|
Python
| 0
|
@@ -0,0 +1,2106 @@
+# Condenses all the feature files into a single location,%0A# Split by the names of the features%0A%0A%0Aimport sys%0Afrom os.path import isfile%0Aimport os%0Aimport json%0A%0Apath = sys.argv%5B1%5D%0Aout_path = sys.argv%5B2%5D%0Apaths = %5B%5D%0A%0Ai = 0%0Askipped = 0%0A%0Apretty_print_json_output = True%0A%0Afeature_versions_map = %7B%7D%0A%0Adef flush():%0A # Create one file per feature version%0A for k in feature_versions_map.keys():%0A out_full_path = out_path + %22.%22 + k + '.json'%0A data_to_dump = %7B%0A %22feature_version%22 : k,%0A %22sessions%22 : feature_versions_map%5Bk%5D%0A %7D%0A%0A with open(out_full_path, 'w') as f:%0A if pretty_print_json_output:%0A f.write(json.dumps(data_to_dump, sort_keys=True, indent=2))%0A else:%0A f.write(json.dumps(data_to_dump))%0A%0A%0A# Main function%0A%0Aprint ('Enumerating feature files')%0A%0Afor root, subdirs, files in os.walk(path):%0A for ff in files:%0A i = i + 1%0A if i %25 1000 == 0:%0A print (i, skipped)%0A flush()%0A%0A path = os.path.join(root,ff)%0A if (path.find('.sorted.gz.features.') == -1 ):%0A continue%0A path_split = path.split ('.sorted.gz.features.')%0A%0A feature_version = path_split%5B-1%5D%0A if not feature_versions_map.has_key(feature_version):%0A feature_versions_map%5Bfeature_version%5D = %7B%7D%0A%0A session_id = path_split%5B0%5D.split('/')%5B-1%5D%0A%0A%0A %0A if feature_versions_map%5Bfeature_version%5D.has_key(session_id):%0A # We've already added this session%0A # This can be used in a version that loads a partially complete file%0A print (%22Session: %22) + session_id + %22 skipped, features already added for: %22 + feature_version%0A skipped += 1%0A continue%0A feature_versions_map%5Bfeature_version%5D%5Bsession_id%5D = %5B%5D%0A%0A paths.append(path)%0A print (feature_version, session_id, path)%0A%0A with open(path, 'r') as f:%0A lines = f.readlines()%0A for ln in lines:%0A feature_versions_map%5Bfeature_version%5D%5Bsession_id%5D.append(json.loads(ln))%0A%0Aflush()%0A %0A
|
|
66bb19a5937091812b80b9c0d98c6f52b9d47165
|
add new package : kafka (#14315)
|
var/spack/repos/builtin/packages/kafka/package.py
|
var/spack/repos/builtin/packages/kafka/package.py
|
Python
| 0
|
@@ -0,0 +1,1484 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Kafka(Package):%0A %22%22%22%0A Kafka is used for building real-time data pipelines and streaming apps.%0A It is horizontally scalable, fault-tolerant, wicked fast, and runs in%0A production in thousands of companies.%0A %22%22%22%0A%0A homepage = %22https://www-eu.apache.org/dist/kafka%22%0A url = %22https://www-eu.apache.org/dist/kafka/2.3.1/kafka_2.12-2.3.1.tgz%22%0A list_url = %22https://www-eu.apache.org/dist/kafka/%22%0A list_depth = 1%0A%0A version('2.13-2.4.0', sha256='c1c5246c7075459687b3160b713a001f5cd1cc563b9a3db189868d2f22aa9110')%0A version('2.12-2.4.0', sha256='b9582bab0c3e8d131953b1afa72d6885ca1caae0061c2623071e7f396f2ccfee')%0A version('2.12-2.3.1', sha256='5a3ddd4148371284693370d56f6f66c7a86d86dd96c533447d2a94d176768d2e')%0A version('2.12-2.3.0', sha256='d86f5121a9f0c44477ae6b6f235daecc3f04ecb7bf98596fd91f402336eee3e7')%0A version('2.12-2.2.2', sha256='7a1713d2ee929e54b1c889a449d77006513e59afb3032366368b2ebccd9e9ec0')%0A%0A depends_on('java@8:', type='run')%0A%0A def url_for_version(self, version):%0A url = %22https://www-eu.apache.org/dist/kafka/%7B0%7D/kafka_%7B1%7D.tgz%22%0A parent_dir = str(version).split('-')%5B1%5D%0A return url.format(parent_dir, version)%0A%0A def install(self, spec, prefix):%0A install_tree('.', prefix)%0A
|
|
16e6f88e094d4eac8ba154eed5681187f14ab652
|
Create __init__.py
|
spacegame/__init__.py
|
spacegame/__init__.py
|
Python
| 0.000001
|
@@ -0,0 +1,315 @@
+%22%22%22SpaceGame%0A%0AA simple 2d space shooter made with python and pygame.%0A%22%22%22%0A%0A# Make sure you ha python34 and pygame 1.9.1+ installed before run this code.%0Aimport pygame%0Aimport pygame.locals as c%0A%0A# this module itself does nothing important, but its good to have pygame%0A# initialized as soon as possible.%0Apygame.init()%0A
|
|
00cdcceb131814b24546c36810682ed78ba866c6
|
Create database column class (DBCol)
|
pyfwk/struc/dbcol.py
|
pyfwk/struc/dbcol.py
|
Python
| 0
|
@@ -0,0 +1,567 @@
+#!/usr/bin/env python%0A%0A%22%22%22%0A dbcol.py: DBCol is a struct describing an sqlite database table column%0A%22%22%22%0A%0A%0A# ----------------------------DATABASE-COLUMN-----------------------------#%0Aclass DBCol:%0A name = None%0A datatype = None%0A%0A def __init__(self, name, datatype):%0A self.name = name%0A self.datatype = datatype%0A%0A%0A# ---------------------------------EXPORT---------------------------------#%0A__all__ = %5B'DBCol'%5D%0A%0A%0A# ----------------------------------MAIN----------------------------------#%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
891eef2354e4cf0a552e5c8023c2778bf45a3582
|
add py lib and the first py fiel
|
pylib/EncodingLib.py
|
pylib/EncodingLib.py
|
Python
| 0.000001
|
@@ -0,0 +1,1021 @@
+# coding=utf-8%0A%0Aimport sys%0A%0A# sys.stdout = codecs.lookup('iso8859-1')%5B-1%5D(sys.stdout)%0A%0Aprint 'System Encoding is', sys.getdefaultencoding()%0A%0A# python%E4%B8%AD%E7%9A%84str%E5%AF%B9%E8%B1%A1%E5%85%B6%E5%AE%9E%E5%B0%B1%E6%98%AF%228-bit string%22 %EF%BC%8C%E5%AD%97%E8%8A%82%E5%AD%97%E7%AC%A6%E4%B8%B2%EF%BC%8C%E6%9C%AC%E8%B4%A8%E4%B8%8A%E7%B1%BB%E4%BC%BCjava%E4%B8%AD%E7%9A%84byte%5B%5D%E3%80%82%0As_chinese = '%E4%B8%AD%E6%96%87'%0A# %E8%80%8Cpython%E4%B8%AD%E7%9A%84unicode%E5%AF%B9%E8%B1%A1%E5%BA%94%E8%AF%A5%E6%89%8D%E6%98%AF%E7%AD%89%E5%90%8C%E4%BA%8Ejava%E4%B8%AD%E7%9A%84String%E5%AF%B9%E8%B1%A1%EF%BC%8C%E6%88%96%E6%9C%AC%E8%B4%A8%E4%B8%8A%E6%98%AFjava%E7%9A%84char%5B%5D%E3%80%82%0As_unicode_chinese = u'%E4%B8%AD%E6%96%87'%0A%0Aprint 's_chinese is str', isinstance(s_chinese, str)%0Aprint 's_unicode_chinese is basestring', isinstance(s_unicode_chinese, basestring)%0A%0A# encoding list: https://docs.python.org/2.4/lib/standard-encodings.html%0Aprint u'%22%E4%B8%AD%E6%96%87%22%E7%9A%84unicode-escape%E5%8E%9F%E7%94%9F%E5%AD%97%E7%AC%A6%E4%B8%B2', repr(s_chinese.decode('unicode-escape'))%0Aprint u'%22%E4%B8%AD%E6%96%87%22%E7%9A%84gb18030%E5%8E%9F%E7%94%9F%E5%AD%97%E7%AC%A6%E4%B8%B2', repr(s_chinese.decode('gb18030'))%0Aprint u'%22%E4%B8%AD%E6%96%87%22%E7%9A%84utf-8%E5%8E%9F%E7%94%9F%E5%AD%97%E7%AC%A6%E4%B8%B2', repr(s_chinese.decode('utf-8'))%0Aprint u'%22%E4%B8%AD%E6%96%87%22%E7%9A%84utf-8%E5%8E%9F%E7%94%9F%E5%AD%97%E7%AC%A6%E4%B8%B2', repr(s_unicode_chinese) #%0A%0Aprint s_unicode_chinese.encode('utf-8')%0Aprint s_chinese == s_unicode_chinese%0A# print u'A good idea%5Cu00AE'.encode('latin-1')%0A# print s.encode('ascii', 'xmlcharrefreplace')%0A%0A# accept input and parse to int%0A# input = int(raw_input('come%3E'))%0A# print input + 2%0A
|
|
2644625e137963ef2982d7ff0a3241bfcbde1ac6
|
Prepend the logs with '...' if they aren't complete
|
raven_cron/runner.py
|
raven_cron/runner.py
|
from os import getenv, SEEK_END
from raven import Client
from subprocess import call
from tempfile import TemporaryFile
from argparse import ArgumentParser
from sys import argv, exit
from time import time
from .version import VERSION
MAX_MESSAGE_SIZE = 1000
parser = ArgumentParser(description='Wraps commands and reports failing ones to sentry')
# FIXME: Should we also use a configuration file ?
parser.add_argument(
'--dsn',
metavar='SENTRY_DSN',
default=getenv('SENTRY_DSN'),
help='Sentry server address',
)
parser.add_argument(
'--version',
action='version',
version=VERSION,
)
parser.add_argument(
'cmd',
nargs='*',
help='The command to run',
)
def run(args=argv[1:]):
opts = parser.parse_args(args)
runner = CommandReporter(**vars(opts))
runner.run()
class CommandReporter(object):
def __init__(self, cmd, dsn):
if len(cmd) <= 1:
cmd = cmd[0]
self.dsn = dsn
self.command = cmd
self.client = None
def run(self):
buf = TemporaryFile()
start = time()
exit_status = call(self.command, stdout=buf, stderr=buf, shell=True)
if exit_status > 0:
elapsed = time() - start
self.report_fail(exit_status, buf, elapsed)
buf.close()
def report_fail(self, exit_status, buf, elapsed):
if self.dsn is None:
return
# Hack to get the file size since the tempfile doesn't exist anymore
buf.seek(0, SEEK_END)
if buf.tell() < MAX_MESSAGE_SIZE:
buf.seek(0)
else:
buf.seek(-MAX_MESSAGE_SIZE, SEEK_END)
last_lines = buf.read()
message="Command %s exited with exit status %d" % (self.command, exit_status)
#print message
if self.client is None:
self.client = Client(dsn=self.dsn)
# FIXME: extras are not displayed
self.client.captureMessage(
message,
extra={
'command': self.command,
'exit_status': exit_status,
'last_lines': last_lines,
},
time_spent=elapsed
)
|
Python
| 0.00028
|
@@ -1534,21 +1534,51 @@
-if buf.tell()
+file_size = buf.tell()%0A if file_size
%3C M
@@ -1618,16 +1618,52 @@
seek(0)%0A
+ last_lines = buf.read()%0A
@@ -1690,16 +1690,17 @@
f.seek(-
+(
MAX_MESS
@@ -1707,16 +1707,19 @@
AGE_SIZE
+-3)
, SEEK_E
@@ -1722,17 +1722,20 @@
EK_END)%0A
-%0A
+
@@ -1738,32 +1738,40 @@
last_lines =
+ '...' +
buf.read()%0A%0A
@@ -1857,39 +1857,8 @@
us)%0A
- #print message%0A
%0A
@@ -1975,32 +1975,36 @@
isplayed%0A
+ x =
self.client.cap
|
e66a690271f23fc2a4904e446bbdf0bf6b491a60
|
Add manager migration
|
osf/migrations/0028_auto_20170504_1548.py
|
osf/migrations/0028_auto_20170504_1548.py
|
Python
| 0.000002
|
@@ -0,0 +1,415 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11 on 2017-05-04 20:48%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('osf', '0027_auto_20170428_1435'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelOptions(%0A name='subject',%0A options=%7B'base_manager_name': 'objects'%7D,%0A ),%0A %5D%0A
|
|
84e3475158797a60312068c284aa8d61d9466c6e
|
add model
|
www/models.py
|
www/models.py
|
Python
| 0
|
@@ -0,0 +1,709 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A'''%0AModels for user, blog, comment%0A'''%0A%0A__author__ = 'Ian Zheng'%0A%0Aimport time, uuid%0A%0Afrom www.orm import Model, StringField, BooleanField, IntegerField, FloatField%0A%0Adef next_id():%0A return '%25015d%25s000' %25 (int(time.time() * 1000), uuid.uuid4().hex)%0A%0Aclass User(Model):%0A __table__ = 'users'%0A%0A id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')%0A email = StringField(ddl='varchar(50')%0A passwd = StringField(ddl='varchar(50)')%0A admin = BooleanField()%0A name = StringField(ddl='varchar(50)')%0A image = StringField(ddl='varchar(500)')%0A created_at = FloatField(default=time.time)%0A%0Aclass Blog(Model):%0A __table__ = 'blogs'%0A%0A
|
|
3e9d5f9cf1c28619422cb012e532e776c4cc8b99
|
fix bug 1369498: remove adi-related tables and stored procedures
|
alembic/versions/eb8269f6bb85_bug_1369498_remove_adi.py
|
alembic/versions/eb8269f6bb85_bug_1369498_remove_adi.py
|
Python
| 0
|
@@ -0,0 +1,901 @@
+%22%22%22bug 1369498 remove adi%0A%0ARemove ADI-related tables and stored procedures.%0A%0ARevision ID: eb8269f6bb85%0ARevises: 0db05da17ae8%0ACreate Date: 2018-07-19 20:00:52.933551%0A%0A%22%22%22%0A%0Afrom alembic import op%0A%0A%0A# revision identifiers, used by Alembic.%0Arevision = 'eb8269f6bb85'%0Adown_revision = '0db05da17ae8'%0A%0A%0Adef upgrade():%0A # Remove tables%0A for table in ('raw_adi_logs',%0A 'raw_adi',%0A 'build_adu',%0A 'product_adu'):%0A op.execute('DROP TABLE IF EXISTS %25s' %25 table)%0A%0A # Remove stored procedures%0A for proc in ('backfill_adu(date)',%0A 'backfill_build_adu(date)',%0A 'backfill_matviews(date, date, boolean, interval)',%0A 'update_adu(date, boolean)',%0A 'update_build_adu(date, boolean)'):%0A op.execute('DROP FUNCTION IF EXISTS %25s' %25 proc)%0A%0A%0Adef downgrade():%0A # No going back%0A pass%0A
|
|
4f5ce4af85971ea3c15c90b8a482b611b8bf6c4c
|
move logging code to evaluation directory
|
src/evaluation/log.py
|
src/evaluation/log.py
|
Python
| 0
|
@@ -0,0 +1,813 @@
+%22%22%22 This module provides a globally accessible%0A logger created from the config file %22%22%22%0A%0Aimport logging%0Aimport os%0A%0Adef _create_logger_from_config():%0A %22%22%22 Create the logger from the config file %22%22%22%0A conf = %7B%0A %22name%22: %22StackLogger%22,%0A %22log_file%22: %22logs/experiment.log%22,%0A %22format%22: %22%25(asctime)s %25(levelname)s %5Cn %3E%3E%3E %25(message)s%22,%0A %22level%22: logging.DEBUG%0A %7D%0A%0A logging.basicConfig(format=conf%5B%22format%22%5D)%0A logger = logging.getLogger(conf%5B%22name%22%5D)%0A%0A # if need to write to the log file%0A log_file = conf%5B'log_file'%5D%0A if log_file is not None:%0A handler = logging.FileHandler(conf%5B'log_file'%5D)%0A logger.addHandler(handler)%0A%0A logger.setLevel(conf%5B%22level%22%5D)%0A logger.debug(%22Logger initialized%22)%0A%0A return logger%0A%0ALOGGER = _create_logger_from_config()%0A%0A
|
|
6d93ad1df3eb4a50038b7429fe9ed98a8d44af6f
|
add solution for Divide Two Integers
|
src/divideTwoIntegers.py
|
src/divideTwoIntegers.py
|
Python
| 0.000184
|
@@ -0,0 +1,572 @@
+class Solution:%0A # @return an integer%0A%0A def divide(self, dividend, divisor):%0A if divisor == 0:%0A return 2147483647%0A positive = (dividend %3C 0) is (divisor %3C 0)%0A dividend, divisor = abs(dividend), abs(divisor)%0A res = 0%0A while dividend %3E= divisor:%0A tmp, i = divisor, 1%0A while dividend %3E= tmp:%0A dividend -= tmp%0A res += i%0A tmp %3C%3C= 1%0A i %3C%3C= 1%0A if not positive:%0A res = -res%0A return min(max(res, -2147483648), 2147483647)%0A
|
|
51466e360320267afab41704caecebac0dff1dc2
|
Add a handler for performing client load testing.
|
src/example/bench_wsh.py
|
src/example/bench_wsh.py
|
Python
| 0.000053
|
@@ -0,0 +1,2322 @@
+# Copyright 2011, Google Inc.%0A# All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions are%0A# met:%0A#%0A# * Redistributions of source code must retain the above copyright%0A# notice, this list of conditions and the following disclaimer.%0A# * Redistributions in binary form must reproduce the above%0A# copyright notice, this list of conditions and the following disclaimer%0A# in the documentation and/or other materials provided with the%0A# distribution.%0A# * Neither the name of Google Inc. nor the names of its%0A# contributors may be used to endorse or promote products derived from%0A# this software without specific prior written permission.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS%0A# %22AS IS%22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT%0A# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR%0A# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT%0A# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,%0A# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT%0A# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,%0A# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY%0A# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT%0A# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE%0A# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A%0A%0A%22%22%22A simple load tester for WebSocket clients.%0A%0AA client program sends a message formatted as %22%3Ctime%3E %3Ccount%3E %3Cmessage%3E%22 to%0Athis handler. This handler starts sending total %3Ccount%3E WebSocket messages%0Acontaining %3Cmessage%3E every %3Ctime%3E seconds. %3Ctime%3E can be a floating point%0Avalue. 
%3Ccount%3E must be an integer value.%0A%22%22%22%0A%0A%0Aimport time%0A%0A%0Adef web_socket_do_extra_handshake(request):%0A pass # Always accept.%0A%0A%0Adef web_socket_transfer_data(request):%0A line = request.ws_stream.receive_message()%0A parts = line.split(' ')%0A if len(parts) != 3:%0A raise ValueError('Bad parameter format')%0A wait = float(parts%5B0%5D)%0A count = int(parts%5B1%5D)%0A message = parts%5B2%5D%0A for i in xrange(count):%0A request.ws_stream.send_message(message)%0A time.sleep(wait)%0A%0A%0A# vi:sts=4 sw=4 et%0A
|
|
8dbc2dd48d1d0e25972ad359464694d352d58705
|
add transpilation of the arangodb social graph
|
examples/createSocialGraph.py
|
examples/createSocialGraph.py
|
Python
| 0.00189
|
@@ -0,0 +1,2180 @@
+#!/usr/bin/python%0Aimport sys%0Afrom pyArango.connection import *%0Afrom pyArango.graph import *%0Afrom pyArango.collection import *%0A%0A%0Aclass Social(object):%0A class male(Collection) :%0A _fields = %7B%0A %22name%22 : Field()%0A %7D%0A %0A class female(Collection) :%0A _fields = %7B%0A %22name%22 : Field()%0A %7D%0A %0A class relation(Edges) :%0A _fields = %7B%0A %22number%22 : Field()%0A %7D%0A %0A class social(Graph) :%0A%0A _edgeDefinitions = (EdgeDefinition ('relation',%0A fromCollections = %5B%22female%22, %22male%22%5D,%0A toCollections = %5B%22female%22, %22male%22%5D),)%0A _orphanedCollections = %5B%5D%0A%0A%0A def __init__(self):%0A self.conn = Connection(username=%22USERNAME%22, password=%22SECRET%22)%0A %0A self.db = self.conn%5B%22_system%22%5D%0A if self.db.hasGraph('social'):%0A raise Exception(%22The social graph was already provisioned! remove it first%22)%0A%0A self.female = self.db.createCollection(%22female%22)%0A self.male = self.db.createCollection(%22male%22)%0A %0A self.relation = self.db.createCollection(%22relation%22)%0A %0A g = self.db.createGraph(%22social%22)%0A %0A a = g.createVertex('female', %7B%22name%22: 'Alice', %22_key%22: 'alice'%7D);%0A b = g.createVertex('male', %7B%22name%22: 'Bob', %22_key%22: 'bob'%7D);%0A c = g.createVertex('male', %7B%22name%22: 'Charly', %22_key%22: 'charly'%7D);%0A d = g.createVertex('female', %7B%22name%22: 'Diana', %22_key%22: 'diana'%7D);%0A a.save()%0A b.save()%0A c.save()%0A d.save()%0A%0A g.link('relation', a, b, %7B%22type%22: 'married', %22_key%22: 'aliceAndBob'%7D)%0A g.link('relation', a, c, %7B%22type%22: 'friend', %22_key%22: 'aliceAndCharly'%7D)%0A g.link('relation', c, d, %7B%22type%22: 'married', %22_key%22: 'charlyAndDiana'%7D)%0A g.link('relation', b, d, %7B%22type%22: 'friend', %22_key%22: 'bobAndDiana'%7D)%0A%0A%0ASocial()%0A
|
|
3af86cf1521170ffeb886802f4a96f403e86bf82
|
add title
|
src/slidegen/DataProvider.py
|
src/slidegen/DataProvider.py
|
from glob import glob
import random
class DataProviderBase(object):
def __init__(self):
'''
Init your data provider
'''
raise NotImplementedError('Please ')
def image(self, size):
'''
size in ['big', 'medium', 'small']
big for full page background
medium for a foreground image
small for half-page image
'''
raise NotImplementedError('Please ')
def topic(self):
'''
generate a slide topic
'''
raise NotImplementedError('Please ')
def title(self):
'''
generate a slide page title
'''
raise NotImplementedError('Please ')
def text(self):
'''
generate an paragraph for slide content
'''
raise NotImplementedError('Please ')
class DataProvider(DataProviderBase):
def __init__(self):
self.counter = 0
def image(self, size='medium'):
return '../' + random.choice(glob('data/img/*'))
def who(self):
return random.choice([
'Denny', 'RS', '開源社長', '大萌神', '畢總召', 'Mouse',
'Inndy', 'Jenny', 'HackNTU 總召', '蟆總統', '王立委'
])
def company(self):
return random.choice([
'Elppa', 'Alset', 'MBI', 'LargeHard',
'Elcaro', 'Allizom', 'Edonil', 'SUSA',
'Reca', 'Elgoog', 'Letin', 'CafeGot',
'大硬鐵門'
])
def product(self):
return random.choice([
'CamKoob', 'PhoneI', 'Suxen', 'TenPhone',
'個資', '雲端平台', '開發版', '作業系統',
'作業系統'
])
def buzz(self):
return random.choice([
'雲端', '物聯網', '大數據', '機器學習', '深度學習',
'駭客', '創業', '開源', '資料探勘', '敏捷開發', '矽谷'
])
def topic(self):
return ''.join(set(self.buzz() for _ in range(4)))
def title(self):
return random.choice([
'十億人都驚呆了!%s憑空產生能源' % self.company(),
'%s推出新款%s,完全開源!' % (self.company(), self.product()),
'%s宣布,%s旗下%s即將開源' % (self.who(), self.company(), self.product()),
'%s %s即將全面開源?!' % (self.company(), self.product()),
'%s宣布%s的bug全部當成feature' % (self.who(), self.product()),
'只有%s才是正義,其他的都應該廢除' % (self.product()),
'這邊有一批好便宜的%s啊' % (self.product()),
'%s旗下的%s將會內建%s' % (self.company(), self.product(), self.product())
])
def text(self):
return random.choice([
'ㄗㄘ的個資在我手上',
'我想要睡覺,很睏很睏'
])
if __name__ == '__main__':
p = DataProvider()
print('Topic = %s' % p.topic())
print('Topic = %s' % p.topic())
print('Topic = %s' % p.topic())
print('Topic = %s' % p.topic())
print('Title = %s' % p.title())
print('Title = %s' % p.title())
print('Title = %s' % p.title())
print('Title = %s' % p.title())
print('Title = %s' % p.title())
print('Title = %s' % p.title())
|
Python
| 0.999996
|
@@ -2393,24 +2393,169 @@
.product())%0A
+ '%25s%E6%9C%83%E5%9C%A8%E8%BF%91%E6%9C%9F%E5%85%A7%E6%8E%A8%E5%87%BA%25s' %25 (self.company(), self.product()),%0A '%E5%95%8A%E5%93%88%E5%93%88%E5%93%88%E4%BD%A0%E7%9C%8B%E7%9C%8B%E4%BD%A0',%0A '%25s%E6%9C%AA%E4%BE%86%E5%B0%87%E5%8F%96%E4%BB%A3%25s', (self.product(), self.product()),%0A
%5D)%0A%0A
|
06d2d7dd155f5ac888a8c0d2d9c45c61b95de714
|
update tests for thresholding for ecm
|
CPAC/network_centrality/tests/test_thresh_and_sum.py
|
CPAC/network_centrality/tests/test_thresh_and_sum.py
|
Python
| 0
|
@@ -0,0 +1,2206 @@
+%22%22%22%0AThis tests the functions in network_centrality/thresh_and_sum.pyx%0A%22%22%22%0A%0Aimport os, sys%0Aimport numpy as np%0Afrom numpy.testing import *%0A%0Afrom nose.tools import ok_, eq_, raises, with_setup%0Afrom nose.plugins.attrib import attr # http://nose.readthedocs.org/en/latest/plugins/attrib.html%0A%0Aimport sys%0Asys.path.insert(0, '/home2/data/Projects/CPAC_Regression_Test/zarrar/centrality_tests/lib/nipype')%0Asys.path.insert(1, '/home2/data/Projects/CPAC_Regression_Test/zarrar/centrality_tests/lib/C-PAC')%0A%0A# For eigen centrality%0Afrom CPAC.network_centrality.thresh_and_sum import %5C%0A thresh_binarize_float, thresh_binarize_double, %5C%0A thresh_weighted_float, thresh_weighted_double, %5C%0A thresh_transform_weighted_float, thresh_transform_weighted_double%0A%0A# For degree centrality%0Afrom CPAC.network_centrality.thresh_and_sum import %5C%0A centrality_binarize_float, centrality_binarize_double, %5C%0A centrality_weighted_float, centrality_weighted_double, %5C%0A centrality_both_float, centrality_both_double # these aren't currently used%0A%0A%0A###%0A# TEST thresholding of matrices for eigenvector centrality%0A###%0A%0Adef test_thresh_binarize():%0A print %22testing threshold binarize%22%0A %0A nvoxs = 1000%0A r_value = 0.2%0A corr_matrix = np.random.random((nvoxs, nvoxs)).astype('float32')%0A%0A ref = 1*(corr_matrix%3Er_value)%0A%0A comp = corr_matrix.copy()%0A thresh_binarize_float(comp, r_value)%0A%0A assert_equal(ref, comp)%0A%0Adef test_thresh_weighted():%0A print %22testing threshold weighted%22%0A %0A nvoxs = 1000%0A r_value = 0.2%0A corr_matrix = np.random.random((nvoxs, nvoxs)).astype('float32')%0A %0A ref = corr_matrix*(corr_matrix%3Er_value)%0A%0A comp = corr_matrix.copy()%0A thresh_weighted_float(comp, r_value)%0A %0A assert_equal(ref, comp)%0A%0Adef test_thresh_transform_weighted():%0A print %22testing threshold weighted%22%0A %0A nvoxs = 1000%0A r_value = 0.2%0A corr_matrix = np.random.random((nvoxs, 
nvoxs)).astype('float32')%0A %0A ref = ((1.0+corr_matrix)/2.0)*(corr_matrix%3Er_value)%0A%0A comp = corr_matrix.copy()%0A thresh_transform_weighted_float(comp, r_value)%0A %0A assert_equal(ref, comp)%0A%0A%0A###%0A# TEST centrality functions%0A###%0A
|
|
82fa373c46581e84f8e5ea0da733ef5c65928165
|
Update MultipleParticleSystems.pyde
|
mode/examples/Topics/Simulate/MultipleParticleSystems/MultipleParticleSystems.pyde
|
mode/examples/Topics/Simulate/MultipleParticleSystems/MultipleParticleSystems.pyde
|
"""
Multiple Particle Systems
by Daniel Shiffman.
Click the mouse to generate a burst of particles
at mouse location.
Each burst is one instance of a particle system
with Particles and CrazyParticles (a subclass of Particle).
Note use of Inheritance and Polymorphism here.
"""
from crazy_particle import CrazyParticle
from particle import Particle
from particle_system import ParticleSystem
systems = None
def setup():
global systems
size(640, 360)
systems = []
def draw():
background(0)
for ps in systems:
ps.run()
ps.addParticle()
if not systems:
fill(255)
textAlign(CENTER)
text("click mouse to add particle systems", width / 2, height / 2)
def mousePressed():
systems.append(ParticleSystem(1, PVector(mouseX, mouseY)))
|
Python
| 0
|
@@ -794,8 +794,13 @@
useY)))%0A
+ %0A
|
70bc8413dc3748f606e76f5e4e4abcde6b851cdd
|
Read and UDP
|
bari_spitter.py
|
bari_spitter.py
|
Python
| 0.000001
|
@@ -0,0 +1,1108 @@
+#!/usr/bin/python3%0A# coding=utf-8%0A%22%22%22reads barometric pressure sensor and writes it to UDP socket with timestamp%0A%22%22%22%0Aimport socket%0Afrom datetime import datetime%0Afrom time import sleep%0Afrom time import time%0A%0Aimport ms5637%0A%0A__author__ = 'Moe'%0A__copyright__ = 'Copyright 2017 Moe'%0A__license__ = 'MIT'%0A__version__ = '0.0.2'%0A%0A# Bari sensor of MS5637%0Asensor = ms5637.Chip()%0Abari_file = 'bari_data.csv'%0A%0A%0AUDP_IP = %22192.168.0.2%22 # Big Machine%0AUDP_PORT = 6421 # bARI port%0AMESSAGE = %22Get ready to rumble.%22%0A%0Aprint(%22UDP target IP:%22, UDP_IP)%0Aprint(%22UDP target port:%22, UDP_PORT)%0Aprint(%22message:%22, MESSAGE)%0A%0Awhile True:%0A try:%0A now = time()%0A humantime = datetime.fromtimestamp(now).strftime('%25Y-%25m-%25dT%25H:%25M:%25S')%0A pressure, _temperature = sensor.get_data()%0A%0A except OSError:%0A sensor.__init__()%0A pressure, temperatue = sensor.get_data()%0A%0A finally:%0A outstring = str(humantime) + ', ' + str(pressure)%0A outstring = outstring.encode()%0A%0A sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)%0A sock.sendto(outstring, (UDP_IP, UDP_PORT))%0A%0A sleep(1)%0A
|
|
6811b4014fc0267edf4d397ccab86b0e986c2215
|
Implement the INFO command
|
txircd/modules/rfc/cmd_info.py
|
txircd/modules/rfc/cmd_info.py
|
Python
| 0.999339
|
@@ -0,0 +1,1207 @@
+from twisted.plugin import IPlugin%0Afrom twisted.words.protocols import irc%0Afrom txircd import version%0Afrom txircd.module_interface import Command, ICommand, IModuleData, ModuleData%0Afrom zope.interface import implements%0A%0Aclass InfoCommand(ModuleData, Command):%0A implements(IPlugin, IModuleData, ICommand)%0A %0A name = %22InfoCommand%22%0A core = True%0A %0A def hookIRCd(self, ircd):%0A self.ircd = ircd%0A %0A def userCommands(self):%0A return %5B (%22INFO%22, 1, self) %5D%0A %0A def parseParams(self, user, params, prefix, tags):%0A return %7B%7D%0A %0A def execute(self, user, data):%0A user.sendMessage(irc.RPL_INFO, %22:%7B%7D is running txircd-%7B%7D%22.format(self.ircd.name, version))%0A user.sendMessage(irc.RPL_INFO, %22:Originally developed for the Desert Bus for Hope charity fundraiser (http://desertbus.org)%22)%0A user.sendMessage(irc.RPL_INFO, %22:%22)%0A user.sendMessage(irc.RPL_INFO, %22:Developed by ElementalAlchemist %3CElementAlchemist7@gmail.com%3E%22)%0A user.sendMessage(irc.RPL_INFO, %22:Contributors:%22)%0A user.sendMessage(irc.RPL_INFO, %22: Heufneutje%22)%0A user.sendMessage(irc.RPL_ENDOFINFO, %22:End of /INFO list%22)%0A return True%0A%0AinfoCmd = InfoCommand()
|
|
0f199556df6bd498f01cccdce6316b733c876acc
|
Add migration file
|
InvenTree/part/migrations/0046_auto_20200804_0107.py
|
InvenTree/part/migrations/0046_auto_20200804_0107.py
|
Python
| 0.000001
|
@@ -0,0 +1,480 @@
+# Generated by Django 3.0.7 on 2020-08-04 01:07%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('part', '0045_auto_20200605_0932'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='partcategory',%0A name='default_keywords',%0A field=models.CharField(blank=True, help_text='Default keywords for parts in this category', max_length=250, null=True),%0A ),%0A %5D%0A
|
|
f8fcae7dd7579b51c3c204337dfa70c702fdbf38
|
add new namedtuple Chunk
|
AlphaTwirl/HeppyResult/Chunk.py
|
AlphaTwirl/HeppyResult/Chunk.py
|
Python
| 0.00003
|
@@ -0,0 +1,362 @@
+# Tai Sakuma %3Ctai.sakuma@cern.ch%3E%0A%0A##__________________________________________________________________%7C%7C%0Aimport collections%0A%0A##__________________________________________________________________%7C%7C%0AChunk = collections.namedtuple('Chunk', 'inputPath treeName maxEvents start component name')%0A%0A##__________________________________________________________________%7C%7C%0A
|
|
83986f6ade666e5f12ae599048369ecdd9856737
|
VISIBLE not visible
|
src/sentry/api/endpoints/team_project_index.py
|
src/sentry/api/endpoints/team_project_index.py
|
from __future__ import absolute_import
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.team import TeamEndpoint
from sentry.api.serializers import serialize
from sentry.models import Project, ProjectStatus, AuditLogEntryEvent
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('ListTeamProjects')
def list_team_projects_scenario(runner):
runner.request(
method='GET',
path='/teams/%s/%s/projects/' % (
runner.org.slug, runner.default_team.slug)
)
@scenario('CreateNewProject')
def create_project_scenario(runner):
runner.request(
method='POST',
path='/teams/%s/%s/projects/' % (
runner.org.slug, runner.default_team.slug),
data={
'name': 'The Spoiled Yoghurt'
}
)
class ProjectSerializer(serializers.Serializer):
name = serializers.CharField(max_length=200, required=True)
slug = serializers.CharField(max_length=200, required=False)
class TeamProjectIndexEndpoint(TeamEndpoint):
doc_section = DocSection.TEAMS
@attach_scenarios([list_team_projects_scenario])
def get(self, request, team):
"""
List a Team's Projects
``````````````````````
Return a list of projects bound to a team.
:pparam string organization_slug: the slug of the organization the
team belongs to.
:pparam string team_slug: the slug of the team to list the projects of.
:auth: required
"""
if request.user.is_authenticated():
results = list(Project.objects.get_for_user(
team=team, user=request.user))
else:
# TODO(dcramer): status should be selectable
results = list(Project.objects.filter(
team=team,
status=ProjectStatus.visible,
))
return Response(serialize(results, request.user))
@attach_scenarios([create_project_scenario])
def post(self, request, team):
"""
Create a New Project
````````````````````
Create a new project bound to a team.
:pparam string organization_slug: the slug of the organization the
team belongs to.
:pparam string team_slug: the slug of the team to create a new project
for.
:param string name: the name for the new project.
:param string slug: optionally a slug for the new project. If it's
not provided a slug is generated from the name.
:auth: required
"""
serializer = ProjectSerializer(data=request.DATA)
if serializer.is_valid():
result = serializer.object
project = Project.objects.create(
name=result['name'],
slug=result.get('slug'),
organization=team.organization,
team=team
)
# XXX: create sample event?
self.create_audit_entry(
request=request,
organization=team.organization,
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_ADD,
data=project.get_audit_log_data(),
)
return Response(serialize(project, request.user), status=201)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
Python
| 0.999528
|
@@ -1953,15 +1953,15 @@
tus.
-visible
+VISIBLE
,%0A
|
b5dd2894120ceee04ca43fd6f408e2404ccd12ad
|
Include ethash w/macos build
|
factories/cpp_ethereum_osx.py
|
factories/cpp_ethereum_osx.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: caktux
# @Date: 2015-02-23 15:00:28
# @Last Modified by: caktux
# @Last Modified time: 2015-04-05 23:12:26
import factory
reload(factory)
from factory import *
import cpp_ethereum
reload(cpp_ethereum)
from cpp_ethereum import *
def cmake_osx_cmd(cmd=[], ccache=True, evmjit=False, headless=True):
cmd.append("-DFATDB=1")
cmd.append("-DBUNDLE=default")
if headless:
cmd.append("-DGUI=0")
if evmjit:
for opt in [
"-DLLVM_DIR=/usr/local/opt/llvm/share/llvm/cmake",
"-DEVMJIT=1"
]: cmd.append(opt)
elif ccache:
cmd.append("-DCMAKE_CXX_COMPILER=/usr/local/opt/ccache/libexec/g++")
return cmd
def osx_cpp_check_factory(branch='develop'):
factory = BuildFactory()
scan_build_path = "/usr/local/opt/llvm/share/clang/tools/scan-build"
analyzer = "c++-analyzer"
for step in [
Git(
haltOnFailure = True,
logEnviron = False,
repourl='https://github.com/ethereum/cpp-ethereum.git',
branch=branch,
mode='full',
method = 'copy',
codebase='cpp-ethereum',
retry=(5, 3)
),
Configure(
haltOnFailure = True,
logEnviron = False,
command=["cmake", ".", "-DCMAKE_CXX_COMPILER=%s/%s" % (scan_build_path, analyzer)]
),
Compile(
logEnviron = False,
name = "scan-build",
command = ["%s/scan-build" % scan_build_path, "--use-analyzer=%s/%s" % (scan_build_path, analyzer), "make", "-j", "6"]
)
]: factory.addStep(step)
return factory
# C++
def osx_cpp_factory(branch='develop', isPullRequest=False, evmjit=False, headless=True):
factory = BuildFactory()
for step in [
Git(
haltOnFailure = True,
logEnviron = False,
repourl = 'https://github.com/ethereum/cpp-ethereum.git',
branch = branch,
mode = 'full',
method = 'copy',
codebase = 'cpp-ethereum',
retry=(5, 3)
),
Git(
haltOnFailure = True,
logEnviron = False,
repourl='https://github.com/ethereum/tests.git',
branch=branch,
mode='incremental',
codebase='tests',
retry=(5, 3),
workdir='tests'
),
SetPropertyFromCommand(
haltOnFailure = True,
logEnviron = False,
name = "set-protocol",
command='sed -ne "s/.*c_protocolVersion = \(.*\);/\\1/p" libethcore/Common%s.cpp' % ("Eth" if branch == 'master' else ""),
property="protocol"
),
SetPropertyFromCommand(
haltOnFailure = True,
logEnviron = False,
name = "set-version",
command='sed -ne "s/.*Version = \\"\(.*\)\\";/\\1/p" libdevcore/Common.cpp',
property="version"
),
Configure(
haltOnFailure = True,
logEnviron = False,
command = cmake_osx_cmd(['cmake', '.'], evmjit=evmjit, headless=headless)
),
Compile(
haltOnFailure = True,
logEnviron = False,
command = "make -j $(sysctl -n hw.ncpu)"
)
]: factory.addStep(step)
if not headless:
for step in [
ShellCommand(
haltOnFailure = True,
logEnviron = False,
name = "make-install",
description = 'running make install',
descriptionDone= 'make install',
command = ['make', 'install'],
workdir = 'build/alethzero'
),
ShellCommand(
haltOnFailure = True,
logEnviron = False,
name = "make-install-mix",
description = 'running mix make install',
descriptionDone= 'make install mix',
command = ['make', 'install'],
workdir = 'build/mix'
)
]: factory.addStep(step)
for step in [
Test(
haltOnFailure = True,
warnOnFailure = True,
logEnviron = False,
name="test-cpp",
description="testing",
descriptionDone="test",
command=testeth_cmd(["./testeth"], evmjit=evmjit),
env={'CTEST_OUTPUT_ON_FAILURE': '1', 'ETHEREUM_TEST_PATH': Interpolate('%(prop:workdir)s/tests')},
workdir="build/test",
maxTime=900
),
]: factory.addStep(step)
# Trigger check
if not evmjit and not headless:
for step in [
Trigger(
schedulerNames=["cpp-ethereum-%s-osx-check" % branch],
waitForFinish=False,
set_properties={
"protocol": Interpolate("%(prop:protocol)s"),
"version": Interpolate("%(prop:version)s")
}
)
]: factory.addStep(step)
# Trigger deb builders
if not evmjit and headless:
if not isPullRequest:
for step in [
Trigger(
schedulerNames=["cpp-ethereum-%s-brew" % branch],
waitForFinish=False,
set_properties={
"protocol": Interpolate("%(prop:protocol)s"),
"version": Interpolate("%(prop:version)s")
}
)
]: factory.addStep(step)
# Package AlethZero.app
if not isPullRequest and not headless:
for step in [
Compile(
haltOnFailure = True,
logEnviron = False,
command = "make -j $(sysctl -n hw.ncpu) appdmg"
),
SetPropertyFromCommand(
haltOnFailure = True,
logEnviron = False,
name = "set-sha1sum",
command = Interpolate('sha1sum Ethereum.dmg | grep -o -w "\w\{40\}"'),
property = 'sha1sum'
),
SetProperty(
description="setting filename",
descriptionDone="set filename",
name="set-filename",
property="filename",
value=Interpolate("AlethZero-OSX-%(kw:time_string)s-%(prop:version)s-%(prop:protocol)s-%(kw:short_revision)s.dmg", time_string=get_time_string, short_revision=get_short_revision)
),
FileUpload(
haltOnFailure = True,
name = 'upload-alethzero',
slavesrc="Ethereum.dmg",
masterdest = Interpolate("public_html/builds/%(prop:buildername)s/%(prop:filename)s"),
url = Interpolate("builds/%(prop:buildername)s/%(prop:filename)s")
),
MasterShellCommand(
name = "clean-latest-link",
description = 'cleaning latest link',
descriptionDone= 'clean latest link',
command = ['rm', '-f', Interpolate("public_html/builds/%(prop:buildername)s/AlethZero-OSX-latest.dmg")]
),
MasterShellCommand(
haltOnFailure = True,
name = "link-latest",
description = 'linking latest',
descriptionDone= 'link latest',
command = ['ln', '-sf', Interpolate("%(prop:filename)s"), Interpolate("public_html/builds/%(prop:buildername)s/AlethZero-OSX-latest.dmg")]
)
]: factory.addStep(step)
return factory
|
Python
| 0
|
@@ -417,16 +417,47 @@
fault%22)%0A
+ cmd.append(%22-DETHASHCL=1%22)%0A
if h
|
658cefded99d140db212b9525f791ce2e0336472
|
Fix NameError in custom_csrf_failure
|
pydotorg/views.py
|
pydotorg/views.py
|
from django.http import HttpResponseForbidden
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.translation import ugettext as _
from django.utils.version import get_docs_version
from django.views.csrf import CSRF_FAILURE_TEMPLATE, CSRF_FAILURE_TEMPLATE_NAME
from django.views.generic.base import TemplateView
from codesamples.models import CodeSample
from downloads.models import Release
def custom_csrf_failure(request, reason=''):
from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
c = {
'title': _("Forbidden"),
'main': _("CSRF verification failed. Request aborted."),
'reason': reason,
'no_referer': reason == REASON_NO_REFERER,
'no_referer1': _(
"You are seeing this message because this HTTPS site requires a "
"'Referer header' to be sent by your Web browser, but none was "
"sent. This header is required for security reasons, to ensure "
"that your browser is not being hijacked by third parties."),
'no_referer2': _(
"If you have configured your browser to disable 'Referer' headers, "
"please re-enable them, at least for this site, or for HTTPS "
"connections, or for 'same-origin' requests."),
'no_cookie': reason == REASON_NO_CSRF_COOKIE,
'no_cookie1': _(
"You are seeing this message because this site requires a CSRF "
"cookie when submitting forms. This cookie is required for "
"security reasons, to ensure that your browser is not being "
"hijacked by third parties."),
'no_cookie2': _(
"If you have configured your browser to disable cookies, please "
"re-enable them, at least for this site, or for 'same-origin' "
"requests."),
# TODO: Customized this to get more information.
'DEBUG': True,
'docs_version': get_docs_version(),
'more': _("More information is available with DEBUG=True."),
}
try:
t = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name == CSRF_FAILURE_TEMPLATE_NAME:
# If the default template doesn't exist, use the string template.
t = Engine().from_string(CSRF_FAILURE_TEMPLATE)
c = Context(c)
else:
# Raise if a developer-specified template doesn't exist.
raise
return HttpResponseForbidden(t.render(c), content_type='text/html')
class IndexView(TemplateView):
    """Site front page: renders the index template with recent code samples."""

    template_name = "python/index.html"

    def get_context_data(self, **kwargs):
        # Extend the base TemplateView context with the five most
        # recently published code samples for the homepage.
        ctx = super().get_context_data(**kwargs)
        ctx['code_samples'] = CodeSample.objects.published()[:5]
        return ctx
class DocumentationIndexView(TemplateView):
    """Documentation landing page exposing the latest Python 2 and 3 releases."""

    template_name = 'python/documentation.html'

    def get_context_data(self, **kwargs):
        # Augment the base context with the most recent release of each
        # major Python line, as resolved by the Release manager.
        ctx = super().get_context_data(**kwargs)
        ctx.update(
            latest_python2=Release.objects.latest_python2(),
            latest_python3=Release.objects.latest_python3(),
        )
        return ctx
|
Python
| 0.000008
|
@@ -2102,29 +2102,42 @@
emplate(
-template_name
+CSRF_FAILURE_TEMPLATE_NAME
)%0A ex
@@ -2167,68 +2167,8 @@
st:%0A
- if template_name == CSRF_FAILURE_TEMPLATE_NAME:%0A
@@ -2237,20 +2237,16 @@
mplate.%0A
-
@@ -2301,20 +2301,16 @@
-
c = Cont
@@ -2320,109 +2320,8 @@
(c)%0A
- else:%0A # Raise if a developer-specified template doesn't exist.%0A raise%0A
|
f531eb7d1734d6d715893356a50d11eee6bc009a
|
Test mobile set password form
|
corehq/apps/users/tests/forms.py
|
corehq/apps/users/tests/forms.py
|
Python
| 0
|
@@ -0,0 +1,1037 @@
+from collections import namedtuple%0Afrom django.contrib.auth import get_user_model%0Afrom django.test import TestCase%0Afrom corehq.apps.users.forms import SetUserPasswordForm%0A%0AProject = namedtuple('Project', %5B'name', 'strong_mobile_passwords'%5D)%0A%0A%0Aclass TestSetUserPasswordForm(TestCase):%0A def setUp(self):%0A super(TestSetUserPasswordForm, self).setUp()%0A self.project = Project('mydomain', True)%0A self.user = get_user_model().objects.create_user('tswift')%0A%0A def tearDown(self):%0A self.user.delete()%0A super(TestSetUserPasswordForm, self).tearDown()%0A%0A def form(self, password):%0A return SetUserPasswordForm(self.project, user_id=self.user.id, user=self.user, data=%7B%0A %22new_password1%22: password,%0A %22new_password2%22: password,%0A %7D)%0A %0A def test_weak_password(self):%0A form = self.form(%22Taylor%22)%0A self.assertFalse(form.is_valid())%0A%0A def test_strong_password(self):%0A form = self.form(%22TaylorSwift89!%22)%0A self.assertTrue(form.is_valid())%0A
|
|
e10c696fd3125a15ca9b27baed6949f7dfbf3e19
|
update tests (add digitlib.curric)
|
restclients/tests.py
|
restclients/tests.py
|
from django.utils import unittest
from restclients.test.uwnetid.subscription import EmailForwardingTest
from restclients.test.util.date_formator import formatorTest
from restclients.test.hfs.idcard import HfsTest
from restclients.test.library.mylibinfo import MyLibInfoTest
from restclients.test.sws.compatible import SWSTest
from restclients.test.sws.financial import SWSFinance
from restclients.test.sws.notice import SWSNotice
from restclients.test.sws.term import SWSTestTerm
from restclients.test.sws.err404.dao import SWSTestDAO404
from restclients.test.sws.err500.dao import SWSTestDAO500
from restclients.test.sws.invalid_dao import SWSTestInvalidDAO
from restclients.test.sws.file_implementation.dao import SWSTestFileDAO
from restclients.test.sws.schedule_data import SWSTestScheduleData
from restclients.test.sws.enrollment import SWSTestEnrollments
from restclients.test.sws.section import SWSTestSectionData
from restclients.test.sws.section_status import SWSTestSectionStatusData
from restclients.test.sws.independent_study import SWSIndependentStudy
from restclients.test.sws.instructor_no_regid import SWSMissingRegid
from restclients.test.sws.registrations import SWSTestRegistrations
from restclients.test.sws.campus import SWSTestCampus
from restclients.test.sws.college import SWSTestCollege
from restclients.test.sws.department import SWSTestDepartment
from restclients.test.sws.curriculum import SWSTestCurriculum
from restclients.test.sws.graderoster import SWSTestGradeRoster
from restclients.test.sws.dates import SWSTestDates
from restclients.test.pws.person import PWSTestPersonData
from restclients.test.pws.entity import PWSTestEntityData
from restclients.test.pws.idcard import TestIdCardPhoto
from restclients.test.pws.err404.dao import PWSTestDAO404
from restclients.test.pws.err404.pws import PWSTest404
from restclients.test.pws.err500.dao import PWSTestDAO500
from restclients.test.pws.err500.pws import PWSTest500
from restclients.test.pws.invalid_dao import PWSTestInvalidDAO
from restclients.test.pws.file_implementation.dao import PWSTestFileDAO
from restclients.test.gws.group import GWSGroupBasics
from restclients.test.gws.course_group import GWSCourseGroupBasics
from restclients.test.gws.search import GWSGroupSearch
from restclients.test.cache.none import NoCacheTest
from restclients.test.cache.time import TimeCacheTest
from restclients.test.cache.etag import ETagCacheTest
from restclients.test.book.by_schedule import BookstoreScheduleTest
from restclients.test.amazon_sqs.queues import SQSQueue
from restclients.test.sms.send import SMS
from restclients.test.sms.invalid_phone_number import SMSInvalidNumbers
from restclients.test.nws.subscription import NWSTestSubscription
from restclients.test.nws.channel import NWSTestChannel
from restclients.test.nws.endpoint import NWSTestEndpoint
from restclients.test.nws.message import NWSTestMessage
from restclients.test.nws.person import NWSTestPerson
from restclients.test.canvas.enrollments import CanvasTestEnrollment
from restclients.test.canvas.accounts import CanvasTestAccounts
from restclients.test.canvas.admins import CanvasTestAdmins
from restclients.test.canvas.roles import CanvasTestRoles
from restclients.test.canvas.courses import CanvasTestCourses
from restclients.test.canvas.sections import CanvasTestSections
from restclients.test.canvas.bad_sis_ids import CanvasBadSISIDs
from restclients.test.canvas.terms import CanvasTestTerms
from restclients.test.canvas.users import CanvasTestUsers
from restclients.test.canvas.submissions import CanvasTestSubmissions
from restclients.test.canvas.assignments import CanvasTestAssignments
from restclients.test.canvas.quizzes import CanvasTestQuizzes
from restclients.test.catalyst.gradebook import CatalystTestGradebook
from restclients.test.trumba.accounts import TrumbaTestAccounts
from restclients.test.trumba.calendar import TestCalendarParse
from restclients.test.trumba.calendars import TrumbaTestCalendars
from restclients.test.gws.trumba_group import TestGwsTrumbaGroup
from restclients.test.r25.events import R25TestEvents
from restclients.test.r25.spaces import R25TestSpaces
from restclients.test.thread import ThreadsTest
from restclients.test.view import ViewTest
from restclients.test.dao_implementation.mock import TestMock
|
Python
| 0
|
@@ -263,24 +263,82 @@
yLibInfoTest
+%0Afrom restclients.test.digitlib.curric import DigitLibTest
%0A%0Afrom restc
|
b3f436e14df37d4af602dcdc9882ce27c97fabd4
|
Add a yaml sdb module (#37563)
|
salt/sdb/yaml.py
|
salt/sdb/yaml.py
|
Python
| 0
|
@@ -0,0 +1,1964 @@
+# -*- coding: utf-8 -*-%0A'''%0APull sdb values from a YAML file%0A%0A:maintainer: SaltStack%0A:maturity: New%0A:platform: all%0A%0A.. versionadded:: Nitrogen%0A%0AConfiguration:%0A%0A.. code-block:: yaml%0A%0A my-yaml-file:%0A driver: yaml%0A files:%0A - /path/to/foo.yaml%0A - /path/to/bar.yaml%0A%0AThe files are merged together and the result is searched using the same%0Amechanism Salt uses for searching Grains and Pillar data structures.%0A%0AOptional configuration:%0A%0A.. code-block:: yaml%0A%0A my-yaml-file:%0A driver: yaml%0A files:%0A - /path/to/foo.yaml%0A - /path/to/bar.yaml%0A merge:%0A strategy: smart%0A merge_list: false%0A'''%0A%0A# import python libs%0Afrom __future__ import absolute_import%0Aimport logging%0A%0Aimport salt.exceptions%0Aimport salt.loader%0Aimport salt.utils%0Aimport salt.utils.dictupdate%0A%0Alog = logging.getLogger(__name__)%0A%0A__func_alias__ = %7B%0A 'set_': 'set'%0A%7D%0A%0A%0Adef set_(*args, **kwargs):%0A '''%0A Setting a value is not supported; edit the YAML files directly%0A '''%0A raise salt.exceptions.NotImplemented()%0A%0A%0Adef get(key, profile=None): # pylint: disable=W0613%0A '''%0A Get a value from the REST interface%0A '''%0A data = _get_values(profile)%0A return salt.utils.traverse_dict_and_list(data, key, None)%0A%0A%0Adef _get_values(profile=None):%0A '''%0A Retrieve all the referenced files, deserialize, then merge them together%0A '''%0A profile = profile or %7B%7D%0A serializers = salt.loader.serializers(__opts__)%0A%0A ret = %7B%7D%0A for fname in profile.get('files', %5B%5D):%0A try:%0A with salt.utils.flopen(fname) as f:%0A contents = serializers.yaml.deserialize(f)%0A ret = salt.utils.dictupdate.merge(ret, contents,%0A **profile.get('merge', %7B%7D))%0A except IOError:%0A log.error(%22File not found '%7B0%7D'%22.format(fname))%0A except TypeError:%0A log.error(%22Error deserializing sdb file '%7B0%7D'%22.format(fname))%0A return ret%0A
|
|
a2151435057e3e42b8ecf6323b8276f4698fdd15
|
Create getTermSize.py
|
ssh_utils/getTermSize.py
|
ssh_utils/getTermSize.py
|
Python
| 0
|
@@ -0,0 +1,2725 @@
+#!/usr/bin/env python%0A%0A%22%22%22 getTerminalSize()%0A - get width and height of console%0A - works on linux,os x,windows,cygwin(windows)%0A%22%22%22%0A%0A__all__=%5B'getTerminalSize'%5D%0A%0A%0Adef getTerminalSize():%0A import platform%0A current_os = platform.system()%0A tuple_xy=None%0A if current_os == 'Windows':%0A tuple_xy = _getTerminalSize_windows()%0A if tuple_xy is None:%0A tuple_xy = _getTerminalSize_tput()%0A # needed for window's python in cygwin's xterm!%0A if current_os == 'Linux' or current_os == 'Darwin' or current_os.startswith('CYGWIN'):%0A tuple_xy = _getTerminalSize_linux()%0A if tuple_xy is None:%0A print %22default%22%0A tuple_xy = (80, 25) # default value%0A return tuple_xy%0A%0Adef _getTerminalSize_windows():%0A res=None%0A try:%0A from ctypes import windll, create_string_buffer%0A%0A # stdin handle is -10%0A # stdout handle is -11%0A # stderr handle is -12%0A%0A h = windll.kernel32.GetStdHandle(-12)%0A csbi = create_string_buffer(22)%0A res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)%0A except:%0A return None%0A if res:%0A import struct%0A (bufx, bufy, curx, cury, wattr,%0A left, top, right, bottom, maxx, maxy) = struct.unpack(%22hhhhHhhhhhh%22, csbi.raw)%0A sizex = right - left + 1%0A sizey = bottom - top + 1%0A return sizex, sizey%0A else:%0A return None%0A%0Adef _getTerminalSize_tput():%0A # get terminal width%0A # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window%0A try:%0A import subprocess%0A proc=subprocess.Popen(%5B%22tput%22, %22cols%22%5D,stdin=subprocess.PIPE,stdout=subprocess.PIPE)%0A output=proc.communicate(input=None)%0A cols=int(output%5B0%5D)%0A proc=subprocess.Popen(%5B%22tput%22, %22lines%22%5D,stdin=subprocess.PIPE,stdout=subprocess.PIPE)%0A output=proc.communicate(input=None)%0A rows=int(output%5B0%5D)%0A return (cols,rows)%0A except:%0A return None%0A%0A%0Adef _getTerminalSize_linux():%0A def ioctl_GWINSZ(fd):%0A try:%0A import fcntl, termios, struct, os%0A 
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,'1234'))%0A except:%0A return None%0A return cr%0A cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)%0A if not cr:%0A try:%0A fd = os.open(os.ctermid(), os.O_RDONLY)%0A cr = ioctl_GWINSZ(fd)%0A os.close(fd)%0A except:%0A pass%0A if not cr:%0A try:%0A cr = (env%5B'LINES'%5D, env%5B'COLUMNS'%5D)%0A except:%0A return None%0A return int(cr%5B1%5D), int(cr%5B0%5D)%0A %0A %0A %0A%0Aif __name__ == %22__main__%22:%0A sizex,sizey=getTerminalSize()%0A print 'width =',sizex,'height =',sizey%0A%0A%0A
|
|
6c38414d899b00cf0ba386e59721354f3b2a799b
|
Update bechdel.py
|
bechdel.py
|
bechdel.py
|
Python
| 0
|
@@ -0,0 +1,1552 @@
+# Difficulty level: Advanced%0A%0A# Goal #1: Create a program that will print out a list of movie titles and a set of ratings defined below into a particular format.%0A%0A# First, choose any five movies you want.%0A%0A# Next, look each movie up manually to find out four pieces of information:%0A#%09%09Their parental guidance rating (G, PG, PG-13, R)%0A#%09%09Their Bechdel Test Rating (See http://shannonvturner.com/bechdel or http://bechdeltest.com/)%0A#%09%09Their IMDB Rating from 0 - 10 (See http://imdb.com/)%0A# %09%09Their genre according to IMDB%0A%0A# You'll need a variable for movie_titles, a variable for parental_rating, a variable for bechdel_rating, a variable for imdb_rating, and a variable for genre.%0A%0A# Since you have five sets of facts about five movies, you'll want to use lists to hold these pieces of information.%0A%0Atitles = %5B'American Sniper','Birdman','Boyhood','The Grand Budapest Hotel','The Imitation Game'%5D%0Aparental_rating = %5B'R', 'R', 'R', 'R', 'PG-13'%5D%0Abechdel_rating = %5B'1', '3', '3', '1', '2'%5D%0Aimdb_rating = %5B'7.4', '8.0','8.1', '8.1', '8.2'%5D%0Agenre = %5B'Action / Biography / Drama', 'Comedy / Drama', 'Drama', 'Adventure / Comedy / Drama', 'Biography / Drama / Thriller'%5D%0A%0A# Once all of your information is stored in lists, loop through those lists to print out information with each part separated by a comma, like this:%0A%0Afor titles, parental_rating, bechdel_rating, imdb_rating, genre in zip(titles, parental_rating, bechdel_rating, imdb_rating, genre):%0A print %22%7B0%7D, %7B1%7D, %7B2%7D, %7B3%7D, %7B4%7D%22.format(titles, parental_rating, bechdel_rating, imdb_rating, genre)
|
|
5b0f490cb527b0940dc322b060069f44fb29accd
|
Add git versioning
|
expyfun/_git.py
|
expyfun/_git.py
|
Python
| 0
|
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-%0A
|
|
bcfac4b7ea5b10b5b6e84a756d716ef6c47cdd62
|
Create finalproject.py
|
finalproject.py
|
finalproject.py
|
Python
| 0.000002
|
@@ -0,0 +1,6 @@
+code!%0A
|
|
2cdf030ee6d8a545c071f2c033d88c6c2091ef08
|
Add freeze_graph tool
|
freeze_graph.py
|
freeze_graph.py
|
Python
| 0.000001
|
@@ -0,0 +1,2280 @@
+# code from https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc%0A# Thanks Morgan%0A%0Aimport os, argparse%0A%0Aimport tensorflow as tf%0Afrom tensorflow.python.framework import graph_util%0A%0Adir = os.path.dirname(os.path.realpath(__file__))%0A%0A%0Adef freeze_graph(model_folder):%0A # We retrieve our checkpoint fullpath%0A checkpoint = tf.train.get_checkpoint_state(model_folder)%0A input_checkpoint = checkpoint.model_checkpoint_path%0A%0A # We precise the file fullname of our freezed graph%0A absolute_model_folder = %22/%22.join(input_checkpoint.split('/')%5B:-1%5D)%0A output_graph = absolute_model_folder + %22/frozen_model.pb%22%0A%0A # Before exporting our graph, we need to precise what is our output node%0A # This is how TF decides what part of the Graph he has to keep and what part it can dump%0A # NOTE: this variable is plural, because you can have multiple output nodes%0A output_node_names = %22clone_0/MobileNet/Predictions/Softmax%22%0A%0A # We clear devices to allow TensorFlow to control on which device it will load operations%0A clear_devices = True%0A%0A # We import the meta graph and retrieve a Saver%0A saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)%0A%0A # We retrieve the protobuf graph definition%0A graph = tf.get_default_graph()%0A input_graph_def = graph.as_graph_def()%0A%0A # We start a session and restore the graph weights%0A with tf.Session() as sess:%0A saver.restore(sess, input_checkpoint)%0A%0A # We use a built-in TF helper to export variables to constants%0A output_graph_def = graph_util.convert_variables_to_constants(%0A sess, # The session is used to retrieve the weights%0A input_graph_def, # The graph_def is used to retrieve the nodes%0A output_node_names.split(%22,%22) # The output node names are used to select the usefull nodes%0A )%0A%0A # Finally we serialize and dump the output graph to the filesystem%0A with tf.gfile.GFile(output_graph, 
%22wb%22) as f:%0A f.write(output_graph_def.SerializeToString())%0A print(%22%25d ops in the final graph.%22 %25 len(output_graph_def.node))%0A%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser()%0A parser.add_argument(%22--model_folder%22, type=str, help=%22Model folder to export%22)%0A args = parser.parse_args()%0A%0Afreeze_graph(args.model_folder)
|
|
2207e8dfbf1ea0f11cac0a95f7c5317eaae27f9b
|
Add cron state support
|
salt/states/cron.py
|
salt/states/cron.py
|
Python
| 0
|
@@ -0,0 +1,1024 @@
+'''%0AManage cron states%0A'''%0A%0Adef present(name,%0A user='root',%0A minute='*',%0A hour='*',%0A daymonth='*',%0A month='*',%0A dayweek='*',%0A ):%0A '''%0A Verifies that the specified cron job is present for the specified user%0A '''%0A ret = %7B'name': name,%0A 'result': True,%0A 'changes': %7B%7D,%0A 'comment': ''%7D%0A data = __salt__%5B'cron.set_job'%5D(%0A user,%0A minute,%0A hour,%0A daymonth,%0A month,%0A dayweek,%0A name,%0A )%0A if data == 'present':%0A ret%5B'comment'%5D = 'Cron %7B0%7D already present'.format(name)%0A return ret%0A if data == 'new':%0A ret%5B'comment'%5D = 'Cron %7B0%7D added to %7B1%7D%5C's crontab'.format(name, user)%0A ret%5B'changes'%5D = %7Buser: name%7D%0A return ret%0A ret%5B'comment'%5D = 'Cron %7B0%7D for user %7B1%7D failed to commit with error %5Cn%7B2%7D'.format(%0A name,%0A user,%0A data%0A )%0A ret%5B'result'%5D = False%0A return ret%0A%0A
|
|
86c2441be14dbc3303b0bc65356372728a62fd4a
|
Add infrastructure for counting database queries
|
test/performance.py
|
test/performance.py
|
Python
| 0.000001
|
@@ -0,0 +1,1650 @@
+from contextlib import contextmanager%0Aimport json%0Aimport os%0Aimport re%0Aimport sys%0Afrom django.conf import settings%0Afrom django.db import connection, reset_queries%0A%0A%0Acount = %7B%7D%0A%0A%0A@contextmanager%0Adef count_queries(k):%0A q = 0%0A debug = settings.DEBUG%0A try:%0A settings.DEBUG = True%0A reset_queries()%0A yield%0A q = len(connection.queries)%0A finally:%0A settings.DEBUG = debug%0A count.setdefault(k, 0)%0A count%5Bk%5D += q%0A return q%0A%0A%0Adef export(f):%0A d = os.path.dirname(f)%0A if not os.path.exists(d):%0A os.makedirs(d)%0A if os.path.exists(f):%0A diff(f)%0A with open(f, 'w') as output:%0A output.write(json.dumps(count))%0A%0A%0Adef diff(previous_file):%0A previous = json.loads(open(previous_file).read())%0A improvements = %5B%5D%0A regressions = %5B%5D%0A for k, v in count.items():%0A if k in previous:%0A v0 = previous%5Bk%5D%0A if v %3E v0:%0A regressions.append((k, v0, v))%0A elif v %3C v0:%0A improvements.append((k, v0, v))%0A if improvements:%0A list_changes(improvements, 'DATABASE PERFORMANCE IMPROVEMENTS')%0A if regressions:%0A list_changes(regressions, 'DATABASE PERFORMANCE REGRESSIONS')%0A print('')%0A print('If there are good reasons for the increase(s) above (e.g. new features), just remove %60%25s%60 and carry on. You will not be bothered again.' %25 previous_file)%0A sys.exit(1)%0A%0A%0Adef list_changes(data, title):%0A print('')%0A print(title)%0A print(re.sub('.', '-', title))%0A print('Unit: number of database queries')%0A print('')%0A for k, v0, v in data:%0A print(%22%25s: %25d -%3E %25d%22 %25 (k, v0, v))%0A
|
|
059b7c5705d2134ca998e67caf65e3125d503dbc
|
add sitemap.py
|
staticpy/page/sitemap.py
|
staticpy/page/sitemap.py
|
Python
| 0.000002
|
@@ -0,0 +1,748 @@
+from __future__ import absolute_import%0A%0Aimport os%0A%0Afrom jinja2 import Environment, PackageLoader%0A%0Afrom ..utils import write_to_file%0A%0A%0Aclass Sitemap(object):%0A def __init__(self, site):%0A self.env = Environment(loader=PackageLoader('dynamic', 'templates'))%0A self.site = site%0A%0A def write(self):%0A template = self.env.get_template('sitemap.html')%0A file_path = os.path.join(%0A self.site.output_path,%0A 'static',%0A 'sitemap.xml'%0A )%0A site_map = template.render(%0A pages=self.pages,%0A base_url=self.site.base_url%0A )%0A%0A write_to_file(file_path, site_map)%0A%0A @property%0A def pages(self):%0A return %5Bp for p in self.site.pages if p.sitemap%5D%0A
|
|
b5568053325bd78c277d4bc0adff59cd12e10f48
|
Add a script to build plugin.
|
build-plugin.py
|
build-plugin.py
|
Python
| 0
|
@@ -0,0 +1,267 @@
+import os%0AUnrealEnginePath='/home/qiuwch/workspace/UnrealEngine'%0AUATScript = os.path.join(UnrealEnginePath, 'Engine/Build/BatchFiles/RunUAT.sh')%0AFullPluginFile = os.path.abspath('UnrealCV.uplugin')%0Aos.system('%25s BuildPlugin -plugin=%25s' %25 (UATScript, FullPluginFile))%0A
|
|
bf42dd5246d935b0179faf1d563baa98bbcf0dbc
|
Create setup.py
|
Python/setup.py
|
Python/setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1365 @@
+#==================================================================================================%0A# Copyright (C) 2016 Olivier Mallet - All Rights Reserved %0A#==================================================================================================%0A%0A# run with:%0A# python setup.py build_ext --inplace%0A%0A# before running python program you need to export the library path:%0A# export LD_LIBRARY_PATH=/path/to/URT/lib:$LD_LIBRARY_PATH%0A%0A# export LD_LIBRARY_PATH=/home/olivier/Z/GitHub/Cpp/URT/lib:$LD_LIBRARY_PATH%0A%0Afrom distutils.core import setup%0Afrom distutils.extension import Extension%0Afrom Cython.Distutils import build_ext%0Afrom Cython.Build import cythonize%0Afrom numpy import get_include%0A%0A# linking to C++ libURT.so library%0Aext = Extension('CyURT',%0A sources = %5B'CyURT.pyx'%5D,%0A include_dirs = %5Bget_include()%5D,%0A libraries = %5B'URT'%5D,%0A extra_compile_args = %5B'-std=c++14','-Wall','-march=native','-DUSE_BLAZE','-DBLAZE_BLAS_INCLUDE_FILE %3Ccblas.h%3E'%5D,%0A extra_link_args = %5B'-L../lib'%5D,%0A language='c++')%0A%0A%0A%0Aext.cython_directives = %7B'boundscheck': False,'wraparound': False%7D%0A# turn off bounds-checking for entire function%0A# turn off negative index wrapping for entire function%0A%0Asetup(cmdclass = %7B'build_ext' : build_ext%7D, ext_modules = %5Bext%5D)%0A
|
|
5ae41fc3763f4fd4a25a7863ab139ef2709e9565
|
Fix missing import
|
Python/setup.py
|
Python/setup.py
|
#!/usr/bin/env python
from setuptools import setup
requirements = [x.strip() for x in open("requirements.txt")]
# Automatically run 2to3 for Python 3 support
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='ml_metrics',
version='0.1.2',
description='Machine Learning Evaluation Metrics',
author = 'Ben Hamner',
author_email = 'ben@benhamner.com',
packages = ['ml_metrics', 'ml_metrics.custom'],
install_requires = requirements,
**extra)
|
Python
| 0.999463
|
@@ -44,16 +44,27 @@
rt setup
+%0Aimport sys
%0A%0Arequir
|
2aa07b8ac9ba2ec8d2b1ac814b5a1fb3074a2616
|
test loading dataset
|
test_loadDataset.py
|
test_loadDataset.py
|
Python
| 0.000002
|
@@ -0,0 +1,1921 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A# File: test_loadDataset.py%0A# Author: Rafa%C5%82 Nowak %3Crafal.nowak@cs.uni.wroc.pl%3E%0A%0Aimport unittest%0A%0Aclass TestLoadDataset(unittest.TestCase):%0A %22%22%22Test load_CIFAR_dataset function from utils%22%22%22%0A def test_certain_images(self):%0A from myutils import load_CIFAR_dataset%0A data_training, data_testing = load_CIFAR_dataset(shuffle=False)%0A %0A sample_id = 9%0A self.assertTrue( (data_training%5Bsample_id-1%5D%5B0%5D%5B0,0,:%5D == %5B134, 186, 223%5D).all() )%0A sample_id = 19%0A self.assertTrue( (data_training%5Bsample_id-1%5D%5B0%5D%5B30,31,:%5D == %5B91, 75, 64%5D).all() )%0A self.assertTrue( (data_testing%5Bsample_id-1%5D%5B0%5D%5B30,31,:%5D == %5B61, 103, 125%5D).all() )%0A self.assertEqual( data_testing%5Bsample_id-1%5D%5B1%5D, 8 )%0A%0A def test_shuffling(self):%0A from myutils import load_CIFAR_dataset%0A data_training, data_testing = load_CIFAR_dataset()%0A %0A sample_id = 192%0A x_training = data_training%5Bsample_id%5D%5B0%5D%5B:,:%5D%0A y_training = data_training%5Bsample_id%5D%5B1%5D%0A %0A sample_id = 190%0A x_testing = data_testing%5Bsample_id%5D%5B0%5D%5B:,:%5D%0A y_testing = data_testing%5Bsample_id%5D%5B1%5D%0A%0A data_training, data_testing = load_CIFAR_dataset(shuffle=True)%0A found = False%0A for i in range(0,50000):%0A if ( data_training%5Bi%5D%5B0%5D%5B:,:%5D == x_training ).all():%0A if found:%0A self.fail()%0A else:%0A found = True%0A self.assertEqual( y_training , data_training%5Bi%5D%5B1%5D )%0A %0A found = False%0A for i in range(0,10000):%0A if ( data_testing%5Bi%5D%5B0%5D%5B:,:%5D == x_testing ).all():%0A if found:%0A self.fail()%0A else:%0A found = True%0A self.assertEqual( y_testing , data_testing%5Bi%5D%5B1%5D )%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()
|
|
21d931e35d9e0b32415a408f28e45894f0c3e800
|
Add task files for celery async process
|
django_backend_test/noras_menu/tasks.py
|
django_backend_test/noras_menu/tasks.py
|
Python
| 0.000001
|
@@ -0,0 +1,1374 @@
+# -*- encoding: utf-8 -*-%0A%0A#app_mail/tasks.py%0Aimport requests%0Aimport simplejson as json%0A%0Afrom django_backend_test.celery import app%0Afrom django.template.loader import render_to_string%0Afrom django.utils.html import strip_tags%0Afrom django.core.mail import EmailMultiAlternatives%0Afrom .models import Subscribers, MenuItems%0A%0A %0A@app.task%0Adef mail_remainder(menu,link):%0A%09items_menu = MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)%0A%09list_mail = Subscribers.objects.values_list('email', flat=True)%0A%09subject,from_email,to = 'Menu of the Day','alertas@electroquimica.cl',list_mail%0A%09html_content = render_to_string('menu_day.html',%7B'menu':items_menu,'link':str(link)%7D)%0A%09text_content = strip_tags(html_content)%0A%09msg = EmailMultiAlternatives(subject,text_content,from_email,to)%0A%09msg.attach_alternative(html_content,%22text/html%22)%0A%09msg.send()%0A%0A@app.task%0Adef slack_remainder(menu,link):%0A%09msg = u%22Hola!%5CnDejo el men%C3%BA de hoy :)%5Cn %7B0%7D %3Chttp://%7B1%7D%3E%22%0A%09items_menu= MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)%0A%09text=%22%22.join(%5Bx+%22%5Cn%22 for x in items_menu%5D)%0A%09data = %7B%22text%22:msg.format(text,link), %22username%22:%22Nora%22, %22icon_emoji%22: %22:knife_fork_plate:%22,%7D%0A%09headers = %7B'Content-type': 'application/json'%7D%0A%09response = requests.post(%22https://hooks.slack.com/services/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%22, data=json.dumps(data), headers=headers)
|
|
849a29b22d656c8079b4ccaf922848fb057c80c5
|
Add migration to assign appropriate sheets to Transnational CountryRegion
|
forms/migrations/0023_assign_sheets_to_transnational.py
|
forms/migrations/0023_assign_sheets_to_transnational.py
|
Python
| 0
|
@@ -0,0 +1,1546 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.core.exceptions import ObjectDoesNotExist%0Afrom django.db import migrations%0A%0A%0Adef assign_transnational_region_to_sheets(apps, schema_editor):%0A from forms.models import sheet_models%0A%0A CountryRegion = apps.get_model(%22forms%22, %22CountryRegion%22)%0A Monitor = apps.get_model(%22gmmp%22, %22Monitor%22)%0A%0A db_alias = schema_editor.connection.alias%0A%0A try:%0A trans_country_region = CountryRegion.objects.using(db_alias).get(country='T1', region='Transnational')%0A except ObjectDoesNotExist:%0A trans_country_region = CountryRegion(country='T1', region='Transnational')%0A trans_country_region.save()%0A%0A monitor = Monitor.objects.get(user__last_name='Macharia', user__first_name='Sarah')%0A monitor.country = trans_country_region.country%0A monitor.save()%0A%0A for name, model in sheet_models.iteritems():%0A sheets_model = apps.get_model(%22forms%22, model._meta.object_name)%0A sheets = sheets_model.objects.using(db_alias).filter(monitor=monitor)%0A for sheet in sheets:%0A sheet.country_region = trans_country_region%0A sheet.country = trans_country_region.country%0A sheet.save()%0A%0Adef backwards(apps, schema_editor):%0A pass%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('forms', '0022_assign_country_region_to_sheet_models'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(%0A assign_transnational_region_to_sheets,%0A backwards,%0A ),%0A %5D%0A
|
|
22738b2cae0a6c77127bbf5385b7265247ffb306
|
migrate also user profiles
|
geography/management/commands/migrate_geography_user.py
|
geography/management/commands/migrate_geography_user.py
|
Python
| 0
|
@@ -0,0 +1,1416 @@
+from proso_user.models import UserProfile%0Afrom django.core.management.base import BaseCommand%0Afrom optparse import make_option%0Afrom contextlib import closing%0Afrom django.db import connection%0Afrom clint.textui import progress%0Afrom django.db import transaction%0A%0A%0Aclass Command(BaseCommand):%0A%0A option_list = BaseCommand.option_list + (%0A make_option(%0A '--clean',%0A action='store_true',%0A dest='clean',%0A default=False,%0A help='Delete all previously loaded data'),%0A )%0A%0A def handle(self, *args, **options):%0A with transaction.atomic():%0A if options%5B'clean'%5D:%0A self.clean()%0A self.create_profiles()%0A%0A def clean(self):%0A with closing(connection.cursor()) as cursor:%0A cursor.execute('TRUNCATE TABLE proso_user_userprofile')%0A%0A def create_profiles(self):%0A with closing(connection.cursor()) as cursor:%0A cursor.execute(%0A '''%0A SELECT auth_user.id%0A FROM auth_user%0A LEFT JOIN lazysignup_lazyuser ON auth_user.id = lazysignup_lazyuser.user_id%0A WHERE lazysignup_lazyuser.id IS NULL%0A ''')%0A for user_id, in progress.bar(cursor, every=max(1, cursor.rowcount / 100), expected_size=cursor.rowcount):%0A profile = UserProfile.objects.get_or_create(user_id=user_id, public=True)%0A
|
|
fdcb04a71d8163ed87aaa387c3f1d77143c49089
|
Made it so I can compare various runs to make sure they're numerically identical.
|
megadiff.py
|
megadiff.py
|
Python
| 0.999819
|
@@ -0,0 +1,1434 @@
+#!/usr/bin/python%0A# megadiff.py%0A# Alex Szatmary%0A# 2009-08-08%0A# This is useful in comparing different runs with the same set of%0A# parameters. The idea is that, while revising the code, megadiff can%0A# be used when numerically identical results are expected from run to%0A# run. This is useful when making stylistic changes to the code, but%0A# not when implementing new physics.%0A# To use, copy a directory from the batch directory to the main%0A# directory of the repository, so you have a tree that looks something%0A# like this:%0A# cell/batch/myjob%0A# cell/myjob%0A# Run this script:%0A# ./megadiff.py myjob%0A# This will then diff a handful of files. If you see noise, you broke%0A# something affecting the numerics.%0A%0Aimport os, time, os.path, sys%0A%0Awd = sys.argv%5B-1%5D%0A%0Afiles = %5B'TaylorDF__00001.txt', 'capsulev__00001.txt', 'capsulex__00001.txt', 'fort.206', 'fort.403', 'fort.451', 'rbcshpk0000.pro', 'solidforce00000.txt', 'solidforce00005.txt', 'solidforce00010.txt', 'solidnodes00000.txt', 'solidnodes00005.txt', 'solidnodes00010.txt', 'status.txt', 'uvwpdump__00000.txt', 'uvwpdump__00005.txt', 'uvwpdump__00010.txt', 'wprofile__00000.txt', 'wprofile__00001.txt', 'wprofile__00002.txt', 'wprofile__00003.txt', 'wprofile__00004.txt', 'wprofile__00005.txt', 'wprofile__00006.txt', 'wprofile__00007.txt', 'wprofile__00008.txt', 'wprofile__00009.txt', 'wprofile__00010.txt'%5D%0Afor file in files:%0A os.system('diff batch/'+wd+'/'+file+' '+wd+'/'+file)%0A
|
|
e7640ad635a77eecbcc5291792b514e42958876e
|
add magic-gen.py
|
scripts/magic-gen.py
|
scripts/magic-gen.py
|
Python
| 0
|
@@ -0,0 +1,1393 @@
+#!/bin/env python%0Aimport os, sys%0Aimport struct%0A%0A# This program parses criu magic.h file and produces%0A# magic.py with all *_MAGIC constants except RAW and V1.%0Adef main(argv):%0A%09if len(argv) != 3:%0A%09%09print(%22Usage: magic-gen.py path/to/image.h path/to/magic.py%22)%0A%09%09exit(1)%0A%0A%09magic_c_header = argv%5B1%5D%0A%09magic_py = argv%5B2%5D%0A%0A%09out = open(magic_py, 'w+')%0A%0A%09# all_magic is used to parse constructions like:%0A%09# #define PAGEMAP_MAGIC%09%090x56084025%0A%09# #define SHMEM_PAGEMAP_MAGIC%09PAGEMAP_MAGIC%0A%09all_magic = %7B%7D%0A%09# and magic is used to store only unique magic.%0A%09magic = %7B%7D%0A%0A%09f = open(magic_c_header, 'r')%0A%09for line in f:%0A%09%09split = line.split()%0A%0A%09%09if len(split) %3C 3:%0A%09%09%09continue%0A%0A%09%09if not '#define' in split%5B0%5D:%0A%09%09%09continue%0A%0A%09%09key = split%5B1%5D%0A%09%09value = split%5B2%5D%0A%0A%09%09if value in all_magic:%0A%09%09%09value = all_magic%5Bvalue%5D%0A%09%09else:%0A%09%09%09magic%5Bkey%5D = value%0A%0A%09%09all_magic%5Bkey%5D = value%0A%0A%09out.write('#Autogenerated. Do not edit!%5Cn')%0A%09out.write('by_name = %7B%7D%5Cn')%0A%09out.write('by_val = %7B%7D%5Cn')%0A%09for k,v in magic.items():%0A%09%09# We don't need RAW or V1 magic, because%0A%09%09# they can't be used to identify images.%0A%09%09if v == '0x0' or v == '1' or k == '0x0' or v == '1':%0A%09%09%09continue%0A%09%09if k.endswith(%22_MAGIC%22):%0A%09%09%09# Just cutting _MAGIC suffix%0A%09%09%09k = k%5B:-6%5D%0A%09%09v = int(v, 16)%0A%09%09out.write(%22by_name%5B'%22+ k +%22'%5D = %22+ str(v) +%22%5Cn%22)%0A%09%09out.write(%22by_val%5B%22+ str(v) +%22%5D = '%22+ k +%22'%5Cn%22)%0A%09f.close()%0A%09out.close()%0A%0Aif __name__ == %22__main__%22:%0A%09main(sys.argv)%0A
|
|
03c0aa498470037ef2aa6a8233198ff521f8d42f
|
add the links demo
|
demos/gtk-demo/demos/links.py
|
demos/gtk-demo/demos/links.py
|
Python
| 0
|
@@ -0,0 +1,2639 @@
+#!/usr/bin/env python%0A# -*- Mode: Python; py-indent-offset: 4 -*-%0A# vim: tabstop=4 shiftwidth=4 expandtab%0A#%0A# Copyright (C) 2010 Red Hat, Inc., John (J5) Palmieri %3Cjohnp@redhat.com%3E%0A#%0A# This library is free software; you can redistribute it and/or%0A# modify it under the terms of the GNU Lesser General Public%0A# License as published by the Free Software Foundation; either%0A# version 2.1 of the License, or (at your option) any later version.%0A#%0A# This library is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU%0A# Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this library; if not, write to the Free Software%0A# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301%0A# USA%0A%0Atitle = %22Links%22%0Adescription = %22%22%22%0AGtkLabel can show hyperlinks. 
The default action is to call gtk_show_uri() on %0Atheir URI, but it is possible to override this with a custom handler.%0A%22%22%22%0A%0Afrom gi.repository import Gtk%0A%0Aclass LinksApp:%0A def __init__(self):%0A self.window = Gtk.Window()%0A self.window.set_title('Links')%0A self.window.set_border_width(12)%0A self.window.connect('destroy', Gtk.main_quit)%0A%0A label = Gtk.Label(%22%22%22Some %3Ca href=%22http://en.wikipedia.org/wiki/Text%22%0Atitle=%22plain text%22%3Etext%3C/a%3E may be marked up%0Aas hyperlinks, which can be clicked%0Aor activated via %3Ca href=%22keynav%22%3Ekeynav%3C/a%3E%22%22%22)%0A%0A label.set_use_markup(True)%0A label.connect(%22activate-link%22, self.activate_link)%0A self.window.add(label);%0A label.show()%0A%0A self.window.show()%0A%0A def activate_link(self, label, uri):%0A if uri == 'keynav':%0A parent = label.get_toplevel()%0A markup = %22%22%22The term %3Ci%3Ekeynav%3C/i%3E is a shorthand for%0Akeyboard navigation and refers to the process of using%0Aa program (exclusively) via keyboard input.%22%22%22%0A dialog = Gtk.MessageDialog(parent,%0A Gtk.DialogFlags.DESTROY_WITH_PARENT,%0A Gtk.MessageType.INFO,%0A Gtk.ButtonsType.OK,%0A text=markup,%0A use_markup=True)%0A dialog.present()%0A dialog.connect('response', self.response_cb)%0A%0A return True%0A%0A def response_cb(self, dialog, response_id):%0A dialog.destroy()%0A%0Adef main(demoapp=None):%0A app = LinksApp()%0A Gtk.main()%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
dc82990f7a00e5e1e4d2a860630507f9cb3b81d4
|
add script for just opening a package source
|
scripts/pkgsource.py
|
scripts/pkgsource.py
|
Python
| 0
|
@@ -0,0 +1,621 @@
+#!/usr/bin/python%0A%0Aimport sys%0Afrom conary.lib import util%0Asys.excepthook = util.genExcepthook()%0A%0Aimport logging%0Aimport updatebot.log%0A%0Aupdatebot.log.addRootLogger()%0Alog = logging.getLogger('test')%0A%0Afrom aptmd import Client%0Afrom updatebot import config%0Afrom updatebot import pkgsource%0A%0Acfg = config.UpdateBotConfig()%0Acfg.read('/data/hg/mirrorball/config/ubuntu/updatebotrc')%0A%0Aclient = Client('http://i.rdu.rpath.com/ubuntu')%0ApkgSource = pkgsource.PackageSource(cfg)%0A%0Afor path in cfg.repositoryPaths:%0A log.info('loading %25s' %25 path)%0A pkgSource.loadFromClient(client, path)%0A%0ApkgSource.finalize()%0A%0Aimport epdb; epdb.st()%0A
|
|
eeb9b9877f1aa5bc1f22ac4883fe58a57ee0474a
|
Add script to test HOTS
|
scripts/test_hots.py
|
scripts/test_hots.py
|
Python
| 0
|
@@ -0,0 +1,633 @@
+import numpy as np%0A%0A%0Aevents = %5B%0A (1162704874, -5547),%0A (1179727586, -5548),%0A (1209562198, -5547),%0A (1224960594, -5548),%0A %5D%0A%0At, x = zip(*events)%0At = np.array(t)%0Ax = np.array(x)%0A%0At = t - t%5B0%5D # redefine zero time%0Aalpha = 1/t%5B-1%5D%0At = alpha*t # scale time values%0A%0AA = np.ones((4, 4))%0AA%5B:, -2%5D = np.array(t)%0Afor i in reversed(range(0, A.shape%5B1%5D - 2)):%0A A%5B:, i%5D = A%5B:, i + 1%5D * A%5B:, -2%5D%0AB = np.array(x)%0Aprint(A)%0Aprint(B)%0A%0AP = np.linalg.lstsq(A, B)%5B0%5D%0Aprint(P)%0A%0Atc = alpha*(events%5B-1%5D%5B0%5D + 1000)%0Aprint(tc)%0AT = np.ones(4)%0Afor i in reversed(range(0, T.shape%5B0%5D - 1)):%0A T%5Bi%5D = tc * T%5Bi + 1%5D%0A%0Aprint(T)%0Aprint(np.dot(P, T))%0A
|
|
3862ea1b1cae1c3be80824495d1c6937a18378b9
|
test added
|
tests/pycut_test.py
|
tests/pycut_test.py
|
Python
| 0
|
@@ -0,0 +1,1028 @@
+#! /usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0A%0A%0A# import funkc%C3%AD z jin%C3%A9ho adres%C3%A1%C5%99e%0Aimport sys%0Aimport os.path%0Aimport copy%0A%0Apath_to_script = os.path.dirname(os.path.abspath(__file__))%0Asys.path.append(os.path.join(path_to_script, %22../src/%22))%0Aimport unittest%0A%0Aimport numpy as np%0A%0Aimport pycut%0A%0A%0A%0Aclass PycutTest(unittest.TestCase):%0A%0A # @TODO znovu zprovoznit test%0A #@unittest.skip(%22Cekame, az to Tomas opravi%22)%0A%0A def test_ordered_values_by_indexes(self):%0A %22%22%22%0A test of pycut.__ordered_values_by_indexes%0A %22%22%22%0A slab = %7B'none':0, 'liver':1, 'porta':2, 'lesions':6%7D%0A voxelsize_mm = np.array(%5B1.0,1.0,1.2%5D)%0A%0A segm = np.zeros(%5B256,256,80%5D, dtype=np.int16)%0A%0A # liver%0A segm%5B70:190,40:220,30:60%5D = slab%5B'liver'%5D%0A# port%0A segm%5B120:130,70:220,40:45%5D = slab%5B'porta'%5D%0A segm%5B80:130,100:110,40:45%5D = slab%5B'porta'%5D%0A segm%5B120:170,130:135,40:44%5D = slab%5B'porta'%5D%0A%0A # vytvo%C5%99en%C3%AD kopie segmentace - p%C5%99ed ur%C4%8Den%C3%ADm l%C3%A9z%C3%AD%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A
|
|
ee169acf82eff08daa40c461263712f2af2a1131
|
Add a standalone simulation script (really a duplicate of sensitivity.py)
|
scripts/simulate.py
|
scripts/simulate.py
|
Python
| 0
|
@@ -0,0 +1,1294 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A%22%22%22%0AThis script runs stand-alone simulation on an RMG job. This is effectively the %0Asame script as sensitivity.py%0A%22%22%22%0A%0Aimport os.path%0Aimport argparse%0A%0Afrom rmgpy.tools.sensitivity import runSensitivity%0A%0A################################################################################%0A%0Adef parse_arguments():%0A%0A parser = argparse.ArgumentParser()%0A parser.add_argument('input', metavar='INPUT', type=str, nargs=1,%0A help='RMG input file')%0A parser.add_argument('chemkin', metavar='CHEMKIN', type=str, nargs=1,%0A help='Chemkin file')%0A parser.add_argument('dictionary', metavar='DICTIONARY', type=str, nargs=1,%0A help='RMG dictionary file')%0A args = parser.parse_args()%0A %0A inputFile = os.path.abspath(args.input%5B0%5D)%0A chemkinFile = os.path.abspath(args.chemkin%5B0%5D)%0A dictFile = os.path.abspath(args.dictionary%5B0%5D)%0A%0A return inputFile, chemkinFile, dictFile%0A%0Adef main():%0A # This might not work anymore because functions were modified for use with webserver%0A%0A inputFile, chemkinFile, dictFile = parse_arguments()%0A%0A runSensitivity(inputFile, chemkinFile, dictFile)%0A%0A################################################################################%0A%0A%0Aif __name__ == '__main__':%0A main()%0A%0A %0A %0A
|
|
0f31db66a38073e1549d977909c5f4c5d3eab280
|
Create permutation-in-string.py
|
Python/permutation-in-string.py
|
Python/permutation-in-string.py
|
Python
| 0.999383
|
@@ -0,0 +1,1109 @@
+# Time: O(n)%0A# Space: O(1)%0A%0A# Given two strings s1 and s2, write a function to return true%0A# if s2 contains the permutation of s1. In other words,%0A# one of the first string's permutations is the substring of the second string.%0A#%0A# Example 1:%0A# Input:s1 = %22ab%22 s2 = %22eidbaooo%22%0A# Output:True%0A# Explanation: s2 contains one permutation of s1 (%22ba%22).%0A# Example 2:%0A# Input:s1= %22ab%22 s2 = %22eidboaoo%22%0A# Output: False%0A# Note:%0A# The input strings only contain lower case letters.%0A# The length of both given strings is in range %5B1, 10,000%5D.%0A%0Aclass Solution(object):%0A def checkInclusion(self, s1, s2):%0A %22%22%22%0A :type s1: str%0A :type s2: str%0A :rtype: bool%0A %22%22%22%0A counts = collections.Counter(s1)%0A l = len(s1)%0A for i in xrange(len(s2)):%0A if counts%5Bs2%5Bi%5D%5D %3E 0:%0A l -= 1%0A counts%5Bs2%5Bi%5D%5D -= 1%0A if l == 0:%0A return True%0A start = i + 1 - len(s1)%0A if start %3E= 0:%0A counts%5Bs2%5Bstart%5D%5D += 1%0A if counts%5Bs2%5Bstart%5D%5D %3E 0:%0A l += 1%0A return False%0A
|
|
ebb797bb7596adc71b1e906cb7d7f94b56e8f535
|
Create subarray-sum-equals-k.py
|
Python/subarray-sum-equals-k.py
|
Python/subarray-sum-equals-k.py
|
Python
| 0.9988
|
@@ -0,0 +1,822 @@
+# Time: O(n)%0A# Space: O(n)%0A%0A# Given an array of integers and an integer k,%0A# you need to find the total number of continuous subarrays whose sum equals to k.%0A#%0A# Example 1:%0A# Input:nums = %5B1,1,1%5D, k = 2%0A# Output: 2%0A#%0A# Note:%0A# The length of the array is in range %5B1, 20,000%5D.%0A# The range of numbers in the array is %5B-1000, 1000%5D and the range of the integer k is %5B-1e7, 1e7%5D.%0A%0Aclass Solution(object):%0A def subarraySum(self, nums, k):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :type k: int%0A :rtype: int%0A %22%22%22%0A result = 0%0A accumulated_sum = 0%0A lookup = collections.defaultdict(int)%0A lookup%5B0%5D += 1%0A for num in nums:%0A accumulated_sum += num%0A result += lookup%5Baccumulated_sum - k%5D%0A lookup%5Baccumulated_sum%5D += 1%0A return result%0A
|
|
c9b75d5195666efaef8b52d9f2f2b70d9b11f25f
|
Create individual file used for initializing db
|
server/models/db.py
|
server/models/db.py
|
Python
| 0
|
@@ -0,0 +1,58 @@
+from flask_sqlalchemy import SQLAlchemy%0A%0Adb = SQLAlchemy()
|
|
c0ba4a18433a05f492cfb78716fc77e14c8b4f56
|
test solvable:filelist attribute
|
bindings/python/tests/filelist.py
|
bindings/python/tests/filelist.py
|
Python
| 0.999908
|
@@ -0,0 +1,696 @@
+#%0A# Check Filelists%0A#%0A%0Aimport unittest%0A%0Aimport sys%0Asys.path.insert(0, '../../../build/bindings/python')%0A%0Aimport satsolver%0A%0A%0Aclass TestSequenceFunctions(unittest.TestCase):%0A %0A def test_filelists(self):%0A pool = satsolver.Pool()%0A assert pool%0A pool.set_arch(%22x86_64%22)%0A repo = pool.add_solv( %22os11-biarch.solv%22 )%0A repo.set_name( %22openSUSE 11.0 Beta3 BiArch%22 )%0A i = 0%0A for solv in pool:%0A print %22Filelist for %22, solv%0A if solv.attr_exists('solvable:filelist'):%0A# print solv, %22 has a filelist%22%0A print solv.attr('solvable:filelist')%0A else:%0A print '-'%0A i = i + 1%0A if i %3E 2:%0A break%0Aif __name__ == '__main__':%0A unittest.main()%0A%0A
|
|
d1eceaf35b74166f3471dea86b194f67a152cb19
|
add Python script to diff two source trees
|
dev-tools/scripts/diffSources.py
|
dev-tools/scripts/diffSources.py
|
Python
| 0.000049
|
@@ -0,0 +1,2285 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more%0A# contributor license agreements. See the NOTICE file distributed with%0A# this work for additional information regarding copyright ownership.%0A# The ASF licenses this file to You under the Apache License, Version 2.0%0A# (the %22License%22); you may not use this file except in compliance with%0A# the License. You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Aimport subprocess%0Aimport sys%0A%0A# recursive, unified output format, treat missing files as present but empty%0ADIFF_FLAGS = '-ruN'%0A%0Aif '-skipWhitespace' in sys.argv:%0A sys.argv.remove('-skipWhitespace')%0A # ignores only whitespace changes%0A DIFF_FLAGS += 'bBw'%0A%0Aif len(sys.argv) != 3:%0A print%0A print 'Usage: python -u diffSources.py %3Cdir1%3E %3Cdir2%3E %5B-skipWhitespace%5D'%0A print%0A print '''This tool creates an applying patch between two directories.%0A%0AWhile you could use this to make a committable patch from a branch, that approach loses%0Athe svn history from the branch (better to use %22svn merge --reintegrate%22, for example). 
This%0Adiff output should not be considered %22authoritative%22 from a merging standpoint as it does%0Anot reflect what svn will do on merge.%0A'''%0A print%0A sys.exit(0)%0A%0Ap = subprocess.Popen(%5B'diff', DIFF_FLAGS, '-x', '.svn', '-x', 'build', sys.argv%5B1%5D, sys.argv%5B2%5D%5D, shell=False, stdout=subprocess.PIPE)%0A%0Akeep = False%0Awhile True:%0A l = p.stdout.readline()%0A if l == '':%0A break%0A if l.endswith('%5Cr%5Cn'):%0A l = l%5B:-2%5D%0A elif l.endswith('%5Cn'):%0A l = l%5B:-1%5D%0A if l.startswith('diff ') or l.startswith('Binary files '):%0A keep = l.lower().find('/build/') == -1 and (l.lower().startswith('Only in') or ((l.lower().endswith('.java') or l.lower().endswith('.txt') or l.lower().endswith('.xml') or l.lower().endswith('.iml')) and l.find('/.svn/') == -1))%0A if keep:%0A print%0A print%0A print l.strip()%0A elif keep:%0A print l%0A elif l.startswith('Only in'):%0A print l.strip()%0A
|
|
74f3f70337e9924e4fce030d6a5941ce506bfee9
|
Add a runserver script to start the application for development purposes
|
runserver.py
|
runserver.py
|
Python
| 0
|
@@ -0,0 +1,397 @@
+#!/usr/bin/env python%0A%0A## These two lines are needed to run on EL6%0A__requires__ = %5B'SQLAlchemy %3E= 0.8', 'jinja2 %3E= 2.4'%5D%0Aimport pkg_resources%0A%0Aimport sys%0Afrom werkzeug.contrib.profiler import ProfilerMiddleware%0A%0Afrom fresque import APP%0AAPP.debug = True%0A%0Aif '--profile' in sys.argv:%0A APP.config%5B'PROFILE'%5D = True%0A APP.wsgi_app = ProfilerMiddleware(APP.wsgi_app, restrictions=%5B30%5D)%0A%0AAPP.run()%0A
|
|
93039b9cbea2c8355b8d8651ec0d15cdd73169a6
|
Create findmean.py
|
udacity/findmean.py
|
udacity/findmean.py
|
Python
| 0.000009
|
@@ -0,0 +1,663 @@
+# The mean of a set of numbers is the sum of the numbers divided by the%0A# number of numbers. Write a procedure, list_mean, which takes a list of numbers%0A# as its input and return the mean of the numbers in the list.%0A%0A# Hint: You will need to work out how to make your division into decimal%0A# division instead of integer division. You get decimal division if any of%0A# the numbers involved are decimals.%0A%0Adef list_mean():%0A i = 0%0A sum_int = 0.0%0A while i %3C len(p): # or %3C=?%0A sum_int = p%5Bi%5D + sum_int%0A i = i + 1%0A return sum_int / len(p)%0A%0Aprint list_mean(%5B1,2,3,4%5D)%0A#%3E%3E%3E 2.5%0Aprint list_mean(%5B1,3,4,5,2%5D)%0A#%3E%3E%3E 3.0%0Aprint list_mean(%5B2%5D)%0A#%3E%3E%3E 2.0%0A
|
|
7ea9bbd3315fed4d6fd319a865517a4f72228342
|
Create test.py
|
Python/test.py
|
Python/test.py
|
Python
| 0.000005
|
@@ -0,0 +1 @@
+%0A
|
|
199b6bb0c62028d93e1204d96591500b0f76e834
|
Add Robot_V002b.py Object Oriented Version
|
Robot_V002b.py
|
Robot_V002b.py
|
Python
| 0.000006
|
@@ -0,0 +1,2301 @@
+#!/usr/bin/python%0A%0Aimport sys%0Asys.path.append(%22/home/pi/Documents/Robots/slcypi/MA%22) ### ADD PATH%0Asys.path.append(%22/home/pi/Documents/Robots/slcypi/HAT_Python3%22) ### ADD PATH%0Aimport time%0Afrom time import sleep%0Aimport atexit%0Aimport pygame%0Aimport pygame.camera%0Afrom PIL import Image%0A#from pylab import *%0Afrom Tank import Tank%0A%0A%0A# Pygame and camera initialize%0Apygame.init()%0Apygame.display.set_caption('My Robot')%0Apygame.camera.init()%0Ascreen = pygame.display.set_mode((640,480),0)%0Acam_list = pygame.camera.list_cameras()%0Acam = pygame.camera.Camera(cam_list%5B0%5D,(320,240))%0Acam.start()%0A%0Arobot = Tank()%0A%0Atry:%0A print('starting loop')%0A done = False%0A while not done:%0A%0A # Camera%0A image1 = cam.get_image()%0A image1 = pygame.transform.scale(image1,(640,480))%0A image1 = pygame.transform.flip(image1,1,1)%0A screen.blit(image1,(0,0))%0A pygame.display.update()%0A%0A # User events%0A for event in pygame.event.get():%0A if event.type == pygame.KEYDOWN:%0A if event.key == (pygame.K_UP):%0A robot.drive(1)%0A if event.key == (pygame.K_DOWN):%0A robot.drive(-1)%0A if (event.key == pygame.K_ESCAPE):%0A done = True%0A if (event.key == pygame.K_LEFT):%0A robot.rotate(1)%0A if (event.key == pygame.K_RIGHT):%0A robot.rotate(-1)%0A if event.type == pygame.KEYUP:%0A if event.key == (pygame.K_UP):%0A robot.drive(0)%0A if event.key == (pygame.K_DOWN):%0A robot.drive(0)%0A if (event.key == pygame.K_LEFT):%0A robot.rotate(0)%0A if (event.key == pygame.K_RIGHT):%0A robot.rotate(0)%0A%0Aexcept KeyboardInterrupt:%0A pygame.quit()%0A%0Acam.stop()%0Apygame.quit()%0A
|
|
eeb0187b9d474b9b5d1710e8f45f8116894eb15c
|
Read Temperature from DS18B20. Post the data to data.sparkfun.com
|
temp-sensor02/main.py
|
temp-sensor02/main.py
|
Python
| 0
|
@@ -0,0 +1,945 @@
+from machine import Pin%0Afrom ds18x20 import DS18X20%0Aimport onewire%0Aimport time%0Aimport machine%0Aimport ujson%0Aimport urequests%0A%0Adef posttocloud(temperature):%0A keystext = open(%22sparkfun_keys.json%22).read()%0A keys = ujson.loads(keystext)%0A url = keys%5B'inputUrl'%5D + %22?private_key=%22 + keys%5B'privateKey'%5D + %22&temp=%22 + str(temperature)%0A #data = %7B'temp':temperature%7D%0A #data%5B'private_key'%5D = keys%5B'privateKey'%5D%0A #print (keys%5B'inputUrl'%5D)%0A #print(keys%5B'privateKey'%5D)%0A #datajson = ujson.dumps(data)%0A #print (datajson)%0A resp = urequests.request(%22POST%22, url)%0A print (resp.text)%0A%0Awhile True:%0A p = Pin(2) # Data Line is on GPIO2 aka D4%0A ow = onewire.OneWire(p)%0A ds = DS18X20(ow)%0A lstrom = ds.scan()%0A #Assuming we have only 1 device connected%0A rom = lstrom%5B0%5D%0A%0A ds.convert_temp()%0A time.sleep_ms(750)%0A temperature = round(float(ds.read_temp(rom)),1)%0A #print(%22Temperature: %7B:02.1f%7D%22.format(temperature))%0A posttocloud(temperature)%0A time.sleep(10)
|
|
d2e63dfc644e323bf23fbd6654f7493ed94d7991
|
Use HTTPS for libchromiumcontent's URL
|
script/lib/config.py
|
script/lib/config.py
|
#!/usr/bin/env python
import errno
import os
import platform
import sys
BASE_URL = os.getenv('LIBCHROMIUMCONTENT_MIRROR') or \
'http://github-janky-artifacts.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'cfbe8ec7e14af4cabd1474386f54e197db1f7ac1'
PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def get_platform_key():
if os.environ.has_key('MAS_BUILD'):
return 'mas'
else:
return PLATFORM
def get_target_arch():
try:
target_arch_path = os.path.join(__file__, '..', '..', '..', 'vendor',
'brightray', 'vendor', 'download',
'libchromiumcontent', '.target_arch')
with open(os.path.normpath(target_arch_path)) as f:
return f.read().strip()
except IOError as e:
if e.errno != errno.ENOENT:
raise
if PLATFORM == 'win32':
return 'ia32'
else:
return 'x64'
def get_chromedriver_version():
return 'v2.15'
def s3_config():
config = (os.environ.get('ATOM_SHELL_S3_BUCKET', ''),
os.environ.get('ATOM_SHELL_S3_ACCESS_KEY', ''),
os.environ.get('ATOM_SHELL_S3_SECRET_KEY', ''))
message = ('Error: Please set the $ATOM_SHELL_S3_BUCKET, '
'$ATOM_SHELL_S3_ACCESS_KEY, and '
'$ATOM_SHELL_S3_SECRET_KEY environment variables')
assert all(len(c) for c in config), message
return config
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
|
Python
| 0
|
@@ -136,11 +136,29 @@
http
+s
://
+s3.amazonaws.com/
gith
@@ -179,25 +179,8 @@
acts
-.s3.amazonaws.com
/lib
|
632f71651864517cc977f79dcdac7f3b0f516b49
|
Add example script to post experiment and task data
|
scripts/post_data.py
|
scripts/post_data.py
|
Python
| 0
|
@@ -0,0 +1,991 @@
+#!/usr/bin/env python3%0A%0Aimport requests%0A%0A%0Adomain = 'http://dakis.gimbutas.lt/api/'%0Aexp_data = %7B%0A %22description%22: %22First successful post through API%22,%0A %22algorithm%22: %22TestTasks%22,%0A %22neighbours%22: %22Nearest%22,%0A %22stopping_criteria%22: %22x_dist%22,%0A %22stopping_accuracy%22: %220.01%22,%0A %22subregion%22: %22simplex%22,%0A %22inner_problem_accuracy%22: None,%0A %22inner_problem_iters%22: 10,%0A %22inner_problem_division%22: %22LongesEdge%22,%0A %22lipschitz_estimation%22: %22min_allowed%22,%0A %22simplex_division%22: %22LongestEdge%22,%0A %22valid%22: True,%0A %22mistakes%22: %22%22,%0A%7D%0A%0Aresp = requests.post(domain + 'experiments/', data=exp_data)%0Aexp_url = resp.json()%5B'url'%5D%0A%0Atask_data = %7B%0A %22func_name%22: %22GKLS%22,%0A %22func_cls%22: 1,%0A %22func_id%22: 1,%0A %22calls%22: 123,%0A %22subregions%22: 1041,%0A %22duration%22: %220.12%22,%0A %22f_min%22: None,%0A %22x_min%22: None,%0A %22experiment%22: exp_url,%0A%7D%0Arequests.post(domain + 'tasks/', data=task_data)%0A%0Atask_data%5B'func_id'%5D = 2%0Atask_data%5B'calls'%5D = 213%0Arequests.post(domain + 'tasks/', data=task_data)%0A
|
|
0c96781e2c85ea73f7fc926f7f3cfc805d6543d7
|
Define chunked_argmax
|
Examples/SegmentVesselsUsingNeuralNetworks/scripts-xyz-slices/deploy.py
|
Examples/SegmentVesselsUsingNeuralNetworks/scripts-xyz-slices/deploy.py
|
"""Routines used for applying an already trained network to images"""
import os.path
import subprocess
import time
import itk
import numpy as np
import utils
from utils import script_params
def duplicate(im):
"""Duplicate an itk image"""
f = itk.ImageDuplicator.New(im)
f.Update()
return f.GetOutput()
# Preprocess ("prep") images
def prep(inputImage, outputDir, expertImage=None):
"""Preprocess inputImage and expertImage (if not None) according to
script_params. Output (where '*' stands for outputDir +
basename(inputImage) (without extension)):
- *_prepped.mha: Preprocessed inputImage
- *_prepped_expert.mha: Preprocessed expertImage
"""
outputImagePrefix = os.path.join(outputDir, os.path.splitext(os.path.basename(inputImage))[0])
outputImagePrefix = str(outputImagePrefix)
smoothing_radius = script_params['SMOOTHING_RADIUS']
reader = itk.ImageFileReader.New(FileName=str(inputImage))
smoothing_filter = itk.MedianImageFilter.New(reader.GetOutput(),
Radius=smoothing_radius)
equalization_filter = itk.AdaptiveHistogramEqualizationImageFilter.New(
smoothing_filter.GetOutput(),
Radius=script_params['PATCH_RADIUS'],
Alpha=0, Beta=0,
)
writer = itk.ImageFileWriter.New(equalization_filter.GetOutput(),
FileName=outputImagePrefix + "_prepped.mha",
UseCompression=True)
writer.Update()
if expertImage is None:
return writer.GetFileName()
else:
utils.symlink_through(expertImage, outputImagePrefix + '_prepped_expert.mha')
def segmentPreppedImage(model, input_file, output_file):
"""Segment (really, generate seed points from) a preprocessed image"""
print "Segmenting image", input_file
data_shape = model.input_shape
print data_shape
# read input slab image
input_image_itk = itk.imread(str(input_file))
input_image = itk.GetArrayViewFromImage(input_image_itk)
# get foreground mask
input_revcum = np.cumsum(np.bincount(input_image.reshape(-1))[::-1])[::-1]
th = np.count_nonzero(input_revcum >= input_revcum[0] * script_params['DEPLOY_TOP_FRAC']) - 2
fgnd_mask = input_image > th
# get test_batch_size and patch_size used for cnn model
test_batch_size = script_params['DEPLOY_BATCH_SIZE']
patch_size = data_shape[0][1]
print 'Test batch shape = ', data_shape
# collect all patches
print "Extracting patches ... "
start_time = time.time()
w = np.int(patch_size / 2)
patch_indices = np.stack(np.where(fgnd_mask[(np.s_[w:-w],) * input_image.ndim]), axis=-1) + w
end_time = time.time()
print '\tTook %s seconds' % (end_time - start_time)
num_patches = patch_indices.shape[0]
print "\tNo of patches = %s" % num_patches
# Classify patches using cnn and write result in output image
print "Classifying patches ... "
start_time = time.time()
output_image_itk = duplicate(input_image_itk)
output_image = itk.GetArrayViewFromImage(output_image_itk)
output_image.fill(0)
prob_vessel = utils.predict_on_indices(model, input_image, patch_indices, test_batch_size)
output_image[tuple(patch_indices.T)] = (prob_vessel * 255).round()
end_time = time.time()
print '\tTook %s seconds' % (end_time - start_time)
# Save output
itk.imwrite(output_image_itk, str(output_file), compression=True)
def segmentTubes(originalImage, vascularModelFile, outputDir,
vess_seed_prob=0.95, vess_scale=0.1):
inputImageName = os.path.splitext(os.path.basename(originalImage))[0]
# compute seed image
vessProbImageFile = os.path.join(
outputDir, inputImageName + "_vess_prob.mha")
outSeedImageFile = os.path.join(
outputDir, inputImageName + "_vess_seeds.mha")
subprocess.call(["ImageMath", vessProbImageFile,
"-t", str(255 * vess_seed_prob), "255", "1", "0",
"-W", "0", outSeedImageFile])
# segment tubes using ridge traversal
outVsegMaskFile = os.path.join(outputDir, inputImageName + "_vseg.mha")
outVsegTreFile = os.path.join(outputDir, inputImageName + "_vseg.tre")
subprocess.call(["SegmentTubes",
"-o", outVsegMaskFile,
"-P", vascularModelFile,
"-M", outSeedImageFile,
"-s", str(vess_scale),
originalImage, outVsegTreFile])
# Fill gaps and convert to a tree
subprocess.call(["ConvertTubesToTubeTree",
"--maxTubeDistanceToRadiusRatio", "3",
"--removeOrphanTubes",
outVsegTreFile,
outVsegTreFile])
subprocess.call(["TreeMath",
"-f", "S",
"-w", outVsegTreFile,
outVsegTreFile])
|
Python
| 0.000003
|
@@ -1676,24 +1676,863 @@
ert.mha')%0A%0A%0A
+def chunked_argmax(arr, window):%0A %22%22%22Compute a tuple (length arr.ndim) of arrays representing the%0A indices of the max values in window-shaped chunks of arr, which%0A must evenly divide into windows.%0A%0A %22%22%22%0A split_dim = arr.reshape(tuple(x for s, w in zip(arr.shape, window) for x in (s / w, w)))%0A transpose_dim = split_dim.transpose(tuple(range(0, split_dim.ndim, 2)) +%0A tuple(range(1, split_dim.ndim, 2)))%0A flat_dim = transpose_dim.reshape(transpose_dim.shape%5B:arr.ndim%5D + (-1,))%0A argmaxes = np.argmax(flat_dim, axis=-1)%0A flat_argmaxes = argmaxes.reshape(-1)%0A in_chunk_indices = np.unravel_index(flat_argmaxes, window)%0A chunk_corner_indices = (np.indices(argmaxes.shape).reshape((arr.ndim, -1)).T * window).T%0A return tuple(chunk_corner_indices + in_chunk_indices)%0A%0A%0A
def segmentP
|
1c5ddc6803853e48eb77bd337fedbaabc56a0102
|
Add empty MultiLayerPercetrsons file.
|
MultiLayerNeuralNetworks/MultiLayerPerceptrons/MultiLayerPerceptrons.py
|
MultiLayerNeuralNetworks/MultiLayerPerceptrons/MultiLayerPerceptrons.py
|
Python
| 0
|
@@ -0,0 +1,181 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0Au%22%22%22%0ACopyright (c) 2016 Masaru Morita%0A%0AThis software is released under the MIT License.%0ASee LICENSE file included in this repository.%0A%22%22%22%0A%0A
|
|
5fc6b3c64b29dc5b17fec90f331cc7a2ca22704f
|
add main file
|
sms2email.py
|
sms2email.py
|
Python
| 0.000001
|
@@ -0,0 +1,2682 @@
+#!/usr/bin/env python%0A# -*- coding: UTF-8 -*-%0A%0A'''%0AWork at python2.5, iphone4(ios6) with cydia, using hotmail to send the message.%0A'''%0A%0Aimport sqlite3 as sql%0Aimport email%0Aimport os%0Aimport sys%0Aimport codecs%0Aimport string%0Aimport datetime%0Afrom Queue import Queue%0Aimport time%0Aimport threading%0Aimport pymail%0A%0A%0AEMAIL_CONTENT = '''Author:$%7Bauthor%7D%5CnTEXT:%5Cn$%7Btext%7D%5Cn$%7Bdate%7D%5Cn%5Cn%5Cn'''%0A%0A%0Areload(sys)%0Asys.setdefaultencoding('utf8')%0AstreamWriter = codecs.lookup('utf-8')%5B-1%5D%0Asys.stdout = streamWriter(sys.stdout)%0A%0A%0AUPDATE_DATE = -1%0AUPDATE_CHECK_SECONDS = 30%0ASMSDB_PATH = '/var/mobile/Library/SMS/sms.db'%0ASQL_QUERY_TEMPLATE = string.Template(%0A '''select date, hd.id, text from message as msg, handle as hd where msg.handle_id=hd.rowid and date%3E$%7Bdate%7D order by msg.date desc limit 10''')%0A# sql index%0A# date=0%0A# author=1%0A# text=2%0A%0ASMSDB = sql.connect(SMSDB_PATH)%0ASMSDB_CURSOR = SMSDB.cursor()%0Amail = pymail.Pymail(os.environ.get('USER_MAIL'), os.environ.get('USER_PASSWD'), os.environ.get('MAIL_TO'))%0Amq = Queue()%0A%0A%0Adef email_sender():%0A '''worker%0A '''%0A item = mq.get()%0A if item:%0A mail.send_mail('SMS on IPhone4', msg_body)%0A mq.task_done()%0A%0A%0Adef message_date(mac_time):%0A '''see: http://stackoverflow.com/questions/10746562/parsing-date-field-of-iphone-sms-file-from-backup%0A '''%0A unix_time = int(mac_time) + 978307200%0A date = datetime.datetime.fromtimestamp(unix_time)%0A return date%0A%0A%0Adef build_content(message_data):%0A msg_body = ''%0A for m in message_data:%0A _body = string.Template(EMAIL_CONTENT)%0A msg_body += _body.safe_substitute(author=str(m%5B1%5D), text=m%5B2%5D, date=message_date(m%5B0%5D))%0A return msg_body%0A%0A%0Aif __name__ == '__main__':%0A print 'worker sender is OK'%0A while(1):%0A if UPDATE_DATE %3E 0:%0A SMSDB_CURSOR.execute(SQL_QUERY_TEMPLATE.safe_substitute(date=UPDATE_DATE))%0A message_data = SMSDB_CURSOR.fetchall()%0A if 
message_data:%0A UPDATE_DATE = int(message_data%5B0%5D%5B0%5D)%0A msg_body = build_content(message_data)%0A mq.put(msg_body)%0A t = threading.Thread(target=email_sender)%0A t.daemon = True%0A t.start()%0A time.sleep(UPDATE_CHECK_SECONDS)%0A else:%0A # INIT%0A SMSDB_CURSOR.execute('''select date, hd.id, text from message as msg, handle as hd where msg.handle_id=hd.rowid order by msg.date desc limit 2''')%0A message_data = SMSDB_CURSOR.fetchall()%0A UPDATE_DATE = int(message_data%5B0%5D%5B0%5D)%0A msg_body = build_content(message_data)%0A mail.send_mail('SMS Monitor', 'init OK, SMS monitor has is running. recent messge is %5Cn' + msg_body)%0A
|
|
762b87e25ded495aab3b5ff96774e8a2230f395d
|
Move xblock tagging model import into method.
|
cms/lib/xblock/tagging/tagging.py
|
cms/lib/xblock/tagging/tagging.py
|
# -*- coding: utf-8 -*-
"""
Structured Tagging based on XBlockAsides
"""
from django.conf import settings
from webob import Response
from xblock.core import XBlock, XBlockAside
from xblock.fields import Dict, Scope
from xblock.fragment import Fragment
from edxmako.shortcuts import render_to_string
from xmodule.capa_module import CapaModule
from xmodule.x_module import AUTHOR_VIEW
from .models import TagCategories
_ = lambda text: text
class StructuredTagsAside(XBlockAside):
    """
    Aside that allows tagging blocks
    """
    # Mapping of tag-category name -> selected value(s) for the decorated
    # block.  Content-scoped, so saved tags travel with the block definition.
    saved_tags = Dict(help=_("Dictionary with the available tags"),
                      scope=Scope.content,
                      default={},)

    def get_available_tags(self):
        """
        Return available tags
        """
        # Queryset of every tag category configured in the tagging app.
        return TagCategories.objects.all()

    def _get_studio_resource_url(self, relative_url):
        """
        Returns the Studio URL to a static resource.
        """
        return settings.STATIC_URL + relative_url

    @XBlockAside.aside_for(AUTHOR_VIEW)
    def student_view_aside(self, block, context):  # pylint: disable=unused-argument
        """
        Display the tag selector with specific categories and allowed values,
        depending on the context.
        """
        # The tagging UI is rendered only for problem (Capa) blocks; every
        # other block type gets an empty fragment.
        if isinstance(block, CapaModule):
            tags = []
            for tag in self.get_available_tags():
                tag_available_values = tag.get_values()
                tag_current_values = self.saved_tags.get(tag.name, [])

                # Legacy data may store a single value as a bare string;
                # normalise to a list before comparing.
                if isinstance(tag_current_values, basestring):
                    tag_current_values = [tag_current_values]

                # Keep previously-saved values in the selector even if they
                # were since removed from the category's allowed values, so
                # the author can still see and clear (or re-save) them.
                tag_values_not_exists = [cur_val for cur_val in tag_current_values
                                         if cur_val not in tag_available_values]
                tag_values_available_to_choose = tag_available_values + tag_values_not_exists
                tag_values_available_to_choose.sort()

                tags.append({
                    'key': tag.name,
                    'title': tag.title,
                    'values': tag_values_available_to_choose,
                    'current_values': tag_current_values,
                })
            fragment = Fragment(render_to_string('structured_tags_block.html', {'tags': tags,
                                                                                'tags_count': len(tags),
                                                                                'block_location': block.location}))
            fragment.add_javascript_url(self._get_studio_resource_url('/js/xblock_asides/structured_tags.js'))
            fragment.initialize_js('StructuredTagsInit')
            return fragment
        else:
            return Fragment(u'')

    @XBlock.handler
    def save_tags(self, request=None, suffix=None):  # pylint: disable=unused-argument
        """
        Handler to save chosen tags with the connected XBlock.

        Returns HTTP 400 when the body is not valid JSON, when a posted tag
        value is neither currently allowed nor already saved, or when no tag
        parameters were supplied at all.
        """
        try:
            posted_data = request.json
        except ValueError:
            return Response("Invalid request body", status=400)

        saved_tags = {}
        need_update = False

        for av_tag in self.get_available_tags():
            if av_tag.name in posted_data and posted_data[av_tag.name]:
                tag_available_values = av_tag.get_values()
                tag_current_values = self.saved_tags.get(av_tag.name, [])

                # Same string-vs-list normalisation as in student_view_aside.
                if isinstance(tag_current_values, basestring):
                    tag_current_values = [tag_current_values]

                for posted_tag_value in posted_data[av_tag.name]:
                    # A value is acceptable if it is currently allowed for the
                    # category OR was already saved on this block (legacy value).
                    if posted_tag_value not in tag_available_values and posted_tag_value not in tag_current_values:
                        return Response("Invalid tag value was passed: %s" % posted_tag_value, status=400)

                saved_tags[av_tag.name] = posted_data[av_tag.name]
                need_update = True
            if av_tag.name in posted_data:
                # The category key was posted even with an empty value
                # (meaning "clear these tags"), so the field must be rewritten.
                need_update = True

        if need_update:
            # Note: saved_tags is rebuilt from scratch, so categories not
            # present in the posted data are dropped.
            self.saved_tags = saved_tags
            return Response()
        else:
            return Response("Tags parameters were not passed", status=400)

    def get_event_context(self, event_type, event):  # pylint: disable=unused-argument
        """
        Return data that should be associated with the "problem_check" event,
        or None for any other event type (or when no tags are saved).
        """
        if self.saved_tags and event_type == "problem_check":
            return {'saved_tags': self.saved_tags}
        else:
            return None
|
Python
| 0
|
@@ -383,43 +383,8 @@
EW%0A%0A
-from .models import TagCategories%0A%0A
_ =
@@ -723,32 +723,148 @@
ags%0A %22%22%22%0A
+ # Import is placed here to avoid model import at project startup.%0A from .models import TagCategories%0A
return T
|
4e8d4f21749a329dd114926d3654512e9842a1e1
|
Change FULL_NAME_FUNCTION to GET_FULL_NAME_FUNCTION.
|
idbase/views.py
|
idbase/views.py
|
from django.shortcuts import render, redirect
from django.conf import settings
from idbase.exceptions import InvalidSessionError
import logging
from importlib import import_module
logger = logging.getLogger(__name__)
def index(request, template=None):
    """Render the Identity home page.

    The ``template`` argument is accepted for URLconf compatibility but the
    view always renders ``idbase/index.html``.
    """
    context = {'urls': settings.CORE_URLS}
    return render(request, 'idbase/index.html', context)
def login(request):
    """This view gets SSO-protected and redirects to next.

    Raises InvalidSessionError when the SSO layer did not authenticate the
    user.  On first login, optionally resolves and stores the user's full
    name via the dotted-path callable named by settings.FULL_NAME_FUNCTION.
    """
    if not request.user.is_authenticated():
        raise InvalidSessionError('no REMOTE_USER variable set')

    logger.info('User %s logged in' % (request.user.username))
    if (request.user.get_full_name() is None and
            hasattr(settings, 'FULL_NAME_FUNCTION')):
        module_name, attr_name = settings.FULL_NAME_FUNCTION.rsplit('.', 1)
        resolver = getattr(import_module(module_name), attr_name)
        request.user.set_full_name(resolver(request))
    return redirect(request.GET.get('next', '/'))
|
Python
| 0.000014
|
@@ -663,16 +663,20 @@
tings, '
+GET_
FULL_NAM
@@ -723,16 +723,20 @@
ettings.
+GET_
FULL_NAM
|
28749f1a0560411cfd6207563a29ff50a8bc014c
|
update wikinews family file from trunk r9996
|
pywikibot/families/wikinews_family.py
|
pywikibot/families/wikinews_family.py
|
# -*- coding: utf-8 -*-
from pywikibot import family
__version__ = '$Id$'
# The Wikimedia family that is known as Wikinews
class Family(family.Family):
    """Configuration for the Wikimedia family known as Wikinews."""

    def __init__(self):
        family.Family.__init__(self)
        self.name = 'wikinews'

        self.languages_by_size = [
            'sr', 'en', 'pl', 'fr', 'de', 'it', 'es', 'pt', 'zh', 'ru', 'ja',
            'sv', 'ta', 'ca', 'cs', 'el', 'fa', 'fi', 'ar', 'ro', 'he', 'bg',
            'tr', 'sd', 'sq', 'uk', 'no', 'bs', 'ko', 'eo',
        ]

        # Every language edition lives at <code>.wikinews.org.
        self.langs.update(
            (code, '%s.wikinews.org' % code) for code in self.languages_by_size)

        self.obsolete = {
            'hu': None,  # https://bugzilla.wikimedia.org/show_bug.cgi?id=28342
            'jp': 'ja',
            'nb': 'no',
            'nl': None,  # https://bugzilla.wikimedia.org/show_bug.cgi?id=20325
            'th': None,  # https://bugzilla.wikimedia.org/show_bug.cgi?id=28341
            'zh-tw': 'zh',
            'zh-cn': 'zh',
        }

        # Interlanguage-link ordering: wikis listed here use the given order
        # (remaining languages follow in code-alphabetical order); wikis not
        # listed fall back to plain code-alphabetical order.
        self.interwiki_putfirst = {
            'en': self.alphabetic,
            'fi': self.alphabetic,
            'fr': self.alphabetic,
            'he': ['en'],
            'hu': ['en'],
            'pl': self.alphabetic,
        }

        # Global bot allowed languages on http://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
        self.cross_allowed = ['ca', 'cs', 'fa']

        # CentralAuth cross avaliable projects.
        self.cross_projects = [
            'wikipedia', 'wiktionary', 'wikibooks', 'wikiquote', 'wikisource',
            'wikiversity', 'meta', 'mediawiki', 'test', 'incubator', 'commons',
            'species',
        ]

    def code2encoding(self, code):
        """All Wikinews wikis use UTF-8."""
        return 'utf-8'

    def shared_image_repository(self, code):
        """Images are shared from Wikimedia Commons."""
        return ('commons', 'commons')
|
Python
| 0
|
@@ -972,32 +972,482 @@
'zh'%0A %7D%0A%0A
+ # CentralAuth cross avaliable projects.%0A self.cross_projects = %5B%0A 'wiktionary', 'wikibooks', 'wikiquote', 'wikisource', 'wikinews',%0A 'wikiversity', 'meta', 'mediawiki', 'test', 'incubator', 'commons',%0A 'species',%0A %5D%0A%0A # Global bot allowed languages on http://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation%0A self.cross_allowed = %5B'ca', 'cs', 'en', 'fa',%5D%0A%0A
# Which
@@ -1510,16 +1510,16 @@
links,%0A
-
@@ -2122,52 +2122,67 @@
-# Global bot allowed languages on
+self.obsolete = %7B%0A 'hu': None, #
http
+s
://
-met
+bugzill
a.wi
@@ -2197,111 +2197,53 @@
org/
-wiki/Bot_policy/Implementation#Current_implementation%0A self.cross_allowed = %5B'ca', 'cs', 'f
+show_bug.cgi?id=28342%0A 'jp': 'j
a',
-%5D%0A
%0A
@@ -2251,247 +2251,233 @@
-# CentralAuth cross avaliable projects.%0A self.cross_projects = %5B%0A 'wikipedia', 'wiktionary', 'wikibooks', 'wikiquote', 'wikisource', 'wikiversity',%0A 'meta', 'mediawiki', 'test', 'incubator', 'commons', 'species
+ 'nb': 'no',%0A 'nl': None, # https://bugzilla.wikimedia.org/show_bug.cgi?id=20325%0A 'th': None, # https://bugzilla.wikimedia.org/show_bug.cgi?id=28341%0A 'zh-tw': 'zh',%0A 'zh-cn': 'zh
'%0A
@@ -2478,25 +2478,25 @@
zh'%0A
-%5D
+%7D
%0A%0A def co
|
c5e1b8f34c740f52117098cf5bc8f42e4c88a931
|
fix for #45
|
snapchat_bots/bot.py
|
snapchat_bots/bot.py
|
import logging, time, uuid, requests, base64
from pysnap import Snapchat
from pysnap.utils import make_request_token, timestamp
from snap import Snap
from constants import DEFAULT_TIMEOUT, STATIC_TOKEN, BASE_URL
FORMAT = '[%(asctime)-15s] %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger()
logger.level = logging.DEBUG
class SnapchatBot(object):
    """Framework around an authenticated Snapchat session.

    Subclasses may define any of the optional hooks:

    * ``initialize(**kwargs)`` -- extra setup, called at the end of __init__
    * ``on_snap(sender, snap)`` -- called for each newly received snap
    * ``on_friend_add(friend)`` / ``on_friend_delete(friend)`` -- called when
      a user adds or deletes the bot
    """

    def __init__(self, username, password, **kwargs):
        # Short random id so log lines from concurrent bots are distinguishable.
        self.bot_id = uuid.uuid4().hex[0:4]

        self.auth_token = STATIC_TOKEN

        self.username = username
        self.password = password

        # Log in with the static token, then switch to the per-session auth
        # token returned by the server.
        r = self._make_request("/loq/login", {
            'username': self.username,
            'password': self.password
        })

        result = r.json()
        self.auth_token = result['updates_response']['auth_token']

        self.client = Snapchat()
        self.client.username = username
        self.client.auth_token = self.auth_token

        self.current_friends = self.get_friends()
        self.added_me = self.get_added_me()

        if hasattr(self, "initialize"):
            self.initialize(**kwargs)

    def log(self, message, level=logging.DEBUG):
        """Log ``message`` tagged with the bot class name and instance id."""
        logger.log(level, "[%s-%s] %s" % (self.__class__.__name__, self.bot_id, message))

    @staticmethod
    def process_snap(snap_obj, data, is_story=False):
        """Build a Snap object from a raw snap dict plus its media blob."""
        media_type = snap_obj["media_type"]
        sender = snap_obj["sender"]
        snap_id = snap_obj['id']
        duration = snap_obj['time']
        snap = Snap(data=data,
                    snap_id=snap_id,
                    media_type=media_type,
                    duration=duration,
                    sender=sender,
                    is_story=is_story)
        return snap

    def mark_viewed(self, snap):
        """Mark the given snap as viewed on the server."""
        self.client.mark_viewed(snap.snap_id)

    def listen(self, timeout=DEFAULT_TIMEOUT):
        """Poll forever for new snaps and friend changes, dispatching hooks.

        Blocks the calling thread and sleeps ``timeout`` seconds between polls.
        """
        while True:
            self.log("Querying for new snaps...")
            snaps = self.get_snaps()

            if hasattr(self, "on_snap"):
                for snap in snaps:
                    self.on_snap(snap.sender, snap)

            added_me = self.get_added_me()

            newly_added = set(added_me).difference(self.added_me)
            newly_deleted = set(self.added_me).difference(added_me)

            self.added_me = added_me

            if hasattr(self, "on_friend_add"):
                for friend in newly_added:
                    self.log("User %s added me" % friend)
                    self.on_friend_add(friend)

            if hasattr(self, "on_friend_delete"):
                for friend in newly_deleted:
                    self.log("User %s deleted me" % friend)
                    self.on_friend_delete(friend)

            time.sleep(timeout)

    def get_friends(self):
        """Return the usernames of the bot's current friends."""
        return map(lambda fr: fr['name'], self.client.get_friends())

    def get_added_me(self):
        """Return the usernames of users who have added the bot."""
        updates = self.client.get_updates()
        return map(lambda fr: fr['name'], updates["added_friends"])

    def send_snap(self, recipients, snap):
        """Send ``snap`` to one username or a list of usernames."""
        media_id = self._upload_snap(snap)

        if type(recipients) is not list:
            recipients = [recipients]

        recipients_str = ','.join(recipients)

        self.log("Sending snap %s to %s" % (snap.snap_id, recipients_str))

        self.client.send(media_id, recipients_str, snap.duration)

    def post_story(self, snap):
        """Post ``snap`` to the bot's story, recording the story id when the
        server response contains one."""
        media_id = self._upload_snap(snap)
        response = self.client.send_to_story(media_id, snap.duration, snap.media_type)

        try:
            snap.story_id = response['json']['story']['id']
        except (KeyError, TypeError):
            # Response layout varies; a missing id just means delete_story
            # will be a no-op for this snap.  (Narrowed from a bare except.)
            pass

    def delete_story(self, snap):
        """Delete ``snap`` from the bot's story (no-op without a story id)."""
        # getattr guards snaps that never went through post_story successfully.
        # (A leftover debug ``print`` statement was removed here.)
        if getattr(snap, 'story_id', None) is None:
            return

        self.client._request('delete_story', {
            'username': self.username,
            'story_id': snap.story_id
        })

    def add_friend(self, username):
        """Add ``username`` as a friend."""
        self.client.add_friend(username)

    def delete_friend(self, username):
        """Remove ``username`` from the bot's friends."""
        self.client.delete_friend(username)

    def block(self, username):
        """Block ``username``."""
        self.client.block(username)

    def process_snaps(self, snaps, mark_viewed=True):
        """Download media for raw snap dicts and return Snap objects.

        Skips snaps already viewed (status == 2) and snaps whose media blob
        cannot be fetched.
        """
        ret = []

        for snap_obj in snaps:
            if snap_obj['status'] == 2:
                continue

            data = self.client.get_blob(snap_obj["id"])

            if data is None:
                continue

            snap = self.process_snap(snap_obj, data)

            if mark_viewed:
                self.mark_viewed(snap)

            ret.append(snap)

        return ret

    def process_stories(self, stories):
        """Decrypt raw story dicts and return them as Snap objects."""
        ret = []
        for snap_obj in stories:
            media_key = base64.b64decode(snap_obj['media_key'])
            media_iv = base64.b64decode(snap_obj['media_iv'])
            data = self.client.get_story_blob(snap_obj['media_id'],
                                              media_key,
                                              media_iv)

            if data is None:
                continue

            snap_obj['sender'] = self.username
            snap = self.process_snap(snap_obj, data, is_story=True)
            ret.append(snap)
        return ret

    def get_snaps(self, mark_viewed=True):
        """Fetch and process all pending snaps."""
        snaps = self.client.get_snaps()
        # Bug fix: forward mark_viewed instead of always using the default.
        return self.process_snaps(snaps, mark_viewed)

    def get_my_stories(self):
        """Return the bot's own story snaps."""
        response = self.client._request('stories', {
            'username': self.username
        })
        stories = map(lambda s: s['story'], response.json()['my_stories'])
        return self.process_stories(stories)

    def get_friend_stories(self):
        """Return every story snap posted by the bot's friends."""
        response = self.client._request('stories', {
            'username': self.username
        })
        ret = []
        stories_per_friend = map(lambda s: s['stories'], response.json()['friend_stories'])
        for stories_obj in stories_per_friend:
            stories = map(lambda so: so['story'], stories_obj)
            ret.extend(self.process_stories(stories))
        return ret

    def clear_stories(self):
        """Delete every snap currently on the bot's story."""
        for story in self.get_my_stories():
            self.delete_story(story)

    def _upload_snap(self, snap):
        """Upload the snap's media once, caching the resulting media id."""
        # Fix (#45): Snap objects have no dict-style ``get``; read the
        # ``uploaded`` attribute instead (treated as False when not yet set).
        if not getattr(snap, "uploaded", False):
            snap.media_id = self.client.upload(snap.file.name)
            snap.uploaded = True

        return snap.media_id

    def _make_request(self, path, data=None, method='POST', files=None):
        """Issue a raw HTTP request against the Snapchat API, signing POST
        bodies with the current auth token."""
        if data is None:
            data = {}

        headers = {
            'User-Agent': 'Snapchat/8.1.1 (iPhone5,1; iOS 8.1.3; gzip)',
            'Accept-Language': 'en-US;q=1, en;q=0.9',
            'Accept-Locale': 'en'
        }

        now = timestamp()

        if method == 'POST':
            data['timestamp'] = now
            data['req_token'] = make_request_token(self.auth_token, str(now))
            resp = requests.post(BASE_URL + path, data=data, files=files, headers=headers)
        else:
            resp = requests.get(BASE_URL + path, params=data, headers=headers)

        return resp
|
Python
| 0
|
@@ -6136,21 +6136,16 @@
nap.
-get(%22
uploaded
%22):%0A
@@ -6140,18 +6140,16 @@
uploaded
-%22)
:%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.