commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
f7e4ca11c7bfc35bf0fd6becd2a5d5fdd2ca5ed5 | Add a script to split data with partitions. | src/main/python/partition_data.py | src/main/python/partition_data.py | import csv;
import random;
import sys;
in_file = str(sys.argv[1])
out_file = str(sys.argv[2])
num_partitions = int(sys.argv[3])
header = [];
partitions = [];
for i in range(num_partitions):
partitions.append([])
# Load all the training rows
row_num = 0;
with open(in_file) as file:
reader = csv.reader(file);
header = reader.next();
for row in reader:
partitions[row_num % num_partitions].append(row);
row_num += 1;
# Write test and train files for k partitions
for i in range(num_partitions):
train_rows = []
test_rows = partitions[i];
for j in range(num_partitions):
if i != j:
for row in partitions[j]:
train_rows.append(row);
with open(out_file+'_k'+str(i+1)+'_train.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in train_rows:
writer.writerow(row)
with open(out_file+'_k'+str(i+1)+'_test.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in test_rows:
writer.writerow(row)
| Python | 0 | |
dc76e7c085e7462d75567bf3d0228defb6bbbc58 | Add tests for converter | tests/test_csv_converter.py | tests/test_csv_converter.py | import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
sys.path.insert(1, os.path.dirname(".."))
import unittest
from csv_converter import CsvConverter
class TestCsvConverter(unittest.TestCase):
def test_parse_csv(self):
converter = CsvConverter("tests/data/data_1.csv")
converter.setSourceColumns("tuotekoodi", "qty")
converter.read_file()
def test_convert_row(self):
converter = CsvConverter("")
row = converter.convertRow({
"product_code": "some_code",
"quantity": "50"
})
self.assertEqual("some_code", row["product_code"])
with self.assertRaises(ValueError):
row = converter.convertRow({
"product_code": "23",
"quantity": "error"
})
with self.assertRaises(ValueError):
row = converter.convertRow({
"product_code": "",
"quantity": "error"
})
with self.assertRaises(ValueError):
row = converter.convertRow({
"product_code": "sd",
"quantity": ""
})
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
645507ed9ec43b354880673fbc75afe169ef6697 | Add test capturing bad implementation of contains handler. | tests/unit/test_handlers.py | tests/unit/test_handlers.py | from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
| Python | 0 | |
d61c42221774f36477b1288396f4e7e7337e905c | add data migration | formly/migrations/0012_fix_multi_text_answer_data.py | formly/migrations/0012_fix_multi_text_answer_data.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2018-01-23 13:46
from __future__ import unicode_literals
import json
from django.db import migrations
def migrate_data(apps, schema_editor):
FieldResult = apps.get_model("formly", "FieldResult")
# alias for Field.MULTIPLE_TEXT
MULTIPLE_TEXT_TYPE = 8
multiple_text_results = FieldResult.objects.filter(question__field_type=MULTIPLE_TEXT_TYPE)
print("\n")
if multiple_text_results.exists() is False:
print("formly-data-migration: No multiple text results data found. Skipping data migration.")
return
print("formly-data-migration: Updating data on {} FieldResult instances".format(multiple_text_results.count()))
for result in multiple_text_results:
raw_answer = result.answer["answer"]
if isinstance(raw_answer, unicode):
try:
answer = json.loads(raw_answer)
except:
answer = [raw_answer]
result.answer["answer"] = answer
result.save()
print("formly-data-migration: Data update complete!")
class Migration(migrations.Migration):
dependencies = [
("formly", "0011_field_mapping"),
]
operations = [
migrations.RunPython(migrate_data),
]
| Python | 0.000001 | |
a1f864de0c5e71f0e9dc0ff4a23dc8101556832b | add new script | icart_mini_navigation/scripts/navigation_strategy.py | icart_mini_navigation/scripts/navigation_strategy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import roslib; roslib,load_manifest('rospeex_if')
except:
pass
import rospy
import re
from rospeex_if import ROSpeexInterface
from std_msgs.msg import String
syscommand_pub = rospy.Publisher('syscommand', String, queue_size=10)
rospy.init_node('navigation_strategy', anonymous=True)
r = rospy.Rate(10)
class talk_node(object):
def __init__(self):
self._interface = ROSpeexInterface()
def sr_response(self, message):
run = re.compile('(?P<run>走行)').search(message)
start = re.compile('(?P<start>開始)').search(message)
print 'you said : %s' %message
if run is not None and start is not None:
text = u'ナビゲーションを開始します。'
robot_msg = 'start'
rospy.loginfo(robot_msg)
syscommand_pub.publish(robot_msg)
print 'rospeex reply : %s' %text
self._interface.say(text, 'ja', 'nict')
def run(self):
self._interface.init()
self._interface.register_sr_response(self.sr_response)
self._interface.set_spi_config(language='ja',engine='nict')
rospy.spin()
if __name__ == '__main__':
try:
node = talk_node()
node.run()
except rospy.ROSInterruptException:
pass
| Python | 0.000001 | |
b3e9075e819402f93f7dc2e29b61e3e621ab7355 | Add unit tests for avging imputations | impy/imputations/tests/test_averaging_imputations.py | impy/imputations/tests/test_averaging_imputations.py | """test_averaging_imputations.py"""
import unittest
import numpy as np
from impy.imputations import mean_imputation
from impy.imputations import mode_imputation
from impy.imputations import median_imputation
from impy.datasets import random_int
class TestAveraging(unittest.TestCase):
""" Tests for Averaging """
def setUp(self):
self.data = random_int(missingness="complete")
def test_mean_return_type(self):
"""Mean Imputation Return Type"""
self.assertEqual(str(type(mean_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mode_return_type(self):
"""Mode Imputation Return Type"""
self.assertEqual(str(type(mode_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_median_return_type(self):
"""Median Imputation Return Type"""
self.assertEqual(str(type(median_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mean_fill(self):
""" Mean Imputation Fill Complete Data(nothing should happen)"""
actual = mean_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_mode_fill(self):
""" Mode Imputation Fill Complete Data(nothing should happen)"""
actual = mode_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_median_fill(self):
""" Median Imputation Fill Complete Data(nothing should happen)"""
actual = median_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
if __name__ == "__main__":
unittest.main()
| Python | 0 | |
dd784f9035f66fd7d4febb8e43a09353821312b9 | add solution for Kth Largest Element in an Array | algorithms/KthLargestElementInAnArray/KthLargestElementInAnArray.py | algorithms/KthLargestElementInAnArray/KthLargestElementInAnArray.py | class Solution:
# @param {integer[]} nums
# @param {integer} k
# @return {integer}
def findKthLargest(self, nums, k):
k = len(nums) - k
def quickselect(st, ed):
pivot = nums[ed]
pos = st
for i in xrange(st, ed):
if nums[i] < pivot:
nums[i], nums[pos] = nums[pos], nums[i]
pos += 1
nums[pos], nums[ed] = nums[ed], nums[pos]
if pos == k:
return nums[pos]
elif pos < k:
return quickselect(pos+1, ed)
else:
return quickselect(st, pos-1)
return quickselect(0, len(nums)-1)
| Python | 0 | |
d3afe17fa3f259e2b09f76e4b486e4cbf9e659c3 | Create albumCoverFinder.py | albumCoverFinder.py | albumCoverFinder.py | # albumCoverFinder - Brian Tran, btran29@gmail.com
# This program scans a tree of directories containing mp3 files. For
# each directory, it attempts to download the cover image from the
# Apple iTunes service. Subdirectories must be named <Artist>/<Album>
# contain .mp3 files to be considered. The cover will be saved to
# "cover.jpg" in each directory.
# Usage example:
# albumCoverFinder.py <music directory>
import sys
import os
import shutil
import re
import urllib.request
import json
import tempfile
# For testing + possible future expansion with classes
defaults = {
"artist": 'Jack Johnson',
"album": 'In Between Dreams',
"country": 'US',
"media": 'music',
"attribute": 'albumTerm',
"base": 'https://itunes.apple.com/search?'
}
# Clean up album names via dictionary below
cleanup_table = {
' ': '+'
}
# Clean up album folder names for input
def clean_input(term):
print("\n" + "Search Term: " + "\"" + term + "\"")
# Replaces strings in folder names with keywords
# in cleanup_table via regex
pattern = re.compile('|'.join(cleanup_table.keys()))
term = pattern.sub(lambda x: cleanup_table[x.group()], term)
return term
# Generate url for apple api search
def gen_url(term):
url = defaults["base"] + \
'term=' + term + '&' + \
'attribute=' + defaults["attribute"] + '&' +\
'media=' + defaults["media"]
print("URL Used: " + url)
return url
# Connect to website and collect response
def collect_data(url):
response = urllib.request.urlopen(url)
# Convert to http response to utf-8
string = response.read().decode('utf-8')
data = json.loads(string) # returns dictionary object
return data
# Parse data to get album cover url
def parse_data(data, artist):
data = data['results']
# Initialize key vars
found = False
album_art_url = 'stringThing'
# Loop over results to find matching artist given album
for result in data:
if result['artistName'] == artist:
found = True
album_art_url = result['artworkUrl100']
print("Album Art URL: " + album_art_url)
break
if found is False:
print("No album/artist combination found.")
return album_art_url
# Download album art
def download(album_art_url):
img = urllib.request.urlopen(album_art_url)
output = tempfile.mktemp(".jpg")
# Enable writing
o = open(output, "wb")
o.write(img.read())
o.close()
return output
# Simplified method
def get_art(directory):
# Get path values, artist, album
final_path = directory + os.sep + "cover.jpg"
values = directory.split(os.sep)
artist = values[-2]
album = values[-1]
# Run through procedure
url = gen_url(clean_input(album))
data = collect_data(url)
parsed_url = parse_data(data, artist)
dl_art = download(parsed_url)
if dl_art is not None:
# Copy file to location
shutil.copyfile(dl_art, final_path)
os.remove(dl_art)
print("Saved to: " + final_path)
# Define usage
def usage(argv):
print("Usage" + argv[1] + "<music root directory>")
sys.exit(1)
# Main method
def main(argv):
if len(argv) < 2:
usage(argv)
source_directory = argv[1]
print("Searching within: " + source_directory)
# Obtain list of directories
directories = [source_directory]
for directory in directories:
files = os.listdir(directory)
for file in files:
if os.path.isdir(os.path.join(directory, file)):
directories.append(os.path.join(directory, file))
# Travel through directories
for directory in directories:
files = os.listdir(directory)
for file in files:
# TODO: skip directories with cover.jpg already present
# Only directories with mp3 files
if file.endswith('.mp3'):
# Get album art for this directory
get_art(directory)
break
# TODO: try out os.walk
# for root, dirs, files in os.walk(source_directory):
# for directory in dirs:
# # for file in files:
# # if file.endswith(".mp3"):
# # # y
# # Get album art for this directory
# get_art(directory)
# Limits this python file to script functionality (vs a module)
if __name__ == "__main__":
main(sys.argv)
| Python | 0 | |
aa411ddcd62b824c0bfe8660c795b71e6e6929ea | add reset command. | axes/utils.py | axes/utils.py | from axes.models import AccessAttempt
def reset(ip=None, silent=False):
if not ip:
attempts = AccessAttempt.objects.all()
if attempts:
for attempt in AccessAttempt.objects.all():
attempt.delete()
else:
if not silent:
print 'No attempts found.'
else:
try:
attempt = AccessAttempt.objects.get(ip_address=ip)
except:
if not silent:
print 'No matching attempt found.'
else:
attempt.delete()
| Python | 0.001104 | |
ceb8a32637bc0fd9ab0517be7f025755e19ec2c7 | add leetcode Excel Sheet Column Number | leetcode/ExcelSheetColumnNumber/solution.py | leetcode/ExcelSheetColumnNumber/solution.py | # -*- coding:utf-8 -*-
class Solution:
# @param s, a string
# @return an integer
def titleToNumber(self, s):
col = 0
for c in s:
col = col * 26 + ord(c) - ord('A') + 1
return col
| Python | 0.000001 | |
cf7e9dfec0c0cdab913f98ff325210b552610219 | Add new runner, search! | salt/runners/search.py | salt/runners/search.py | '''
Runner frontend to search system
'''
# Import salt libs
import salt.search
import salt.output
def query(term):
'''
Query the search system
'''
search = salt.search.Search(__opts__)
result = search.query(term)
salt.output.display_output(result, 'pprint', __opts__)
return result
| Python | 0 | |
da704e95b010330efd350e7ed85e51f252b8a453 | add missing migration | aldryn_redirects/migrations/0002_on_delete_and_verbose_names.py | aldryn_redirects/migrations/0002_on_delete_and_verbose_names.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-22 08:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('aldryn_redirects', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='redirecttranslation',
options={'default_permissions': (), 'managed': True, 'verbose_name': 'redirect Translation'},
),
migrations.AlterField(
model_name='redirect',
name='site',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aldryn_redirects_redirect_set', to='sites.Site'),
),
migrations.AlterField(
model_name='redirecttranslation',
name='language_code',
field=models.CharField(db_index=True, max_length=15, verbose_name='Language'),
),
]
| Python | 0.000258 | |
17ae9e25663d029af11236584b4c759c895ae830 | Improve and consolidate condition scripts of Lithium to support timeouts and regex via optparse. r=Jesse | util/fileIngredients.py | util/fileIngredients.py | #!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
import re
def fileContains(f, s, isRegex):
if isRegex:
return fileContainsRegex(f, re.compile(s, re.MULTILINE))
else:
return fileContainsStr(f, s), s
def fileContainsStr(f, s):
found = False
with open(f, 'rb') as g:
for line in g:
if line.find(s) != -1:
print line.rstrip()
found = True
return found
def fileContainsRegex(f, regex):
# e.g. ~/fuzzing/lithium/lithium.py crashesat --timeout=30
# --regex '^#0\s*0x.* in\s*.*(?:\n|\r\n?)#1\s*' ./js --ion -n 735957.js
# Note that putting "^" and "$" together is unlikely to work.
matchedStr = ''
found = False
with open(f, 'rb') as g:
foundRegex = regex.search(g.read())
if foundRegex:
matchedStr = foundRegex.group()
print matchedStr
found = True
return found, matchedStr
| Python | 0 | |
03baa59cea76ab85f661bfa3e8d910fd6a7ae82a | Remove leading slash in redirections | nikola/plugins/task/redirect.py | nikola/plugins/task/redirect.py | # -*- coding: utf-8 -*-
# Copyright © 2012-2016 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Generate redirections."""
from __future__ import unicode_literals
import os
from nikola.plugin_categories import Task
from nikola import utils
class Redirect(Task):
"""Generate redirections."""
name = "redirect"
def gen_tasks(self):
"""Generate redirections tasks."""
kw = {
'redirections': self.site.config['REDIRECTIONS'],
'output_folder': self.site.config['OUTPUT_FOLDER'],
'filters': self.site.config['FILTERS'],
}
yield self.group_task()
if kw['redirections']:
for src, dst in kw["redirections"]:
src_path = os.path.join(kw["output_folder"], src.lstrip('/'))
yield utils.apply_filters({
'basename': self.name,
'name': src_path,
'targets': [src_path],
'actions': [(utils.create_redirect, (src_path, dst))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.redirect')],
}, kw["filters"])
| # -*- coding: utf-8 -*-
# Copyright © 2012-2016 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Generate redirections."""
from __future__ import unicode_literals
import os
from nikola.plugin_categories import Task
from nikola import utils
class Redirect(Task):
"""Generate redirections."""
name = "redirect"
def gen_tasks(self):
"""Generate redirections tasks."""
kw = {
'redirections': self.site.config['REDIRECTIONS'],
'output_folder': self.site.config['OUTPUT_FOLDER'],
'filters': self.site.config['FILTERS'],
}
yield self.group_task()
if kw['redirections']:
for src, dst in kw["redirections"]:
src_path = os.path.join(kw["output_folder"], src)
yield utils.apply_filters({
'basename': self.name,
'name': src_path,
'targets': [src_path],
'actions': [(utils.create_redirect, (src_path, dst))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.redirect')],
}, kw["filters"])
| Python | 0.000001 |
21ef2114975a315815d960fd1f28c5e4036fb935 | Update browsermark to use results.AddValue(..) | tools/perf/benchmarks/browsermark.py | tools/perf/benchmarks/browsermark.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Browsermark CSS, DOM, WebGL, JS, resize and page load benchmarks.
Browsermark benchmark suite have five test groups:
a) CSS group: measures your browsers 2D and 3D performance, and finally executes
CSS Crunch test
b) DOM group: measures variety of areas, like how well your browser traverse in
Document Object Model Tree or how fast your browser can create dynamic content
c) General group: measures areas like resize and page load times
d) Graphics group: tests browsers Graphics Processing Unit power by measuring
WebGL and Canvas performance
e) Javascript group: executes number crunching by doing selected Array and
String operations
Additionally Browsermark will test your browsers conformance, but conformance
tests are not included in this suite.
"""
import os
from telemetry import benchmark
from telemetry.page import page_measurement
from telemetry.page import page_set
from telemetry.value import scalar
class _BrowsermarkMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
# Select nearest server(North America=1) and start test.
js_start_test = """
for (var i=0; i < $('#continent a').length; i++) {
if (($('#continent a')[i]).getAttribute('data-id') == '1') {
$('#continent a')[i].click();
$('.start_test.enabled').click();
}
}
"""
tab.ExecuteJavaScript(js_start_test)
tab.WaitForJavaScriptExpression(
'window.location.pathname.indexOf("results") != -1', 600)
result = int(tab.EvaluateJavaScript(
'document.getElementsByClassName("score")[0].innerHTML'))
results.AddValue(
scalar.ScalarValue(results.current_page, 'Score', 'score', result))
@benchmark.Disabled
class Browsermark(benchmark.Benchmark):
"""Browsermark suite tests CSS, DOM, resize, page load, WebGL and JS."""
test = _BrowsermarkMeasurement
def CreatePageSet(self, options):
ps = page_set.PageSet(
file_path=os.path.abspath(__file__),
archive_data_file='../page_sets/data/browsermark.json',
make_javascript_deterministic=False)
ps.AddPageWithDefaultRunNavigate('http://browsermark.rightware.com/tests/')
return ps
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Browsermark CSS, DOM, WebGL, JS, resize and page load benchmarks.
Browsermark benchmark suite have five test groups:
a) CSS group: measures your browsers 2D and 3D performance, and finally executes
CSS Crunch test
b) DOM group: measures variety of areas, like how well your browser traverse in
Document Object Model Tree or how fast your browser can create dynamic content
c) General group: measures areas like resize and page load times
d) Graphics group: tests browsers Graphics Processing Unit power by measuring
WebGL and Canvas performance
e) Javascript group: executes number crunching by doing selected Array and
String operations
Additionally Browsermark will test your browsers conformance, but conformance
tests are not included in this suite.
"""
import os
from telemetry import benchmark
from telemetry.page import page_measurement
from telemetry.page import page_set
class _BrowsermarkMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
# Select nearest server(North America=1) and start test.
js_start_test = """
for (var i=0; i < $('#continent a').length; i++) {
if (($('#continent a')[i]).getAttribute('data-id') == '1') {
$('#continent a')[i].click();
$('.start_test.enabled').click();
}
}
"""
tab.ExecuteJavaScript(js_start_test)
tab.WaitForJavaScriptExpression(
'window.location.pathname.indexOf("results") != -1', 600)
result = int(tab.EvaluateJavaScript(
'document.getElementsByClassName("score")[0].innerHTML'))
results.Add('Score', 'score', result)
@benchmark.Disabled
class Browsermark(benchmark.Benchmark):
"""Browsermark suite tests CSS, DOM, resize, page load, WebGL and JS."""
test = _BrowsermarkMeasurement
def CreatePageSet(self, options):
ps = page_set.PageSet(
file_path=os.path.abspath(__file__),
archive_data_file='../page_sets/data/browsermark.json',
make_javascript_deterministic=False)
ps.AddPageWithDefaultRunNavigate('http://browsermark.rightware.com/tests/')
return ps
| Python | 0.000001 |
b0a6192649dd47548e007410b9f1a60ec23466de | Add files via upload | XMLAnalyze2.py | XMLAnalyze2.py | # Author: Andrew Sainz
#
# Purpose: XMLParser is designed to iterate through a collection of Post data collected from Stack Overflow
# forums. Data collected to analize the code tagged information to find the language of the code
# being utilized.
#
# How to use: To run from command line input "python XMLParser.py [XML file name].xml"
import xml.etree.ElementTree as ET
import sys
import re
from nltk.util import ngrams
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.classify import PositiveNaiveBayesClassifier
def parseBodyForTagCode(body):
try:
# Code is a string that contains all code tag data within the body
# ex. code = ['<code>EXCEPT</code>, <code>LEFT JOIN</code>']
code = [body[m.start():m.end()] for m in re.finditer('<code>(.+?)</code>', body)]
# print(code)
except AttributeError:
code = None
return code
def features(sentence):
words = sentence.lower().split()
return dict(('contains(%s)' %w, True) for w in words)
# Known list tag fields
knownJavaTags = []
knownJavaMention = []
knownC = []
knownCSharp = []
knownPython = []
xmldoc = sys.argv[1]
tree = ET.parse(xmldoc)
root = tree.getroot()
# print (root.attrib)
myList = []
# for each row in the xml document gather body information
for row in root:
# Body holds all comment information from post
body = row.get('Body')
rowId = row.get('Id')
# Tags for comment post
tags = row.get('Tags')
# parse body to find code tags
code = parseBodyForTagCode(body)
# Encode list information about code into UTF8
codeUni = repr([x.encode('UTF8') for x in code])
# If code isn't present ignore post move to next post
if codeUni == '[]':
continue
cleanCode = ""
for element in codeUni:
print (element is str)
element.decode()
cleanCode = element + cleanCode
cleanCode = re.sub('<code>|</code>','',cleanCode)
print (cleanCode)
if tags != None:
# Assign all known code to list
if ("<java>" in tags):
knownJavaTags.append(codeUni)
if ("<python>" in tags) or ("python" in body):
knownPython.append(rowId+'`'+codeUni+'`'+tags)
if ("<C>" in tags) or ("C" in body):
knownC.append(rowId+'`'+codeUni+'`'+tags)
if ("<C#>" in tags) or ("C#" in body):
knownCSharp.append(rowId+'`'+codeUni+'`'+tags)
# Known post tags are added to myList
myList.append(rowId+'`'+codeUni+'`'+tags)
else:
# unknown code tag is added to myList
myList.append(rowId+'`'+codeUni)
if "java" in body:
knownJavaMention.append(codeUni)
# Assign positive features
positive_featuresets = list(map(features, knownJavaTags))
unlabeled_featuresets = list(map(features, knownJavaMention))
classifier = PositiveNaiveBayesClassifier.train(positive_featuresets, unlabeled_featuresets)
# Ngram section
# print(myList)
############################################################################
for item in myList:
allCodeTags = [item[m.start():m.end()] for m in re.finditer('<code>(.+?)</code>', item)]
for code in allCodeTags:
cleanCode = re.sub('<code>|</code>','',code)
# print (cleanCode)
# print(classifier.classify(features(cleanCode)))
trigrams = ngrams(cleanCode.split(), 3)
# for grams in trigrams:
# print (grams)
# break | Python | 0 | |
8204a8b84cdcd515ea1dcf7ab67574b6db5baca6 | Add WS caller | web_services/ws_test.py | web_services/ws_test.py | import functools
import xmlrpclib
HOST = 'localhost'
PORT = 8069
DB = 'odoo_curso'
USER = 'admin'
PASS = 'admin'
ROOT = 'http://%s:%d/xmlrpc/' % (HOST,PORT)
# 1. Login
uid = xmlrpclib.ServerProxy(ROOT + 'common').login(DB,USER,PASS)
print "Logged in as %s (uid:%d)" % (USER,uid)
call = functools.partial(
xmlrpclib.ServerProxy(ROOT + 'object').execute,
DB, uid, PASS)
# 2. Read the sessions
model = 'openacademy.session'
domain = []
method_name = 'search_read'
sessions = call(model, method_name, domain, ['name','seats','taken_seats'])
for session in sessions:
print "Session %s (%s seats), taken seats %d" % (session['name'], session['seats'], session['taken_seats'])
# 3.create a new session
course_id = call('openacademy.course', 'search', [('name','ilike','Functional')])[0]
session_id = call(model, 'create', {
'name' : 'My session loca',
'course_id' : course_id,
})
| Python | 0 | |
605fb4c6726d0c66bada870bffe526d493195b33 | Create USN.py | USN.py | USN.py | #Spooky scary skeeletons send shiveers down your spine
#You are a gunner in the Navy. Destroy the Commies.
| Python | 0.000003 | |
129e548ac0be8ee3a60dd85aca9d095456b7d3a6 | Add new py-testresources package (#14031) | var/spack/repos/builtin/packages/py-testresources/package.py | var/spack/repos/builtin/packages/py-testresources/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTestresources(PythonPackage):
"""Testresources, a pyunit extension for managing expensive test resources.
"""
homepage = "https://launchpad.net/testresources"
url = "https://pypi.io/packages/source/t/testresources/testresources-2.0.1.tar.gz"
version('2.0.1', sha256='ee9d1982154a1e212d4e4bac6b610800bfb558e4fb853572a827bc14a96e4417')
depends_on('py-setuptools', type='build')
| Python | 0 | |
3e7b9b69e68c8594eac92d88f0579aab40d7d5ae | Test aborting queued live migration | nova/tests/functional/libvirt/test_live_migration.py | nova/tests/functional/libvirt/test_live_migration.py | # Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from lxml import etree
from nova.tests.functional import integrated_helpers
from nova.tests.functional.libvirt import base as libvirt_base
class LiveMigrationQueuedAbortTest(
libvirt_base.LibvirtMigrationMixin,
libvirt_base.ServersTestBase,
integrated_helpers.InstanceHelperMixin
):
"""Functional test for bug 1949808.
This test is used to confirm that VM's state is reverted properly
when queued Live migration is aborted.
"""
api_major_version = 'v2.1'
microversion = '2.74'
ADMIN_API = True
def setUp(self):
super().setUp()
# We will allow only one live migration to be processed at any
# given period of time
self.flags(max_concurrent_live_migrations='1')
self.src_hostname = self.start_compute(hostname='src')
self.dest_hostname = self.start_compute(hostname='dest')
self.src = self.computes[self.src_hostname]
self.dest = self.computes[self.dest_hostname]
# Live migration's execution could be locked if needed
self.lock_live_migration = threading.Lock()
def _migrate_stub(self, domain, destination, params, flags):
# Execute only if live migration is not locked
with self.lock_live_migration:
self.dest.driver._host.get_connection().createXML(
params['destination_xml'],
'fake-createXML-doesnt-care-about-flags')
conn = self.src.driver._host.get_connection()
# Because migrateToURI3 is spawned in a background thread,
# this method does not block the upper nova layers. Because
# we don't want nova to think the live migration has
# finished until this method is done, the last thing we do
# is make fakelibvirt's Domain.jobStats() return
# VIR_DOMAIN_JOB_COMPLETED.
server = etree.fromstring(
params['destination_xml']
).find('./uuid').text
dom = conn.lookupByUUIDString(server)
dom.complete_job()
def test_queued_live_migration_abort(self):
# Lock live migrations
self.lock_live_migration.acquire()
# Start instances: first one would be used to occupy
# executor's live migration queue, second one would be used
# to actually confirm that queued live migrations are
# aborted properly.
self.server_a = self._create_server(
host=self.src_hostname, networks='none')
self.server_b = self._create_server(
host=self.src_hostname, networks='none')
# Issue live migration requests for both servers. We expect that
# server_a live migration would be running, but locked by
# self.lock_live_migration and server_b live migration would be
# queued.
self._live_migrate(
self.server_a,
migration_expected_state='running',
server_expected_state='MIGRATING'
)
self._live_migrate(
self.server_b,
migration_expected_state='queued',
server_expected_state='MIGRATING'
)
# Abort live migration for server_b
serverb_migration = self.api.api_get(
'/os-migrations?instance_uuid=%s' % self.server_b['id']
).body['migrations'].pop()
self.api.api_delete(
'/servers/%s/migrations/%s' % (self.server_b['id'],
serverb_migration['id']))
self._wait_for_migration_status(self.server_b, ['cancelled'])
# Unlock live migrations and confirm that server_a becomes
# active again after successful live migration
self.lock_live_migration.release()
self._wait_for_state_change(self.server_a, 'ACTIVE')
# FIXME(artom) Assert the server_b never comes out of 'MIGRATING'
self.assertRaises(
AssertionError,
self._wait_for_state_change, self.server_b, 'ACTIVE')
self._wait_for_state_change(self.server_b, 'MIGRATING')
| Python | 0.000103 | |
226238bf4c672a58bb6c066f79301701d594a5c0 | Add notobuilder script | scripts/notobuilder.py | scripts/notobuilder.py | """Build a Noto font from one or more source files.
By default, places unhinted TTF, hinted TTF, OTF and (if possible) variable
fonts into the ``output/`` directory.
Currently does not support building from Monotype sources.
"""
import logging
import os
import re
import sys
from gftools.builder import GFBuilder
from gftools.builder.autohint import autohint
class NotoBuilder(GFBuilder):
def __init__(self, sources):
family = self.get_family_name(sources[0])
self.config = {
"sources": sources,
"familyName": family,
"buildVariable": True,
"autohintTTF": False, # We will, our own way
"buildWebfont": False,
"vfDir": "output/%s/unhinted/variable-ttf" % family,
"otDir": "output/%s/unhinted/otf" % family,
"ttDir": "output/%s/unhinted/ttf" % family,
}
self.outputs = set()
self.logger = logging.getLogger("GFBuilder")
self.fill_config_defaults()
def get_family_name(self, source=None):
if not source:
source = self.config["sources"][0]
source, _ = os.path.splitext(os.path.basename(source))
fname = re.sub(r"([a-z])([A-Z])", r"\1 \2", source)
fname = re.sub("-?MM$", "", fname)
return fname
def post_process_ttf(self, filename):
super().post_process_ttf(filename)
self.outputs.add(filename)
hinted_dir = "output/%s/hinted/ttf" % self.get_family_name()
os.makedirs(hinted_dir, exist_ok=True)
hinted = filename.replace("unhinted", "hinted")
try:
autohint(filename, hinted)
self.outputs.add(hinted)
except Exception as e:
self.logger.error("Couldn't autohint %s: %s" % (filename, e))
def post_process(self, filename):
super().post_process(filename)
self.outputs.add(filename)
def build_variable(self):
try:
super().build_variable()
except Exception as e:
self.logger.error("Couldn't build variable font: %s" % e)
if __name__ == '__main__':
import argparse
# https://stackoverflow.com/a/20422915
class ActionNoYes(argparse.Action):
def __init__(self, option_strings, dest, default=None, required=False, help=None):
if default is None:
raise ValueError('You must provide a default with Yes/No action')
if len(option_strings)!=1:
raise ValueError('Only single argument is allowed with YesNo action')
opt = option_strings[0]
if not opt.startswith('--'):
raise ValueError('Yes/No arguments must be prefixed with --')
opt = opt[2:]
opts = ['--' + opt, '--no-' + opt]
super(ActionNoYes, self).__init__(opts, dest, nargs=0, const=None,
default=default, required=required, help=help)
def __call__(self, parser, namespace, values, option_strings=None):
if option_strings.startswith('--no-'):
setattr(namespace, self.dest, False)
else:
setattr(namespace, self.dest, True)
parser = argparse.ArgumentParser(description='Build a Noto font')
parser.add_argument('sources', metavar='FILE', nargs='+',
help='source files')
parser.add_argument('--variable', action=ActionNoYes, default=True,
help='build a variable font')
parser.add_argument('--otf', action=ActionNoYes, default=True,
help='build an OTF')
parser.add_argument('--verbose','-v', action="store_true", help='verbose logging')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.INFO)
builder = NotoBuilder(args.sources)
builder.config["buildVariable"] = args.variable
builder.config["buildOTF"] = args.otf
builder.build()
print("Produced the following files:")
for o in builder.outputs:
print("* "+o)
| Python | 0 | |
f8712c62ad069b815ff775bd758bdbf693bdbdb7 | Add some constants. | src/pyfuckery/constants.py | src/pyfuckery/constants.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# XXX Update Docstring
"""
pyFuckery - constants.py
Created on 2/12/17.
"""
# Stdlib
import logging
import re
# Third Party Code
# Custom Code
log = logging.getLogger(__name__)
# Brainfuck tokens
SYM_PTR_INC = '>'
SYM_PTR_DEC = '<'
SYM_DATA_INC = '+'
SYM_DATA_DEC = '-'
SYM_IO_OUTPUT = '.'
SYM_IO_INPUT = ','
SYM_JMP_FWD = '['
SYM_JMP_BACKWARD = ']'
| Python | 0.000001 | |
37190d6fba4d2a769ff2dcd154aa8cf1721f7026 | yea yea | gmm.py | gmm.py | """Trains a GMM on formant data (e.g. from the Hillenbrand corpus).
"""
import numpy as np
from sklearn.mixture import GMM
from sklearn import metrics
import pylab as pl
from collections import defaultdict
def parse(fname):
with open(fname) as f:
d = map(lambda l: l.rstrip('\n').split(), f.readlines())
header = d[0]
d = filter(lambda x: not 'NaN' in x, d)
return header, np.array(d[1:])
def eval_clusters(y_pred, y):
# maximize the 1-to-1 matching between y_pred and y
sety = set(y)
counts = defaultdict(lambda: defaultdict(lambda: 0))
for i in xrange(y_pred.shape[0]):
counts[y[i]][y_pred[i]] += 1
maps_to = {}
for y_, yp_c_d in counts.iteritems():
max_ = 0
ind_max = None
for yp, c in yp_c_d.iteritems():
if c > max_:
max_ = c
ind_max = yp
maps_to[y_] = ind_max
y_gold = np.array(map(lambda x: maps_to[x], y))
print "Adjusted rand scores:",
print metrics.adjusted_rand_score(y_gold, y_pred)
print "Homogeneity:",
print metrics.homogeneity_score(y_gold, y_pred)
print "Completeness:",
print metrics.completeness_score(y_gold, y_pred)
print "V-measure:",
print metrics.v_measure_score(y_gold, y_pred)
if __name__ == "__main__":
h, d = parse('formants.dat')
X = d[:,3:5].astype(np.float)
y = d[:,2]
print "All the", len(set(y)), "vowels:", set(y)
gmm = GMM(n_components=len(set(y)))
gmm.fit(X)
eval_clusters(gmm.predict(X), y)
| Python | 0.999953 | |
c97e44697444b15686bd0a6b5158c90630958238 | Add LRU example | lru.py | lru.py | from datetime import datetime
class LRUCacheItem(object):
"""Data structure of items stored in cache"""
def __init__(self, key, item):
self.key = key
self.item = item
self.timestamp = datetime.now()
class LRUCache(object):
"""A sample class that implements LRU algorithm"""
def __init__(self, length, delta=None):
self.length = length
self.delta = delta
self.hash = {}
self.item_list = []
def insertItem(self, item):
"""Insert new items to cache"""
if item.key in self.hash:
# Move the existing item to the head of item_list.
item_index = self.item_list.index(item)
self.item_list[:] = self.item_list[:item_index] + self.item_list[item_index+1:]
self.item_list.insert(0, item)
else:
# Remove the last item if the length of cache exceeds the upper bound.
if len(self.item_list) > self.length:
self.removeItem(self.item_list[-1])
# If this is a new item, just append it to
# the front of item_list.
self.hash[item.key] = item
self.item_list.insert(0, item)
def removeItem(self, item):
"""Remove those invalid items"""
del self.hash[item.key]
del self.item_list[self.item_list.index(item)]
def validateItem(self):
"""Check if the items are still valid."""
def _outdated_items():
now = datetime.now()
for item in self.item_list:
time_delta = now - item.timestamp
if time_delta.seconds > self.delta:
yield item
map(lambda x: self.removeItem(x), _outdated_items())
| Python | 0 | |
dd7ffbf97f9ae8426d7f60e465941f3f70bccdd6 | add file | new.py | new.py | print("test")
| Python | 0.000001 | |
494c8b88727dc958a7ba37f76d4c470837d26e1d | Define register files | reg.py | reg.py | EXP = 'EXP'
VAL = 'VAL'
ENV = 'ENV'
UNEV = 'UNEV'
FUNC = 'FUNC'
ARGL = 'ARGL'
CONT = 'CONT'
CURR = 'CURR'
STACK = 'STACK' | Python | 0.000043 | |
1c46aa8a03e577ddb3db55a11df3db70905110d2 | Add serial_logger.py | serial_logger.py | serial_logger.py | #!/usr/bin/env python
# encoding: utf-8
# Log serial monitor data
# TO-DO: add options for serial device, baud rate
import serial
import datetime
ser = serial.Serial('/dev/cu.usbmodemfa131', 9600)
now = datetime.datetime.now()
def get_date_string():
day = now.day
month = now.month
year = now.year
current_day = "{0}-{1}-{2}".format(year, month, day)
return current_day
while True:
current_date = get_date_string()
filename = current_date + '.temperature.log'
with open(filename, 'a') as log:
try:
temp = ser.readline()
#temp = 76
now = datetime.datetime.now()
iso = now.isoformat()
data = "{0} {1}".format(iso, temp)
print data.strip()
log.write(data)
#print now, temp
except:
pass
| Python | 0.000021 | |
0df0daf7f52015258c3607bb2822c1c77c5e8207 | add tensorflow sample | python/other/flow.py | python/other/flow.py | import tensorflow as tf
a = tf.constant(1, name="a")
b = tf.constant(1, name="b")
c = a + b
print(c)
graph = tf.get_default_graph()
print(graph.as_graph_def())
with tf.Session() as sess:
print(sess.run(c)) | Python | 0 | |
fdc900d5da48ae9aea1c7537e026dc2d46c62bc8 | add some reuseable aggregation code | shrunk/aggregations.py | shrunk/aggregations.py |
def match_short_url(url):
return {"$match": {"short_url":url}}
def match_id(id):
return {"$match": {"short_url":url}}
#monthly visits aggregations phases
group_ips={"$group": {
"_id": "$source_ip",
"times": {
"$addToSet": "$time"
},
"count": {
"$sum": 1
}
}}
take_first_visit={"$project": {
"time": {
"$arrayElemAt": ["$times",0]
},
"count": 1
}}
#this monthly sort can probably get abstracted and reused
group_months={"$group": {
"_id": {
"month": {"$month": "$time"},
"year" : {"$year" : "$time"}
},
"first_time_visits": {
"$sum": 1
},
"all_visits": {
"$sum": "$count"
}
}}
make_sortable={"$project": {
"month": "$_id.month",
"year" : "$_id.year",
"first_time_visits": 1,
"all_visits": 1
}}
chronological_sort={ "$sort": {
"year" : 1,
"month": 1
}}
clean_results={"$project": {
"first_time_visits": 1,
"all_visits": 1
}}
monthly_visits_aggregation=[group_ips, take_first_visit, group_months, #process data
make_sortable, chronological_sort, clean_results] #sort
| Python | 0.000001 | |
f9da8c4aa061223dac5147f6eaec6ad3419d1d6a | Add cli module to accept a language option | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/cli.py | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/cli.py | import os
os.environ["KIVY_NO_ARGS"] = "1"
import click
from {{cookiecutter.repo_name}}.{{cookiecutter.repo_name}} import {{cookiecutter.app_class_name}}
@click.command()
@click.option(
'-l', '--language', help='Default language of the App', default='en',
type=click.Choice(['en', 'de'])
)
def main(language):
"""Run {{cookiecutter.app_class_name}} with the given language setting.
"""
{{cookiecutter.app_class_name}}(language).run()
| Python | 0.000001 | |
09379060a5acb36462a21a5ad78d12f33270a20e | add python template for searching AD | python/python/templates/ad-template.py | python/python/templates/ad-template.py | #!/usr/bin/env python
'''
Copyright (C) 2011 Bryan Maupin <bmaupincode@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import getpass
import sys
import ldap
from ldap.controls import SimplePagedResultsControl
# change these as necessary
debug = True
ad_host = 'ad.example.com'
# DN used for binding to AD
ad_bind_dn = 'cn=%s,ou=users,dc=example,dc=com'
# DN used as the search base
ad_base_dn = 'ou=users,dc=example,dc=com'
ad_filter = '(objectClass=*)'
ad_attrs = ['cn', 'distinguishedName']
# Scope needs to be one of ldap.SCOPE_BASE, ldap.SCOPE_ONELEVEL, or
# ldap.SCOPE_SUBTREE
ad_scope = ldap.SCOPE_SUBTREE
# How many search results to return at a time (must be less than 1000 for AD)
ad_page_size = 1000
def main():
ad_object = ad_connect()
results = ad_search(ad_object)
print results
def ad_connect():
'''Binds to AD
Returns: AD connection object, used to perform queries
'''
username = raw_input('Please enter your AD username (or press enter to '
'use %s): ' % (getpass.getuser()))
if username == '':
username = getpass.getuser()
bind_dn = ad_bind_dn % (username)
bind_password = getpass.getpass('Please enter your AD password: ')
if debug:
print 'Binding with DN: %s' % (bind_dn)
try:
# initialize secure connection to AD server
ad_object = ldap.initialize( 'ldaps://%s' %(ad_host))
ad_object.simple_bind_s(bind_dn, bind_password)
if debug:
print 'Successfully bound to server.\n'
except ldap.LDAPError, error_message:
sys.stderr.write('Couldn\'t connect to AD server. %s\n' %
error_message)
return ad_object
def ad_search(ad_object, filter=ad_filter, attrs=ad_attrs,
base=ad_base_dn, scope=ad_scope):
'''Function to search AD
It will default to global variables for filter, attributes, base, and
scope
Returns a list of search results. Each entry itself is a list, where the
first item is the DN of the entry, and the second item is a dictionary of
attributes and values.
'''
search_results = []
ad_control = SimplePagedResultsControl(
ldap.LDAP_CONTROL_PAGE_OID, True, (ad_page_size, '')
)
try:
ad_pages = 0
while True:
# Send search request
msgid = ad_object.search_ext(
ad_base_dn,
ad_scope,
ad_filter,
ad_attrs,
serverctrls=[ad_control]
)
ad_pages += 1
if debug:
print 'Getting page %d' % (ad_pages)
unused_code, results, unused_msgid, serverctrls = \
ad_object.result3(msgid)
if debug:
print '%d results' % len(results)
if results and len(results) > 0:
search_results.extend(results)
for serverctrl in serverctrls:
if serverctrl.controlType == ldap.LDAP_CONTROL_PAGE_OID:
unused_est, cookie = serverctrl.controlValue
if cookie:
ad_control.controlValue = (ad_page_size, cookie)
break
if not cookie:
break
return search_results
if debug:
sys.stderr.write('LDAP search results not found\n'
'base: %s\n'
'filter: %s\n'
'attributes: %s\n\n' % (base, filter, attrs))
return []
except ldap.LDAPError, error_message:
print 'search_results:'
try:
print search_results
except NameError: # if search_results hasn't been declared
print # print a blank line
sys.stderr.write('LDAPError: %s\n' % (error_message))
if __name__ == '__main__':
main()
| Python | 0 | |
d7ea709e50510016bb448cb45e159528e416f08b | Create dianping_spider.py | DataHouse/crawler/dianping/dianping_spider.py | DataHouse/crawler/dianping/dianping_spider.py | """
a web spider for daz hong dian ping
"""
import time
import requests
from bs4 import BeautifulSoup
from lxml import etree
import pandas as pd
from pymongo import MongoClient
CITY_FILEPATH = 'city.xml'
CATEGORY_FILEPATH = 'type.xml'
SLEEP_TIME = 2
class City(object):
def __init__(self, pinyin, id, name):
self.pinyin = pinyin
self.id = id
self.name = name
def __str__(self):
return '{pinyin = ' + self.pinyin + '; id = ' + self.id + '; name = ' + self.name + '}';
def __repr__(self):
return self.__str__()
class Category(object):
def __init__(self, id, name):
self.id = id
self.name = name
def __str__(self):
return '{id = ' + self.id + '; name = ' + self.name + '}';
def __repr__(self):
return self.__str__()
def parse_city_xml(city_xml_filepath):
"""
parse the city_xml_filepath file in res directory, and return job list
:param city_xml_filepath:
:return:
"""
citylist = []
tree = etree.parse(city_xml_filepath)
for _ in tree.xpath('//city'):
city = City(_.get('pinyin').strip(), _.get('id').strip(), _.text.strip())
citylist.append(city)
return citylist
def parse_category_xml(type_xml_filepath):
"""
parse the type_xml_filepath file in res directory, and return job list
:param type_xml_filepath:
:return:
"""
categorylist = []
tree = etree.parse(type_xml_filepath)
for _ in tree.xpath('//type'):
category = Category(_.get('id').strip(), _.text.strip())
categorylist.append(category)
return categorylist
def crawl(start_num, city, category):
headers = {
'Host': 'mapi.dianping.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
}
payload = {
'start': start_num,
'categoryid': category.id,
'sortid': 0,
'maptype': 0,
'cityid': city.id
}
main_url = 'http://mapi.dianping.com/searchshop.json'
response = requests.get(main_url, params=payload, timeout=20, headers=headers)
if response.status_code == 200:
return response.json()
else:
print('Error!')
return None
def insert_item(item):
"""
insert an object into mongodb
:param item:
:return:
"""
client = MongoClient()
db = client.dianping.hotel
result = db.insert_one(item)
if __name__ == '__main__':
categorylist = parse_category_xml(CATEGORY_FILEPATH)
citylist = parse_city_xml(CITY_FILEPATH)
for city in citylist:
for category in categorylist:
# data = []
max_num = 25 # can be assigned to any number
start_num = 0
while start_num < max_num:
try:
dat = crawl(start_num, city, category)
if dat is not None:
max_num = dat['recordCount']
start_num += 25
print(dat['list'])
for _ in dat['list']:
_['cityName'] = city.name
_['categoryName'] = category.name
insert_item(_)
# data.append(dat['list'])
time.sleep(SLEEP_TIME)
# df = pd.DataFrame(data, )
# df = pd.DataFrame(data)
# df.to_excel('./food.xlsx', 'Food', index=False)
except:
pass
| Python | 0.000004 | |
195b74304fa1c5eab3bc2e16df1346c2f92916f8 | Test py | testnet/tests/configs_api_test.py | testnet/tests/configs_api_test.py | #!/usr/bin/env python3
import unittest
import datetime
from exonum import ExonumApi, random_hex
class ConfigsApi(ExonumApi):
def new_config_propose(self, config, height, actual_from_height):
tx, c = self.send_transaction("config/propose", {"config": config, "height": height, "actual_from_height": actual_from_height})
return (self.get_config_propose(tx))
def new_config_vote(self):
tx, _ = self.send_transaction(
"config/vote", {"config_propose_hash": hash})
def get_config_propose(self, hash):
r = self.get("config/propose/" + hash)
return r.json()
def get_config_vote(self, pubkey):
r = self.get("config/vote/" + hash)
return r.json()
class ConfigsApiTest(ConfigsApi):
def setUp(self):
super().setUp()
self.host = "http://127.0.0.1:8400/api/v1"
self.times = 120
def create_many_proposes(self, txs):
final_tx = None
print()
print(" - Create {} config_proposes".format(txs))
start = datetime.datetime.now()
for i in range(txs):
r, c = self.post_transaction(
"wallets/create", {"name": "name_" + str(i)})
final_tx = r["tx_hash"]
tx = self.wait_for_transaction(final_tx)
self.assertNotEqual(tx, None)
finish = datetime.datetime.now()
delta = finish - start
ms = delta.seconds * 1000 + delta.microseconds / 1000
print(" - Commited, txs={}, total time: {}s".format(txs, ms / 1000))
start = datetime.datetime.now()
for i in range(txs):
info = self.find_user(cookies[i])
self.assertEqual(info["name"], "name_" + str(i))
finish = datetime.datetime.now()
delta = finish - start
ms = delta.seconds * 1000 + delta.microseconds / 1000
print(" - All users found, total time: {}s".format(ms / 1000))
def test_create_config_propose(self):
r, c = self.create_user("My First User")
self.assertEqual(r["name"], "My First User")
self.assertEqual(r["balance"], 0)
def test_create_proposes_1_10(self):
self.create_many_proposes(10)
def test_create_proposes_2_100(self):
self.create_many_proposes(100)
def test_create_proposes_3_1000(self):
self.create_many_proposes(1000)
def test_create_proposes_4_5000(self):
self.create_many_proposes(5000)
def test_create_proposes_5_10000(self):
self.create_many_proposes(10000)
if __name__ == '__main__':
unittest.main(verbosity=2, buffer=None)
| Python | 0 | |
1ac0c90037923c06a337b7236b678d8ca2b45e5f | Fix unit test | tests/unit/engines/test_acgsou.py | tests/unit/engines/test_acgsou.py | from collections import defaultdict
import mock
from searx.engines import acgsou
from searx.testing import SearxTestCase
class TestAcgsouEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dic = defaultdict(dict)
dic['pageno'] = 1
params = acgsou.request(query, dic)
self.assertTrue('url' in params)
self.assertTrue(query in params['url'])
self.assertTrue('acgsou.com' in params['url'])
def test_response(self):
resp = mock.Mock(text='<html></html>')
self.assertEqual(acgsou.response(resp), [])
html = """
<html>
<table id="listTable" class="list_style table_fixed">
<thead class="tcat">
<tr>
<th axis="string" class="l1 tableHeaderOver">发布时间</th>
<th axis="string" class="l2 tableHeaderOver">分类</th>
<th axis="string" class="l3 tableHeaderOver">资源名称</th>
<th axis="size" class="l4 tableHeaderOver">大小</th>
<th axis="number" class="l5 tableHeaderOver">种子</th>
<th axis="number" class="l6 tableHeaderOver">下载</th>
<th axis="number" class="l7 tableHeaderOver">完成</th>
<th axis="string" class="l8 tableHeaderOver">发布者/联盟</th>
</tr>
</thead>
<tbody class="tbody" id="data_list">
<tr class="alt1 ">
<td nowrap="nowrap">date</td>
<td><a href="category.html">testcategory</a></td>
<td style="text-align:left;">
<a href="show-torrentid.html" target="_blank">torrentname</a>
</td>
<td>1MB</td>
<td nowrap="nowrap">
<span class="bts_1">
29
</span>
</td>
<td nowrap="nowrap">
<span class="btl_1">
211
</span>
</td>
<td nowrap="nowrap">
<span class="btc_">
168
</span>
</td>
<td><a href="random.html">user</a></td>
</tr>
</tbody>
</table>
</html>
"""
resp = mock.Mock(text=html)
results = acgsou.response(resp)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
r = results[0]
self.assertEqual(r['url'], 'https://www.acgsou.com/show-torrentid.html')
self.assertEqual(r['content'], 'Category: "testcategory".')
self.assertEqual(r['title'], 'torrentname')
self.assertEqual(r['filesize'], 1048576)
| from collections import defaultdict
import mock
from searx.engines import acgsou
from searx.testing import SearxTestCase
class TestAcgsouEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dic = defaultdict(dict)
dic['pageno'] = 1
params = acgsou.request(query, dic)
self.assertTrue('url' in params)
self.assertTrue(query in params['url'])
self.assertTrue('acgsou.com' in params['url'])
def test_response(self):
resp = mock.Mock(text='<html></html>')
self.assertEqual(acgsou.response(resp), [])
html = """
<table id="listTable" class="list_style table_fixed">
<thead class="tcat">
<tr>
tablehead
</tr>
</thead>
<tbody class="tbody" id="data_list">
<tr class="alt1 ">
<td nowrap="nowrap">date</td>
<td><a href="category.html">testcategory</a></td>
<td style="text-align:left;">
<a href="show-torrentid.html" target="_blank">torrentname</a>
</td>
<td>1MB</td>
<td nowrap="nowrap">
<span class="bts_1">
29
</span>
</td>
<td nowrap="nowrap">
<span class="btl_1">
211
</span>
</td>
<td nowrap="nowrap">
<span class="btc_">
168
</span>
</td>
<td><a href="random.html">user</a></td>
</tr>
</table>
"""
resp = mock.Mock(text=html)
results = acgsou.response(resp)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
r = results[0]
self.assertEqual(r['url'], 'https://www.acgsou.com/show-torrentid.html')
self.assertEqual(r['content'], 'Category: "testcategory".')
self.assertEqual(r['title'], 'torrentname')
self.assertEqual(r['filesize'], 1048576)
| Python | 0.000005 |
1f24571c358941932860eab9b46b386adc7c7ecc | Add script to output the users with unassigned tickets | p3/management/commands/users_with_unassigned_tickets.py | p3/management/commands/users_with_unassigned_tickets.py | # -*- coding: utf-8 -*-
""" Print information of the users who got unassigned tickets."""
from django.core.management.base import BaseCommand, CommandError
from django.core import urlresolvers
from conference import models
from conference import utils
from p3 import models as p3_models
from conference import models as conf_models
from assopy import models as assopy_models
from collections import defaultdict, OrderedDict
from optparse import make_option
import operator
import simplejson as json
import traceback
### Globals
### Helpers
def get_all_order_tickets():
orders = assopy_models.Order.objects.filter(_complete=True)
order_tkts = [ordi.ticket for order in orders for ordi in order.orderitem_set.all() if ordi.ticket is not None]
conf_order_tkts = [ot for ot in order_tkts if ot.fare.code.startswith('T')]
return conf_order_tkts
def get_assigned_ticket(ticket_id):
return p3_models.TicketConference.objects.filter(ticket=ticket_id)
def has_assigned_ticket(ticket_id):
return bool(get_assigned_ticket(ticket_id))
#
# def is_ticket_assigned_to_someone_else(ticket, user):
# tickets = p3_models.TicketConference.objects.filter(ticket_id=ticket.id)
#
# if not tickets:
# return False
# #from IPython.core.debugger import Tracer
# #Tracer()()
# #raise RuntimeError('Could not find any ticket with ticket_id {}.'.format(ticket))
#
# if len(tickets) > 1:
# raise RuntimeError('You got more than one ticket from a ticket_id.'
# 'Tickets obtained: {}.'.format(tickets))
#
# tkt = tickets[0]
# if tkt.ticket.user_id != user.id:
# return True
#
# if not tkt.assigned_to:
# return False
#
# if tkt.assigned_to == user.email:
# return False
# else:
# return True
###
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--emails',
action='store_true',
dest='emails',
default=False,
help='Will print user emails.',
),
# make_option('--option',
# action='store',
# dest='option_attr',
# default=0,
# type='int',
# help='Help text',
# ),
)
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
tkts = get_all_order_tickets()
# unassigned tickets
un_tkts = [t for t in tkts if not has_assigned_ticket(t.id)]
# users with unassigned tickets
users = set()
for ut in un_tkts:
users.add(ut.user)
output = []
if options['emails']:
output = sorted([usr.email.encode('utf-8') for usr in users])
else:
output = sorted([usr.get_full_name().encode('utf-8') for usr in users])
#for ot in order_tkts:
# tkt = get_conference_ticket(ot.id)
#from IPython.core.debugger import Tracer
#Tracer()()
print(', '.join(output))
| Python | 0 | |
aa720214722ca6ea445cf4ba38aa5f51ef7772b4 | add random user for notes | add_random_user.py | add_random_user.py | #!/usr/bin/python
#coding=utf-8
import sys
import MySQLdb
from DBUtils.PooledDB import PooledDB
import hashlib
import time
import random
g_dbPool = PooledDB(MySQLdb, 5, host='function-hz.com', user='notes', passwd='welc0me', db='db_notes', port=3306, charset = "utf8", use_unicode = True);
def create_random_user(user_name, szPwd):
#create user by cell phone number and send dynamic password
conn = g_dbPool.connection()
cur=conn.cursor()
count = cur.execute("insert into user(user_name, password) values (%s, %s) " \
, (user_name, hashlib.md5(szPwd).hexdigest()))
conn.commit()
if (1 == count):
return True
else:
return False
if __name__ == '__main__':
print ("start add rendom user")
for i in range(1, 5000000):
szPhone = str(random.randint(11111111111, 99999999999))
szPwd = "123456"
print ("create user %d %s ==> %s" % (i, szPhone, szPwd))
# nPhone = random.randint(11111111111, 99999999999)
create_random_user(szPhone, szPwd)
| Python | 0 | |
05aa314ac9b5d38bb7a30e30aced9b27b2797888 | Add tests for non-async constructs | python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_syntax.py | python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_syntax.py | # Add taintlib to PATH so it can be imported during runtime without any hassle
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from taintlib import *
# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..taintlib import *
# Actual tests
class Context:
def __enter__(self):
return TAINTED_STRING
def __exit__(self, exc_type, exc, tb):
pass
def test_with():
ctx = Context()
taint(ctx)
with ctx as tainted:
ensure_tainted(tainted) # $ tainted
class Iter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def test_for():
iter = Iter()
taint(iter)
for tainted in iter:
ensure_tainted(tainted) # $ tainted
# Make tests runable
test_with()
test_for()
| Python | 0.000006 | |
388c51ea5f83f718b885d784b566bc1873998c3a | add management command used to find all duplicate districts | custom/icds_reports/management/commands/find_duplicate_district_topojsons.py | custom/icds_reports/management/commands/find_duplicate_district_topojsons.py | from django.core.management import BaseCommand
from custom.icds_reports.utils.topojson_util.topojson_util import get_topojson_file_for_level, \
get_district_topojson_data
class Command(BaseCommand):
help = "Prints out any districts whose names are duplicated across states."
def handle(self, *args, **kwargs):
district_topojson_data = get_district_topojson_data()
districts_to_states = {}
districts_with_duplicates = set()
for state, data in district_topojson_data.items():
for district_name in data['districts']:
if district_name in districts_to_states:
districts_with_duplicates.add(district_name)
districts_to_states[district_name].append(state)
else:
districts_to_states[district_name] = [state]
print('District Name: [States]\n')
for duplicate_district in districts_with_duplicates:
print(f'{duplicate_district}: {", ".join(districts_to_states[duplicate_district])}')
| Python | 0.000027 | |
8634db8fe61f819cf24023514d94e4ebfc7e819f | Add Stats() class | auth0/v2/stats.py | auth0/v2/stats.py | from .rest import RestClient
class Stats(object):
"""Auth0 stats endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
jwt_token (str): An API token created with your account's global
keys. You can create one by using the token generator in the
API Explorer: https://auth0.com/docs/api/v2
"""
def __init__(self, domain, jwt_token):
self.domain = domain
self.client = RestClient(jwt=jwt_token)
def _url(self, action):
return 'https://%s/api/v2/stats/%s' % (self.domain, action)
def active_users(self):
return self.client.get(self._url('active-users'))
def daily_stats(self, from_date=None, to_date=None):
return self.client.get(self._url('daily'), params={'from': from_date,
'to': to_date})
| Python | 0.000001 | |
c1fcf54b63de95c85a9505d83062d8b320b1cbdf | Add python cloudfront update_distribution example to replace ACM Certificate | python/example_code/cloudfront/update_distribution_certificate.py | python/example_code/cloudfront/update_distribution_certificate.py | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import sys
#support for python 2 and 3 input types
def read(output):
if sys.version_info[0] < 3:
return(raw_input(output))
else:
return(input(output))
# Create CloudFront client
cf = boto3.client('cloudfront')
# List distributions with the pagination interface
print("\nAvailable CloudFront Distributions:\n")
paginator = cf.get_paginator('list_distributions')
for distributionlist in paginator.paginate():
for distribution in distributionlist['DistributionList']['Items']:
print("Domain: " + distribution['DomainName'])
print("Distribution Id: " + distribution['Id'])
print("Certificate Source: " + distribution['ViewerCertificate']['CertificateSource'])
if (distribution['ViewerCertificate']['CertificateSource'] == "acm"):
print("Certificate ARN: " + distribution['ViewerCertificate']['Certificate'])
print("")
print('Enter the Distribution Id of the CloudFront Distribution who\'s ACM Certificate you would like to replace. ')
distribution_id = read('Note that certificate source must be ACM - DistributionId: ')
distribution_config_response=cf.get_distribution_config(Id=distribution_id)
distribution_config=distribution_config_response['DistributionConfig']
distribution_etag=distribution_config_response['ETag']
if (distribution_config['ViewerCertificate']['CertificateSource'] != "acm"):
print("\nThe DistributionId you have entered is not currently using an ACM Certificate, exiting...\n")
exit()
old_cert_arn=distribution_config['ViewerCertificate']['ACMCertificateArn']
new_cert_arn=read("Please enter the ARN of the new ACM Certificate you would like to attach to Distribution " + distribution_id + ": ")
print("Replacing: " + old_cert_arn + "\nwith: " + new_cert_arn + "\n")
distribution_config['ViewerCertificate']['ACMCertificateArn']=new_cert_arn
distribution_config['ViewerCertificate']['Certificate']=new_cert_arn
cf.update_distribution(DistributionConfig=distribution_config,Id=distribution_id,IfMatch=distribution_etag)
| Python | 0 | |
b861b70e72b582a1bd3ae3ae6fa8ae2478b4ebe4 | add the tests | popgen/test/test_categorizer.py | popgen/test/test_categorizer.py | import pytest
import numpy as np
from ..census_helpers import Census
from .. import categorizer as cat
@pytest.fixture
def c():
return Census("827402c2958dcf515e4480b7b2bb93d1025f9389")
@pytest.fixture
def acs_data(c):
population = ['B01001_001E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
all_columns = population + sex + race + male_age_columns + \
female_age_columns
df = c.block_group_query(all_columns, "06", "075", tract="030600")
return df
@pytest.fixture
def pums_data(c):
return c.download_population_pums("06", "07506")
def test_categorize(acs_data, pums_data):
p_acs_cat = cat.categorize(acs_data, {
("population", "total"): "B01001_001E",
("age", "19 and under"): "B01001_003E + B01001_004E + B01001_005E + "
"B01001_006E + B01001_007E + B01001_027E + "
"B01001_028E + B01001_029E + B01001_030E + "
"B01001_031E",
("age", "20 to 35"): "B01001_008E + B01001_009E + B01001_010E + "
"B01001_011E + B01001_012E + B01001_032E + "
"B01001_033E + B01001_034E + B01001_035E + "
"B01001_036E",
("age", "35 to 60"): "B01001_013E + B01001_014E + B01001_015E + "
"B01001_016E + B01001_017E + B01001_037E + "
"B01001_038E + B01001_039E + B01001_040E + "
"B01001_041E",
("age", "above 60"): "B01001_018E + B01001_019E + B01001_020E + "
"B01001_021E + B01001_022E + B01001_023E + "
"B01001_024E + B01001_025E + B01001_042E + "
"B01001_043E + B01001_044E + B01001_045E + "
"B01001_046E + B01001_047E + B01001_048E + "
"B01001_049E",
("race", "white"): "B02001_002E",
("race", "black"): "B02001_003E",
("race", "asian"): "B02001_005E",
("race", "other"): "B02001_004E + B02001_006E + B02001_007E + "
"B02001_008E",
("sex", "male"): "B01001_002E",
("sex", "female"): "B01001_026E"
}, index_cols=['NAME'])
assert len(p_acs_cat) == 3
assert len(p_acs_cat.columns) == 11
assert len(p_acs_cat.columns.names) == 2
assert p_acs_cat.columns[0][0] == "age"
assert np.all(cat.sum_accross_category(p_acs_cat) < 2)
def age_cat(r):
if r.AGEP <= 19:
return "19 and under"
elif r.AGEP <= 35:
return "20 to 35"
elif r.AGEP <= 60:
return "35 to 60"
return "above 60"
def race_cat(r):
if r.RAC1P == 1:
return "white"
elif r.RAC1P == 2:
return "black"
elif r.RAC1P == 6:
return "asian"
return "other"
def sex_cat(r):
if r.SEX == 1:
return "male"
return "female"
_, jd_persons = cat.joint_distribution(
pums_data,
cat.category_combinations(p_acs_cat.columns),
{"age": age_cat, "race": race_cat, "sex": sex_cat}
)
jd_persons | Python | 0.000028 | |
151293037b941aba874fb2641c1bf982e2143beb | Create solution.py | hackerrank/algorithms/implementation/medium/the_time_in_words/py/solution.py | hackerrank/algorithms/implementation/medium/the_time_in_words/py/solution.py | #!/bin/python3
import sys
def solution(hrs, min):
lookup = {
0: 'zero',
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'eleven',
12: 'twelve',
13: 'thirteen',
14: 'fourteen',
15: 'quarter',
16: 'sixteen',
17: 'seventeen',
18: 'eighteen',
19: 'nineteen',
20: 'twenty',
21: 'twenty one',
22: 'twenty two',
23: 'twenty three',
24: 'twenty four',
25: 'twenty five',
26: 'twenty six',
27: 'twenty seven',
28: 'twenty eight',
29: 'twenty nine',
30: 'half',
}
if min == 0:
return "{} o' clock".format(lookup[hrs])
elif min <= 30:
if min == 15 or min == 30:
return "{} past {}".format(lookup[min], lookup[hrs])
else:
return "{} minute{} past {}".format(lookup[min], '' if min == 1 else 's', lookup[hrs])
rem = 60 - min
if rem == 15 or rem == 30:
return "{} to {}".format(lookup[rem], lookup[hrs + 1])
return "{} minute{} to {}".format(lookup[rem], '' if min == 1 else 's', lookup[hrs + 1])
h = int(input().strip())
m = int(input().strip())
s = solution(h, m)
print(s)
| Python | 0.000018 | |
8ba0fcfa893e007f1c6cc794a36bd3604498c380 | add rapiro.py | rapiroController.kivy/rapiro.py | rapiroController.kivy/rapiro.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import serial
import os
import time
import datetime
import threading
_str = ''
if os.name == 'posix':
com = serial.Serial('/dev/ttyAMA0', 57600, timeout = 0.05)
else:
com = sys.stdout
def a2dist(v):
d = 26.59*pow(v/1024.0*5.0,-1.209)
return(d)
def rxData():
global _str
while (1):
n = com.inWaiting()
#print n, _str
if n > 0:
_str += com.read(n)
def command(data):
inst = data.split(',')
r = ''
try:
t = inst[0]
s = inst[1]
except:
t = 'x'
s = 'Not define'
if t == 'a':
"""
Arduino
"""
com.write(s)
r = com.readline()
elif t == 'p':
"""
Raspberry pi
"""
os.system(s)
else:
pass
return(t, s, r)
def main():
#print(command('a,#M0'))
#print(command('a,#Z'))
#print(command('a,#PS02A090S05A000T001'))
print(command('a,#M0'))
print(command('a,#Q'))
print(command('a,#A6'))
#print(command('a,#A1'))
#print(command('a,#A2'))
#print(command('a,#A3'))
#print(command('a,#A4'))
#print(command('a,#A5'))
print(command('a,#A6'))
#print(command('a,#A7'))
print(command('a,#C'))
print(command('a,#D'))
if __name__ == '__main__':
#t1 = threading.Thread(target=rxData)
#t1.setDaemon(True)
#t1.start()
main()
| Python | 0.999586 | |
a2848885e85ad6d9685bb8ae35747300ed4b6b8b | Add a BaseTokenizer | spicedham/tokenizer.py | spicedham/tokenizer.py | class BaseTokenizer(object):
def __init__(self, config):
pass
def tokenize(self, text):
return [text]
| Python | 0.000006 | |
75a882bf38c88d73e38d13fbb8b1499ff4ae4ea6 | Add migration for changing users added by OSF for meetings with emails for fullnames to their guid | scripts/remove_after_use/set_meetings_users_fullnames_to_guids.py | scripts/remove_after_use/set_meetings_users_fullnames_to_guids.py | import sys
import logging
import django
from django.db import transaction
django.setup()
from osf.models import OSFUser
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
dry_run = '--dry' in sys.argv
with transaction.atomic():
users = OSFUser.objects.filter(fullname__regex=r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$', tags__name='osf4m')
logger.info('{} users found added by OSF 4 Meetings with emails for fullnames'.format(users.count()))
for user in users:
user.fullname = user._id
if not dry_run:
user.save()
if __name__ == '__main__':
main()
| Python | 0 | |
a6b35a9a94b2e4b32c2236258812b44e81184515 | Add management command for resyncing mobile worker location user data | corehq/apps/users/management/commands/fix_location_user_data.py | corehq/apps/users/management/commands/fix_location_user_data.py | from corehq.apps.locations.models import Location
from corehq.apps.users.models import CommCareUser
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = "domain"
help = "Fix location user data for mobile workers."
def process_user(self, user):
if user.location_id:
user.set_location(Location.get(user.location_id))
else:
user.unset_location()
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("Usage: python manage.py fix_location_user_data %s" % self.args)
domain = args[0]
ids = (
CommCareUser.ids_by_domain(domain, is_active=True) +
CommCareUser.ids_by_domain(domain, is_active=False)
)
for doc in iter_docs(CommCareUser.get_db(), ids):
user = CommCareUser.wrap(doc)
try:
self.process_user(user)
except Exception as e:
print "Error processing user %s: %s" % (user._id, e)
| Python | 0 | |
a31ef338ef4029be92b0c578bdd12706a0f1c17d | Move zpool grains into salt.grains.zpool | salt/grains/zpool.py | salt/grains/zpool.py | # -*- coding: utf-8 -*-
'''
ZFS grain provider
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: salt.utils, salt.module.cmdmod
:platform: illumos,freebsd,linux
.. versionadded:: Oxygen
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
import salt.utils.dictupdate
import salt.utils.path
import salt.utils.platform
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
__virtualname__ = 'zfs'
__salt__ = {
'cmd.run': salt.modules.cmdmod.run,
'cmd.run_all': salt.modules.cmdmod.run_all,
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Figure out if we need to be loaded
'''
# Don't load on windows, NetBSD, or proxy
# NOTE: ZFS on Windows is in development
# NOTE: ZFS on NetBSD is in development
if salt.utils.platform.is_windows() or salt.utils.platform.is_netbsd() or 'proxyminion' in __opts__:
return False
# Don't load if we do not have the zpool command
if not salt.utils.path.which('zpool'):
return False
return True
def _zpool_data(zpool_cmd):
'''
Provide grains about zpools
'''
# collect zpool data
grains = {}
for zpool in __salt__['cmd.run']('{zpool} list -H -o name,size'.format(zpool=zpool_cmd)).splitlines():
if 'zpool' not in grains:
grains['zpool'] = {}
zpool = zpool.split()
grains['zpool'][zpool[0]] = zpool[1]
# return grain data
return grains
def zpool():
'''
Provide grains for zfs/zpool
'''
grains = {}
zpool_cmd = salt.utils.path.which('zpool')
grains = salt.utils.dictupdate.update(grains, _zpool_data(zpool_cmd), merge_lists=True)
return grains
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| Python | 0.001271 | |
bb8a448e5e3f935f5ee4f8da9c78bcb651486c15 | Create ZigZagConversion_002.py | leetcode/006-ZigZag-Conversion/ZigZagConversion_002.py | leetcode/006-ZigZag-Conversion/ZigZagConversion_002.py | class Solution:
# @param {string} s
# @param {integer} numRows
# @return {string}
def convert(self, s, numRows):
if numRows < 2:
return s
halfsize = numRows - 1
size = 2 * halfsize
res = ''
for i in range(numRows):
j, cnt = i, 1
while j < len(s):
res += s[j]
if j % halfsize == 0:
j += size
else:
j = cnt * size - j
cnt += 1
return res
| Python | 0.000001 | |
95fcdd4f2f65a330adcb115b7ce6d6084efc6ae8 | Add examples. | examples/sampling/metropolis.py | examples/sampling/metropolis.py | #!/usr/bin/env python
import numpy as np
from util import MetropolisSampler, log_gaussian
from matplotlib import pyplot as plt
def __main__():
np.random.seed(4)
# Generate parameters
num_dims = 3
mu = np.random.normal(0, 3, num_dims)
cov = np.diag(np.random.gamma(.5, size=num_dims))
# Create a sampler
sampler = MetropolisSampler(lambda x: -log_gaussian(x, mu, cov)[0], cov / num_dims)
# Draw samples
sampler.sample(mu, 1000)
# Show the trace
sampler.trace_plot(values=mu)
plt.show()
if __name__ == '__main__':
__main__()
| Python | 0 | |
415717bddb00ca650bef61a5c6054a7b47575b56 | Implement unit test for break. | jaspyx/tests/visitor/test_break.py | jaspyx/tests/visitor/test_break.py | import ast
from jaspyx.ast_util import ast_store, ast_load
from jaspyx.tests.visitor.v8_helper import V8Helper
class TestBreak(V8Helper):
def test_break(self):
assert self.run(
[
ast.Assign(
[ast_store('i')],
ast.Num(0),
),
ast.While(
ast.Compare(
ast_load('i'),
[ast.Lt()],
[ast.Num(10)]
),
[
ast.Break(),
],
[]
)
],
'i',
int
) == 0
| Python | 0.000001 | |
607c84d56524389a150e940d89f6ecb52420e8f3 | plain, AC | leetcode/0162_find-peak-element.py | leetcode/0162_find-peak-element.py | #
# @lc app=leetcode id=162 lang=python3
#
# [162] Find Peak Element
#
from typing import List
# @lc code=start
INT_MIN = - 2 ** 64 + 1
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
if len(nums) == 1:
return 0
nums.insert(0, INT_MIN)
nums.append(INT_MIN)
for i in range(len(nums)):
if nums[i+1] > nums[i] and nums[i+1] > nums[i+2]:
return i
return -1
# @lc code=end
s = Solution()
a = [1,2,1,3,5,6,4]
print(s.findPeakElement(a))
| Python | 0.998553 | |
6db056ed452dfe01e5b6b414d414dd02d8960cef | use new mesolve output format | examples/ex_landau_zener.py | examples/ex_landau_zener.py | #
# Textbook example: Landau-Zener transitions in a quantum two-level system.
#
from qutip import *
from pylab import *
import time
def hamiltonian_t(t, args):
""" evaluate the hamiltonian at time t. """
H0 = args[0]
H1 = args[1]
return H0 + t * H1
def qubit_integrate(delta, eps0, A, gamma1, gamma2, psi0, tlist):
# Hamiltonian
sx = sigmax()
sz = sigmaz()
sm = destroy(2)
H0 = - delta/2.0 * sx - eps0/2.0 * sz
H1 = - A/2.0 * sz
# collapse operators
c_op_list = []
n_th = 0.0 # zero temperature
# relaxation
rate = gamma1 * (1 + n_th)
if rate > 0.0:
c_op_list.append(sqrt(rate) * sm)
# excitation
rate = gamma1 * n_th
if rate > 0.0:
c_op_list.append(sqrt(rate) * sm.dag())
# dephasing
rate = gamma2
if rate > 0.0:
c_op_list.append(sqrt(rate) * sz)
# evolve and calculate expectation values
# method 1: function callback which returns the time-depdent qobj
#H_args = (H0, H1)
#expt_list = mesolve(hamiltonian_t, psi0, tlist, c_op_list, [sm.dag() * sm], H_args)
# method 2: a function callback that returns the coefficient for a qobj
#H = [H0, [H1, lambda x,y: x]]
#expt_list = mesolve(H, psi0, tlist, c_op_list, [sm.dag() * sm], {})
# method 3: a string that defines the coefficient. The solver generates
# and compiles C code using cython. This method is usually the fastest
# for large systems or long time evolutions, but there is fixed-time
# overhead that makes it inefficient for small and short-time evolutions.
H = [H0, [H1, 't']]
output = mesolve(H, psi0, tlist, c_op_list, [sm.dag() * sm], {})
return output.expect[0]
#
# set up the calculation
#
delta = 0.5 * 2 * pi # qubit sigma_x coefficient
eps0 = 0.0 * 2 * pi # qubit sigma_z coefficient
A = 2.0 * 2 * pi # sweep rate
gamma1 = 0.0 # relaxation rate
gamma2 = 0.0 # dephasing rate
psi0 = basis(2,0) # initial state
tlist = linspace(-20.0, 20.0, 5000)
start_time = time.time()
p_ex = qubit_integrate(delta, eps0, A, gamma1, gamma2, psi0, tlist)
print 'time elapsed = ' + str(time.time() - start_time)
plot(tlist, real(p_ex), 'b', tlist, real(1-p_ex), 'r')
plot(tlist, 1 - exp( - pi * delta **2 / (2 * A)) * ones(shape(tlist)), 'k')
xlabel('Time')
ylabel('Occupation probability')
title('Landau-Zener transition')
legend(("Excited state", "Ground state", "Landau-Zener formula"), loc=0)
show()
| #
# Textbook example: Landau-Zener transitions in a quantum two-level system.
#
from qutip import *
from pylab import *
import time
def hamiltonian_t(t, args):
""" evaluate the hamiltonian at time t. """
H0 = args[0]
H1 = args[1]
return H0 + t * H1
def qubit_integrate(delta, eps0, A, gamma1, gamma2, psi0, tlist):
# Hamiltonian
sx = sigmax()
sz = sigmaz()
sm = destroy(2)
H0 = - delta/2.0 * sx - eps0/2.0 * sz
H1 = - A/2.0 * sz
# collapse operators
c_op_list = []
n_th = 0.0 # zero temperature
# relaxation
rate = gamma1 * (1 + n_th)
if rate > 0.0:
c_op_list.append(sqrt(rate) * sm)
# excitation
rate = gamma1 * n_th
if rate > 0.0:
c_op_list.append(sqrt(rate) * sm.dag())
# dephasing
rate = gamma2
if rate > 0.0:
c_op_list.append(sqrt(rate) * sz)
# evolve and calculate expectation values
# method 1: function callback which returns the time-depdent qobj
#H_args = (H0, H1)
#expt_list = mesolve(hamiltonian_t, psi0, tlist, c_op_list, [sm.dag() * sm], H_args)
# method 2: a function callback that returns the coefficient for a qobj
#H = [H0, [H1, lambda x,y: x]]
#expt_list = mesolve(H, psi0, tlist, c_op_list, [sm.dag() * sm], {})
# method 3: a string that defines the coefficient. The solver generates
# and compiles C code using cython. This method is usually the fastest
# for large systems or long time evolutions, but there is fixed-time
# overhead that makes it inefficient for small and short-time evolutions.
H = [H0, [H1, 't']]
expt_list = mesolve(H, psi0, tlist, c_op_list, [sm.dag() * sm], {})
return expt_list[0]
#
# set up the calculation
#
delta = 0.5 * 2 * pi # qubit sigma_x coefficient
eps0 = 0.0 * 2 * pi # qubit sigma_z coefficient
A = 2.0 * 2 * pi # sweep rate
gamma1 = 0.0 # relaxation rate
gamma2 = 0.0 # dephasing rate
psi0 = basis(2,0) # initial state
tlist = linspace(-20.0, 20.0, 5000)
start_time = time.time()
p_ex = qubit_integrate(delta, eps0, A, gamma1, gamma2, psi0, tlist)
print 'time elapsed = ' + str(time.time() - start_time)
plot(tlist, real(p_ex), 'b', tlist, real(1-p_ex), 'r')
plot(tlist, 1 - exp( - pi * delta **2 / (2 * A)) * ones(shape(tlist)), 'k')
xlabel('Time')
ylabel('Occupation probability')
title('Landau-Zener transition')
legend(("Excited state", "Ground state", "Landau-Zener formula"), loc=0)
show()
| Python | 0.000126 |
a1e451ab3525c5a0852782d1990f848b2329cb72 | add sinawb token | server/crawler/sinawb/TokenConstant.py | server/crawler/sinawb/TokenConstant.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author: AsherYang
Email: ouyangfan1991@gmail.com
Date: 2017/9/22.
Desc: sinaWeibo appkey
@see: http://open.weibo.com/apps/2489615368/info/basic?action=review
"""
domain="https://api.weibo.com/2/"
token=""
appkey = "2489615368"
secret = "dbb84df92e9a9c8f8e10d9985a8038a8" | Python | 0.999782 | |
1a9b6c7c58c5960df18335552780c3ca668dea5e | add evaluation script for ihm | evaluation/evalutate_ihm.py | evaluation/evalutate_ihm.py | import sklearn.utils as sk_utils
from mimic3models import metrics
import numpy as np
import pandas as pd
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('prediction', type=str)
parser.add_argument('--test_listfile', type=str, default='../data/in-hospital-mortality/test/listfile.csv')
parser.add_argument('--n_iters', type=int, default=10000)
args = parser.parse_args()
pred_df = pd.read_csv(args.prediction, index_col=False)
test_df = pd.read_csv(args.test_listfile, index_col=False)
df = test_df.merge(pred_df, left_on='stay', right_on='stay', how='left', suffixes=['_l', '_r'])
assert (df['prediction'].isnull().sum() == 0)
assert (df['y_true_l'].equals(df['y_true_r']))
n_samples = df.shape[0]
data = np.zeros((n_samples, 2))
data[:, 0] = np.array(df['prediction'])
data[:, 1] = np.array(df['y_true_l'])
auroc_score = metrics.print_metrics_binary(data[:, 1], data[:, 0], verbose=0)["auroc"]
aucs = []
for i in range(args.n_iters):
cur_data = sk_utils.resample(data, n_samples=len(data))
cur_auc = metrics.print_metrics_binary(cur_data[:, 1], cur_data[:, 0], verbose=0)["auroc"]
aucs += [cur_auc]
print "{} iterations".format(args.n_iters)
print "ROC of AUC = {}".format(auroc_score)
print "mean = {}".format(np.mean(aucs))
print "median = {}".format(np.median(aucs))
print "std = {}".format(np.std(aucs))
print "2.5% percentile = {}".format(np.percentile(aucs, 2.5))
print "97.5% percentile = {}".format(np.percentile(aucs, 97.5))
if __name__ == "__main__":
main()
| Python | 0 | |
e3c17a893ef4e0790af05cc238ac9038923b115a | Create docs directory for sphinx integration | docs/__init__.py | docs/__init__.py | #TODO:Create DOCS files for later integration using Sphinx
| Python | 0 | |
c206cfd940dd8ba58edb86f16691bcf50b6e5e30 | Add modgraph.py demo from Michael Hohn <mhhohn@lbl.gov> | tclpkg/gv/demo/modgraph.py | tclpkg/gv/demo/modgraph.py | #!/usr/bin/python
# display the kernel module dependencies
# author: Michael Hohn <mhholn@lbl.gov>
# based on: modgraph.tcl by John Ellson <ellson@research.att.com>
import sys
# sys.path.append('/usr/lib/graphviz/python')
sys.path.append('/usr/lib64/graphviz/python')
import gv
modules = open("/proc/modules", 'r').readlines()
G = gv.digraph("G")
gv.setv(G, 'rankdir', 'LR')
gv.setv(G, 'nodesep', '0.05')
gv.setv(G, 'node', 'shape', 'box')
gv.setv(G, 'node', 'width', '0')
gv.setv(G, 'node', 'height', '0')
gv.setv(G, 'node', 'margin', '.03')
gv.setv(G, 'node', 'fontsize', '8')
gv.setv(G, 'node', 'fontname', 'helvetica')
gv.setv(G, 'edge', 'arrowsize', '.4')
for rec in modules:
fields = rec.split(' ')
n = gv.node(G, fields[0])
for usedby in fields[3].split(','):
if (usedby != '-') & (usedby != ''):
gv.edge(n, gv.node(G, usedby))
gv.layout(G, 'dot')
# The 'xlib' renderer is provided by graphviz-cairo
gv.render(G, 'xlib')
| Python | 0 | |
5c7a4547558e6f6959ae1878f56efef8716456c4 | add script to convert distances into probabilities | scripts/distance2probability.py | scripts/distance2probability.py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 Hervé BREDIN
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Convert distance to probability
Usage:
distance2probability.py train <distance_matrix> <groundtruth_matrix> <d2p_model>
distance2probability.py apply <distance_matrix> <d2p_model> <probability_matrix>
distance2probability.py (-h | --help)
distance2probability.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
from pyannote.algorithms.stats.llr import LLRIsotonicRegression
import numpy as np
import pickle
def do_train(distance_matrix, groundtruth_matrix, d2p_model):
# load distance matrix
x = np.load(distance_matrix)
# load groundtruth matrix
y = np.load(groundtruth_matrix)
# train isotonic regression
ir = LLRIsotonicRegression()
ir.fit(x, y)
# save regression
pickle.dump(ir, d2p_model)
def do_apply(distance_matrix, d2p_model, probability_matrix):
# load distance matrix
x = np.load(distance_matrix)
# load regression
ir = pickle.load(d2p_model)
# apply isotonic regression
y = ir.apply(x)
# save probability matrix
np.save(probability_matrix, y)
if __name__ == '__main__':
arguments = docopt(__doc__, version='0.1')
print arguments
if arguments['train']:
distance_matrix = arguments['<distance_matrix>']
groundtruth_matrix = arguments['<groundtruth_matrix>']
d2p_model = arguments['<d2p_model>']
do_train(distance_matrix, groundtruth_matrix, d2p_model)
if arguments['apply']:
distance_matrix = arguments['<distance_matrix>']
d2p_model = arguments['<d2p_model>']
probability_matrix = arguments['<probability_matrix>']
do_apply(distance_matrix, d2p_model, probability_matrix)
| Python | 0.000006 | |
2197e16cf20bba5d373f4b7a250b8f1190be8ede | Add focus attribute example. | examples/focused-windows.py | examples/focused-windows.py | #!/usr/bin/env python3
from argparse import ArgumentParser
import i3ipc
i3 = i3ipc.Connection()
def focused_windows():
tree = i3.get_tree()
workspaces = tree.workspaces()
for workspace in workspaces:
container = workspace
while container:
if not hasattr(container, 'focus') \
or not container.focus:
break
container_id = container.focus[0]
container = container.find_by_id(container_id)
if container:
coname = container.name
wsname = workspace.name
print('WS', wsname +':', coname)
if __name__ == '__main__':
parser = ArgumentParser(description = 'Print the names of the focused window of each workspace.')
parser.parse_args()
focused_windows()
| Python | 0 | |
e7146bbee86ea744d080f18a4f27def9cb26e33e | add corpus_test1.py to see how to parse music21 songs | experiments/corpus_test1.py | experiments/corpus_test1.py | #!/usr/bin/env python3
# Copyright 2016 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment to help determine the best way to use music21 objects.
The music21 libaries have a lot of purposes beyond what I need so for now I
think all I need is to know how to access the note pitches and their positions
and durations within the work. From those three bits of info I can then
construct a waveform representing that music given a tempo to define the length
of a quarter note.
"""
import numpy
from music21 import corpus
from potty_oh.common import get_cmd_line_parser
from potty_oh.common import call_main
def main():
parser = get_cmd_line_parser(description=__doc__)
parser.parse_args()
work_path = numpy.random.choice(corpus.getComposer('bach'))
work = corpus.parse(work_path)
for note in work.flat.notes:
print('{} [{}]: {} {}'.format(note.offset, note.duration.quarterLength,
note.pitch, note.frequency))
return 0
if __name__ == "__main__":
call_main(main)
| Python | 0.000001 | |
6ccdf23c67af632a46017d63b5f51d2c207be0ab | Add file | scheduled_bots/scripts/merge_duplicate_gene_proteins.py | scheduled_bots/scripts/merge_duplicate_gene_proteins.py | from tqdm import tqdm
from wikidataintegrator.wdi_core import WDItemEngine, MergeError
from wikidataintegrator.wdi_login import WDLogin
from scheduled_bots.local import WDUSER, WDPASS
login = WDLogin(WDUSER, WDPASS)
s_protein = """
SELECT DISTINCT ?item1 ?item2 ?value {{
?item1 wdt:P352 ?value .
?item2 wdt:P352 ?value .
?item1 wdt:P31|wdt:P279 wd:Q8054 .
?item2 wdt:P31|wdt:P279 wd:Q8054 .
FILTER NOT EXISTS {{?item1 wdt:P703 wd:Q15978631}}
FILTER( ?item1 != ?item2 && STR( ?item1 ) < STR( ?item2 ) ) .
}}"""
s_gene = """
SELECT DISTINCT ?item1 ?item2 ?value {{
?item1 wdt:P351 ?value .
?item2 wdt:P351 ?value .
?item1 wdt:P703 ?taxon1 .
?item2 wdt:P703 ?taxon2 .
FILTER( ?item1 != ?item2 && STR( ?item1 ) < STR( ?item2 ) && ?taxon1 = ?taxon2) .
FILTER NOT EXISTS {{?item1 wdt:P703 wd:Q15978631}}
}}"""
s = s_gene
items = [{k: v['value'].split("/")[-1] for k, v in x.items()} for x in
WDItemEngine.execute_sparql_query(s)['results']['bindings']]
for x in tqdm(items):
try:
WDItemEngine.merge_items(from_id=x['item2'], to_id=x['item1'], login_obj=login, ignore_conflicts='statement|description|sitelink')
except MergeError as e:
print(e)
pass
| Python | 0.000002 | |
495da73f305a2a0e79a28d251b5b93caea06656d | Add UglifyJS as a filter. | mediagenerator/filters/uglifier.py | mediagenerator/filters/uglifier.py | from django.conf import settings
from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
class Uglifier(Filter):
def __init__(self, **kwargs):
super(Uglifier, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Uglifier only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
args = ['uglifyjs']
try:
args = args + settings.UGLIFIER_OPTIONS
except AttributeError:
pass
try:
cmd = Popen(args,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
yield output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to run UglifyJs. "
"Please make sure you have Node.js and UglifyJS installed "
"and that it's in your PATH.\n"
"Error was: %s" % e)
| Python | 0 | |
660a3c5f3f8a4c63c21c27ce58c5639d37409ae1 | add thing to filter tracts | filter_tracts.py | filter_tracts.py | import json
calif_tracts_data = open('tracts.json')
calif_tracts = json.load(calif_tracts_data)
sf_tracts = []
for r in calif_tracts["features"]:
if r["properties"]["COUNTY"] == "075":
sf_tracts.append(r)
calif_tracts_data.close()
print json.dumps({"type": "FeatureCollection", "features": sf_tracts}) | Python | 0 | |
ce908d76dd624c687ab3c641d474857b6ffe011b | enable create and delete dhcp server | n0core/porter/dhcp.py | n0core/porter/dhcp.py | from ipaddress import IPv4Interface # noqa
import os
from typing import Tuple # noqa
from pyroute2 import IPRoute
from pyroute2 import NetNS
from pyroute2 import NSPopen
class DHCP(object):
"""
Manage namespaces, veth pairs and dnsmasq processes.
"""
ip = IPRoute()
@classmethod
def _get_veth_names(cls, subnet_id):
# type: (str) -> Tuple[str, str]
"""
Get names of veths linked to DHCP server on specified subnet.
Args:
subnet_id: Subnet ID.
Returns:
Name of one of the veth pair.
Name of the other.
"""
return 'tap-dhcp-' + subnet_id, 'eth-dhcp-' + subnet_id
@classmethod
def _get_netns_name(cls, subnet_id):
# type: (str) -> str
"""
Gets netns name by subnet.
Args:
subnet_id: Uuid of subnet.
Returns:
netns name.
"""
return 'dhcp-' + subnet_id
@classmethod
def _get_pid_filename(cls, netns_name):
# type: (str) -> Tuple[str, str]
"""
Get dnsmasq pid filename by netns.
Args:
netns_name: netns name.
Returns:
Path to directory of pid file.
Path to dnsmasq pid file.
"""
dirname = os.path.join('/var/run/', netns_name)
return dirname, os.path.join(dirname, 'dnsmasq.pid')
@classmethod
def _start_dnsmasq_process(cls, netns_name, interface_name, pool):
# type: (str, str, Tuple[str, str]) -> None
"""
Start dnsmasq process on netns.
1. Create directory where to save pid file.
2. Start dnsmasq process.
Args:
netns_name: netns name.
interface_name: Name of interface used by dnsmasq.
pool: DHCP allocation pool. Allocate pool[0]-pool[1].
"""
dirname, pid_filename = cls._get_pid_filename(netns_name)
if not os.path.exists(dirname):
os.mkdir(dirname)
interface = '--interface=' + interface_name
dhcp_range = '--dhcp-range=' + pool[0] + ',' + pool[1] + ',' + '12h'
pid_file = '--pid-file=' + pid_filename
cmd = ['/usr/sbin/dnsmasq',
'--no-resolv',
'--no-hosts',
interface,
dhcp_range,
pid_file]
NSPopen(netns_name, cmd)
@classmethod
def create_dhcp_server(cls, subnet_id, interface_addr, bridge_name, pool):
# type: (str, IPv4Interface, str, Tuple[str, str]) -> None
"""
Create DHCP server on specified subnet.
1. Create netns if not exists.
in command: `ip netns add $netns_name`
2. Create veth pair.
in command: `ip link add $tap_name type veth peer name $peer_name`
3. Link one of the veth pair to bridge.
in command: `ip link set dev $tap_name master $bridge_name`
4. Move the other veth to netns.
in command: `ip link set $peer_name netns $netns_name`
5. Add ip address to the veth.
in command: `ip netns exec $netns_name \
ip addr add $address/$prefixlen dev $peer`
6. Set up veths.
in command: `ip link set $name up`
7. Start dnsmasq process.
Args:
subnet_id: Subnet id.
interface_addr: IP address of DHCP server.
bridge_name: Name of bridge linked to DHCP server.
pool: DHCP allocation pool. Allocate pool[0]-pool[1].
"""
netns_name = cls._get_netns_name(subnet_id)
netns = NetNS(netns_name, flags=os.O_CREAT)
tap_name, peer_name = cls._get_veth_names(subnet_id)
cls.ip.link('add', ifname=tap_name, peer=peer_name, kind='veth')
tap = cls.ip.link_lookup(ifname=tap_name)[0]
bri = cls.ip.link_lookup(ifname=bridge_name)[0]
cls.ip.link('set', index=tap, master=bri)
peer = cls.ip.link_lookup(ifname=peer_name)[0]
cls.ip.link('set', index=peer, net_ns_fd=netns_name)
address = str(interface_addr.ip)
prefixlen = int(interface_addr.network.prefixlen)
netns.addr('add', index=peer, address=address, prefixlen=prefixlen)
cls.ip.link('set', index=tap, state='up')
netns.link('set', index=peer, state='up')
netns.close()
cls._start_dnsmasq_process(netns_name, peer_name, pool)
@classmethod
def delete_dhcp_server(cls, subnet_id):
# type : (str) -> None
"""
Delete DHCP server on specified subnet.
1. Kill dnsmasq process.
2. Delete veth pairs.
in command: `ip link del $tap_name`
2. Delete related netns.
in command: `ip netns del $netns_name`
Args:
subnet_id: Subnet id.
"""
netns_name = cls._get_netns_name(subnet_id)
dirname, pid_filename = cls._get_pid_filename(netns_name)
with open(pid_filename, 'r') as f:
pid = int(f.read())
os.kill(pid, 9)
os.remove(pid_filename)
os.rmdir(dirname)
tap_name, _ = cls._get_veth_names(subnet_id)
tap = cls.ip.link_lookup(ifname=tap_name)[0]
cls.ip.link('del', index=tap)
netns = NetNS(netns_name)
netns.close()
netns.remove()
| Python | 0 | |
68a720ab539c6ba94fdf181328f27be453a9097f | Add examine_latent.py script | examine_latent.py | examine_latent.py | # Examine the Million Song Dataset. Running this script will visualize each Echo
# Nest Taste Profile songs' latent features with t-SNE.
import os, subprocess
import matplotlib.pyplot as plt
from matplotlib.text import Annotation
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
from sklearn.cluster import MiniBatchKMeans, AgglomerativeClustering
from krotos.msd.latent.features import LatentFeatures
from krotos.paths import ROOT_PATH
from krotos.debug import report, report_newline
report("Get LatentFeatures instance...")
lf = LatentFeatures()
report("Scaling latent features")
latents = lf.Y
# latents = StandardScaler().fit_transform(latents)
samples = 5000
report("Selecting {0} random samples...".format(samples))
sample_idxs = np.random.choice(latents.shape[0], size=samples, replace=False)
latents = latents[sample_idxs, :]
report("Performing t-SNE embedding...")
model = TSNE(n_components=2, method='barnes_hut')
embedding = model.fit_transform(latents)
report("Performing clustering...")
n_clusters = 10
db = AgglomerativeClustering(
n_clusters=n_clusters,
linkage='average',
affinity='cosine'
).fit(latents)
cluster_labels = db.labels_
cmap = plt.get_cmap('jet', n_clusters)
report("Getting norms...")
norms = np.linalg.norm(latents, axis=1)
# min_max_norms = (norms - np.min(norms)) / (np.max(norms) - np.min(norms))
report("Retrieving song labels...")
song_labels = {}
echonest = lf._echonest
unique_tracks_path = os.path.join(ROOT_PATH, 'msd/resources/unique_tracks.txt')
if not os.path.exists(unique_tracks_path): raise Exception("unique_tracks.txt not found.")
with open(unique_tracks_path, 'r') as unique_tracks:
i = 0
for line in unique_tracks:
_, song_id, artist, track = line.strip().split("<SEP>")
song_labels[song_id] = (artist + ' - ' + track)
i += 1
report("{0:7d} song labels...".format(i), sameline=True)
report_newline()
sid_mismatches_path = os.path.join(ROOT_PATH, 'msd/resources/sid_mismatches.txt')
if not os.path.exists(sid_mismatches_path): raise Exception("sid_mismatches.txt not found.")
with open(sid_mismatches_path, 'r') as sid_mismatches:
i = 0
for line in sid_mismatches:
song_labels[line[8:26]] = "<bad data: mismatched song>"
i += 1
report("{0:5d} erroneous song labels noted...".format(i), sameline=True)
report_newline()
report("Plotting...")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(embedding[:, 0], embedding[:, 1], c=cluster_labels, cmap=cmap, s=100, alpha=0.5, linewidths=0.0, picker=True)
last_annotation = None
last_latent = None
last_ind = None
def report_top(idx, n):
report("\tGlobal closest songs:")
closest = sorted(zip(*lf.closest(lf.Y[idx], n=n)), key=lambda x: x[1], reverse=True)
for echonest_id, score in closest:
report("\t\t{0:7.5}: {1}".format(score, song_labels[echonest_id]))
def onpick(event):
global last_annotation
global last_latent
global last_ind
ind = event.ind[0]
idx = sample_idxs[ind]
x, y = embedding[ind]
track_id_echonest = echonest.get_track_id(idx)
label = unicode(song_labels[track_id_echonest], errors='ignore')
if last_ind == ind:
report_top(idx, 5)
return
last_ind = ind
if last_latent is not None:
s = np.dot(last_latent, latents[ind]) / (np.linalg.norm(last_latent) * np.linalg.norm(latents[ind]))
d = np.linalg.norm(last_latent - latents[ind])
report('Cosine similarity: {0: 7.5f}'.format(s))
report('L2 distance: {0: .5f}'.format(d))
last_latent = latents[ind]
report('Selected point at (x={1: 7.3f}, y={2: 7.3f}, norm={3: 7.3f}): \t{0}.'.format(label, x, y, norms[ind]))
if last_annotation is not None: last_annotation.remove()
last_annotation = Annotation(
label,
xy=(x, y),
xytext=(-20, 20),
textcoords='offset points',
ha='right',
va='bottom',
bbox=dict(boxstyle = 'round,pad=0.5', fc='white', alpha=0.6),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0', alpha=0.4)
)
ax.add_artist(last_annotation)
fig.canvas.draw()
fig.canvas.mpl_connect('pick_event', onpick)
report("Displaying plot")
plt.show()
report_newline()
| Python | 0.000001 | |
e687dce8c8441728f1af6336497f7a131730db4f | Add untracked campaigns.py | framework/auth/campaigns.py | framework/auth/campaigns.py | import httplib as http
from framework.exceptions import HTTPError
from website import mails
VALID_CAMPAIGNS = (
'prereg',
)
EMAIL_TEMPLATE_MAP = {
'prereg': mails.CONFIRM_EMAIL_PREREG
}
def email_template_for_campaign(campaign, default=None):
if campaign in VALID_CAMPAIGNS:
try:
return EMAIL_TEMPLATE_MAP[campaign]
except KeyError as e:
if default:
return default
else:
raise e
def campaign_for_user(user):
campaigns = [tag for tag in user.system_tags if tag in VALID_CAMPAIGNS]
if campaigns:
return campaigns[0]
def campaign_url_for(campaign):
# Defined inside this function to ensure a request context
REDIRECT_MAP = {
'prereg': '/prereg/'
}
if campaign not in VALID_CAMPAIGNS:
raise HTTPError(http.BAD_REQUEST)
else:
try:
return REDIRECT_MAP[campaign]
except KeyError:
raise HTTPError(http.NOT_FOUND)
| Python | 0.000001 | |
96ed06f1f3dab3aa9d0f8150c41a5c1b943a86b0 | Add test for config module | frappe/tests/test_config.py | frappe/tests/test_config.py | # Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.config import get_modules_from_all_apps_for_user
class TestConfig(unittest.TestCase):
def test_get_modules(self):
frappe_modules = frappe.get_all("Module Def", filters={"app_name": "frappe"}, pluck="name")
all_modules_data = get_modules_from_all_apps_for_user()
first_module_entry = all_modules_data[0]
all_modules = [x["module_name"] for x in all_modules_data]
self.assertIn("links", first_module_entry)
self.assertIsInstance(all_modules_data, list)
self.assertFalse([x for x in frappe_modules if x not in all_modules])
| Python | 0.000002 | |
d7bfed84d773e7ccbd23e910a533f70b4dd02184 | Add module entrypoint | g2sd/__main__.py | g2sd/__main__.py | from .g2sd import cmd
if __name__ == "__main__":
cmd()
| Python | 0.000001 | |
7421cecfd6b304692eb19d76d3f90a61a950bc83 | add get_reviewers | get_reviewers.py | get_reviewers.py |
import sys
import urllib2
import time
from lxml import html
def get_reviewers(bookid, star=1):
allstar10_list = []
for tag in ['collections', 'doings', 'wishes']:
reached_end = False
i = 0
while not reached_end:
print "start %d" % i
page_url = "http://book.douban.com/subject/%s/%s?start=%d" % (bookid, tag, i)
response = urllib2.urlopen(page_url)
page_html = response.read()
tree = html.fromstring(page_html)
reviews_element_list = tree.xpath('//*[@id="' + tag + '_tab"]//table')
if len(reviews_element_list) < 20:
reached_end = True
reviewer_list = tree.xpath('//*[@id="' + tag + '_tab"]//table/tr/td/div[@class="pl2"]/a')
reviewers = [ el.attrib['href'] for el in reviewer_list ]
review_list = tree.xpath('//*[@id="' + tag + '_tab"]//table/tr/td/p[@class="pl"]/span[last()]')
reviews = [ el.attrib['class'] for el in review_list ]
review_stars = "allstar%d0" % star
allstar10_list.extend([reviewer for (reviewer,review) in zip(reviewers, reviews) if review == review_stars])
i += 20
time.sleep(1)
return allstar10_list
if __name__ == "__main__":
bookid = sys.argv[1]
allstar10_list = get_reviewers( bookid )
for i in allstar10_list:
print i
| Python | 0 | |
258a8d38d590f856e144b1e725fe38619c6758ea | Create notes_extractor.py | notes_extractor/notes_extractor.py | notes_extractor/notes_extractor.py | #!/usr/bin/env python3
###############################################################################
# Name : extract_notes.py #
# Version : v. 1.0.0.0 #
# Author : Abel Gancsos #
# Description : Helps extract data about Apple Notes. #
###############################################################################
import os, sys, sqlite3;
class INNote:
identifier=None;name=None;
def __init__(self, row=None):
if row != None:
self.identifier = row[0];
self.name = row[1];
pass;
class NotesExtractor:
notes_path=None;connection=None;cursor=None;
def __init__(self, params=dict()):
self.notes_path = params["-p"] if "-p" in params.keys() else "{0}/Library/Group Containers/group.com.apple.notes/NoteStore.sqlite".format(os.environ['HOME']);
assert os.path.exists(self.notes_path), "Notes cache must exist...";
self.connection = sqlite3.connect(self.notes_path);
self.cursor = self.connection.cursor();
def ensure_close(self):
self.connection.commit();
self.connection.close();
def search(self, keyword=""):
notes = list();
self.cursor.execute("SELECT ZIDENTIFIER, ZTITLE1 FROM ZICCLOUDSYNCINGOBJECT WHERE ZTITLE1 LIKE '%{0}%'".format(keyword));
rows = self.cursor.fetchall();
for row in rows: notes.append(INNote(row));
return notes;
pass;
if __name__ == "__main__":
params = dict();
for i in range(0, len(sys.argv) - 1): params[sys.argv[i]] = sys.argv[i + 1];
session = NotesExtractor(params);
notes = session.search(params["-n"] if "-n" in params.keys() else "");
for note in notes: print("{1}\t\tnotes://showNote?identifier={0}".format(note.identifier, note.name));
session.ensure_close();
| Python | 0 | |
c0d135fc40142561e4a2409e47b34c367a6a7ef4 | add script to read device logs from rms dump | util/scripts/devicelogs.py | util/scripts/devicelogs.py | from rmsdump import *
def read_log_entry (log_entry):
return tuple(log_entry.val[i].val for i in range(0, 3))
def print_log (log_atom):
print '%s> %s: %s' % (log_atom[0].strftime('%Y-%m-%d %H:%M:%S'), log_atom[1], log_atom[2])
if __name__ == "__main__":
data = sys.stdin.read()
stream = DataStream(data)
(rmses, num_rms, err) = extract_rms(stream)
log_rmses = [rms for rms in rmses if rms['name'].startswith('LOG_') and rms['name'] != 'LOG_IX']
log_entries = []
for log_rms in log_rmses:
log_entries.extend([rec['content'][1] for rec in log_rms['records']])
log_digest = [read_log_entry(le) for le in log_entries]
for la in sorted(log_digest, key=lambda la: la[0]):
print_log(la)
| Python | 0 | |
f1ccab2168dea1b0827f4ca929f0036e84170a76 | Add tests for cross domain xhr view | go/base/tests/test_views.py | go/base/tests/test_views.py | """Test for go.base.utils."""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from go.base.tests.utils import VumiGoDjangoTestCase
class BaseViewsTestCase(VumiGoDjangoTestCase):
def cross_domain_xhr(self, url):
return self.client.post(reverse('cross_domain_xhr'), {'url': url})
@patch('requests.get')
def test_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': None})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://username:password@domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr_with_https_and_port(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr(
'https://username:password@domain.com:443/foo')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('https://domain.com:443/foo',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
| Python | 0 | |
b67cc70a6cf04e605ad93933dd9d8a88db94f093 | add a simple flask app | backend/app.py | backend/app.py | from flask import Flask
import db
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
#db.process_db()
app = Flask(__name__)
app.run(debug=True)
| Python | 0.000002 | |
ce894232e3d8b00be3520f9f8d34ceb706b8dd75 | Create removelast2pagesofpdf.py | RemoverLast2PagesOfPDF/removelast2pagesofpdf.py | RemoverLast2PagesOfPDF/removelast2pagesofpdf.py | import PyPDF2, os
import sys
import CmdFormat
import shutil
CDMF = CmdFormat.CmdFormat("PDF分离器")
class PDFMerger(object):
"""docstring for PDFMerger"""
def __init__(self,ROOTPATH):
self.__ROOTPATH = ROOTPATH+"\\"
self.__countriesCount = 0
self.__currentCoutry=""
self.bRegenerate = True
def __messages(self):
CDMF.set_cmd_color(CmdFormat.FOREGROUND_RED | CmdFormat.FOREGROUND_GREEN | \
CmdFormat.FOREGROUND_BLUE | CmdFormat.FOREGROUND_INTENSITY)
print("\n")
print("========================== 欢迎使用 ==================================")
def __quiry(self,mes):
while True:
content = CDMF.print_green_input_text(mes)
if content=="y" or content=="Y" or content=="n" or content=="N":
break
if content=="y" or content=="Y":
return True
else:
return False
def Run(self):
self.__messages()
allFiles = os.listdir(self.__ROOTPATH)
CDMF.print_blue_text("扫描待统计村民资料...,")
nNumFile = 0;
nNumNoContent = 0;
for fileOrDir in allFiles:
if fileOrDir.startswith(('1','2','3','4','5','6','7','8','9','0')) and fileOrDir.endswith('.pdf'):
nNumFile = nNumFile + 1
CDMF.print_blue_text("扫描完毕!共有 "+str(nNumFile) + " 户的资料,",end='')
CDMF.print_blue_text("需要统计的有 "+str(nNumFile) + " 户.")
#多个村
bdeleteOrg = self.__quiry("是否删掉原文件(请输入y或n):")
index = 1
for file in allFiles:
filefull = os.path.join(self.__ROOTPATH,file)
if not os.path.isdir(filefull):
if filefull.endswith('.pdf'): #找到以.pdf结尾的文件
(filepath,tempfilename) = os.path.split(filefull)
(filename,extension) = os.path.splitext(tempfilename)
if filename.startswith(('1','2','3','4','5','6','7','8','9','0')):
pdfWriter = PyPDF2.PdfFileWriter() #生成一个空白的pdf文件
inPDFfile = open(filefull,'rb')
pdfReader = PyPDF2.PdfFileReader(inPDFfile) #以只读方式依次打开pdf文件
for pageNum in range(pdfReader.numPages):
if pageNum<pdfReader.numPages-2:
pdfWriter.addPage(pdfReader.getPage(pageNum)) #将打开的pdf文件内容一页一页的复制到新建的空白pdf里
outPdfName = self.__ROOTPATH+'\\'+'Res_'+filename+'.pdf'
pdfOutput = open(outPdfName,'wb')
pdfWriter.write(pdfOutput) #将复制的内容全部写入合并的pdf
pdfOutput.close()
inPDFfile.close()
outPdfName="" #清空outPdfName
CDMF.print_yellow_text(str(index)+'/'+str(nNumFile)+' ---> '+file+" 成功!")
index += 1
if bdeleteOrg:
os.remove(filefull)
if __name__ == '__main__':
ROOTPATH = os.getcwd()
Job = PDFMerger(ROOTPATH)
Job.Run()
CDMF.print_yellow_text("任务完成!")
quit = input("按任意键退出...")
| Python | 0.000001 | |
2b25b9ba1c9417e3e25a91055a65551210eb5313 | Add meal migrations | app/timetables/migrations/0002_meal.py | app/timetables/migrations/0002_meal.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-16 17:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Meal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60, unique=True)),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
],
),
]
| Python | 0.000003 | |
835d9628513a80215641bc4c63eae1fae7b8442b | rewrite portforwarding api | xos/api/utility/portforwarding.py | xos/api/utility/portforwarding.py | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import serializers
from rest_framework import generics
from rest_framework.views import APIView
from core.models import *
from django.forms import widgets
from django.core.exceptions import PermissionDenied
from xos.exceptions import XOSNotFound
from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
from django.db.models import Q
class PortForwarding(Port):
class Meta:
proxy = True
app_label = "core"
def __init__(self, *args, **kwargs):
super(PortForwarding, self).__init__(*args, **kwargs)
class PortForwardingSerializer(serializers.Serializer):
id = serializers.CharField(read_only=True)
ip = serializers.CharField(read_only=True)
ports = serializers.CharField(read_only=True, source="network.ports")
hostname = serializers.CharField(read_only=True, source="instance.node.name")
class Meta:
model = PortForwarding
fields = ('id', 'ip', 'ports', 'hostname')
class PortForwardingViewSet(XOSViewSet):
base_name = "list"
method_name = "portforwarding"
method_kind = "viewset"
serializer_class = PortForwardingSerializer
def get_queryset(self):
queryset = queryset=Port.objects.exclude(Q(network__isnull=True) |
Q(instance__isnull=True) |
Q(instance__node__isnull=True) |
Q(network__ports__exact='') |
Q(ip__isnull=True) | Q(ip__exact=''))
node_name = self.request.query_params.get('node_name', None)
if node_name is not None:
queryset = queryset.filter(instance__node__name = node_name)
return queryset
| Python | 0.000002 | |
33f43dd2e167afd40c4a5c516ae7cae35519b4c5 | Add partial output for testcases to comet | judge/bridge/judgecallback.py | judge/bridge/judgecallback.py | import logging
from .judgehandler import JudgeHandler
from judge.models import Submission, SubmissionTestCase
from judge.simple_comet_client import send_message
logger = logging.getLogger('judge.bridge')
class DjangoJudgeHandler(JudgeHandler):
def finish(self):
JudgeHandler.finish(self)
for id in self._load:
submission = Submission.objects.get(id=id)
submission.status = 'IE'
submission.save()
def on_grading_begin(self, packet):
JudgeHandler.on_grading_begin(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
submission.status = 'G'
submission.save()
send_message('sub_%d' % submission.id, 'grading-begin')
def on_grading_end(self, packet):
JudgeHandler.on_grading_end(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
time = 0
memory = 0
points = 0.0
total = 0
status = 0
status_codes = ['AC', 'WA', 'MLE', 'TLE', 'IR', 'RTE']
for case in SubmissionTestCase.objects.filter(submission=submission):
time += case.time
total += case.total
points += case.points
memory = max(memory, case.memory)
i = status_codes.index(case.status)
if i > status:
status = i
total = round(total, 1)
points = round(points / total * submission.problem.points, 1)
if not submission.problem.partial and points != total:
points = 0
submission.status = 'D'
submission.time = time
submission.memory = memory
submission.points = points
submission.result = status_codes[status]
submission.save()
chan = 'sub_%d' % submission.id
send_message(chan, 'grading-end %.3f %d %.1f %.1f %s' % (time, memory, points, submission.problem.points,
submission.result))
def on_compile_error(self, packet):
JudgeHandler.on_compile_error(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
submission.status = submission.result = 'CE'
submission.save()
send_message('sub_%d' % submission.id, 'compile-error %s' % packet['log'])
def on_bad_problem(self, packet):
JudgeHandler.on_bad_problem(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
submission.status = submission.result = 'IE'
submission.save()
send_message('sub_%d' % submission.id, 'bad-problem %s' % packet['problem'])
def on_test_case(self, packet):
JudgeHandler.on_test_case(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
test_case = SubmissionTestCase.objects.get_or_create(submission=submission, case=packet['position'])[0]
status = packet['status']
if status & 2:
test_case.status = 'RTE'
elif status & 4:
test_case.status = 'TLE'
elif status & 8:
test_case.status = 'MLE'
elif status & 16:
test_case.status = 'IR'
elif status & 1:
test_case.status = 'WA'
else:
test_case.status = 'AC'
test_case.time = packet['time']
test_case.memory = packet['memory']
test_case.points = packet['points']
test_case.total = packet['total-points']
test_case.save()
chan = 'sub_%d' % submission.id
send_message(chan, 'test-case %d %s %.3f %d %.1f %.1f (%s)' % (packet['position'], test_case.status,
packet['time'], packet['memory'],
float(test_case.points), float(test_case.total), packet['output']))
| import logging
from .judgehandler import JudgeHandler
from judge.models import Submission, SubmissionTestCase
from judge.simple_comet_client import send_message
logger = logging.getLogger('judge.bridge')
class DjangoJudgeHandler(JudgeHandler):
def finish(self):
JudgeHandler.finish(self)
for id in self._load:
submission = Submission.objects.get(id=id)
submission.status = 'IE'
submission.save()
def on_grading_begin(self, packet):
JudgeHandler.on_grading_begin(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
submission.status = 'G'
submission.save()
send_message('sub_%d' % submission.id, 'grading-begin')
def on_grading_end(self, packet):
JudgeHandler.on_grading_end(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
time = 0
memory = 0
points = 0.0
total = 0
status = 0
status_codes = ['AC', 'WA', 'MLE', 'TLE', 'IR', 'RTE']
for case in SubmissionTestCase.objects.filter(submission=submission):
time += case.time
total += case.total
points += case.points
memory = max(memory, case.memory)
i = status_codes.index(case.status)
if i > status:
status = i
total = round(total, 1)
points = round(points / total * submission.problem.points, 1)
if not submission.problem.partial and points != total:
points = 0
submission.status = 'D'
submission.time = time
submission.memory = memory
submission.points = points
submission.result = status_codes[status]
submission.save()
chan = 'sub_%d' % submission.id
send_message(chan, 'grading-end %.3f %d %.1f %.1f %s' % (time, memory, points, submission.problem.points,
submission.result))
def on_compile_error(self, packet):
JudgeHandler.on_compile_error(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
submission.status = submission.result = 'CE'
submission.save()
send_message('sub_%d' % submission.id, 'compile-error %s' % packet['log'])
def on_bad_problem(self, packet):
JudgeHandler.on_bad_problem(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
submission.status = submission.result = 'IE'
submission.save()
send_message('sub_%d' % submission.id, 'bad-problem %s' % packet['problem'])
def on_test_case(self, packet):
JudgeHandler.on_test_case(self, packet)
submission = Submission.objects.get(id=packet['submission-id'])
test_case = SubmissionTestCase.objects.get_or_create(submission=submission, case=packet['position'])[0]
status = packet['status']
if status & 2:
test_case.status = 'RTE'
elif status & 4:
test_case.status = 'TLE'
elif status & 8:
test_case.status = 'MLE'
elif status & 16:
test_case.status = 'IR'
elif status & 1:
test_case.status = 'WA'
else:
test_case.status = 'AC'
test_case.time = packet['time']
test_case.memory = packet['memory']
test_case.points = packet['points']
test_case.total = packet['total-points']
test_case.save()
chan = 'sub_%d' % submission.id
send_message(chan, 'test-case %d %s %.3f %d %.1f %.1f' % (packet['position'], test_case.status,
packet['time'], packet['memory'],
float(test_case.points), float(test_case.total)))
| Python | 0.000011 |
f1826b2cf4c4103efe52713a57dc2fcabda1a45d | fix migration for real | kitsune/questions/migrations/0006_ios_questionlocale.py | kitsune/questions/migrations/0006_ios_questionlocale.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_questionlocale(apps, schema_editor):
Product = apps.get_model('products', 'Product')
QuestionLocale = apps.get_model('questions', 'QuestionLocale')
p, created = Product.objects.get_or_create(slug='ios', defaults={
'title': 'Firefox for iOS',
'description': 'Firefox for iPhone, iPad and iPod touch devices',
'display_order': 0,
'visible': False})
ql, created = QuestionLocale.objects.get_or_create(locale='en-US')
ql.products.add(p)
class Migration(migrations.Migration):
dependencies = [
('questions', '0005_change_locale_sr_Cyrl_to_sr'),
('products', '0001_initial'),
]
operations = [
migrations.RunPython(create_questionlocale),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_questionlocale(apps, schema_editor):
Product = apps.get_model('products', 'Product')
QuestionLocale = apps.get_model('questions', 'QuestionLocale')
p = Product.objects.get_or_create(slug='ios', defaults={
'title': 'Firefox for iOS',
'description': 'Firefox for iPhone, iPad and iPod touch devices',
'display_order': 0,
'visible': False})
QuestionLocale.objects.get_or_create(locale='en-US', product=p)
class Migration(migrations.Migration):
dependencies = [
('questions', '0005_change_locale_sr_Cyrl_to_sr'),
('products', '0001_initial'),
]
operations = [
migrations.RunPython(create_questionlocale),
]
| Python | 0.000001 |
c2e3e122560b8981079e1a89ff90fdf31c9eb8d1 | Reset timer on push. | astm/protocol.py | astm/protocol.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
from .asynclib import AsyncChat, call_later
from .records import HeaderRecord, TerminatorRecord
from .constants import STX, ENQ, ACK, NAK, EOT, ENCODING
log = logging.getLogger(__name__)
__all__ = ['ASTMProtocol']
class ASTMProtocol(AsyncChat):
"""Common ASTM protocol routines."""
#: ASTM header record class.
astm_header = HeaderRecord
#: ASTM terminator record class.
astm_terminator = TerminatorRecord
#: Flag about chunked transfer.
is_chunked_transfer = None
#: IO timer
timer = None
encoding = ENCODING
strip_terminator = False
_last_recv_data = None
_last_sent_data = None
def __init__(self, sock=None, map=None, timeout=None):
super(ASTMProtocol, self).__init__(sock, map)
if timeout is not None:
self.timer = call_later(timeout, self.on_timeout)
def found_terminator(self):
while self.inbox:
data = self.inbox.popleft()
if not data:
continue
self.dispatch(data)
def dispatch(self, data):
"""Dispatcher of received data."""
self._last_recv_data = data
if data == ENQ:
handler = self.on_enq
elif data == ACK:
handler = self.on_ack
elif data == NAK:
handler = self.on_nak
elif data == EOT:
handler = self.on_eot
elif data.startswith(STX): # this looks like a message
handler = self.on_message
else:
handler = lambda: self.default_handler(data)
resp = handler()
if resp is not None:
self.push(resp)
def default_handler(self, data):
raise ValueError('Unable to dispatch data: %r', data)
def push(self, data):
self._last_sent_data = data
if self.timer is not None and not self.timer.cancelled:
self.timer.reset()
return super(ASTMProtocol, self).push(data)
def on_enq(self):
"""Calls on <ENQ> message receiving."""
def on_ack(self):
"""Calls on <ACK> message receiving."""
def on_nak(self):
"""Calls on <NAK> message receiving."""
def on_eot(self):
"""Calls on <EOT> message receiving."""
def on_message(self):
"""Calls on ASTM message receiving."""
def on_timeout(self):
"""Calls when timeout event occurs. Used to limit waiting time for
response data."""
log.warn('Communication timeout')
def handle_read(self):
if self.timer is not None and not self.timer.cancelled:
self.timer.reset()
super(ASTMProtocol, self).handle_read()
def handle_close(self):
if self.timer is not None and not self.timer.cancelled:
self.timer.cancel()
super(ASTMProtocol, self).handle_close()
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
from .asynclib import AsyncChat, call_later
from .records import HeaderRecord, TerminatorRecord
from .constants import STX, ENQ, ACK, NAK, EOT, ENCODING
log = logging.getLogger(__name__)
__all__ = ['ASTMProtocol']
class ASTMProtocol(AsyncChat):
"""Common ASTM protocol routines."""
#: ASTM header record class.
astm_header = HeaderRecord
#: ASTM terminator record class.
astm_terminator = TerminatorRecord
#: Flag about chunked transfer.
is_chunked_transfer = None
#: IO timer
timer = None
encoding = ENCODING
strip_terminator = False
_last_recv_data = None
_last_sent_data = None
def __init__(self, sock=None, map=None, timeout=None):
super(ASTMProtocol, self).__init__(sock, map)
if timeout is not None:
self.timer = call_later(timeout, self.on_timeout)
def found_terminator(self):
while self.inbox:
data = self.inbox.popleft()
if not data:
continue
self.dispatch(data)
def dispatch(self, data):
"""Dispatcher of received data."""
self._last_recv_data = data
if data == ENQ:
handler = self.on_enq
elif data == ACK:
handler = self.on_ack
elif data == NAK:
handler = self.on_nak
elif data == EOT:
handler = self.on_eot
elif data.startswith(STX): # this looks like a message
handler = self.on_message
else:
handler = lambda: self.default_handler(data)
resp = handler()
if resp is not None:
self.push(resp)
def default_handler(self, data):
raise ValueError('Unable to dispatch data: %r', data)
def push(self, data):
self._last_sent_data = data
return super(ASTMProtocol, self).push(data)
def on_enq(self):
"""Calls on <ENQ> message receiving."""
def on_ack(self):
"""Calls on <ACK> message receiving."""
def on_nak(self):
"""Calls on <NAK> message receiving."""
def on_eot(self):
"""Calls on <EOT> message receiving."""
def on_message(self):
"""Calls on ASTM message receiving."""
def on_timeout(self):
"""Calls when timeout event occurs. Used to limit waiting time for
response data."""
log.warn('Communication timeout')
def handle_read(self):
if self.timer is not None and not self.timer.cancelled:
self.timer.reset()
super(ASTMProtocol, self).handle_read()
def handle_close(self):
if self.timer is not None and not self.timer.cancelled:
self.timer.cancel()
super(ASTMProtocol, self).handle_close()
| Python | 0 |
4178691ed3826239721f2d9a6435ef90cfb5cf82 | Add color to input | flask_init/run.py | flask_init/run.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import os
import six
import templates
from .creator import Creator
from .exceptions import InvalidFolderName
def color_input(color, text):
return six.moves.input(color+text+'\033[0m')
def color_print(color, text):
six.print_(color+text+'\033[0m')
def main():
name = color_input("\033[35m", "> Project name [flask_proj]: ")
name = name or 'flask_proj'
module = color_input("\033[35m", "> Module name [common]: ")
module = module or 'common'
creator = Creator(os.getcwd())
try:
creator.create_folder(creator.root_path, name)
proj_path = os.path.join(creator.root_path, name)
except InvalidFolderName:
six.print_("\nInvalid Project Name, use another name!")
else:
creator.create_file(proj_path, "manage.py", templates.manager)
creator.create_folder(proj_path, "requirements")
creator.create_file(os.path.join(proj_path, "requirements"), "dev.txt",
templates.requirements)
app_init = templates.app_init.substitute(module=module)
creator.create_module(proj_path, "app", app_init)
app_path = os.path.join(proj_path, "app")
creator.create_folder(app_path, "templates")
template_path = os.path.join(app_path, "templates")
creator.create_file(template_path, "base.html", templates.base_html)
creator.create_folder(template_path, module)
creator.create_file(os.path.join(template_path, module),
"index.html", templates.module_html)
module_init = templates.module_init.substitute(module=module)
creator.create_folder(app_path, "static")
creator.create_module(app_path, module, module_init)
module_view = templates.module_views.substitute(module=module)
module_path = os.path.join(app_path, module)
creator.create_file(module_path, "views.py", module_view)
creator.create_file(module_path, "models.py", templates.blank)
color_print("\033[31m", "\n".join(creator.errors))
color_print("\033[34m", "Complete!")
six.print_("You can install package using ", end="")
color_print("\033[34m", "pip install -r requirements/dev.txt")
six.print_("You can run using ", end="")
color_print("\033[34m", "python manage.py run")
if __name__ == '__main__':
main()
| #!/usr/bin/python
# -*- coding:utf-8 -*-
import os
import six
import templates
from .creator import Creator
from .exceptions import InvalidFolderName
def main():
name = six.moves.input('Input project name (default is "flask_proj"): ')
name = name or 'flask_proj'
module = six.moves.input('Input module name (default is "common"): ')
module = module or 'common'
creator = Creator(os.getcwd())
try:
creator.create_folder(creator.root_path, name)
proj_path = os.path.join(creator.root_path, name)
except InvalidFolderName:
six.print_("\nInvalid Project Name, use another name!")
else:
creator.create_file(proj_path, "manage.py", templates.manager)
creator.create_folder(proj_path, "requirements")
creator.create_file(os.path.join(proj_path, "requirements"), "dev.txt",
templates.requirements)
app_init = templates.app_init.substitute(module=module)
creator.create_module(proj_path, "app", app_init)
app_path = os.path.join(proj_path, "app")
creator.create_folder(app_path, "templates")
template_path = os.path.join(app_path, "templates")
creator.create_file(template_path, "base.html", templates.base_html)
creator.create_folder(template_path, module)
creator.create_file(os.path.join(template_path, module),
"index.html", templates.module_html)
module_init = templates.module_init.substitute(module=module)
creator.create_folder(app_path, "static")
creator.create_module(app_path, module, module_init)
module_view = templates.module_views.substitute(module=module)
module_path = os.path.join(app_path, module)
creator.create_file(module_path, "views.py", module_view)
creator.create_file(module_path, "models.py", templates.blank)
six.print_("\n".join(creator.errors))
six.print_("You can install package "
"\"pip install -r requirements/dev.txt\"")
six.print_("You can run \"python manage.py run\"")
if __name__ == '__main__':
main()
| Python | 0.000002 |
48933f27c098b05276271a62ed3c970e4d5721b0 | add missing file | src/radical/repex/utils.py | src/radical/repex/utils.py |
import radical.utils as ru
# ------------------------------------------------------------------------------
#
def expand_ln(to_link, src_sbox, tgt_sbox, rid, cycle):
expand = {'rid' : rid,
'cycle': cycle}
if not src_sbox: src_sbox = '.'
if not tgt_sbox: tgt_sbox = '.'
ret = list()
for data in ru.as_list(to_link):
src, tgt = data.split('>')
try:
src = src.strip() % expand
tgt = tgt.strip() % expand
except:
raise RuntimeError('expansion error: %s : %s : %s' % (src, tgt, expand))
ret.append('%s/%s > %s/%s' % (src_sbox, src, tgt_sbox, tgt))
return ret
# ------------------------------------------------------------------------------
#
def last_task(replica):
cs = replica.current_stage
if cs >= len(replica.stages):
cs -= 1
assert(cs < len(replica.stages))
tasks = replica.stages[cs].tasks
assert(tasks)
assert(len(tasks) == 1)
return list(tasks)[0]
# ------------------------------------------------------------------------------
| Python | 0.000003 | |
88788c215c619ab894e21243d584541f311dbfb9 | Add eventlet test check to new tests __init__.py | oslo_concurrency/tests/__init__.py | oslo_concurrency/tests/__init__.py | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
if os.environ.get('TEST_EVENTLET'):
import eventlet
eventlet.monkey_patch()
| Python | 0.000009 | |
112c3a5a7728aea9be59b4bab1c26932e5faceaf | replace simple_api.py, set filename via commandline param, git add files that dont exist | import_fusion.py | import_fusion.py | #!/usr/bin/python
import json
import requests
import sys
import codecs
import subprocess
from datetime import datetime
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-f", dest="output_file", help="output filename (will be stored under the data/ directory)")
(options, args) = parser.parse_args()
if not options.output_file:
print "must include -f <output filename>"
exit()
OUTPUT_PATH = 'data/' + options.output_file
with open("api.json") as f:
keys = json.loads(f.read())
server_key = keys["ServerKey"]
tablename = keys['fusion_table']
endpoint = 'https://www.googleapis.com/fusiontables/v1/query?sql=SELECT * FROM '
apicall = "".join([endpoint, tablename, "&key=", server_key])
raw = requests.get(apicall)
if not raw.ok:
print("something wrong with the apicall\n would print the requests object for inspection and debugging:")
print('dir:',dir(raw))
print('status code:',raw.status_code)
print('text:', raw.text)
sys.exit()
data = raw.json()
geojson = {"type": "FeatureCollection", "features": []}
for place in data['rows']:
geojson['features'].append(
{
"geometry": {
"type": "Point",
"coordinates": [
place[6],
place[5]
]
},
"type": "Feature",
"properties": {
"city": place[1],
"name": place[0],
"district":place[2],
"subdistrict":place[3],
"address":place[4],
"operator": place[16],
"days": [
place[8],
place[9],
place[10],
place[11],
place[12],
place[13]
],
"phones" : place[7],
"notes": place[15],
"error" : place[17]
}
}
)
with codecs.open(OUTPUT_PATH,'wb+', 'utf-8') as f:
output = "stations=" + json.dumps(geojson, indent=4, ensure_ascii=False)
f.write(output)
f.close()
subprocess.call(['git', 'add', OUTPUT_PATH])
subprocess.call(['git','commit', OUTPUT_PATH, '-m', 'commiting updated geojson from Fusion table %s' % datetime.now().strftime("%d/%m/%Y %H:%M")])
| Python | 0 | |
43d23f19933e898254d58c4874e6f0c0ac3b1cc6 | Add example config file | config-example.py | config-example.py | # Example configuration file for for Pyaiot
# Configuration options are shared between all pyaiot components.
# Debug
# Enable debug logging for all components.
#debug = False
# Broker host:
# Other component connect to this host for their broker connection. The
# dashboard passes this hostname to the clients for their broker connection.
#broker_host = 'localhost'
# Broker port number:
# This is the tcp port number the websocket of the broker is listening on. Other
# component use this configuration options to determine which port number to
# connect to.
#broker_port = 8020
# Key file
# The key file is necessary to authenticate different components to the broker.
# Both the broker and the other components use the path specified to find the
# key file for authentication.
#key_file = '~/.pyaiot/keys'
# coap port
# The coap component listens on this port for CoAP messages from nodes
#coap_port = 5683
# MQTT host
# The hostname of the MQTT broker. The mqtt component connects to this hostname
# for the MQTT broker connection.
#mqtt_host = 'localhost'
# MQTT port
# The port the MQTT broker listens on. The MQTT component connects to this port
# on the MQTT broker.
#mqtt_port = 1886
# Gateway port
# This port is used by the websocket gateway to listen on. Websocket nodes
# connect to this port to connect with the websocket gateway.
#gateway_port = 8001
# max time
# Both the CoAP broker and the MQTT broker remove nodes from the broker after
# this many seconds without any messages from a node.
#max_time = 120
# Web Port
# The web interface listens on this port for HTTP connections.
#web_port = 8080
# Broker SSL
# When enabled, the URI to the broker is supplied with wss to indicate to use
# SSL to connect to the broker. Use this when you have a reverse proxy in front
# of the dashboard to handle SSL termination.
#broker_ssl=False
# Camera URL
# The HTTP clients get this URL for their connection to webcam images. If None
# is configured, no webcam functionality is configured
#camera_url = None
# Title
# The title of the web page.
#title = 'IoT Dashboard'
# Logo
# The logo for the navbar of the dashboard. Should be an URL to the image. If
# None is configured, no logo is shown.
#logo = None
# Favicon
# Optionally show a favicon on the dashboard. Should be an URL to an image. If
# None is configured, no favicon is passed to the web page.
#favicon = None
| Python | 0.000001 | |
783c3e740c154fb27b247b1cb8af5c853a8be973 | add basic_routing_test on experimental scenario | source/jormungandr/tests/routing_tests_experimental.py | source/jormungandr/tests/routing_tests_experimental.py | # Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import logging
from tests.tests_mechanism import AbstractTestFixture
from tests_mechanism import dataset
from check_utils import *
from nose.tools import eq_
import jormungandr.scenarios.experimental
from jormungandr.instance import Instance
def check_journeys(resp):
assert not resp.get('journeys') or sum([1 for j in resp['journeys'] if j['type'] == "best"]) == 1
@dataset(["main_routing_test"])
class TestJourneysExperimental(AbstractTestFixture):
"""
Test the experiental scenario
All the tests are defined in "TestJourneys" class, we only change the scenario
NOTE: for the moment we cannot import all routing tests, so we only get 2, but we need to add some more
"""
def setup(self):
logging.debug('setup for experimental')
from jormungandr import i_manager
dest_instance = i_manager.instances['main_routing_test']
self.old_scenario = dest_instance._scenario
dest_instance._scenario = jormungandr.scenarios.experimental.Scenario()
def teardown(self):
from jormungandr import i_manager
i_manager.instances['main_routing_test']._scenario = self.old_scenario
def test_journeys(self):
#NOTE: we query /v1/coverage/main_routing_test/journeys and not directly /v1/journeys
#not to use the jormungandr database
response = self.query_region(journey_basic_query, display=True)
check_journeys(response)
is_valid_journey_response(response, self.tester, journey_basic_query)
def test_error_on_journeys(self):
""" if we got an error with kraken, an error should be returned"""
query_out_of_production_bound = "journeys?from={from_coord}&to={to_coord}&datetime={datetime}"\
.format(from_coord="0.0000898312;0.0000898312", # coordinate of S in the dataset
to_coord="0.00188646;0.00071865", # coordinate of R in the dataset
datetime="20110614T080000") # 2011 should not be in the production period
response, status = self.query_no_assert("v1/coverage/main_routing_test/" + query_out_of_production_bound)
assert status != 200, "the response should not be valid"
check_journeys(response)
assert response['error']['id'] == "date_out_of_bounds"
assert response['error']['message'] == "date is not in data production period"
#and no journey is to be provided
assert 'journeys' not in response or len(response['journeys']) == 0
@dataset(["main_ptref_test"])
class TestJourneysExperimentalWithPtref(AbstractTestFixture):
"""Test the experimental scenario with ptref_test data"""
def setup(self):
logging.debug('setup for experimental')
from jormungandr import i_manager
dest_instance = i_manager.instances['main_ptref_test']
self.old_scenario = dest_instance._scenario
dest_instance._scenario = jormungandr.scenarios.experimental.Scenario()
def teardown(self):
from jormungandr import i_manager
i_manager.instances['main_ptref_test']._scenario = self.old_scenario
def test_strange_line_name(self):
response = self.query("v1/coverage/main_ptref_test/journeys"
"?from=stop_area:stop2&to=stop_area:stop1"
"&datetime=20140107T100000", display=True)
check_journeys(response)
eq_(len(response['journeys']), 1)
| Python | 0.000001 | |
b1caa89d75aecc564d504e5baffd0dc7619cd587 | Create foursq_friends.py | foursq_friends.py | foursq_friends.py | import json
from foursq_utils import *
def fetch_usr_friends(user_id):
super_token = 'QEJ4AQPTMMNB413HGNZ5YDMJSHTOHZHMLZCAQCCLXIX41OMP'
url = 'https://api.foursquare.com/v2/users/' + str(user_id) + '/friends?oauth_token=' + super_token + '&v=20210115'
try:
raw = get_raw_info(url)
data = json.loads(raw)
if data['meta']['code'] != 200:
return -1
friends_info = data['response']['friends']
friendsUID = []
if 'items' in friends_info.keys():
for item in friends_info['items']:
friendsUID.append(item['id'])
friends_info.setdefault('friendsUID',friendsUID)
else:
friends_info.setdefault('friendsUID', [])
return friends_info
except:
return -1
| Python | 0.000028 | |
139524072cc56d19ce887aaa95705dff8a952cc2 | Add lc035_search_insert_position.py | lc035_search_insert_position.py | lc035_search_insert_position.py | """Leetcode 35. Search Insert Position
Easy
URL: https://leetcode.com/problems/search-insert-position/
Given a sorted array and a target value, return the index if the target is found.
If not, return the index where it would be if it were inserted in order.
You may assume no duplicates in the array.
Example 1:
Input: [1,3,5,6], 5
Output: 2
Example 2:
Input: [1,3,5,6], 2
Output: 1
Example 3:
Input: [1,3,5,6], 7
Output: 4
Example 4:
Input: [1,3,5,6], 0
Output: 0
"""
class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.000002 | |
1669cd22d6c8ee5bcb37c6770c98ddcf8848d901 | Make lots of xyz with differing natoms visualizable | pad-trajectory.py | pad-trajectory.py | #!/usr/bin/env python3
#
# Script to generate an ext-xyz trajectory from individual ext-xyz files with varying atom numbers using ASE.
# Appens 'X' atoms at origin to obtain frames with equal lengths
# by Patrick Melix
# 2020/06/08
#
from ase import io, Atom
import os
def main(inList, outFile='traj.xyz', outFormat='extxyz'):
#if output exists mv to .bak
if os.path.isfile(outFile):
print('ATTENTION: {:} exists, moving to *.bak'.format(outFile))
os.rename(outFile, outFile+'.bak')
traj = []
for inFile in inList:
if not os.path.isfile(inFile):
raise ValueError('File {:} does not exist'.format(inFile))
print(inFile)
traj.append(io.read(inFile))
maxLen = max([len(frame) for frame in traj])
for i in range(len(traj)):
if len(traj[i]) < maxLen:
for j in range(maxLen-len(traj[i])):
traj[i].append(Atom('X'))
with open(outFile,'w') as f:
for frame in traj:
frame.write(f, format=outFormat)
return
#########################
# Functions
########################
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Combine different lengths of XYZ')
parser.add_argument('--outformat', help='Output ASE Format', default='extxyz')
parser.add_argument('--outfile', help='Output File', default='traj.xyz')
parser.add_argument('-files', type=str, nargs='+', default=[], help='All the XYZ Files')
args = parser.parse_args()
main(args.files, args.outfile, args.outformat)
| Python | 0.000002 | |
900b09803f5c49b8645ba7f3d47eb17515061377 | Create heads_and_legs.py | heads_and_legs.py | heads_and_legs.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Heads and Legs
#Problem level: 8 kyu
def animals(heads, legs):
if heads==0 and legs==0:
return (0,0)
y = legs//2 - heads
x = heads-y
if x<0 or y<0 or legs%2!=0:
return "No solutions"
return (x,y)
| Python | 0.000431 | |
fed98c8a9723c6fe18c123015b51714dc4ccdf68 | add migrations | actual_play/migrations/0006_game_thumbnail.py | actual_play/migrations/0006_game_thumbnail.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-10-20 22:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('actual_play', '0005_auto_20161010_1313'),
]
operations = [
migrations.AddField(
model_name='game',
name='thumbnail',
field=models.ImageField(blank=True, null=True, upload_to='actual_play/image/%Y/%m/%d'),
),
]
| Python | 0.000001 | |
5c9ffc4a0ab9f8aed3071a0bf4ad0fc69070b628 | Create inside_market.py | inside_market.py | inside_market.py | import pandas as pd
import numpy as np
# update current state of our bid and ask
# iterate thru each trade and determine if a fill was generated
# id
# price
# qty
# side - bid/ask
# status - live, canceled, rejected
LIVE = 0
CANCELED = 1
REJECTED = 2
FILLED = 3
PARTIAL = 4
BID = 5
ASK = 6
MINUS_INF = -9999999
PLUS_INF = 9999999
# represents a limit order in our inside market
class order():
id = 0
def __init__(self, price, qty, side, status):
self.id = order.id
order.id += 1
self.price = price
self.qty = qty
self.side = side
self.status = status
def cancel(self):
self.status = CANCELED
def modify(self, new_price, new_qty = -1):
self.price = new_price
if new_qty > 0:
self.qty = new_qty
def evaluate(self, trade_price):
if self.side == BID:
if trade_price < self.price:
self.status = FILLED
return True, self.price
else:
return False, 0.0
else:
if trade_price > self.price:
self.status = FILLED
return True, self.price
else:
return False, 0.0
class inside_market():
def __init__(self, bid_price, ask_price):
if bid_price < ask_price:
self.bid = order(bid_price, 1, BID, LIVE)
self.ask = order(ask_price, 1, ASK, LIVE)
self.status = 1
else:
self.status = -1
def update(self, side, new_price):
if side == BID:
if new_price < self.ask.price:
self.bid.price = new_price
return True, "MODIFIED ORDER ID = ", self.bid.id
else:
return False, "FAILED TO MODIFY ORDER ID = ", self.bid.id, " RESULTING BID WOULD HAVE CROSSED OUR ASK"
else:
if new_price > self.bid.price:
self.ask.price = new_price
return True, "MODIFIED ORDER ID = ", self.ask.id
else:
return False, "FAILED TO MODIFY ORDER ID = ", self.bid.id, " RESULTING ASK WOULD HAVE CROSSED OUR BID"
def evaluate(self, trade_price):
bid_fill, bid_fill_price = self.bid.evaluate(trade_price)
ask_fill, ask_fill_price = self.ask.evaluate(trade_price)
if bid_fill == True:
return BID, bid_fill_price
elif ask_fill == True:
return ASK, ask_fill_price
else:
return None, 0.0
def shift(self, increment):
self.bid.price += increment
self.ask.price += increment
def exit(self, side, increment):
if side == BID:
# shift the bid down to minus_inf to not buy anymore
self.bid.price = MINUS_INF
self.ask.price -= increment
else:
# shift the ask up to plus_inf to not sell anymore
self.ask.price = PLUS_INF
self.bid.price += increment
| Python | 0.000059 | |
0dd3894fb8816f6f904e5c7d204ab2672b304588 | Add earth mesh module | gravity_waves/earth_mesh.py | gravity_waves/earth_mesh.py | from __future__ import absolute_import, print_function, division
from firedrake import *
__all__ = ["generate_earth_mesh"]
def generate_earth_mesh(r_level, num_layers, thickness, hexes=False):
"""Generates an Earth-like spherical mesh for the gravity wave
problem.
:arg r_level: An ``int`` denoting the number of refinement
levels.
:arg num_layers: An ``int`` denoting the number of mesh layers.
:arg thickness: The thickness of the spherical shell (in meters).
:arg hexes: A ``bool`` indicating whether to generate a hexahedral mesh.
Returns: A Firedrake extruded spherical mesh.
"""
earth_radius = 6.371e6
layer_height = thickness / num_layers
if hexes:
spherical_base = CubedSphereMesh(earth_radius,
refinement_level=r_level)
else:
spherical_base = IcosahedralSphereMesh(earth_radius,
refinement_level=r_level)
earth_mesh = ExtrudedMesh(spherical_base, layers=num_layers,
layer_height=layer_height,
extrusion_type="radial")
return earth_mesh
| Python | 0 | |
5578d11f45e9c41ab9c4311f2bed48b9c24d9bf5 | Create file for Nonterminal have method | tests/grammar_term-nonterm_test/NonterminalHaveTest.py | tests/grammar_term-nonterm_test/NonterminalHaveTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
""" | Python | 0.000001 | |
b40c6ce73c439e7d74b867702fdd2c4cd7ad8b15 | add testrunner to automactically create/delete a test db during python and django tests. | couchdbkit/ext/django/testrunner.py | couchdbkit/ext/django/testrunner.py | from django.test.simple import DjangoTestSuiteRunner
from django.conf import settings
from couchdbkit.ext.django import loading as loading
from couchdbkit.resource import ResourceNotFound
class CouchDbKitTestSuiteRunner(DjangoTestSuiteRunner):
"""
A test suite runner for couchdbkit. This offers the exact same functionality
as the default django test suite runner, except that it connects all the couchdbkit
django-extended models to a test database. The test database is deleted at the
end of the tests. To use this, just add this file to your project and the following
line to your settings.py file:
TEST_RUNNER = 'myproject.testrunner.CouchDbKitTestSuiteRunner'
"""
dbs = []
def get_test_db_name(self, dbname):
return "%s_test" % dbname
def setup_databases(self, **kwargs):
print "overridding the couchdbkit database settings to use a test database!"
# first pass: just implement this as a monkey-patch to the loading module
# overriding all the existing couchdb settings
self.dbs = [(app, self.get_test_db_name(url)) for app, url in getattr(settings, "COUCHDB_DATABASES", [])]
old_handler = loading.couchdbkit_handler
couchdbkit_handler = loading.CouchdbkitHandler(self.dbs)
loading.couchdbkit_handler = couchdbkit_handler
loading.register_schema = couchdbkit_handler.register_schema
loading.get_schema = couchdbkit_handler.get_schema
loading.get_db = couchdbkit_handler.get_db
# register our dbs with the extension document classes
for app, value in old_handler.app_schema.items():
for name, cls in value.items():
cls.set_db(loading.get_db(app))
return super(CouchDbKitTestSuiteRunner, self).setup_databases(**kwargs)
def teardown_databases(self, old_config, **kwargs):
deleted_databases = []
skipcount = 0
for app, item in self.dbs:
app_label = app.split('.')[-1]
db = loading.get_db(app_label)
if db.dbname in deleted_databases:
skipcount += 1
continue
try:
db.server.delete_db(db.dbname)
deleted_databases.append(db.dbname)
print "deleted database %s for %s" % (db.dbname, app_label)
except ResourceNotFound:
print "database %s not found for %s! it was probably already deleted." % (db.dbname, app_label)
if skipcount:
print "skipped deleting %s app databases that were already deleted" % skipcount
return super(CouchDbKitTestSuiteRunner, self).teardown_databases(old_config, **kwargs) | Python | 0 | |
159156cb962ad0c8f4ea6f022471c75f33306f7e | Add unit tests for snapshots_client | tempest/tests/services/compute/test_snapshots_client.py | tempest/tests/services/compute/test_snapshots_client.py | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import mockpatch
from tempest_lib import exceptions as lib_exc
from tempest.services.compute.json import snapshots_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestSnapshotsClient(base.BaseComputeServiceTest):
FAKE_SNAPSHOT = {
"createdAt": "2015-10-02T16:27:54.724209",
"displayDescription": u"Another \u1234.",
"displayName": u"v\u1234-001",
"id": "100",
"size": 100,
"status": "available",
"volumeId": "12"
}
FAKE_SNAPSHOTS = {"snapshots": [FAKE_SNAPSHOT]}
def setUp(self):
super(TestSnapshotsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = snapshots_client.SnapshotsClient(
fake_auth, 'compute', 'regionOne')
def _test_create_snapshot(self, bytes_body=False):
self.check_service_client_function(
self.client.create_snapshot,
'tempest.common.service_client.ServiceClient.post',
{"snapshot": self.FAKE_SNAPSHOT},
to_utf=bytes_body, status=200,
volume_id=self.FAKE_SNAPSHOT["volumeId"])
def test_create_snapshot_with_str_body(self):
self._test_create_snapshot()
def test_create_shapshot_with_bytes_body(self):
self._test_create_snapshot(bytes_body=True)
def _test_show_snapshot(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot,
'tempest.common.service_client.ServiceClient.get',
{"snapshot": self.FAKE_SNAPSHOT},
to_utf=bytes_body, snapshot_id=self.FAKE_SNAPSHOT["id"])
def test_show_snapshot_with_str_body(self):
self._test_show_snapshot()
def test_show_snapshot_with_bytes_body(self):
self._test_show_snapshot(bytes_body=True)
def _test_list_snapshots(self, bytes_body=False, **params):
self.check_service_client_function(
self.client.list_snapshots,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_SNAPSHOTS, to_utf=bytes_body, **params)
def test_list_snapshots_with_str_body(self):
self._test_list_snapshots()
def test_list_snapshots_with_byte_body(self):
self._test_list_snapshots(bytes_body=True)
def test_list_snapshots_with_params(self):
self._test_list_snapshots('fake')
def test_delete_snapshot(self):
self.check_service_client_function(
self.client.delete_snapshot,
'tempest.common.service_client.ServiceClient.delete',
{}, status=202, snapshot_id=self.FAKE_SNAPSHOT['id'])
def test_is_resource_deleted_true(self):
module = ('tempest.services.compute.json.snapshots_client.'
'SnapshotsClient.show_snapshot')
self.useFixture(mockpatch.Patch(
module, side_effect=lib_exc.NotFound))
self.assertTrue(self.client.is_resource_deleted('fake-id'))
def test_is_resource_deleted_false(self):
module = ('tempest.services.compute.json.snapshots_client.'
'SnapshotsClient.show_snapshot')
self.useFixture(mockpatch.Patch(
module, return_value={}))
self.assertFalse(self.client.is_resource_deleted('fake-id'))
| Python | 0.000001 | |
913c9a10b2eb3b3d9de108a82a3251b2c0de0e10 | Add test for Hostname object | cybox/test/objects/hostname_test.py | cybox/test/objects/hostname_test.py | # Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.hostname_object import Hostname
from cybox.test.objects import ObjectTestCase
class TestHostname(ObjectTestCase, unittest.TestCase):
object_type = "HostnameObjectType"
klass = Hostname
_full_dict = {
'is_domain_name': True,
'hostname_value': "www.example.com",
'naming_system': ["DNS", "NETBIOS"],
'xsi:type': object_type,
}
if __name__ == "__main__":
unittest.main()
| Python | 0 | |
514aca20c6f076a86819d7180f36c3b2e8bcc33b | Add integration test checking compatibility of Keras models with TF optimizers. | tests/integration_tests/test_tensorflow_integration.py | tests/integration_tests/test_tensorflow_integration.py | from __future__ import print_function
import os
import tempfile
import pytest
import keras
from keras import layers
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
@pytest.mark.skipif(keras.backend.backend() != 'tensorflow', reason='Requires TF backend')
@keras_test
def test_tf_optimizer():
import tensorflow as tf
num_hidden = 10
output_dim = 2
input_dim = 10
target = 0.8
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=1., rho=0.95, epsilon=1e-08)
(x_train, y_train), (x_test, y_test) = get_test_data(
num_train=1000, num_test=200,
input_shape=(input_dim,),
classification=True, num_classes=output_dim)
model = keras.Sequential()
model.add(layers.Dense(num_hidden,
activation='relu',
input_shape=(input_dim,)))
model.add(layers.Dense(output_dim, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=8, batch_size=16,
validation_data=(x_test, y_test), verbose=2)
assert history.history['val_acc'][-1] >= target
# Test saving.
_, fname = tempfile.mkstemp('.h5')
model.save(fname)
model = keras.models.load_model(fname)
assert len(model.weights) == 4
os.remove(fname)
if __name__ == '__main__':
pytest.main([__file__])
| Python | 0 | |
ebffda0ec0f2619ad1071bb1d00d87ce08d59498 | Add support for SymDIVINE tool | benchexec/tools/symdivine.py | benchexec/tools/symdivine.py | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
import os
class Tool(benchexec.tools.template.BaseTool):
"""
SymDIVINE wrapper object
"""
BINS = ['symdivine', 'run_symdivine.py', 'compile_benchmark.py', 'lart', 'libz3.so']
def executable(self):
"""
Find the path to the executable file that will get executed.
This method always needs to be overridden,
and most implementations will look similar to this one.
The path returned should be relative to the current directory.
"""
return util.find_executable(self.BINS[0])
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
"""
Return the name of the tool, formatted for humans.
"""
return 'SymDIVINE'
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
"""
Compose the command line to execute from the name of the executable,
the user-specified options, and the inputfile to analyze.
This method can get overridden, if, for example, some options should
be enabled or if the order of arguments must be changed.
All paths passed to this method (executable, tasks, and propertyfile)
are either absolute or have been made relative to the designated working directory.
@param executable: the path to the executable of the tool (typically the result of executable())
@param options: a list of options, in the same order as given in the XML-file.
@param tasks: a list of tasks, that should be analysed with the tool in one run.
In most cases we we have only _one_ inputfile.
@param propertyfile: contains a specification for the verifier.
@param rlimits: This dictionary contains resource-limits for a run,
for example: time-limit, soft-time-limit, hard-time-limit, memory-limit, cpu-core-limit.
All entries in rlimits are optional, so check for existence before usage!
"""
directory = os.path.dirname(executable)
# Ignore propertyfile since we run only reachability
return [os.path.join('.', directory, self.BINS[1]), directory] + options + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
"""
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
"""
join_output = '\n'.join(output)
if isTimeout:
return 'TIMEOUT'
if returncode == 2:
return 'Pre-run phase failed: ' + join_output
if join_output is None:
return 'ERROR - no output'
elif 'Safe.'in join_output:
return result.RESULT_TRUE_PROP
elif 'Error state' in join_output:
return result.RESULT_FALSE_REACH
else:
return result.RESULT_UNKNOWN
def program_files(self, executable):
"""
OPTIONAL, this method is only necessary for situations when the benchmark environment
needs to know all files belonging to a tool
(to transport them to a cloud service, for example).
Returns a list of files or directories that are necessary to run the tool.
"""
directory = os.path.dirname(executable)
return map(lambda x: os.path.join('.', directory, x), self.BINS)
| Python | 0 | |
3840fbe6ca33e48b9bdbd78e85830a13606f612c | Create efi-smc.py | efi-smc.py | efi-smc.py | #!/usr/bin/python
from lxml import html
import requests
# Get the EFI/SMC table from Apple's Website
page = requests.get('http://support.apple.com/en-us/HT1237')
tree = html.fromstring(page.text)
# Count the number of rows which will be used in looping
rows = tree.xpath('//*[@id="kbtable"]/tbody/tr')
# For each row:
for i in range(len(rows)):
# Get the friendly name, model, EFI version, SMC version, and the download URLs
friendly_name = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[1]/text()' % locals())
model = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[2]/p/text()' % locals())
efi_version = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[3]/p/a/text()' % locals())
efi_url = tree.xpath('//*[@id="kbtable"]/tbody/tr[3]/td[3]/p/a/@href' % locals())
smc_version = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[4]/p/a/text()' % locals())
smc_url = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[4]/a/@href' % locals())
# Print everything in a human-readable format
if not friendly_name:
continue
else:
print friendly_name[0]
if not model:
model = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[2]/text()' % locals())
print model[0]
else:
print model[0]
if not efi_version:
efi_version = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[3]/a/text()' % locals())
if not efi_version:
print 'No EFI'
else:
print efi_version[0]
print efi_url[0]
else:
print efi_version[0]
if not smc_version:
smc_version = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[4]/a/text()' % locals())
if not smc_version:
print 'No SMC'
else:
print smc_version[0]
print smc_url[0]
else:
print smc_version[0]
print '\n'
| Python | 0.000001 | |
29d58850f54476003b96822c7f46998d3ce6c44d | Update SQLite DB with modified grades from the given filesystem tree | bin/update_db.py | bin/update_db.py |
__author__ = 'Gheorghe Claudiu-Dan, claudiugh@gmail.com'
import sqlite3
import os
from time import *
from stat import *
VMCHECKER_ROOT_ENVAR = 'VMCHECKER_ROOT'
VMCHECKER_DB_NAME = 'vmchecker.db'
GRADE_VALUE_FILE = 'NOTA'
if not os.environ.has_key(VMCHECKER_ROOT_ENVAR):
print "Error: ", VMCHECKER_ROOT_ENVAR, "is not set. "
exit()
vmchk_root = os.environ[VMCHECKER_ROOT_ENVAR]
db_path = os.path.join(vmchk_root, VMCHECKER_DB_NAME)
cwd = os.getcwd()
checked_root = os.path.join(vmchk_root, 'checked')
if not cwd.startswith(checked_root):
print "Error: working directory not in the VMCHECKER_ROOT subtree "
exit()
db_conn = sqlite3.connect(db_path)
db_conn.isolation_level = None # this is for autocommiting updates
db_cursor = db_conn.cursor()
##################################################
#
# DB routines
#
def DB_get_hw(hw_name):
""""Get a homework entry
@return
- the id of the homework
- None if it doesn't exist"""
global db_cursor
db_cursor.execute('SELECT id FROM teme WHERE nume = ?;', (hw_name,))
result = db_cursor.fetchone()
if None == result:
return result
else:
return result[0]
def DB_save_hw(hw_name):
""""If the homework identified by (hw_name)
exists then update the DB, else insert a new entry """
global db_cursor
id_hw = DB_get_hw(hw_name)
if None == id_hw:
db_cursor.execute('INSERT INTO teme (nume) values (?)', (hw_name,))
db_cursor.execute('SELECT last_insert_rowid();');
(id_hw,) = db_cursor.fetchone()
return id_hw
else:
return id_hw
def DB_get_student(student_name):
""""Get a student entry
@return
- the id of the entry
- None if it doesn't exist"""
global db_cursor
db_cursor.execute('SELECT id FROM studenti WHERE nume = ?;', (student_name,))
result = db_cursor.fetchone()
if None == result:
return result
else:
return result[0]
def DB_save_student(student_name):
""""If the student identified by (student_name)
exists then update the DB, else insert a new entry """
global db_cursor
id_student = DB_get_student(student_name)
if None == id_student:
db_cursor.execute('INSERT INTO studenti (nume) values (?)', (student_name,))
db_cursor.execute('SELECT last_insert_rowid();');
(id_student,) = db_cursor.fetchone()
return id_student
else:
return id_student
def DB_get_grade(id_hw, id_student):
""""Get a grade entry
@return
- a touple containing the id and the last modification timestamp
- (None, None) if it doesn't exist"""
global db_cursor
db_cursor.execute('SELECT id, data FROM note WHERE id_tema = ? and id_student = ?;', (id_hw, id_student))
result = db_cursor.fetchone()
if None == result:
return (None, None)
else:
return result
def DB_save_grade(id_hw, id_student, grade, data):
""""If the grade identified by (id_hw, id_student)
exists then update the DB, else insert a new entry """
global db_cursor
(id_grade, db_data) = DB_get_grade(id_hw, id_student)
if None == id_grade:
db_cursor.execute('INSERT INTO note (id_tema, id_student, nota, data) values (?, ?, ?, ?)', (id_hw, id_student, grade, data ))
else:
db_cursor.execute('UPDATE note set nota = ?, data = ? where id = ?', (grade, data, id_grade))
#
#################################################
def update_hws(path):
"""For each dentry from path, launch the next
level update routine - update_students() """
for hw_name in os.listdir(path):
path_hw = os.path.join(path, hw_name)
mode = os.stat(path_hw)[ST_MODE]
if S_ISDIR(mode):
# save hw in the DB
id_hw = DB_save_hw(hw_name)
print hw_name
update_students(path_hw, id_hw)
def update_students(path, id_hw):
"""For each dentry from path,
launch the update_grade() routine"""
for student_name in os.listdir(path):
path_student = os.path.join(path, student_name)
mode = os.stat(path_student)[ST_MODE]
if S_ISDIR(mode):
# save student in the DB
id_student = DB_save_student(student_name)
print "\t ", student_name,
update_grade(path_student, id_hw, id_student)
def get_grade_modifdata(grade_filename):
return strftime("%Y-%m-%d %H-%M-%S", gmtime(os.path.getmtime(grade_filename)))
def get_grade_value(grade_filename):
""" read an integer from the first line of the file """
f = open(grade_filename, 'r')
value = int(f.read())
f.close()
return value
def update_grade(path, id_hw, id_student):
"""Reads the grade's value only if the file containing the
value was modified since the last update of the DB for this
submission."""
grade_filename = os.path.join(path, GRADE_VALUE_FILE)
if not os.path.exists(grade_filename):
print "Error. File ", grade_filename, " for grade value does not exist "
return None
data_modif = get_grade_modifdata(grade_filename)
(id_grade, db_data) = DB_get_grade(id_hw, id_student)
if db_data != data_modif:
# modified since last db save
grade_value = get_grade_value(grade_filename)
if None != grade_value:
# update information from DB
DB_save_grade(id_hw, id_student, grade_value, data_modif)
print "\t\t UPDATED "
else:
print " "
# determine the level
LEVEL_HWS = 0
LEVEL_STUDENTI = 1
LEVEL_GRADE = 2
path = cwd
level = LEVEL_HWS
while path != checked_root:
(path, tail) = os.path.split(path)
level = level + 1
if level == LEVEL_HWS:
update_hws(cwd)
elif level == LEVEL_STUDENTI:
# get the name for homework
(head, nume_hw) = os.path.split(cwd)
# get the id
id_hw = DB_save_hw(nume_hw)
update_students(cwd, id_hw)
elif level == LEVEL_GRADE:
# get the names from the path
(head, nume_student) = os.path.split(cwd)
(head, nume_hw) = os.path.split(head)
# get the DB identifiers
id_hw = DB_save_hw(nume_hw)
id_student = DB_save_student(nume_student)
update_grade(cwd, id_hw, id_student)
db_cursor.close();
db_conn.close();
| Python | 0.999998 | |
cdc6b62400f66d1b2747b5668a6618c961deb962 | create game class | powerball/game.py | powerball/game.py | #!/usr/bin/env python
from collections import Counter
from .player import Player
class Game:
def __init__(self, players=None):
"""
Initialize the game instance.
players may be initialized by argument or by calling the begin method.
winning_numbers is initialized with an empty list. It should be populated
by the generate_winning_numbers method when called.
:param players (list): list of players
"""
self.players = list()
self.winning_numbers = list()
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.