commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
f3a6281098b11ddd353a394d914186d5c7683f9b | add jupyter module | rasa/jupyter.py | rasa/jupyter.py | import pprint as pretty_print
from typing import Any, Dict, Text, TYPE_CHECKING
from rasa_core.utils import print_success, print_error
if TYPE_CHECKING:
from rasa_core.agent import Agent
from rasa_core.interpreter import NaturalLanguageInterpreter
def pprint(object: Any):
pretty_print.pprint(object, indent=2)
def chat(model: Text = None, agent: 'Agent' = None,
interpreter: 'NaturalLanguageInterpreter' = None):
if model:
from rasa.run import create_agent
agent = create_agent(model)
elif agent and interpreter:
agent.set_interpreter(interpreter)
else:
print_error("You either have to define a model path or an agent and "
"an interpreter.")
print("Your bot is ready to talk! Type your messages here or send '/stop'.")
while True:
message = input()
if message == '/stop':
break
for response in agent.handle_text(message):
_display_bot_response(response)
def _display_bot_response(response: Dict):
from IPython.display import Image, display
for response_type, value in response.items():
if response_type == 'text':
print_success(value)
if response_type == 'image':
image = Image(url=value)
display(image,)
| Python | 0 | |
d8407723f9bf40ca166e5471e76c03c257bc71f9 | Add lc208_implement_trie_prefix_tree.py | lc208_implement_trie_prefix_tree.py | lc208_implement_trie_prefix_tree.py | """Leetcode 208. Implement Trie (Prefix Tree)
Medium
URL: https://leetcode.com/problems/implement-trie-prefix-tree/
Implement a trie with insert, search, and startsWith methods.
Example:
Trie trie = new Trie();
trie.insert("apple");
trie.search("apple"); // returns true
trie.search("app"); // returns false
trie.startsWith("app"); // returns true
trie.insert("app");
trie.search("app"); // returns true
Note:
You may assume that all inputs are consist of lowercase letters a-z.
All inputs are guaranteed to be non-empty strings.
Your Trie object will be instantiated and called as such:
obj = Trie()
obj.insert(word)
param_2 = obj.search(word)
param_3 = obj.startsWith(prefix)
"""
class Trie(object):
def __init__(self):
"""
Initialize your data structure here.
"""
pass
def insert(self, word):
"""
Inserts a word into the trie.
:type word: str
:rtype: None
"""
pass
def search(self, word):
"""
Returns if the word is in the trie.
:type word: str
:rtype: bool
"""
pass
def startsWith(self, prefix):
"""
Returns if there is any word in the trie that starts with the given prefix.
:type prefix: str
:rtype: bool
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.000001 | |
8a7c3ad110c00e6049fa452634d06a6873a36f90 | Add an examples folder. | examples/api.py | examples/api.py | # -*- coding: utf-8 -*-
'''
This example demonstrates the skosprovider API with a simple
DictionaryProvider containing just three items.
'''
from skosprovider.providers import DictionaryProvider
from skosprovider.uri import UriPatternGenerator
from skosprovider.skos import ConceptScheme
larch = {
'id': '1',
'uri': 'http://id.trees.org/1',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'The Larch'},
{'type': 'prefLabel', 'language': 'nl', 'label': 'De Lariks'}
],
'notes': [
{'type': 'definition', 'language': 'en', 'note': 'A type of tree.'}
],
'member_of': ['3'],
'matches': {
'close': ['http://id.python.org/different/types/of/trees/nr/1/the/larch']
}
}
chestnut = {
'id': '2',
'uri': 'http://id.trees.org/2',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'The Chestnut'},
{'type': 'altLabel', 'language': 'nl', 'label': 'De Paardekastanje'},
{'type': 'altLabel', 'language': 'fr', 'label': 'la châtaigne'}
],
'notes': [
{
'type': 'definition', 'language': 'en',
'note': 'A different type of tree.'
}
],
'member_of': ['3'],
'matches': {
'related': ['http://id.python.org/different/types/of/trees/nr/17/the/other/chestnut']
}
}
species = {
'id': 3,
'uri': 'http://id.trees.org/3',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'Trees by species'},
{'type': 'prefLabel', 'language': 'nl', 'label': 'Bomen per soort'}
],
'type': 'collection',
'members': ['1', '2'],
'notes': [
{
'type': 'editorialNote',
'language': 'en',
'note': 'As seen in How to Recognise Different Types of Trees from Quite a Long Way Away.'
}
]
}
provider = DictionaryProvider(
{
'id': 'TREES',
'default_language': 'nl',
'subject': ['biology']
},
[larch, chestnut, species],
uri_generator=UriPatternGenerator('http://id.trees.org/types/%s'),
concept_scheme=ConceptScheme('http://id.trees.org')
)
# Get a concept or collection by id
print(provider.get_by_id(1).label().label)
# Get a concept or collection by uri
print(provider.get_by_uri('http://id.trees.org/types/1'))
# Get all concepts and collections in a provider
# If possible, show a Dutch(as spoken in Belgium) label
print(provider.get_all(language='nl-BE'))
# Get the top concepts in a provider
print(provider.get_top_concepts())
# Find anything that has a label of horse
print(provider.find({'label': 'The Larch'}))
# Get the top of a display hierarchy
print(provider.get_top_display())
# Get the children to display in a hierarchy concept 1
# If possible, show a French(as spoken in Belgium) label
print(provider.get_children_display(3, language='fr-BE'))
# Get all concepts underneath a concept or collection
print(provider.expand(3))
| Python | 0 | |
1b00a597d8145b2df05054fef8d072d452209463 | Make SurfaceHandler (for sfc data) | src/data/surface.py | src/data/surface.py | from glob import glob
# Third-party modules
import pandas as pd
# Hand-made modules
from base import LocationHandlerBase
SFC_REGEX_DIRNAME = "sfc[1-5]"
KWARGS_READ_CSV_SFC_MASTER = {
"index_col": 0,
}
KWARGS_READ_CSV_SFC_LOG = {
"index_col": 0,
"na_values": ['', ' ']
}
class SurfaceHandler(LocationHandlerBase):
def __init__(self,
sfc_master_filepath,
sfc_file_prefix="sfc_",
sfc_file_suffix=".tsv"):
super().__init__(sfc_master_filepath, **KWARGS_READ_CSV_SFC_MASTER)
self.sfc_file_prefix = sfc_file_prefix
self.sfc_file_suffix = sfc_file_suffix
self.SFC_REGEX_DIRNAME = SFC_REGEX_DIRNAME
def read_tsv(self, path_or_buf):
df_ret = pd.read_csv(path_or_buf, **self.gen_read_csv_kwargs(KWARGS_READ_CSV_SFC_LOG))
df_ret.index = self.parse_datetime(pd.Series(df_ret.index).apply(str))
return df_ret
def to_tsv(self, df, path_or_buf, **kwargs):
df.to_csv(path_or_buf, **self.gen_to_csv_kwargs(kwargs))
def gen_filepath_list(self, aid_list):
sfc_regex_filepath_list = [
self.path.join(
self.INTERIM_DATA_BASEPATH,
self.SFC_REGEX_DIRNAME,
self.sfc_file_prefix + str(aid) + self.sfc_file_suffix
) for aid in aid_list
]
return [
sfc_file \
for sfc_regex_filepath in sfc_regex_filepath_list \
for sfc_file in glob(sfc_regex_filepath)
]
def retrive_data(self, filepath_list, name_list):
if len(filepath_list) < 1:
raise ValueError("Empty list ?")
df_ret = self.read_tsv(filepath_list[0])
df_ret.columns = [str(col_name) + '_' + name_list[0] for col_name in df_ret.columns]
if len(filepath_list) > 1:
for filepath, name in zip(filepath_list[1:], name_list[1:]):
df_ret = df_ret.merge(
self.read_tsv(filepath),
how="outer",
left_index=True,
right_index=True,
suffixes=(".", "_{}".format(name))
)
return df_ret
if __name__ == '__main__':
print("Surface!")
| Python | 0 | |
c025cd6649e2326ade7b81df8408c4363fdb2050 | add music handler | app/music_handler.py | app/music_handler.py | #-*- coding:utf-8 -*-
from tools.httptools import Route
from models import Music
@Route.get("/music")
def get_music_handler(app):
ret={};
ret['code']=200
ret['msg']='ok'
ret['type']=3
ret['data']=[
{'music_name':'CountrintStars','music_url':'http://7xs7oc.com1.z0.glb.clouddn.com/music%2FJason%20Chen%20-%20Counting%20Stars.mp3'},
]
return ret
@Route.post("/music")
def post_music_handler(app):
return 'ok'
| Python | 0.000001 | |
3091555ca7fc421f886a1df1ac28f677feb70a53 | Add default value for the fields object and field of the social network app model | app/migrations/0006_auto_20150825_1513.py | app/migrations/0006_auto_20150825_1513.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0005_auto_20150819_1054'),
]
operations = [
migrations.AlterField(
model_name='socialnetworkapp',
name='field_real_time_updates',
field=models.CharField(default=b'feed', max_length=50, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='socialnetworkapp',
name='object_real_time_updates',
field=models.CharField(default=b'page', max_length=100, null=True, blank=True),
preserve_default=True,
),
]
| Python | 0 | |
f05bd26c7a275c38c092c821e5ef62284c36e783 | Test transormation matrices | test/test_interpolate.py | test/test_interpolate.py | import pywt
import sys
import numpy as np
from scipy.ndimage.interpolation import affine_transform
sys.path.insert(0, '../mlp_test')
from data_utils import load_mnist
from skimage import transform as tf
test_data = load_mnist()[2]
chosen_index = 7
test_x_chosen = test_data[0][chosen_index]
test_y_chosen = test_data[1][chosen_index]
transm = np.eye(28, k=0) + np.eye(28, k=1)
pic_arr = test_x_chosen.reshape((28, 28))
pic_trans = np.dot(pic_arr, transm)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.subplot(2 , 1, 1)
plt.imshow(pic_arr, cmap = cm.Greys_r,interpolation='nearest')
plt.subplot(2 , 1, 2)
plt.imshow(pic_trans, cmap = cm.Greys_r,interpolation='nearest')
plt.show() | Python | 0 | |
47cbcf130e76604ed93306f02fc2221a276d3bbf | Split out | pentai/gui/spacer.py | pentai/gui/spacer.py | from kivy.uix.widget import Widget
class HSpacer(Widget):
pass
class VSpacer(Widget):
pass
| Python | 0.000462 | |
e37a616d23805ced7250d4cdd6422751d8ae5143 | Add populate_anticrispr.py | phageAPI/populate_anticrispr.py | phageAPI/populate_anticrispr.py | #! /usr/bin/env python
import os
from Bio import SeqIO
import textwrap
def populate(sequences, AntiCRISPR):
for seq in sequences:
spacer, _ = AntiCRISPR.objects.get_or_create(
accession=seq.name,
sequence=str(seq.seq))
spacer.save()
def main():
import argparse
parser = argparse.ArgumentParser(description=textwrap.dedent("""\
Import anticrispr sequences into the API DB.
To use, first get the list of accession numbers from
https://www.nature.com/articles/nmicrobiol201685. This list is
available locally in `data/antiCRISPR_accessions.txt`,
The script `acc2gb.py` can then be used to download the antiCRISPR
protein sequence in fasta format, assuming you have NICB access:
cat data/antiCRISPR_accessions.txt | python acc2gb.py your@email.com protein fasta > anticrispr.txt
Finally, populate the database with the accession numbers in the
accession field and the sequences in the sequence field:
cd phageAPI
populate_anticrispr.py ../anticrispr.txt
"""),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('sequences', metavar='FILE', nargs=1,
help='path to sequences file, in fasta format')
args = parser.parse_args()
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'phageAPI.settings')
import django
django.setup()
from restapi.models import AntiCRISPR
populate(SeqIO.parse(args.sequences[0], 'fasta'), AntiCRISPR)
if __name__ == '__main__':
main()
| Python | 0 | |
333cbe13d8104934a924f223427fa06a60a8b080 | Create php4dvd_1.py | php4dvd/php4dvd_1.py | php4dvd/php4dvd_1.py | # -*- coding: utf-8 -*-
from selenium import webdriver
import unittest
class Untitled(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:8080/"
self.verificationErrors = []
self.accept_next_alert = True
def test_untitled(self):
driver = self.driver
driver.get(self.base_url + "/php4dvd/")
driver.find_element_by_id("username").clear()
driver.find_element_by_id("username").send_keys("admin")
driver.find_element_by_name("password").clear()
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("submit").click()
def is_element_present(self, how, what):
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| Python | 0.000013 | |
d894e39e0280aaa45cef914f2202e978797b26fb | Update and rename 2 to 28.py | exercises/28.py | exercises/28.py | '''
Write a function find_longest_word()
that takes a list of words and returns
the length of the longest one.
Use only higher order functions.
'''
def find_longest_word(lst):
return len(max(lst, key=len))
| Python | 0.0001 | |
4d9da24a0356bc63be6ab06eb084f97116d1dac4 | should be functioning | cogs/packages.py | cogs/packages.py | import discord
from cogs import common
class Packages:
def __init__(self, bot):
self.bot = bot
def pkg_url(pkg):
"""Returns the URL for JSON data about a package on PyPI."""
return f'https://pypi.python.org/pypi/{pkg}/json'
@commands.command()
async def pypi(self, ctx, pkg: str):
async with ctx.bot.session.get(pkg_url(pkg), headers=common.user_agent) as ps:
pjson = await ps.json()
pkg_s = discord.Embed(title=f'PyPI stats for {pkg}', colour=0x690E8)
pkg_s.add_field(name='Version', value=pjson['info']['version'])
await ctx.send(embed=pkg)
| Python | 0.999455 | |
183b0c573478ff5e2480758abec629ddce4f0766 | Create missing migration for model Meta changes in 9d1e29150407e906bc651a8249c53e5e6d1fb1e7. | atmo/jobs/migrations/0035_auto_20170529_1424.py | atmo/jobs/migrations/0035_auto_20170529_1424.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-29 14:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0034_auto_20170529_1424'),
]
operations = [
migrations.AlterModelOptions(
name='sparkjobrun',
options={'get_latest_by': 'created_at', 'ordering': ['-created_at']},
),
]
| Python | 0 | |
b674ff31ab846bc4c11b615ad7f738ff176d5f96 | Add /team test | picoCTF-web/tests/api/functional/v1/test_team.py | picoCTF-web/tests/api/functional/v1/test_team.py | """Tests for the /api/v1/team endpoints."""
from common import ( # noqa (fixture)
ADMIN_DEMOGRAPHICS,
clear_db,
client,
decode_response,
get_csrf_token,
register_test_accounts,
TEACHER_DEMOGRAPHICS,
USER_DEMOGRAPHICS,
get_conn
)
def test_get_my_team(client):
"""Tests the /team endpoint."""
clear_db()
register_test_accounts()
client.post('/api/v1/user/login', json={
'username': USER_DEMOGRAPHICS['username'],
'password': USER_DEMOGRAPHICS['password']
})
expected_fields = {
'achievements': [],
'affiliation': 'Sample School',
'competition_active': False,
'country': 'US',
'eligible': True,
'flagged_submissions': [],
'max_team_size': 1,
'progression': [],
'score': 0,
'size': 1,
'solved_problems': [],
'team_name': 'sampleuser'
}
expected_member_fields = {
'affiliation': 'None',
'country': 'US',
'email': 'sample@example.com',
'firstname': 'Sample',
'lastname': 'User',
'username': 'sampleuser',
'usertype': 'student'
}
res = client.get('/api/v1/team')
assert res.status_code == 200
for k, v in expected_fields.items():
assert res.json[k] == v
assert len(res.json['members']) == 1
for k, v in expected_member_fields.items():
assert res.json['members'][0][k] == v
db = get_conn()
uid = db.users.find_one({'username': USER_DEMOGRAPHICS['username']})['uid']
assert res.json['members'][0]['uid'] == uid
| Python | 0 | |
d8c8287cce7ddc48f4ea271a54bd6efa8dcabe66 | Create OutputNeuronGroup_multiple_outputs_1.py | examples/OutputNeuronGroup_multiple_outputs_1.py | examples/OutputNeuronGroup_multiple_outputs_1.py | '''
Example of a spike receptor (only receives spikes)
In this example spikes are received and processed creating a raster plot at the end of the simulation.
'''
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
# The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup".
# It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group
# will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput.
# The size of the output spike train equals NumOfNeuronsOutput and must be the same size of the NeuronGroup who is
# going to interface with the rest of the system to send spikes.
# The function must return all the NeuronGroup objects and all the Synapse objects this way:
# ([list of all NeuronGroups],[list of all Synapses])
# and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation.
#
# Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes.
my_neuron_input_number = 45
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
print "main_NeuronGroup!" #DEBUG!
simclock = simulation_clock
Nr=NeuronGroup(my_neuron_input_number, model='v:1', reset=0, threshold=0.5, clock=simclock)
Nr.v=0
# SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)
Syn_iNG_Nr[:,:]='i==j'
print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG!
Syn_iNG_Nr.w=1
MExt=SpikeMonitor(Nr) # Spikes sent by UDP
Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP
return ([Nr],[Syn_iNG_Nr],[MExt,Mdummy])
def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
"""
input_NG: the neuron group that receives the input spikes
simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)
This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
"""
pass
figure()
raster_plot(simulation_MN[1])
title("Spikes Received by UDP")
show(block=True)
# savefig('output.pdf')
if __name__=="__main__":
my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=my_neuron_input_number, post_simulation_function=post_simulation_function,
input_addresses=[("127.0.0.1", 10101, my_neuron_input_number)], simclock_dt=1, inputclock_dt=2, TotalSimulationTime=10000, sim_repetitions=0, brian_address=2)
| Python | 0 | |
c70a127e17286f18e8d2d46bdc2e5ec6b0c55d0d | Add script to output statistics on body part emotion pairs | generate_body_part_emotion_pairs.py | generate_body_part_emotion_pairs.py | """Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
import copy
from collections import Counter
from count_labels import load_data
from emotools.heem_utils import heem_body_part_labels, heem_emotion_labels
from count_labels import corpus_metadata
from genre2period import print_results_line_period
def get_emotion_body_part_pairs(file_name):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
emotions2body = {}
emotions = Counter()
for labelset in Y:
body_parts = [lb for lb in labelset if lb in heem_body_part_labels]
emotion_lbls = [lb for lb in labelset if lb in heem_emotion_labels]
if body_parts and emotion_lbls:
for em in emotion_lbls:
for bp in body_parts:
if not emotions2body.get(em):
emotions2body[em] = Counter()
emotions2body[em][bp] += 1
emotions[em] += 1
return emotions, emotions2body
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='csv file containing corpus metadata')
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
args = parser.parse_args()
f_name = args.file
input_dir = args.input_dir
text2period, text2year, text2genre, period2text, genre2text = \
corpus_metadata(f_name)
# statistics for entire corpus
global_emotions = Counter()
emotion_body_pairs = Counter()
period_counters = {}
# process texts
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
text_id = text_file.replace('.txt', '')
in_file = os.path.join(input_dir, text_file)
period = text2period.get(text_id)
emotions, emotions2body = get_emotion_body_part_pairs(in_file)
global_emotions.update(emotions)
for em, body_counter in emotions2body.iteritems():
if not period_counters.get(em):
period_counters[em] = {}
if not period_counters.get(em).get(period):
period_counters[em][period] = Counter()
period_counters[em][period].update(body_counter)
for em, freq in global_emotions.most_common():
print '{}\t{}'.format(em, freq)
print 'Body part\tRenaissance\tClassisim\tEnlightenment\tNone\tTotal'
merged_body_parts = Counter()
for c in period_counters.get(em):
merged_body_parts.update(period_counters.get(em).get(c))
for label, freq in merged_body_parts.most_common():
print print_results_line_period(label, period_counters.get(em))
print
print
| Python | 0 | |
97f9540adfdd3805deb4f8e124513c34d65e0444 | Add fritzcollectd | fritzcollectd.py | fritzcollectd.py | # fritzcollectd - FRITZ!Box collectd plugin
# Copyright (c) 2014 Christian Fetzer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" fritzcollectd - FRITZ!Box collectd plugin """
__version__ = '0.1.0'
import collectd # pylint: disable=import-error
import fritzconnection
class FritzCollectd(object):
""" Collect data from FRITZ!Box and dispatch them to collectd """
PLUGIN_NAME = 'fritzbox'
SERVICE_ACTIONS = [
('WANIPConnection', 'GetStatusInfo'),
('WANCommonInterfaceConfig', 'GetCommonLinkProperties'),
('WANCommonInterfaceConfig', 'GetAddonInfos')
]
VALUES = {
'NewPhysicalLinkStatus': ('dslstatus', 'gauge'),
'NewConnectionStatus': ('constatus', 'gauge'),
'NewUptime': ('uptime', 'uptime'),
'NewLayer1DownstreamMaxBitRate': ('downstreammax', 'bitrate'),
'NewLayer1UpstreamMaxBitRate': ('upstreammax', 'bitrate'),
'NewByteSendRate': ('sendrate', 'bitrate'),
'NewByteReceiveRate': ('receiverate', 'bitrate'),
'NewTotalBytesSent': ('totalbytessent', 'bytes'),
'NewTotalBytesReceived': ('totalbytesreceived', 'bytes')
}
CONVERSION = {
'NewPhysicalLinkStatus': lambda x: 1 if x == 'Up' else 0,
'NewConnectionStatus': lambda x: 1 if x == 'Connected' else 0,
'NewByteSendRate': lambda x: 8 * x,
'NewByteReceiveRate': lambda x: 8 * x
}
def __init__(self):
self._fritz_address = fritzconnection.fritzconnection.FRITZ_IP_ADDRESS
self._fritz_port = fritzconnection.fritzconnection.FRITZ_TCP_PORT
self._fritz_user = fritzconnection.fritzconnection.FRITZ_USERNAME
self._fritz_password = ''
self._plugin_instance = ''
self._verbose = False
self._fc = None
def callback_configure(self, config):
""" Configure callback """
for node in config.children:
if node.key == 'Address':
self._fritz_address = node.values[0]
elif node.key == 'Port':
self._fritz_port = int(node.values[0])
elif node.key == 'User':
self._fritz_user = node.values[0]
elif node.key == 'Password':
self._fritz_password = node.values[0]
elif node.key == 'Instance':
self._plugin_instance = node.values[0]
else:
collectd.warning('fritzcollectd: Unknown config %s' % node.key)
def callback_init(self):
""" Init callback
Initialize the connection to the FRITZ!Box
"""
self.fritz_init()
def callback_read(self):
""" Read callback
Read data from the FRITZ!Box and dispatch values to collectd.
"""
values = self.fritz_read_data()
for instance, (value_type, value) in values.items():
self._dispatch_value(value_type, instance, value)
def _dispatch_value(self, value_type, instance, value):
""" Dispatch value to collectd """
val = collectd.Values()
val.plugin = self.PLUGIN_NAME
val.plugin_instance = self._plugin_instance
val.type = value_type
val.type_instance = instance
val.values = [value]
val.dispatch()
def fritz_init(self):
""" Initialize the connection to the FRITZ!Box """
try:
self._fc = fritzconnection.FritzConnection(
address=self._fritz_address, port=self._fritz_port,
user=self._fritz_user, password=self._fritz_password)
except IOError:
collectd.error("fritzcollectd: Failed to connect to %s" %
self._fritz_address)
def fritz_read_data(self):
""" Read data from the FRITZ!Box
The data is read from all actions defined in SERVICE_ACTIONS.
This function returns a dict in the following format:
{instance: (value_type, value)} where value_type and instance are
mapped from VALUES and CONVERSION.
"""
values = {}
# Don't try to gather data if the connection is not available
if self._fc is None:
return values
# Combine all values available in SERVICE_ACTIONS into a dict
for service, action in self.SERVICE_ACTIONS:
values.update(self._fc.call_action(service, action))
# Construct a dict: {instance: (value_type, value)} from the queried
# results applying a conversion (if defined)
nop = lambda x: x
result = {
instance:
(value_type, self.CONVERSION.get(key, nop)(values.get(key)))
for key, (instance, value_type) in self.VALUES.items()
}
return result
FC = FritzCollectd()
collectd.register_config(FC.callback_configure)
collectd.register_init(FC.callback_init)
collectd.register_read(FC.callback_read)
| Python | 0.00002 | |
e94dcbe781666ba8f083efab3dd63818d805c6d8 | Add flac2mp3 script. | flac2mp3.py | flac2mp3.py | #!/usr/bin/env python
import argparse
import subprocess
import os
import glob
def gettag(tag, filename):
proc = subprocess.Popen(["metaflac", "--no-utf8-convert",
"--show-tag=" + tag, filename], stdout=subprocess.PIPE)
out = proc.communicate()[0].rstrip()
remove = len(out.split("=")[0]) + 1
return out[remove:]
def decode_flac(flacfile, wavfile):
proc = subprocess.Popen(["flac", "-d", "-f", "-o", wavfile, flacfile])
proc.wait()
return 0 if proc.returncode == 0 else 1
def encode_mp3(wavfile, mp3file):
proc = subprocess.Popen(["lame", "-h", "-V0", wavfile, mp3file])
proc.wait()
return 0 if proc.returncode == 0 else 1
def tag_mp3(mp3file, metadata):
proc = subprocess.Popen(["eyeD3",
"-t", metadata["title"],
"-n", metadata["tracknumber"],
"-a", metadata["artist"],
"-A", metadata["album"],
"-G", metadata["genre"],
"-Y", metadata["year"],
"-A", metadata["album"],
"--add-image=" + metadata["cover"] + ":FRONT_COVER",
mp3file])
proc.wait()
return 0 if proc.returncode == 0 else 1
# parse command line arguments
parser = argparse.ArgumentParser(description='Convert flac files to mp3');
parser.add_argument('-i', metavar='input directory')
parser.add_argument('-o', metavar='output directory')
args = parser.parse_args()
if args.i:
indir = args.i
else:
indir="."
if args.o:
outdir = args.o
else:
outdir="."
print("read flac files from " + indir + "; results will be written to " +
outdir)
# convert and flag each file in directory
for filepath in os.listdir(indir):
print "path:" + filepath
if not filepath.endswith(".flac"):
continue
basename = os.path.basename(filepath)[0:-5]
flacname = indir + "/" + basename + ".flac"
wavname = outdir + "/" + basename + ".wav"
mp3name = outdir + "/" + basename + ".mp3"
print "transcode: " + flacname
metadata = {
"title" : gettag("TITLE", flacname),
"tracknumber" : gettag("TRACKNUMBER", flacname),
"artist" : gettag("ARTIST", flacname),
"album" : gettag("ALBUM", flacname),
"genre" : gettag("GENRE", flacname),
"year" : gettag("DATE", flacname)
}
if os.path.isfile("cover.png"):
metadata["cover"] = "cover.png"
elif os.path.isfile("cover.jpg"):
metadata["cover"] = "cover.jpg"
else:
metadata["cover"] = ""
print metadata
if decode_flac(flacname, wavname):
print "decoding flac failed"
exit(1)
if encode_mp3(wavname, mp3name):
print "encoding mp3 failed"
exit(1)
if tag_mp3(mp3name, metadata):
print "tagging mp3 failed"
exit(1)
os.remove(wavname);
print "finished"
| Python | 0 | |
19ee2fbee238e94b7944154d692a9e488ee19a79 | Add basic opps database configuration | opps/db/conf.py | opps/db/conf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf import settings
from appconf import AppConf
class OppsDataBaseConf(AppConf):
HOST = getattr(settings, 'OPPS_DB_HOSR', None)
USER = getattr(settings, 'OPPS_DB_USER', None)
PASSWORD = getattr(settings, 'OPPS_DB_PASSWORD', None)
PORT = getattr(settings, 'OPPS_DB_PORT', None)
NAME = getattr(settings, 'OPPS_DB_NAME', None)
TYPE = getattr(settings, 'OPPS_DB_TYPE', None)
OPTION = getattr(settings, 'OPPS_BD_OPTION', None)
class Meta:
prefix = 'opps_db'
| Python | 0 | |
8fa3accb670c665f6db420e291a2bc617f836994 | Add script that counts word frequencies in the debate data | debates2csv.py | debates2csv.py | #!/usr/bin/env python
# -.*- coding: utf-8 -.*-
"""Script to extract counts for words in word field.
Usage: debates2csv.py <xml-file or directory containing xml files>
2014-11-18 j.vanderzwaan@esciencecenter.nl
"""
import argparse
import xml.etree.ElementTree as ET
import re
from nltk.tokenize import RegexpTokenizer
from collections import Counter
import os
if __name__ == '__main__':
    # CLI: one positional argument naming either a single XML file or a
    # directory containing XML files (Python 2 script).
    parser = argparse.ArgumentParser()
    parser.add_argument('xml', help='the name of the xml file containing the '
                        'the word field counts should be extracted for')
    args = parser.parse_args()
    # file or directory?  Build the list of XML files to process.
    if os.path.isfile(args.xml):
        files = [args.xml]
    else:
        # Treat the argument as a directory; collect its files (non-recursive).
        files = []
        for fn in os.listdir(args.xml):
            file_name = '{}{}{}'.format(args.xml, os.sep, fn)
            if os.path.isfile(file_name):
                files.append(file_name)
# a list of the LIWC anger words
# abuse.* means that any word starting with 'abuse' (e.g., 'abuser' or
# 'abuses') is counted.
word_field = ['abuse.*', 'abusi.*', 'aggravat.*', 'aggress.*', 'agitat.*',
'anger.*', 'angr.*', 'annoy.*', 'antagoni.*', 'argh.*',
'argu.*', 'arrogan.*', 'assault.*', 'asshole.*', 'attack.*',
'bastard.*', 'battl.*', 'beaten', 'bitch.*', 'bitter.*',
'blam.*', 'bother.*', 'brutal.*', 'cheat.*', 'confront.*',
'contempt.*', 'contradic.*', 'crap', 'crappy', 'critical',
'critici.*', 'crude.*', 'cruel.*', 'cunt.*', 'cut', 'cynic',
'damn.*', 'danger.*', 'defenc.*', 'defens.*', 'despis.*',
'destroy.*', 'destruct.*', 'disgust.*', 'distrust.*',
'domina.*', 'dumb.*', 'dump.*', 'enemie.*', 'enemy.*',
'enrag.*', 'envie.*', 'envious', 'envy.*', 'evil.*',
'feroc.*', 'feud.*', 'fiery', 'fight.*', 'foe.*', 'fought',
'frustrat.*', 'fuck', 'fucked.*', 'fucker.*', 'fuckin.*',
'fucks', 'fume.*', 'fuming', 'furious.*', 'fury', 'goddam.*',
'greed.*', 'grouch.*', 'grr.*', 'harass.*', 'hate', 'hated',
'hateful.*', 'hater.*', 'hates', 'hating', 'hatred',
'heartless.*', 'hell', 'hellish', 'hit', 'hostil.*',
'humiliat.*', 'idiot.*', 'insult.*', 'interrup.*',
'intimidat.*', 'jealous.*', 'jerk', 'jerked', 'jerks',
'kill.*', 'liar.*', 'lied', 'lies', 'lous.*', 'ludicrous.*',
'lying', 'mad', 'maddening', 'madder', 'maddest', 'maniac.*',
'mock', 'mocked', 'mocker.*', 'mocking', 'mocks', 'molest.*',
'moron.*', 'murder.*', 'nag.*', 'nast.*', 'obnoxious.*',
'offence.*', 'offend.*', 'offens.*', 'outrag.*', 'paranoi.*',
'pettie.*', 'petty.*', 'piss.*', 'poison.*', 'prejudic.*',
'prick.*', 'protest', 'protested', 'protesting', 'punish.*',
'rage.*', 'raging', 'rape.*', 'raping', 'rapist.*',
'rebel.*', 'resent.*', 'revenge.*', 'ridicul.*', 'rude.*',
'sarcas.*', 'savage.*', 'sceptic.*', 'screw.*', 'shit.*',
'sinister', 'skeptic.*', 'smother.*', 'snob.*', 'spite.*',
'stubborn.*', 'stupid.*', 'suck', 'sucked', 'sucker.*',
'sucks', 'sucky', 'tantrum.*', 'teas.*', 'temper', 'tempers',
'terrify', 'threat.*', 'ticked', 'tortur.*', 'trick.*',
'ugl.*', 'vicious.*', 'victim.*', 'vile', 'villain.*',
'violat.*', 'violent.*', 'war', 'warfare.*', 'warred',
'warring', 'wars', 'weapon.*', 'wicked.*']
    # \w+ keeps alphanumeric/underscore runs and drops punctuation.
    tokenizer = RegexpTokenizer(r'\w+')
    num_words = 0          # total token count over all files
    all_words = Counter()  # frequency of every token
    wf_words = Counter()   # frequency of word-field (anger word) tokens only
    for input_file in files:
        # read xml file
        tree = ET.parse(input_file)
        root = tree.getroot()
        # NOTE(review): `root` and `speaker` below are assigned but never used.
        # NOTE(review): getiterator() is deprecated in ElementTree; iter() is
        # the modern equivalent.
        for speech in tree.getiterator('speech'):
            speaker = speech.attrib.get('speaker')
            text = ET.tostring(speech)
            # remove xml tags
            text = re.sub('<[^>]*>', '', text)
            # remove html entities (e.g., ɣ)
            text = re.sub('&#\d+;', '', text)
            # convert to lower case
            text = text.lower()
            # extract a list of words
            words = tokenizer.tokenize(text)
            # count words
            num_words += len(words)
            all_words.update(words)
    # One alternation over the whole pattern list: ^p1$|^p2$|...
    regex = re.compile('^{}$'.format('$|^'.join(word_field)))
    # count word field words
    for word in all_words:
        if regex.match(word):
            wf_words[word] += all_words[word]
    # print output as tab-separated "word<TAB>frequency" (Python 2 prints)
    print 'Word\tFrequency'
    print 'TOTAL\t{}'.format(num_words)
    for (word, freq) in wf_words.most_common():
        print '{}\t{}'.format(word, freq)
| Python | 0 | |
d1c791ccf5b2873bbc248c9b079a5b68159ffb50 | Add ECM Keys script | python/ecep/portal/management/commands/update_ecm.py | python/ecep/portal/management/commands/update_ecm.py | import csv
import os
import re
from django.core.management.base import NoArgsCommand
from django.conf import settings
from portal.models import Location
class Command(NoArgsCommand):
    """
    Import Cleaned Site Name, Address, and ECM Keys

    Reads master-list.csv from the current working directory and copies the
    cleaned fields onto the matching Location rows (matched by primary key
    taken from the 'Portal ID' column).
    """
    def handle(self, *args, **options):
        with open('master-list.csv', 'rb') as master:
            reader = csv.DictReader(master)
            for row in reader:
                try:
                    l = Location.objects.get(pk=int(row['Portal ID']))
                    l.site_name = row['Master Site Name']
                    l.address = row['Master Address']
                    l.ecm_key = row['ECM Key']
                    l.save()
                    print l.site_name
                except:
                    # NOTE(review): bare except hides the real failure
                    # (missing Location, unparsable 'Portal ID', missing CSV
                    # column, even KeyboardInterrupt); consider catching
                    # specific exceptions and logging the failing row.
                    print "Ruh roh!"
                    continue
| Python | 0 | |
8ed1fccb2a1d72815bde93b19d45069e59db0900 | add force404 sample | force404.py | force404.py | # -*- coding:utf-8 -*-
from bottle import route, run, abort, error
@route("/")
def top():
    """Root handler: deliberately aborts with 404 to exercise the error page."""
    abort(404, "go to 404")
    # NOTE(review): likely unreachable -- bottle's abort() raises HTTPError;
    # confirm. Kept from the original sample.
    return "Hello world!"
@error(404)
def error404(error):
    """Body returned for 404 responses."""
    return "Not Found!"
run(host="0.0.0.0", port=8080, debug=True, reloader=True) | Python | 0 | |
5d58200622e05728acce8ffba1ddf7e5063f556c | Create formatIO.py | formatIO.py | formatIO.py |
# Normalize octave marks in a tune string, e.g. (xxxx) -> (x)(x)(x)(x), ([x]) -> x, [xxxx] -> [x][x][x][x]
def formatting(old_tune):
    '''
    Normalize octave marks so every note carries its own brackets.

    '(' / ')' mark a low-octave group and '[' / ']' a high-octave group;
    this rewrites the tune so each note is wrapped individually, e.g.
    (xx) -> (x)(x), [xx] -> [x][x], ([x]) -> x.  A '#' (sharp) belongs to
    the following note: the opening marks are emitted with the '#' and the
    closing marks after the note itself.

    :param old_tune: tune string to normalize
    :return: the normalized tune (also printed)
    '''
    new_tune = ''
    sharped = False
    low = high = 0  # numbers of currently open '(' and '[' groups
    for ch in old_tune:
        if ch == '(':
            low += 1
        elif ch == ')':
            low -= 1
        elif ch == '[':
            high += 1
        elif ch == ']':
            high -= 1
        else:
            # Net nesting decides which brackets wrap this character.
            depth = low - high
            if depth > 0:
                prefix, suffix = '(' * depth, ')' * depth
            elif depth < 0:
                # Bug fix: the original multiplied by the *negative*
                # (low - high) in the low < high branches, which produced
                # empty strings and dropped high-octave brackets entirely.
                prefix, suffix = '[' * -depth, ']' * -depth
            else:
                prefix = suffix = ''
            if ch == '#':
                # Sharp: open the brackets now, close them after the note.
                sharped = True
                new_tune += prefix + ch
            elif sharped:
                new_tune += ch + suffix
                sharped = False
            else:
                new_tune += prefix + ch + suffix
    print(new_tune)
    return new_tune
| Python | 0.000003 | |
aff6ff82ec4fc0076f8356d782a2a103510ebbfd | use Queue for product and custom problem | product_custom/use_queue.py | product_custom/use_queue.py |
# http://blog.jobbole.com/52412/
from threading import Thread
import time
import random
from Queue import Queue
# Shared bounded queue (capacity 10): put() blocks the producer when full,
# get() blocks the consumer when empty.
queue = Queue(10)
class ProducerThread(Thread):
    """Puts a random number in 0-4 on the queue forever (Python 2 demo)."""
    def run(self):
        nums = range(5)
        while True:
            num = random.choice(nums)
            queue.put(num)
            print "Produced", num
            time.sleep(random.random())
class ConsumerThread(Thread):
    """Takes numbers off the queue forever."""
    def run(self):
        while True:
            num = queue.get()
            # NOTE(review): task_done() is called before the item is actually
            # handled; harmless here because nothing calls queue.join().
            queue.task_done()
            print "Consumed", num
            time.sleep(random.random())
# Both threads are non-daemon and loop forever, so the script runs until killed.
ProducerThread().start()
ConsumerThread().start()
| Python | 0 | |
b75a0293f214de4196d9df50ef5906885c2810fc | Create empClass.py | empClass.py | empClass.py | from rgfunc import *
class Employee(object):
    """A simple employee record (Python 2 style new-style class)."""
    # Class-level defaults; instances override these per employee.
    year = 0
    month = 0
    day = 0
    city = ""
    country = ""
    lastname = ""
    def __init__(self, name):
        self.name = name
    def dateofbirth(self):
        """Return the date of birth formatted as day/month/year."""
        return "/".join(str(part) for part in (self.day, self.month, self.year))
    def fullname(self):
        """Return the first and last names separated by a single space."""
        return " ".join((str(self.name), str(self.lastname)))
# This module only defines Employee; refuse to run as a script (Python 2).
if __name__ == "__main__":
    print "Error-Invalid File to Run- Please Run main.py."
    exit()
| Python | 0.000002 | |
36f2890f326f80ec251ec0de12345525448842b9 | Move file to bin; update ami to LTS release; use standard security group and keyname; add owner to instance name | bin/aws_launcher.py | bin/aws_launcher.py | #!/usr/bin/env python
# encoding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# ideas from
# https://github.com/mozilla/telemetry-server/tree/master/provisioning/aws
import argparse
import json
import sys
import traceback
import time
try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType
from boto.ec2.blockdevicemapping import BlockDeviceMapping
except:
sys.stderr.write("Requires boto; try 'pip install boto'\n")
exit(1)
# Baseline launch configuration; any key can be overridden by the JSON file
# passed via --config-file (see Launcher.setup_config).
default_config = {
    "image": "ami-5189a661",
    "region": "us-west-2",
    "key_name": "20130730-svcops-base-key-dev",
    "instance_type": "c3.2xlarge",
    "security_groups": ["pipeline-analysis"],
    "iam_role": "pipeline-dev-iam-access-IamInstanceProfile-YVZ950U23IFP",
    # Behavior when the instance shuts itself down.
    "shutdown": "terminate",
    # Device name -> ephemeral (instance-store) volume name mapping.
    "ephemeral_map": {
        "/dev/xvdb": "ephemeral0",
        "/dev/xvdc": "ephemeral1"
    },
    "owner": "datapipeline",
    "tags": {
        "App": "pipeline",
        "Type": "analysis",
        "Env": "dev",
    }
}
class Launcher(object):
    """Parse CLI arguments, read userdata.sh, and launch one EC2 instance."""
    def __init__(self):
        parser = self.get_arg_parser()
        args = parser.parse_args()
        self.read_user_data()
        self.setup_config(args)
    def get_arg_parser(self):
        """Build the CLI parser.  NOTE: type=file is Python 2 only."""
        parser = argparse.ArgumentParser(description='Launch EC2 instances')
        parser.add_argument(
            "-c", "--config-file",
            help="JSON config file",
            type=file,
            default=None
        )
        parser.add_argument(
            "-k", "--aws-key",
            help="AWS Key",
            default=None
        )
        parser.add_argument(
            "-s", "--aws-secret-key",
            help="AWS Secret Key",
            default=None
        )
        parser.add_argument(
            "-o", "--owner",
            help="AWS owner tag",
            default=None
        )
        return parser
    def read_user_data(self):
        """Read userdata.sh from the current directory into self.user_data."""
        with open("userdata.sh", "r") as fh:
            self.user_data = fh.read()
    def setup_config(self, args):
        """Layer the JSON config file, then individual CLI overrides, on top
        of default_config."""
        self.config = default_config.copy()
        if args.config_file:
            user_config = json.load(args.config_file)
            self.config.update(user_config)
        if args.aws_key:
            self.config["aws_key"] = args.aws_key
        if args.aws_secret_key:
            self.config["aws_secret_key"] = args.aws_secret_key
        if args.owner:
            self.config["owner"] = args.owner
    def fire_up_instance(self):
        """Connect to EC2, start the instance, tag it, and poll every 10s
        until it leaves the 'pending' state (Python 2 print statements)."""
        self.conn = boto.ec2.connect_to_region(
            self.config["region"],
            aws_access_key_id=self.config.get("aws_key", None),
            aws_secret_access_key=self.config.get("aws_secret_key", None)
        )
        # Map ephemeral instance-store volumes onto device names.
        # NOTE: dict.iteritems() is Python 2 only.
        mapping = BlockDeviceMapping()
        for device, eph_name in self.config["ephemeral_map"].iteritems():
            mapping[device] = BlockDeviceType(ephemeral_name=eph_name)
        reservation = self.conn.run_instances(
            self.config["image"],
            key_name=self.config["key_name"],
            instance_type=self.config["instance_type"],
            security_groups=self.config["security_groups"],
            block_device_map=mapping,
            user_data=self.user_data,
            instance_profile_name=self.config["iam_role"],
            instance_initiated_shutdown_behavior=self.config["shutdown"]
        )
        instance = reservation.instances[0]
        # Tag as "<owner>-<App>-<Type>" plus the configured tag set.
        name_string = "{0}-{1}-{2}".format(
            self.config["owner"],
            self.config["tags"]["App"],
            self.config["tags"]["Type"])
        owner_tags = {"Name": name_string, "Owner": self.config["owner"]}
        self.conn.create_tags([instance.id], owner_tags)
        self.conn.create_tags([instance.id], self.config["tags"])
        while instance.state == 'pending':
            print "Instance is pending -- Waiting 10s for instance", \
                instance.id, "to start up..."
            time.sleep(10)
            instance.update()
        print ("Instance {0} is {1}".format(instance.id, instance.state))
        print ("ubuntu@{0}".format(instance.public_dns_name))
def main():
    """Launch an instance; return 0 on success, 1 on any error (Python 2)."""
    try:
        launcher = Launcher()
        launcher.fire_up_instance()
        return 0
    except Exception, e:
        print "Error:", e
        traceback.print_exc()
        return 1
if __name__ == "__main__":
    sys.exit(main())
| Python | 0 | |
d9be3f189fc34117bdec6e0c7856f7a7dc5f902a | Add tool for generating the JSONP required by the documentation versions. | cdap-docs/tools/versionscallback-gen.py | cdap-docs/tools/versionscallback-gen.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Cask Data, Inc.
#
# Used to generate JSONP from a CDAP documentation directory on a webserver.
#
# sudo echo "versionscallback({\"development\": \"2.6.0-SNAPSHOT\", \"current\": \"2.5.2\", \"versions\": [\"2.5.1\", \"2.5.0\"]});" > json-versions.js; ls -l
import sys
from os import getcwd, listdir, readlink
from os.path import isdir, islink, join
def add_value(call, name, value):
    """Append an escaped \\"name\\": \\"value\\" pair to the call string.

    Falsy values are skipped (call returned unchanged); ', ' is inserted
    when call already has content.
    """
    if not value:
        return call
    separator = ', ' if call else ''
    return call + separator + '\\\"%s\\\": \\\"%s\\\"' % (name, value)
def add_object(call, name, value):
    """Append an escaped \\"name\\": <object> pair to the call string.

    The object's string form has its single quotes rewritten to escaped
    double quotes so Python lists render like JSON arrays.  Falsy values
    are skipped.
    """
    if not value:
        return call
    rendered = ('\\\"%s\\\": %s' % (name, value)).replace("\'", '\\\"')
    if call:
        return call + ', ' + rendered
    return rendered
def walk_directory(path=''):
    """Scan `path` (default: cwd) and fill the module-level globals:
    `current` (target of the 'current' symlink), `development` (a *SNAPSHOT
    directory) and `versions` (all other directories, in reversed
    directory-listing order).
    """
    global current, development, versions
    if not path:
        path = getcwd()
    onlydirs = [ d for d in listdir(path) if isdir(join(path,d)) ]
    onlydirs.reverse()
    for d in onlydirs:
        if d == 'current':
            d_path = join(path,d)
            if islink(d_path):
                current = readlink(d_path)
        elif d.endswith('SNAPSHOT'):
            development = d
        elif d and d != current:
            versions.append(d)
def build(path=''):
    """Print the sudo command that writes json-versions.js (Python 2)."""
    global current, development, versions
    call = ''
    walk_directory(path)
    call = add_value(call, 'development', development)
    call = add_value(call, 'current', current)
    call = add_object(call, 'versions', versions)
    target = join(path, 'json-versions.js')
    print 'sudo echo "versionscallback({%s});" > %s; ls -l' % (call, target)
def usage():
    """Print usage help (Python 2 print statements)."""
    print 'Generates a command that creates the "versionscallback" JSONP from a CDAP documentation directory on a webserver.'
    print 'Run this with the path to the directory containing the documentation directories.'
    print 'python %s <path>' % sys.argv[0]
# Main: with a path argument, emit the command; otherwise show usage.
if __name__ == '__main__':
    current = ''
    development = ''
    versions = []
    path = ''
    if len(sys.argv) > 1:
        path = sys.argv[1]
        build(path)
    else:
        usage()
| Python | 0 | |
fe1d75065f7371502cf81ea57e2a1019c2db093c | add config.py | custom_config.py | custom_config.py | # ===== GAE dev_appserver.py settings =====
# [Required]
gae_sdk_path = ""    # path to the Google App Engine SDK
project_path = ""    # path to the project run by dev_appserver.py
# [Optional]
datastore_path = ""
port = ""
admin_port = ""
# ===== GAE Helper settings =====
# [Log]
log_path = ""
append_date_to_log = False
# [Request Filter]
# NOTE(review): filter semantics are defined by the consumer of this config;
# presumably file extensions / regexes of requests to filter out -- verify.
file_type_filter = []
custom_regex_filter = []
use_time_delimiter = False
| Python | 0.000002 | |
90c36d54f8822ef28bef98be4ba735d15b405648 | add get_dump.py utility | get_dump.py | get_dump.py | #!/usr/bin/python
import argparse
import mosquitto
import time, random
import sys
def on_mqtt_message(arg0, arg1, arg2=None):
    """Print each retained message; stop once our marker topic arrives.

    Supports both callback signatures used by mosquitto bindings:
    (mosq, obj, msg) and (obj, msg) -- when arg2 is None we were called
    with the two-argument form.
    """
    if arg2 is None:
        mosq, obj, msg = None, arg0, arg1
    else:
        mosq, obj, msg = arg0, arg1, arg2
    if msg.topic != retain_hack_topic:
        # Ordinary message: dump it as "topic<TAB>payload" (Python 2 print).
        print "%s\t%s" % (msg.topic, msg.payload)
    else:
        # Our own marker message came back: the author relies on the broker
        # having delivered the retained backlog first, so we are done.
        client.disconnect()
        sys.exit(0)
# NOTE(review): despite the 'deleter' description below, this tool only
# dumps messages -- nothing is unpublished/deleted anywhere in the code.
if __name__ =='__main__':
    parser = argparse.ArgumentParser(description='MQTT retained message deleter', add_help=False)
    parser.add_argument('-h', '--host', dest='host', type=str,
                        help='MQTT host', default='localhost')
    parser.add_argument('-p', '--port', dest='port', type=int,
                        help='MQTT port', default='1883')
    parser.add_argument('topic' , type=str,
                        help='Topic mask to unpublish retained messages from. For example: "/devices/my-device/#"')
    args = parser.parse_args()
    client = mosquitto.Mosquitto()
    client.connect(args.host, args.port)
    client.on_message = on_mqtt_message
    client.subscribe(args.topic)
    # hack to get retained settings first: retained messages are delivered on
    # subscribe, so a fresh message published to a private topic acts as an
    # end-of-backlog marker (see on_mqtt_message).
    retain_hack_topic = "/tmp/%s/retain_hack" % ( client._client_id)
    client.subscribe(retain_hack_topic)
    client.publish(retain_hack_topic, '1')
    while 1:
        rc = client.loop()
        if rc != 0:
            break
| Python | 0.000002 | |
e04d3bfd20879d0e8e404a3fff4ab37b914cd303 | Add ContactForm | contact/forms.py | contact/forms.py | from django import forms
from django.core.exceptions import ValidationError
from prosodyauth.forms import PlaceholderForm
from simplecaptcha import captcha
# (value, label) choices for the subject dropdown of the contact form.
contact_reasons = (
    ('question', 'Question'),
    ('problem', 'Problem'),
    ('suggestion', 'Suggestion'),
    ('other', 'Other'),
)
class ContactForm(PlaceholderForm):
    """Site contact form; username and originating IP travel as hidden fields."""
    username = forms.CharField(widget=forms.HiddenInput)
    ip_address = forms.GenericIPAddressField(widget=forms.HiddenInput)
    subject = forms.ChoiceField(choices=contact_reasons)
    email = forms.EmailField()
    message = forms.CharField(widget=forms.Textarea)
| Python | 0 | |
0aed5df2f7c08cdb365b098a93800b0269c0c6b4 | Create class Dataset | gammacat/dataset.py | gammacat/dataset.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
from .utils import load_yaml, write_yaml
from gammapy.catalog.gammacat import GammaCatResource
__all__ = [
'DataSet',
]
log = logging.getLogger(__name__)
class DataSet:
    """Process a dataset file: a YAML document tied to a GammaCatResource."""

    resource_type = 'ds'

    def __init__(self, data, resource):
        """Store the parsed YAML `data` and its `resource` descriptor."""
        log.debug('DataSet.__init__()')
        self.resource = resource
        self.data = data

    @classmethod
    def read(cls, filename):
        """Read a dataset and its resource info from a YAML file."""
        data = load_yaml(filename)
        resource = cls._read_resource_info(data, filename)
        return cls(data=data, resource=resource)

    def write(self, filename):
        """Write the dataset back out as YAML."""
        write_yaml(self.data, filename)

    def folder(self):
        """Return the reference_id as a URL-safe folder name ('&' -> '%26')."""
        return self.data['reference_id'].replace('&', '%26')

    @classmethod
    def _read_resource_info(cls, data, location):
        """Build the GammaCatResource for `data` read from `location`."""
        # dict.get replaces the previous bare try/except, which silently
        # swallowed *every* exception instead of just a missing key.
        file_id = data.get('file_id', -1)
        return GammaCatResource(
            source_id=data['source_id'],
            reference_id=data['reference_id'],
            file_id=file_id,
            type=cls.resource_type,
            location=location,
        )
828b065e857b5f148a0d20b06fd9d45824a1befc | add manager.py flask api for genmodel | genmodel/manager.py | genmodel/manager.py | from flask import Flask, request, render_template, jsonify
import psycopg2
import os
# Connect to Database.  Fail fast with a clear message when the required
# environment variables are missing.
try:
    DB_NAME = os.environ['DB_NAME']
    DB_USER = os.environ['DB_USER']
    DB_PASS = os.environ['DB_PASS']
except KeyError as e:
    raise Exception('environment variables for database connection must be set')
# Single module-level connection shared by all request handlers.
conn = psycopg2.connect(dbname=DB_NAME,
                        user=DB_USER,
                        password=DB_PASS,
                        # Fixed: `localhost` was a bare name (NameError at
                        # import time); it must be a string.
                        host='localhost',
                        port=5432
                        )
app = Flask(__name__)
@app.route('/')
def man():
    """Index route; the API overview/manual is not implemented yet."""
    return 'Not implemented'
@app.route('/jobs', methods=["GET", "POST"])
def jobs():
    """List all jobs (GET) or start a new job (POST, not yet implemented)."""
    if request.method == "GET":
        cur = conn.cursor()
        try:
            cur.execute('SELECT * FROM jobs')
            rows = cur.fetchall()
        finally:
            # Always release the cursor, even if the query fails.
            cur.close()
        # A bare Python list is not a valid Flask response object; serialize
        # the rows as JSON instead (jsonify was imported but unused before).
        return jsonify(jobs=rows)
    elif request.method == "POST":
        # Take a JSON with attributes of job, start job, then redirect to that
        # job's monitoring page (jobs/job_id)
        return 'Not implemented'
    else:
        return 'Not implemented'
@app.route('/jobs/<job_id>', methods=["GET", "PATCH", "DELETE"])
def job_for_id(job_id):
    """Placeholder endpoints for monitoring/updating/cancelling one job."""
    if request.method == "GET":
        # Job monitoring for a specific job
        return 'GET job #' + job_id
    elif request.method == "PATCH":
        # TODO: Should this be an endpoint?
        # Modify job, scale resolvers
        return 'PATCH job #' + job_id
    elif request.method == "DELETE":
        # Remove all dedicated Digital Ocean containers, stop all publishers,
        # writers and workers. Purge the queue.
        return 'DELETE job #' + job_id
    # Unreachable for the methods allowed by the route decorator above.
    return job_id
if __name__ == '__main__':
    app.run(port=5000, host= '0.0.0.0', debug=True)
| Python | 0 | |
7e28a3fe54c24a38a90bf0e7cf2f634ca78ee2ed | Add script used to generate a Cantor set | cantorset.py | cantorset.py | ### BEGIN LICENSE
# The MIT License (MIT)
#
# Copyright (C) 2015 Christopher Wells <cwellsny@nycap.rr.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
### END LICENSE
"""A script which visually draws a Cantor set."""
import turtle
import time
def rec_draw(l, r, x, xd, t, pen):
    """Recursively draw each section of the Cantor set, until the set
    number of rows has been met.

    l, r -- x-coordinates of the segment's left and right ends
    x    -- current row index (recursion depth)
    xd   -- vertical distance between rows
    t    -- total number of rows to draw
    pen  -- the turtle used for drawing
    """
    if x < t:
        # Draw the first full line, is redundant after first recursion
        pen.up()
        pen.goto(l, (-(x - 1) * xd))
        pen.down()
        pen.goto(r, (-(x - 1) * xd))
        # Find the length of each of the lesser lines (thirds of the parent)
        diff = (r - l) / 3
        # Draw the first lesser line (left third) and recurse into it
        pen.up()
        pen.goto(l, -x * xd)
        pen.down()
        pen.goto(l + diff, -x * xd)
        rec_draw(l, l + diff, x + 1, xd, t, pen)
        # Draw the second lesser line (right third) and recurse into it
        pen.up()
        pen.goto(l + diff * 2, -x * xd)
        pen.down()
        pen.goto(r, -x * xd)
        rec_draw(l + diff * 2, r, x + 1, xd, t, pen)
    else:
        # End once the given number of lines has been met
        return
def main():
    """Draw a visual representation of a Cantor set."""
    # Create the pen and set its initial values (hidden, fastest speed)
    pen = turtle.Turtle()
    pen.ht()
    pen.speed(0)
    # Set the values of the Cantor set
    left = -200 # The left boundary
    right = 200 # The right boundary
    starting_row = 0 # The location of the first row
    row_distance = 10 # The distance between rows
    rows = 5 # The number of rows
    # Draw the Cantor set
    rec_draw(left, right, starting_row, row_distance, rows, pen)
    # Keep the window open (~8 minutes); turtle.done() would be the usual way.
    time.sleep(500)
# Run the main method of the script
if __name__ == '__main__':
    main()
| Python | 0 | |
282383ab66f85ff6eb58b98c34558c02c9cf44eb | add a tool to list recipes used by builders (and ones not on recipes) | scripts/tools/builder_recipes.py | scripts/tools/builder_recipes.py | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import operator
import os
import subprocess
import sys
import tempfile
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
BLACKLISTED_MASTERS = [
'master.chromium.reserved',
'master.chromiumos.unused',
'master.client.reserved',
'master.reserved',
'master.tryserver.reserved',
]
def getMasterConfig(path):
    """Dump a master's buildbot config to JSON (via dump_master_cfg.py run
    under runit.py) and return it parsed.
    """
    with tempfile.NamedTemporaryFile() as f:
        # The child writes JSON to f.name; we read it back through the
        # still-open handle.
        # NOTE(review): reopening a NamedTemporaryFile by name works on POSIX
        # but not on Windows -- confirm that is acceptable here.
        subprocess.check_call([
            os.path.join(BASE_DIR, 'scripts', 'tools', 'runit.py'),
            os.path.join(BASE_DIR, 'scripts', 'tools', 'dump_master_cfg.py'),
            os.path.join(path),
            f.name])
        return json.load(f)
def main(argv):
    """Print builder -> recipe for every non-blacklisted master (Python 2).

    NOTE(review): `argv` is accepted but parse_args() reads sys.argv itself,
    so the parameter is effectively ignored.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--only-nonrecipe', action='store_true')
    args = parser.parse_args()
    data = []
    for master in os.listdir(os.path.join(BASE_DIR, 'masters')):
        if master in BLACKLISTED_MASTERS:
            continue
        path = os.path.join(BASE_DIR, 'masters', master)
        if not os.path.isdir(path):
            continue
        config = getMasterConfig(path)
        for builder in config['builders']:
            try:
                recipe = builder['factory']['properties'].get(
                    'recipe', ['<no recipe>'])[0]
            except Exception as e:
                recipe = '<error: %r>' % e
            # With --only-nonrecipe, keep only builders without a recipe
            # (or whose config could not be read).
            if (args.only_nonrecipe and
                recipe != '<no recipe>' and
                not recipe.startswith('<error:')):
                continue
            data.append({
                'master': master,
                'builder': builder['name'],
                'recipe': recipe,
            })
    # NOTE(review): max() raises ValueError when `data` is empty.
    master_padding = max(len(row['master']) for row in data)
    builder_padding = max(len(row['builder']) for row in data)
    pattern = '%%-%ds | %%-%ds | %%s' % (master_padding, builder_padding)
    for row in sorted(data, key=operator.itemgetter('master', 'builder')):
        print pattern % (row['master'], row['builder'], row['recipe'])
    return 0
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| Python | 0.000004 | |
b3a2daace23a8723d97428234b7503684b87adcf | Add BWA alignment module to replace Blast | readtagger/bwa.py | readtagger/bwa.py | import os
import subprocess
import pysam
import temporary
class Bwa(object):
    """Hold BWA alignment data and methods (successor to the Blast version)."""
    def __init__(self, input_path, bwa_index=None, reference_fasta=None, threads=1):
        """
        BWA object for `sequences`.
        Align sequences in fastq/fasta file `input_path` to bwa_index or construct a new index using reference_fasta
        >>> from test.helpers import roo_seq
        >>> from .fasta_io import write_sequences
        >>> with temporary.temp_dir() as tempdir:
        ...     reference_fasta = os.path.join(str(tempdir), 'reference.fasta')
        ...     write_sequences({'cluster_1_left_sequences_0': roo_seq}, output_path=reference_fasta)
        ...     b = Bwa(input_path=reference_fasta, reference_fasta=reference_fasta)
        >>> len(b.bwa_run) == 1
        True
        >>> result = b.reads_to_clusters()
        >>> assert len(result[1]['left_sequences']) == 1
        """
        self.input_path = input_path
        self.bwa_index = bwa_index
        self.reference_fasta = reference_fasta
        self.threads = threads
        # The whole pipeline runs eagerly at construction time.
        self.bwa_run = self.run()
        self.clusters = self.reads_to_clusters()
        self.best_candidates, self.left_candidates, self.right_candidates = self.describe_clusters()
        pass  # NOTE(review): stray no-op, safe to delete
    def run(self):
        """Run bwa command."""
        # Streams `bwa mem` SAM output straight into pysam; also stores the
        # SQ (reference sequence) header entries on self.header.
        with temporary.temp_dir() as temp_dir:
            temp_dir = str(temp_dir)
            if not self.bwa_index:
                self.bwa_index = make_bwa_index(self.reference_fasta, dir=temp_dir)
            proc = subprocess.Popen(['bwa', 'mem', '-t', str(self.threads), self.bwa_index, self.input_path],
                                    stdout=subprocess.PIPE, env=os.environ.copy(), close_fds=True)
            f = pysam.AlignmentFile(proc.stdout)
            self.header = f.header['SQ']
            reads = [r for r in f]
            proc.stdout.close()
            return reads
    def reads_to_clusters(self):
        """Map a readname back to a specific cluster contig or sequence.

        Query names look like 'cluster_<n>_..._<category>_<i>'; the result
        is {cluster_number: {category: {i: read}}}.
        """
        clusters = {}
        for r in self.bwa_run:
            qname = r.query_name
            cluster_number = int(qname.split('cluster_')[1].split('_')[0])
            if cluster_number not in clusters:
                clusters[cluster_number] = {'left_sequences': {}, 'right_sequences': {}, 'left_contigs': {}, 'right_contigs': {}}
            for cluster_item in clusters[cluster_number].keys():
                if cluster_item in qname:
                    number = int(qname.split('_%s_' % cluster_item)[1])
                    clusters[cluster_number][cluster_item][number] = r
                    break
        return clusters
    def describe_clusters(self):
        """Return a list of possible matches, sorted by the length of the region that is being covered.

        NOTE(review): the three candidate lists are re-created on every
        iteration of the cluster loop but only returned after it, so the
        results of all but the last cluster are discarded -- confirm whether
        per-cluster accumulation was intended.
        """
        for cluster_number, cluster in self.clusters.items():
            # Prefer assembled contigs; fall back to raw sequences.
            left = self.split_reads_into_tid_clusters(cluster['left_contigs'] or cluster['left_sequences'])
            right = self.split_reads_into_tid_clusters(cluster['right_contigs'] or cluster['right_sequences'])
            common_tids = set(left.keys()) & set(right.keys())
            best_candidates = []
            left_candidates = []
            right_candidates = []
            if common_tids:
                # Left and right evidence agree on a reference: span both sides.
                for common_tid in common_tids:
                    length = self.header[common_tid]['LN']
                    min_left = min([r.pos for r in left[common_tid]])
                    min_right = min([r.pos for r in right[common_tid]])
                    max_left = max([(r.pos + r.alen) for r in left[common_tid]])
                    max_right = max([(r.pos + r.alen) for r in right[common_tid]])
                    if max_right - min_left >= max_left - min_right:
                        start = min_left
                        end = max_right
                    else:
                        start = min_right
                        end = max_left
                    full_length_fraction = (end - start) / float(length)
                    support = len(set(r.query_name for r in left[common_tid])) + len(set(r.query_name for r in right[common_tid]))
                    best_candidates.append((self.header[common_tid]['SN'], start, end, full_length_fraction, support))
            else:
                # No shared reference: report each side separately.
                for tid, reads in left.items():
                    length = self.header[tid]['LN']
                    start = min([r.pos for r in reads])
                    end = max([(r.pos + r.alen) for r in reads])
                    full_length_fraction = (end - start) / float(length)
                    support = len(set(r.query_name for r in reads))
                    left_candidates.append((self.header[tid]['SN'], start, end, full_length_fraction, support))
                for tid, reads in right.items():
                    length = self.header[tid]['LN']
                    start = min([r.pos for r in reads])
                    end = max([(r.pos + r.alen) for r in reads])
                    full_length_fraction = (end - start) / float(length)
                    support = len(set(r.query_name for r in reads))
                    right_candidates.append((self.header[tid]['SN'], start, end, full_length_fraction, support))
            # Sort by distance between end and start. That's probably not the best idea ...
            best_candidates = sorted(best_candidates, key=lambda x: x[2] - x[1])
            left_candidates = sorted(left_candidates, key=lambda x: x[2] - x[1])
            right_candidates = sorted(right_candidates, key=lambda x: x[2] - x[1])
        return best_candidates, left_candidates, right_candidates
    def split_reads_into_tid_clusters(self, read_d):
        """Split reads in read_d into clusters based on the read tid."""
        cluster = {}
        for read in read_d.values():
            if read.tid not in cluster:
                cluster[read.tid] = [read]
            else:
                cluster[read.tid].append(read)
        return cluster
def make_bwa_index(reference_fasta, dir='.'):
    """Make a bwa index for reference_fasta and return the path to the
    index's basename (the symlinked fasta inside `dir`).

    :param reference_fasta: fasta file to index
    :param dir: directory in which to place the symlink and index files
    :raises subprocess.CalledProcessError: if `bwa index` fails
    """
    fasta_basename = os.path.basename(reference_fasta)
    target_fasta = os.path.join(dir, fasta_basename)
    # Symlink to the absolute source path: a relative source would be resolved
    # relative to `dir` and could produce a dangling link.
    os.symlink(os.path.abspath(reference_fasta), target_fasta)
    args = ['bwa', 'index', target_fasta]
    # check_call instead of call: a failed `bwa index` should raise here
    # rather than surface later as a confusing mapping error.
    subprocess.check_call(args, env=os.environ.copy())
    return os.path.abspath(target_fasta)
| Python | 0 | |
bf3f14692b6e2a348f5a0171ad57e494801ed4f4 | Add python script to write lib svm expected data format from my collected data | scripts/writelibsvmdataformat.py | scripts/writelibsvmdataformat.py | """
A script to write out lib svm expected data format from my collecting data
"""
import os
import sys
import csv
import getopt
cmd_usage = """
usage: writelibsvmdataformat.py --inputs="/inputs/csv_files" --output="/output/lib_svm_data"
"""
feature_space = 10
def write_libsvm_data(input_files, output_file):
    """
    :param input_files: input files, each of which contains a single label at first row, and a bunch of data following
    :param output_file: output file, which meet lib svm expected data format

    Writes one libsvm line ("label idx:val ...") per `feature_space` input
    rows; zero-valued features are omitted, as libsvm's sparse format allows.
    NOTE(review): a trailing group of fewer than `feature_space` rows at the
    end of a file is silently discarded -- confirm that is intended.
    """
    with open(output_file, 'wb') as output_csv_file:
        output_writer = csv.writer(output_csv_file, delimiter=' ')
        for input_file in input_files:
            with open(input_file, 'rb') as input_csv_file:
                input_reader = csv.reader(input_csv_file, delimiter=' ')
                # assume there is only one item in each row
                label = input_reader.next()
                i = 1  # start from index 1
                line = [label[0]]
                for row in input_reader:
                    if int(row[0]) != 0:
                        line.append(':'.join([str(i), row[0]]))
                    i += 1
                    if i > feature_space:
                        output_writer.writerow(line)
                        i = 1
                        line = [label[0]]
def main(argv):
    """
    :param argv: command line arguments
    :rtype : error status, success 0 and fail 1

    NOTE(review): if -i/--inputs or -o/--output is missing, `inputs` /
    `output_file` below is unbound and raises NameError (Python 2 script).
    """
    try:
        optlist, args = getopt.getopt(argv[1:], "hi:o:", ["help", "inputs=", "output="])
    except getopt.GetoptError:
        print("Command line arguments error, please try --help for help")
        return 1
    for opt, opt_arg in optlist:
        if opt in ("-h", "--help"):
            print cmd_usage
            return 0
        if opt in ("-i", "--inputs"):
            inputs = opt_arg
            if not os.path.exists(inputs):
                print("Input files folder not exist")
                return 1
        elif opt in ("-o", "--output"):
            output_file = opt_arg
    # print the messages
    print("Inputs folder: " + inputs)
    print("Output file: " + output_file)
    assert isinstance(output_file, basestring)
    assert isinstance(inputs, basestring)
    # Recursively collect every .csv file under the inputs folder.
    input_files = []
    for root, dirs, files in os.walk(inputs):
        for name in files:
            if name.endswith('.csv'):
                input_files.append(os.path.abspath(os.path.join(root, name)))
    if len(input_files) == 0:
        print("No input files.")
        return 1
    write_libsvm_data(input_files, output_file)
if __name__ == "__main__":
    sys.exit(main(sys.argv))
900c93e6917ef92da02cca6865284e0004b01695 | add file | aiovk/mixins.py | aiovk/mixins.py | class LimitRateDriverMixin(object):
requests_per_second = 3
| Python | 0.000001 | |
6f75dd27772812cbd91b0fa9582110607c3ee2a3 | check format strings for errors | pychecker/pychecker2/FormatStringChecks.py | pychecker/pychecker2/FormatStringChecks.py | from pychecker2.Check import Check
from pychecker2.util import BaseVisitor
from pychecker2.Warning import Warning
from compiler import ast, walk
from types import *
import re
class Unknown(Exception): pass  # raised when a value cannot be computed statically
def _compute_constant(node):
    """Statically evaluate a constant-expression AST node.

    Handles literals plus constant folding of '+' and '*'; raises Unknown
    for any other node or when the operand types do not combine.
    """
    if isinstance(node, ast.Const):
        return node.value
    if isinstance(node, (ast.Add, ast.Mul)):
        try:
            lhs = _compute_constant(node.left)
            rhs = _compute_constant(node.right)
            if isinstance(node, ast.Add):
                return lhs + rhs
            return lhs * rhs
        except TypeError:
            pass
    raise Unknown
format_re = re.compile('%([(]([a-zA-Z_]+)[)])?[ #+-]*'
'([0-9]*|[*])(|[.](|[*]|[0-9]*))([hlL])?'
'([diouxXeEfFgGcrs%])')
class FormatError(Exception):
def __init__(self, position):
self.position = position
FORMAT_UNKNOWN, FORMAT_DICTIONARY, FORMAT_TUPLE = range(3)
def _check_format(s):
pos = 0
specs = []
while 1:
pos = s.find('%', pos)
if pos < 0:
break
match = format_re.search(s, pos)
if not match or match.start(0) != pos:
raise FormatError(pos)
if match.group(7) != '%':
specs.append( (match.group(2), match.group(3), match.group(5),
match.group(6), match.group(7)) )
pos = match.end(0)
return specs
def _compute_tuple_size(node):
    """Statically compute the length of a tuple-valued AST expression.

    NOTE(review): the Add/Mul branches call _compute_constant on both
    operands (apparently copied from that function), so e.g. `(a,) + (b,)`
    or `(a,) * 3` ends up raising Unknown instead of combining tuple sizes;
    confirm whether recursing with _compute_tuple_size was intended.
    """
    try:
        if isinstance(node, ast.Tuple):
            return len(node.nodes)
        if isinstance(node, ast.Add):
            return _compute_constant(node.left) + \
                   _compute_constant(node.right)
        if isinstance(node, ast.Mul):
            return _compute_constant(node.left) * \
                   _compute_constant(node.right)
    except TypeError:
        pass
    raise Unknown
class FormatStringCheck(Check):
"Look for warnings in format strings"
badFormat = \
Warning('Report illegal format specifications in format strings',
'Bad format specifier at position %d (%s)')
uselessModifier = \
Warning('Report unused modifiers for format strings (l, h, L)',
'Modifier %s is not necessary')
mixedFormat = \
Warning('Report format strings which use both positional and named formats',
'Cannot mix positional and named formats (%s)')
formatCount = \
Warning('Report positional format string with the wrong '
'number of arguments',
'Wrong number of arguments supplied for format: '
'%d given %d required')
unknownFormatName = \
Warning('Report unknown names if locals() or globals() '
'are used for format strings',
'The name %s is not defined in %s')
def check(self, file, unused_checker):
if not file.parseTree:
return
for scope in file.scopes.values():
class GetMod(BaseVisitor):
def __init__(self):
self.mods = []
def visitMod(self, node):
self.mods.append(node)
self.visitChildren(node)
mods = walk(scope.node, GetMod()).mods
for mod in mods:
try:
s = _compute_constant(mod.left)
except Unknown:
continue
if not isinstance(s, StringType):
continue
try:
formats = _check_format(s)
except FormatError, detail:
part = 10
example = s[detail.position : detail.position + part]
example += len(s) > detail.position + part and "..." or ""
file.warning(mod, self.badFormat, detail.position, example)
continue
if not formats:
continue
count = len(formats)
format_type = FORMAT_UNKNOWN
names = []
for f in formats:
name, width, precision, lmodifier, type = f
if lmodifier:
file.warning(mod, self.uselessModifier, lmodifier)
if name:
if format_type == FORMAT_TUPLE:
file.warning(mod, self.mixedFormat, '%s' % name)
format_type = FORMAT_DICTIONARY
names.append(name)
else:
if format_type == FORMAT_DICTIONARY:
file.warning(mod, self.mixedFormat, '%%%s' % type)
format_type = FORMAT_TUPLE
if width == '*':
count += 1
if precision == '*':
count += 1
if format_type == FORMAT_TUPLE:
try:
n = _compute_tuple_size(mod.right)
if n != count:
file.warning(mod, self.formatCount, n, count)
except Unknown:
pass
else:
if isinstance(mod.right, ast.CallFunc) and \
isinstance(mod.right.node, ast.Name):
defines = []
if mod.right.node.name == 'locals':
defines = scope.defs.keys()
if mod.right.node.name == 'globals':
defines = scope.defs.keys()
for p in parents(scope):
defines.extend(p.defs.keys())
for n in names:
if not n in defines:
file.warning(mod, self.unknownFormatName,
n, mod.right.node.name)
| Python | 0.000001 | |
a30277835e65195fc68e6708fe5da394bc43e08c | Test Projection | tests/test_projection.py | tests/test_projection.py | from demosys.test import DemosysTestCase
from demosys.opengl import Projection
class ProjectionTest(DemosysTestCase):
def test_create(self):
proj = Projection(fov=60, near=0.1, far=10)
proj.update(fov=75, near=1, far=100)
proj.tobytes()
proj.projection_constants
| Python | 0.000001 | |
1c2c7d5134780e58bd69f24ee06050b2f405d946 | Add unit test for run_nohw | src/program/lwaftr/tests/subcommands/run_nohw_test.py | src/program/lwaftr/tests/subcommands/run_nohw_test.py | """
Test the "snabb lwaftr run_nohw" subcommand.
"""
import unittest
from random import randint
from subprocess import call, check_call
from test_env import DATA_DIR, SNABB_CMD, BaseTestCase
class TestRun(BaseTestCase):
program = [
str(SNABB_CMD), 'lwaftr', 'run_nohw',
]
cmd_args = {
'--duration': '1',
'--bench-file': '/dev/null',
'--conf': str(DATA_DIR / 'icmp_on_fail.conf'),
'--inet-if': '',
'--b4-if': '',
}
veths = []
@classmethod
def setUpClass(cls):
cls.create_veth_pair()
@classmethod
def create_veth_pair(cls):
veth0 = cls.random_veth_name()
veth1 = cls.random_veth_name()
# Create veth pair.
check_call(('ip', 'link', 'add', veth0, 'type', 'veth', 'peer', \
'name', veth1))
# Set interfaces up.
check_call(('ip', 'link', 'set', veth0, 'up'))
check_call(('ip', 'link', 'set', veth1, 'up'))
# Add interface names to class.
cls.veths.append(veth0)
cls.veths.append(veth1)
@classmethod
def random_veth_name(cls):
return 'veth%s' % randint(10000, 999999)
def test_run_nohw(self):
self.execute_run_test(self.cmd_args)
def execute_run_test(self, cmd_args):
self.cmd_args['--inet-if'] = self.veths[0]
self.cmd_args['--b4-if'] = self.veths[1]
output = self.run_cmd(self.build_cmd())
self.assertIn(b'link report', output,
b'\n'.join((b'OUTPUT', output)))
def build_cmd(self):
result = self.program
for item in self.cmd_args.items():
for each in item:
result.append(each)
return result
@classmethod
def tearDownClass(cls):
cls.remove_veths()
@classmethod
def remove_veths(cls):
for i in range(0, len(cls.veths), 2):
check_call(('ip', 'link', 'delete', cls.veths[i]))
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
d36ce70863653238d88e8ec23416ec894d6140eb | Create _geoserver_publish_layergroup.py | lib/cybergis/gs/_geoserver_publish_layergroup.py | lib/cybergis/gs/_geoserver_publish_layergroup.py | from base64 import b64encode
from optparse import make_option
import json
import urllib
import urllib2
import argparse
import time
import os
import subprocess
def make_request(url, params, auth=None, data=None, contentType=None):
"""
Prepares a request from a url, params, and optionally authentication.
"""
print url + urllib.urlencode(params)
req = urllib2.Request(url + urllib.urlencode(params), data=data)
if auth:
req.add_header('AUTHORIZATION', 'Basic ' + auth)
if contentType:
req.add_header('Content-type', contentType)
else:
if data:
req.add_header('Content-type', 'text/xml')
return urllib2.urlopen(req)
def parse_url(url):
if (url is None) or len(url) == 0:
return None
index = url.rfind('/')
if index != (len(url)-1):
url += '/'
return url
def buildPOSTDataLayerGroup(layergroup,layers):
data = "<layerGroup><name>"+layergroup+"</name>"
for layer in layers:
data += "<layer>"+layer+"</layer>"
data += "</layergroup>"
return data
def createLayerGroup(verbose, geoserver, workspace, auth, layergroup, layers):
if verbose > 0:
print('Creating GeoServer Layergroup for '+layergroup+".")
params = {}
data = buildPOSTDataLayerGroup(layergroup,layers)
url = geoserver+"rest/workspaces/"+workspace+"/layergroups.xml"
try:
request = make_request(url=url+'?', params=params, auth=auth, data=data)
except:
#raise Exception("Create layergroup failed with url="+url+", params="+str(params)+", data="+data)
print "Create layergroup failed with url="+url+", params="+str(params)+", data="+data
raise
if request.getcode() != 201:
raise Exception("Create layergroup failed: Status Code {0}".format(request.getcode()))
if verbose > 0:
print('Layer created.')
def parse_layers(layers):
if layers and len(layers) > 0:
try:
return layers.split(",")
except:
return None
else:
return None
def run(args):
#print args
#==#
verbose = args.verbose
#==#
layers = parse_layers(args.layers)
geoserver = parse_url(args.geoserver)
workspace = args.workspace
layergroup = args.layergroup
#==#
auth = None
if args.username and args.password:
auth = b64encode('{0}:{1}'.format(args.username, args.password))
#==#
print "=================================="
print "#==#"
print "CyberGIS Script / cybergis-scrit-geoserver-publish-layergroup.py"
print "Publishes multiple layers as a layer group"
print "#==#"
#==#
if not layers:
print "Could not parse layers correctly."
return 1;
#==#
#Publish Layers as Layer Group
try:
createLayerGroup(verbose, geoserver, workspace, auth, layergroup, layers)
except:
print "Couldn't create layergroup from layers "+args.layers+"."
raise
print "=================================="
| Python | 0.000001 | |
ff3e3e6be3a5a46db73a772f99071e83b9026d98 | add wikipedia plugin | plugins/wiki.py | plugins/wiki.py | import wikipedia
MAX_LEN = 350
@yui.command('wiki', 'wk', 'w')
def wiki(argv):
"""wiki [-lang] <article>"""
lang = 'en'
if len(argv) < 2:
return
# check if a language is given
argv = argv[1:]
if len(argv) > 1 and argv[0].startswith('-'):
lang = argv[0][1:]
argv = argv[1:]
article = ' '.join(argv)
try:
wikipedia.set_lang(lang)
sum = wikipedia.summary(article)
except Exception as ex:
return "Couldn't find an article for '%s'" % article
if len(sum) > MAX_LEN:
sum = sum[:MAX_LEN-3] + '...'
return sum
| Python | 0.000001 | |
dfb611c84bb339cfdf7f9e0e06be8aa6ef6e17e3 | Update ycm config | utils/.ycm_extra_conf.py | utils/.ycm_extra_conf.py | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
#c、c++ include path
'-isystem',
'/usr/include',
'-isystem',
'/usr/include/c++/4.8.2',
'-isystem',
'/usr/local/include',
#3rdParty include path
'-isystem',
'/usr/local/3rdParty/boost/include',
'-isystem',
'/usr/local/3rdParty/log4cpp/include',
'-isystem',
'/usr/local/3rdParty/thrift/include',
'-isystem',
'/usr/local/3rdParty/RCF/include',
'-isystem',
'/usr/local/3rdParty/zeromq/include',
'-isystem',
'/usr/local/3rdParty/ssl/include',
'-isystem',
'/usr/local/3rdParty/uuid/include',
#project include path
'-isystem',
'./include',
'-isystem',
'./src',
#'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
#'/System/Library/Frameworks/Python.framework/Headers',
#'-isystem',
#'../llvm/include',
#'-isystem',
#'../llvm/tools/clang/include',
#'-I',
#'.',
#'-I',
#'./ClangCompleter',
#'-isystem',
#'./tests/gmock/gtest',
#'-isystem',
#'./tests/gmock/gtest/include',
#'-isystem',
#'./tests/gmock',
#'-isystem',
#'./tests/gmock/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| Python | 0 | |
8fc91c780cf7f0b43deac69b0e60f2b9472af172 | Add script to automatically setup ln -s for the utilities I use | set-links.py | set-links.py | """
Helper script to set up ln -s <desired utilities> on a given bin/ PATH.
"""
import os
utilities = (
'mineutils/mc',
'misc/gitmail',
'misc/pipu',
'misc/reclick',
)
def run(program, *args):
"""Spawns a the given program as a subprocess and waits for its exit"""
# I for Invariant argument count, P for using PATH environmental variable
os.spawnlp(os.P_WAIT, program, program, *args)
if __name__ == '__main__':
where = None
try:
import pyperclip
where = pyperclip.paste()
if where.startswith('file://'):
where = where[len('file://'):]
if not os.path.isdir(where):
where = None
except ImportError:
pass
if not where:
where = input('Where should the links be created?\n: ')
if not os.path.isdir(where):
os.makedirs(where)
utilities = tuple(os.path.abspath(x) for x in utilities)
os.chdir(where)
for utility in utilities:
print(f'Creating link for {utility}...')
run('ln', '-s', utility)
print('Done!')
| Python | 0 | |
7556fd9f55fe84a82a4843fb0ba43e7ad144e874 | Update tendrl_definitions.py | tendrl/node_agent/persistence/tendrl_definitions.py | tendrl/node_agent/persistence/tendrl_definitions.py | from tendrl.bridge_common.etcdobj.etcdobj import EtcdObj
from tendrl.bridge_common.etcdobj import fields
class TendrlDefinitions(EtcdObj):
"""A table of the Os, lazily updated
"""
__name__ = '/tendrl_definitions_node_agent'
data = fields.StrField("data")
| from tendrl.bridge_common.etcdobj.etcdobj import EtcdObj
from tendrl.bridge_common.etcdobj import fields
class TendrlDefinitions(EtcdObj):
"""A table of the Os, lazily updated
"""
__name__ = '/tendrl_definitions_node_agent'
data = fields.StrField("data")
def render(self):
self.__name__ = self.__name__ % self.node_uuid
return super(TendrlDefinitions, self).render()
| Python | 0 |
907fa0a42dd90ca67d86e61ce7984d5764455fb9 | add missing __init__.py | src/distribution/__init__.py | src/distribution/__init__.py | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# core.py - distutils functions for kaa packages
# -----------------------------------------------------------------------------
# $Id: distribution.py 2110 2006-11-29 00:41:31Z tack $
#
# -----------------------------------------------------------------------------
# Copyright (C) 2006 Dirk Meyer, Jason Tackaberry
#
# First Edition: Dirk Meyer <dmeyer@tzi.de>
# Maintainer: Dirk Meyer <dmeyer@tzi.de>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version
# 2.1 as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# -----------------------------------------------------------------------------
from core import *
| Python | 0.001057 | |
1dc8f8cb519f14b794f5bc698fe65c5944b35a20 | Add Beginning plug-in with basic checking | hairball/neu.py | hairball/neu.py | """This module provides plugins for NEU metrics."""
import kurt
from hairball.plugins import HairballPlugin
from PIL import Image
import os
class Variables(HairballPlugin):
"""Plugin that counts the number of variables in a project."""
def __init__(self):
super(Variables, self).__init__()
self.total = 0
def finalize(self):
"""Output the number of variables in the project."""
print("Number of variables: %i" % self.total)
def analyze(self, scratch):
"""Run and return the results of the Variables plugin."""
self.total = len(scratch.variables)
for sprite in scratch.sprites:
self.total += len(sprite.variables)
class Lists(HairballPlugin):
"""Plugin that counts the number of lists in a project."""
def __init__(self):
super(Lists, self).__init__()
self.total = 0
def finalize(self):
"""Output the number of lists in the project."""
print("Number of lists: %i" % self.total)
def analyze(self, scratch):
"""Run and return the results of the Lists plugin."""
self.total = len(scratch.lists)
for sprite in scratch.sprites:
self.total += len(sprite.lists)
class BlockCounts(HairballPlugin):
"""Plugin that keeps track of the number of blocks in a project."""
def __init__(self):
super(BlockCounts, self).__init__()
self.blocks = 0
def finalize(self):
"""Output the aggregate block count results."""
print("Number of blocks %i" % self.blocks)
def analyze(self, scratch):
"""Run and return the results from the BlockCounts plugin."""
for script in self.iter_scripts(scratch):
for block in self.iter_blocks(script.blocks):
self.blocks += 1
class Colors(HairballPlugin):
"""Plugin that keeps track of the colors of the stage images."""
def __init__(self):
self.colors ={}
def finalize(self):
"""Output the aggregate block count results."""
print self.colors
def compute_average_image_color(self, img):
"""
Compute the most frequent color in img.
Code adapted from
http://blog.zeevgilovitz.com/detecting-dominant-colours-in-python/
"""
image = Image.open(img)
w, h = image.size
pixels = image.getcolors(w * h)
most_frequent_pixel = pixels[0]
for count, colour in pixels:
if count > most_frequent_pixel[0]:
most_frequent_pixel = (count, colour)
rgb = []
for i in range(3):
rgb.append (most_frequent_pixel[1][i])
trgb = tuple(rgb)
trgb = '#%02x%02x%02x' % trgb #Transform rgb to Hex color (HTML)
return trgb
def analyze(self, scratch):
"""Run and return the results from the BlockCounts plugin."""
#ToDo: get the images from stage and characters
class Ending(HairballPlugin):
"""Plugin that checks if the project seems to end."""
def __init__(self):
super(Ending, self).__init__()
self.total = 0
def finalize(self):
"""Output whether the project seems to end or not."""
if self.total > 0:
print "The game seems to end at some point"
else:
print "The game seems to not ever end"
def analyze(self, scratch):
"""Run and return the results of the Ending plugin."""
for script in self.iter_scripts(scratch):
for name, _, _ in self.iter_blocks(script.blocks):
if name == "stop %s":
self.total
all_scripts = list(self.iter_scripts(scratch))
class Beginning(HairballPlugin):
"""
Plugin that checks if the project seems to show instructions or a menu
when the project is launched.
"""
def __init__(self):
super(Beginning, self).__init__()
self.backdropWhenGreenFlag = 0
self.spritesHidden = 0
self.spritesShown = 0
self.actions = []
def finalize(self):
"""Output whether the project seems to have beginning instructions"""
if (self.backdropWhenGreenFlag > 0
and self.spritesHidden > 0
and self.spritesShown >0
and len(self.actions) > 0):
print "The game seems to present instructions or a menu when launched"
else:
print "The game seems to NOT present instructions or a menu when launched"
def backdropGreenFlag (self, all_scripts):
#Check if a specific backdrop is displayed when green flag
backdropWhenGreenFlag = 0
for script in all_scripts:
if self.script_start_type(script) == self.HAT_GREEN_FLAG:
for name, _, _ in self.iter_blocks(script.blocks):
if name == 'switch backdrop to %s':
backdropWhenGreenFlag = 1
break
if backdropWhenGreenFlag == 1:
break
return backdropWhenGreenFlag
def analyze(self, scratch):
"""Run and return the results of the Beginning plugin."""
all_scripts = list(self.iter_scripts(scratch))
self.backdropWhenGreenFlag = self.backdropGreenFlag(all_scripts)
print self.backdropWhenGreenFlag
| Python | 0 | |
92f63d6ad055aa213b67ad2778187faee1fde821 | Add in printParents.py | printParents.py | printParents.py | from types import *
# https://stackoverflow.com/questions/2611892/get-python-class-parents
def printParents(thing, ident = 2):
'''
Print out all the parents (till the ancestors) of a given class / object.
@param indent: Print indentation
'''
typ = type(thing)
if typ is ClassType:
printClassParents(thing, 0)
elif typ is InstanceType:
print("Object: {}".format(thing))
printClassParents(thing.__class__, 0)
else:
print("'{}' - '{}'".format(thing, type))
print("I don't know your parents.")
def printClassParents(cls, level = 0, indent = 2):
thisLevel = ' ' * indent * level + "{} --> {{ {} }}".format(
cls, ', '.join(str(c) for c in cls.__bases__))
print(thisLevel)
for base in cls.__bases__:
printClassParents(base, level + 1)
if __name__ == '__main__':
import sys
def help(names):
print("Invalid arg: {}\nSyntax: modeul1.class1 module2.class2".format(names))
if len(sys.argv) > 1:
# input args: module1.class1 module2.class2 ...
# eg. printParents.py Tkinter.Frame Tkinker.Button
# https://stackoverflow.com/questions/4821104/python-dynamic-instantiation-from-string-name-of-a-class-in-dynamically-imported
for names in sys.argv[1:]:
mc = names.split('.')
if len(mc) == 2:
# price you pay when you go dynamic
try:
ctor = getattr(__import__(mc[0]), mc[1])
inst = ctor()
printParents(inst)
print('=' * 32)
except:
help(names)
else:
help(names)
else:
from ttk import *
button = Button()
printParents(button)
print('=' * 32)
printParents(Label)
print('=' * 32)
printParents(8)
| Python | 0.001401 | |
87565c1e6032bff2cc3e20f5c4f46b7a17977f7c | Add organisation for TFL Dial a Ride | migrations/versions/0098_tfl_dar.py | migrations/versions/0098_tfl_dar.py | """empty message
Revision ID: 0098_tfl_dar
Revises: 0097_notnull_inbound_provider
Create Date: 2017-06-05 16:15:17.744908
"""
# revision identifiers, used by Alembic.
revision = '0098_tfl_dar'
down_revision = '0097_notnull_inbound_provider'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
TFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'tfl_dar_x2.png',
''
)""".format(TFL_DAR_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(TFL_DAR_ID))
| Python | 0 | |
22585d29220709dc3a3de16b03c626ca27c715ca | Add migration version? Not sure if this is right | migrations/versions/3025c44bdb2_.py | migrations/versions/3025c44bdb2_.py | """empty message
Revision ID: 3025c44bdb2
Revises: None
Create Date: 2014-12-16 12:13:55.759378
"""
# revision identifiers, used by Alembic.
revision = '3025c44bdb2'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
| Python | 0.000006 | |
23b291f6952f8b6a9fade50fef98dbf9d8721862 | reorganize french corpus | reorganize_french_corpus.py | reorganize_french_corpus.py | import os
import re
import wave, struct
import librosa
import numpy as np
import subprocess
import shutil
new_sr = 22050
corpus = '/media/share/datasets/aligner_benchmarks/AlignerTestData/2_French_2000files'
#corpus = os.path.expanduser('~/dog_cat')
sorted_corpus = '/media/share/datasets/aligner_benchmarks/sorted_quebec_french'
#sorted_corpus = 'lizard'
confreeufr = []
confrefr = []
senf = []
filler = []
willf = []
willf2 = []
'''ampp = []
apchk = []
cas2 = []
cas4 = []
chess = []
enco = []
ersapro9 = []
fogea = []
give_prod = []
inc = []
incfast = []
mrbr = []
npgi = []
npgi2 = []
npgi4 = []
nvp2 = []
RFRcontour = []
rnrp = []
sco = []
scoinPro = []
scoinPro2 = []
socr = []
socrLo = []
syse6 = []
syse7 = []
syse8 = []
other = []
unused = []'''
for root, dirs, files in os.walk(corpus):
for f in files:
if os.path.exists(corpus + '/' + f):
if re.search('confreeufr', f):
confreeufr.append(f)
if re.search('confrefr', f):
confrefr.append(f)
elif re.search('filler', f):
filler.append(f)
elif re.search('willf', f):
willf.append(f)
elif re.search('willf2', f):
willf2.append(f)
elif re.search('senf', f):
senf.append(f)
'''if re.search('henrison', f):
henrison.append(f)
if re.search('ampp', f):
ampp.append(f)
elif re.search('apchk', f):
apchk.append(f)
elif re.search('cas2', f):
cas2.append(f)
elif re.search('cas4', f):
cas4.append(f)
elif re.search('chess', f):
chess.append(f)
elif re.search('enco', f):
enco.append(f)
elif re.search('ersapro9', f):
ersapro9.append(f)
elif re.search('fogea', f):
fogea.append(f)
elif re.search('give-prod', f):
give_prod.append(f)
elif re.search('inc', f):
inc.append(f)
elif re.search('incfast', f):
incfast.append(f)
elif re.search('mrbr', f):
mrbr.append(f)
elif re.search('npgi', f):
npgi.append(f)
elif re.search('npgi2', f):
npgi2.append(f)
elif re.search('npgi4', f):
npgi4.append(f)
elif re.search('nvp2', f):
nvp2.append(f)
elif re.search('RFRcontour', f):
RFRcontour.append(f)
elif re.search('rnrp', f):
rnrp.append(f)
elif re.search('sco', f):
sco.append(f)
elif re.search('scoinPro', f):
scoinPro.append(f)
elif re.search('scoinPro2', f):
scoinPro2.append(f)
elif re.search('socr', f):
socr.append(f)
elif re.search('socrLo', f):
socrLo.append(f)
elif re.search('syse6', f):
syse6.append(f)
elif re.search('syse7', f):
syse7.append(f)
elif re.search('syse8', f):
syse8.append(f)
else:
other.append(f)'''
subspeaker = []
subjectids = {}
experiments = [confreeufr, confrefr, filler, senf, willf, willf2]
#[ampp, apchk, cas2, cas4, chess, enco, ersapro9, fogea, give_prod, inc, incfast,
#mrbr, npgi, npgi2, npgi4, nvp2, RFRcontour, rnrp, sco, scoinPro, scoinPro2, socr, socrLo,
#syse6, syse7, syse8]
for experiment in experiments:
for i in experiment:
dog = i.split('_')
subid = dog[0] + '_' + dog[1]
if len(list(dog[2])) == 1:
cat = re.sub(i, '0' + dog[2] + '_' + dog[3], i)
elif len(list(dog[2])) == 2:
cat = re.sub(i, dog[2] + '_' + dog[3], i)
if subid not in subjectids:
subjectids[subid] = [(i, cat)]
else:
subjectids[subid].append((i, cat))
'''for i in other:
dog = i.split('.')
subid = dog[0]
cat = '01_1.' + dog[1]
if subid not in subjectids:
subjectids[subid] = [(i, cat)]
else:
subjectids[subid].append((i, cat))'''
if not os.path.exists(sorted_corpus):
os.makedirs(sorted_corpus)
for i in subjectids.keys():
if not os.path.exists(sorted_corpus + '/' + i):
os.makedirs(sorted_corpus + '/' + i)
for j in subjectids[i]:
try:
shutil.copy(corpus + '/' + j[0], sorted_corpus + '/' + i)
#os.rename(corpus + '/' + j[0], '/media/share/datasets/aligner_benchmarks/sorted_quebec_french/' + i + '/' + j[1])
except:
pass
'''for root, dirs, files in os.walk(corpus):
for f in files:
filepath = os.path.join(root, f)
subprocess.call(['sox', filepath.replace('\\','/'), filepath.replace('\\','/'),
'gain', '-1', 'rate', '-I', str(new_sr)]'''
'''for root, dirs, files in os.walk(corpus):
print (root)
print (dirs, 1)
for f in files:
d = os.path.basename(root)
print(d + '/' + f)
if d != '.DS_Store' and f != '.DS_Store' and f != 'confre_eu_FR.txt':
os.rename(corpus + '/' + d + '/' + f, corpus + '/' + d + '/' + d + '_' + f)'''
| Python | 0.999985 | |
1938698ba018df7130acd2f955d43fbe05fc574b | Add donut example | examples/demo/donut.py | examples/demo/donut.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 60
"""
Points revolving around circles (revolving in a circle) to create a torus.
"""
import numpy as np
from vispy import gloo
from vispy import app
from vispy.gloo import gl
from vispy.util.transforms import perspective, translate, rotate
# points and offsets
n, p = 60, 15
do = 2 * np.pi / 600.
dt = -2 * np.pi / 900.
theta = np.linspace(0, 2 * np.pi, n, endpoint=False)
omega = np.linspace(0, 2 * np.pi, p, endpoint=False)
theta = np.tile(theta[:, np.newaxis], (1, p)).ravel()
omega = np.tile(omega[np.newaxis, :], (n, 1)).ravel()
# colors (offset by one in adjacent "columns" of the torus)
ref = 0.6
dc = 2 * np.pi / 6
color = np.linspace(0, 2 * np.pi, p, endpoint=False)
color = np.array([(1-ref) * np.sin(color) + ref,
(1-ref) * np.sin(color + dc) + ref,
(1-ref) * np.sin(color + 2 * dc) + ref,
np.ones(color.shape)], dtype=np.float32).T
idx = np.arange(n * (p + 1)) % p
idx = np.reshape(idx, (n, p + 1))[:, :-1].ravel()
color = color[idx, :]
u_size = 10
u_amt = 0.25
data = np.zeros(n * p, [('a_omega', np.float32, 1),
('a_theta', np.float32, 1),
('a_color', np.float32, 4)])
data['a_color'] = color
data['a_omega'] = omega
data['a_theta'] = theta
VERT_SHADER = """
// Uniforms
// --------
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform float u_size;
uniform float u_amt;
// Attributes
// ----------
attribute float a_omega;
attribute float a_theta;
attribute vec4 a_color;
attribute mat4 a_model;
// Varyings
// --------
varying vec4 v_color;
void main (void) {
v_color = a_color;
float radius = (1 - u_amt) + (u_amt * cos(a_omega));
float x = radius * cos(a_theta);
float y = radius * sin(a_theta);
float z = u_amt * sin(a_omega);
gl_Position = u_projection * u_view * u_model
* vec4(x, y, z, 1.0);
gl_PointSize = u_size;
}
"""
FRAG_SHADER = """
// Varyings
// ------------------------------------
varying vec4 v_color;
// Main
// ------------------------------------
void main()
{
float d = 2*(length(gl_PointCoord.xy - vec2(0.5,0.5)));
gl_FragColor = vec4(v_color.rgb, v_color.a*(1-d));
}
"""
class Canvas(app.Canvas):
    """Vispy canvas animating the torus of points defined at module level."""
    def __init__(self, **kwargs):
        # Compile the shaders and set up model/view/projection matrices.
        self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
        self.view = np.eye(4, dtype=np.float32)
        self.model = np.eye(4, dtype=np.float32)
        rotate(self.model, -60, 1, 0, 0)
        self.projection = np.eye(4, dtype=np.float32)
        # Camera distance along z; adjusted by the mouse wheel (clamped 2..10).
        self.translate = 3.5
        translate(self.view, 0, 0, -self.translate)
        # Upload the module-level vertex records and bind the uniforms.
        self.vbo = gloo.VertexBuffer(data)
        self.program.set_vars(self.vbo)
        self.program['u_model'] = self.model
        self.program['u_view'] = self.view
        self.program['u_size'] = u_size
        self.program['u_amt'] = u_amt
        # Timer driving the animation; each tick triggers a repaint.
        self.timer = app.Timer(1.0 / 400)
        self.timer.connect(self.on_timer)
        self.timer.start()
        # Initialize the canvas for real
        app.Canvas.__init__(self, **kwargs)
    def on_initialize(self, event):
        """Set up GL state: black background, no depth test, alpha blending."""
        gl.glClearColor(0, 0, 0, 1)
        gl.glDisable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    def on_key_press(self, event):
        """Toggle the animation with the space bar."""
        if event.text == ' ':
            if self.timer.running:
                self.timer.stop()
            else:
                self.timer.start()
    def on_timer(self, event):
        # Schedule a repaint; the angle update happens in on_paint.
        self.update()
    def on_resize(self, event):
        """Keep the viewport and perspective matrix in sync with the window."""
        width, height = event.size
        gl.glViewport(0, 0, width, height)
        self.projection = perspective(45.0, width / float(height), 1.0, 1000.0)
        self.program['u_projection'] = self.projection
    def on_mouse_wheel(self, event):
        """Zoom by moving the camera along z, clamped to [2, 10]."""
        self.translate += event.delta[1]
        self.translate = min(max(2, self.translate), 10)
        self.view = np.eye(4, dtype=np.float32)
        translate(self.view, 0, 0, -self.translate)
        self.program['u_view'] = self.view
        self.update()
    def on_paint(self, event):
        """Advance both angles, re-upload the vertex data, and draw."""
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        # update angles
        data['a_omega'] += do
        data['a_theta'] += dt
        # prevent accumulation
        data['a_omega'] %= 2 * np.pi
        data['a_theta'] %= 2 * np.pi
        self.vbo.set_data(data)
        self.program.draw(gl.GL_POINTS)
if __name__ == '__main__':
    # Open the window and start the vispy event loop.
    c = Canvas(show=True, size=(600, 600),
               title="Atom [zoom with mouse scroll]")
    # c.show()
    app.run()
| Python | 0 | |
e21657f377cab4319c1a3ce6fedc76d15c8e6c0a | UVA 11777 Automate the grades | cp-book/ch2/lineards/collections/_11777_AutomateTheGrades.py | cp-book/ch2/lineards/collections/_11777_AutomateTheGrades.py | # Problem name: 11777 Automate the Grades
# Problem url: https://uva.onlinejudge.org/external/117/11777.pdf
# Author: Andrey Yemelyanov
import sys
import math
def readline():
    """Read one line from standard input with surrounding whitespace removed."""
    raw = sys.stdin.readline()
    return raw.strip()
def main():
    """Read the test-case count, then grade one whitespace-separated line each.

    Each line holds seven ints: term1 term2 final attendance ct1 ct2 ct3.
    """
    n_tests = int(readline())
    for test in range(n_tests):
        print("Case {}: {}".format((test + 1), get_final_grade(*[int(x) for x in readline().split()])))
def get_final_grade(term1, term2, final, attendance, class_test1, class_test2, class_test3):
    """Sum every score component and map the total to a letter grade."""
    best_two_avg = get_class_test_grade(class_test1, class_test2, class_test3)
    total = term1 + term2 + final + attendance + best_two_avg
    return get_letter_grade(total)
def get_class_test_grade(class_test1, class_test2, class_test3):
    """Return the average of the two highest of the three class-test scores."""
    top_two = sorted([class_test1, class_test2, class_test3])[1:]
    return (top_two[0] + top_two[1]) / 2
def get_letter_grade(total_grade):
    """Map a numeric total onto the standard 60/70/80/90 letter-grade scale."""
    for cutoff, letter in ((90, "A"), (80, "B"), (70, "C"), (60, "D")):
        if total_grade >= cutoff:
            return letter
    return "F"
# Script entry point.
if __name__=="__main__":
    main()
| Python | 0.999999 | |
64c70f3f73d14d5bdd18cf5c4ad8b15ec745f517 | Add helpful script for ascii checking - fyi @bruskiza | config/check_ascii.py | config/check_ascii.py | import json
# Translation files to scan for non-ASCII characters.
files = ["go-ussd_public.ibo_NG.json"]
def is_ascii(s):
    """Return True when every character of *s* is in the 7-bit ASCII range."""
    return not any(ord(ch) > 127 for ch in s)
# NOTE(review): current_message_id is assigned but never used — confirm it can go.
current_message_id = 0
# Scan each translation file and report any non-ASCII translated string.
for file_name in files:
    json_file = open(file_name, "rU").read()
    json_data = json.loads(json_file)
    print "Proccessing %s\n-------" % file_name
    for key, value in json_data.items():
        # Ignore non-content keys and empty keys
        if len(value) == 2:
            # value[1] is assumed to be the translated text — TODO confirm schema
            if not is_ascii(value[1]):
                print ("Non-ascii translation found of <%s>: %s" % (key, value[1]))
    print "Done Proccessing %s\n-------" % file_name
| Python | 0 | |
289ce4a720c5863f6a80e1b86083fd2919b52f14 | Add file for tests of start symbol as not nonterminal | tests/startsymbol_tests/NotNonterminalTest.py | tests/startsymbol_tests/NotNonterminalTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:12
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NotNonterminalTest(TestCase):
    """Placeholder for tests of a start symbol that is not a nonterminal."""
    pass
# Run the test suite when executed directly.
if __name__ == '__main__':
    main()
| Python | 0.000001 | |
764f02e5e8c53b47cb2e28375a049accba442f0c | Create __init__.py | app/__init__.py | app/__init__.py | # -*- encoding: utf-8 -*-
# app/__init__.py
| Python | 0.000429 | |
4215862d899c81f3421bd8607dc59500e41e2348 | make enemy present enemy -bat and fireball- in game | text_materials_test/pygame/airplane_game/4_enemy.py | text_materials_test/pygame/airplane_game/4_enemy.py | # https://blog.naver.com/PostList.nhn?from=postList&blogId=samsjang&categoryNo=80¤tPage=10
# revision Jaehyeong Kim
import pygame
import sys
import random
import time
from pygame.locals import *
# Constants
# Frames per second
FPS = 30
# Window size; height is kept at half the width (fixed 2:1 aspect ratio)
WINDOWWIDTH = 1080
WINDOWHEIGHT = int(WINDOWWIDTH / 2)
# Maximum (original) background image size
ORIGINBACKGROUNDWIDTH = 1280
ORIGINBACKGROUNDHEIGHT = 640
# Sprite speeds (pixels per frame)
BACKGROUNDSPEED = 2
BATSPEED = 7
FIREBALLSPEED = 15
# Bat respawn delay in seconds
BATTIME = 8
# Colors
WHITE = (255, 255, 255)
def init_enemy(image):
    """Spawn an enemy just past the right edge at a random vertical position."""
    spawn_x = WINDOWWIDTH
    spawn_y = random.randrange(0, WINDOWHEIGHT - image.get_height())
    return spawn_x, spawn_y
def draw_object(image, x, y):
    """Blit *image* onto the shared display surface at (x, y)."""
    # DISPLAYSURF is only read here, so no `global` declaration is needed.
    DISPLAYSURF.blit(image, (x, y))
def main():
    """Run the game loop: handle input, scroll the background, move enemies."""
    global FPSCLOCK, DISPLAYSURF
    global IMAGESDICT
    # Initial airplane position (near the lower-left of the window)
    airplane_x = WINDOWWIDTH * 0.05
    airplane_y = WINDOWHEIGHT * 0.8
    airplane_y_change = 0
    airplane_x_change = 0
    # Airplane sprite size
    AIRPLANEWIDTH = IMAGESDICT["airplane"].get_width()
    AIRPLANEHEIGHT = IMAGESDICT["airplane"].get_height()
    # Background width after rescaling to the window
    BACKGROUNDWIDTH = IMAGESDICT["background"].get_width()
    # Initial positions of the two scrolling background copies
    background_x = 0
    other_background_x = BACKGROUNDWIDTH
    # Initial bat position
    bat_x, bat_y = init_enemy(IMAGESDICT["bat"])
    # Timestamp used to throttle bat respawns
    bat_remove_time = 0
    # Fireball setup; a fireball appears with probability 2/7 (choices 1 and 2)
    fireball_choice = random.randint(1, 7)
    if fireball_choice == 1 or fireball_choice == 2:
        fireball_x, fireball_y = init_enemy(IMAGESDICT["fireball%s" % fireball_choice])
    else:
        fireball_x, fireball_y = WINDOWWIDTH, 0
    # game loop
    while True:
        # event handling
        for event in pygame.event.get():
            # Quit on window close or ESC
            if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_UP:
                    airplane_y_change = -5
                elif event.key == K_DOWN:
                    airplane_y_change = 5
                if event.key == K_RIGHT:
                    airplane_x_change = 5
                elif event.key == K_LEFT:
                    airplane_x_change = -5
            if event.type == KEYUP:
                if event.key == K_UP or event.key == K_DOWN:
                    airplane_y_change = 0
                elif event.key == K_RIGHT or event.key == K_LEFT:
                    airplane_x_change = 0
        # Move the airplane per the latest input and clamp it to the window
        airplane_y += airplane_y_change
        if airplane_y < 0:
            airplane_y = 0
        elif airplane_y > WINDOWHEIGHT - AIRPLANEHEIGHT:
            airplane_y = WINDOWHEIGHT - AIRPLANEHEIGHT
        airplane_x += airplane_x_change
        if airplane_x < 0:
            airplane_x = 0
        elif airplane_x > WINDOWWIDTH - AIRPLANEWIDTH:
            airplane_x = WINDOWWIDTH - AIRPLANEWIDTH
        # Scroll the two background copies to fake an endless background
        background_x -= BACKGROUNDSPEED
        if background_x == -BACKGROUNDWIDTH:
            background_x = BACKGROUNDWIDTH
        draw_object(IMAGESDICT["background"], background_x, 0)
        other_background_x -= BACKGROUNDSPEED
        if other_background_x == -BACKGROUNDWIDTH:
            other_background_x = BACKGROUNDWIDTH
        draw_object(IMAGESDICT["background"], other_background_x, 0)
        # Move the bat; respawn it once off-screen and BATTIME seconds elapsed
        bat_x -= BATSPEED
        if bat_x <= 0 and BATTIME <= time.time()-bat_remove_time:
            bat_remove_time = time.time()
            bat_x, bat_y = init_enemy(IMAGESDICT["bat"])
        # Move the fireball (the "no fireball" choices fly off-screen twice as fast)
        if fireball_choice == 1 or fireball_choice == 2:
            fireball_x -= FIREBALLSPEED
        else:
            fireball_x -= 2 * FIREBALLSPEED
        if fireball_x <= 0:
            fireball_choice = random.randint(1, 7)
            if fireball_choice == 1 or fireball_choice == 2:
                fireball_x, fireball_y = init_enemy(IMAGESDICT["fireball%s" % fireball_choice])
            else:
                fireball_x, fireball_y = WINDOWWIDTH, 0
        # Draw the remaining sprites
        draw_object(IMAGESDICT["airplane"], airplane_x, airplane_y)
        draw_object(IMAGESDICT["bat"], bat_x, bat_y)
        if fireball_choice == 1 or fireball_choice == 2:
            draw_object(IMAGESDICT["fireball%s" % fireball_choice], fireball_x, fireball_y)
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def game_init():
    """Initialise pygame, the window, and the sprite images, then run main()."""
    global FPSCLOCK, DISPLAYSURF
    global IMAGESDICT
    FPSCLOCK = pygame.time.Clock()
    pygame.init()
    # Create the DISPLAY surface
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    pygame.display.set_caption('PyFlying')
    # Load the sprite images
    IMAGESDICT = {"airplane": pygame.image.load('images/plane.png'),
                  "background": pygame.image.load('images/background.png'),
                  "bat": pygame.image.load('images/bat.png'),
                  "fireball1": pygame.image.load('images/fireball.png'),
                  "fireball2": pygame.image.load('images/fireball2.png')}
    # Scale the background image to fit the game window
    # (the assert message below reads: "the game window is too large")
    assert WINDOWWIDTH <= ORIGINBACKGROUNDWIDTH or WINDOWHEIGHT <= ORIGINBACKGROUNDHEIGHT,\
        '게임 윈도우 크기가 너무 큽니다.'
    IMAGESDICT["background"] = pygame.transform.scale(IMAGESDICT["background"], (WINDOWWIDTH, WINDOWHEIGHT))
    main()
# Script entry point.
if __name__ == '__main__':
    game_init()
| Python | 0.000055 | |
d9291843d575e587efdd7aa0c4605fee766dc232 | clean up test queries | examples/test_query.py | examples/test_query.py | from raco import RACompiler
from raco.language import CCAlgebra, MyriaAlgebra
from raco.algebra import LogicalAlgebra
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
def testEmit(query, name):
    """Compile a Datalog *query* to C and write the source to '<name>.c'."""
    LOG.info("compiling %s", query)
    compiler = RACompiler()
    compiler.fromDatalog(query)
    LOG.info("logical: %s", compiler.logicalplan)
    compiler.optimize(target=CCAlgebra, eliminate_common_subexpressions=False)
    LOG.info("physical: %s", compiler.physicalplan[0][1])
    # Generate code in the target language and dump it next to the script.
    code = compiler.compile()
    with open(name + '.c', 'w') as out:
        out.write(code)
# (datalog query, output base name) pairs; each pair compiles to "<name>.c".
queries = [
    ("A(s1) :- T(s1)", "scan"),
    ("A(s1) :- T(s1), s>10", "select"),
    ("A(s1) :- T(s1), s>0, s<10", "select_conjunction"),
    ("A(s1,s2) :- T(s1,s2), s>10, s2>10", "two_var_select"),
    ("A(s1,o2) :- T(s1,p1,o1), R(o2,p1,o2)", "join"),
    ("A(a,b,c) :- R(a,b), S(b,c)", "two_path"),
    ("A(a,c) :- R(a,b), S(b,c)", "two_hop"),
    ("A(a,b,c) :- R(a,b), S(b,c), T(c,d)", "three_path"),
    ("A(a,b,c) :- R(a,b), S(b,c), T(c,a)", "directed_triangles"),
    ("A(s1,s2,s3) :- T(s1,s2,s3), R(s3,s4), s1<s2, s4<100", "select_then_join"),
    #("A(a,b,c) :- R(a,b), S(b,c), T(c,a), a<b, b<c", "increasing_triangles"),
    #("A(s1,s2,s3) :- T(s1,s2,s3), R(s3,s4), s1<s4", "equi_and_range"),
    #("A(s1,s2,s3) :- T(s1,s2),R(s3,s4), s1<s3", "range_join"),
    #("A(a,b,c,d,e):-X(a,b),Y(a,c),Z(a,d,e),T(a,b),K(b,a)", "complex_joins"),
    ]
# Compile every query in the list above.
for q in queries:
    query, name = q
    testEmit(query, name)
| Python | 0.000007 | |
3aea50fd086975cfdcc6a337b2ff5a6cace8ce95 | Create kuubio.py | kuubio.py | kuubio.py | from keras_diagram import ascii
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.models import model_from_json
from load_data import *
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('bmh')
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import spacy as sp
import ascify as asc
def vectorize_text(data):
    '''
    OUTPUT: a list of word-vector arrays, one per input string
    USE EXAMPLE: vectors = vectorize_text(list_with_strings)
    '''
    nlp = sp.load('en')
    vectors = []
    for entry in data:
        # Force to ASCII, then feed the unicode text through the spaCy pipeline.
        ascii_text = asc.Ascify(str(entry)).ascify()
        doc = nlp(unicode(ascii_text))
        vectors.append(doc.vector)
    return vectors
def _transform_data(x_text, y_var, data):
    """Build model inputs: word vectors for column *x_text* and a 0/1 target.

    The target column *y_var* is binarized against its own mean (1 when the
    value is at or above the mean, else 0).

    Returns:
        (df_x, df_y): two pandas DataFrames of equal length.

    Raises:
        ValueError: if the vectorized x and the target y end up with
            different lengths. (The original code swallowed this with a bare
            ``except`` and then crashed with a NameError on the return.)
    """
    x = vectorize_text(data[x_text])
    y = [data[y_var]]
    if len(y) == 1:
        # Transpose the single column into one row per observation.
        # list(...) keeps this working on both Python 2 and 3.
        y = list(map(list, zip(*y)))
    if len(y) != len(x):
        raise ValueError(
            "x and y lengths differ: %d != %d" % (len(x), len(y)))
    df_x = pd.DataFrame(x)
    df_y = pd.DataFrame(y)
    # Binarize: 1 when the value is at or above the column mean, else 0.
    df_y = pd.DataFrame(df_y[0] >= df_y[0].mean()).astype(int)
    #df_y = pd.DataFrame(df_y[0] >= 0.2).astype(int)  # alternative fixed threshold
    return df_x, df_y
def _load_model(filename):
    """Rebuild a Keras model from a JSON architecture file plus saved weights."""
    with open(filename, 'r') as json_file:
        architecture = json_file.read()
    loaded_model = model_from_json(architecture)
    # load weights into new model
    # NOTE(review): weights always come from 'model.h5', never from *filename*
    # — confirm that is intended.
    loaded_model.load_weights("model.h5")
    print("Loaded model from disk")
    return loaded_model
def kuubio(X,Y,data,dims=8,epoch=5,model='model',save_model=False):
    """Train a small dense binary classifier on vectorized text and plot history."""
    # NOTE(review): the 'data' parameter is never used; the module-level
    # 'tweets' object is passed instead — confirm that is intended.
    X,Y = _transform_data(X,Y,tweets)
    '''
    NOTE: 1) the data has to be in float or something that
    goes nicely in to 'float32'
    2) the data has to be in pandas dataframe
    with no column names (other than int)
    '''
    # NOTE(review): any model loaded here is discarded by the unconditional
    # `model = Sequential()` below — confirm that is intended.
    if model != 'model':
        model = _load_model(model)
    np.random.seed(7)
    X = X.astype('float32')
    Y = Y.astype('float32')
    X = np.array(X)
    Y = np.array(Y)
    # Keep only the first `dims` feature columns.
    X = X[:,0:dims]
    #Y = Y[:,8]
    # Three-layer dense network with a sigmoid output for binary classification.
    model = Sequential()
    model.add(Dense(dims+4, input_dim=dims, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
            optimizer='adam',
            metrics=['accuracy'])
    history = model.fit(X, Y, epochs=epoch, batch_size=10)
    scores = model.evaluate(X, Y)
    #print(history.history.keys())
    print ""
    print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
    print ""
    print(ascii(model))
    # Side-by-side accuracy and loss curves over the training epochs.
    fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
    ax1.plot(history.history['acc'])
    ax2.plot(history.history['loss'])
    ax1.set_title('model accuracy')
    ax1.set_xlabel('epoch')
    ax1.set_ylabel('accuracy')
    ax2.set_title('model loss')
    ax2.set_xlabel('epoch')
    ax2.set_ylabel('loss')
    fig.set_size_inches(12,3)
    # Optionally persist the architecture (JSON) and the weights (HDF5).
    if save_model == True:
        model_json = model.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model_json)
        model.save_weights("model.h5")
        print("Saved model to disk")
    fig.show()
| Python | 0.000001 | |
acfa4877ac50a3895cc9f9cb2e349f948d4b8001 | add a script to fetch official hero data from battle.net | bin/get_official_heroes.py | bin/get_official_heroes.py | import sys
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
"""
The official heroes listing on battle.net is populated by a list of
Objects defined in JS (window.heroes). This script fetches the full
list and outputs a list of tuples relating official hero names to the
battle.net slugs.
To run this script, you should install phantomjs in addition to the
import dependencies.
"""
def get_heroes_data():
    """Scrape battle.net's heroes page and return the raw `window.heroes` list."""
    # PhantomJS is headless, so no GUI window is opened.
    driver = webdriver.PhantomJS()
    driver.get("http://us.battle.net/heroes/en/heroes/#/")
    heroes = driver.execute_script("return window.heroes;")
    driver.quit()
    return heroes
def main():
    """Print (name, slug) pairs for every hero listed on battle.net."""
    pairs = [(hero['name'], hero['slug']) for hero in get_heroes_data()]
    print(pairs)
# Script entry point.
if __name__ == "__main__":
    main()
| Python | 0 | |
381c2537eff5003758d552281edfd885ee40ab80 | Add migrations | sideloader/migrations/0003_auto_20141203_1708.py | sideloader/migrations/0003_auto_20141203_1708.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sideloader', '0002_auto_20141203_1611'),
]
operations = [
migrations.AlterField(
model_name='project',
name='build_script',
field=models.CharField(default=b'', max_length=255, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='project',
name='package_name',
field=models.CharField(default=b'', max_length=255, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='project',
name='postinstall_script',
field=models.CharField(default=b'', max_length=255, blank=True),
preserve_default=True,
),
]
| Python | 0.000001 | |
f405829f9f4bed9c833f7e25dc97610e34b5dd71 | Add JSONField tests | cornflake/tests/fields/test_json_field.py | cornflake/tests/fields/test_json_field.py | import pytest
from cornflake.fields import JSONField
@pytest.mark.parametrize('value,expected', [
    (True, True),
    (False, False),
    (123, 123),
    (123.456, 123.456),
    ('foo', 'foo'),
    (['foo', 'bar'], ['foo', 'bar']),
    ({'foo': 'bar'}, {'foo': 'bar'})
])
def test_to_representation(value, expected):
    """JSONField passes JSON-compatible values through unchanged."""
    field = JSONField()
    assert field.to_representation(value) == expected
@pytest.mark.parametrize('data,expected', [
    (True, True),
    (False, False),
    (123, 123),
    (123.456, 123.456),
    ('foo', 'foo'),
    (['foo', 'bar'], ['foo', 'bar']),
    ({'foo': 'bar'}, {'foo': 'bar'})
])
def test_to_internal_value(data, expected):
    """JSONField accepts JSON-compatible input values unchanged."""
    field = JSONField()
    assert field.to_internal_value(data) == expected
| Python | 0.000001 | |
8df06647abc7e5125e88af68000f04ac9eca3290 | add missing file | quadpy/_exception.py | quadpy/_exception.py | class QuadpyError(Exception):
pass
| Python | 0.000003 | |
f641c8aa8e2eb5d98a90a10813fae6af4b136133 | Add command that reindexes all tenants in parallel | bluebottle/clients/management/commands/reindex.py | bluebottle/clients/management/commands/reindex.py | from optparse import make_option
import subprocess
from multiprocessing import Pool
from bluebottle.common.management.commands.base import Command as BaseCommand
from bluebottle.clients.models import Client
def reindex(schema_name):
    """Rebuild the search index for one tenant schema.

    Returns:
        (schema_name, exit_status): the tenant name and the subprocess's
        exit status, so the caller can report which tenants failed.
    """
    print(f'reindexing tenant {schema_name}')
    # Pass argv as a list with no shell so a schema name containing shell
    # metacharacters cannot inject extra commands (schema names come from
    # the database, not from this process).
    status = subprocess.call([
        './manage.py', 'tenant_command', '-s', schema_name,
        'search_index', '--rebuild', '-f',
    ])
    return (schema_name, status)
class Command(BaseCommand):
    """Management command that reindexes every tenant in a process pool."""
    help = 'Reindex all tenants'
    option_list = BaseCommand.options + (
        make_option(
            '--processes',
            default=8,
            help='How many processes run in parallel'
        ),
    )
    def handle(self, *args, **options):
        """Fan reindex() out over all tenants and report non-zero exits."""
        # NOTE(review): optparse passes CLI values as strings; a user-supplied
        # --processes would reach Pool as a str — confirm this is handled upstream.
        pool = Pool(processes=options['processes'])
        tasks = [pool.apply_async(reindex, args=[str(tenant.schema_name)]) for tenant in Client.objects.all()]
        # .get() blocks until each worker finishes.
        results = [result.get() for result in tasks]
        for tenant, result in results:
            if result != 0:
                print(f'Tenant failed to index: {tenant}')
        pool.close()
| Python | 0.000001 | |
1bfc53f5645d6dc7dbbdd020f23e86bebfdc2fc9 | Add quick.py (quicksort) | python/quick.py | python/quick.py | #!/usr/bin/env python3
def main():
    """Read one integer per line from the file named in argv[1], sort, print."""
    # Fix: the original file never imports sys anywhere, so sys.argv raised
    # a NameError at runtime; import it locally here.
    import sys
    arr = []
    fname = sys.argv[1]
    with open(fname, 'r') as f:
        for line in f:
            arr.append(int(line.rstrip('\r\n')))
    quicksort(arr, start=0, end=len(arr)-1)
    print('Sorted list is: ', arr)
    return
def quicksort(arr, start, end):
    """Sort arr[start:end+1] in place using Lomuto-style partitioning.

    The pivot is arr[start]; `end` is inclusive. Returns 0 for empty/single
    ranges (kept from the original), otherwise None.
    """
    if end - start < 1:
        return 0
    b = start + 1
    # Fix: the original looped range(start+1, end), which never examined
    # arr[end], so the last element of every range was left unpartitioned
    # (e.g. [2, 1] came back unsorted).
    for i in range(start+1, end+1):
        if arr[i] <= arr[start]:
            arr[b], arr[i] = arr[i], arr[b]
            b += 1
    # Move the pivot into its final position at index b-1.
    arr[start], arr[b-1] = arr[b-1], arr[start]
    # Fix: recurse on (start, b-2), excluding the pivot. Recursing on
    # (start, b-1) as before can repeat the full range when the pivot is the
    # maximum (e.g. all-equal input), causing infinite recursion.
    quicksort(arr, start, b-2)
    quicksort(arr, b, end)
# Script entry point.
if __name__ == '__main__':
    main()
| Python | 0.000001 | |
a493352e5d7e92303a571c2a7d4bcb4e6c76a90c | add qvm-ls | qubesmgmt/tests/tools/qvm_ls.py | qubesmgmt/tests/tools/qvm_ls.py | # pylint: disable=protected-access,pointless-statement
#
# The Qubes OS Project, https://www.qubesmgmt-os.org/
#
# Copyright (C) 2015 Joanna Rutkowska <joanna@invisiblethingslab.com>
# Copyright (C) 2015 Wojtek Porczyk <woju@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import unittest
import qubesmgmt
import qubesmgmt.vm
import qubesmgmt.tools.qvm_ls
import qubesmgmt.tests
import qubesmgmt.tests.tools
from qubesmgmt.tests import TestVM, TestVMCollection
class TestApp(object):
    """Minimal stand-in for the Qubes app, exposing a fixed domain collection."""
    def __init__(self):
        # Two canned VMs: dom0 plus one ordinary test VM.
        self.domains = TestVMCollection(
            [
                ('dom0', TestVM('dom0')),
                ('test-vm', TestVM('test-vm')),
            ]
        )
class TC_00_Column(qubesmgmt.tests.QubesTestCase):
    """Tests for the qvm-ls Column class."""
    def test_100_init(self):
        """A new Column keeps the name it was constructed with as ls_head."""
        try:
            testcolumn = qubesmgmt.tools.qvm_ls.Column('TESTCOLUMN')
            self.assertEqual(testcolumn.ls_head, 'TESTCOLUMN')
        finally:
            # NOTE(review): this lookup is a no-op cleanup — it presumably was
            # meant to `del` the registered column; confirm and fix upstream.
            try:
                qubesmgmt.tools.qvm_ls.Column.columns['TESTCOLUMN']
            except KeyError:
                pass
class TC_10_globals(qubesmgmt.tests.QubesTestCase):
    """Tests for module-level helpers of qvm-ls."""
    def test_100_simple_flag(self):
        """simple_flag builds a predicate reading the named VM property."""
        flag = qubesmgmt.tools.qvm_ls.simple_flag(1, 'T', 'internal')
        # TODO after serious testing of QubesVM and Qubes app, this should be
        # using normal components
        vm = TestVM('test-vm', internal=False)
        self.assertFalse(flag(None, vm))
        vm.internal = True
        self.assertTrue(flag(None, vm))
    @unittest.skip('column list generated dynamically')
    def test_900_formats_columns(self):
        """Every column named in a format spec must be a registered Column."""
        for fmt in qubesmgmt.tools.qvm_ls.formats:
            for col in qubesmgmt.tools.qvm_ls.formats[fmt]:
                self.assertIn(col.upper(), qubesmgmt.tools.qvm_ls.Column.columns)
class TC_50_List(qubesmgmt.tests.QubesTestCase):
    """End-to-end listing test against the in-memory TestApp (no qubesd)."""
    def test_100_list_with_status(self):
        """qvm-ls prints one aligned row per VM with status flags and label."""
        app = TestApp()
        app.domains['test-vm'].internal = False
        app.domains['test-vm'].updateable = False
        app.domains['test-vm'].template = TestVM('template')
        app.domains['test-vm'].netvm = TestVM('sys-net')
        app.domains['test-vm'].label = 'green'
        app.domains['dom0'].label = 'black'
        with qubesmgmt.tests.tools.StdoutBuffer() as stdout:
            qubesmgmt.tools.qvm_ls.main([], app=app)
        self.assertEqual(stdout.getvalue(),
            'NAME STATUS LABEL TEMPLATE NETVM\n'
            'dom0 -r------ black - -\n'
            'test-vm -r------ green template sys-net\n')
class TC_90_List_with_qubesd_calls(qubesmgmt.tests.QubesTestCase):
    """Listing test driven through mocked qubesd mgmt API calls."""
    def test_100_list_with_status(self):
        """qvm-ls issues the expected mgmt.* calls and renders all three VMs."""
        # Canned responses for the VM and label enumeration calls.
        self.app.expected_calls[
            ('dom0', 'mgmt.vm.List', None, None)] = \
            b'0\x00vm1 class=AppVM state=Running\n' \
            b'template1 class=TemplateVM state=Halted\n' \
            b'sys-net class=AppVM state=Running\n'
        self.app.expected_calls[
            ('dom0', 'mgmt.label.List', None, None)] = \
            b'0\x00red\nblack\ngreen\nblue\n'
        self.app.expected_calls[
            ('vm1', 'mgmt.vm.List', None, None)] = \
            b'0\x00vm1 class=AppVM state=Running\n'
        self.app.expected_calls[
            ('sys-net', 'mgmt.vm.List', None, None)] = \
            b'0\x00sys-net class=AppVM state=Running\n'
        self.app.expected_calls[
            ('template1', 'mgmt.vm.List', None, None)] = \
            b'0\x00template1 class=TemplateVM state=Halted\n'
        # Baseline property answers for vm1; mutated below for the other VMs.
        props = {
            'label': b'type=label green',
            'template': b'type=vm template1',
            'netvm': b'type=vm sys-net',
            'updateable': b'type=bool False',
            'provides_network': b'type=bool False',
            'hvm': b'type=bool False',
            'installed_by_rpm': b'type=bool False',
            'internal': b'type=bool False',
            'debug': b'type=bool False',
            'autostart': b'type=bool False',
        }
        for key, value in props.items():
            self.app.expected_calls[
                ('vm1', 'mgmt.vm.property.Get', key, None)] = \
                b'0\x00default=True ' + value
        # setup template1
        props['label'] = b'type=label black'
        props['updateable'] = b'type=bool True'
        for key, value in props.items():
            self.app.expected_calls[
                ('template1', 'mgmt.vm.property.Get', key, None)] = \
                b'0\x00default=True ' + value
        self.app.expected_calls[
            ('template1', 'mgmt.vm.property.Get', 'template', None)] = \
            b''  # request refused - no such property
        # setup sys-net
        props['label'] = b'type=label red'
        props['provides_network'] = b'type=bool True'
        props['updateable'] = b'type=bool False'
        for key, value in props.items():
            self.app.expected_calls[
                ('sys-net', 'mgmt.vm.property.Get', key, None)] = \
                b'0\x00default=True ' + value
        with qubesmgmt.tests.tools.StdoutBuffer() as stdout:
            qubesmgmt.tools.qvm_ls.main([], app=self.app)
        self.assertEqual(stdout.getvalue(),
            'NAME STATUS LABEL TEMPLATE NETVM\n'
            'sys-net a--N---- red template1 sys-net\n'
            'template1 t-U----- black - sys-net\n'
            'vm1 a------- green template1 sys-net\n')
        # Every expected_calls entry must actually have been consumed.
        self.assertAllCalled()
| Python | 0 | |
25a6ad2a6b37bac4dd553c4e534092f2261d6037 | Add response classes | client/responses.py | client/responses.py |
class SuccessResponse:
    """Build a plain response dict for a successful (HTTP 200) result."""
    def __new__(cls, data=None, text=None, *args, **kwargs):
        # No instance is ever created; calling the class yields a dict.
        return {'status_code': 200, 'ok': True, 'data': data, 'text': text}
class NonSuccessResponse:
    """Build a plain response dict for a non-success result (default 400)."""
    def __new__(cls, status=400, text=None, *args, **kwargs):
        # No instance is ever created; calling the class yields a dict.
        return {'status_code': status, 'ok': True, 'text': text}
class ErrorResponse:
    """Build a plain response dict for an error result (ok=False, default 400)."""
    def __new__(cls, status=400, text=None, *args, **kwargs):
        # No instance is ever created; calling the class yields a dict.
        return {'status_code': status, 'ok': False, 'text': text}
| Python | 0.000001 | |
bd4ccf9c4876b84cad23f68ea81ecadae733589a | add example for raw binary IO with header | examples/tomo/data/raw_binary_io_with_header.py | examples/tomo/data/raw_binary_io_with_header.py | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Example for the usage of raw binary reader/writer with header.
The code below defines a simple file format for 2D images using
- shape
- origin
- pixel size
First, a file is written using `FileWriterRawBinaryWithHeader`. This
requires a header in a certain format.
Then, the same file is read again using a file specification and the
`FileReaderRawBinaryWithHeader`. The specification is given as a
sequence of dictionaries with a certain structure.
"""
from __future__ import print_function
from collections import OrderedDict
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
import numpy as np
import scipy
import tempfile
import odl
# --- Writing --- #
# Create some test data. We arbitrarily define origin and pixel size.
# In practice, these could come from a `DiscreteLp` space as `mid_pt`
# and `cell_sides` properties.
image = scipy.misc.ascent()
shape = np.array(image.shape, dtype='int32')
origin = np.array([-1.0, 0.0], dtype='float32')
px_size = np.array([0.1, 0.1], dtype='float32')
# To make it storable as binary data, we take the string version of the data
# type with a fixed size of 10 characters and encode it as array of single
# bytes.
dtype = np.fromiter(str(image.dtype).ljust(10), dtype='S1')
# Create the header
# Use an OrderedDict for the header to have a predictable order when
# looping through it
header = OrderedDict()
header['shape'] = {'offset': 0, 'value': shape}
header['origin'] = {'offset': 8, 'value': origin}
header['px_size'] = {'offset': 16, 'value': px_size}
header['dtype'] = {'offset': 24, 'value': dtype}
# Initialize the writer with a file and the header. We use a temporary
# file in order to keep the workspace clean.
file = tempfile.NamedTemporaryFile()
writer = odl.tomo.data.FileWriterRawBinaryWithHeader(file, header)
# Write header and data to the file
writer.write(image)
# Print some stuff to see that the sizes are correct
print('File size ({}) = Image size ({}) + Header size ({})'
      ''.format(file.seek(0, 2), image.nbytes, writer.header_bytes))
# --- Reading --- #
# We build a specification for our file format that mirrors `header`.
header_fields = [
    {'name': 'shape', 'offset': 0, 'size': 8, 'dtype': 'int32'},
    {'name': 'origin', 'offset': 8, 'size': 8, 'dtype': 'float32'},
    {'name': 'px_size', 'offset': 16, 'size': 8, 'dtype': 'float32'},
    {'name': 'dtype', 'offset': 24, 'size': 10, 'dtype': 'S1'}
]
# Now we create a reader and read from our newly created file.
# TODO: make this simpler after fixing the properties
reader = odl.tomo.data.FileReaderRawBinaryWithHeader(
    file, header_fields, set_attrs=False)
reader.header_bytes = writer.header_bytes
# Read header and data in one go
header_file, image_file = reader.read()
# Check that everything has been reconstructed correctly
shape_file = header_file['shape']['value']
origin_file = header_file['origin']['value']
px_size_file = header_file['px_size']['value']
dtype_file = header_file['dtype']['value']
print('shape -- original {}, from file {}'.format(shape, shape_file))
print('origin -- original {}, from file {}'.format(origin, origin_file))
print('px_size -- original {}, from file {}'.format(px_size, px_size_file))
print('dtype -- original {}, from file {}'
      ''.format(str(image.dtype), ''.join(dtype_file.astype(str))))
if plt is not None:
    plt.figure()
    plt.title('Original image')
    plt.imshow(image, cmap='Greys_r')
    plt.figure()
    plt.title('Image from file')
    # Fix: this figure previously displayed `image` again, so the round-trip
    # was never actually shown; display the data read back from the file.
    plt.imshow(image_file, cmap='Greys_r')
    plt.show()
| Python | 0 | |
d89bb401926698dc829be937d8f9c1959ecfd580 | make ok,eq actual functions | cement/utils/test.py | cement/utils/test.py | """Cement testing utilities."""
import unittest
from ..core import backend, foundation
# shortcuts
from nose.tools import ok_ as ok
from nose.tools import eq_ as eq
from nose.tools import raises
from nose import SkipTest
class TestApp(foundation.CementApp):
    """
    Basic CementApp for generic testing.
    """
    class Meta:
        label = 'test'
        # No config files and an empty argv so tests never read the real
        # environment or the test runner's command line.
        config_files = []
        argv = []
class CementTestCase(unittest.TestCase):
    """
    A sub-class of unittest.TestCase.
    """
    def __init__(self, *args, **kw):
        super(CementTestCase, self).__init__(*args, **kw)
    def setUp(self):
        """
        Sets up self.app with a generic TestApp(). Also resets the backend
        hooks and handlers so that everytime an app is created it is setup
        clean each time.
        """
        self.app = self.make_app()
    def make_app(self, *args, **kw):
        """
        Create a generic app using TestApp. Arguments and Keyword Arguments
        are passed to the app.
        """
        self.reset_backend()
        return TestApp(*args, **kw)
    def reset_backend(self):
        """
        Remove all registered hooks and handlers from the backend.
        """
        # Iterate over copies because the dicts are mutated during the loop.
        for _handler in backend.handlers.copy():
            del backend.handlers[_handler]
        for _hook in backend.hooks.copy():
            del backend.hooks[_hook]
    def ok(self, expr, msg=None):
        """Shorthand for assert."""
        return ok(expr, msg)
def eq(self, a, b, msg=None):
"""Shorthand for 'assert a == b, "%r != %r" % (a, b)'. """
return eq(a, b, msg) | """Cement testing utilities."""
import unittest
from ..core import backend, foundation
# shortcuts
from nose.tools import ok_ as ok
from nose.tools import eq_ as eq
from nose.tools import raises
from nose import SkipTest
class TestApp(foundation.CementApp):
    """
    Basic CementApp for generic testing.
    """
    class Meta:
        label = 'test'
        # No config files and an empty argv so tests never read the real
        # environment or the test runner's command line.
        config_files = []
        argv = []
class CementTestCase(unittest.TestCase):
    """
    A sub-class of unittest.TestCase.
    """
    # Expose the nose shortcuts as class attributes so subclasses can use
    # self.ok(...) and self.eq(...).
    ok = ok
    eq = eq
    def __init__(self, *args, **kw):
        super(CementTestCase, self).__init__(*args, **kw)
    def setUp(self):
        """
        Sets up self.app with a generic TestApp(). Also resets the backend
        hooks and handlers so that everytime an app is created it is setup
        clean each time.
        """
        self.app = self.make_app()
    def make_app(self, *args, **kw):
        """
        Create a generic app using TestApp. Arguments and Keyword Arguments
        are passed to the app.
        """
        self.reset_backend()
        return TestApp(*args, **kw)
    def reset_backend(self):
        """
        Remove all registered hooks and handlers from the backend.
        """
        # Iterate over copies because the dicts are mutated during the loop.
        for _handler in backend.handlers.copy():
            del backend.handlers[_handler]
        for _hook in backend.hooks.copy():
            del backend.hooks[_hook]
935c145dfb4b40c892b8de11c6cd8deb617b1dbb | make compatible to python2 and 3 | cryptoexchange/util/generate-api-key.py | cryptoexchange/util/generate-api-key.py | #/usr/bin/env python3
import json
import ssl
import getpass
import signal
try:
from urllib.request import Request, urlopen
from urllib.parse import urlparse, urlencode
from urllib.error import URLError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import Request, URLError, HTTPError, urlopen
try:
input = raw_input
except NameError:
pass
# Edit this to True if you'd like to create a Testnet key.
USE_TESTNET = True
# Base URLs for the two BitMEX environments.
BITMEX_TESTNET = "https://testnet.bitmex.com"
BITMEX_PRODUCTION = "https://www.bitmex.com"
def main():
    """Entry point: print a banner, authenticate, then loop on commands."""
    banner = "########################"
    print(banner)
    print("BitMEX API Key Interface")
    print(banner + "\n")

    if USE_TESTNET:
        notice = ('Connected to testnet.bitmex.com. If you want to create a production key, edit this file and set ' +
                  'USE_TESTNET to False.\n')
    else:
        notice = ('Connected to www.bitmex.com. If you want to create a testnet key, edit this file and set ' +
                  'USE_TESTNET to True.\n')
    print(notice)

    apiObj = auth()
    # prompt() loops forever; the user leaves with Ctrl+C (SIGINT handler
    # installed at module load).
    while True:
        prompt(apiObj)
def prompt(apiObj):
    """Ask the user for one operation name and dispatch it on apiObj.

    Exits the process with status 1 if the entered command is unknown.
    """
    operations = ('list_keys', 'create_key', 'enable_key', 'disable_key',
                  'delete_key')
    print("Available operations: " + ', '.join(operations))
    operation = input("Enter command: ")
    if operation in operations:
        # each operation name matches a method on the BitMEX wrapper
        getattr(apiObj, operation)()
        print("\nOperation completed. Press <ctrl+c> to quit.")
    else:
        print("ERROR: Operation not supported: %s" % operation)
        exit(1)
def auth():
    """Collect credentials interactively and return a logged-in BitMEX client."""
    print("Please log in.")
    credentials = {
        'email': input("Email: "),
        'password': getpass.getpass("Password: "),
        'otpToken': input("OTP Token (if enabled. If not, press <enter>): "),
    }
    client = BitMEX(**credentials)
    print("\nSuccessfully logged in.")
    return client
class BitMEX(object):
    """Minimal BitMEX REST client used for interactive API-key management."""

    def __init__(self, email=None, password=None, otpToken=None):
        """Log in immediately and store the session access token."""
        self.base_url = (BITMEX_TESTNET if USE_TESTNET else BITMEX_PRODUCTION) + "/api/v1"
        # Must be None before the login request: _curl_bitmex only attaches
        # the accessToken header when it is set.
        self.accessToken = None
        self.accessToken = self._curl_bitmex("/user/login",
            postdict={"email": email, "password": password, "token": otpToken})["id"]

    def create_key(self):
        """Create an API key."""
        print("Creating key. Please input the following options:")
        name = input("Key name (optional): ")
        print("To make this key more secure, you should restrict the IP addresses that can use it. ")
        print("To use with all IPs, leave blank or use 0.0.0.0/0.")
        print("To use with a single IP, append '/32', such as 207.39.29.22/32. ")
        print("See this reference on CIDR blocks: http://software77.net/cidr-101.html")
        cidr = input("CIDR (optional): ")
        key = self._curl_bitmex("/apiKey",
            postdict={"name": name, "cidr": cidr, "enabled": True})
        print("Key created. Details:\n")
        print("API Key: " + key["id"])
        print("Secret: " + key["secret"])
        print("\nSafeguard your secret key! If somebody gets a hold of your API key and secret,")
        print("your account can be taken over completely.")
        print("\nKey generation complete.")

    def list_keys(self):
        """List your API Keys."""
        keys = self._curl_bitmex("/apiKey/")
        print(json.dumps(keys, sort_keys=True, indent=4))

    def enable_key(self):
        """Enable an existing API Key."""
        print("This command will enable a disabled key.")
        apiKeyID = input("API Key ID: ")
        try:
            key = self._curl_bitmex("/apiKey/enable",
                postdict={"apiKeyID": apiKeyID})
            print("Key with ID %s enabled." % key["id"])
        # NOTE(review): bare except catches everything (incl. KeyboardInterrupt)
        # and retries recursively -- consider narrowing.
        except:
            print("Unable to enable key, please try again.")
            self.enable_key()

    def disable_key(self):
        """Disable an existing API Key."""
        print("This command will disable a enabled key.")
        apiKeyID = input("API Key ID: ")
        try:
            key = self._curl_bitmex("/apiKey/disable",
                postdict={"apiKeyID": apiKeyID})
            print("Key with ID %s disabled." % key["id"])
        # NOTE(review): same bare-except / recursive retry pattern as above.
        except:
            print("Unable to disable key, please try again.")
            self.disable_key()

    def delete_key(self):
        """Delete an existing API Key."""
        print("This command will delete an API key.")
        apiKeyID = input("API Key ID: ")
        try:
            self._curl_bitmex("/apiKey/",
                postdict={"apiKeyID": apiKeyID}, verb='DELETE')
            print("Key with ID %s disabled." % apiKeyID)
        # NOTE(review): same bare-except / recursive retry pattern as above.
        except:
            print("Unable to delete key, please try again.")
            self.delete_key()

    def _curl_bitmex(self, api, query=None, postdict=None, timeout=3, verb=None):
        """Send one request to the BitMEX API and return the decoded JSON.

        :param api: path under /api/v1 (e.g. "/apiKey").
        :param query: optional dict, urlencoded into the query string.
        :param postdict: optional dict; when given the request is a POST
            with a urlencoded body (unless *verb* overrides the method).
        :param verb: explicit HTTP verb (e.g. 'DELETE').
        Exits the process on 401/503 or connection errors; re-raises any
        other HTTP error.
        """
        url = self.base_url + api
        if query:
            url = url + "?" + urlencode(query)
        if postdict:
            postdata = urlencode(postdict).encode("utf-8")
            request = Request(url, postdata)
        else:
            request = Request(url)
        if verb:
            # override urllib's GET/POST inference
            request.get_method = lambda: verb

        request.add_header('user-agent', 'BitMEX-generate-api-key')
        if self.accessToken:
            request.add_header('accessToken', self.accessToken)

        # NOTE(review): HTTPError is only imported in the Python-2 branch of
        # the compat import block above -- on Python 3 this handler raises
        # NameError; verify the import block includes it.
        try:
            response = urlopen(request, timeout=timeout)
        except HTTPError as e:
            if e.code == 401:
                print("Login information incorrect, please check and restart.")
                exit(1)
            # 503 - BitMEX temporary downtime, likely due to a deploy. Try again
            elif e.code == 503:
                print("Unable to contact the BitMEX API (503). Please try again later." +
                      "Request: %s \n %s" % (url, json.dumps(postdict)))
                exit(1)
            else:
                print("Error:", e)
                print("Endpoint was: " + api)
                print("Please try again.")
                raise e
        except (URLError, ssl.SSLError) as e:
            print("Unable to contact the BitMEX API (URLError). Please check the URL. Please try again later. " +
                  "Request: %s \n %s" % (url, json.dumps(postdict)))
            exit(1)

        return json.loads(response.read().decode("utf-8"))
def signal_handler(signal, frame):
    # NOTE(review): the parameter name shadows the imported ``signal``
    # module inside this handler; harmless here since it is unused.
    print('\nExiting...')
    exit(0)
# Install the Ctrl+C handler, then enter the interactive command loop.
signal.signal(signal.SIGINT, signal_handler)
main()
| Python | 0.000076 | |
8e1e6585c4bfa76ebbd945d765c6a4a3dc98025d | Add new package: dnstracer (#18933) | var/spack/repos/builtin/packages/dnstracer/package.py | var/spack/repos/builtin/packages/dnstracer/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dnstracer(Package):
    """Dnstracer determines where a given Domain Name Server gets
    its information from, and follows the chain of DNS servers back to
    the servers which know the data."""

    homepage = "https://github.com/Orc/dnstracer"
    git      = "https://github.com/Orc/dnstracer.git"

    version('master', branch='master')

    # Plain Package (not AutotoolsPackage): the project ships a custom
    # configure.sh, so the phases are spelled out explicitly.
    phases = ['configure', 'build', 'install']

    def configure(self, spec, prefix):
        # local name shadows the method name; it is the script executable
        configure = Executable('./configure.sh')
        configure('--prefix={0}'.format(prefix))

    def build(self, spec, prefix):
        make()

    def install(self, spec, prefix):
        make('install')
| Python | 0 | |
2f889b045c1a03b3b046127380f15909ea117265 | add new package (#25844) | var/spack/repos/builtin/packages/py-kornia/package.py | var/spack/repos/builtin/packages/py-kornia/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyKornia(PythonPackage):
    """Open Source Differentiable Computer Vision Library for PyTorch."""

    homepage = "https://www.kornia.org/"
    pypi     = "kornia/kornia-0.5.10.tar.gz"

    version('0.5.10', sha256='428b4b934a2ba7360cc6cba051ed8fd96c2d0f66611fdca0834e82845f14f65d')

    # Build/runtime dependencies; py-pytest-runner is build-only
    # (presumably required by the project's setup.py -- confirm upstream).
    depends_on('python@3.6:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-pytest-runner', type='build')
    depends_on('py-torch@1.6.0:', type=('build', 'run'))
| Python | 0 | |
e73aac38882b90e7219035800b400c2ed1e181ef | add http data wrapper that can be used to specify options for a specific request | robj/lib/httputil.py | robj/lib/httputil.py | #
# Copyright (c) 2010 rPath, Inc.
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
#
"""
Module for httplib customizations.
"""
from robj.lib import util
class HTTPData(object):
    """Wrapper for a request payload plus per-request send options
    (headers, content type, chunked encoding, progress callback,
    bandwidth limit)."""

    __slots__ = ('data', 'method', 'size', 'headers', 'contentType', 'callback',
        'chunked', 'bufferSize', 'rateLimit', )

    # Chunk size used for chunked transfer-encoding (256 KiB).
    CHUNK_SIZE = 262144
    # Default copy-buffer size for non-chunked streaming (8 KiB).
    BUFFER_SIZE = 8192

    def __init__(self, data=None, method=None, size=None, headers=None,
        contentType=None, callback=None, chunked=None, bufferSize=None,
        rateLimit=None):
        """
        :param data: body -- a string, or a file-like object with ``read``.
        :param method: HTTP method this payload is intended for.
        :param size: body size in bytes; computed for string data,
            required (asserted) for chunked file-like data.
        :param headers: extra headers to send; a fresh dict when None.
        :param contentType: value for the Content-Type header.
        :param callback: progress callback forwarded to util.copyfileobj.
        :param chunked: use chunked transfer-encoding for file-like data.
        :param bufferSize: copy buffer size (defaults to BUFFER_SIZE).
        :param rateLimit: bandwidth limit forwarded to util.copyfileobj.
        """
        if headers is None:
            headers = {}

        if data is not None:
            if hasattr(data, 'read'):
                if chunked:
                    headers['Transfer-Encoding'] = 'Chunked'
            else:
                # plain string bodies are encoded whole; size is derived
                data = data.encode('utf-8')
                size = len(data)

        self.method = method
        self.data = data
        self.headers = headers
        self.size = size
        self.contentType = contentType
        self.callback = callback
        self.chunked = chunked
        self.bufferSize = bufferSize or self.BUFFER_SIZE
        self.rateLimit = rateLimit

    def iterheaders(self):
        """Yield (name, value) header pairs for this payload (Python 2:
        uses dict.iteritems)."""
        for k, v in sorted(self.headers.iteritems()):
            yield k, str(v)

        # Don't send a Content-Length header if chunking
        if not self.chunked and self.size is not None:
            yield 'Content-Length', str(self.size)

        if self.contentType is not None:
            yield 'Content-Type', self.contentType

    def writeTo(self, connection):
        """Write the body to *connection*.

        Strings are sent directly; file-like data is streamed, either
        plainly or in HTTP/1.1 chunked wire format (hex size line,
        chunk bytes, CRLF, terminated by a zero-size chunk).
        """
        if self.data is None:
            return

        if not hasattr(self.data, 'read'):
            connection.send(self.data)
            return

        if not self.chunked:
            util.copyfileobj(self.data, connection, bufSize=self.bufferSize,
                callback=self.callback, rateLimit=self.rateLimit,
                sizeLimit=self.size)
            return

        # chunked mode still requires knowing the total size up front
        assert self.size is not None

        # keep track of the total amount of data sent so that the
        # callback passed in to copyfileobj can report progress correctly
        sent = 0
        chunk = self.CHUNK_SIZE
        while self.size - sent:
            if chunk > self.size - sent:
                chunk = self.size - sent
            # first send the hex-encoded size
            connection.send('%x\r\n' % chunk)
            # then the chunk of data
            util.copyfileobj(self.data, connection, bufSize=chunk,
                callback=self.callback, rateLimit=self.rateLimit,
                sizeLimit=chunk, total=sent)
            # send \r\n after the chunked data
            connection.send("\r\n")
            sent += chunk
        # terminate the chunked encoding
        connection.send('0\r\n\r\n')
def isHTTPData(obj):
    """Return True when *obj* is an HTTPData instance."""
    result = isinstance(obj, HTTPData)
    return result
| Python | 0 | |
4f5adf435eaf1f6f8b5ff8195a8f371afe3fbf21 | Add volume backup tempest tests | cinder/tests/tempest/api/volume/test_volume_backup.py | cinder/tests/tempest/api/volume/test_volume_backup.py | # Copyright (c) 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import testtools
from tempest.api.volume import base as volume_base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest import test
# TODO(obutenko): Remove this when liberty-eol happens.
# Feature switch: backup-from-snapshot is not implemented in Liberty, so
# the test below gates on it via skipUnless.
snapshot_backup_opt = cfg.BoolOpt('snapshot_backup',
                                  default=False,
                                  help='Creating backup from snapshot not '
                                       'implemented in Liberty.')

CONF = config.CONF
# Register under the same group tempest uses for volume feature flags.
CONF.register_opt(snapshot_backup_opt, group='volume-feature-enabled')
class VolumesBackupsTest(volume_base.BaseVolumeTest):
    """Tempest tests for Cinder volume backups: backup from snapshot,
    restore into an existing volume, and incremental backups."""

    @classmethod
    def skip_checks(cls):
        super(VolumesBackupsTest, cls).skip_checks()
        # Skip the whole class when the deployment has no backup service.
        if not CONF.volume_feature_enabled.backup:
            raise cls.skipException("Cinder backup feature disabled")

    @testtools.skipUnless(CONF.volume_feature_enabled.snapshot_backup,
                          "Skip. Not implemented in Liberty.")
    @test.idempotent_id('885410c6-cd1d-452c-a409-7c32b7e0be15')
    def test_volume_snapshot_backup(self):
        """Create backup from snapshot."""
        volume = self.create_volume()
        # Create snapshot
        snapshot = self.create_snapshot(volume['id'])
        # Create backup
        backup = self.create_backup(
            volume_id=volume['id'],
            snapshot_id=snapshot['id'])
        # Get a given backup
        backup = self.backups_client.show_backup(
            backup['id'])['backup']
        waiters.wait_for_backup_status(
            self.backups_client,
            backup['id'], 'available')
        # The backup must record both its source volume and snapshot.
        self.assertEqual(volume['id'], backup['volume_id'])
        self.assertEqual(snapshot['id'], backup['snapshot_id'])
        # Explicit cleanup (snapshot before volume, with deletion waits).
        self.snapshots_client.delete_snapshot(snapshot['id'])
        self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
        self.volumes_client.delete_volume(volume['id'])
        self.volumes_client.wait_for_resource_deletion(volume['id'])

    @test.idempotent_id('b5d837b0-7066-455d-88fc-4a721a899306')
    def test_backup_create_and_restore_to_an_existing_volume(self):
        """Test backup create and restore to an existing volume."""
        # Create volume
        src_vol = self.create_volume()
        self.addCleanup(self.volumes_client.delete_volume,
                        src_vol['id'])
        # Create backup
        backup = self.backups_client.create_backup(
            volume_id=src_vol['id'])['backup']
        self.addCleanup(self.backups_client.delete_backup, backup['id'])
        waiters.wait_for_backup_status(
            self.backups_client,
            backup['id'], 'available')
        # Restore to existing volume (the one the backup was taken from).
        restore = self.backups_client.restore_backup(
            backup_id=backup['id'],
            volume_id=src_vol['id'])['restore']
        waiters.wait_for_backup_status(
            self.backups_client,
            backup['id'], 'available')
        waiters.wait_for_volume_status(
            self.volumes_client,
            src_vol['id'], 'available')
        self.assertEqual(src_vol['id'], restore['volume_id'])
        self.assertEqual(backup['id'], restore['backup_id'])

    @test.idempotent_id('c810fe2c-cb40-43ab-96aa-471b74516a98')
    def test_incremental_backup(self):
        """Test create incremental backup."""
        # Create volume from image
        volume = self.create_volume(size=CONF.volume.volume_size,
                                    imageRef=CONF.compute.image_ref)
        self.addCleanup(self.volumes_client.delete_volume,
                        volume['id'])
        # Create backup (the full backup the incremental will build on).
        backup = self.backups_client.create_backup(
            volume_id=volume['id'])['backup']
        waiters.wait_for_backup_status(self.backups_client,
                                       backup['id'],
                                       'available')
        # Create a server booted from the volume so its contents change.
        bd_map = [{'volume_id': volume['id'],
                   'delete_on_termination': '0'}]
        server_name = data_utils.rand_name('instance')
        server = self.create_server(
            name=server_name,
            block_device_mapping=bd_map,
            wait_until='ACTIVE')
        # Delete VM
        self.servers_client.delete_server(server['id'])
        # Create incremental backup (volume must be detached/available).
        waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                       'available')
        backup_incr = self.backups_client.create_backup(
            volume_id=volume['id'],
            incremental=True)['backup']
        waiters.wait_for_backup_status(self.backups_client,
                                       backup_incr['id'],
                                       'available')
        is_incremental = self.backups_client.show_backup(
            backup_incr['id'])['backup']['is_incremental']
        self.assertTrue(is_incremental)
        # Delete the incremental backup before its base backup.
        self.backups_client.delete_backup(backup_incr['id'])
        self.backups_client.wait_for_resource_deletion(backup_incr['id'])
        self.backups_client.delete_backup(backup['id'])
        self.backups_client.wait_for_resource_deletion(backup['id'])
| Python | 0.000089 | |
bc2abe4c295a371358064952e6c3afa395a4bd13 | Rename Longest-Common-Prefix.py to LongestCommonPrefixtwo.py | leetcode/14.-Longest-Common-Prefix/LongestCommonPrefixtwo.py | leetcode/14.-Longest-Common-Prefix/LongestCommonPrefixtwo.py | #!/usr/bin/python
#_*_ coding:utf-8 _*_
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
strNum=len(strs) #字符串的长度
if strNum == 0 or strs == None:
return ''
else:
prefix = strs[0] #对前缀进行赋值
for i in range(strNum):
if prefix == '' or strs[i] == '':
return ''
comlen = min(len(prefix),len(strs[i])) #减少寻找公共最小前缀的长度
j = 0
while j < comlen and prefix[j] == strs[i][j]: #寻找寻找公共最小前缀
j += 1
prefix = prefix[0:j]
return prefix
| Python | 0.999999 | |
5f9c9500296627a94221ecd9614209a2c791e8b9 | remove pointless condition | plugins/messages.py | plugins/messages.py | import random
class Message:
    """One stored message: the sender's name and the message text."""
    def __init__(self, sent, msg):
        self.sent = sent
        self.msg = msg

    def replyFormat(self):
        """Render the message for delivery to the recipient."""
        return 'From {user}: {msg}'.format(user=self.sent, msg=self.msg)


class MessageDatabase:
    """Pending-message store, keyed recipient -> sender -> Message."""
    def __init__(self):
        self.messages = {}

    def pendingMessages(self, user):
        """Describe how many messages are waiting for *user*."""
        cnt = len(self.messages[user])
        plural = 's' if cnt > 1 else ''
        return ('You have {nr} message{s} waiting for you.\n'
                'Use ~read [number] to get [number] of messages shown to you'
                ).format(nr=cnt, s=plural)

    def addMessage(self, to, sent, msg):
        """Queue a message; False when *sent* already has one pending for *to*."""
        pending = self.messages.setdefault(to, {})
        if sent in pending:
            return False
        pending[sent] = Message(sent, msg)
        return True

    def getMessage(self, user):
        """Remove one random pending message for *user* and render it."""
        return self.removeRandomMessage(user).replyFormat()

    def getMessages(self, user, amnt):
        ''' This removes amnt number of messages from the message service '''
        # Spammy by design: the user chooses how many to read at once.
        count = min(amnt, len(self.messages[user]))
        parts = []
        for remaining in range(count, 0, -1):
            parts.append(self.getMessage(user) + ('\n' if remaining > 1 else ''))
        # Drop the recipient entirely once the queue is empty.
        if not self.messages[user]:
            self.messages.pop(user)
        return ''.join(parts)

    def getAllMessages(self, user):
        ''' This gets and delete every message to this user from storage '''
        # Existence is not checked here; callers test hasMessage() first.
        pending = self.removeAllMessages(user)
        return '\n'.join(pending[sender].replyFormat() for sender in pending)

    def hasMessage(self, user):
        """True when *user* has any pending messages."""
        return user in self.messages

    def alreadySentMessage(self, user, frm):
        """True when *frm* already queued a message for *user*."""
        return user in self.messages and frm in self.messages[user]

    def removeRandomMessage(self, to):
        """Pop one message for *to*, choosing the sender at random."""
        sender = random.choice(list(self.messages[to].keys()))
        return self.messages[to].pop(sender, None)

    # Unused but still supported
    def removeAllMessages(self, to):
        """Pop and return the whole sender->Message dict for *to*."""
        return self.messages.pop(to, None)
| import random
class Message:
def __init__(self, sent, msg):
self.sent = sent
self.msg = msg
def replyFormat(self):
return 'From {user}: {msg}'.format(user = self.sent, msg = self.msg)
class MessageDatabase:
def __init__(self):
self.messages = {}
def pendingMessages(self, user):
cnt = len(self.messages[user])
return 'You have {nr} message{s} waiting for you.\nUse ~read [number] to get [number] of messages shown to you'.format(nr = cnt, s = 's' if cnt > 1 else '')
def addMessage(self, to, sent, msg):
if to not in self.messages: self.messages[to] = {}
if sent in self.messages[to]: return False
self.messages[to][sent] = Message(sent, msg)
return True
def getMessage(self, user):
return self.removeRandomMessage(user).replyFormat()
def getMessages(self, user, amnt):
''' This removes amnt number of messages from the message service '''
# This can be super-spammy for users with a lot of pending messages
# as they can opt to look at all at once
reply = ''
if amnt > len(self.messages[user]): amnt = len(self.messages[user])
while amnt > 0 and len(self.messages[user]) > 0:
reply += self.getMessage(user) + ('\n' if amnt > 1 else '')
amnt -= 1
# Remove the user from the list if there's no messages left
if not self.messages[user]:
self.messages.pop(user)
return reply
def getAllMessages(self, user):
''' This gets and delete every message to this user from storage '''
# No need to test for existance, this assumes a message exists
# and usage should first test for existance.
messages = self.removeAllMessages(user)
combine = []
for msg in messages:
combine.append(messages[msg].replyFormat())
return '\n'.join(combine)
def hasMessage(self, user):
return user in self.messages
def alreadySentMessage(self, user, frm):
return user in self.messages and frm in self.messages[user]
def removeRandomMessage(self, to):
return self.messages[to].pop(random.choice(list(self.messages[to].keys())), None)
# Unused but still supported
def removeAllMessages(self, to):
return self.messages.pop(to, None)
| Python | 0.000581 |
f0f72e5d8a64f7f49406022fd170808417220289 | Create publish.py | clickonce/publish.py | clickonce/publish.py | from __future__ import print_function
import subprocess
import os
import sys
import shutil
import datetime
import distutils.dir_util
# Python 2 compatibility: alias input/str to their py2 equivalents.
if sys.version_info < (3,):
    input = raw_input
    str = unicode
pwd = os.getcwd()
# Version file read/written by get_appver()/set_appver().
appver_file = r'.\AppVer'
# Deployment share targets per build environment; fill in before use.
target_shares = {
    'release': [],
    'test'   : [],
    'dev'    : []
}
# it needs this transformation because msbuild does a direct string concatenation instead of a path join.
target_shares = {k: [p if p.endswith(os.path.sep) else p+os.path.sep for p in v] for k, v in target_shares.items()}
output_dir = r'bin\publish'
publish_dir = r'bin\publishapp.publish'
# NOTE(review): the %ProgramFiles% placeholders are left for cmd.exe to
# expand at run time (commands run with shell=True) -- verify on the host.
msbuild_folder = r'%ProgramFiles%\MSBuild\12.0\bin\amd64' \
    if os.path.exists(r'%s\MSBuild\12.0\bin\amd64' % os.environ['PROGRAMFILES'])\
    else r'%ProgramFiles(x86)%\MSBuild\12.0\bin\amd64'
def get_appver():
    """Return the current version string from the AppVer file."""
    with open(appver_file) as fh:
        first_line = fh.readline()
    return first_line.strip()
def incr_appver(ver):
    """Return *ver* with its last dot-separated component incremented."""
    head, _, last = ver.rpartition('.')
    bumped = str(int(last) + 1)
    return (head + '.' + bumped) if head else bumped
def set_appver(ver):
    """Overwrite the AppVer file with *ver*."""
    with open(appver_file, 'w') as fh:
        fh.write(ver)
def get_cmd(target, ver, env):
    """Build the msbuild clean+publish command line for one target share."""
    assembly_name = 'NST System Configurator ' + env
    template = r'"{0}\msbuild" /t:clean;publish /property:OutputPath={1},PublishUrl={2},InstallUrl={2},UpdateUrl={2},ApplicationVersion={3},MinimumRequiredVersion={3},AssemblyName="{4}"'
    return template.format(msbuild_folder, output_dir, target, ver, assembly_name)
if __name__=='__main__':
    # share-path -> exception, for shares whose copy step failed
    error = {}
    print('current python implementation version is', sys.version)
    print('currently working in: %s' % pwd)
    print('please make sure this script runs directly under the project folder.')
    env = input('build environment(%s): ' % ', '.join(sorted(target_shares.keys())))
    while env not in target_shares:
        print("nonexisting environment: {}".format(env), file=sys.stderr)
        env = input('build environment(%s): ' % ', '.join(sorted(target_shares.keys())))
    # bump the last version component for this publish run
    ver = incr_appver(get_appver())
    for i, p in enumerate(target_shares[env]):
        cmd = get_cmd(p, ver, env+str(i))
        print('executing {}'.format(cmd))
        print('----------------------------------')
        # shell=True so cmd.exe expands the %ProgramFiles% placeholder
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        with proc.stdout:
            for l in proc.stdout:
                print(l.strip().decode('utf-8'))
        proc.terminate()
        print('----------------------------------')
        # NOTE(review): returncode is only populated after wait()/poll();
        # immediately after terminate() it may still be None -- verify.
        if proc.returncode == 0:
            try:
                distutils.dir_util.copy_tree(publish_dir, p)
            except Exception as e:
                error[p] = e
                sys.stderr.write("error occurred: %s\n" % str(e))
                # keep a local backup of the publish output for this share
                distutils.dir_util.copy_tree(publish_dir, r'bin\backup' + '\\' + str(i))
        else:
            print("error: %d" % proc.returncode, file=sys.stderr)
        # NOTE(review): bare ``print`` is a no-op expression on Python 3;
        # presumably meant as print() for a blank line -- confirm.
        print
    if len(error) != 0:
        print('Error occurred:', file=sys.stderr)
        for k, e in error.items():
            print('%s: %s\n' % (k, str(e)), file=sys.stderr)
        print('has backed up the folder.', file=sys.stderr)
    try:
        set_appver(ver)
    except IOError as e:
        print("failed to write to file: %s" % str(e), file=sys.stderr)
        print('next application version will be %s.' % incr_appver(ver), file=sys.stderr)
    input('press enter to continue...')
| Python | 0.000001 | |
badbf8c89216b97ac29ea3582d99d28535f82a7e | Update __init__.py | slither/__init__.py | slither/__init__.py | from . import slither
__all__ = ['slither','Mouse','Stage','Sprite','Sound']
| from slither import slither
__all__ = ['slither','Mouse','Stage','Sprite','Sound']
| Python | 0.000072 |
79df8ab80e6b14f16af125895f5b7338e5c41a60 | add IOTools | base/IOTools.py | base/IOTools.py | import ROOT
import logging
import os
class Writer:
    """Writes canvases and other ROOT objects to files under a base directory."""

    def __init__(self, directory=None):
        """
        :param directory: base output directory; defaults to the current
            working directory. Created on demand when missing.
        """
        if directory is None:
            directory = os.path.abspath(os.curdir)
        self.dir = directory
        self.__check_and_create_directory(self.dir)

    def __check_and_create_directory(self, directory):
        # Fix: the original referenced an undefined module-level ``_logger``
        # (NameError on first call); use a named logger instead.
        logger = logging.getLogger(__name__)
        logger.debug("Check if directory: %s exists", directory)
        if not os.path.exists(directory):
            logger.debug("Create directory: %s", directory)
            os.makedirs(directory)

    def dump_canvas(self, canvas, message=None, image=None):
        """Save *canvas*; prompts for a file name when *image* is not given."""
        if image:
            self.write_canvas_to_file(canvas, image)
            return
        if message is None:
            message = "save canvas as (<RET> for skipping): "
        # NOTE: raw_input -- this module targets Python 2.
        image = raw_input(message)
        if image:
            self.write_canvas_to_file(canvas, image)

    def write_canvas_to_file(self, canvas, name, extension='pdf'):
        """Save *canvas* as *name*; an extension embedded in *name* wins
        over the *extension* argument (default 'pdf')."""
        ext = self.parse_extension_from_file_name(name)
        if ext is not None:
            extension = ext
            name = ''.join(name.split('.')[0:-1])
        if not extension.startswith('.'):
            extension = '.' + extension
        if extension == '.root':
            # NOTE(review): ROOT files are written relative to the current
            # working directory rather than self.dir -- confirm intended.
            self.write_object_to_root_tile(canvas, name + extension)
        else:
            # (original had a redundant nested os.path.join here)
            canvas.SaveAs(os.path.join(self.dir, name + extension))

    def write_object_to_root_tile(self, obj, filename, dir=''):
        """Append *obj* to *filename* (opened in UPDATE mode) inside
        subdirectory *dir*."""
        f = ROOT.gROOT.GetListOfFiles().FindObject(filename)
        if not f:
            f = ROOT.TFile.Open(filename, 'UPDATE')
        d = f.GetDirectory(dir)
        if not d:
            # make_root_dir is not defined in this module; presumably
            # provided elsewhere in the package -- TODO confirm.
            d = make_root_dir(f, dir)
        d.cd()
        obj.Write()

    def parse_extension_from_file_name(self, name):
        """Return the extension of *name*, or None when it has none."""
        ext = name.split('.')[-1]
        # Fix: compare by value; the original identity check (``ext is
        # name``) relied on a CPython string-reuse implementation detail.
        if ext == name:
            return None
        return ext

    def set_directory(self, directory):
        """Change the base output directory, creating it if necessary."""
        self.__check_and_create_directory(directory)
        self.dir = directory
| Python | 0.000001 | |
2786dd91b0bb7dc8849e3549ff40de28d72d40d5 | add a django multi-database router | rdrf/rdrf/db.py | rdrf/rdrf/db.py | from io import StringIO
import os
from django.core.management import call_command
from django.db import connections
class RegistryRouter:
    """Django database router: sends clinical models to the 'clinical'
    connection when one is configured separately from 'default'."""

    # Whether clinical db is configured at all.
    one_db = "clinical" not in connections

    # Whether clinical db is configured to be the same as main db.
    # (``or`` short-circuits on one_db, so connections["clinical"] is
    # only touched when that alias actually exists.)
    same_db = (one_db or
               connections["default"].get_connection_params() ==
               connections["clinical"].get_connection_params())

    # (app_label, model_name) pairs that live in the clinical database.
    clinical_models = (
        ("rdrf", "clinical"),
        ("rdrf", "questionnaireresponsedata"),
        # fixme: move CDEFile to clinical database. This is just
        # tricky with migrations.
        # ("rdrf", "cdefile"),
        ("rdrf", "patientdata"),
        ("rdrf", "formprogress"),
        ("rdrf", "modjgo"),
    )

    @classmethod
    def is_clinical(cls, app_label, model_name):
        # Membership test against the explicit whitelist above.
        return (app_label, model_name) in cls.clinical_models

    def choose_db_model(self, model):
        # Convenience wrapper taking a model class/instance.
        return self.choose_db(model._meta.app_label, model._meta.model_name)

    def choose_db(self, app_label, model_name):
        # Clinical models route to "clinical" only when it is separately
        # configured; everything else (and single-db setups) uses default.
        clinical = self.is_clinical(app_label, model_name)
        return "clinical" if clinical and not self.one_db else "default"

    def db_for_read(self, model, **hints):
        return self.choose_db_model(model)

    def db_for_write(self, model, **hints):
        return self.choose_db_model(model)

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        # Note the precedence: (db == "default" and same_db) OR
        # (db == choose_db(...)) -- when both aliases point at one
        # database, everything migrates on "default".
        return (db == "default" and self.same_db or
                db == self.choose_db(app_label, model_name))
def reset_sql_sequences(apps):
    """
    Reset the primary-key sequence counters for every table in `apps`
    by collecting Django's ``sqlsequencereset`` SQL and executing it.
    """
    # Disable ANSI colouring so the captured SQL is plain parseable text.
    os.environ['DJANGO_COLORS'] = 'nocolor'
    buf = StringIO()
    for app_label in apps:
        call_command('sqlsequencereset', app_label, stdout=buf)
    statements = buf.getvalue().splitlines()
    _execute_reset_sql_sequences(statements)
def _execute_reset_sql_sequences(commands):
    """Route each captured SQL line to the connection that owns its
    table and execute it there."""
    # this gets nasty because the --database option of
    # sqlsequencereset command doesn't work.
    clinical_tables = ["_".join(m) for m in RegistryRouter.clinical_models]

    def for_db(database):
        # Build a predicate selecting the SQL lines relevant to *database*.
        def _for_db(command):
            is_clinical = any(t in command for t in clinical_tables)
            # Lines that are not SELECT statements pass everywhere
            # (presumably wrappers/blank lines); SELECT lines go to the
            # connection owning their table, and everything goes to
            # "default" when no clinical alias is configured.
            return (not command.startswith("SELECT") or
                    (database == "default" and not is_clinical) or
                    (database == "clinical" and is_clinical) or
                    (database == "default" and "clinical" not in connections))
        return _for_db

    for database in ["default", "clinical"]:
        if database in connections:
            cursor = connections[database].cursor()
            cursor.execute("\n".join(filter(for_db(database), commands)))
| Python | 0.000001 | |
0b6f6a9fd3916d8a028d5c3ccf4ca4a0277b9781 | Add arena class prototype | src/arena.py | src/arena.py | import jsonpickle
class ArenaType():
    """Enum-like constants for the arena's shape."""
    Circle = 0
    Square = 1
class ArenaCoverType():
    """Enum-like constants for the arena's ground cover."""
    Soil = 0
    Sand = 1
    Grass = 2
    Stone = 3
class Arena():
    """An arena described by its name, size, shape type and ground cover."""

    def __init__(self, name, size, stype, cover):
        # 'stype' avoids shadowing the builtin 'type'; stored as .type
        self.name, self.size = name, size
        self.type, self.cover = stype, cover
4b0a21dd813d58370805053e60376f64b5927cd9 | Add tutorial for MakeNumpyDataFrame | tutorials/dataframe/df032_MakeNumpyDataFrame.py | tutorials/dataframe/df032_MakeNumpyDataFrame.py | ## \file
## \ingroup tutorial_dataframe
## \notebook
## Read data from Numpy arrays into RDataFrame.
##
## \macro_code
## \macro_output
##
## \date March 2021
## \author Stefan Wunsch (KIT, CERN)
import ROOT
import numpy as np
# Let's create some data in numpy arrays
x = np.array([1, 2, 3], dtype=np.int32)
y = np.array([4, 5, 6], dtype=np.float64)

# Read the data with RDataFrame
# The column names in the RDataFrame are defined by the keys of the dictionary.
# Please note that only fundamental types (int, float, ...) are supported.
df = ROOT.RDF.MakeNumpyDataFrame({'x': x, 'y': y})

# You can now use the RDataFrame as usual, e.g. add a column ...
df = df.Define('z', 'x + y')

# ... or print the content
df.Display().Print()

# ... or save the data as a ROOT file
df.Snapshot('tree', 'df032_MakeNumpyDataFrame.root')
| Python | 0 | |
43eb4f930f14fcf693f0656a3f0bbe749ed98d2e | Move subgraph attribute copies tests to a separate file. | networkx/algorithms/components/tests/test_subgraph_copies.py | networkx/algorithms/components/tests/test_subgraph_copies.py | """ Tests for subgraphs attributes
"""
from copy import deepcopy
from nose.tools import assert_equal
import networkx as nx
class TestSubgraphAttributesDicts:
    """Check whether *_component_subgraphs functions copy or share the
    graph/node/edge attribute dicts, for copy=True/False/default.

    The three test methods were near-identical; the common logic now
    lives in private helpers parameterised by whether mutations on the
    subgraph should be visible on the parent graph (``shared``)."""

    def setUp(self):
        self.undirected = [
            nx.connected_component_subgraphs,
            nx.biconnected_component_subgraphs,
        ]
        self.directed = [
            nx.weakly_connected_component_subgraphs,
            nx.strongly_connected_component_subgraphs,
            nx.attracting_component_subgraphs,
        ]
        self.subgraph_funcs = self.undirected + self.directed

        self.D = nx.DiGraph()
        self.D.add_edge(1, 2, eattr='red')
        self.D.add_edge(2, 1, eattr='red')
        self.D.node[1]['nattr'] = 'blue'
        self.D.graph['gattr'] = 'green'

        self.G = nx.Graph()
        self.G.add_edge(1, 2, eattr='red')
        self.G.node[1]['nattr'] = 'blue'
        self.G.graph['gattr'] = 'green'

    def _fresh_graph(self, subgraph_func):
        # Each subgraph function needs the matching (un)directed fixture.
        return deepcopy(self.G if subgraph_func in self.undirected else self.D)

    def _check_subgraph(self, G, SG, shared):
        # Assert SG carries the fixture attributes, then mutate SG and
        # verify whether the mutation is (shared) or is not (copied)
        # visible on the parent graph G.
        assert_equal(SG[1][2]['eattr'], 'red')
        assert_equal(SG.node[1]['nattr'], 'blue')
        assert_equal(SG.graph['gattr'], 'green')
        SG[1][2]['eattr'] = 'foo'
        assert_equal(G[1][2]['eattr'], 'foo' if shared else 'red')
        assert_equal(SG[1][2]['eattr'], 'foo')
        SG.node[1]['nattr'] = 'bar'
        assert_equal(G.node[1]['nattr'], 'bar' if shared else 'blue')
        assert_equal(SG.node[1]['nattr'], 'bar')
        SG.graph['gattr'] = 'baz'
        assert_equal(G.graph['gattr'], 'baz' if shared else 'green')
        assert_equal(SG.graph['gattr'], 'baz')

    def _run(self, shared, **kwargs):
        # Exercise every subgraph function with the given copy kwargs.
        for subgraph_func in self.subgraph_funcs:
            G = self._fresh_graph(subgraph_func)
            SG = list(subgraph_func(G, **kwargs))[0]
            self._check_subgraph(G, SG, shared)

    def test_subgraphs_default_copy_behavior(self):
        # Test the default behavior of subgraph functions
        # For the moment (1.10) the default is to copy
        self._run(shared=False)

    def test_subgraphs_copy(self):
        self._run(shared=False, copy=True)

    def test_subgraphs_no_copy(self):
        self._run(shared=True, copy=False)
| Python | 0 | |
f36a3e4e6cfbc5d3aa14017dcfea6e0fc67514f0 | add delete_environment command | ebs_deploy/commands/delete_environment_command.py | ebs_deploy/commands/delete_environment_command.py | from ebs_deploy import out, parse_env_config
def add_arguments(parser):
    """
    Register the delete-environment command's arguments on *parser*.
    """
    parser.add_argument(
        '-e', '--environment',
        required=True,
        help='Environment name')
def execute(helper, config, args):
    """
    Deletes an environment
    """
    env_config = parse_env_config(config, args.environment)
    cname_prefix = env_config.get('cname_prefix', None)
    # env_name = args.environment
    # resolve the real environment name behind the CNAME prefix
    real_env_name = helper.environment_name_for_cname(cname_prefix)
    environments = helper.get_environments()
    for env in environments:
        if env['EnvironmentName'] == real_env_name:
            if env['Status'] != 'Ready':
                # only environments in status 'Ready' may be deleted
                out("Unable to delete " + env['EnvironmentName']
                    + " because it's not in status Ready ("
                    + env['Status'] + ")")
            else:
                out("Deleting environment: "+env['EnvironmentName'])
                # helper.delete_environment(env['EnvironmentName'])
                # environments_to_wait_for_term.append(env['EnvironmentName'])
                # NOTE(review): the actual delete call above is commented
                # out, so this command currently only *reports* deletion.
                out("Environment deleted")
    return 0
| Python | 0.000003 | |
cf4e468ed28a7e750adfbcd41235ac5b90cb562b | Add new package: diffmark (#18930) | var/spack/repos/builtin/packages/diffmark/package.py | var/spack/repos/builtin/packages/diffmark/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Diffmark(AutotoolsPackage):
"""Diffmark is a DSL for transforming one string to another."""
homepage = "https://github.com/vbar/diffmark"
git = "https://github.com/vbar/diffmark.git"
version('master', branch='master')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('libxml2')
| Python | 0 | |
ddff4237ae0bb8dd2575265707a843f4497ccbf2 | Create headache.py | headache.py | headache.py | """
python plaintext obfuscator
by n.bush
"""
import string
import random
def mess_maker(size=6, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
    """Build a random junk string of *size* characters drawn from *chars*."""
    picked = []
    for _ in range(size):
        picked.append(random.choice(chars))
    return ''.join(picked)
def headache(text):
    """Obfuscate *text* as HTML: each real character is interleaved with an
    invisible junk span so the rendered page reads normally but copies badly."""
    # One CSS class for the visible characters, one for the hidden noise.
    real_cls = mess_maker(10)
    junk_cls = mess_maker(10)
    stylesheet = """
    <style>
    span.%s {}
    span.%s {color: transparent; letter-spacing:-1em;}
    </style>
    """ % (real_cls, junk_cls)
    chunks = [stylesheet]
    for ch in text:
        noise = mess_maker(10)
        chunks.append('<span class="%s">%s</span><span class="%s">%s</span>'
                      % (real_cls, ch, junk_cls, noise))
    return ''.join(chunks)
# Demo: obfuscate a short sample sentence and print the generated HTML.
# NOTE: Python 2 print statement -- this module predates Python 3.
print headache("Hi. This is copyable. Not.")
| Python | 0.001409 | |
a7d6344428ef43374fb82f5b357968ec38402984 | Create test_step_motor_Model_28BYJ_48p.py | test/test_step_motor_Model_28BYJ_48p.py | test/test_step_motor_Model_28BYJ_48p.py | from gadgets.motors.step_motor import Model_28BYJ_48
st_mot = Model_28BYJ_48([11,15,16,18])
for i in range(2):
st_mot.angular_step(60,direction=2,waiting_time=2,bi_direction=True)
| Python | 0.00001 | |
4585d6426a6c2945a359bbe02c58702a07e68746 | Create new package. (#6209) | var/spack/repos/builtin/packages/r-gsubfn/package.py | var/spack/repos/builtin/packages/r-gsubfn/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGsubfn(RPackage):
"""gsubfn is like gsub but can take a replacement function or
certain other objects instead of the replacement string. Matches
and back references are input to the replacement function and
replaced by the function output. gsubfn can be used to split
strings based on content rather than delimiters and for
quasi-perl-style string interpolation. The package also has
facilities for translating formulas to functions and allowing
such formulas in function calls instead of functions. This can
be used with R functions such as apply, sapply, lapply, optim,
integrate, xyplot, Filter and any other function that expects
another function as an input argument or functions like cat or
sql calls that may involve strings where substitution is
desirable."""
homepage = "https://cran.r-project.org/package=gsubfn"
url = "https://cran.r-project.org/src/contrib/gsubfn_0.6-6.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/gsubfn"
version('0.6-6', '94195ff3502706c736d9c593c07252bc')
depends_on('r-proto', type=('build', 'run'))
| Python | 0 | |
e76fa7d23894bb88d47b761f683b4bbd797ef889 | Add Helpers object for cleaner helper syntax | knights/utils.py | knights/utils.py |
class Helpers:
    '''
    Expose the entries of a mapping as plain attributes for quick access.
    '''
    def __init__(self, members):
        # Copy every mapping entry onto the instance as an attribute.
        for name in members:
            setattr(self, name, members[name])
6c0e5b7823be4d2defc9f0ff7b4abe76bc6f9af7 | sequence learner abc | marmot/learning/sequence_learner.py | marmot/learning/sequence_learner.py | # this is an abstract class representing a sequence learner, or 'structured' learner
# implementations wrap various sequence learning tools, in order to provide a consistent interface within Marmot
from abc import ABCMeta, abstractmethod
class SequenceLearner(object):
__metaclass__ = ABCMeta
# subclasses must provide the implementation
@abstractmethod
def fit(self, X, y):
'''
fit a sequence model to data in the format [[seq1_w1, seq1_w2, ...]],
:param X: a list of np.arrays, where each row in each array contains the features for an item in the sequence - X can be viewed as a 3d tensor
:param y: the true labels for each sequence
:return:
'''
pass
@abstractmethod
def predict(self, X):
'''
predict the tag for each item in each sequence
:param X: list of sequences list of np.array
:return: list of lists, where each list contains the predictions for the test sequence
'''
pass | Python | 0.999952 | |
4fe6f81e1ce58474761b7bae673e92e1d08c75b3 | required drilldown*s* not singular | cubes/backends/mixpanel/store.py | cubes/backends/mixpanel/store.py | # -*- coding=utf -*-
from ...model import *
from ...browser import *
from ...stores import Store
from ...errors import *
from .mixpanel import *
from string import capwords
DIMENSION_COUNT_LIMIT = 100
time_dimension_md = {
"name": "time",
"levels": ["year", "month", "day", "hour"],
"hierarchies": [
{"name":"mdh", "levels": ["year", "month", "day", "hour"]}
],
"info": { "is_date": True }
}
_time_dimension = create_dimension(time_dimension_md)
class MixpanelModelProvider(ModelProvider):
    """Model provider that derives cubes and dimensions from the Mixpanel API."""

    def cube(self, name):
        """Creates a mixpanel cube with following variables:

        * `name` – cube name
        * `measures` – cube measures: `total` and `uniques`
        * `required_dimensions` – list of required dimension names
        * `mappings` – mapping of corrected dimension names

        Dimensions are Mixpanel's properties where ``$`` character is replaced
        by the underscore ``_`` character.
        """
        result = self.store.request(["events", "properties", "top"],
                        {"event":name, "limit":DIMENSION_COUNT_LIMIT})
        if not result:
            raise NoSuchCubeError(name)

        # Replace $ with underscore _ ; the "time" dimension is always present.
        # (The previously unused `names = result.keys()` assignment was removed.)
        dims = ["time"]
        mappings = {}

        for dim_name in result.keys():
            fixed_name = dim_name.replace("$", "_")
            if fixed_name != dim_name:
                # Remember the original property name so queries can be
                # translated back to Mixpanel's naming.
                mappings[fixed_name] = dim_name
            dims.append(fixed_name)

        measures = attribute_list(["total", "unique"])
        for m in measures:
            m.aggregations = ['identity']

        cube = Cube(name=name,
                    measures=measures,
                    required_dimensions=dims,
                    store=self.store_name,
                    mappings=mappings)

        # TODO: this is new (remove this comment)
        cube.category = self.store.category

        # TODO: required_drilldowns might be a cube's attribute (fixed_dd?)
        cube.info = {
            "required_drilldowns": ["time"],
            "category": cube.category
        }

        return cube

    def dimension(self, name):
        """Return the shared time dimension, or a flat single-level dimension."""
        if name == "time":
            return _time_dimension

        level = Level(name, attribute_list([name]))
        dim = Dimension(name,
                        levels=[level])

        return dim

    def list_cubes(self):
        """List one cube per Mixpanel event, labelled from the event name."""
        result = self.store.request(["events", "names"],
                        {"type":"general", })

        cubes = []
        for name in result:
            label = capwords(name.replace("_", " "))
            cube = {
                "name": name,
                "label": label,
                "category": self.store.category,
                "info": { "category": self.store.category }
            }
            cubes.append(cube)

        return cubes
class MixpanelStore(Store):
    """Store backed by the Mixpanel HTTP API."""

    def __init__(self, api_key, api_secret, category=None):
        self.mixpanel = Mixpanel(api_key, api_secret)
        self.category = category or "Mixpanel"

    def model_provider_name(self):
        return "mixpanel"

    def request(self, *args, **kwargs):
        """Performs a mixpanel HTTP request. Raises a BackendError when
        mixpanel returns `error` in the response."""
        reply = self.mixpanel.request(*args, **kwargs)
        if "error" not in reply:
            return reply
        raise BackendError("Mixpanel request error: %s" % reply["error"])
| # -*- coding=utf -*-
from ...model import *
from ...browser import *
from ...stores import Store
from ...errors import *
from .mixpanel import *
from string import capwords
DIMENSION_COUNT_LIMIT = 100
time_dimension_md = {
"name": "time",
"levels": ["year", "month", "day", "hour"],
"hierarchies": [
{"name":"mdh", "levels": ["year", "month", "day", "hour"]}
],
"info": { "is_date": True }
}
_time_dimension = create_dimension(time_dimension_md)
class MixpanelModelProvider(ModelProvider):
def cube(self, name):
"""Creates a mixpanel cube with following variables:
* `name` – cube name
* `measures` – cube measures: `total` and `uniques`
* `required_dimensions` – list of required dimension names
* `mappings` – mapping of corrected dimension names
Dimensions are Mixpanel's properties where ``$`` character is replaced
by the underscore ``_`` character.
"""
result = self.store.request(["events", "properties", "top"],
{"event":name, "limit":DIMENSION_COUNT_LIMIT})
if not result:
raise NoSuchCubeError(name)
names = result.keys()
# Replace $ with underscore _
dims = ["time"]
mappings = {}
for dim_name in result.keys():
fixed_name = dim_name.replace("$", "_")
if fixed_name != dim_name:
mappings[fixed_name] = dim_name
dims.append(fixed_name)
measures = attribute_list(["total", "unique"])
for m in measures:
m.aggregations = ['identity']
cube = Cube(name=name,
measures=measures,
required_dimensions=dims,
store=self.store_name,
mappings=mappings)
# TODO: this is new (remove this comment)
cube.category = self.store.category
# TODO: required_drilldown might be a cube's attribute (fixed_dd?)
cube.info = {
"required_drilldown": "time",
"category": cube.category
}
return cube
def dimension(self, name):
if name == "time":
return _time_dimension
level = Level(name, attribute_list([name]))
dim = Dimension(name,
levels=[level])
return dim
def list_cubes(self):
result = self.store.request(["events", "names"],
{"type":"general", })
cubes = []
for name in result:
label = capwords(name.replace("_", " "))
cube = {
"name": name,
"label": label,
"category": self.store.category,
"info": { "category": self.store.category }
}
cubes.append(cube)
return cubes
class MixpanelStore(Store):
def __init__(self, api_key, api_secret, category=None):
self.mixpanel = Mixpanel(api_key, api_secret)
self.category = category or "Mixpanel"
def model_provider_name(self):
return "mixpanel"
def request(self, *args, **kwargs):
"""Performs a mixpanel HTTP request. Raises a BackendError when
mixpanel returns `error` in the response."""
response = self.mixpanel.request(*args, **kwargs)
if "error" in response:
raise BackendError("Mixpanel request error: %s" % response["error"])
return response
| Python | 0.99897 |
8efc243ef6f24025c0cac28c4c644b3928215b89 | add cupti_make_report.py | cupti_trace/cupti_make_report.py | cupti_trace/cupti_make_report.py | #!/usr/bin/python3
# Copyright (C) 2015 Samuel Pitoiset <samuel.pitoiset@gmail.com>
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import getopt
import os
import re
import subprocess
import shutil
import sys
def get_cupti_path():
    """Return the CUPTI install root, honouring the CUPTI_PATH env override."""
    default_root = "/opt/cuda/extras/CUPTI"
    return os.getenv("CUPTI_PATH", default_root)
def cupti_query_parse_output(output, token):
    """Extract the values of all ``Key = value`` lines matching *token*.

    :param output: raw stdout of a cupti_query run
    :param token: regex matched against each line (e.g. ``"^Id"``)
    :return: list of the value parts (the text after ``"= "``)
    """
    data = []
    for line in output.splitlines():
        if re.search(token, line):
            # Keep everything after "= "; assumes the "Key = value" layout
            # produced by cupti_query.  (A dead `line.find('\n')` lookup was
            # removed -- splitlines() already strips newlines.)
            data.append(line[line.find('=') + 2:])
    return data
def cupti_get_domain_ids():
    # Parse the "Id = N" lines out of `cupti_query -getdomains` output.
    return cupti_query_parse_output(cupti_query_domains(), "^Id")
def cupti_query(opts):
    """Run the cupti_query sample with *opts* and return its stdout as text.

    NOTE(review): on a non-zero exit this returns the integer return code
    instead of a string, which callers like cupti_query_parse_output do not
    handle -- confirm whether failures should raise instead.
    """
    cmd = get_cupti_path() + "/sample/cupti_query/cupti_query " + opts
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if not proc.returncode == 0:
        return proc.returncode
    return stdout.decode()
def cupti_query_domains():
    # List all event domains available on the device.
    return cupti_query("-getdomains")
def cupti_query_events_by_domain(domain):
    # List the events that belong to one event domain.
    return cupti_query("-domain " + str(domain) + " -getevents")
def cupti_query_metrics():
    # List all metrics supported by the device.
    return cupti_query("-getmetrics")
def cupti_save_domains_list():
    """Dump the list of event domains to list_domains.txt."""
    # `with` guarantees the file is closed even if the query raises.
    with open("list_domains.txt", "w") as f:
        f.write(cupti_query_domains())
def cupti_save_events_list():
    """Dump the event list of every domain to a per-domain text file."""
    domain_ids = cupti_get_domain_ids()
    for domain_id in domain_ids:
        # `with` guarantees each file is closed even if a query raises.
        with open("domain_" + str(domain_id) + ".txt", "w") as f:
            f.write(cupti_query_events_by_domain(domain_id))
def cupti_save_metrics_list():
    """Dump the list of supported metrics to list_metrics.txt."""
    # `with` guarantees the file is closed even if the query raises.
    with open("list_metrics.txt", "w") as f:
        f.write(cupti_query_metrics())
f.close()
def cupti_trace(chipset, opts):
    """Run the external cupti_trace tool for *chipset* and return its stdout.

    NOTE(review): like cupti_query, a non-zero exit returns the integer
    return code rather than a string -- callers write it to report files.
    """
    cmd = "cupti_trace -a " + chipset + " " + opts
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if not proc.returncode == 0:
        return proc.returncode
    return stdout.decode()
def cupti_get_event_names(domain_id):
    # Parse the "Name = ..." lines out of the per-domain event listing.
    output = cupti_query_events_by_domain(domain_id)
    return cupti_query_parse_output(output, "^Name")
def cupti_trace_all_events(chipset):
    """Trace every event of every domain, appending output to report_events.txt."""
    # `with` guarantees the report file is closed even if a trace raises.
    with open("report_events.txt", "w") as f:
        domain_ids = cupti_get_domain_ids()
        for domain_id in domain_ids:
            print ("Domain #" + str(domain_id))
            event_names = cupti_get_event_names(domain_id)
            for event_name in event_names:
                print ("Event " + event_name)
                f.write(cupti_trace(chipset, "-e " + event_name))
def cupti_get_metric_names():
    # Parse the "Name = ..." lines out of the metric listing.
    return cupti_query_parse_output(cupti_query_metrics(), "^Name")
def cupti_trace_all_metrics(chipset):
    """Trace every supported metric, appending output to report_metrics.txt."""
    # `with` guarantees the report file is closed even if a trace raises.
    with open("report_metrics.txt", "w") as f:
        metric_names = cupti_get_metric_names()
        for metric_name in metric_names:
            print ("Metric " + metric_name)
            f.write(cupti_trace(chipset, "-m " + metric_name))
def dry_run_valgrind_mmt():
    """Probe whether the valgrind-mmt tool is usable.

    Returns 1 (truthy) when running `valgrind --tool=mmt` with no program
    produces the expected "no program specified" error, i.e. the tool exists.
    NOTE(review): any other return code is passed through unchanged and a
    mismatched first stderr line yields 0 -- callers only test truthiness.
    """
    cmd = "valgrind --tool=mmt"
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if not proc.returncode == 1:
        return proc.returncode
    lines = stderr.decode().splitlines()
    if not lines[0] == "valgrind: no program specified":
        return 0
    return 1
def main():
    """Parse CLI options, collect CUPTI domain/event/metric reports for the
    requested chipset, and package everything into a compressed tarball."""
    try:
        long_opts = ["chipset=",
                     "overwrite"]
        opts, args = getopt.getopt(sys.argv[1:], "a:o", long_opts)
    except getopt.GetoptError as err:
        print (str(err))
        sys.exit(2)

    chipset = None
    overwrite = False

    for opt, arg in opts:
        if opt in ("-a", "--chipset"):
            chipset = str(arg)
        elif opt in ("-o", "--overwrite"):
            overwrite = True
        else:
            assert False, "Unknown option!"

    if chipset == None:
        print ("Must specify a chipset (-a)")
        sys.exit(2)

    output_dir = "nv" + chipset + "_cupti_report"
    if os.path.exists(output_dir):
        if not overwrite:
            print ("Output directory already exists, try --overwrite!")
            sys.exit(2)
        else:
            shutil.rmtree(output_dir, ignore_errors=True)
    os.mkdir(output_dir)
    os.chdir(output_dir)

    if not dry_run_valgrind_mmt():
        print ("You are not running valgrind-mmt!")
        sys.exit(2)

    # FIX: the message previously said "demt" although the tool is "demmt".
    if not shutil.which("demmt"):
        print ("Failed to find demmt!")

    # Check CUPTI samples.  NOTE(review): missing samples are only reported,
    # not fatal -- the later steps will then fail; confirm this is intended.
    path = get_cupti_path() + "/sample/cupti_query/cupti_query"
    if not os.path.exists(path):
        print ("Failed to find cupti_query!")

    path = get_cupti_path() + "/sample/callback_event/callback_event"
    if not os.path.exists(path):
        print ("Failed to find callback_event!")

    path = get_cupti_path() + "/sample/callback_metric/callback_metric"
    if not os.path.exists(path):
        print ("Failed to find callback_metric!")

    cupti_save_domains_list()
    cupti_save_events_list()
    cupti_save_metrics_list()

    cupti_trace_all_events(chipset)
    cupti_trace_all_metrics(chipset)

    print ("Creating a tarball...")
    os.chdir("../")
    if shutil.which("tar"):
        archive_name = output_dir + ".tar.gz"
        cmd = "tar -czf " + archive_name + " " + output_dir
        proc = subprocess.Popen(cmd.split())
        stdout, stderr = proc.communicate()
        if not proc.returncode == 0:
            return proc.returncode
        if shutil.which("xz"):
            cmd = "xz " + archive_name
            proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
            stdout, stderr = proc.communicate()
            if not proc.returncode == 0:
                return proc.returncode

    print ("Thanks for running cupti_trace! :-)")
if __name__ == "__main__":
main()
| Python | 0.000002 | |
2838711c7fa12525c2ae6670bb130999654fe7ea | add shortest-palindrome | vol5/shortest-palindrome/shortest-palindrome.py | vol5/shortest-palindrome/shortest-palindrome.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-11-19 20:43:07
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-11-19 20:43:21
class Solution(object):
    def shortestPalindrome(self, s):
        """Return the shortest palindrome formed by prepending characters to s.

        Builds the KMP prefix function of ``s + '#' + reversed(s)``; its last
        value is the length of the longest palindromic prefix of s, so only
        the remaining suffix needs to be mirrored in front.

        :type s: str
        :rtype: str
        """
        ss = s + '#' + s[::-1]
        n = len(ss)
        p = [0] * n
        # range() instead of Python-2-only xrange() so this runs on Python 3.
        for i in range(1, n):
            j = p[i - 1]
            while j > 0 and ss[i] != ss[j]:
                j = p[j - 1]
            # Compare within ss (the original indexed s[j], which only worked
            # because j never exceeds len(s) here).
            p[i] = j + (ss[i] == ss[j])
        return s[p[-1]:][::-1] + s
10f72d72e988bf4aa570e21b0e0d6979edb843a7 | add example "fit text path into a box" | examples/addons/fit_text_path_into_box.py | examples/addons/fit_text_path_into_box.py | # Copyright (c) 2021, Manfred Moitzi
# License: MIT License
from pathlib import Path
import ezdxf
from ezdxf import path, zoom
from ezdxf.math import Matrix44
from ezdxf.tools import fonts
from ezdxf.addons import text2path
# Demo: render a text string as outline paths and squeeze it into a 4x2 box.
DIR = Path('~/Desktop/Outbox').expanduser()
fonts.load()
doc = ezdxf.new()
doc.layers.new('OUTLINE')
doc.layers.new('FILLING')
msp = doc.modelspace()
attr = {'layer': 'OUTLINE', 'color': 1}
ff = fonts.FontFace(family="Arial")
# Target box dimensions (width x height).
sx, sy = 4, 2
# create target box
msp.add_lwpolyline([(0, 0), (sx, 0), (sx, sy), (0, sy)], close=True)
text_as_paths = text2path.make_paths_from_str("Squeeze Me", ff)
# uniform=False stretches x and y independently to fill the box exactly.
final_paths = path.fit_paths_into_box(text_as_paths, size=(sx, sy, 0), uniform=False)
# scale(-1, 1, 1) mirrors the fitted paths about the Y axis.
final_paths = path.transform_paths(final_paths, Matrix44.scale(-1, 1, 1))
# move bottom/left corner to (0, 0) if required:
bbox = path.bbox(final_paths)
dx, dy, dz = -bbox.extmin
final_paths = path.transform_paths(final_paths, Matrix44.translate(dx,dy, dz))
# Flatten the curves into polylines (max deviation 0.01) and save the drawing.
path.render_lwpolylines(msp, final_paths, distance=0.01, dxfattribs=attr)
zoom.extents(msp)
doc.saveas(DIR / 'text2path.dxf')
| Python | 0 | |
e075b0b1c8d581107209e869eda7f6ff07a7321c | Add script to create a historic->modern dictionary | reverse_dict.py | reverse_dict.py | """Reverse modern->historic spelling variants dictonary to historic->modern
mappings
"""
import argparse
import codecs
import json
from collections import Counter
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('input_dict', help='the name of the json file '
                        'containing the modern->spelling variants dictionary')
    args = parser.parse_args()
    dict_file = args.input_dict
    modern_dict = {}
    historic_dict = {}
    # Load the modern->variants mapping (NOTE: Python 2 style throughout --
    # iteritems(), print statements, json.load(encoding=...)).
    with codecs.open(dict_file, 'rb', 'utf8') as f:
        modern_dict = json.load(f, encoding='utf-8')
    # Invert the mapping: historic variant -> Counter of modern words.
    for modern_word, variants in modern_dict.iteritems():
        for var in variants:
            if var not in historic_dict.keys():
                historic_dict[var] = Counter()
            historic_dict[var][modern_word] += 1
    print '#words in modern dict: {}'.format(len(modern_dict))
    print '#words in historic dict: {}'.format(len(historic_dict))
    # find historic words that map to multiple terms
    mappings_counter = Counter()
    print '\nhistoric word\tmodern variant\tfrequency'
    for w, mappings in historic_dict.iteritems():
        # Zero-padded key so the later lexicographic sort orders numerically.
        mappings_counter[str(len(mappings)).zfill(3)] += 1
        if len(mappings) > 1:
            for variant, freq in mappings.iteritems():
                print '{}\t{}\t{}'.format(w, variant, freq)
    # Histogram: number of modern mappings -> number of historic words.
    mp = mappings_counter.keys()
    mp.sort()
    print '\n#mappings\t#historic words'
    for m in mp:
        print '{}\t{}'.format(m, mappings_counter[m])
| Python | 0 | |
8a4175461e36c11356b41e28ca250121f200dc7e | add a new basic longliving statement checker which looks at the health of items on the server | datastage/dataset/longliving/sword_statement_check.py | datastage/dataset/longliving/sword_statement_check.py | import logging
import time
import thread
import urllib2
import sys
from django_longliving.base import LonglivingThread
from datastage.dataset import SUBMISSION_QUEUE
from datastage.web.dataset.models import DatasetSubmission
from datastage.web.dataset import openers
from sword2 import Connection, UrlLib2Layer
logger = logging.getLogger(__name__)
# list of all the error states that we can see in the statement that we want
# to be able to react to
ERROR_STATES = [
"http://databank.ox.ac.uk/errors/UnzippingIssue"
]
# NOTE: this thread is resistant to being stopped. A KeyboardInterrupt will
# NOT suffice, it will need to be killed with a "kill <pid>" on the command
# line
class SwordStatementCheckThread(LonglivingThread):
    """Long-lived thread that polls the SWORD statement of every dataset
    submission and marks submissions whose server-side state is an error.
    """

    # FIXME: not quite sure how the __init__ function on LonglivingThread,
    # so setting this as a class variable for the time being

    # this is how long the thread will sleep between requests
    throttle = 5

    # this is how many times the thread will re-try contacting the server if
    # it suffers a major exception (i.e. not a sword exception, but something
    # network related)
    retry_count = 10

    def run(self):
        # just keep going until the thread is killed
        while True:
            self._check_all_datasets()

    def _check_all_datasets(self):
        # One full pass over every DatasetSubmission row.
        dss = DatasetSubmission.objects.all()
        for dataset_submission in dss:
            self._check_dataset(dataset_submission)

    def _check_dataset(self, dataset_submission):
        # NOTE(review): only urllib2.URLError is retried (immediately, with no
        # backoff); any other exception propagates out of run() -- confirm.
        retry_counter = 0
        while retry_counter < SwordStatementCheckThread.retry_count:
            try:
                logger.info("Checking state of dataset at " + dataset_submission.remote_url)
                # Authenticate as the submitting user against the repository.
                opener = openers.get_opener(dataset_submission.repository,
                                            dataset_submission.submitting_user)
                conn = Connection(error_response_raises_exceptions=False, http_impl=UrlLib2Layer(opener))
                receipt = conn.get_deposit_receipt(dataset_submission.remote_url)
                statement = conn.get_ore_sword_statement(receipt.ore_statement_iri)
                for state_uri, state_desc in statement.states:
                    logger.info("Dataset has state URI: " + state_uri)
                    if state_uri in ERROR_STATES:
                        dataset_submission.status = 'error'
                        dataset_submission.save()
                        logger.info("URI: " + state_uri + " is an error state ... setting 'error' state on submission record")
                # Throttle between datasets so we don't hammer the server.
                time.sleep(SwordStatementCheckThread.throttle)
            except urllib2.URLError as e:
                # if we get an exception, try again up to the limit
                retry_counter += 1
                continue
            else:
                # if we don't get an exception, we're done
                return
"""
def run(self):
client = self.get_redis_client()
for key, pk in self.watch_queue(client, SUBMISSION_QUEUE, True):
try:
self.process_item(client, pk)
except Exception:
logger.exception("Failed to process submission")
try:
dataset_submission = DatasetSubmission.objects.get(pk=pk)
dataset_submission.status = 'error'
dataset_submission.save()
except Exception:
logger.exception("Failed to mark submission as failed")
"""
"""
def process_item(self, client, pk):
dataset_submission = DatasetSubmission.objects.get(pk=pk)
logger.info("Received submission request for %r to %r",
dataset_submission.identifier,
dataset_submission.repository.homepage)
dataset = dataset_submission.dataset
opener = openers.get_opener(dataset_submission.repository,
dataset_submission.submitting_user)
def update_status(status):
logger.debug("Status updated to %r", status)
dataset_submission.status = status
dataset_submission.save()
dataset.complete_submission(opener, dataset_submission, update_status)
logger.info("Submission completed")
"""
| Python | 0 | |
34886d13155af33acd043ddcd0d87738a729115a | Add files via upload | faraday_cnn.py | faraday_cnn.py | # ============================================================================
# Convolutional Neural Network for training a classifier to determine the
# complexity of a faraday spectrum.
# Written using Keras and TensorFlow by Shea Brown
# https://sheabrownastro.wordpress.com/
# https://astrophysicalmachinelearning.wordpress.com/
# ============================================================================
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (7,7) # Make the figures a bit bigger
np.random.seed(11) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution1D, MaxPooling1D, GaussianNoise
from keras.utils import np_utils
from keras import backend as K
# Function to regularize the feature vector of each sample (row)
# ---------------------------------------------------------------
def regularizeData(data):
    """Shift the data to zero mean and scale by its maximum value.

    NOTE(review): despite the surrounding comment about per-sample (row)
    regularization, mean() and max() here act over the whole array -- the
    per-row variant is left commented out in the original.
    """
    arr = np.asarray(data)
    return (arr - arr.mean()) / arr.max()
# Training hyper-parameters.
batch_size = 5
nb_classes = 2
nb_epoch = 5
# Load some test data
X_train=np.load('x_train.npy')
y_train=np.load('y_train.npy')
X_test=np.load('x_test.npy')
y_test=np.load('y_test.npy')
# input spectrum dimensions
spec_length = 200
# number of convolutional filters to use
nb_filters = 64
# size of pooling area for max pooling
pool_length = 2
# convolution kernel size
filter_length = 9
# the data, shuffled and split between train and test sets
#X_train, y_train, X_test, y_test = load_wtf_data()
#print("The training data shape is",X_train.shape)
#print("The training target shape is",len(y_train))
#X_train = regularizeData(X_train)
#X_test = regularizeData(X_test)
#if K.image_dim_ordering() == 'th':
#    X_train = X_train.reshape(X_train.shape[0], 1, spec_length)
#    X_test = X_test.reshape(X_test.shape[0], 1, spec_length)
#    input_shape = (2, spec_length)
#else:
#    X_train = X_train.reshape(X_train.shape[0], spec_length, 1)
#    X_test = X_test.reshape(X_test.shape[0], spec_length, 1)
# NOTE(review): assumes the .npy training data is already shaped
# (samples, 200, 2) -- confirm against the generation script.
input_shape = (spec_length, 2)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print(Y_train)
print(Y_test)
# Three conv/pool stages followed by two dense layers and a softmax head.
model = Sequential()
model.add(Convolution1D(nb_filters, filter_length,
                        border_mode='valid',
                        input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(Dropout(0.5))
model.add(GaussianNoise(0.4))
model.add(Convolution1D(2*nb_filters, filter_length))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(Dropout(0.6))
model.add(Convolution1D(2*nb_filters, filter_length))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(Dropout(0.6))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy',
              optimizer='adadelta',
              metrics=['binary_accuracy'])
#model.load_weights('possum_weights', by_name=False)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# NOTE(review): the message says 'possum_weights' but the file written below
# is 'wtf_weights' -- confirm which name is intended.
print('Saving the weights in possum_weights')
model.save_weights('wtf_weights')
# The predict_classes function outputs the highest probability class
# according to the trained classifier for each input example.
predicted_classes = model.predict_classes(X_test)
print("The shape of the predicted classes is",predicted_classes.shape)
print("Predicted classes",predicted_classes)
print("Real classes",y_test)
# Check which items we got right / wrong
correct_indices = np.nonzero(predicted_classes == y_test)[0]
incorrect_indices = np.nonzero(predicted_classes != y_test)[0]
# 2x2 confusion matrix counts: ff/ft = true class 0, tf/tt = true class 1.
ff=sum(predicted_classes[y_test == 0] == 0)
ft=sum(predicted_classes[y_test == 0] == 1)
tf=sum(predicted_classes[y_test == 1] == 0)
tt=sum(predicted_classes[y_test == 1] == 1)
print('The confusion matrix is')
print(ff,tf)
print(ft,tt)
| Python | 0 | |
64e04143fec40f11cc573140d53bd96765426465 | Add scripts/evt2image.py to make image from event file | scripts/evt2image.py | scripts/evt2image.py | #!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <liweitianux@live.com>
# MIT license
"""
Make image by binning the event file, and update the manifest.
TODO: use logging module instead of print()
"""
import sys
import argparse
import subprocess
from manifest import get_manifest
from setup_pfiles import setup_pfiles
from chandra_acis import get_chips
def make_image(infile, outfile, chips, erange, fov, clobber=False):
    """
    Make image by binning the event file.

    Parameters
    ----------
    infile : str
        Path to the input event file
    outfile : str
        Filename and path of the output image file
    chips : str
        Chips of interest, e.g., ``7`` or ``0-3``
    erange : str
        Energy range of interest, e.g., ``700-7000``
    fov : str
        Path to the FoV file
    clobber : bool, optional
        Whether to overwrite an existing output file (default: False)
    """
    # CIAO filter syntax uses ':' for ranges, so convert the '-' separators.
    chips = chips.replace("-", ":")
    erange = erange.replace("-", ":")
    clobber = "yes" if clobber else "no"
    fregion = "sky=region(%s[ccd_id=%s])" % (fov, chips)
    fenergy = "energy=%s" % erange
    fbin = "bin sky=::1"
    # Reset dmcopy parameters, then bin the filtered events into an image.
    subprocess.check_call(["punlearn", "dmcopy"])
    subprocess.check_call([
        "dmcopy", "infile=%s[%s][%s][%s]" % (infile, fregion, fenergy, fbin),
        "outfile=%s" % outfile, "clobber=%s" % clobber
    ])
def main():
    """Parse arguments, bin the event file into an image, update the manifest."""
    parser = argparse.ArgumentParser(
        description="Make image by binning the event file")
    parser.add_argument("--elow", dest="elow", type=int, default=700,
                        help="lower energy limit [eV] of the output image " +
                        "(default: 700 [eV])")
    # FIX: 'default=7000' was missing, so args.ehigh was None despite the
    # help text and the energy range became '700-None'.
    parser.add_argument("--ehigh", dest="ehigh", type=int, default=7000,
                        help="upper energy limit [eV] of the output image " +
                        "(default: 7000 [eV])")
    parser.add_argument("-i", "--infile", dest="infile",
                        help="event file from which to create the image " +
                        "(default: evt2_clean from manifest)")
    parser.add_argument("-o", "--outfile", dest="outfile",
                        help="output image filename (default: " +
                        "build in format 'img_c<chip>_e<elow>-<ehigh>.fits')")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
                        help="show verbose information")
    parser.add_argument("-C", "--clobber", dest="clobber", action="store_true",
                        help="overwrite existing file")
    args = parser.parse_args()

    setup_pfiles(["dmkeypar", "dmcopy"])

    manifest = get_manifest()
    fov = manifest.getpath("fov")
    infile = args.infile if args.infile else manifest.getpath("evt2_clean")
    chips = get_chips(infile, sep="-")
    erange = "{elow}-{ehigh}".format(elow=args.elow, ehigh=args.ehigh)
    if args.outfile:
        outfile = args.outfile
    else:
        # FIX: the keyword was passed as 'elow=erange', which made
        # str.format() raise KeyError('erange') whenever -o was omitted.
        outfile = "img_c{chips}_e{erange}.fits".format(
            chips=chips, erange=erange)

    if args.verbose:
        print("infile:", infile, file=sys.stderr)
        print("outfile:", outfile, file=sys.stderr)
        print("fov:", fov, file=sys.stderr)
        print("chips:", chips, file=sys.stderr)
        print("erange:", erange, file=sys.stderr)

    make_image(infile, outfile, chips, erange, fov, args.clobber)

    # Add created image to manifest
    key = "img_e{erange}".format(erange=erange)
    manifest.setpath(key, outfile)
if __name__ == "__main__":
main()
| Python | 0 | |
6988a498504b382fd86099d3c037100ad14c62d3 | fix bug, tpl_path is related to simiki source path, not wiki path | simiki/configs.py | simiki/configs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os import path as osp
from pprint import pprint
import yaml
from simiki import utils
def parse_configs(config_file):
# Parse the wiki's YAML config file and return the settings dict with
# derived absolute paths filled in:
#   source      -> <base_dir>/<source>       (markdown input)
#   destination -> <base_dir>/<destination>  (generated HTML output)
#   tpl_path    -> <simiki install>/simiki/themes/<theme>
# Optional 'url'/'keywords'/'description' left as None in the YAML are
# normalized to "".  Exits the process on malformed YAML.
# (Python 2 code: note the 'except Exc, e' syntax and unicode().)
# base_dir here is two directory levels above this file, i.e. the simiki
# source/install tree, used below to locate the bundled themes.
base_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
try:
with open(config_file, "rb") as fd:
# NOTE(review): yaml.load() without an explicit Loader can construct
# arbitrary objects; prefer yaml.safe_load() for untrusted config files.
configs = yaml.load(fd)
except yaml.YAMLError, e:
msg = "Yaml format error in {}:\n{}".format(
config_file,
unicode(str(e), "utf-8")
)
sys.exit(utils.color_msg("error", msg))
# Default base_dir to the directory containing the config file itself.
if configs["base_dir"] is None:
configs["base_dir"] = osp.dirname(osp.realpath(config_file))
configs.update(
# The directory to store markdown files
source = osp.join(configs["base_dir"], configs["source"]),
# The directory to store the generated html files
destination = osp.join(configs["base_dir"], configs["destination"]),
# The path of html template file
# (rooted at the simiki source tree, not the wiki's base_dir)
tpl_path = osp.join(base_dir, "simiki/themes", configs["theme"]),
)
if configs.get("url", "") is None:
configs["url"] = ""
if configs.get("keywords", "") is None:
configs["keywords"] = ""
if configs.get("description", "") is None:
configs["description"] = ""
return configs
# Ad-hoc smoke test: parse the repository-level _config.yml and dump it.
if __name__ == "__main__":
BASE_DIR = osp.dirname(osp.dirname(osp.realpath(__file__)))
config_file = osp.join(BASE_DIR, "_config.yml")
pprint(parse_configs(config_file))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os import path as osp
from pprint import pprint
import yaml
from simiki import utils
def parse_configs(config_file):
# Parse the wiki's YAML config file and return the settings dict with
# derived absolute paths ('source', 'destination', 'tpl_path') and with
# None-valued optional fields ('url'/'keywords'/'description')
# normalized to "".  Exits the process on malformed YAML.
# (Python 2 code: note the 'except Exc, e' syntax and unicode().)
#base_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
try:
with open(config_file, "rb") as fd:
# NOTE(review): yaml.load() without an explicit Loader can construct
# arbitrary objects; prefer yaml.safe_load() for untrusted config files.
configs = yaml.load(fd)
except yaml.YAMLError, e:
msg = "Yaml format error in {}:\n{}".format(
config_file,
unicode(str(e), "utf-8")
)
sys.exit(utils.color_msg("error", msg))
# Default base_dir to the directory containing the config file itself.
if configs["base_dir"] is None:
configs["base_dir"] = osp.dirname(osp.realpath(config_file))
configs.update(
# The directory to store markdown files
source = osp.join(configs["base_dir"], configs["source"]),
# The directory to store the generated html files
destination = osp.join(configs["base_dir"], configs["destination"]),
# The path of html template file
# NOTE(review): resolved against the *wiki's* base_dir, so themes
# bundled with the simiki package itself would not be found -- verify
# against where 'simiki/themes' actually lives on disk.
tpl_path = osp.join(configs["base_dir"], "simiki/themes", configs["theme"]),
)
if configs.get("url", "") is None:
configs["url"] = ""
if configs.get("keywords", "") is None:
configs["keywords"] = ""
if configs.get("description", "") is None:
configs["description"] = ""
return configs
# Ad-hoc smoke test: parse the repository-level _config.yml and dump it.
if __name__ == "__main__":
BASE_DIR = osp.dirname(osp.dirname(osp.realpath(__file__)))
config_file = osp.join(BASE_DIR, "_config.yml")
pprint(parse_configs(config_file))
| Python | 0 |
6adae60ee018966199ee1f8e2120b2eb65dcdc9e | Add stub for registration executable. | nanshe/nanshe/nanshe_registerer.py | nanshe/nanshe/nanshe_registerer.py | #!/usr/bin/env python
# Module metadata for the nanshe registration-executable stub.
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Feb 20, 2015 13:00:51 EST$"
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.