commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
b14d24bd3281cab6e3c7f1e810ec50a0de5fa7eb | Add test for working file routes | cgwire/zou | test/files/test_route_working_files.py | test/files/test_route_working_files.py | from test.base import ApiDBTestCase
class TaskLastWorkingFilesTestCase(ApiDBTestCase):
def setUp(self):
super(TaskLastWorkingFilesTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project()
self.generate_fixture_entity_type()
self.generate_fixture_entity()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.generate_fixture_department()
self.generate_fixture_task_type()
self.generate_fixture_task_status()
self.generate_fixture_task_status_wip()
self.generate_fixture_person()
self.generate_fixture_assigner()
self.generate_fixture_task()
self.generate_fixture_shot_task()
self.generate_fixture_software()
self.generate_fixture_shot_working_file()
self.generate_fixture_file_status()
self.generate_fixture_output_type()
self.generate_fixture_output_file()
self.maxDiff = None
self.task_id = self.task.id
def test_get_last_working_files(self):
self.generate_fixture_working_file(name="main", revision=1)
self.generate_fixture_working_file(name="main", revision=2)
self.generate_fixture_working_file(name="main", revision=3)
self.generate_fixture_working_file(name="main", revision=4)
working_file_main = self.generate_fixture_working_file(
name="main",
revision=5
)
self.generate_fixture_working_file(name="hotfix", revision=1)
self.generate_fixture_working_file(name="hotfix", revision=2)
working_file_hotfix = self.generate_fixture_working_file(
name="hotfix",
revision=3
)
working_file_wip = self.generate_fixture_working_file(
name="wip",
revision=1
)
working_files = self.get(
"/data/tasks/%s/last-working-files" % self.task.id
)
self.assertEqual(
working_files["main"],
working_file_main.serialize()
)
self.assertEqual(
working_files["hotfix"],
working_file_hotfix.serialize()
)
self.assertEqual(
working_files["wip"],
working_file_wip.serialize()
)
def test_new_working_file(self):
path = "/data/tasks/%s/working-files/new" % self.task_id
working_file = self.post(path, {
"name": "main",
"description": "description test",
"comment": "comment test"
})
self.assertEqual(working_file["revision"], 1)
path = "/data/tasks/%s/working-files/new" % self.task_id
working_file = self.post(path, {
"name": "main",
"description": "description test",
"comment": "comment test"
})
self.assertEqual(working_file["revision"], 2)
working_file = self.post(path, {
"name": "main",
"description": "description test",
"comment": "comment test"
})
self.assertEqual(working_file["revision"], 3)
self.assertEqual(
working_file["path"],
"/simple/productions/cosmos_landromat/assets/props/tree/shaders/"
"3ds_max/cosmos_landromat_props_tree_shaders_main_v003"
)
| agpl-3.0 | Python | |
23c9fe71f5f35afa54990a8383b5b6a0e75fb847 | Add tests for app.main.forms.AdminEmailAddressValidator | alphagov/digitalmarketplace-admin-frontend,alphagov/digitalmarketplace-admin-frontend,alphagov/digitalmarketplace-admin-frontend,alphagov/digitalmarketplace-admin-frontend | tests/app/main/test_form_validators.py | tests/app/main/test_form_validators.py | import mock
import pytest
from flask.ext.wtf import Form
from wtforms.fields.core import Field
from wtforms.validators import StopValidation
from app.main.forms import AdminEmailAddressValidator
@mock.patch('app.main.forms.data_api_client')
class TestAdminEmailAddressValidator(object):
def setup_method(self):
self.form_mock = mock.MagicMock(Form)
self.field_mock = mock.MagicMock(Field, data='the_email_address')
self.validator = AdminEmailAddressValidator(message='The message passed to validator')
def test_admin_email_address_validator_calls_api(self, data_api_client):
self.validator(self.form_mock, self.field_mock)
data_api_client.email_is_valid_for_admin_user.assert_called_once_with('the_email_address')
def test_admin_email_address_validator_raises_with_invalid_response(self, data_api_client):
data_api_client.email_is_valid_for_admin_user.return_value = False
with pytest.raises(StopValidation, match='The message passed to validator'):
self.validator(self.form_mock, self.field_mock)
def test_admin_email_address_validator_passes_with_valid_response(self, data_api_client):
data_api_client.email_is_valid_for_admin_user.return_value = True
assert self.validator(self.form_mock, self.field_mock) is None
| mit | Python | |
aa5c4fde763467cae63c205df8e4aaf7328ab713 | Add 'git cl format' presubmit check to src/device | chuan9/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,chuan9/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Plu
to-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk | device/PRESUBMIT.py | device/PRESUBMIT.py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def CheckChangeOnUpload(input_api, output_api):
results = []
results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
return results
| bsd-3-clause | Python | |
c654034a0da8b898dd55b03a895bf0dfdc6257bf | make vector2d hashable | helloTC/LearnPython | fluent_python/object/vector2d_v2.py | fluent_python/object/vector2d_v2.py | #!/usr/bin/env python
# encoding=utf-8
class vector2d(object):
def __init__(self, x, y):
self.__x = float(x)
self.__y = float(y)
@property
def x(self):
return self.__x
@property
def y(self):
return self.__y
def __iter__(self):
return (i for i in (self.x, self.y))
| mit | Python | |
fb3b3d2f9798872742541f7eae2d7b3e2a8a95ab | Add new abstraction with recursion | studiawan/pygraphc | pygraphc/abstraction/AutoAbstractionRecursion.py | pygraphc/abstraction/AutoAbstractionRecursion.py | import networkx as nx
import os
from pygraphc.preprocess.CreateGraphModel import CreateGraphModel
from pygraphc.clustering.Louvain import Louvain
class AutoAbstraction(object):
def __init__(self, log_file):
self.log_file = log_file
self.clusters = []
def __prepare_graph(self, cluster=None):
# get subgraph
if cluster:
subgraph = [int(node) for node in cluster]
graph_noattributes = self.graph_noattributes.subgraph(subgraph)
# create graph
else:
self.graph_model = CreateGraphModel(self.log_file)
self.graph = self.graph_model.create_graph()
self.graph_noattributes = self.graph_model.create_graph_noattributes()
self.graph_copy = self.graph.copy()
graph_noattributes = self.graph_noattributes
# write to gexf file
gexf_file = os.path.join('/', 'tmp', self.log_file.split('/')[-1] + '.gexf')
nx.write_gexf(graph_noattributes, gexf_file)
return gexf_file
def __get_community(self, cluster=None):
# prepare graph or subgraph
if cluster:
gexf_file = self.__prepare_graph(cluster)
else:
gexf_file = self.__prepare_graph()
# graph clustering based on Louvain community detection
louvain = Louvain(gexf_file)
clusters = louvain.get_cluster()
# stop-recursion case: if there is no more partition
if len(clusters.keys()) == 1:
self.clusters.append(clusters.values()[0])
print 'cluster with len=1', clusters.values()[0]
# recursion case: graph clustering
else:
for cluster_id, cluster in clusters.iteritems():
self.__get_community(cluster)
def get_abstraction(self):
self.__get_community()
# aa = AutoAbstraction('/home/hudan/Git/datasets/casper-rw/logs/messages')
# aa.get_abstraction()
| mit | Python | |
80fd2d73f7a206b5b517cb455da457fed9dc6403 | Add Rother District Council logo for letters | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0180_another_letter_org.py | migrations/versions/0180_another_letter_org.py | """empty message
Revision ID: 0180_another_letter_org
Revises: 0179_billing_primary_const
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0180_another_letter_org'
down_revision = '0179_billing_primary_const'
from alembic import op
NEW_ORGANISATIONS = [
('504', 'Rother District Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
| mit | Python | |
1f8dd52b6a40b834d459ea356457429969393339 | add migration | liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin | meinberlin/apps/cms/migrations/0035_remove_project_block_from_homepage.py | meinberlin/apps/cms/migrations/0035_remove_project_block_from_homepage.py | # Generated by Django 2.2.18 on 2021-02-18 10:01
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('meinberlin_cms', '0034_emailformfield_clean_name'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.core.fields.StreamField([('paragraph', wagtail.core.blocks.RichTextBlock(template='meinberlin_cms/blocks/richtext_block.html')), ('call_to_action', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock()), ('link', wagtail.core.blocks.CharBlock()), ('link_text', wagtail.core.blocks.CharBlock(label='Link Text', max_length=50))])), ('image_call_to_action', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(max_length=80)), ('body', wagtail.core.blocks.RichTextBlock()), ('link', wagtail.core.blocks.CharBlock()), ('link_text', wagtail.core.blocks.CharBlock(label='Link Text', max_length=50))])), ('columns_text', wagtail.core.blocks.StructBlock([('columns_count', wagtail.core.blocks.ChoiceBlock(choices=[(2, 'Two columns'), (3, 'Three columns'), (4, 'Four columns')])), ('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.RichTextBlock(label='Column body')))])), ('activities', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(label='Heading')), ('count', wagtail.core.blocks.IntegerBlock(default=5, label='Count'))])), ('accordion', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.RichTextBlock(required=False))])), ('infographic', wagtail.core.blocks.StructBlock([('text_left', wagtail.core.blocks.CharBlock(max_length=50)), ('text_center', wagtail.core.blocks.CharBlock(max_length=50)), ('text_right', wagtail.core.blocks.CharBlock(max_length=50))])), ('map_teaser', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('body', wagtail.core.blocks.RichTextBlock())]))]),
),
]
| agpl-3.0 | Python | |
16e52502bf55075c58022fa35e1673a8a0d5f4bc | Add a test for UnionFind fix. | beni55/networkx,jni/networkx,aureooms/networkx,wasade/networkx,RMKD/networkx,dhimmel/networkx,RMKD/networkx,Sixshaman/networkx,sharifulgeo/networkx,blublud/networkx,jfinkels/networkx,harlowja/networkx,dmoliveira/networkx,kernc/networkx,jakevdp/networkx,ghdk/networkx,aureooms/networkx,blublud/networkx,jni/networkx,ghdk/networkx,michaelpacer/networkx,dhimmel/networkx,jcurbelo/networkx,ionanrozenfeld/networkx,blublud/networkx,harlowja/networkx,debsankha/networkx,yashu-seth/networkx,sharifulgeo/networkx,jni/networkx,cmtm/networkx,nathania/networkx,RMKD/networkx,NvanAdrichem/networkx,ltiao/networkx,bzero/networkx,debsankha/networkx,harlowja/networkx,chrisnatali/networkx,tmilicic/networkx,farhaanbukhsh/networkx,kernc/networkx,bzero/networkx,OrkoHunter/networkx,sharifulgeo/networkx,JamesClough/networkx,kernc/networkx,nathania/networkx,ghdk/networkx,jakevdp/networkx,SanketDG/networkx,dmoliveira/networkx,bzero/networkx,ionanrozenfeld/networkx,dmoliveira/networkx,farhaanbukhsh/networkx,chrisnatali/networkx,chrisnatali/networkx,ionanrozenfeld/networkx,nathania/networkx,goulu/networkx,aureooms/networkx,jakevdp/networkx,farhaanbukhsh/networkx,debsankha/networkx,andnovar/networkx,dhimmel/networkx | networkx/utils/tests/test_unionfind.py | networkx/utils/tests/test_unionfind.py | from nose.tools import *
import networkx as nx
def test_unionfind():
# Fixed by: 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8
# Previously (in 2.x), the UnionFind class could handle mixed types.
# But in Python 3.x, this causes a TypeError such as:
# TypeError: unorderable types: str() > int()
#
# Now we just make sure that no exception is raised.
x = nx.utils.UnionFind()
x.union(0, 'a')
| bsd-3-clause | Python | |
f11957f7b9f60ae1afbaf41fc558b2e600d2822c | Add basic smoketest | ebmdatalab/openprescribing,ebmdatalab/openprescribing,ebmdatalab/openprescribing,annapowellsmith/openpresc,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc,annapowellsmith/openpresc | openprescribing/frontend/tests/commands/test_infer_practice_boundaries.py | openprescribing/frontend/tests/commands/test_infer_practice_boundaries.py | from django.core.management import call_command
from django.test import TestCase
from frontend.models import Practice
class InferPracticeBoundariesTestCase(TestCase):
fixtures = ['orgs', 'practices']
def test_basic_smoketest(self):
should_have_boundary = Practice.objects.filter(
setting=4, location__isnull=False
)
has_boundary = Practice.objects.filter(boundary__isnull=False)
self.assertGreater(should_have_boundary.count(), 0)
self.assertEqual(has_boundary.count(), 0)
call_command('infer_practice_boundaries')
self.assertEqual(has_boundary.count(), should_have_boundary.count())
| mit | Python | |
81f3466990e4161d500a21a1429b14b8de836f0d | Create main.py | teodors/genertate-cfg | main.py | main.py | #!/usr/bin/python
import os
max_length = 8 # maximum length of a word (words can be < or == )
words = [] # store all words
productions = { # production rules
"E": ("E+E", "E*E", "(E)", "xI"),
"I": ("0I", "1I", "0", "1")
}
terminals = ['0', '1', 'x', '+', '*', '(', ')']
# generate all possible strings
def generate(symbols):
global productions
global words
# discard strings that are too long
if len(symbols) > max_length: return
# add new words
if is_word(symbols) and not old_word(symbols): words.append(symbols)
for i in range(0, len(symbols)):
# skip terminal simbols (since nothing to replace)
if is_terminal(symbols[i]): continue
for j in productions:
if symbols[i] == j:
for k in range(0, len(productions[j])):
temp = symbols[0:i] + productions[j][k] + symbols[i+1: len(symbols)]
generate(temp)
# checks if symbol is terminal or non-terminal
def is_terminal(symbol):
global terminals
for i in range(0, len(terminals)):
if terminals[i] == symbol: return 1
return 0
# checks if word already was generated somewhere in past
def old_word(symbols):
for i in range(0, len(words)):
if symbols == words[i]: return 1
return 0
# checks if string of simbols is word (e.g. consists only of terminal symbols)
def is_word(symbols):
for i in range(0, len(symbols)):
if not is_terminal(symbols[i]): return 0
return 1
# prints array of all words
def print_words():
global words
print(words)
# print total words generated
def print_total():
global words
print(len(words))
# UI
max_length = int(raw_input("Please enter maximum length of a word:\n"))
print "Generating ..."
generate("E")
print_words()
print_total()
| cc0-1.0 | Python | |
8ebe878fa4898939f210cf6a6918410781f964a9 | Add skeleton NoteBag.py | Sodel-the-Vociferous/NoteBag | NoteBag.py | NoteBag.py | #!/usr/bin/python -B
# For getting the config file
import configparser
import os.path
from os.path import abspath, dirname, realpath, join as join_path
from sys import argv
# Widgets
from tkinter import Button, Entry, Frame, Label, Listbox, Scrollbar, Tk
# Constants
from tkinter import BOTH, BOTTOM, END, LEFT, N, S, W, E, X, Y
def read_config(filename):
config_dir = abspath(realpath(dirname(argv[0])))
config_path = join_path(config_dir, filename)
config = configparser.ConfigParser()
config.read(config_path)
return config
class NoteBag:
config = None
notes = None
# Class ("Static") Members
CONFIG_FILENAME = "Notebag.ini"
NOTES_FILENAME = "NoteFiles.pkl"
def __init__(self, master):
self.config = read_config(self.CONFIG_FILENAME)
self.notes = {}
## High-level Layout
input_frame = Frame(master)
notes_frame = Frame(master)
input_frame.pack(fill=X, padx=15)
notes_frame.pack(fill=BOTH, expand=True, padx=10, pady=10)
## Input Frame Setup
note_name_label = Label(input_frame, text="Note Name: ")
note_name_label.pack(side=LEFT)
note_name_entry = Entry(input_frame)
note_name_entry.pack(side=LEFT, fill=X, expand=True)
search_note_button = Button(input_frame, text="Search")
search_note_button.pack(side=LEFT)
add_note_button = Button(input_frame, text="Add")
add_note_button.pack(side=LEFT)
## Notes Frame Setup
# List of existing notes
existing_notes_label = Label(notes_frame, text="Existing Notes:")
existing_notes_label.pack(anchor=W)
all_notes = Listbox(notes_frame)
all_notes.pack(side=LEFT, fill=BOTH, expand=True)
notes_scrollbar = Scrollbar(notes_frame)
notes_scrollbar.pack(side=LEFT, fill=Y)
# Link scrollbar to list of notes
all_notes.config(yscrollcommand=notes_scrollbar.set)
notes_scrollbar.config(command=all_notes.yview)
# Test data
# TODO remove
for i in range(1,50):
all_notes.insert(END, str(i))
## Controls
note_controls = Frame(notes_frame)
note_controls.pack(side=LEFT, fill=Y)
open_note_button = Button(note_controls, text="Open")
open_note_button.pack(fill=X)
delete_note_button = Button(note_controls, text="Delete")
delete_note_button.pack(fill=X)
if __name__ == "__main__":
root = Tk()
notebag = NoteBag(root)
root.mainloop()
| mit | Python | |
b27b6b79871cbf814a2dd255cf47e6b3281f6dfb | add assert_equal() util function | mit-dci/lit,mit-dci/lit,mit-dci/lit,mit-dci/lit | test/utils.py | test/utils.py | #!/usr/bin/env python3
# Copyright (c) 2017 The lit developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Utils for lit testing"""
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
| mit | Python | |
6daea04927ff722f03b66f3d101a2b2a2f9b59be | Create Pokedex.py | JacenBoy/ChariCalc | Pokedex.py | Pokedex.py | pokedex = {
"Bulbasaur" : 1,
"Ivysaur" : 2,
"Venusaur" : 3,
"Charmander" : 4,
"Charmeleon" : 5,
"Charizard" : 6,
"Squirtle" : 7,
"Wartortle" : 8,
"Blastoise" : 9,
"Caterpie" : 10,
"Metapod" : 11,
"Butterfree" : 12,
"Weedle" : 13,
"Kakuna" : 14,
"Beedrill" : 15,
"Pidgey" : 16,
"Pidgeotto" : 17,
"Pidgeot" : 18,
"Rattata" : 19,
"Raticate" : 20,
"Spearow" : 21,
"Fearow" : 22,
"Ekans" : 23,
"Arbok" : 24,
"Pikachu" : 25,
"Raichu" : 26,
"Sandshrew" : 27,
"Sandslash" : 28,
"Nidoran F" : 29,
"Nidorina" : 30,
"Nidoqueen" : 31,
"Nidoran M" : 32,
"Nidorino" : 33,
"Nidoking" : 34,
"Clefairy" : 35,
"Clefable" : 36,
"Vulpix" : 37,
"Ninetails" : 38,
"Jigglypuff" : 39,
"Wigglytuff" : 40,
"Zubat" : 41,
"Golbat" : 42,
"Oddish" : 43,
"Gloom" : 44,
"Vileplume" : 45,
"Paras" : 46,
"Parasect" : 47,
"Venonat" : 48,
"Venomoth" : 49,
"Diglett" : 50,
"Dugtrio" : 51,
"Meowth" : 52,
"Persian" : 53,
"Psyduck" : 54,
"Golduck" : 55,
"Mankey" : 56,
"Primeape" : 57,
"Growlithe" : 58,
"Arcanine" : 59,
"Poliwag" : 60,
"Poliwhirl" : 61,
"Poliwrath" : 62,
"Abra" : 63,
"Kadabra" : 64,
"Alakazam" : 65,
"Machop" : 66,
"Machoke" : 67,
"Machamp" : 68,
"Bellsprout" : 69,
"Weepinbell" : 70,
"Victreebel" : 71,
"Tentacool" : 72,
"Tentacruel" : 73,
"Geodude" : 74,
"Graveler" : 75,
"Golem" : 76,
"Ponyta" : 77,
"Rapidash" : 78,
"Slowpoke" : 79,
"Slowbro" : 80,
"Magnemite" : 81,
"Magneton" : 82,
"Farfetch'd" : 83,
"Doduo" : 84,
"Dodrio" : 85,
"Seel" : 86,
"Dewgong" : 87,
"Grimer" : 88,
"Muk" : 89,
"Shellder" : 90,
"Cloyster" : 91,
"Gastly" : 92,
"Haunter" : 93,
"Gengar" : 94,
"Onix" : 95,
"Drowzee" : 96,
"Hypno" : 97,
"Krabby" : 98,
"Kingler" : 99,
"Voltorb" : 100,
"Electrode" : 101,
"Exeggute" : 102,
"Exeggutor" : 103,
"Cubone" : 104,
"Marowak" : 105,
"Hitmonlee" : 106,
"Hitmonchan" : 107,
"Lickitung" : 108,
"Koffing" : 109,
"Weezing" : 110,
"Rhyhorn" : 111,
"Rhydon" : 112,
"Chansey" : 113,
"Tangela" : 114,
"Kangaskhan" : 115,
"Horsea" : 116,
"Seadra" : 117,
"Goldeen" : 118,
"Seaking" : 119,
"Staryu" : 120,
"Starmie" : 121,
"Mr. Mime" : 122,
"Scyther" : 123,
"Jynx" : 124,
"Electabuzz" : 125,
"Magmar" : 126,
"Pinsir" : 127,
"Tauros" : 128,
"Magikarp" : 129,
"Gyarados" : 130,
"Lapras" : 131,
"Ditto" : 132,
"Eevee" : 133,
"Vaporeon" : 134,
"Jolteon" : 135,
"Flareon" : 136,
"Porygon" : 137,
"Omanyte" : 138,
"Omastar" : 139,
"Kabuto" : 140,
"Kabutops" : 141,
"Aerodactyl" : 142,
"Snorlax" : 143,
"Articuno" : 144,
"Zapdos" : 145,
"Moltres" : 146,
"Dratini" : 147,
"Dragonair" : 148,
"Dragonite" : 149,
"Mewtwo" : 150,
"Mew" : 151
}
def dexSearch(name):
if name.capitalize() in pokedex:
return pokedex[name.capitalize()]
else:
if name.title() in pokedex:
return pokedex[name.title()]
else:
return "Entry not found."
| mit | Python | |
4c7d6aef0805356e02a25e2cc70c167e9db5c509 | add new updated (WIP) play.py | raehik/scripts,raehik/scripts | play.py | play.py | #!/usr/bin/env python3
#
# Play a game.
#
import sys, os, argparse, logging
from raehutils import *
class PlayPy:
ERR_MATCH = 3
## __init_logging, run, exit {{{
def __init__(self):
retroarch_cores_dir = os.environ.get("HOME") + "/.config/retroarch/cores"
games_dir = os.environ.get("HOME") + "/media/games-local"
self.games = {
"tome4": {"name": "Tales of Maj'Eyal", "cmd": ["tome4"]},
"pokemon-emerald-jp": {"name": "Pokemon Emerald (JP)", "cmd": ["retroarch","-L",retroarch_cores_dir+"/vbam_libretro.so",games_dir+"/gba/official/Pocket Monsters - Emerald (Japan).gba"]}
}
self.workspace_num = "9"
def __init_logging(self):
self.logger = logging.getLogger(os.path.basename(sys.argv[0]))
lh = logging.StreamHandler()
lh.setFormatter(logging.Formatter("%(name)s: %(levelname)s: %(message)s"))
self.logger.addHandler(lh)
def run(self):
"""Run from CLI: parse arguments, run main."""
self.__init_logging()
self.__parse_args()
self.main()
def exit(self, msg, ret):
"""Exit with explanation."""
self.logger.error(msg)
sys.exit(ret)
## }}}
def __parse_args(self):
self.parser = argparse.ArgumentParser(description="Play a game.")
self.parser.add_argument("-v", "--verbose", help="be verbose", action="count", default=0)
self.parser.add_argument("-q", "--quiet", help="be quiet (overrides -v)", action="count", default=0)
self.parser.add_argument("game", help="unique string of game to play")
self.args = self.parser.parse_args()
if self.args.verbose == 1:
self.logger.setLevel(logging.INFO)
elif self.args.verbose >= 2:
self.logger.setLevel(logging.DEBUG)
if self.args.quiet >= 1:
self.logger.setLevel(logging.NOTSET)
def main(self):
"""Main entrypoint after program setup."""
# get all possible matches
matches = [k for k, v in self.games.items() if k.startswith(self.args.game)]
if len(matches) > 1:
self.exit("query matches multiple games: {}".format(", ".join(matches), PlayPy.ERR_MATCH))
elif len(matches) < 1:
self.exit("no matching games for query: {}".format(self.args.game), PlayPy.ERR_MATCH)
game = self.games[matches[0]]
self.logger.info("matched game: {}".format(game["name"]))
self.logger.info("game cmd: {}".format(" ".join(game["cmd"])))
self.start_game(game)
def start_game(self, game):
"""Start a game."""
cmd_switch_workspace = ["i3-msg","workspace",self.workspace_num]
run_shell_interactive(cmd_switch_workspace)
DEVNULL = open(os.devnull, "wb")
cmd = subprocess.Popen(game["cmd"], stdout=DEVNULL, stderr=DEVNULL)
DEVNULL.close()
sys.exit(cmd.returncode)
if __name__ == "__main__":
program = PlayPy()
program.run()
| mit | Python | |
d168599b9167ede2098aa2fe82375aa95e5ab8b3 | Check if hook parameter is passed to the url | nicocoffo/docker-puller,nicocoffo/docker-puller,glowdigitalmedia/docker-puller,glowdigitalmedia/docker-puller | dockerpuller/app.py | dockerpuller/app.py | from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
if hook:
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid request: missing hook"), 400
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
| from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
| mit | Python |
f5b1038062aae983aea6500ae376ad87d4555c30 | allow one-off test runs | pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality | tests_once.py | tests_once.py | #!/usr/bin/env python
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2014 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
# this line must precede ckan import
import config
from iatidq.test_queue import test_queue_once
from iatidq.util import ensure_download_dir
if __name__ == '__main__':
directory = config.DATA_STORAGE_DIR
ensure_download_dir(directory)
test_queue_once()
| agpl-3.0 | Python | |
e345aa8deb3f79c100996d445a23d2166fbb2aca | add evolutions | jerkos/hozons,jerkos/hozons,jerkos/be-my-change,jerkos/be-my-change,jerkos/hozons,jerkos/be-my-change | migrations/versions/2f5abd277f57_.py | migrations/versions/2f5abd277f57_.py | """empty message
Revision ID: 2f5abd277f57
Revises: 21d6a9f40991
Create Date: 2017-07-16 22:47:47.460172
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2f5abd277f57'
down_revision = '21d6a9f40991'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('actions', sa.Column('created_at', sa.DateTime(), nullable=True))
op.add_column('actions', sa.Column('creator_user_id', sa.Integer(), nullable=True))
op.add_column('actions', sa.Column('end_date', sa.DateTime(), nullable=True))
op.add_column('actions', sa.Column('initial_nb_days', sa.Integer(), nullable=True))
op.add_column('actions', sa.Column('is_personal_action', sa.Boolean(), nullable=True))
op.add_column('actions', sa.Column('public', sa.Boolean(), nullable=True))
op.add_column('actions', sa.Column('start_date', sa.DateTime(), nullable=True))
#op.create_foreign_key(None, 'actions', 'users', ['creator_user_id'], ['id'])
#op.drop_column('actions', 'value')
op.add_column('ressources', sa.Column('url', sa.Text(), nullable=True))
op.add_column('users', sa.Column('points_env', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('points_pers', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('points_rel', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'points_rel')
op.drop_column('users', 'points_pers')
op.drop_column('users', 'points_env')
op.drop_column('ressources', 'url')
op.add_column('actions', sa.Column('value', sa.VARCHAR(length=7), nullable=False))
op.drop_constraint(None, 'actions', type_='foreignkey')
op.drop_column('actions', 'start_date')
op.drop_column('actions', 'public')
op.drop_column('actions', 'is_personal_action')
op.drop_column('actions', 'initial_nb_days')
op.drop_column('actions', 'end_date')
op.drop_column('actions', 'creator_user_id')
op.drop_column('actions', 'created_at')
# ### end Alembic commands ###
| bsd-3-clause | Python | |
0bc0691c7714b7b5885ce2a9c05eb7eb35738c74 | Add test for sender_callable check | pydanny/webhooks | tests/test_decorators.py | tests/test_decorators.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from webhooks.exceptions import SenderNotCallable
from webhooks import webhook, unhashed_hook
def test_callable_sender():
@webhook(event="example200", sender_callable=123)
def basic(creator="pydanny"):
return {"husband": "Daniel Roy Greenfeld", "wife": "Audrey Roy Greenfeld"}
with pytest.raises(SenderNotCallable):
basic(creator='pydanny')
| bsd-3-clause | Python | |
749c24ee930d44914970b757cfcadb30bb7a1d1c | Prepare for testcase | slaveofcode/pycrawler,slaveofcode/pycrawler | tests/unit/page_tests.py | tests/unit/page_tests.py | import unittest, os, sys
current_dir = os.path.dirname(__file__)
base_dir = os.path.join(current_dir, os.pardir, os.pardir)
sys.path.append(base_dir)
| mit | Python | |
f7273b6e97e6120fe0f3b0a2c9ea5865685608bd | Create client.py | wallefan/cosmic-encounter | client.py | client.py | """A slightly smarter telnet client that, if available, supports readline."""
import telnetlib
from telnetlib import IAC, WILL, DO, WONT, DONT
try:
    # readline is optional (absent e.g. on Windows); feature-detect it.
    import readline
    # Remember whatever completer was installed before this module loads so
    # it can be restored after a DONT negotiation.
    # NOTE(review): the callback below restores a name `old_completer`, not
    # this `old_readline_callback` -- confirm which is intended.
    old_readline_callback=readline.get_completer()
except ImportError:
    readline=None
def telnet_callback(sock, cmd, option):
    """Telnet option-negotiation callback for the custom readline option.

    Installs/removes the readline completer in response to DO/DONT requests
    from the server and answers WILL/WONT accordingly.

    NOTE(review): `OPTION_READLINE`, `completer` and `mode` are not defined
    anywhere in this file -- the module appears unfinished; confirm where
    they are meant to come from.
    """
    if option == OPTION_READLINE:
        if cmd == DO:
            # Telnet: do not acknowledge a request to enter a state we are
            # already in.
            if readline is not None and readline.get_completer() is not completer:
                old_completer = readline.get_completer()
                readline.set_completer(completer)
                sock.sendall(IAC + WILL + OPTION_READLINE)
            else:
                sock.sendall(IAC + WONT + OPTION_READLINE)
        elif cmd == DONT:
            if readline is not None and readline.get_completer() is completer:
                readline.set_completer(old_completer)
                sock.sendall(IAC + WONT + OPTION_READLINE)
    elif cmd == telnetlib.SE:  # BUG(fixed): bare `SE` was never imported
        # BUG(fixed): the original ended with `if mode is None` -- no colon
        # and no body, i.e. a SyntaxError that made the module unimportable.
        if mode is None:
            pass  # TODO: handle end-of-subnegotiation once `mode` exists
| mit | Python | |
98a2706909a7f880273080fdc7b4e696b6a600aa | Create 04.py | Pouf/CodingCompetition,Pouf/CodingCompetition | Euler/04.py | Euler/04.py | print(max(a*b for a in range(999, 100, -1) for b in range(999, 100, -1) if str(a*b)==str(a*b)[::-1]))
| mit | Python | |
e2a9f8ae9378c52bcde06863de3445ae89cca0d0 | Create details.py | dsg2806/acti.monash,dsg2806/acti.monash | details.py | details.py | import csv
# Copy "header" rows (first field ends with a colon) from data.csv into
# detail.csv, space-joined.  Context managers guarantee both files are
# closed even if a row raises mid-loop (the original used bare open/close).
with open("data.csv") as f, open("detail.csv", "w") as out:
    for line in csv.reader(f):
        # Guard empty rows and empty first fields: the original
        # `line[0][-1]` raised IndexError on those.
        if line and line[0].endswith(":"):
            # Note: no newline is appended -- matching rows are written
            # back-to-back, exactly as before.
            out.write(" ".join(line))
| agpl-3.0 | Python | |
389aa32dae9635bfc0eff192c9f01ab99e04b0f2 | Create main.py | buslovich/raspberry-pi-robotics | main.py | main.py | #controlling a robot with two motors that can avoid obstacles by using a sonar mounted on pan/tilt servos
import motorcontrol, servocontrol, sonarcontrol, auxiliary
import time
#instantiate objects
# Hardware controllers: two drive motors, pan/tilt servos, sonar rangefinder,
# plus a logging/cleanup helper.  Order matters: servos must exist before the
# centering calls below.
motor = motorcontrol.MotorControl()
servo = servocontrol.ServoControl()
sonar = sonarcontrol.SonarControl()
aux = auxiliary.AuxiliaryHelp()
#center servos
servo.pancenter()
servo.tiltcenter()
#start logging data
aux.writetofile('Servos are dead center', 0)
#-------------------------------------------------------------------------------
# Setting direction (finding the longest way without obstacles
def findWay(self=None):
    """Pan the sonar left/center/right and turn toward the clearest path.

    ``self`` is unused: this is a module-level function, but the original
    declared a bare ``self`` parameter while the main loop calls
    ``findWay()`` with no arguments (TypeError).  A default keeps the
    signature backward compatible while fixing that call.
    """
    distanceArray = []
    # Sample the distance at three headings; the sleep gives the servo time
    # to finish moving before the sonar ping.
    for pan_to, log_label in ((servo.panleft, 'Pan Left Distace'),
                              (servo.pancenter, 'Pan Center Distace'),
                              (servo.panright, 'Pan Right Distace')):
        pan_to()
        time.sleep(1)
        distanceArray.append(sonar.distance())
        aux.writetofile(log_label, distanceArray[-1])
    maxdistance = max(distanceArray)
    maxindex = distanceArray.index(maxdistance)
    # Index 0/1/2 correspond to left/center/right samples.
    if maxindex == 0:
        motor.left()
        aux.writetofile('Turning Left', maxdistance)
    elif maxindex == 2:
        motor.right()
        aux.writetofile('Turning Right', maxdistance)
    else:
        aux.writetofile('Not Turning', maxdistance)
    # (The original's `del distanceArray[:]` was dropped: clearing a local
    # list just before it goes out of scope has no effect.)
#-------------------------------------------------------------------------------
def move(self):
while sonar.distance()>=10:
motor.forward()
print "moving forward"
print "stopping"
motor.allStop()
time.sleep(0.5)
while sonar.distance()<10
print "moving backward"
motor.backward()
motor.allStop()
print "resetting position"
#-------------------------------------------------------------------------------
# Main loop: pick the clearest heading, then drive; Ctrl-C releases the GPIO
# pins via the auxiliary helper.
# NOTE(review): findWay/move are defined with a bare `self` parameter but
# called here with no arguments -- as written this raises TypeError on the
# first iteration; confirm the intended signatures.
try:
    while True:
        findWay()
        move()
except KeyboardInterrupt:
    aux.cleanup()
| apache-2.0 | Python | |
cdbc14e6f78ca7b05e291a007c4267dd55d9b96a | add `enums.py` | kokimoribe/todo-api | todo/enums.py | todo/enums.py | """Enums are defined here"""
from enum import Enum, auto
class Status(Enum):
    """Lifecycle states a Task can be in.

    Values are the 1-based declaration order, identical to what ``auto()``
    produced in the original definition.
    """

    TO_DO = 1
    IN_PROGRESS = 2
    DONE = 3
| mit | Python | |
f03079f2fd200d9b726c70acec9cdfd8772adb26 | Add support for arm64 as a CR architecture. | krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,dushu1203/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,dednal/chromium.src,M4sse/chromium.src,M4sse/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,patrickm/chromium.src,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,littlstar/chromium.src,Jonekee/chromium.src,ltilve/chromium,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,M4sse/chromium.src,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,axinging/chromium
-crosswalk,dednal/chromium.src,axinging/chromium-crosswalk,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,patrickm/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,patrickm/chromium.src,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,patrickm/chromium.src,dednal/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,jaruba/chromium.src,littlstar/chromium.src,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,jaruba/chromium.src,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,Chilledheart/chromium,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,dednal/chromium.src,ondra-novak/chromium.src,littlstar/chromium.src,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,jaruba/chromium.src,dushu1203/chromium.src,fujunwei/chromium-crosswalk,Chilledheart/chromium,ltilve/chromium,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,Chilledheart/chromium,markYoungH/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,patri
ckm/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,ltilve/chromium,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,dednal/chromium.src,Chilledheart/chromium,Just-D/chromium-1,krieger-od/nwjs_chromium.src,Just-D/chromium-1,Just-D/chromium-1,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,patrickm/chromium.src,jaruba/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,Fireblend/chromium-crosswalk,Just-D/chromium-1,Just-D/chromium-1,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,Chilledheart/chromium,markYoungH/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk,ltilve/chromium,fujunwei/chromium-crosswalk,jaruba/chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,Chilledheart/chromium,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/
chromium-crosswalk,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,littlstar/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,ondra-novak/chromium.src,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,dushu1203/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk | tools/cr/cr/base/arch.py | tools/cr/cr/base/arch.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the basic architectures supported by cr."""
import cr
# Config applied regardless of the selected plugin: forwards the chosen
# CR_ARCH value into the envsetup architecture variable.
DEFAULT = cr.Config.From(
    CR_ENVSETUP_ARCH='{CR_ARCH}',
)
class Arch(cr.Plugin, cr.Plugin.Type):
  """Base class for implementing cr architecture targets.

  Concrete architectures subclass this; plugin selection is keyed on the
  CR_ARCH config value.
  """

  SELECTOR = 'CR_ARCH'

  @classmethod
  def AddArguments(cls, parser):
    # --architecture overrides CR_ARCH; valid choices are the registered
    # subclasses.
    parser.add_argument(
        '--architecture', dest=cls.SELECTOR,
        choices=cls.Choices(),
        default=None,
        help='Sets the target architecture to use. Overrides ' + cls.SELECTOR
    )
class IA32Arch(Arch):
  # 32-bit x86 target.
  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='ia32',
  )
class Mips32Arch(Arch):
  # Little-endian MIPS target.
  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='mipsel',
  )

  @property
  def enabled(self):
    # Only offered when the Android platform plugin is active.
    return cr.AndroidPlatform.GetInstance().is_active
class X64Arch(Arch):
  # 64-bit x86 target.
  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='x64',
  )

  @property
  def priority(self):
    # Outranks the base default so x64 is preferred where available.
    return super(X64Arch, self).priority + 1
class Arm32Arch(Arch):
  # 32-bit ARM target.
  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='arm',
  )

  @property
  def priority(self):
    # Outranks both the default and x64 when enabled (Android builds).
    return super(Arm32Arch, self).priority + 2

  @property
  def enabled(self):
    # Only offered when the Android platform plugin is active.
    return cr.AndroidPlatform.GetInstance().is_active
class Arm64Arch(Arch):
  # 64-bit ARM target; no priority boost, so 32-bit arm stays preferred.
  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='arm64',
  )

  @property
  def enabled(self):
    # Android-only, like the other ARM/MIPS targets.
    return cr.AndroidPlatform.GetInstance().is_active
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the basic architectures supported by cr."""
import cr
# Config applied regardless of the selected plugin: forwards the chosen
# CR_ARCH value into the envsetup architecture variable.
DEFAULT = cr.Config.From(
    CR_ENVSETUP_ARCH='{CR_ARCH}',
)
class Arch(cr.Plugin, cr.Plugin.Type):
  """Base class for implementing cr architecture targets.

  Concrete architectures subclass this; plugin selection is keyed on the
  CR_ARCH config value.
  """

  SELECTOR = 'CR_ARCH'

  @classmethod
  def AddArguments(cls, parser):
    # --architecture overrides CR_ARCH; valid choices are the registered
    # subclasses.
    parser.add_argument(
        '--architecture', dest=cls.SELECTOR,
        choices=cls.Choices(),
        default=None,
        help='Sets the target architecture to use. Overrides ' + cls.SELECTOR
    )
class IA32Arch(Arch):
  # 32-bit x86 target.
  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='ia32',
  )
class Mips32Arch(Arch):
  # Little-endian MIPS target.
  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='mipsel',
  )

  @property
  def enabled(self):
    # Only offered when the Android platform plugin is active.
    return cr.AndroidPlatform.GetInstance().is_active
class X64Arch(Arch):
  # 64-bit x86 target.
  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='x64',
  )

  @property
  def priority(self):
    # Outranks the base default so x64 is preferred where available.
    return super(X64Arch, self).priority + 1
class Arm32Arch(Arch):
  # 32-bit ARM target.
  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='arm',
  )

  @property
  def priority(self):
    # Outranks both the default and x64 when enabled (Android builds).
    return super(Arm32Arch, self).priority + 2

  @property
  def enabled(self):
    # Only offered when the Android platform plugin is active.
    return cr.AndroidPlatform.GetInstance().is_active
| bsd-3-clause | Python |
64140f651b22b4b962368a53723cb4bda86651ba | test branch | ellisztamas/faps | test.py | test.py | #this is a test file
| mit | Python | |
df9122528172c5142160f9804b695d65bd211892 | Create QAStats.py | Nik0l/UTemPro,Nik0l/UTemPro | QAStats.py | QAStats.py | # Calculating statistics on Q&A communities. Includes temporal statistics ( for example, how many questions were asked in a period), spatial statistics, number of users, questions, answers.
| mit | Python | |
8038e27862f4c128168bd656d9e43880359b1a61 | Create Scraper.py | RuyCalderon/CraigslistScraper | Scraper.py | Scraper.py | import requests
from bs4 import BeautifulSoup
from time import sleep
import datetime
import random
def RemoveEncodingSignature(string):
    """Strip Python bytes-literal wrappers (b'...' / b"...") from *string*.

    The scraper builds log lines with str(x.encode('utf-8')), which yields
    text such as "b'title'"; this removes the introducing ``b`` and the
    paired quotes while keeping every character in between.

    Fixes a bug in the original implementation: any ``b`` that did *not*
    open a bytes literal (e.g. the b in "abc", or a trailing "b") was
    silently dropped from the output.  Also builds the result with a list
    join instead of quadratic string concatenation.
    """
    pieces = []
    delimiter = None   # closing quote of the literal we are inside, else None
    opened_at = -1     # index of the introducing 'b'
    for index, char in enumerate(string):
        if delimiter is not None:
            if char != delimiter:
                pieces.append(char)
            elif index != opened_at + 1:
                # Closing quote reached (the opening quote immediately after
                # 'b' is simply skipped): leave the literal.
                delimiter = None
        elif char == 'b' and index + 1 < len(string) and string[index + 1] in ("'", '"'):
            # Start of a bytes literal: swallow the 'b'; the quote that
            # follows is swallowed on the next iteration.
            delimiter = string[index + 1]
            opened_at = index
        else:
            pieces.append(char)
    return ''.join(pieces)
# Scrape the Craigslist "sites" directory once to collect the name and URL
# of every US city page.
CraigslistCities = []
url = "http://www.craigslist.org/about/sites"
WebResponse = requests.get(url)
if WebResponse.status_code == 200:
    #print("Web OK")
    parsedHTML = BeautifulSoup(WebResponse.text, "html.parser")
    if(parsedHTML):
        #print("HTMLparsing OK")
        # The third <div> under body/section holds the US listings.
        # NOTE(review): tied to Craigslist's 2015-era page layout -- verify
        # the selector still matches before relying on it.
        usadiv = parsedHTML.body.section.find_all('div')[2]
        USCities = usadiv.find_all('li')
        for cityItem in USCities:
            url = cityItem.a['href']
            CraigslistCities.append({'Name' : cityItem.string , 'URL': url})
random.seed()
#things to watch out for - identical listings being posted in different cities
#think about adding multi-tiered keywords, ie; keywords that can stand on their own and others that need other keywords to be valid
# Keywords matched against lower-cased listing titles below.
KeyWords = ['javascript', 'c++', 'c/c++', 'programmer', 'coder', 'developer', 'python', 'html', 'css', 'tutoring', 'tutor', 'project', 'problem', 'help']
GoodListings = []
# Listings older than 24 hours relative to this timestamp are skipped.
StartTime = datetime.datetime.today()
CurrentCityIndex = 0
# Visit each city's computer-gigs ("cpg") search page and harvest listings
# posted in the last 24h whose titles contain a keyword.
for City in CraigslistCities:
    # Randomized delay between requests to stay polite / avoid rate limiting.
    sleep(random.uniform(10,17))
    try:
        try:
            CityResponse = requests.get("https:" + City['URL'] + 'search/cpg')
            print("http:" + City['URL'] + 'search/cpg')
        except:
            # NOTE(review): bare except; if the GET fails, CityResponse stays
            # unbound and the status check below raises NameError (caught by
            # the outer handler).
            print(CurrentCityIndex)
            print(City['URL'])
        if CityResponse.status_code == 200:
            print('ok')
            parsedCityHTML = BeautifulSoup(CityResponse.text,'html.parser')
            MoveToNextListing = True
            FirstListing = True
            Count = 0
            # Walk sibling <p> elements; each one is a listing row.
            while(MoveToNextListing):
                Count+=1
                if(FirstListing):
                    listing = parsedCityHTML.find('p')
                    FirstListing = False
                else:
                    listing = listing.find_next_sibling('p')
                TimePostedData = listing.find('time')['datetime']
                DateTimePosted = datetime.datetime.strptime(TimePostedData,'%Y-%m-%d %H:%M')
                # Stop at the first listing older than one day.
                if (StartTime - DateTimePosted) > datetime.timedelta(days=1):
                    MoveToNextListing = False
                else:
                    ValidListing = False
                    ListingContainers = listing.find_all('a')
                    ListingTitle = ListingContainers[1].text.lower()
                    ListingID = ListingContainers[1]['data-id']
                    ListingLinkSuffix = ListingContainers[1]['href']
                    # Prefer the anchor carrying the 'hdrlnk' class.
                    if ListingContainers[1]['class'][0] != 'hdrlnk':
                        for container in ListingContainers:
                            # NOTE(review): container['class'] is a *list* of
                            # class names, so comparing it to the string
                            # 'hdrlnk' is always False; "'hdrlnk' in
                            # container['class']" is probably what was meant.
                            if container['class'] == 'hdrlnk':
                                ListingTitle = container.text
                                ListingID = container['data-id']
                                ListingLinkSuffix = container['href']
                                ValidListing = True
                    else:
                        ValidListing = True
                    if ValidListing:
                        KeywordFound = False
                        for keyword in KeyWords:
                            if keyword in ListingTitle:
                                KeywordFound = True
                        if KeywordFound:
                            GoodListings.append({'TimePosted': TimePostedData, 'ListingID': ListingID, 'ListingTitle': ListingTitle, 'ListingLink': City['URL'] + ListingLinkSuffix})
                    MoveToNextListing = True
    except Exception as e:
        print(str(e))
    CurrentCityIndex+=1
# Write the collected listings to disk, one "time, title, link" line each.
# A context manager guarantees the file is flushed and closed even if a
# malformed listing raises while being formatted (the original used a bare
# open()/close() pair).
with open('CraigslistResults.txt', 'w') as fouthandle:
    for Listing in GoodListings:
        NewLine = str(Listing['TimePosted'].encode('utf-8')) + ', ' + str(Listing['ListingTitle'].encode('utf-8')) + ', ' + str(Listing['ListingLink'].encode('utf-8')) + '\n'
        # Strip the b'...' wrappers introduced by str(bytes) above.
        NewLine = RemoveEncodingSignature(NewLine)
        fouthandle.write(NewLine)
print('Finished')
| mit | Python | |
c54d73cc3e6a1e868fa2804e51d6b9247bd4c814 | Add player season definition | leaffan/pynhldb | db/player_season.py | db/player_season.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import timedelta
from sqlalchemy import and_
from .common import Base, session_scope
class PlayerSeason(Base):
    """ORM model for one player's stat line with one team in one season.

    Column definitions are reflected from the database (``__autoload__``);
    values are populated from the NHL stats API JSON payload.
    """
    __tablename__ = 'player_seasons'
    __autoload__ = True

    # Standard counting stats exposed by this model.
    STD_STATS = [
        'games_played', 'goals', 'assists', 'points',
        'plus_minus', 'pim', 'ppg', 'shg', 'gwg', 'shots']
    # Maps NHL JSON stat keys to database column/attribute names.
    JSON_DB_MAPPING = {
        "timeOnIce": "toi",
        "assists": "assists",
        "goals": "goals",
        "pim": "pim",
        "shots": "shots",
        "games": "games_played",
        "hits": "hits",
        "powerPlayGoals": "ppg",
        "powerPlayPoints": "pp_pts",
        "powerPlayTimeOnIce": "pp_toi",
        "evenTimeOnIce": "ev_toi",
        "faceOffPct": "faceoff_pctg",
        "shotPct": "pctg",
        "gameWinningGoals": "gwg",
        "overTimeGoals": "otg",
        "shortHandedGoals": "shg",
        "shortHandedPoints": "sh_pts",
        "shortHandedTimeOnIce": "sh_toi",
        "blocked": "blocks",
        "plusMinus": "plus_minus",
        "points": "points",
        "shifts": "shifts",
    }
    # Attributes stored as time intervals rather than plain numbers.
    INTERVAL_ATTRS = ["toi", "ev_toi", "pp_toi", "sh_toi"]

    def __init__(self, player_id, season, season_type, team, team_season_cnt, season_data):
        """Populate the row from a raw season-stats dict (`season_data`)."""
        self.player_id = player_id
        self.season = season
        self.season_type = season_type
        self.team_season_cnt = team_season_cnt
        self.team_id = team.team_id

        for json_key in self.JSON_DB_MAPPING:
            if json_key in season_data.keys():
                try:
                    # creating actual time intervals for time-on-ice items
                    if self.JSON_DB_MAPPING[json_key] in self.INTERVAL_ATTRS:
                        minutes, seconds = [
                            int(x) for x in season_data[json_key].split(":")]
                        value = timedelta(minutes=minutes, seconds=seconds)
                    # all other items are already suitably
                    # stored in the json struct
                    else:
                        value = season_data[json_key]
                    setattr(self, self.JSON_DB_MAPPING[json_key], value)
                except:
                    # NOTE(review): bare except hides all failures here;
                    # narrowing to (ValueError, AttributeError) would be safer.
                    print(
                        "Unable to retrieve %s from season data" %
                        self.JSON_DB_MAPPING[json_key])
                    # logger.warn(
                    #     "Unable to retrieve %s from season data" %
                    #     self.PLAYER_STATS_MAP[key])
        # NOTE(review): `else` on a for loop without `break` always runs;
        # this recomputes the shooting percentage after all keys are set.
        # Confirm the intended pairing of this else clause.
        else:
            self.calculate_pctg()

    def calculate_pctg(self):
        """Derive shooting percentage from goals/shots (None if shots is None)."""
        if self.shots:
            self.pctg = round((float(self.goals) / float(self.shots)) * 100, 4)
        elif self.shots is None:
            self.pctg = None
        else:
            # Zero shots recorded: percentage is exactly 0.
            self.pctg = round(0., 2)

    @classmethod
    def find(self, player_id, team, season, season_type, team_season_cnt):
        """Look up an existing row by its natural key; None if not found.

        NOTE(review): classmethod first argument is conventionally `cls`.
        """
        with session_scope() as session:
            try:
                player_season = session.query(PlayerSeason).filter(
                    and_(
                        PlayerSeason.player_id == player_id,
                        PlayerSeason.season == season,
                        PlayerSeason.team_id == team.team_id,
                        PlayerSeason.season_type == season_type,
                        PlayerSeason.team_season_cnt == team_season_cnt
                    )
                ).one()
            except:
                # NOTE(review): bare except masks query errors as "not found";
                # catching sqlalchemy NoResultFound would be safer.
                player_season = None
        return player_season

    def update(self, other):
        """Copy all mapped stat attributes from *other* onto this row."""
        for attr in self.JSON_DB_MAPPING.values():
            if hasattr(other, attr):
                setattr(self, attr, getattr(other, attr))
        # NOTE(review): loop-`else` without `break` always runs; recomputes
        # the percentage after copying.  Confirm the intended pairing.
        else:
            self.calculate_pctg()
| mit | Python | |
9add66f49ae0daa28db84e36bf2ac8675125d21c | Remove instructional comments | BryceLohr/authentic,pu239ppy/authentic2,BryceLohr/authentic,incuna/authentic,adieu/authentic2,pu239ppy/authentic2,incuna/authentic,pu239ppy/authentic2,incuna/authentic,pu239ppy/authentic2,adieu/authentic2,BryceLohr/authentic,BryceLohr/authentic,incuna/authentic,adieu/authentic2,adieu/authentic2,incuna/authentic | urls.py | urls.py | from django.conf.urls.defaults import *
from django.contrib import admin
from django.views.generic.simple import direct_to_template
# Auto-register ModelAdmin classes from all installed apps.
admin.autodiscover()

urlpatterns = patterns('',
    (r'^admin/', include(admin.site.urls)),
    (r'^registration/', include('registration.urls')),
    # Landing page: static template render, reverse()-able as 'index'.
    (r'^$', direct_to_template,
        { 'template': 'index.html' }, 'index'),
)
| from django.conf.urls.defaults import *
from django.contrib import admin
from django.views.generic.simple import direct_to_template
# Auto-register ModelAdmin classes from all installed apps.
admin.autodiscover()

urlpatterns = patterns('',
    # Example:
    # (r'^authentic/', include('authentic.foo.urls')),

    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    (r'^admin/', include(admin.site.urls)),
    (r'^registration/', include('registration.urls')),
    # Landing page: static template render, reverse()-able as 'index'.
    (r'^$', direct_to_template,
        { 'template': 'index.html' }, 'index'),
)
| agpl-3.0 | Python |
51e368584d59ad2f308f1c9229fbbbf40c504749 | Create list.py | hsachdevah/aws | list.py | list.py | import argparse
import boto.ec2
# AWS credentials: filled in from the command line by main() and read by
# get_ec2_instances().
access_key = ''
secret_key = ''
def get_ec2_instances(region):
ec2_conn = boto.ec2.connect_to_region(region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
reservations = ec2_conn.get_all_reservations()
for reservation in reservations:
print region+':',reservation.instances
for vol in ec2_conn.get_all_volumes():
print region+':',vol.id
def main():
    """Parse AWS credentials from argv and dump instances/volumes per region."""
    regions = ['us-east-1','us-west-1','us-west-2','eu-west-1','sa-east-1',
               'ap-southeast-1','ap-southeast-2','ap-northeast-1']
    parser = argparse.ArgumentParser()
    parser.add_argument('access_key', help='Access Key')
    parser.add_argument('secret_key', help='Secret Key')
    args = parser.parse_args()
    # The connection helper reads these module-level globals.
    global access_key
    global secret_key
    access_key = args.access_key
    secret_key = args.secret_key
    for region in regions:
        get_ec2_instances(region)

if __name__ =='__main__':
    main()
| mit | Python | |
c0bca49a19e4f97663b9f282bb11768457aec89e | Add example of tycho2 usage | lkangas/python-tycho2 | example.py | example.py | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 21 00:17:33 2017
@author: lauri.kangas
"""
import matplotlib.pyplot as plt
import numpy as np
from tycho2 import tycho2
from projections import stereographic, unity
import coordinate_transformations as coord
# Memory-mapped star catalogs: Tycho-2 index, main catalog, and supplement.
index = np.load('tyc2index.npy')
catalog = np.load('tyc2.npy', mmap_mode='r')
suppl = np.load('tyc2sup.npy')

# Field of view centered on RA=90deg, Dec=0deg, no rotation, 70x70 degrees.
center_RADEC = np.radians([90, 0])
rotation = np.radians(0)
fov_degrees = np.array([70, 70])
fov_radians = np.radians(fov_degrees)
half_fov_radians = fov_radians/2

projection = stereographic
# NOTE(review): the stereographic choice above is immediately overridden;
# remove one of these two assignments once the intended projection is decided.
projection = unity

image_plane_half_fov = projection(half_fov_radians)

LM = 12     # presumably the limiting magnitude passed to stars_in_fov -- confirm
factor = 4  # marker-size scale factor for the scatter plot

TYC2 = tycho2('tyc2index.npy', 'tyc2.npy', 'tyc2sup.npy', projection=projection)

image_x, image_y, mags = TYC2.stars_in_fov(center_RADEC, fov_radians, rotation, LM=LM)

plt.clf()
# Brighter stars (smaller magnitude) get larger markers.
plt.scatter(image_x, image_y, (LM-mags)**2.5/LM*factor, alpha=1)
plt.axis('equal')

# Outline the nominal field of view on the image plane.
import matplotlib.patches as patches
plt.gca().add_patch(patches.Rectangle(-image_plane_half_fov, *(2*image_plane_half_fov), fill=False))
plt.tight_layout()
plt.axis('off') | mit | Python | |
6dc4d2d1e9ef998ff310c14c54586ca694572801 | Add extract plugin | devzero-xyz/Andromeda-Plugins | extract.py | extract.py | """This plugin extracts the main content of a webpage"""
"""e.g. extracting the article of a url of bbc.co.uk"""
from utils import add_cmd, add_handler
import utils
import requests
from bs4 import BeautifulSoup
name = "extract"
cmds = ["extract"]


def main(irc):
    """Register this plugin's state dict on the bot if not already present."""
    irc.plugins.setdefault(name, {})
@add_cmd
def extract(irc, event, args):
    """IRC command handler: fetch args[0] as a URL and reply with the page's
    main textual content (scripts/styles removed), truncated to 350 chars.

    Never propagates an exception: a missing URL argument triggers the help
    text, any other failure a generic error reply.
    """
    try:
        html = requests.get(args[0]).text
        soup = BeautifulSoup(html)
        # Drop non-content nodes before extracting visible text.
        for script in soup(["script", "style"]):
            script.extract()
        text = soup.get_text()
        # Collapse whitespace: strip each line, split into phrases, drop blanks.
        lines = (line.strip() for line in text.splitlines())
        chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
        text = '\n'.join(chunk for chunk in chunks if chunk)
        text = text.encode('ascii', 'ignore')
        irc.reply(event, (text[:350] + '..') if len(text) > 350 else text)
    except IndexError:
        # No URL given (args[0] raised): show usage instead.
        irc.reply(event, utils.gethelp("extract"))
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrowing to Exception would be safer.
        irc.reply(event, "Error extracting informations")
add_handler(extract, name) | mit | Python | |
8c71a177c16762ab50dafe2528d24fab4ccf0925 | Add py solution for 462. Minimum Moves to Equal Array Elements II | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/minimum-moves-to-equal-array-elements-ii.py | py/minimum-moves-to-equal-array-elements-ii.py | class Solution(object):
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
median = nums[len(nums) / 2]
return sum(abs(x - median) for x in nums)
| apache-2.0 | Python | |
9cd494f526fa49e04bbdbb31dc6d32f444bbbba8 | add a tool to print the content of SEG frames | QIICR/dcmqi,QIICR/dcmqi,QIICR/dcmqi,QIICR/dcmqi,QIICR/dcmqi | util/dumpSEGPixelData.py | util/dumpSEGPixelData.py | import pydicom, sys
from colorama import Fore, Style, init

# colorama
init()

# Usage: dumpSEGPixelData.py <dicom-seg-file> [1-based frame number]
d = pydicom.read_file(sys.argv[1])
if len(sys.argv)>2:
    frame = int(sys.argv[2])-1
    print("Dumping frame "+str(frame))
else:
    frame = None

print(d.Rows)
print(d.Columns)
print(d.NumberOfFrames)

# NOTE(review): despite the name, this counts *bytes* of the bit-packed
# payload (pixels/8); the %8 rounding and even-length padding adjustments
# look suspect -- confirm against the DICOM packed-bit rules.
totalPixels = int(d.Rows*d.Columns*d.NumberOfFrames/8)
if totalPixels%8:
    totalPixels = totalPixels + 1
totalPixels = totalPixels + (totalPixels % 2)

print("Total pixels expected: %i" % totalPixels)
print("Total pixels actual: %i" % len(d.PixelData))

# Dump either the requested frame or all of them.
if not frame is None:
    frames = [frame]
else:
    frames = range(d.NumberOfFrames)

import numpy as np
unpacked = np.unpackbits(np.frombuffer(d.PixelData,dtype=np.uint8))

# First rendering: bits as unpacked by numpy (MSB-first within each byte).
# Alternating byte parity switches the terminal color to make byte
# boundaries visible.
print("With numpy unpackbits:")
for f in frames:
    print("Frame %i" % f)
    for i in range(d.Rows):
        for j in range(d.Columns):
            pixelNumber = f*d.Rows*d.Columns+i*d.Columns+j
            if int(pixelNumber/8)%2:
                sys.stdout.write(Fore.RED)
            else:
                sys.stdout.write(Fore.WHITE)
            if unpacked[pixelNumber]:
                sys.stdout.write("X")
            else:
                sys.stdout.write(".")
        print("")

# Second rendering: manual bit extraction, LSB-first within each byte.
# NOTE(review): this is the *opposite* bit order from np.unpackbits's
# default (MSB-first), so the two dumps are expected to differ unless the
# data is symmetric -- that comparison appears to be the point of the tool.
print("\nWith manual unpacking:")
for f in frames:
    print("Frame %i" % f)
    for i in range(d.Rows):
        for j in range(d.Columns):
            pixelNumber = f*d.Rows*d.Columns+i*d.Columns+j
            byteNumber = int(pixelNumber/8)
            bitPosition = pixelNumber % 8
            if byteNumber%2:
                sys.stdout.write(Fore.RED)
            else:
                sys.stdout.write(Fore.WHITE)
            if (d.PixelData[byteNumber] >> bitPosition) & 1:
                sys.stdout.write("X")
            else:
                sys.stdout.write(".")
        print("")

print(Style.RESET_ALL)
| bsd-3-clause | Python | |
ec3ef6e8770b9a36f20a05216d8e0964107a8689 | Add a new snippet (Python GTK+3). | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/pygtk/python_gtk3_pygobject/combobox.py | python/pygtk/python_gtk3_pygobject/combobox.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
This is the simplest Python GTK+3 snippet.
See: http://python-gtk-3-tutorial.readthedocs.org/en/latest/combobox.html
"For a simple list of textual choices, the model-view API of Gtk.ComboBox can
be a bit overwhelming. In this case, Gtk.ComboBoxText offers a simple
alternative."
"""
from gi.repository import Gtk as gtk
# Entries shown in the ComboBoxText, in display order.
COMBOBOX_TEXT_LIST = ["Hello World!", "Hi!", "Goodbye."]
def print_text(widget, data):
    """Print the currently selected entry of *data*.

    Usage example for gtk.ComboBoxText.get_active_text(); *widget* (the
    clicked button) is unused, *data* is the gtk.ComboBoxText.
    """
    selected_text = data.get_active_text()
    print(selected_text)
def reset_selection(widget, data):
    """Reset *data* (a gtk.ComboBoxText) to its first entry.

    Usage example for gtk.ComboBoxText.set_active(); *widget* (the clicked
    button) is ignored.
    """
    data.set_active(0)  # index 0 == first item
def main():
    """Build the demo window (ComboBoxText plus Print/Reset buttons) and run GTK."""
    window = gtk.Window()
    vertical_box = gtk.Box(orientation = gtk.Orientation.VERTICAL, spacing = 6) # 6 pixels are placed between children
    window.add(vertical_box)

    # Label and Combobox ##############
    horizontal_box1 = gtk.Box(orientation = gtk.Orientation.HORIZONTAL, spacing = 6) # 6 pixels are placed between children
    label = gtk.Label(label="Text to print:")
    horizontal_box1.pack_start(label, True, True, 0)
    combobox = gtk.ComboBoxText()
    combobox.set_entry_text_column(0) # sets the model column which ComboBox should use to get strings from to be text_column
    for text in COMBOBOX_TEXT_LIST:
        combobox.append_text(text) # fill the combobox
    combobox.set_active(0) # select the first item
    horizontal_box1.pack_start(combobox, True, True, 0)
    vertical_box.pack_start(horizontal_box1, True, True, 0)

    # Buttons #########################
    horizontal_box2 = gtk.Box(orientation = gtk.Orientation.HORIZONTAL, spacing = 6) # 6 pixels are placed between children
    # Print button
    button1 = gtk.Button(label="Print")
    button1.connect("clicked", print_text, combobox) # connect("event", callback, data)
    horizontal_box2.pack_start(button1, True, True, 0)
    # Clean button
    button2 = gtk.Button(label="Reset")
    button2.connect("clicked", reset_selection, combobox) # connect("event", callback, data)
    horizontal_box2.pack_start(button2, True, True, 0)
    vertical_box.pack_start(horizontal_box2, True, True, 0)

    ###
    window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
    window.show_all() # display the window
    gtk.main() # GTK+ main loop

if __name__ == '__main__':
    main()
| mit | Python | |
5c3589b295c9b9156a6bbfdcecc81754a76f9e0d | Create url-logger.py | Poorchop/hexchat-scripts,ishan-marikar/hexchat-scripts,ishan-marikar/hexchat-scripts | url-logger.py | url-logger.py | from datetime import datetime
import hexchat
import os
import re
# hexchat plugin metadata, read by the client on load.
__module_name__ = "URL Logger"
__module_author__ = "Poorchop"
__module_version__ = "0.1"
__module_description__ = "Log URLs from specific channels and PMs to disk"

# channels PMs
# Channel names and PM nicks whose messages are scanned for URLs.
watched_channels = ("#hexchat", "TingPing")
# Print events hooked below: messages/actions/highlights, channel and private.
events = ("Channel Message", "Channel Action",
          "Channel Msg Hilight", "Channel Action Hilight",
          "Private Message", "Private Message to Dialog",
          "Private Action", "Private Action to Dialog")
# regex source: http://blog.mattheworiordan.com/post/13174566389/url-regular-expression-for-links-with-or-without-the
url_regex = re.compile("((([A-Za-z]{3,9}:(?:\/\/)?)(?:[\-;:&=\+\$,\w]+@)?[A-Za-z0-9\.\-]+|(?:www\.|[\-;:&=\+\$,\w]+@)[A"
                       "-Za-z0-9\.\-]+)((?:\/[\+~%\/\.\w\-_]*)?\??(?:[\-\+=&;%@\.\w_]*)#?(?:[\.\!\/\\\w]*))?)")
def url_logger(stripped_word, nick, network, chan, time):
    """Append one URL sighting to <configdir>/logs/<network>/<chan>/urls.txt."""
    directory = os.path.join(hexchat.get_info("configdir"), "logs", network, chan)
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Use a distinct name for the file path (the original rebound `directory`
    # to the file, which was misleading) and a context manager so the handle
    # is closed even if write() raises.
    logfile = os.path.join(directory, "urls.txt")
    with open(logfile, "a") as f:
        f.write(time + " " + nick + "@" + chan + ":" + network + " - " + stripped_word + "\n")
def url_finder(word, nick, network, chan, time):
    """Scan each whitespace-separated token of the message body for a URL
    and hand matches to url_logger()."""
    for w in word[1].split():
        # Remove IRC color/formatting codes before matching.
        stripped_word = hexchat.strip(w, -1, 3)
        if url_regex.match(stripped_word):
            url_logger(stripped_word, nick, network, chan, time)
def chan_check_cb(word, word_eol, userdata):
    """hexchat print-event callback: log URLs from watched channels/PMs."""
    # Pad `word` to 4 entries so missing event fields become empty strings.
    word = [(word[i] if len(word) > i else "") for i in range(4)]
    chan = hexchat.get_info("channel")
    if chan in watched_channels:
        # word[0] is the sender nick; strip IRC formatting from it.
        nick = hexchat.strip(word[0], -1, 3)
        time = datetime.now().strftime("[%b %d %Y %H:%M]")
        network = hexchat.get_info("network")
        url_finder(word, nick, network, chan, time)
# Register the callback for every interesting print event, then announce load.
for event in events:
    hexchat.hook_print(event, chan_check_cb)

hexchat.prnt(__module_name__ + " version " + __module_version__ + " loaded")
| mit | Python | |
0fef4f88aa37b221947a22c91a0c5bfe636fcaf1 | Test for adjointness of new point grid models. | ryanvolz/radarmodel,ryanvolz/radarmodel | radarmodel/tests/test_pointgrid_adjointness.py | radarmodel/tests/test_pointgrid_adjointness.py | # ----------------------------------------------------------------------------
# Copyright (c) 2015, 'radarmodel' developers (see AUTHORS file)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-Clause ("BSD New") license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import itertools
from radarmodel.pointgrid import TxRef, RxRef
from radarmodel.util import get_random_normal, get_random_oncircle
def adjointness_error(op, its=100):
    """Check adjointness of op.A and op.As for 'its' instances of random data.

    For random unit-normed x and y, this finds the error in the adjoint
    identity <Ax, y> == <x, A*y>:
        err = abs( vdot(A(x), y) - vdot(x, Astar(y)) ).

    The type and shape of the input to A are specified by op.inshape and
    op.indtype. Returns a vector of the error magnitudes.
    """
    inshape = op.inshape
    indtype = op.indtype
    outshape = op.outshape
    outdtype = op.outdtype

    errs = np.zeros(its, dtype=indtype)
    # 'range' replaces Python-2-only 'xrange'; a dead pre-loop draw of x
    # (immediately overwritten inside the loop) was removed
    for k in range(its):
        x = get_random_normal(inshape, indtype)
        x = x/np.linalg.norm(x)
        y = get_random_normal(outshape, outdtype)
        y = y/np.linalg.norm(y)

        ip_A = np.vdot(op.A(x), y)
        ip_Astar = np.vdot(x, op.As(y))

        errs[k] = np.abs(ip_A - ip_Astar)

    return errs
def check_adjointness(cls, L, N, M, R, sdtype):
    """Build a unit-norm random template, instantiate the model, and return a
    nose-style callable asserting A/As adjointness for that operator."""
    s = get_random_oncircle((L,), sdtype)
    s = s/np.linalg.norm(s)

    model = cls(L=L, M=M, N=N, R=R, precision=sdtype)
    op = model(s=s)

    err_msg = '{0} and {1} are not adjoints, with max error of {2}'

    def call():
        errs = adjointness_error(op, its=100)
        np.testing.assert_array_almost_equal(
            errs, 0, err_msg=err_msg.format(
                op.A.__name__, op.As.__name__, np.max(np.abs(errs)),
            )
        )
    call.description = '{6}: s={0}({1}), x={2}({3}), N={4}, R={5}'.format(
        np.dtype(sdtype).str, L, np.dtype(op.indtype).str, M, N, R,
        cls.__name__,
    )

    return call


def test_pointgrid_adjointness():
    """Nose test generator: one adjointness check per (class, sizes, R, dtype)."""
    clss = (TxRef, RxRef)
    Ls = (13, 13, 13)
    Ns = (13, 64, 27)
    Ms = (37, 37, 10)
    Rs = (1, 2, 3)
    sdtypes = (np.float32, np.complex128)

    np.random.seed(1)  # deterministic random draws across runs
    for cls, (L, N, M), R, sdtype in itertools.product(
        clss, zip(Ls, Ns, Ms), Rs, sdtypes
    ):
        callable_test = check_adjointness(cls, L, N, M, R, sdtype)
        callable_test.description = 'test_pointgrid_adjointness: '\
            + callable_test.description
        yield callable_test


if __name__ == '__main__':
    import nose
    #nose.runmodule(argv=[__file__,'-vvs','--nologcapture',
    #                     '--stop','--pdb','--pdb-failure'],
    #               exit=False)
    nose.runmodule(argv=[__file__,'-vvs','--nologcapture'],
                   exit=False)
| mit | Python | |
3c4c06607eb14920cf4b9d0e4fb6d29f37d1d0ec | Add db_add.py, adding a post from a file, with tags from the filename. | drougge/wellpapp-pyclient | db_add.py | db_add.py | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from sys import argv, exit
from dbclient import dbclient
from hashlib import md5
import Image
from cStringIO import StringIO
from pyexiv2 import Image as ExivImage
from os.path import basename
if len(argv) < 2:
print "Usage:", argv[0], "filename [filename [..]]"
exit(1)
def determine_filetype(data):
    """Sniff the file type from leading magic bytes.

    Returns 'jpeg', 'gif', 'png', 'bmp' or 'swf' ('FWS' plain / 'CWS'
    compressed flash), or None when no known signature matches (made
    explicit; previously the function fell off the end).
    """
    if data[:3] == "\xff\xd8\xff": return "jpeg"
    if data[:4] == "GIF8": return "gif"
    if data[:4] == "\x89PNG": return "png"
    if data[:2] == "BM": return "bmp"
    if data[:3] == "FWS" or data[:3] == "CWS": return "swf"
    return None
def imagesize(fh):
    """Return (width, height) of the image readable from file-like fh."""
    img = Image.open(fh)
    return img.size


def imagetime(fn):
    """Read the Exif DateTime tag from the file at path fn (raises if absent)."""
    img = ExivImage(fn)
    img.readMetadata()
    return img['Exif.Image.DateTime']
client = dbclient()

for fn in argv[1:]:
    data = file(fn).read()
    m = md5(data).hexdigest()
    ft = determine_filetype(data)
    assert ft
    post = client.get_post(m)
    if not post:
        datafh = StringIO(data)
        w, h = imagesize(datafh)
        args = {"md5": m, "width": w, "height": h, "filetype": ft}
        try:
            datafh.seek(0)
            args["date"] = imagetime(fn)
        except Exception:
            pass  # best effort: not every file carries Exif data
        client.add_post(**args)
    full = []
    weak = []
    post = client.get_post(m)
    # strip the weak-tag marker '~' so tag guids compare equal below
    posttags = map(lambda t: t[1:] if t[0] == "~" else t, post["tagguid"])
    # every whitespace-separated token of the filename except the last is a tag
    for tag in basename(fn).split()[:-1]:
        if tag[0] == "~":
            tags = weak
            tag = tag[1:]
        else:
            tags = full
        t = client.find_tag(tag)
        if t and t not in posttags: tags.append(t)
    if full or weak:
        client.tag_post(m, full, weak)
| mit | Python | |
7570e757e79c29974afffeee036f056328a06fe9 | Create pull.py | wyongkun/Hello-World | pull.py | pull.py | It's a file to try my first pull directive
| mit | Python | |
5d38ab06fd014241cba7e8cdcfed9887a92460b9 | Add smoke tests aperiodic | voytekresearch/neurodsp | neurodsp/tests/test_aperiodic_dfa.py | neurodsp/tests/test_aperiodic_dfa.py | """Tests for fractal analysis using fluctuation measures."""
from neurodsp.tests.settings import FS
from neurodsp.aperiodic.dfa import *
###################################################################################################
###################################################################################################
def test_compute_fluctuations(tsig):
    # smoke test: just check the call runs and unpacks into three values
    t_scales, flucs, exp = compute_fluctuations(tsig, FS)

def test_compute_rescaled_range(tsig):
    # smoke test with an arbitrary window size of 10
    rs = compute_rescaled_range(tsig, 10)

def test_compute_detrended_fluctuation(tsig):
    # smoke test with an arbitrary window size of 10
    out = compute_detrended_fluctuation(tsig, 10)
| apache-2.0 | Python | |
3628c21841eea385dbc13e0065ab41138cf102a6 | Add users to the admin | martinogden/djangae,trik/djangae,jscissr/djangae,SiPiggles/djangae,pablorecio/djangae,martinogden/djangae,martinogden/djangae,stucox/djangae,pablorecio/djangae,SiPiggles/djangae,chargrizzle/djangae,stucox/djangae,wangjun/djangae,kirberich/djangae,kirberich/djangae,trik/djangae,nealedj/djangae,armirusco/djangae,leekchan/djangae,wangjun/djangae,b-cannon/my_djae,kirberich/djangae,grzes/djangae,asendecka/djangae,SiPiggles/djangae,grzes/djangae,potatolondon/djangae,armirusco/djangae,jscissr/djangae,leekchan/djangae,armirusco/djangae,jscissr/djangae,trik/djangae,chargrizzle/djangae,leekchan/djangae,asendecka/djangae,grzes/djangae,stucox/djangae,nealedj/djangae,chargrizzle/djangae,wangjun/djangae,asendecka/djangae,potatolondon/djangae,nealedj/djangae,pablorecio/djangae | djangae/contrib/auth/admin.py | djangae/contrib/auth/admin.py | from django.contrib import admin
from djangae.contrib.auth.models import User

# Expose the custom User model in Django's admin site.
admin.site.register(User)
e506a059369b089cb4c163669a04fbb9d05e9884 | add minimal FBO example | swenger/glitter,swenger/glitter,swenger/glitter | examples/minimalframebufferexample.py | examples/minimalframebufferexample.py | from scipy.misc import imsave
from glitter import ShaderProgram, RectangleTexture, Framebuffer, VertexArray
from glitter.contexts.glut import GlutWindow, main_loop

vertex_shader = """
#version 400 core
layout(location=0) in vec4 in_position;
void main() {
    gl_Position = in_position;
}
"""

fragment_shader = """
#version 400 core
layout(location=0) out vec4 out_color;
uniform float dimx, dimy;
void main() {
    out_color = vec4(gl_FragCoord.x / dimx, gl_FragCoord.y / dimy, 1.0, 1.0);
}
"""


class MinimalFramebufferExample(object):
    """Render a gradient quad into an offscreen FBO (saved to disk) and on screen."""

    def __init__(self):
        self.window = GlutWindow(double=True, multisample=True, shape=(100, 800))
        self.window.display_callback = self.display
        self.shader = ShaderProgram(vertex=vertex_shader, fragment=fragment_shader)
        # window.shape is (height, width); the shader wants the two separately
        self.shader.dimy, self.shader.dimx = self.window.shape
        self.fbo = Framebuffer(RectangleTexture(shape=self.window.shape + (3,)))
        self.vao = VertexArray(((-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)), elements=((0, 1, 2), (0, 2, 3)))

    def save(self, filename):
        """Draw the quad into the offscreen framebuffer and write it to filename."""
        self.fbo.clear()
        with self.shader:
            with self.fbo:
                self.vao.draw()
        imsave(filename, self.fbo[0].data)

    def display(self):
        """GLUT display callback: draw the quad to the back buffer and swap."""
        self.window.clear()
        with self.shader:
            self.vao.draw()
        self.window.swap_buffers()

    def run(self):
        main_loop()


if __name__ == "__main__":
    ie = MinimalFramebufferExample()
    ie.save("test.png")
    ie.run()

# TODO why do I get a bus error on exit?
| mit | Python | |
066c48effb2f2c1534e43687d031e01f823f098f | add common mixins | EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi | emgapi/viewsets.py | emgapi/viewsets.py | # -*- coding: utf-8 -*-
# Copyright 2017 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from rest_framework import viewsets, mixins

logger = logging.getLogger(__name__)


class ReadOnlyListModelViewSet(mixins.ListModelMixin,
                               viewsets.GenericViewSet):
    """
    A viewset that provides default `list()` action.
    """
    pass


class ReadOnlyRetrieveModelViewSet(mixins.RetrieveModelMixin,
                                   viewsets.GenericViewSet):
    """
    A viewset that provides default `retrieve()` action.
    """
    pass


class ReadOnlyModelViewSet(mixins.RetrieveModelMixin,
                           mixins.ListModelMixin,
                           viewsets.GenericViewSet):
    """
    A viewset that provides default `list()` and `retrieve()` actions.
    """
    pass
| apache-2.0 | Python | |
59ccbc17e7e7f4f75b8f95bb1287b676466068db | Create snake1.py | afeyrer/Final-Project | snake1.py | snake1.py | mit | Python | ||
cf496298c839e63a786bc8b4a934df09beef93ac | Add script to better log time | squaresurf/dotfiles,squaresurf/dotfiles,squaresurf/dotfiles | bin/log.py | bin/log.py | #!/usr/bin/env python
from datetime import date, datetime
from pathlib import Path
import csv
import os
import sys

# Exactly one job description is required on the command line.
if len(sys.argv) != 2:
    print("Usage: {} 'job to be logged'".format(sys.argv[0]))
    exit(1)

log_dir = "{}/time_logs".format(Path.home())
if not os.path.isdir(log_dir):
    os.mkdir(log_dir)

# One CSV per calendar day.
csv_file = "{}/{}_time_log.csv".format(log_dir, date.today())
csv_fields = ["job", "start", "end", "duration"]
job = sys.argv[1]
def now_string():
    """Current local time as an ISO-8601 string with seconds precision."""
    return datetime.now().isoformat(timespec="seconds")


def new_row(job):
    """Start a log row for *job*: record the job name and the start timestamp."""
    return {"job": job, "start": now_string()}


def complete_row(row):
    """Close out *row* in place: stamp 'end' and compute 'duration' = end - start."""
    row["end"] = now_string()
    start = datetime.fromisoformat(row["start"])
    end = datetime.fromisoformat(row["end"])
    row["duration"] = str(end - start)
    return row
# Append to today's log: close out the previous entry, then start the new one.
if os.path.isfile(csv_file):
    with open(csv_file) as fh:
        rows = list(csv.DictReader(fh))
    rows[-1] = complete_row(rows[-1])
    rows.append(new_row(job))
else:
    rows = [new_row(job)]

# Rewrite the whole file so the just-completed row gets its end/duration.
with open(csv_file, "w") as fh:
    writer = csv.DictWriter(fh, csv_fields)
    writer.writeheader()
    writer.writerows(rows)
| mit | Python | |
6cad1e6f91dd01f8fa36e2c8ca1cf7fe092a5c43 | Adjust init: static route include, double quotes. | hlwsmith/akhet,hlwsmith/akhet,Pylons/akhet,hlwsmith/akhet,Pylons/akhet | akhet/demo/__init__.py | akhet/demo/__init__.py | from pyramid.config import Configurator
import pyramid_beaker


def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    config = Configurator(settings=settings)
    # Configure Beaker sessions and caching
    session_factory = pyramid_beaker.session_factory_from_settings(settings)
    config.set_session_factory(session_factory)
    pyramid_beaker.set_cache_regions_from_settings(settings)
    # Configure renderers and event subscribers.
    config.add_renderer(".html", "pyramid.mako_templating.renderer_factory")
    config.include(".subscribers")
    config.include("akhet.static")
    # Add routes and views.
    config.add_route("home", "/")
    config.add_static_route("akhet.demo", "static", cache_max_age=3600)
    config.scan()
    return config.make_wsgi_app()


def serve():
    """Run the application like 'pserve development.ini' would do."""
    import logging
    from wsgiref.simple_server import make_server
    fmt = "%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s"
    settings = {
        "pyramid.reload_templates": True,
        "pyramid.debug_authorization": False,
        "pyramid.debug_notfound": False,
        "pyramid.debug_routematch": False,
        "pyramid.debug_templates": True,
        "pyramid.default_locale_name": "en",
        "pyramid.includes": ["pyramid_debugtoolbar"]
    }
    logging.basicConfig(level=logging.INFO, format=fmt)
    logging.getLogger("akhet.demo").setLevel(logging.DEBUG)
    app = main({}, **settings)
    httpd = make_server("127.0.0.1", 5000, app)
    httpd.serve_forever()


if __name__ == "__main__": serve()
| import akhet.static as static
from pyramid.config import Configurator
import pyramid_beaker
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
# Configure Beaker sessions and caching
session_factory = pyramid_beaker.session_factory_from_settings(settings)
config.set_session_factory(session_factory)
pyramid_beaker.set_cache_regions_from_settings(settings)
# Configure renderers and event subscribers.
config.add_renderer(".html", "pyramid.mako_templating.renderer_factory")
config.include(".subscribers")
# Add routes and views.
config.add_route('home', '/')
static.add_static_route(config, "akhet.demo", "static", cache_max_age=3600)
config.scan()
return config.make_wsgi_app()
def serve():
"""Run the application like 'pserve development.ini' would do."""
import logging
from wsgiref.simple_server import make_server
fmt = "%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s"
settings = {
"pyramid.reload_templates": True,
"pyramid.debug_authorization": False,
"pyramid.debug_notfound": False,
"pyramid.debug_routematch": False,
"pyramid.debug_templates": True,
"pyramid.default_locale_name": "en",
"pyramid.includes": ["pyramid_debugtoolbar"]
}
logging.basicConfig(level=logging.INFO, format=fmt)
logging.getLogger("akhet.demo").setLevel(logging.DEBUG)
app = main({}, **settings)
httpd = make_server("127.0.0.1", 5000, app)
httpd.serve_forever()
if __name__ == "__main__": serve()
| mit | Python |
addb7f342e5c2bc0f1df19bd6d00d50be23a52da | Add Year | JennyWang01/PythonLeaning | year.py | year.py | months=['January','February','March','April','May','June','July','August','September','October','November','December']
endings=['st','nd','rd']+17*['th']+['st','nd','rd']+7*['th']+['st']
year=raw_input('Year:')
month=raw_input('Month(1--12):')
day=raw_input('day(1--31):')
month_number=int(month)
day_number=int(day)
month_name=months[month_number-1]
ordinal=day+endings[day_number-1]
print month_name+ ''+ordinal+''+year
| mit | Python | |
dac398518bac49c6c51d04166c021323cdba9235 | Add unit tests. | NLeSC/ODEX-FAIRDataPoint,DTL-FAIRData/ODEX-FAIRDataPoint,NLeSC/ODEX-FAIRDataPoint,DTL-FAIRData/ODEX-FAIRDataPoint,DTL-FAIRData/ODEX-FAIRDataPoint,NLeSC/ODEX-FAIRDataPoint,DTL-FAIRData/ODEX-FAIRDataPoint,NLeSC/ODEX-FAIRDataPoint | fdp-api/python/tests/test_metadata.py | fdp-api/python/tests/test_metadata.py | from nose import with_setup
from nose.tools import assert_equals, assert_in, assert_true, assert_false
from metadata import FAIRConfigReader, FAIRGraph, FDPath
from urllib2 import urlparse
reader = FAIRConfigReader()


def test_paths():
    # 'fdp' and 'doc' ignore the id; the other resources append '/<id>'
    id = 'abc'
    for resource in ['fdp', 'doc']:
        assert_equals(FDPath(resource), FDPath(resource, id))
    for resource in ['cat', 'dat', 'dist']:
        assert_equals(FDPath(resource, id), '%s/%s' % (FDPath(resource), id))


def test_sections():
    set_a = set(['fdp','catalog/catalog-01','dataset/breedb','distribution/breedb-sqldump','distribution/breedb-sparql'])
    set_b = set(reader.getSectionHeaders())
    assert_true(set_a == set_b)


def test_get_items():
    for section, fields in reader.getMetadata().iteritems():
        for field in fields:
            assert_false(isinstance(reader.getItems(section, field), list))


def test_get_triples():
    for triple in reader.getTriples():
        assert_true(isinstance(triple, tuple))
        assert_equals(len(triple), 3)


base_uri = 'http://127.0.0.1:8080'
g = FAIRGraph(base_uri)


def test_base_uri():
    assert_equals(base_uri, g.baseURI())


def test_doc_uri():
    assert_equals(urlparse.urljoin(base_uri, 'doc'), g.docURI())


def test_fdp_uri():
    assert_equals(urlparse.urljoin(base_uri, 'fdp'), g.fdpURI())


def test_catalog_uri():
    assert_equals(urlparse.urljoin(base_uri, 'catalog/catalog-01'), g.catURI('catalog-01'))


def test_dataset_uri():
    assert_equals(urlparse.urljoin(base_uri, 'dataset/breedb'), g.datURI('breedb'))


def test_distribution_uri():
    assert_equals(urlparse.urljoin(base_uri, 'distribution/breedb-sqldump'), g.distURI('breedb-sqldump'))
| apache-2.0 | Python | |
19c94f3bf71c07763dfddd72b867502d765d01db | Add https://gist.github.com/2349756 | hudsonkeithl/scraperwiki-python,openaustralia/scraperwiki-python,tlevine/scraperwiki-python,hudsonkeithl/scraperwiki-python,openaustralia/scraperwiki-python,scraperwiki/scraperwiki-python,scraperwiki/scraperwiki-python,tlevine/scraperwiki-python | sqlite.py | sqlite.py | import scraperwiki
from dumptruck import DumpTruck

dt = DumpTruck()

def execute(sqlquery, data=None, verbose=1):
    """ Should return list of lists, but returns list of dicts """
    # *(data or []): unpacking the default None would raise TypeError
    return dt.execute(sqlquery, *(data or []), commit=False)
    # other way [ dict(zip(result["keys"], d)) for d in result["data"] ]

def save(unique_keys, data, table_name="swdata", verbose=2, date=None):
    dt.create_table(data, table_name = table_name)
    #dt.add_index(unique_keys)
    return dt.insert(data, table_name = table_name)

def attach(name, asname=None, verbose=1):
    "This somehow connects to scraperwiki."
    raise NotImplementedError

def commit(verbose=1):
    dt.commit()

def select(sqlquery, data=None, verbose=1):
    sqlquery = "select %s" % sqlquery # maybe check if select or another command is there already?
    # *(data or []): unpacking the default None would raise TypeError
    return dt.execute(sqlquery, *(data or []), commit = False)

def show_tables(dbname=""):
    return dt.tables()

def save_var(name, value, verbose=2):
    return dt.save_var(name, value)

def get_var(name, default=None, verbose=2):
    # NOTE(review): assumes dt.get_var raises NameError for a missing
    # variable -- confirm against the dumptruck API
    try:
        return dt.get_var(name)
    except NameError:
        return default
| bsd-2-clause | Python | |
07e074e662b33713a266777300354e8953ce3b78 | ADD connection class to Azure | fastconnect/cloudify-azure-plugin | plugin/connection.py | plugin/connection.py | from plugin import utils
from azure import servicemanagement


class AzureConnectionClient():
    """Provides functions for getting the Azure Service Management Service
    """

    def __init__(self):
        self.connection = None

    def client(self):
        """Represents the AzureConnection Client
        """
        azure_subscription = self._get_azure_subscription()
        azure_certificate = self._get_azure_certificate()
        # was: ServiceManagementService was referenced unqualified, which
        # raises NameError -- only the 'servicemanagement' module is imported
        return servicemanagement.ServiceManagementService(azure_subscription,
                                                          azure_certificate)

    def _get_azure_subscription(self):
        node_properties = \
            utils.get_instance_or_source_node_properties()
        return node_properties["subscription"]

    def _get_azure_certificate(self):
        node_properties = \
            utils.get_instance_or_source_node_properties()
        return node_properties["certificate"]
| apache-2.0 | Python | |
9ceb4f394c19a74d8cd28698eeb9116cf8099117 | add anno | polltooh/traffic_video_analysis | annot_to_densitymap.py | annot_to_densitymap.py | import xml.etree.ElementTree as ET
import numpy as np
import xmltodict
import matplotlib.pyplot as plt
#import cv2

xml_data = 'data/Cam253/[Cam253]-2016_4_21_15h_150f/000150.xml'
with open(xml_data) as xml_d:
    doc = xmltodict.parse(xml_d.read())

# density accumulator; NOTE(review): allocated as (352, 240) and indexed
# [x, y] below -- confirm this matches the camera frame orientation
img = np.zeros((352, 240), np.float32)
def add_to_image(image, bbox):
    """Splat one vehicle's density onto *image* and return it.

    The bounding box contributes total mass 1.0 spread uniformly over its
    area, so image.sum() ends up equal to the vehicle count.
    """
    xmin = int(bbox['xmin'])
    ymin = int(bbox['ymin'])
    xmax = int(bbox['xmax'])
    ymax = int(bbox['ymax'])
    density = 1 / float((ymax - ymin) * (xmax - xmin))
    image[xmin:xmax, ymin:ymax] += density
    # leftover debug prints of the running sum and box corners were removed
    return image
# Accumulate one unit of mass per annotated vehicle, then show the map.
for vehicle in doc['annotation']['vehicle']:
    add_to_image(img, vehicle['bndbox'])

imgplot = plt.imshow(img)
plt.show()
print(np.sum(img))  # should equal the number of vehicles
| apache-2.0 | Python | |
0b0150ad73c52ea5b23def899edb819bd3318eb1 | fix uncaught analytics exception | lbryio/lbry,zestyr/lbry,lbryio/lbry,lbryio/lbry,zestyr/lbry,zestyr/lbry | lbrynet/analytics/api.py | lbrynet/analytics/api.py | import functools
import json
import logging
from requests import auth
from requests_futures import sessions
from lbrynet.conf import settings
from lbrynet.analytics import utils
log = logging.getLogger(__name__)
def log_response(fn):
    """Decorator for request methods returning futures: once the future
    resolves, log cancellation, the exception, or the response."""
    def _log(future):
        if future.cancelled():
            log.warning('Request was unexpectedly cancelled')
        elif future.exception():
            log.warning(future.exception_info())
        else:
            response = future.result()
            log.debug('Response (%s): %s', response.status_code, response.content)

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        future = fn(*args, **kwargs)
        future.add_done_callback(_log)
        return future
    return wrapper


class Api(object):
    """Thin client for the analytics endpoint, using an async requests session."""

    def __init__(self, session, url, write_key):
        self.session = session
        self.url = url
        self.write_key = write_key

    @property
    def auth(self):
        # basic auth with the write key as username and an empty password
        return auth.HTTPBasicAuth(self.write_key, '')

    @log_response
    def batch(self, events):
        """Send multiple events in one request.

        Each event needs to have its type specified.
        """
        data = json.dumps({
            'batch': events,
            'sentAt': utils.now(),
        })
        log.debug('sending %s events', len(events))
        log.debug('Data: %s', data)
        return self.session.post(self.url + '/batch', json=data, auth=self.auth)

    @log_response
    def track(self, event):
        """Send a single tracking event"""
        log.debug('Sending track event: %s', event)
        return self.session.post(self.url + '/track', json=event, auth=self.auth)

    @classmethod
    def new_instance(cls, session=None):
        """Initialize an instance using values from the configuration"""
        if not session:
            session = sessions.FuturesSession()
        return cls(
            session,
            settings.ANALYTICS_ENDPOINT,
            utils.deobfuscate(settings.ANALYTICS_TOKEN)
        )
| import functools
import json
import logging
from requests import auth
from requests_futures import sessions
from lbrynet.conf import settings
from lbrynet.analytics import utils
log = logging.getLogger(__name__)
def log_response(fn):
def _log(future):
if future.cancelled():
log.warning('Request was unexpectedly cancelled')
else:
response = future.result()
log.debug('Response (%s): %s', response.status_code, response.content)
@functools.wraps(fn)
def wrapper(*args, **kwargs):
future = fn(*args, **kwargs)
future.add_done_callback(_log)
return future
return wrapper
class Api(object):
def __init__(self, session, url, write_key):
self.session = session
self.url = url
self.write_key = write_key
@property
def auth(self):
return auth.HTTPBasicAuth(self.write_key, '')
@log_response
def batch(self, events):
"""Send multiple events in one request.
Each event needs to have its type specified.
"""
data = json.dumps({
'batch': events,
'sentAt': utils.now(),
})
log.debug('sending %s events', len(events))
log.debug('Data: %s', data)
return self.session.post(self.url + '/batch', json=data, auth=self.auth)
@log_response
def track(self, event):
"""Send a single tracking event"""
log.debug('Sending track event: %s', event)
return self.session.post(self.url + '/track', json=event, auth=self.auth)
@classmethod
def new_instance(cls, session=None):
"""Initialize an instance using values from the configuration"""
if not session:
session = sessions.FuturesSession()
return cls(
session,
settings.ANALYTICS_ENDPOINT,
utils.deobfuscate(settings.ANALYTICS_TOKEN)
)
| mit | Python |
8088d7061c24ca78df0c92be6e36edb7deca1dac | Remove print | kalail/gutter,kalail/gutter,kalail/gutter,disqus/gutter,disqus/gutter | gargoyle/client/operators/__init__.py | gargoyle/client/operators/__init__.py | import inspect
class GetInitArguments(object):
    """Descriptor: the owner class's __init__ parameter names, minus self."""

    def __get__(self, obj, obj_type):
        # getfullargspec replaces getargspec, which was deprecated and then
        # removed in Python 3.11; the .args list it returns is the same here
        args = inspect.getfullargspec(obj_type.__init__).args
        return tuple(args[1:])  # drop 'self'


class Base(object):
    def __init__(self):
        pass

    arguments = GetInitArguments()
| import inspect
class GetInitArguments(object):
def __get__(self, obj, obj_type):
print obj_type
args = inspect.getargspec(obj_type.__init__).args
return tuple(args[1:])
class Base(object):
def __init__(self):
pass
arguments = GetInitArguments()
| apache-2.0 | Python |
8238d7ad6793f6deef520a46f85d40b0d75d221f | Add placeholder for the parsing code | rambo/python-gpmf,rambo/python-gpmf | gpmf/parse.py | gpmf/parse.py | # TODO: Implement GPMF parsing
# see https://github.com/gopro/gpmf-parser#gmfp-deeper-dive for format details
| mit | Python | |
d7553fd42e3ac0bcdf0ab70468ad314253b64871 | Create parse.py | soyrice/autozoom | zoom/parse.py | zoom/parse.py | parse(geo, df) :
def parse(geo, df):
    """Collect the extreme lat/lon coordinate pairs of a geoJSON feature.

    NOTE(review): reconstructed from a fragment -- the 'def' keyword was
    missing and the names usStates, statePctChange and df are referenced but
    never defined/used here; confirm against the caller before relying on it.
    """
    latDict, lonDict = dict(), dict()
    # identify extreme most latitude and longitude coordinate pairs in each state
    # save coordinate pair of most extreme points for autozoom
    for count in range(0, len(usStates['features'])):
        if geo['key'] in [code for code in statePctChange['code']]:
            for coords in geo['key']['coordinates'][0]:
                # collect lat, lon data in either geoJSON format:
                # coords is either one [lon, lat] pair or a list of pairs
                try:
                    latDict[coords[1]] = coords
                    lonDict[coords[0]] = coords
                except:
                    for pair in coords:
                        latDict[pair[1]] = pair
                        lonDict[pair[0]] = pair

    bounds = [list(reversed(l)) for l in [latDict[max([key for key in latDict.keys()])], latDict[min([key for key in latDict.keys()])],
                                          lonDict[max([key for key in lonDict.keys()])], lonDict[min([key for key in lonDict.keys()])]]]

    # keep most extreme bounds to save maximum bounding triangle
    # NOTE(review): list.remove returns None, so 'triangle' is always None; it
    # also removes by value, and the min of the abs() extremes is a scalar
    # that never appears in 'bounds' -- this line needs a redesign
    triangle = bounds.remove(min([abs(max([key for key in latDict.keys()])), abs(min([key for key in latDict.keys()])),
                                  abs(max([key for key in lonDict.keys()])), abs(min([key for key in lonDict.keys()]))]))
    return bounds, triangle
| mit | Python | |
0f7d6f039930324d77dc23315ed3c9bd10c1f0de | Add missing file | anlthms/sp-2016,anlthms/sp-2016 | util.py | util.py | #
# Copyright 2016 Anil Thomas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions.
"""
import numpy as np
from sklearn import metrics
from prep import nwin
def avg(labels, preds):
    """Average labels and predictions over each group of nwin windows.

    Asserts that every label within a group agrees with the group's first.
    """
    assert preds.shape[0] % nwin == 0
    preds_len = preds.shape[0] // nwin
    post_preds = np.zeros(preds_len, np.float32)
    post_labels = np.zeros(preds_len, np.float32)
    for i in range(preds_len):
        post_preds[i] = np.mean(preds[nwin*i:nwin*(i+1)])
        post_labels[i] = labels[nwin*i]
        assert post_labels[i] == np.mean(labels[nwin*i:nwin*(i+1)])
    return (post_labels, post_preds)


def avg_preds(preds):
    """Average predictions over each group of nwin windows."""
    assert preds.shape[0] % nwin == 0
    preds_len = preds.shape[0] // nwin
    post_preds = np.zeros(preds_len, np.float32)
    for i in range(preds_len):
        post_preds[i] = np.mean(preds[nwin*i:nwin*(i+1)])
    return post_preds


def auc(labels, preds):
    """Area under the ROC curve."""
    return metrics.roc_auc_score(labels, preds)


def score(labels, preds):
    """AUC computed on the window-averaged labels and predictions."""
    return auc(*avg(labels, preds))
| apache-2.0 | Python | |
37dabf1c98407602edda25dbb9c24c17bd84f19a | Add timer to builders | henningjp/CoolProp,JonWel/CoolProp,DANA-Laboratory/CoolProp,CoolProp/CoolProp,henningjp/CoolProp,CoolProp/CoolProp,DANA-Laboratory/CoolProp,DANA-Laboratory/CoolProp,JonWel/CoolProp,dcprojects/CoolProp,dcprojects/CoolProp,CoolProp/CoolProp,dcprojects/CoolProp,CoolProp/CoolProp,JonWel/CoolProp,JonWel/CoolProp,CoolProp/CoolProp,henningjp/CoolProp,henningjp/CoolProp,DANA-Laboratory/CoolProp,JonWel/CoolProp,dcprojects/CoolProp,henningjp/CoolProp,dcprojects/CoolProp,CoolProp/CoolProp,JonWel/CoolProp,DANA-Laboratory/CoolProp,dcprojects/CoolProp,DANA-Laboratory/CoolProp,CoolProp/CoolProp,DANA-Laboratory/CoolProp,henningjp/CoolProp,CoolProp/CoolProp,dcprojects/CoolProp,henningjp/CoolProp,henningjp/CoolProp,JonWel/CoolProp | dev/scripts/coolprop_builder_timer.py | dev/scripts/coolprop_builder_timer.py | from __future__ import print_function
import urllib, json

filehandle = urllib.urlopen('http://www.coolprop.dreamhosters.com:8010/json/builders')
jj = json.loads(filehandle.read())

# (elapsed seconds, builder name) for the most recent build of each builder
times = []
for key in jj.keys():
    filehandle = urllib.urlopen('http://www.coolprop.dreamhosters.com:8010/json/builders/' + key + '/builds/-1')
    builder = json.loads(filehandle.read())
    # times[0]/times[1] are the buildbot start/end timestamps
    elapsed_time = builder['times'][1] - builder['times'][0]
    times.append((elapsed_time, key))

# slowest builders first
print(sorted(times)[::-1])
27bbb0dff2ef60008c0bd7615e2607ee8544aeb2 | Add files via upload | sandiegodata/age-friendly-communities | ageDataOver65_v3.py | ageDataOver65_v3.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 19 23:56:12 2016
@author: CC
Using Python 3.5.2; Anaconda 4.2.0; Spyder 3.0.0
Purpose: open a CSV file from https://censusreporter.org/ and recode it with the corresponding JSON file
CSV raw data file containing age census data in San Diego: 'acs2015_1yr_B01001.csv'
JSON metadata file: 'metadata.json'
"""
import pandas
import numpy
import os
import ijson

path = os.chdir('/Users/superuser/Documents/projects/SDRegionalDataLib/age friendly community/acs2015_1yr_B01001/')

# load the age data file
ageData = pandas.read_csv('acs2015_1yr_B01001.csv')

# get the names of the columns
colNames = list(ageData.columns.values)


# only need the B01001001, B01001002, etc. codes -- skip anything with the
# word 'Error', plus the 'geoid' and 'name' columns
def getRecodingKeys(element):
    if ('Error' not in element) and ('name' != element) and ('geoid' != element):
        return element
    return False


# filter out the original column names that don't require recoding
codingDF = pandas.DataFrame({'origColNames': list(filter(getRecodingKeys, colNames))})

# open the json file and read the column metadata for table B01001
jsonFile = 'metadata.json'
with open(jsonFile, 'r') as f:
    objects = ijson.items(f, 'tables.B01001.columns')
    columnAttr = list(objects)

# map each census code to its human-readable name from the metadata
codingDF['recodeColName'] = [columnAttr[0][origColName]['name']
                             for origColName in codingDF.origColNames]

# recode ageData with the actual column names; error columns get an
# '_Error' suffix on the recoded name.
# (was: two near-duplicate loops that used a whole Series as a truth value,
# called .get_loc on a Series and assigned into ageData.columns -- all of
# which raise in pandas; the triple-quoted log of failed attempts and the
# debug print were dropped)
# NOTE(review): assumes error columns are named like 'B01001001, Error' as in
# censusreporter exports -- confirm against the CSV header
recodeMap = dict(zip(codingDF.origColNames, codingDF.recodeColName))
for col in list(ageData.columns):
    code = col.replace(', Error', '')
    if code in recodeMap:
        tempColName = recodeMap[code]
        if 'Error' in col:
            tempColName += '_Error'
        ageData.rename(columns={col: tempColName}, inplace=True)
45aacb9d0f5f24600d4796cc5194dfbde1c65222 | Add map/dist/filter demo | precompiler/python-101 | learning-python/ch05/MapExamples.py | learning-python/ch05/MapExamples.py | lst = range(10)
ret = map(lambda *i: i, lst, "abcdefg")
for item in ret:
print(item)
students = [
dict(id=0, credits=dict(math=9, physics=6, history=7)),
dict(id=1, credits=dict(math=6, physics=7, latin=10)),
dict(id=2, credits=dict(history=8, physics=9, chemistry=10)),
dict(id=3, credits=dict(math=5, physics=5, geography=7)),
]
print(type(students[0]["credits"].values()))
def decorate(student):
return (sum(student['credits'].values()), student)
def undecorate(decorated_student):
    """Return the original student dict from a (total, student) pair."""
    record = decorated_student[1]
    return record
students = sorted(map(decorate, students), reverse=True)
for s in students:
print(s)
students = list(map(undecorate, students))
for s in students:
print(s)
a = [3, 6, 9]
b = [4, 7, 10]
c = [1, 8, 2]
r = map(lambda n: max(*n), zip(a, b, c))
for item in r:
print(item)
d = [0, 1, 2, 3, 4]
ret = filter(lambda n: n > 2, d)
for item in ret:
print(item) | apache-2.0 | Python | |
efcc5260a8566d41880cddcc54d4f86c8f722153 | Add unit test | WheatonCS/Lexos,WheatonCS/Lexos,WheatonCS/Lexos | test/unit_test/test_utility_stats/test_utility_stats.py | test/unit_test/test_utility_stats/test_utility_stats.py | from lexos.processors.analyze import information
# Two word-frequency dicts, one per test file.
word_lists = [{"abundant": 40, "actually": 20, "advanced": 15, "alter": 5},
              {"hunger": 1, "hunt": 2, "ignore": 3, "illustration": 4,
               "ink": 5}]
file_list = ["file_one.txt", "file_two.txt"]
for i in range(len(file_list)):
    # because the first row of the first line is the ''
    # NOTE(review): the whole file_list is passed here rather than
    # file_list[i]; confirm FileInformation expects a list, not one name.
    file_information = information.FileInformation(word_lists[i], file_list)
| mit | Python | |
7dc489b393ba293db5cc72c2f9b2bd65d6fe1166 | add command to migrate sheet. | opencivicdata/opencivicdata.org,opencivicdata/opencivicdata.org,opencivicdata/opencivicdata.org | upload/management/commands/import_transaction.py | upload/management/commands/import_transaction.py | import csv
from django.contrib.auth.models import User
from opencivicdata.models import Jurisdiction, Division
from upload.backend.parser import import_stream, people_to_pupa
from upload.backend.importer import do_import
from upload.models import SpreadsheetUpload
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Management command that imports a previously uploaded spreadsheet."""
    args = '<transaction> <user>'
    help = 'Import a Sheet'

    def import_transaction(self, transaction_id, username):
        user = User.objects.get(username=username)
        transaction = SpreadsheetUpload.objects.get(id=transaction_id)
        # Refuse to re-import a transaction that was already approved.
        assert transaction.approved_by is None
        stream = people_to_pupa(transaction.people.all(), transaction)
        report = do_import(stream, transaction)
        # Record which user approved/ran this import.
        transaction.approved_by = user
        transaction.save()

    def handle(self, *args, **options):
        # Expects exactly (transaction_id, username) as positional args.
        return self.import_transaction(*args)
| bsd-3-clause | Python | |
35a0832ab372b9519ed7391e166ef8c25456a005 | Bump minor version | gmjosack/pygerduty,dropbox/pygerduty,excelciordan/pygerduty,JohnTheodore/pygerduty,lyft/pygerduty | pygerduty/version.py | pygerduty/version.py | version_info = (0, 28, 2)
__version__ = '.'.join(str(v) for v in version_info)
| version_info = (0, 28, 1)
__version__ = '.'.join(str(v) for v in version_info)
| mit | Python |
7b34a2cc6c8124629ee11a5782a8d9005f80e4b1 | Create downloader-v0.22.py | Keiiko/anime-manga-cz-downloader | downloader-v0.22.py | downloader-v0.22.py | import urllib2
import re
import os
def stahniHtml(url):
    """Fetch *url* and return the raw response body as a string."""
    f = urllib2.urlopen(url)
    obsah = f.read()
    f.close()
    return obsah
def nahled(url):
    """Walk every chapter of the manga at *url* and download its pages.

    Side effects: sets the module globals ``chapter`` (current chapter
    name) and ``currpatch1`` (base URL of the current chapter directory),
    which the downstream helpers read.
    """
    global chapter
    global currpatch1
    odkazy = vyberodkazux(url)
    for odkaz in odkazy:
        # Chapter index URL minus "index.html" == chapter base directory.
        currpatch1 = odkaz.replace("index.html", "")
        # Last path component before /index is the chapter name.
        chapter = re.search(r'.*/(.*?)/index',odkaz).group(1)
        print "Kapitola "+chapter
        print " Stahovani nahledu kapitoly... ",
        nahledhtml = stahniHtml(odkaz)
        print "Hotovo."
        print " Vyhledavani odkazu stranky... ",
        # Page links sit between the thumbnail marker and the xsmalltxt class.
        tabulka = re.search(r'<!-- Thumbnail images -->(.*?)class="xsmalltxt"',nahledhtml, re.DOTALL).group(1)
        nahledyurl = re.findall(r'<a href="(.*?)"',tabulka)
        print "Hotovo."
        kapitola(nahledyurl)
    print "Vsechna stahovani dokoncena."
    finalpatch = os.path.expanduser("~")+"\\Downloads\\anime-manga.cz-downloader\\"+nazevserie+"\\"
    print "Ulozeno do: "+finalpatch
    # Open the download folder in Explorer (os.startfile is Windows-only).
    os.startfile(finalpatch)
def kapitola(nahledyurl):
    """Download every page image of one chapter from its thumbnail links."""
    for relative_url in nahledyurl:
        # Thumbnail hrefs are relative; prefix the chapter directory URL.
        getobrazek(currpatch1 + relative_url)
def getobrazek(kapitolafullurl):
    """Extract the page image URL from a page and pass it to ulozitobr().

    Side effect: sets the module global ``imgname`` (file name used when
    saving the image).
    """
    global imgname
    print " Vyhledavani odkazu obrazku... ",
    # The page image is the <img id="slide">; "../" makes it chapter-relative.
    obrazekshorturl = re.search(r'<img id="slide" src="(.*?)".*?>',stahniHtml(kapitolafullurl)).group(1).replace("../", "")
    imgname = obrazekshorturl
    print "Hotovo."
    obrazekfullurl = currpatch1 + obrazekshorturl
    #print obrazekfullurl
    ulozitobr(obrazekfullurl)
def ulozitobr(obrazekfullurl):
    """Download the image at *obrazekfullurl* into
    ~/Downloads/anime-manga.cz-downloader/<series>/<chapter>/<imgname>.

    Reads the module globals ``nazevserie``, ``chapter`` and ``imgname``.
    """
    print " Ukladani obrazku "+obrazekfullurl+"... ",
    # Windows-style path; directory is created on demand.
    currentpatch = os.path.expanduser("~")+"\\Downloads\\anime-manga.cz-downloader\\"+nazevserie+"\\"+chapter+"\\"
    createDir(currentpatch)
    imgData = urllib2.urlopen(obrazekfullurl).read()
    output = open(currentpatch+imgname,'wb')
    output.write(imgData)
    output.close()
    print "Hotovo."
def createDir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses try/except instead of an exists() pre-check so a directory
    created by another process between check and makedirs() does not
    crash the download (TOCTOU race in the original implementation).
    """
    try:
        os.makedirs(path)
    except OSError:
        # Path already exists (or a parent raced us); only re-raise when
        # the path still is not a usable directory.
        if not os.path.isdir(path):
            raise
### 18+ rozsireni ###
def vyberodkazux(url):
    """Download the manga's main page and return all chapter index URLs.

    Side effect: sets the module global ``nazevserie`` (series title used
    for the download directory name).
    """
    global nazevserie
    print "Stahovani hlavni stranky... ",
    stranka = stahniHtml(url)
    print "Hotovo."
    print "Vyhledavani kapitol... ",
    # Combine direct chapter links with links reached via 18+ interstitials.
    odkazy = odkazya(stranka) + odkazyb(stranka)
    # Series title from <title>, with whitespace variants normalised.
    nazevserie = re.search(r'<title>(.*?) *\| Anime - Manga.*?</title>',stranka).group(1).replace(" ", "").replace(" ", " ").replace(" ", " ")
    print "Hotovo."
    print "Manga "+nazevserie
    return odkazy
def odkazya(stranka):
    """Collect /manga chapter links (with and without "www.") from *stranka*."""
    patterns = (
        r'<a href="(http://anime-manga.cz/manga.*?)"',
        r'<a href="(http://www.anime-manga.cz/manga.*?)"',
    )
    links = []
    for pattern in patterns:
        links += re.findall(pattern, stranka)
    return links
def odkazyb(stranka):
    """Resolve chapter links hidden behind age-gate (18+) landing pages.

    Finds links whose URL contains a digit, loads each landing page, and
    pulls the real /manga chapter link out of it.
    """
    odkazy18 = re.findall(r'<a href="(http://anime-manga.cz/\d[^/]*?)"|<a href="(http://www.anime-manga.cz/\d[^/]*?)"|<a href="(http://anime-manga.cz/[^/]*?\d)"|<a href="(http://www.anime-manga.cz/[^/]*?\d)"', stranka)
    odkazy = []
    for odkaz18 in odkazy18:
        # findall() with four alternative groups yields 4-tuples; only the
        # alternative that matched is non-empty.
        for i in range(4):
            if odkaz18[i]!= '':
                stranka18 = stahniHtml(odkaz18[i])
                odkazy.append(re.search(r'<a href="(.*?anime-manga.cz/manga.*?)"',stranka18).group(1))
    return odkazy
print "Anime-manga.cz Downloader"
xurl = raw_input('stahnout mangu s url: http://www.anime-manga.cz/')
nahled("http://www.anime-manga.cz/"+xurl)
| mit | Python | |
b9245a8acf0bed7e19f709490c4ba3788028da93 | Fix error in PoolStatusEntry model | MozillaSecurity/FuzzManager,lazyparser/FuzzManager,MozillaSecurity/FuzzManager,sigma-random/FuzzManager,cihatix/FuzzManager,lazyparser/FuzzManager,lazyparser/FuzzManager,sigma-random/FuzzManager,MozillaSecurity/FuzzManager,lazyparser/FuzzManager,cihatix/FuzzManager,sigma-random/FuzzManager,cihatix/FuzzManager,cihatix/FuzzManager,sigma-random/FuzzManager,MozillaSecurity/FuzzManager | server/ec2spotmanager/migrations/0003_auto_20150504_1440.py | server/ec2spotmanager/migrations/0003_auto_20150504_1440.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Replace PoolStatusEntry.instance with a required pool foreign key."""

    dependencies = [
        ('ec2spotmanager', '0002_instancestatusentry_poolstatusentry'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='poolstatusentry',
            name='instance',
        ),
        migrations.AddField(
            model_name='poolstatusentry',
            name='pool',
            # default=0 only back-fills existing rows during the migration;
            # preserve_default=False drops it from the model afterwards.
            field=models.ForeignKey(default=0, to='ec2spotmanager.InstancePool'),
            preserve_default=False,
        ),
    ]
| mpl-2.0 | Python | |
d05bdc1a3a343184a02ec12b734a110140e94829 | add persistent decorator of class | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | server/Mars/ServerUtils/PersistentDecorator.py | server/Mars/ServerUtils/PersistentDecorator.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def persistent(cls):
    """Class decorator: flag *cls* as persistent via ``isPersistent``."""
    setattr(cls, 'isPersistent', True)
    return cls
| bsd-2-clause | Python | |
2f0b290d64aa1f85b60dfce99c39de49f095cbbd | Add script to warn users about imepending deletion | OSGConnect/freesurfer_workflow,OSGConnect/freesurfer_workflow | python/warn_purge.py | python/warn_purge.py | #!/usr/bin/env python
# Copyright 2016 University of Chicago
# Licensed under the APL 2.0 license
import argparse
import getpass
import os
import subprocess
import sys
import logging
from email.mime.text import MIMEText
import psycopg2
import shutil
PARAM_FILE_LOCATION = "/etc/freesurfer/db_info"
FREESURFER_BASE = '/stash2/user/fsurf/'
VERSION = '1.3.2'
def get_db_parameters():
    """
    Read database parameters from a file and return it

    :return: a tuple of (database_name, user, password, hostname)
    """
    parameters = {}
    with open(PARAM_FILE_LOCATION) as param_file:
        for line in param_file:
            # Each line is "key = value"; surrounding whitespace is ignored.
            # NOTE(review): a value containing '=' would break this split --
            # confirm the config format forbids that.
            key, val = line.strip().split('=')
            parameters[key.strip()] = val.strip()
    return (parameters['database'],
            parameters['user'],
            parameters['password'],
            parameters['hostname'])
def get_db_client():
    """
    Get a postgresql client instance and return it

    :return: a psycopg2 connection object (the original docstring said
        "redis client", but the code connects to PostgreSQL)
    """
    db, user, password, host = get_db_parameters()
    return psycopg2.connect(database=db, user=user, host=host, password=password)
def email_user(workflow_id, email):
    """
    Email user informing them that a workflow will be deleted

    :param workflow_id: id for workflow that will be deleted
    :param email: email address for user
    :return: True if the message was handed to sendmail, False otherwise
        (the caller tests the return value, but the original always
        returned None, so every send was logged as a failure)
    """
    msg = MIMEText('The results from your freesurfer ' +
                   'workflow {0} '.format(workflow_id) +
                   'will be deleted in 7 days')
    msg['Subject'] = 'Results for freesurfer workflow {0} '.format(workflow_id)
    msg['Subject'] += 'will be deleted'
    msg['From'] = 'fsurf@login.osgconnect.net'
    msg['To'] = email
    try:
        # "sendmail -t" reads the recipients from the message headers.
        sendmail = subprocess.Popen(['/usr/sbin/sendmail', '-t'],
                                    stdin=subprocess.PIPE)
        sendmail.communicate(msg.as_string())
        # Popen/communicate never raise CalledProcessError (that is only
        # raised by check_call/check_output); inspect the exit code instead.
        return sendmail.returncode == 0
    except OSError:
        # sendmail binary missing or not executable
        return False
def process_results():
    """
    Warn users whose job results are approaching deletion.

    Selects COMPLETED/ERROR jobs whose age is in [22 days, 23 days) and
    emails the owner that the results will be removed in 7 days.

    :return: exit code (0 for success, non-zero for failure)
    """
    parser = argparse.ArgumentParser(description="Process and remove old results")
    # version info
    parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
    # Arguments for action
    parser.add_argument('--dry-run', dest='dry_run',
                        action='store_true', default=False,
                        help='Mock actions instead of carrying them out')
    args = parser.parse_args(sys.argv[1:])
    if args.dry_run:
        sys.stdout.write("Doing a dry run, no changes will be made\n")
    conn = get_db_client()
    cursor = conn.cursor()
    # The original query's parentheses were unbalanced, which is a
    # PostgreSQL syntax error; the age window is 22 days <= age < 23 days.
    job_query = "SELECT id, username, email, state, pegasus_ts, subject " \
                "  FROM freesurfer_interface.jobs " \
                "WHERE (state = 'COMPLETED' OR" \
                "       state = 'ERROR') AND" \
                "      (age(job_date) >= '22 days' AND " \
                "       age(job_date) < '23 days');"
    try:
        cursor.execute(job_query)
        for row in cursor.fetchall():
            if args.dry_run:
                # Trailing space so the two writes don't run together.
                sys.stdout.write("Would email {0} ".format(row[2]))
                sys.stdout.write("about workflow {0}\n".format(row[0]))
                continue
            if not email_user(row[0], row[2]):
                logging.error("Can't email {0} for job {1}".format(row[2],
                                                                   row[0]))
                continue
            conn.commit()
    except psycopg2.Error:
        logging.error("Can't connect to database")
        return 1
    finally:
        conn.commit()
        conn.close()
    return 0
if __name__ == '__main__':
sys.exit(process_results())
| apache-2.0 | Python | |
7adbfdf409a81a1462073fa97eb3860523e71a13 | Add python lib for queries. | acthp/ucsc-xena-server,acthp/ucsc-xena-server,ucscXena/ucsc-xena-server,ucscXena/ucsc-xena-server,acthp/ucsc-xena-server,ucscXena/ucsc-xena-server,acthp/ucsc-xena-server,ucscXena/ucsc-xena-server,ucscXena/ucsc-xena-server,acthp/ucsc-xena-server | python/xena_query.py | python/xena_query.py | """
Utilities for xena queries.
A basic query example.
Queries are scheme expressions.
>>> import xena_query as xena
>>> xena.post("https://genome-cancer.ucsc.edu/proj/public/xena", "(+ 1 2)")
'3.0'
>>> xena.post("https://genome-cancer.ucsc.edu/proj/public/xena", "(let [x 2 y (+ x 3)] (* x y))")
'10.0'
Looking up sample ids for the TCGA LGG cohort.
>>> xena.post("https://genome-cancer.ucsc.edu/proj/public/xena",
xena.patient_to_sample_query("TCGA.LGG.sampleMap",
["TCGA-CS-4938",
"TCGA-HT-7693",
"TCGA-CS-6665",
"TCGA-S9-A7J2",
"TCGA-FG-A6J3"]))
'{"TCGA.LGG.sampleMap":["TCGA-CS-4938-01","TCGA-CS-6665-01","TCGA-FG-A6J3-01","TCGA-HT-7693-01","TCGA-S9-A7J2-01"]}'
>>> import json
>>> json.loads(_)
{u'TCGA.LGG.sampleMap': [u'TCGA-CS-4938-01', u'TCGA-CS-6665-01', u'TCGA-FG-A6J3-01', u'TCGA-HT-7693-01', u'TCGA-S9-A7J2-01']}
"""
import urllib2
def quote(s):
    """Wrap *s* in double quotes for embedding in a scheme expression."""
    return '"{0}"'.format(s)
def array_fmt(l):
    """Format a list of strings as a quoted, comma-separated array literal."""
    quoted = ['"' + item + '"' for item in l]
    return '[' + ', '.join(quoted) + ']'
sample_query_str = """
(let [cohort %s
patient-dataset (car (query {:select [[:field.id :patient] [:dataset.id :dataset]]
:from [:dataset]
:left-join [:field [:= :dataset_id :dataset.id]]
:where [:and [:= :cohort cohort]
[:= :field.name "_PATIENT"]]}))
patient (:PATIENT patient-dataset)
dataset (:DATASET patient-dataset)
sample (:ID (car (query {:select [:field.id]
:from [:field]
:where [:and [:= :dataset_id dataset]
[:= :field.name "sampleID"]]})))
N (- (/ (:N (car (query {:select [[#sql/call [:sum #sql/call [:length :scores]] :N]]
:from [:field_score]
:join [:scores [:= :scores_id :scores.id]]
:where [:= :field_id patient]}))) 4) 1)]
{cohort (map :SAMPLE (query {:select [:sample]
:from [{:select [[#sql/call [:unpackValue patient, :x] :patient]
[#sql/call [:unpackValue sample, :x] :sample]]
:from [#sql/call [:system_range 0 N]]}]
:where [:in :patient %s]}))})
"""
def patient_to_sample_query(cohort, patients):
    """Return a xena query which looks up sample ids for the given patients.

    :param cohort: cohort name, quoted into the scheme expression
    :param patients: list of patient id strings
    """
    return sample_query_str % (quote(cohort), array_fmt(patients))
headers = { 'Content-Type' : "text/plain" }
def post(url, query):
    """POST a xena data query to the given url and return the raw response body."""
    req = urllib2.Request(url + '/data/', query, headers)
    response = urllib2.urlopen(req)
    result = response.read()
    return result
| apache-2.0 | Python | |
ee21691294e6bf9aacf0dd8591991c33d30c5159 | raise KeyboardInterrupt for backward compatibility | etingof/pysnmp,filippog/pysnmp,etingof/pysnmp,filippog/pysnmp | pysnmp/carrier/asyncore/dispatch.py | pysnmp/carrier/asyncore/dispatch.py | # Implements I/O over asynchronous sockets
from time import time
from sys import exc_info
from asyncore import socket_map
from asyncore import loop
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class AsynsockDispatcher(AbstractTransportDispatcher):
    """Transport dispatcher that multiplexes I/O over asyncore sockets."""

    def __init__(self):
        self.__sockMap = {} # use own map for MT safety
        self.timeout = 0.5  # default poll interval in seconds
        AbstractTransportDispatcher.__init__(self)

    def getSocketMap(self): return self.__sockMap
    def setSocketMap(self, sockMap=socket_map): self.__sockMap = sockMap

    def registerTransport(self, tDomain, t):
        # Track the transport, then add its socket to our private map.
        AbstractTransportDispatcher.registerTransport(self, tDomain, t)
        t.registerSocket(self.__sockMap)

    def unregisterTransport(self, tDomain):
        self.getTransport(tDomain).unregisterSocket(self.__sockMap)
        AbstractTransportDispatcher.unregisterTransport(self, tDomain)

    def transportsAreWorking(self):
        # 1 if any transport still has pending outbound data, else 0.
        for transport in self.__sockMap.values():
            if transport.writable():
                return 1
        return 0

    def runDispatcher(self, timeout=0.0):
        """Run the asyncore loop until no jobs or pending writes remain.

        KeyboardInterrupt propagates to the caller; any other exception
        is re-raised as PySnmpError.
        """
        while self.jobsArePending() or self.transportsAreWorking():
            try:
                loop(timeout and timeout or self.timeout,
                     use_poll=True, map=self.__sockMap, count=1)
            except KeyboardInterrupt:
                raise
            except:
                raise PySnmpError('poll error: %s' % exc_info()[1])
            self.handleTimerTick(time())
| # Implements I/O over asynchronous sockets
from time import time
from sys import exc_info
from asyncore import socket_map
from asyncore import loop
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class AsynsockDispatcher(AbstractTransportDispatcher):
def __init__(self):
self.__sockMap = {} # use own map for MT safety
self.timeout = 0.5
AbstractTransportDispatcher.__init__(self)
def getSocketMap(self): return self.__sockMap
def setSocketMap(self, sockMap=socket_map): self.__sockMap = sockMap
def registerTransport(self, tDomain, t):
AbstractTransportDispatcher.registerTransport(self, tDomain, t)
t.registerSocket(self.__sockMap)
def unregisterTransport(self, tDomain):
self.getTransport(tDomain).unregisterSocket(self.__sockMap)
AbstractTransportDispatcher.unregisterTransport(self, tDomain)
def transportsAreWorking(self):
for transport in self.__sockMap.values():
if transport.writable():
return 1
return 0
def runDispatcher(self, timeout=0.0):
while self.jobsArePending() or self.transportsAreWorking():
try:
loop(timeout and timeout or self.timeout,
use_poll=True, map=self.__sockMap, count=1)
except:
raise PySnmpError('poll error: %s' % exc_info()[1])
self.handleTimerTick(time())
| bsd-2-clause | Python |
98bd10cdf2c380b17c16a47a8f962dc384b3a18d | Solve py set discard remove pop | rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank | python/py-set-discard-remove-pop.py | python/py-set-discard-remove-pop.py | num_elements = int(input())
s = set(map(int, input().split()))
num_operations = int(input())
for _ in range(num_operations):
operation = input().split(" ")
if(operation[0] == "pop"):
s.pop()
else:
op, val = operation
s.discard(int(val))
print(sum(s))
| mit | Python | |
a8db812c5a9822f6ea72bf44134ed2219a2c5c74 | Remove print statement. | gov-cjwaszczuk/notifications-admin,gov-cjwaszczuk/notifications-admin,alphagov/notifications-admin,gov-cjwaszczuk/notifications-admin,alphagov/notifications-admin,gov-cjwaszczuk/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin | app/main/views/dashboard.py | app/main/views/dashboard.py | from flask import (abort, render_template, session)
from flask_login import login_required
from app.main import main
from app.main.dao.services_dao import get_service_by_id
from app.main.dao import templates_dao
from notifications_python_client.errors import HTTPError
from app import job_api_client
from app.utils import user_has_permissions
@main.route("/services/<service_id>/dashboard")
@login_required
def service_dashboard(service_id):
    """Render the dashboard page for one service.

    Fetches the service's templates and jobs (404 on missing service),
    caches the service name/id in the session, and shows the five most
    recent jobs.
    """
    try:
        templates = templates_dao.get_service_templates(service_id)['data']
        jobs = job_api_client.get_job(service_id)['data']
    except HTTPError as e:
        if e.status_code == 404:
            abort(404)
        else:
            raise e
    try:
        service = get_service_by_id(service_id)
        session['service_name'] = service['data']['name']
        session['service_id'] = service['data']['id']
    except HTTPError as e:
        if e.status_code == 404:
            abort(404)
        else:
            raise e
    return render_template(
        'views/service_dashboard.html',
        # Reverse so newest jobs appear first (assumes the API returns
        # oldest-first -- confirm), then keep only five.
        jobs=list(reversed(jobs))[:5],
        more_jobs_to_show=(len(jobs) > 5),
        # NOTE(review): remaining-messages and spend figures are hard-coded
        # placeholders, not live data -- confirm before relying on them.
        free_text_messages_remaining='250,000',
        spent_this_month='0.00',
        template_count=len(templates),
        service_id=str(service_id))
| from flask import (abort, render_template, session)
from flask_login import login_required
from app.main import main
from app.main.dao.services_dao import get_service_by_id
from app.main.dao import templates_dao
from notifications_python_client.errors import HTTPError
from app import job_api_client
from app.utils import user_has_permissions
@main.route("/services/<service_id>/dashboard")
@login_required
def service_dashboard(service_id):
try:
templates = templates_dao.get_service_templates(service_id)['data']
jobs = job_api_client.get_job(service_id)['data']
except HTTPError as e:
if e.status_code == 404:
abort(404)
else:
raise e
try:
service = get_service_by_id(service_id)
session['service_name'] = service['data']['name']
print(service['data']['id'])
session['service_id'] = service['data']['id']
except HTTPError as e:
if e.status_code == 404:
abort(404)
else:
raise e
return render_template(
'views/service_dashboard.html',
jobs=list(reversed(jobs))[:5],
more_jobs_to_show=(len(jobs) > 5),
free_text_messages_remaining='250,000',
spent_this_month='0.00',
template_count=len(templates),
service_id=str(service_id))
| mit | Python |
f3130bde2704008017e1438bf7d6cb1c0bbf3d61 | Create jsonSender.py | edlectrico/Elasticsearch_ELK_IoT,edlectrico/Elasticsearch_ELK_IoT | jsonSender.py | jsonSender.py | import socket
import json
import time
from distancemeter import get_distance,cleanup
# Logstash TCP/JSON Host
JSON_PORT = 5959
JSON_HOST = '127.0.0.1'
if __name__ == '__main__':
    try:
        # Stream distance readings to Logstash as newline-delimited JSON.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((JSON_HOST, JSON_PORT))
        while True:
            distance = get_distance()
            data = {'message': 'distance %.1f cm' % distance, 'distance': distance, 'hostname': socket.gethostname()}
            s.send(json.dumps(data))
            s.send('\n')
            print ("Received distance = %.1f cm" % distance)
            # ~5 samples per second
            time.sleep(0.2)
    # interrupt
    except KeyboardInterrupt:
        print("Program interrupted")
        # NOTE(review): distancemeter.cleanup is imported but never called;
        # GPIO pins may be left configured on exit -- confirm intent.
| apache-2.0 | Python | |
482c79850a943f77034d376c0d8e8682142b705d | Add init | senthil10/scilifelab,kate-v-stepanova/scilifelab,SciLifeLab/scilifelab,senthil10/scilifelab,kate-v-stepanova/scilifelab,senthil10/scilifelab,kate-v-stepanova/scilifelab,jun-wan/scilifelab,jun-wan/scilifelab,jun-wan/scilifelab,kate-v-stepanova/scilifelab,SciLifeLab/scilifelab,jun-wan/scilifelab,senthil10/scilifelab,SciLifeLab/scilifelab,SciLifeLab/scilifelab | project_management/pmtools/__init__.py | project_management/pmtools/__init__.py | """
Pipeline Management Tools
Usage: pm command [options]
"""
from cement.core import foundation, controller, handler
## Abstract base controller -- for sharing arguments
class AbstractBaseController(controller.CementBaseController):
    """Base controller sharing common CLI arguments across pm subcommands."""
    class Meta:
        # Options every subcommand derived from this controller accepts.
        arguments = [
            (['-n', '--dry_run'], dict(help="dry_run - don't actually do anything")),
        ]

    def _setup(self, base_app):
        # Let cement wire the controller, then add per-controller state.
        super(AbstractBaseController, self)._setup(base_app)
        self.shared_config = dict()
## Main pm base controller
class PmController(controller.CementBaseController):
    """Top-level controller for the 'pm' application."""
    class Meta:
        label = 'pm'
        description = ''

    @controller.expose(hide=True)
    def default(self):
        # Placeholder action; currently just dumps the controller namespace.
        print dir(self)
| mit | Python | |
d853fba90a8fc784cdb248923cede6166fe91a8f | remove some field that duplicate | nfco/netforce,sidzan/netforce,bank-netforce/netforce,bank-netforce/netforce,anastue/netforce,sidzan/netforce,sidzan/netforce,bank-netforce/netforce,anastue/netforce,anastue/netforce,anastue/netforce,nfco/netforce,anastue/netforce,sidzan/netforce,nfco/netforce,anastue/netforce,sidzan/netforce,nfco/netforce,nfco/netforce,bank-netforce/netforce,bank-netforce/netforce,bank-netforce/netforce,sidzan/netforce,nfco/netforce | netforce_mfg/netforce_mfg/models/stock_barcode.py | netforce_mfg/netforce_mfg/models/stock_barcode.py | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
class Barcode(Model):
    """Barcode wizard extension: links a scan to a related document and
    pre-fills locations for production orders."""
    # Fixed typo: the attribute was spelled "_inhierit", which the
    # framework ignores, so the extension was never merged into
    # the base "stock.barcode" model.
    _inherit = "stock.barcode"
    _fields = {
        # Document the scanned goods movement belongs to.
        "related_id": fields.Reference([["sale.order", "Sales Order"], ["purchase.order", "Purchase Order"], ["production.order", "Production Order"], ["stock.picking", "Picking"]], "Related To"),
    }

    def onchange_related(self, context={}):
        """When the related document changes, default the from/to location
        from the production order's production location.

        For "out" transactions the production location becomes the
        destination; for "in" it becomes the source.
        """
        data = context["data"]
        type = data["type"]
        val = data["related_id"][0]
        relation, rel_id = val.split(",")
        rel_id = int(rel_id)
        if relation == "production.order":
            rel = get_model("production.order").browse(rel_id)
            if type == "out":
                data["location_to_id"] = rel.production_location_id.id
            elif type == "in":
                data["location_from_id"] = rel.production_location_id.id
        return data

Barcode.register()
| # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
class Barcode(Model):
_inhierit= "stock.barcode"
_transient = True
_fields = {
"type": fields.Selection([["in", "Goods Receipt"], ["internal", "Goods Transfer"], ["out", "Goods Issue"]], "Transaction Type"),
"related_id": fields.Reference([["sale.order", "Sales Order"], ["purchase.order", "Purchase Order"], ["production.order", "Production Order"], ["stock.picking", "Picking"]], "Related To"),
}
def onchange_related(self, context={}):
data = context["data"]
type = data["type"]
val = data["related_id"][0]
relation, rel_id = val.split(",")
rel_id = int(rel_id)
if relation == "production.order":
rel = get_model("production.order").browse(rel_id)
if type == "out":
data["location_to_id"] = rel.production_location_id.id
elif type == "in":
data["location_from_id"] = rel.production_location_id.id
return data
Barcode.register()
| mit | Python |
ee7f49a39f0a0d3048c0b83c856982d3b34d1701 | Create anovaTest.py | duttashi/Data-Analysis-Visualization | anovaTest.py | anovaTest.py | # Importing the required libraries
# Note %matplotlib inline works only for ipython notebook. It will not work for PyCharm. It is used to show the plot distributions
#%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.formula.api as smf
import statsmodels.stats.multicomp as multi
sns.set(color_codes=True)
# Reading the data where low_memory=False increases the program efficiency
data= pd.read_csv("gapminder.csv", low_memory=False)
# setting variables that you will be working with to numeric
data['breastcancerper100th']= data['breastcancerper100th'].convert_objects(convert_numeric=True)
data['femaleemployrate']= data['femaleemployrate'].convert_objects(convert_numeric=True)
data['alcconsumption']= data['alcconsumption'].convert_objects(convert_numeric=True)
# shows the number of rows and columns
print (len(data))
print (len(data.columns))
print (len(data.index))
# Print the column headers/headings
names=data.columns.values
print names
# using the describe function to get the standard deviation and other descriptive statistics of our variables
desc1=data['breastcancerper100th'].describe()
desc2=data['femaleemployrate'].describe()
desc3=data['alcconsumption'].describe()
print "\nBreast Cancer per 100th person\n", desc1
print "\nfemale employ rate\n", desc2
print "\nAlcohol consumption in litres\n", desc3
data.describe()
# Show the frequency distribution
print "\nAlcohol Consumption\nFrequency Distribution (in %)"
c1=data['alcconsumption'].value_counts(sort=False,dropna=False)
print c1
print "\nBreast Cancer per 100th"
c2=data['breastcancerper100th'].value_counts(sort=False)
print c2
print "\nFemale Employee Rate"
c3=data['femaleemployrate'].value_counts(sort=False)
print c3
# Show the frequency distribution of the quantitative variable using the groupby function
ac1=data.groupby('alcconsumption').size()
print "ac1\n",ac1
# Creating a subset of the data
sub1=data[(data['femaleemployrate']>40) & (data['alcconsumption']>=20)& (data['breastcancerper100th']<50)]
# creating a copy of the subset. This copy will be used for subsequent analysis
sub2=sub1.copy()
print "\nContries where Female Employee Rate is greater than 40 &" \
" Alcohol Consumption is greater than 20L & new breast cancer cases reported are less than 50\n"
print sub2
print "\nContries where Female Employee Rate is greater than 50 &" \
" Alcohol Consumption is greater than 10L & new breast cancer cases reported are greater than 70\n"
sub3=data[(data['alcconsumption']>10)&(data['breastcancerper100th']>70)&(data['femaleemployrate']>50)]
print sub3
# Checking for missing values in the data row-wise
print "Missing data rows count: ",sum([True for idx,row in data.iterrows() if any(row.isnull())])
# Checking for missing values in the data column-wise
print "Showing missing data coulmn-wise"
print data.isnull().sum()
# Create a copy of the original dataset as sub4 by using the copy() method
sub4=data.copy()
# Now showing the count of null values in the variables
print sub4.isnull().sum()
# Since the data is all continuous variables therefore the use the mean() for missing value imputation
# if dealing with categorical data, than use the mode() for missing value imputation
sub4.fillna(sub4['breastcancerper100th'].mean(), inplace=True)
sub4.fillna(sub4['femaleemployrate'].mean(), inplace=True)
sub4.fillna(sub4['alcconsumption'].mean(), inplace=True)
# Showing the count of null values after imputation
print sub4.isnull().sum()
# categorize quantitative variable based on customized splits using the cut function
sub4['alco']=pd.qcut(sub4.alcconsumption,6,labels=["0","1-4","5-9","10-14","15-19","20-24"])
sub4['brst']=pd.qcut(sub4.breastcancerper100th,5,labels=["1-20","21-40","41-60","61-80","81-90"])
sub4['emply']=pd.qcut(sub4.femaleemployrate,4,labels=["30-39","40-59","60-79","80-90"])
# Showing the frequency distribution of the categorised quantitative variables
print "\n\nFrequency distribution of the categorized quantitative variables\n"
fd1=sub4['alco'].value_counts(sort=False,dropna=False)
fd2=sub4['brst'].value_counts(sort=False,dropna=False)
fd3=sub4['emply'].value_counts(sort=False,dropna=False)
print "Alcohol Consumption\n",fd1
print "\n------------------------\n"
print "Breast Cancer per 100th\n",fd2
print "\n------------------------\n"
print "Female Employee Rate\n",fd3
print "\n------------------------\n"
# Now plotting the univariate quantitative variables using the distribution plot
sub5=sub4.copy()
sns.distplot(sub5['alcconsumption'].dropna(),kde=True)
plt.xlabel('Alcohol consumption in litres')
plt.title('Breast cancer in working class women')
plt.show() # Note: Although there is no need to use the show() method for ipython notebook as %matplotlib inline does the trick but
#I am adding it here because matplotlib inline does not work for an IDE like Pycharm and for that i need to use plt.show
sns.distplot(sub5['breastcancerper100th'].dropna(),kde=True)
plt.xlabel('Breast cancer per 100th women')
plt.title('Breast cancer in working class women')
plt.show()
sns.distplot(sub5['femaleemployrate'].dropna(),kde=True)
plt.xlabel('Female employee rate')
plt.title('Breast cancer in working class women')
plt.show()
# using scatter plot the visulaize quantitative variable.
# if categorical variable then use histogram
scat1= sns.regplot(x='alcconsumption', y='breastcancerper100th', data=data)
plt.xlabel('Alcohol consumption in liters')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Alcohol Consumption and Breast Cancer 100th person')
scat2= sns.regplot(x='femaleemployrate', y='breastcancerper100th', data=data)
plt.xlabel('Female Employ Rate')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Female Employ Rate and Breast Cancer per 100th Rate')
sub6=sub4.copy()
model1=smf.ols(formula='breastcancerper100th~C(alco)',data=sub6)
results1=model1.fit()
print(results1.summary())
m1=sub5.groupby('alcconsumption').mean()
sd1=sub5.groupby('alcconsumption').std()
'''
print m1
print "\n"
print sd1
'''
# Conducting a post hoc comparison test to check for type 1 error
mc1=multi.MultiComparison(sub6['breastcancerper100th'],sub6['alco'])
res1=mc1.tukeyhsd()
print res1.summary()
| mit | Python | |
f1e2ae06784d759b0f6dbfae1424a2de70353ea9 | add a module used to call platon (software assigning space groups) on a structure in pymatgen | yanikou19/pymatgen,rousseab/pymatgen,Bismarrck/pymatgen,ctoher/pymatgen,migueldiascosta/pymatgen,Bismarrck/pymatgen,sonium0/pymatgen,rousseab/pymatgen,rousseab/pymatgen,ctoher/pymatgen,migueldiascosta/pymatgen,Bismarrck/pymatgen,Bismarrck/pymatgen,sonium0/pymatgen,Dioptas/pymatgen,ctoher/pymatgen,yanikou19/pymatgen,migueldiascosta/pymatgen,Dioptas/pymatgen,sonium0/pymatgen,Bismarrck/pymatgen,yanikou19/pymatgen | pymatgen/command_line/platon_caller.py | pymatgen/command_line/platon_caller.py | '''
Interface with command line platon
http://aflowlib.org/
Only tested on Linux
inspired by Shyue's qhull_caller
WARNING: you need to have a platon in your path for this to work
'''
__author__="Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy.hautier@uclouvain.be"
__status__ = "Production"
__date__ ="$February 24, 2012M$"
import subprocess
import pymatgen.io.cifio
import os
def run_platon_command(command, structure):
    """
    Helper function for calling platon with different arguments.

    platon reads its input from a file, so the structure is written to a
    temporary CIF file whose path is appended to a copy of *command*; the
    command is then executed and the (stdout, stderr) tuple returned by
    Popen.communicate() is passed back to the caller.
    """
    import tempfile

    writer = pymatgen.io.cifio.CifWriter(structure)
    # Use a unique temporary file instead of the fixed /tmp/tmp.cif path so
    # that concurrent runs cannot clobber each other's input.
    tmp = tempfile.NamedTemporaryFile(suffix=".cif", delete=False)
    tmp_name = tmp.name
    tmp.close()
    try:
        writer.write_file(tmp_name)
        # Copy the command list so the caller's list is not mutated.
        full_command = list(command) + [tmp_name]
        p = subprocess.Popen(full_command, shell=False,
                             stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        output = p.communicate()
    finally:
        # Remove the temporary file even if platon fails.
        os.remove(tmp_name)
    return output
def get_space_group(structure):
    """Run platon on *structure* and parse the space group from its output.

    Returns a dict with 'SG_HM' (Hermann-Mauguin symbol) and 'SG_NB'
    (space group number) when a "Space Group" line is found in the output.
    """
    stdout = run_platon_command(['platon', '-o', '-c'], structure)[0]
    result = {}
    for line in stdout.split("\n"):
        if "Space Group" not in line:
            continue
        tokens = line.split()
        for idx, token in enumerate(tokens):
            # The symbol follows the word "Group", the number follows "No:".
            if token == 'Group':
                result['SG_HM'] = tokens[idx + 1]
            elif token == 'No:':
                result['SG_NB'] = tokens[idx + 1]
    return result
| mit | Python | |
6cad2d60857e9d8714f679f68ae4887e58092a57 | Add caffe-compute-image-mean. | bamos/python-scripts,bamos/python-scripts | python2.7/caffe-compute-image-mean.py | python2.7/caffe-compute-image-mean.py | #!/usr/bin/env python2
import sys
sys.path.append("/home/bamos/repos/caffe-local/python")
import argparse
import numpy as np
import os
import time
from caffe.proto import caffe_pb2
from caffe.io import array_to_blobproto
from collections import defaultdict
from skimage import io
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('meanPrefix', type=str, help="TODO")
    parser.add_argument('imageDir', type=str, help="TODO")
    args = parser.parse_args()
    # Only these image extensions are considered.
    exts = ["jpg", "png"]
    # Running per-channel pixel sum in the (1, C, H, W) layout expected by
    # array_to_blobproto; divided by N at the end to get the mean image.
    mean = np.zeros((1, 3, 152, 152))
    N = 0
    classSizes = defaultdict(int)  # NOTE(review): never populated; unused
    beginTime = time.time()
    for subdir, dirs, files in os.walk(args.imageDir):
        for fName in files:
            # The directory name doubles as the class label.
            (imageClass, imageName) = (os.path.basename(subdir), fName)
            if any(imageName.lower().endswith("." + ext) for ext in exts):
                img = io.imread(os.path.join(subdir, fName))
                # Skip anything that is not exactly 152x152 with 3 channels.
                if img.shape == (152, 152, 3):
                    mean[0][0] += img[:,:,0]
                    mean[0][1] += img[:,:,1]
                    mean[0][2] += img[:,:,2]
                    N += 1
                    # Periodic progress report every 1000 accepted images.
                    if N % 1000 == 0:
                        elapsed = time.time() - beginTime
                        print("Processed {} images in {:.2f} seconds. "
                              "{:.2f} images/second.".format(N, elapsed,
                                                             N/elapsed))
    mean[0] /= N
    # Write the mean three ways: Caffe binaryproto, a .npy array, and a
    # viewable PNG (channels moved back to the last axis for the image).
    blob = array_to_blobproto(mean)
    with open("{}.binaryproto".format(args.meanPrefix), 'wb') as f:
        f.write(blob.SerializeToString())
    np.save("{}.npy".format(args.meanPrefix), mean[0])
    meanImg = np.transpose(mean[0].astype(np.uint8), (1, 2, 0))
    io.imsave("{}.png".format(args.meanPrefix), meanImg)
| mit | Python | |
db2e1a1070e812eaa539b229d128a58a59448317 | create hatenablog-post.py | tadaken3/hatenablog_post | hatenablog-post.py | hatenablog-post.py | #!/usr/bin/env python
#coding=utf-8
import sys
import datetime
import random
import hashlib
import base64
import requests
from chardet.universaldetector import UniversalDetector
username = 'username'
password = 'API key'
blogname = 'yourblogname.hatenablog.com'
def main():
    """Entry point: read the file named on the command line and post it to
    Hatena Blog as a draft entry (first line = title, rest = body)."""
    if len(sys.argv) != 2:
        sys.exit('Usage: %s file-name' % sys.argv[0])
    source = sys.argv[1]
    encoding = check_encoding(source)
    title, body = parseText(source, encoding)
    post_hatena(create_data(title, body))
def wsse(username, api_key):
    """Build the value of an X-WSSE header for Hatena's WSSE authentication.

    The digest is SHA-1 over nonce + creation timestamp + API key, with the
    nonce and digest base64-encoded into the header string.
    """
    created = datetime.datetime.now().isoformat() + "Z"
    nonce = hashlib.sha1(str(random.random()).encode()).digest()
    digest = hashlib.sha1(nonce + created.encode() + api_key.encode()).digest()
    template = 'UsernameToken Username="{0}", PasswordDigest="{1}", Nonce="{2}", Created="{3}"'
    return template.format(username,
                           base64.b64encode(digest).decode(),
                           base64.b64encode(nonce).decode(),
                           created)
def create_data(title, body):
    """Build an AtomPub draft-entry XML document and return it as UTF-8 bytes."""
    template = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom"
       xmlns:app="http://www.w3.org/2007/app">
  <title>{0}</title>
  <author><name>name</name></author>
  <content type="text/plain">{1}</content>
  <updated>2013-09-05T00:00:00</updated>
  <app:control>
    <app:draft>yes</app:draft>
  </app:control>
</entry>
"""
    return template.format(title, body).encode()
def parseText(file, charset):
    """Split *file* into (title, body).

    The first line (including its trailing newline) is the title; everything
    after it is the body. Returns ("", "") for an empty file.
    """
    with open(file, encoding=charset) as f:
        lines = f.readlines()
    title = lines[0] if lines else ""
    # Join instead of repeated += -- the original built the body with
    # quadratic string concatenation.
    body = "".join(lines[1:])
    return title, body
def check_encoding(file):
    """Detect the character encoding of *file* with chardet's UniversalDetector.

    Feeds the file chunk by chunk and stops as soon as the detector is
    confident; returns the detected encoding name.
    """
    detector = UniversalDetector()
    with open(file, mode='rb') as f:
        for chunk in f:
            detector.feed(chunk)
            if detector.done:
                break
    detector.close()
    return detector.result['encoding']
def post_hatena(data):
    """POST the AtomPub entry *data* to the Hatena Blog API.

    Writes an error message to stderr when the API does not answer
    201 Created.
    """
    # Bug fix: the original referenced an undefined name `api_key` here
    # (raising NameError); the module-level credential is `password`.
    headers = {'X-WSSE': wsse(username, password)}
    url = 'http://blog.hatena.ne.jp/{0}/{1}/atom/entry'.format(username, blogname)
    r = requests.post(url, data=data, headers=headers)
    if r.status_code != 201:
        sys.stderr.write('Error!\n' + 'status_code: ' + str(r.status_code) + '\n' + 'message: ' + r.text)
if __name__ == '__main__':
main()
| mit | Python | |
8e94da2cf788115a1562db253c96b1932b495ef3 | Add script for generating chords, used to make some of the sounds. | apendleton/valve-installation,apendleton/valve-installation | make_chord.py | make_chord.py | from collections import OrderedDict
from itertools import cycle
import sys
# build the pitch table
note_names = ['A', 'A#/Bb', 'B', 'C', 'C#/Db', 'D', 'D#/Eb', 'E', 'F', 'F#/Gb', 'G', 'G#/Ab']
note_cycle = cycle(note_names)
piano = []
onumber = 0
# Walk the 88 piano keys: key 49 is A4 = 440 Hz and each semitone is a
# factor of 2**(1/12) (equal temperament). The octave number increments on
# each C, matching standard piano note naming.
for i in range(1, 89):
    note = note_cycle.next()  # NOTE(review): .next() is Python-2-only
    if note == 'C':
        onumber += 1
    piano.append({
        'number': i,
        'name': [n + str(onumber) for n in note.split('/')],
        'freq': (2 ** ((i - 49.0) / 12)) * 440
    })

# invert it
# Map every note-name spelling (e.g. both "C#4" and "Db4") to its frequency.
freqs = {}
for key in piano:
    for name in key['name']:
        freqs[name] = key['freq']

# look at arguments for pitch names and build samples
from wavebender import *

flist = []
requested = sys.argv[1:]
# Split a total amplitude of 0.8 evenly across the requested notes so the
# summed chord stays below full scale.
amp = 0.8 / len(requested)
for arg in requested:
    flist.append(sine_wave(freqs[arg], amplitude=amp))
channels = (tuple(flist),)
# 10 seconds of mono audio at 44.1 kHz, written as a WAV file to stdout.
nframes = 44100 * 10
samples = compute_samples(channels, nframes)
write_wavefile(sys.stdout, samples, nchannels=1, nframes=nframes)
56bfc977ea1e7b415e699a82459c917c71fe36df | add app.utils module | iBis-project/iBis-server2,rleh/ocyco-server-python,OpenCycleCompass/ocyco-server-python | app/utils.py | app/utils.py |
def get_city_by_coordinates(lon, lat):
    """Return the city name for the given longitude/latitude.

    Placeholder implementation: real reverse geocoding is still TODO, so a
    fixed dummy city name is returned for every coordinate pair.
    """
    return "MyCity"
| agpl-3.0 | Python | |
2b1500419e97b75c7b5bda9d8e226ed8340edb50 | add experimental python proxy | n1kdo/lotw-gridmapper,n1kdo/lotw-gridmapper,n1kdo/lotw-gridmapper | lotwreport.py | lotwreport.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
lotwreport.py: proxy for ARRL LoTW lotwreport.adi web service, which does not
support CORS headers and thus cannot be called from a script that is loaded
from any other server. This CGI must be served from the same host name as
any script that wishes to call it. Because I do not want other peoples'
scripts to call this service, it deliberately does nnot support CORS, either.
"""
#
# LICENSE:
#
# Copyright (c) 2018, Jeffrey B. Otterson, N1KDO
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cgi, os, urllib2, sys
# Query parameters accepted from the client and forwarded verbatim to LoTW;
# anything else in the request is silently dropped.
valid_args = ['login', 'password', 'qso_query', 'qso_qsl',
              'qso_qslsince', 'qso_qsorxsince', 'qso_owncall', 'qso_callsign',
              'qso_mode', 'qso_band', 'qso_dxcc',
              'qso_startdate', 'qso_starttime',
              'qso_enddate', 'qso_endtime',
              'qso_mydetail', 'qso_qsldetail', 'qso_withown']

form = cgi.FieldStorage()
callsign = form['login'].value if 'login' in form else None
password = form['password'].value if 'password' in form else None
client = os.environ["REMOTE_ADDR"]

# Rebuild the upstream query string from the whitelisted arguments only.
pfx = '?'
url = 'https://lotw.arrl.org/lotwuser/lotwreport.adi'
for arg in valid_args:
    if arg in form:
        url = url + pfx + arg + '=' + form[arg].value
        pfx = '&'

# Special case: password-less requests for the owner's callsign coming from
# the local network are answered from the on-disk cache, not from LoTW.
if callsign == 'n1kdo' and password is None and client.startswith('192.168.1'):
    print 'Content-Type: application/x-arrl-adif'
    print
    try:
        filename = callsign + '.adi'
        with open(filename, 'r') as file:
            data = file.read()
        print data
    except IOError:
        print 'no cache'
else:
    # Proxy the request to LoTW with a generous 10-minute timeout -- the
    # report endpoint can be very slow for large logs.
    req = urllib2.Request(url)
    response = urllib2.urlopen(req, None, 600)
    data = response.read()
    # Cache successful (non-error) responses for the owner's callsign so the
    # local-network fast path above can serve them later.
    if callsign == 'n1kdo' and 'ARRL Logbook of the World Status Report' in data:
        filename = callsign + '.adi'
        with open(filename, 'w') as file:
            file.write(data)
    info = response.info()
    print 'Content-Type: %s' % info['Content-Type']
    print
    print data
| bsd-2-clause | Python | |
2c387a3674a574bcbfa2ebf5dc32d7987988aef5 | Add py-fastavro (#19196) | LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/py-fastavro/package.py | var/spack/repos/builtin/packages/py-fastavro/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFastavro(PythonPackage):
    """Fastavro for Python."""

    homepage = "https://github.com/fastavro/fastavro"
    url = "https://github.com/fastavro/fastavro/archive/1.0.0.post1.tar.gz"

    version('1.0.0.post1', sha256='74f9bf0f9bc9e484c6d42fad603d6e6f907e716a78189873761dc86ce64cc6c5')
    version('1.0.0', sha256='9aca6f425dd898f40e2c4e10714276604e610c6ad3df53109d6fb3ad49761b5e')
    version('0.24.2', sha256='6ccd711a0c6960c3263a7c1c0e0e3bf7c5e949e11c3a676edece36138c62caba')
    version('0.24.1', sha256='8b109b4f564f6fe7fd82d3e2d582fba8da58167bcf2fa65e27fd3c4e49afddf9')
    version('0.24.0', sha256='d60c2a90d7bbe7a70aab30d3b772faedcbd9487bc1f7e2cd65a93a555688965e')
    version('0.23.6', sha256='b511dc55a9514205765f96b4d964f1d74fca9696dbac91229cef6100a028a29f')
    version('0.23.5', sha256='5542b69a99a74a57988c2a48da9be4356db4223cebac599ec3e9bf1b74ef534b')
    version('0.23.4', sha256='e699940a06fc713d56ba8b9cb88665e2fa2a6abc2c342cd540ee7cd4428af973')
    version('0.23.3', sha256='4e4bebe7b43b5cdad030bdbeb7f7f0ccb537ea025a9e28c7a4826876872fc84b')
    version('0.23.2', sha256='3b31707d6eaa1e98fc60536d0b3483bafb78be39bf9f0a1affe1b353e70bd5b2')

    def setup_build_environment(self, env):
        # Use cython for building as *.c files are missing from repo;
        # fastavro's setup.py checks this environment variable.
        env.set('FASTAVRO_USE_CYTHON', 1)

    depends_on('python@3.5:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-snappy', type=('build', 'run'))
    depends_on('py-lz4', type=('build', 'run'))
    # Needed at build time because of FASTAVRO_USE_CYTHON above.
    depends_on('py-cython', type=('build', 'run'))
| lgpl-2.1 | Python | |
e43ffdc6ce35438b188f84ec34855ac3ff5a4722 | Create immhook.py | abhinavbom/Debugging | immhook.py | immhook.py | #-------------------------------------------------------------------------------
# Name: Immunity debugger Exception Hook
# Purpose: The script throws out values of EIP,ESP when a crash exception occours
#
# Author: darklord
#
# Created: 19/10/2014
# Copyright: (c) darklord 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import immlib
from immlib import AllExceptHook
class DemoHook(AllExceptHook):
    """Immunity Debugger hook that fires on every exception in the debuggee
    and logs the register state at the moment of the crash."""

    def __init__(self):
        AllExceptHook.__init__(self)

    def run(self, regs):
        # Called by Immunity Debugger with a dict of register values taken
        # at the time of the exception.
        imm = immlib.Debugger()
        #picks up registers from the memory
        eip = regs['EIP']
        esp = regs['ESP']
        #logging register information
        imm.log("EIP: 0x%08X ESP:0x%08X"%(eip, esp))
        #reads the data from the ESP
        # NOTE(review): `buff` is read but never used or logged.
        buff = imm.readString(esp)
def main(args):
    """PyCommand entry point: install the exception hook in the debugger."""
    imm = immlib.Debugger()  # NOTE(review): created but never used here
    newHook = DemoHook()
    newHook.add('Demo Hook')
    return 'Hook PyCommand'
if __name__ == '__main__':
main()
| mit | Python | |
9fa0ae7c14bfa0d352bedd5ab7ea92be7736a485 | Create classes.py | SamCD/TestTrainer | classes.py | classes.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import notelib
class Student(object):
    """A student who can take tests and accumulate a score."""

    # Kept at class level for backward compatibility with any code that
    # reads Student.PassedTests / Student.score; real state is per-instance.
    PassedTests = []
    score = 0

    def __init__(self, name):
        self.name = name
        # Bug fix: the class-level list/score were shared by every instance;
        # shadow them with per-instance copies.
        self.PassedTests = []
        self.score = 0

    def take_test(self, subject, level):
        """Start the test for (subject, level) by asking its first question.

        Question/answer/hint/follow-up data live in text files named like
        "<subject>questions.txt<level>".
        """
        mo = (subject, level)
        tfiles = ["{}questions.txt{}".format(mo[0], mo[1]),
                  "{}answers.txt{}".format(mo[0], mo[1]),
                  "{}hints.txt{}".format(mo[0], mo[1]),
                  "{}followups.txt{}".format(mo[0], mo[1])]
        counter = 0
        question = 0
        # Bug fix: a file object has no .split(); read the contents first.
        qs = open(tfiles[counter], 'r').read().split()
        # NOTE(review): answer handling looks unfinished -- `ans` is unused.
        ans = raw_input(qs[0])
| apache-2.0 | Python | |
466edb19fbf6fcfc51e671a80d45320bd8e1717c | add linode_api | leonanu/scripts,leonanu/scripts | linode_api/View_Network_Transfer.py | linode_api/View_Network_Transfer.py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import json
import math
import sys
from urllib import parse,request
LINODE_ID = ''
LINODE_TOKEN_RO = ''
def getTransfer(linode_id, argv):
    """Fetch the network-transfer stats for a Linode and print one value.

    argv selects the output: 'quota', 'used' (converted from bytes to GiB),
    or 'percent' (used/quota as a percentage, one decimal place).
    """
    url = 'https://api.linode.com/v4/linode/instances/' + linode_id + '/transfer'
    header_dict = {
        'User-Agent': 'User-Agent: curl/7.68.0',
        'Authorization': 'Bearer ' + LINODE_TOKEN_RO}
    req = request.Request(url, headers=header_dict)
    res = request.urlopen(req)
    ret = res.read()
    jsonData = json.loads(ret)
    # NOTE(review): `used` is divided by 1024**3 (bytes -> GiB) while `quota`
    # is taken as-is -- presumably already in GB; confirm against the API.
    transfer_quota = float(round(jsonData['quota'], 2))
    transfer_used = round(jsonData['used'] / math.pow(1024, 3), 2)
    if argv == 'quota':
        print(transfer_quota)
    if argv == 'used':
        print(transfer_used)
    if argv == 'percent':
        print(round(float(transfer_used / transfer_quota * 100), 1))
def main():
    # sys.argv[1] selects 'quota', 'used' or 'percent' (see getTransfer).
    getTransfer(LINODE_ID, sys.argv[1])
main()
| mit | Python | |
c84e3394ed4829ff9a66167864a11a4ef6a2b62c | Add script to get certificate expiration date | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | scripts/get_saml_cert_expiration.py | scripts/get_saml_cert_expiration.py | from cryptography import x509
from cryptography.hazmat.backends import default_backend
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
def run(*args):
    """Print the SAML SP certificate expiration date for every tenant."""
    for client in Client.objects.all():
        with LocalTenant(client):
            try:
                # Wrap the bare base64 certificate from the tenant settings
                # in PEM armor so the cryptography loader accepts it.
                cert_string = '-----BEGIN CERTIFICATE-----\n{}\n-----END CERTIFICATE-----'.format(
                    properties.TOKEN_AUTH['sp']['x509cert']
                )
                cert = x509.load_pem_x509_certificate(bytes(cert_string), default_backend())
                print client.name, cert.not_valid_after
            except (AttributeError, KeyError):
                # Tenant has no SAML configuration; skip silently.
                pass
            except Exception, e:
                # Malformed certificate or similar: report and continue.
                print e
| bsd-3-clause | Python | |
3f685e3873c18e1eb28b7a4121c552bbb697e0a4 | Add script for generate events. | Intey/OhMyBank,Intey/OhMyBank,Intey/OhMyBank,Intey/OhMyBank | scripts/generator.py | scripts/generator.py | #!/usr/bin/python3
from random import randint
output = ""
filename = "data"
class Generator:
    """Produces random event rows of the form "name:price:author:date:parts"."""

    def gen_date(self):
        """Return a random date string "YYYY-M-D".

        NOTE(review): the day is drawn from 1-31 independently of the month,
        so impossible dates such as "2015-2-31" can be produced.
        """
        return str(randint(2013, 2015)) + "-" \
            + str(randint(1, 12)) + "-" \
            + str(randint(1, 31))

    def gen_price(self):
        """Return a random price: a multiple of 10 between 100 and 1000."""
        return str(10 * randint(10, 100))

    def gen_author(self):
        """Return a random user name from a fixed test list."""
        users = [
            "Intey",
            "Andrey",
            "Tatiana",
            "Nikolay",  # replaced: the original placeholder name was a slur
        ]
        # Bug fix: randint(1, len(users) - 1) could never select the first
        # user in the list; index over the full range instead.
        return users[randint(0, len(users) - 1)]

    def gen_parts(self):
        """Return a random participant count (0-15) as a string."""
        return str(randint(0, 15))

    def gen_row(self, s):
        """Build a colon-separated, newline-terminated row for event *s*."""
        return ":".join([s,
                         self.gen_price(),
                         self.gen_author(),
                         self.gen_date(),
                         self.gen_parts()]) + '\n'
def prepare_file(file_name):
    """Rewrite *file_name* in place, expanding each line (an event name)
    into a full generated event row."""
    gena = Generator()
    with open(file_name, 'r') as source:
        new_lines = [gena.gen_row(line.rstrip('\n')) for line in source.readlines()]
    with open(file_name, 'w') as target:
        target.writelines(new_lines)
if __name__ == "__main__":
prepare_file(filename)
| mit | Python | |
8be49481990096c7a4735807cc3d9611b4ce0780 | add migration script | UrbanCCD-UChicago/plenario,UrbanCCD-UChicago/plenario,UrbanCCD-UChicago/plenario | scripts/update_metatable_columns.py | scripts/update_metatable_columns.py | from plenario.settings import DATABASE_CONN
from plenario.database import Base
from plenario.models import MetaTable
from sqlalchemy import create_engine, Table
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import NoSuchTableError
def main():
    """Refresh MetaTable.column_names from the live schema of each dataset."""
    # establish connection to provided database
    engine = create_engine(DATABASE_CONN, convert_unicode=True)
    session = sessionmaker(bind=engine)()
    # grab the MetaTable records
    query = session.query(MetaTable)
    for table in query.all():
        try:
            # reflect existing table information into a Table object
            t = Table(table.dataset_name, Base.metadata, autoload=True, extend_existing=True)
            print(table)
            # Collect name -> type for every column except the bookkeeping
            # columns Plenario adds itself (geom, point_date, hash).
            cols = {}
            for col in t.columns:
                c_name = str(col.name)
                c_type = str(col.type)
                if c_name not in {u'geom', u'point_date', u'hash'}:
                    cols[c_name] = c_type
            # update existing table
            table.column_names = cols
            session.commit()
        except NoSuchTableError:
            # Dataset is listed in MetaTable but not physically present; skip.
            pass
    print('... done.')
if __name__ == '__main__':
print "Connecting to {}".format(DATABASE_CONN)
main()
| mit | Python | |
f5284cc7da9166a43e3cfbd901205f4446295f7a | Add Consumer Product Safety Commission. | divergentdave/inspectors-general,lukerosiak/inspectors-general | inspectors/cpsc.py | inspectors/cpsc.py | #!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# https://www.cpsc.gov/en/about-cpsc/inspector-general/
# Oldest report: 2003
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Fix the links for BLACKLIST_REPORT_URLS
REPORTS_URL = "https://www.cpsc.gov/en/about-cpsc/inspector-general/"
BLACKLIST_REPORT_URLS = [
'https://www.cpsc.gov/Media/Documents/About/OIG/Audits/CPSC-Fiscal-Year-2009-Financial-Statements-released-November-13-2009/',
]
def run(options):
    """Scrape the CPSC OIG reports page and save reports in the year range."""
    year_range = inspector.year_range(options)
    # Each <li> in the summary list is one report entry.
    doc = BeautifulSoup(utils.download(REPORTS_URL))
    results = doc.select("ul.summary-list li")
    for result in results:
        report = report_from(result, year_range)
        if report:
            inspector.save_report(report)
def report_from(result, year_range):
    """Build a report dict from one <li> search result, or None to skip.

    Returns None when the report is blacklisted, has no usable report id,
    or its publication year falls outside the requested range.
    """
    link = result.find("a")
    report_url = urljoin(REPORTS_URL, link.get('href'))
    if report_url in BLACKLIST_REPORT_URLS:
        return

    # The report id is the URL's final path component without its extension.
    report_filename = report_url.split("/")[-1]
    report_id, _ = os.path.splitext(report_filename)
    if not report_id:
        # Bug fix: the original dropped into pdb.set_trace() here, hanging
        # unattended scrapes. Log and skip such reports instead.
        logging.warning("[%s] Skipping, could not derive a report id." % report_url)
        return

    title = link.text
    published_on_text = result.select("span.date")[0].text
    published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')

    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % report_url)
        return

    report = {
        'inspector': 'cpsc',
        'inspector_url': 'https://www.cpsc.gov/en/about-cpsc/inspector-general/',
        'agency': 'cpsc',
        'agency_name': 'Consumer Product Safety Commission',
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
    return report
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 | Python | |
79a81b2d1936cd44caabf5f4e38abdee88a8821a | add missing proxy for kiva.agg.plat_support | enthought/etsproxy | enthought/kiva/agg/plat_support.py | enthought/kiva/agg/plat_support.py | # proxy module
from kiva.agg.plat_support import *
| bsd-3-clause | Python | |
f25a1484892d7b60fb9ffaba033cfb467e1b34f5 | Update random-point-in-non-overlapping-rectangles.py | tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/random-point-in-non-overlapping-rectangles.py | Python/random-point-in-non-overlapping-rectangles.py | # Time: ctor: O(n)
# pick: O(logn)
# Space: O(n)
# Given a list of non-overlapping axis-aligned rectangles rects,
# write a function pick which randomly and uniformily picks
# an integer point in the space covered by the rectangles.
#
# Note:
# - An integer point is a point that has integer coordinates.
# - A point on the perimeter of a rectangle is included in the space covered by the rectangles.
# - ith rectangle = rects[i] = [x1,y1,x2,y2],
# where [x1, y1] are the integer coordinates of the bottom-left corner,
# and [x2, y2] are the integer coordinates of the top-right corner.
# - length and width of each rectangle does not exceed 2000.
# - 1 <= rects.length <= 100
# - pick return a point as an array of integer coordinates [p_x, p_y]
# - pick is called at most 10000 times.
#
# Example 1:
#
# Input:
# ["Solution","pick","pick","pick"]
# [[[[1,1,5,5]]],[],[],[]]
# Output:
# [null,[4,1],[4,1],[3,3]]
# Example 2:
#
# Input:
# ["Solution","pick","pick","pick","pick","pick"]
# [[[[-2,-2,-1,-1],[1,0,3,0]]],[],[],[],[],[]]
# Output:
# [null,[-1,-2],[2,0],[-2,-1],[3,0],[-2,-2]]
# Explanation of Input Syntax:
#
# The input is two lists: the subroutines called and their arguments.
# Solution's constructor has one argument,
# the array of rectangles rects. pick has no arguments.
# Arguments are always wrapped with a list, even if there aren't any.
import random
import bisect
class Solution(object):

    def __init__(self, rects):
        """
        :type rects: List[List[int]]
        """
        self.__rects = list(rects)
        # Number of integer points in each rectangle (borders inclusive,
        # hence the +1 on each side), accumulated into a prefix-sum array.
        # A list comprehension + range() works on both Python 2 and 3; the
        # original map(lambda)/xrange combination was Python-2-only
        # (indexing a map object fails on Python 3).
        self.__prefix_sum = [(x[2] - x[0] + 1) * (x[3] - x[1] + 1) for x in rects]
        for i in range(1, len(self.__prefix_sum)):
            self.__prefix_sum[i] += self.__prefix_sum[i - 1]

    def pick(self):
        """
        :rtype: List[int]
        """
        # Choose a point index uniformly over all integer points, then find
        # the rectangle containing it by binary search on the prefix sums.
        target = random.randint(0, self.__prefix_sum[-1] - 1)
        left = bisect.bisect_right(self.__prefix_sum, target)
        rect = self.__rects[left]
        width = rect[2] - rect[0] + 1
        height = rect[3] - rect[1] + 1
        # Offset of the chosen point within this rectangle.
        base = self.__prefix_sum[left] - width * height
        return [rect[0] + (target - base) % width, rect[1] + (target - base) // width]
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
| # Time: O(logn)
# Space: O(n)
# Given a list of non-overlapping axis-aligned rectangles rects,
# write a function pick which randomly and uniformily picks
# an integer point in the space covered by the rectangles.
#
# Note:
# - An integer point is a point that has integer coordinates.
# - A point on the perimeter of a rectangle is included in the space covered by the rectangles.
# - ith rectangle = rects[i] = [x1,y1,x2,y2],
# where [x1, y1] are the integer coordinates of the bottom-left corner,
# and [x2, y2] are the integer coordinates of the top-right corner.
# - length and width of each rectangle does not exceed 2000.
# - 1 <= rects.length <= 100
# - pick return a point as an array of integer coordinates [p_x, p_y]
# - pick is called at most 10000 times.
#
# Example 1:
#
# Input:
# ["Solution","pick","pick","pick"]
# [[[[1,1,5,5]]],[],[],[]]
# Output:
# [null,[4,1],[4,1],[3,3]]
# Example 2:
#
# Input:
# ["Solution","pick","pick","pick","pick","pick"]
# [[[[-2,-2,-1,-1],[1,0,3,0]]],[],[],[],[],[]]
# Output:
# [null,[-1,-2],[2,0],[-2,-1],[3,0],[-2,-2]]
# Explanation of Input Syntax:
#
# The input is two lists: the subroutines called and their arguments.
# Solution's constructor has one argument,
# the array of rectangles rects. pick has no arguments.
# Arguments are always wrapped with a list, even if there aren't any.
import random
import bisect
class Solution(object):

    def __init__(self, rects):
        """
        :type rects: List[List[int]]
        """
        self.__rects = list(rects)
        # Number of integer points in each rectangle (borders inclusive,
        # hence the +1 on each side), accumulated into a prefix-sum array.
        # A list comprehension + range() works on both Python 2 and 3; the
        # original map(lambda)/xrange combination was Python-2-only
        # (indexing a map object fails on Python 3).
        self.__prefix_sum = [(x[2] - x[0] + 1) * (x[3] - x[1] + 1) for x in rects]
        for i in range(1, len(self.__prefix_sum)):
            self.__prefix_sum[i] += self.__prefix_sum[i - 1]

    def pick(self):
        """
        :rtype: List[int]
        """
        # Choose a point index uniformly over all integer points, then find
        # the rectangle containing it by binary search on the prefix sums.
        target = random.randint(0, self.__prefix_sum[-1] - 1)
        left = bisect.bisect_right(self.__prefix_sum, target)
        rect = self.__rects[left]
        width = rect[2] - rect[0] + 1
        height = rect[3] - rect[1] + 1
        # Offset of the chosen point within this rectangle.
        base = self.__prefix_sum[left] - width * height
        return [rect[0] + (target - base) % width, rect[1] + (target - base) // width]
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
| mit | Python |
eb8eabd44764dc26fdbd08ef35b3ea8fc0dd7c54 | Add mutt display script | SevereOverfl0w/.files,SevereOverfl0w/.files,SevereOverfl0w/.files | bin/mutt-display.py | bin/mutt-display.py | #!/usr/bin/env python2
"""
Copyright 2011 by Brian C. Lane
"""
import sys
import email
raw_msg = sys.stdin.read()
msg = email.message_from_string(raw_msg)
date = msg.get('Date', None)
if date:
from email.utils import mktime_tz, parsedate_tz, formatdate
try:
# Convert to local TZ
tz_tuple = parsedate_tz(date)
epoch_time = mktime_tz(tz_tuple)
msg.add_header('X-Date', formatdate( epoch_time, localtime=True ))
from cStringIO import StringIO
from email.generator import Generator
fp = StringIO()
g = Generator(fp, mangle_from_=False, maxheaderlen=200)
g.flatten(msg)
sys.stdout.write(fp.getvalue())
except:
import traceback
print(traceback.format_exc())
sys.stdout.write(raw_msg)
else:
# just write it out
sys.stdout.write(raw_msg)
| mit | Python | |
2957a0331654a22c6f62544b6ec1ca4a4ee86be9 | Tweak metainfo_series series name detection. | LynxyssCZ/Flexget,ratoaq2/Flexget,jawilson/Flexget,thalamus/Flexget,tarzasai/Flexget,vfrc2/Flexget,tobinjt/Flexget,malkavi/Flexget,poulpito/Flexget,offbyone/Flexget,Danfocus/Flexget,vfrc2/Flexget,jacobmetrick/Flexget,jawilson/Flexget,lildadou/Flexget,malkavi/Flexget,malkavi/Flexget,sean797/Flexget,LynxyssCZ/Flexget,oxc/Flexget,OmgOhnoes/Flexget,offbyone/Flexget,xfouloux/Flexget,Flexget/Flexget,lildadou/Flexget,antivirtel/Flexget,dsemi/Flexget,ianstalk/Flexget,ZefQ/Flexget,tarzasai/Flexget,v17al/Flexget,jawilson/Flexget,oxc/Flexget,X-dark/Flexget,poulpito/Flexget,qk4l/Flexget,v17al/Flexget,oxc/Flexget,Danfocus/Flexget,sean797/Flexget,Danfocus/Flexget,ratoaq2/Flexget,OmgOhnoes/Flexget,tsnoam/Flexget,ibrahimkarahan/Flexget,tvcsantos/Flexget,xfouloux/Flexget,LynxyssCZ/Flexget,grrr2/Flexget,qvazzler/Flexget,crawln45/Flexget,antivirtel/Flexget,Pretagonist/Flexget,Pretagonist/Flexget,camon/Flexget,v17al/Flexget,thalamus/Flexget,grrr2/Flexget,lildadou/Flexget,qvazzler/Flexget,asm0dey/Flexget,qk4l/Flexget,spencerjanssen/Flexget,X-dark/Flexget,Pretagonist/Flexget,crawln45/Flexget,qk4l/Flexget,jacobmetrick/Flexget,thalamus/Flexget,spencerjanssen/Flexget,grrr2/Flexget,cvium/Flexget,spencerjanssen/Flexget,tobinjt/Flexget,X-dark/Flexget,tsnoam/Flexget,sean797/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,patsissons/Flexget,drwyrm/Flexget,jacobmetrick/Flexget,ibrahimkarahan/Flexget,voriux/Flexget,vfrc2/Flexget,antivirtel/Flexget,LynxyssCZ/Flexget,Flexget/Flexget,ZefQ/Flexget,ratoaq2/Flexget,OmgOhnoes/Flexget,patsissons/Flexget,tarzasai/Flexget,tobinjt/Flexget,tvcsantos/Flexget,Danfocus/Flexget,offbyone/Flexget,ianstalk/Flexget,JorisDeRieck/Flexget,voriux/Flexget,drwyrm/Flexget,poulpito/Flexget,crawln45/Flexget,Flexget/Flexget,dsemi/Flexget,xfouloux/Flexget,ibrahimkarahan/Flexget,jawilson/Flexget,cvium/Flexget,qvazzler/Flexget,drwyrm/Flexget,camon/Flexget,malkavi/Flexget,tobinjt/Flexget,cvium/
Flexget,asm0dey/Flexget,Flexget/Flexget,dsemi/Flexget,gazpachoking/Flexget,JorisDeRieck/Flexget,asm0dey/Flexget,patsissons/Flexget,tsnoam/Flexget,ianstalk/Flexget,JorisDeRieck/Flexget,ZefQ/Flexget,gazpachoking/Flexget | flexget/plugins/metainfo_series.py | flexget/plugins/metainfo_series.py | import logging
from flexget.plugin import *
from flexget.utils.titles import SeriesParser
import re
log = logging.getLogger('metanfo_series')
class MetainfoSeries(object):
    """
    Check if entry appears to be a series, and populate series info if so.
    """

    def validator(self):
        from flexget import validator
        return validator.factory('boolean')

    def on_feed_metainfo(self, feed):
        # Don't run if we are disabled
        if not feed.config.get('metainfo_series', True):
            return
        for entry in feed.entries:
            match = self.guess_series(entry['title'])
            if match:
                # series_guessed marks these values as heuristic rather than
                # coming from explicit series configuration.
                entry['series_name'] = match[0]
                entry['series_season'] = match[1]
                entry['series_episode'] = match[2]
                entry['series_parser'] = match[3]
                entry['series_guessed'] = True

    def guess_series(self, title):
        """Returns tuple of (series_name, season, episode, parser) if found, else None"""
        # Clean the data for parsing
        parser = SeriesParser()
        data = parser.clean(title)
        data = parser.remove_dirt(data)
        match = parser.parse_episode(data)
        if match:
            if match[0] is None:
                # An episode id matched but no season number was found.
                return
            elif match[2].start() > 1:
                # If an episode id is found, assume everything before it is series name
                # NOTE(review): match[2] appears to be the re.Match for the
                # episode id -- confirm against SeriesParser.parse_episode.
                name = data[:match[2].start()].rstrip()
                # Grab the name from the original title to preserve formatting
                # NOTE(review): assumes offsets into the cleaned `data` line
                # up with `title` -- verify clean()/remove_dirt() only replace
                # characters without changing length.
                name = title[:len(name)]
                # Replace . and _ with spaces
                name = re.sub('[\._]', ' ', name)
                # Collapse whitespace runs left behind by the substitution.
                name = ' '.join(name.split())
                season = match[0]
                episode = match[1]
                # Fill in the parser so callers can use it directly.
                parser.name = name
                parser.data = title
                parser.season = season
                parser.episode = episode
                parser.valid = True
                return (name, season, episode, parser)
register_plugin(MetainfoSeries, 'metainfo_series', builtin=True)
| import logging
from flexget.plugin import *
from flexget.utils.titles import SeriesParser
import re
log = logging.getLogger('metanfo_series')
class MetainfoSeries(object):
"""
Check if entry appears to be a series, and populate series info if so.
"""
def validator(self):
# Plugin configuration is a plain enable/disable boolean.
from flexget import validator
return validator.factory('boolean')
def on_feed_metainfo(self, feed):
# Don't run if we are disabled
if not feed.config.get('metainfo_series', True):
return
for entry in feed.entries:
match = self.guess_series(entry['title'])
if match:
# Populate the series fields from the (name, season, episode, parser) tuple.
entry['series_name'] = match[0]
entry['series_season'] = match[1]
entry['series_episode'] = match[2]
entry['series_parser'] = match[3]
# Flag the data as guessed rather than explicitly configured.
entry['series_guessed'] = True
def guess_series(self, title):
"""Returns tuple of (series_name, season, episode, parser) if found, else None"""
# Clean the data for parsing
parser = SeriesParser()
data = parser.clean(title)
data = parser.remove_dirt(data)
# Collapse runs of whitespace before parsing.
data = ' '.join(data.split())
match = parser.parse_episode(data)
if match:
# match[0] is the season; None means this does not look episodic.
if match[0] is None:
return
elif match[2].start() > 1:
# If an episode id is found, assume everything before it is series name
name = data[:match[2].start()].rstrip()
# Grab the name from the original title to preserve formatting
name = title[:len(name)]
# Replace . and _ with spaces
name = re.sub('[\._]', ' ', name)
season = match[0]
episode = match[1]
# Fill in the parser so it can be reused downstream as a valid result.
parser.name = name
parser.data = title
parser.season = season
parser.episode = episode
parser.valid = True
return (name, season, episode, parser)
# Builtin registration: applies to every feed with no configuration needed.
register_plugin(MetainfoSeries, 'metainfo_series', builtin=True)
| mit | Python |
00cc43b3e7a848c17272928f6469beb128e278b4 | add linear_regression | yeatmanlab/BrainTools,yeatmanlab/BrainTools | projects/NLR_MEG/linear_regression.py | projects/NLR_MEG/linear_regression.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 12:08:31 2018
@author: sjjoo
"""
#%%
# NOTE(review): this is an interactive analysis cell -- the temp_* arrays
# used below are assumed to already exist in the session (they are not
# defined in this file); confirm their origin before running standalone.
import numpy as np
from sklearn import linear_model as lm
import statsmodels.api as sm
import statsmodels.formula.api as smf
import pandas as pd
# Design matrix: one column per measure, in the same order as the
# DataFrame column names assigned further down.
X = np.column_stack((temp_read,temp_raw, temp_age,temp_meg1, temp_meg2))
y = temp_meg2
# NOTE(review): X's last column is temp_meg2, which is also the target y,
# so this sklearn fit regresses the measure on a matrix containing itself;
# the fitted model is never inspected afterwards -- verify this is intended.
reg = lm.LinearRegression()
reg.fit(X,y)
# Re-wrap the matrix with readable column names for the formula interface.
d = pd.DataFrame(X,columns=['read', 'raw', 'age','meg_dot', 'meg_lex'])
# OLS fits of the 'meg_dot' column against reading score and age,
# individually and together.
result1 = smf.ols('meg_dot~read',d).fit()
result2 = smf.ols('meg_dot~age',d).fit()
result3 = smf.ols('meg_dot~read+age',d).fit()
print(result1.summary())
print(result2.summary())
print(result3.summary())
# Does the 'meg_lex' column predict 'meg_dot'?
result4 = smf.ols('meg_dot~meg_lex',d).fit()
print(result4.summary())
# Raw score plus age as predictors.
result5 = smf.ols('meg_dot~raw+age',d).fit()
print(result5.summary()) | bsd-3-clause | Python | |
f20aef828bb7e3a7206cd239ff95c3234391c11c | Add Example 5.1. | jcrist/pydy,skidzo/pydy,oliverlee/pydy,Shekharrajak/pydy,jcrist/pydy,jcrist/pydy,skidzo/pydy,jcrist/pydy,Shekharrajak/pydy,Shekharrajak/pydy,jcrist/pydy,jcrist/pydy,jcrist/pydy,oliverlee/pydy,skidzo/pydy,oliverlee/pydy,skidzo/pydy,Shekharrajak/pydy | Kane1985/Chapter5/Example5.1.py | Kane1985/Chapter5/Example5.1.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Example 5.1 from Kane 1985."""
from __future__ import division
from sympy import Dummy, Matrix
from sympy import expand, solve, symbols, trigsimp
from sympy.physics.mechanics import ReferenceFrame, Point, dot, dynamicsymbols
from util import msprint, subs, partial_velocities
from util import generalized_active_forces, potential_energy
g, m1, m2, k, L, omega, t = symbols('g m1 m2 k L ω t')
q1, q2, q3 = dynamicsymbols('q1:4')
qd1, qd2, qd3 = dynamicsymbols('q1:4', level=1)
u1, u2, u3 = dynamicsymbols('u1:4')
## --- Define ReferenceFrames ---
A = ReferenceFrame('A')
B = A.orientnew('B', 'Axis', [omega * t, A.y])
E = B.orientnew('E', 'Axis', [q3, B.z])
## --- Define Points and their velocities ---
pO = Point('O')
pO.set_vel(A, 0)
pP1 = pO.locatenew('P1', q1*B.x + q2*B.y)
pD_star = pP1.locatenew('D*', L*E.x)
pP1.set_vel(B, pP1.pos_from(pO).dt(B))
pD_star.v2pt_theory(pP1, B, E)
pP1.v1pt_theory(pO, A, B)
pD_star.v2pt_theory(pP1, A, E)
## --- Expressions for generalized speeds u1, u2, u3 ---
kde = [u1 - dot(pP1.vel(A), E.x), u2 - dot(pP1.vel(A), E.y),
u3 - dot(E.ang_vel_in(B), E.z)]
kde_map = solve(kde, [qd1, qd2, qd3])
## --- Velocity constraints ---
vc = [dot(pD_star.vel(B), E.y)]
vc_map = solve(subs(vc, kde_map), [u3])
## --- Define forces on each point in the system ---
K = k*E.x - k/L*dot(pP1.pos_from(pO), E.y)*E.y
gravity = lambda m: -m*g*A.y
forces = [(pP1, K), (pP1, gravity(m1)), (pD_star, gravity(m2))]
## --- Calculate generalized active forces ---
partials = partial_velocities(zip(*forces)[0], [u1, u2], A,
kde_map, vc_map)
Fr_tilde, _ = generalized_active_forces(partials, forces)
Fr_tilde = map(expand, map(trigsimp, Fr_tilde))
print('Finding a potential energy function V.')
V = potential_energy(Fr_tilde, [q1, q2, q3], [u1, u2], kde_map, vc_map)
if V is not None:
print('V = {0}'.format(msprint(V)))
print('Substituting αi = 0, C = 0...')
zero_vars = dict(zip(symbols('C α1:4'), [0] * 4))
print('V = {0}'.format(msprint(V.subs(zero_vars))))
| bsd-3-clause | Python | |
bc691d415d32836f8354582294c6ae11413b0a6a | change version to .dev | dsemi/Flexget,sean797/Flexget,drwyrm/Flexget,poulpito/Flexget,qk4l/Flexget,LynxyssCZ/Flexget,dsemi/Flexget,ianstalk/Flexget,jacobmetrick/Flexget,tarzasai/Flexget,dsemi/Flexget,drwyrm/Flexget,LynxyssCZ/Flexget,Flexget/Flexget,oxc/Flexget,Flexget/Flexget,ianstalk/Flexget,jacobmetrick/Flexget,jacobmetrick/Flexget,poulpito/Flexget,qvazzler/Flexget,jawilson/Flexget,malkavi/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,tobinjt/Flexget,crawln45/Flexget,jawilson/Flexget,crawln45/Flexget,Danfocus/Flexget,qk4l/Flexget,gazpachoking/Flexget,crawln45/Flexget,oxc/Flexget,malkavi/Flexget,JorisDeRieck/Flexget,qk4l/Flexget,poulpito/Flexget,sean797/Flexget,oxc/Flexget,tarzasai/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,OmgOhnoes/Flexget,jawilson/Flexget,jawilson/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,LynxyssCZ/Flexget,qvazzler/Flexget,malkavi/Flexget,tobinjt/Flexget,Danfocus/Flexget,drwyrm/Flexget,JorisDeRieck/Flexget,qvazzler/Flexget,sean797/Flexget,OmgOhnoes/Flexget,LynxyssCZ/Flexget,gazpachoking/Flexget,malkavi/Flexget,Danfocus/Flexget,tarzasai/Flexget,Flexget/Flexget,Danfocus/Flexget,ianstalk/Flexget,crawln45/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
# Development version; per the module docstring, the release job strips
# the '.dev' suffix at release time.
__version__ = '2.0.0.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
# Release version (no '.dev' suffix); see the module docstring for the
# version-bump workflow.
__version__ = '2.0.0'
| mit | Python |
9012cb2aa34d6df32e780555b74581b29cd309b8 | add forgotten new module | zoofIO/flexx,zoofIO/flexx,jrversteegh/flexx,JohnLunzer/flexx,JohnLunzer/flexx,jrversteegh/flexx,JohnLunzer/flexx,kevinlondon/flexx,kevinlondon/flexx | flexx/ui/_iframe.py | flexx/ui/_iframe.py | from .. import react
from . import Widget
class IFrame(Widget):
    """ An iframe element, i.e. a container to show web-content.

    Note that some websites do not allow themselves to be rendered in a
    cross-source iframe.
    """

    CSS = '.flx-iframe {border: none;}'

    @react.input
    def url(v=''):
        """ The url to show. 'http://' is automatically prepended if the url
        does not have '://' in it.
        """
        v = str(v)
        if v and '://' not in v:
            # Bare host names are assumed to be plain http.
            v = 'http://' + v
        return v

    class JS:

        def _create_node(self):
            # The widget's DOM node is a plain <iframe> element.
            self.node = document.createElement('iframe')

        @react.connect('url')
        def _update_url(self, url):
            # Re-point the iframe whenever the url input changes.
            # (Removed a leftover debug print of the url here.)
            self.node.src = url
| bsd-2-clause | Python | |
76c7add3a57810d42e6584ddf22acc027f641a0a | Create classes.py | Xsmael/noogger,Xsmael/node-logger,Xsmael/node-logger | classes.py | classes.py | from tkinter import *
class myClass:
    """A tiny Tk demo: two buttons and a label packed into one frame.

    The 'click' button writes a greeting into the label via hello();
    the 'quit' button leaves the mainloop through the frame's quit.
    """

    def __init__(self, master):
        # Build the whole widget tree as soon as the object is created.
        container = Frame(master)
        container.pack()
        self.printBtn = Button(container, text='click', command=self.hello)
        self.printBtn.pack()
        self.quitBtn = Button(container, text='quit', command=container.quit)
        self.quitBtn.pack()
        self.label = Label(container)
        self.label.pack()

    def hello(self):
        # Callback for the 'click' button: show the greeting in the label.
        self.label.config(text='HelloO!')
# Build the root window, instantiate the demo UI, and run the Tk event loop.
root= Tk()
c= myClass(root)
root.mainloop()
| mit | Python | |
2ccefe090305e815633f92a6f3d13155e46e7711 | Update migrations | teamtaverna/core | app/timetables/migrations/0002_auto_20171005_2209.py | app/timetables/migrations/0002_auto_20171005_2209.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-10-05 22:09
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='course',
name='name',
field=models.CharField(help_text='Example: appetizer, main course, dessert', max_length=150),
),
migrations.AlterField(
model_name='course',
name='sequence_order',
field=models.PositiveSmallIntegerField(help_text='The numerical order of the dishes for a meal option. E.g, 1 for appetizer, 2 for main course', unique=True),
),
migrations.AlterField(
model_name='timetable',
name='cycle_length',
field=models.PositiveSmallIntegerField(help_text='Number of days in which the menu timetable is repeated after a period of time. E.g, A cycle length of 14 days (2 weeks) including the inactive weekdays like weekends after which the food schedule is repeated.', validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='timetable',
name='ref_cycle_date',
field=models.DateField(help_text='The reference date in time with which cycle day for any other following date can be computed. E.g, 1 if today is Sunday as first day of the cycle length. No need to always update this except the cycle changes.'),
),
migrations.AlterField(
model_name='timetable',
name='ref_cycle_day',
field=models.PositiveSmallIntegerField(help_text='The reference day (numerical value) in time with which cycle day for any other following date can be computed. E.g, 1 if today is Sunday as first day of the cycle length. No need to always update this except the cycle changes.', validators=[django.core.validators.MinValueValidator(1)]),
),
]
| mit | Python | |
7f2f0dca532ce3cbcf33720e56a639f78b82e771 | add console utility | vektorlab/multivac,bcicen/slackbot,vektorlab/multivac,bcicen/slackbot | multivac/console.py | multivac/console.py | import sys
from termcolor import colored
from multivac.version import version
from multivac.models import JobsDB
from multivac.util import format_time
class Console(object):
    """Minimal interactive shell for inspecting Multivac jobs and actions.

    Connects to the jobs database on localhost:6379 and immediately enters
    the read-eval loop, dispatching on the first word of each input line.
    """

    def __init__(self):
        self.prompt = colored('multivac> ', 'cyan', attrs=['bold'])
        self.db = JobsDB('localhost', 6379)
        # Maps command word -> bound handler. Handlers that take an
        # argument (e.g. logs) get the rest of the line as one string.
        self.commands = {'jobs': self.jobs,
                         'logs': self.logs,
                         'actions': self.actions,
                         'exit': self.exit}
        self.run()

    def run(self):
        """Read-eval loop: split a line into command + args and dispatch."""
        print('Multivac version %s' % (version))
        while True:
            cmdline = input(self.prompt).split(' ')
            cmd = cmdline.pop(0)
            args = ' '.join(cmdline)
            if not cmd:
                # Blank line: just re-prompt instead of "invalid command".
                continue
            if cmd not in self.commands:
                print('invalid command: %s' % (cmd))
            elif args:
                self.commands[cmd](args)
            else:
                self.commands[cmd]()

    def jobs(self):
        """Print one line per job: creation time, id, name and status."""
        for j in self.db.get_jobs():
            created = format_time(j['created'])
            # Color the common statuses; anything else is printed as-is.
            if j['status'] == 'completed':
                status = colored(j['status'], 'green')
            elif j['status'] == 'pending':
                status = colored(j['status'], 'yellow')
            else:
                status = j['status']
            print('%s %s(%s) %s' % (created, j['id'], j['name'], status))

    def logs(self, job_id):
        """Print the stored log for *job_id*, or an error if it is unknown."""
        jobs = self.db.get_jobs()
        if job_id not in [j['id'] for j in jobs]:
            print(colored('no such job: %s' % job_id, 'red'))
            return
        print('\n'.join(self.db._get_stored_log(job_id)))

    def actions(self):
        """Print the configured actions as an aligned three-column table."""
        output = [['Name', 'Command', 'Confirm Required']]
        for a in self.db.get_actions():
            # (Removed a dead assignment that built a colored copy of the
            # name but never used it; the plain name is what gets printed.)
            output.append([a['name'], a['cmd'], a['confirm_required']])
        self._print_column(output)

    def exit(self):
        """Terminate the console process."""
        sys.exit(0)

    def _print_column(self, data, has_header=True):
        """Print *data* (a list of rows) with each cell left-justified.

        Cells are coerced to str first, so non-string values (e.g. the
        boolean ``confirm_required``) no longer raise a TypeError in
        len()/ljust(). ``has_header`` is currently unused; kept for
        interface stability.
        """
        rows = [[str(cell) for cell in row] for row in data]
        col_width = max(len(cell) for row in rows for cell in row) + 2
        for row in rows:
            print(''.join(cell.ljust(col_width) for cell in row))
# Entry point when executed directly; constructing Console starts the loop.
if __name__ == '__main__':
c = Console()
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.