commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
9ec9dca1bc599a3a4234881029f9dfff5f0a2e63 | Add vacation_overlap. | jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools | problem/bench/db/vacation_overlap.py | problem/bench/db/vacation_overlap.py | #! /usr/bin/env python
"""Al and Ben will each take vacations, with overlap.
How short staffed will the office be, on a daily basis?
"""
import datetime as dt
import sqlalchemy as sa
class Vacation:
    """Build a throwaway SQLite database describing Al's and Ben's
    overlapping vacations.

    The report() entry point creates the schema and loads the sample rows;
    the overlap query itself is not implemented here yet.
    """
    def __init__(self):
        # Scratch database under /tmp; recreated on every run.
        self.engine = sa.create_engine('sqlite:////tmp/vacation.db')
    def _create_tables(self):
        """Create the emp and calendar tables."""
        emp_ddl = """
            CREATE TABLE emp (
                name TEXT PRIMARY KEY,
                vac_start DATE NOT NULL,
                vac_end DATE NOT NULL
            )
        """
        calendar_ddl = """
            CREATE TABLE calendar (
                day DATE PRIMARY KEY
            )
        """
        for ddl in (emp_ddl, calendar_ddl):
            self.engine.execute(ddl)
    def _populate_tables(self):
        """Insert the two employees and a 28-day February 2020 calendar."""
        self.engine.execute(
            "INSERT INTO emp VALUES ('Al', '2020-02-03', '2020-02-16')")
        self.engine.execute(
            "INSERT INTO emp VALUES ('Ben', '2020-02-10', '2020-02-23')")
        insert_day = sa.text("INSERT INTO calendar VALUES (:day)")
        first_day = dt.date(2020, 2, 1)
        for offset in range(28):
            self.engine.execute(insert_day,
                                dict(day=first_day + dt.timedelta(days=offset)))
    def report(self):
        """Create and populate the demo schema."""
        self._create_tables()
        self._populate_tables()
if __name__ == '__main__':
    Vacation().report()
| mit | Python | |
1dc52c2139e535b487cd9082259ac74802313132 | Create Wiki_XML.py | North-Guard/BigToolsComplicatedData,North-Guard/BigToolsComplicatedData | week4-5/Wiki_XML.py | week4-5/Wiki_XML.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 27 14:25:47 2017
@author: Laurent Vermue (lauve@dtu.dk)
"""
import xml.etree.ElementTree as ET
import time
import re
from bz2file import BZ2File
import sqlite3
from psutil import virtual_memory
# Possibility to take memory into account (machine optimization).
mem = virtual_memory()
mem.available
# File locations: the bzipped wiki dump to read and the SQLite DB to fill.
filepath = "/Users/lousy12/"
filename = "enwiki-20170820-pages-articles-multistream.xml.bz2"
database_name = "Articles.db"
# Creating or connecting to the database
conn = sqlite3.connect(filepath + database_name)
c = conn.cursor()
# Create table
c.execute('''CREATE TABLE IF NOT EXISTS Articles
(Ident INTEGER PRIMARY KEY, Article VARCHAR(10))''')
# Opening the BZ2-File (renamed from ``file`` to avoid shadowing the builtin).
dump_file = BZ2File(filepath + filename)
# XML namespace prefix used by the mediawiki export format for every tag.
pretext = "{http://www.mediawiki.org/xml/export-0.10/}"
# Inverted index: word -> list of article row ids containing it.
index = {}
# Stream-parse the dump; 'end' events fire once an element is complete.
context = ET.iterparse(dump_file, events=('end',))
# Counting time
start = time.time()
i = 0
for event, elem in context:
    if elem.tag == pretext + "page":
        text = elem[-1].find(pretext + 'text').text.replace('\n', '').lower()
        # Single-row insert (the original used executemany with a
        # one-element list); lastrowid gives the article's row id.
        c.execute("INSERT INTO Articles(Article) VALUES (?)", (text,))
        article_position = c.lastrowid
        # Index every distinct word of the article body.
        for word in set(re.split(r'\W+', text)):
            if word in index:
                index[word].append(article_position)
            else:
                index[word] = [article_position]
        i += 1
        if i == 10000:
            break
    # Bug fix: ``elem.clear`` was missing its call parentheses, so parsed
    # elements were never released and memory grew without bound.
    elem.clear()
end = time.time()
print("Finished in time: {:.2f}s".format(end - start))
conn.commit()
c.close()
conn.close()
'''
def search(substr):
result = []
for key in dictionary:
if substr in key:
result.append((key, dictionary[key]))
return result
set(sum([index[k] for k in index if "a" in k],[]))
set.intersection(set1,set2,set3)
'''
| mit | Python | |
4900617f38a912fbf386c6a87c55627d87dd59fd | Add test_assign.py | sony/nnabla,sony/nnabla,sony/nnabla | python/test/function/test_assign.py | python/test/function/test_assign.py | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is still left unlike other *2 operation (sub2, mul2, ...) because
it has cudnn implementation.
"""
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('Assign')
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [314])
def test_assign_forward_backward(seed, ctx, func_name):
    # Forward: F.assign(dst, src) must return src unchanged -- that is the
    # reference function ``lambda dst, src: src`` below.
    # Backward: assign propagates no gradient to its inputs, hence the
    # all-zero reference gradient. 48 = 2 inputs * (2*3*4) elements --
    # presumably function_tester expects one flat gradient for both inputs;
    # confirm against nbla_test_utils.
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Two random (2, 3, 4) float32 arrays: destination and source.
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2 for _ in range(2)]
    grads = np.zeros((48,))
    function_tester(rng, F.assign, lambda dst, src: src, inputs, ref_grad=lambda *args: grads,
                    ctx=ctx, func_name=func_name, atol_f=1e-3, atol_b=1e-2)
| apache-2.0 | Python | |
b9044185e572c811ebe8a8fc89d54f141b0466fb | Add getCurrentConfig method to track.py | ollien/playserver,ollien/playserver,ollien/playserver | play-server/track.py | play-server/track.py | import configmanager
import osascript
APP_CONFIG_PATH = "applications/"
applicationConfigs = configmanager.ConfigManager(APP_CONFIG_PATH)
#TODO: Make this user choosable
currentApplication = "radiant"
def getCurrentConfig():
	"""Return the configuration object for the currently selected application."""
	selected = applicationConfigs[currentApplication]
	return selected
| mit | Python | |
dff6aebe247601bbd1a28acc1ddda57052fb7fb3 | Create seaborn.py | mapattacker/cheatsheets,mapattacker/cheatsheets,mapattacker/cheatsheets,mapattacker/cheatsheets,mapattacker/cheatsheets,mapattacker/cheatsheets | seaborn.py | seaborn.py | import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
#------------------------------------------------------------------------------------
# http://gree2.github.io/python/2015/05/05/python-seaborn-tutorial-controlling-figure-aesthetics
# Reset all parameters to default
sns.set()
# STYLES
# default is darkgrid, others include: whitegrid, dark, white
sns.set_style('darkgrid')
sns.set_style('whitegrid')
sns.set_style('dark')
sns.set_style('white')
# SIZE
# default is notebook, others include: paper, talk, poster
sns.set_context('notebook')
sns.set_context('paper')
sns.set_context('talk')
sns.set_context('poster')
# REMOVE BORDER
# only for white & tick styles
# put at bottom of plot
sns.despine() # remove top and right border
sns.despine(left=True, bottom=True) # also remove left and bottom borders
#------------------------------------------------------------------------------------
# COLOR PALETTES
# http://seaborn.pydata.org/tutorial/color_palettes.html
# http://chrisalbon.com/python/seaborn_color_palettes.html
sns.color_palette() # default palette
sns.color_palette("deep", 10)
# plot out color palettes
sns.palplot(sns.color_palette("deep", 10))
# see other palette types in links provided.
#----------------------------- PLOTTING GRAPHS --------------------------------------
#------------------------------------------------------------------------------------
# GRAPH SETTINGS
# add in x & y labels
plt.xlabel('Agricultural Land %', fontsize=18) #change font size too!
plt.ylabel('Protected Areas %')
# change size, put this line in front. 2 is width, 6 is height
# put at start of plot
plt.figure(figsize=(2, 6))
# rotate axis labels
plt.xticks(rotation=90)
#------------------------------------------------------------------------------------
# MULTIPLE PLOTS
# plot individual columns
sub1=df[['VAR1','VAR2','VAR3']]
sns.boxplot(data=sub1)
# plot individual columns but by one levels of a categoical variable
sub1=df[['CAT1','VAR1','VAR2','VAR3']]
sub1.boxplot(by='CAT1')
#------------------------------------------------------------------------------------
# SINGLE AXIS GRAPHS
# boxplot
# vertical; if need horizontal change y to x
sns.boxplot(y=df[df.columns[3]]) # dots within box not shown
sns.stripplot(y=df[df.columns[i]], size=4, jitter=True, color="black") # boxes in plot shown
# countplot
sns.countplot(x=df[df.columns[1]], data=df)
# distribution plot
sns.distplot(x=df[df.columns[1]], data=df)
#------------------------------------------------------------------------------------
# DOUBLE AXES GRAPHS
# regression plot & linear model plot
sns.regplot(x=df.columns[7],y='PROTECTED_AREAS_T_M%', data=df)
sns.lmplot(x=df.columns[7],y='PROTECTED_AREAS_T_M%', data=df)
# add pearson's R
import scipy.stats as ss
ss.pearsonr(df3_layer['depth'],df3_layer['diameter'])
# scatterplot
# basically same as lmplot & regplot but add a fit_reg=False
sns.lmplot(x=df.columns[7],y='PROTECTED_AREAS_T_M%', fit_reg=False, data=df)
# barchart, using factorplot
sns.factorplot(x='NUMBER_LAYERS',y='DEPTH_RIMFLOOR_TOPOG',kind='bar', data=df2)
sns.factorplot(x='layers',y='depth',data=df3, kind='bar', ci=False) # remove confidence interval
#------------------------------------------------------------------------------------
# SUBPLOTS
fig, ax = plt.subplots(ncols=3, nrows=2, figsize=(16, 20))
sns.regplot(x=df[df.columns[1]], y='Protected Areas', data=df, ax=ax[0,0])
sns.regplot(x=df[df.columns[2]], y='Protected Areas', data=df, ax=ax[0,1])
sns.regplot(x=df[df.columns[3]], y='Protected Areas', data=df, ax=ax[0,2])
sns.regplot(x=df[df.columns[4]], y='Protected Areas', data=df, ax=ax[1,0])
sns.regplot(x=df[df.columns[5]], y='Protected Areas', data=df, ax=ax[1,1])
sns.regplot(x=df[df.columns[6]], y='Protected Areas', data=df, ax=ax[1,2])
| mit | Python | |
5426b2be91d7cd42e70d074e305b6e6b705dd67b | Improve multy ordering in admin change list: http://code.djangoproject.com/ticket/389 | ilblackdragon/django-misc | misc/admin.py | misc/admin.py | from django.contrib.admin.views.main import ChangeList
class SpecialOrderingChangeList(ChangeList):
    """
    Override change list for improve multiordering in admin change list.
    `Django will only honor the first element in the list/tuple ordering attribute; any others will be ignored.`
    Example:
    class SongAdmin(admin.ModelAdmin):
        list_display = ['name', 'time', 'artist', 'album', 'track', 'total_tracks']
        special_ordering = {'artist': ('artist', 'album', 'track'), 'album': ('album', 'track')}
        default_special_ordering = 'artist'
        def get_changelist(self, request, **kwargs):
            return SpecialOrderingChangeList
    """
    def apply_special_ordering(self, queryset):
        # 'ot' = order type ('asc'/'desc'), 'o' = index of the list_display
        # column the user clicked; both arrive via the changelist querystring.
        order_type, order_by = [self.params.get(param, None) for param in ('ot', 'o')]
        special_ordering = self.model_admin.special_ordering
        if special_ordering:
            try:
                if order_type and order_by:
                    # Map the clicked column index to its multi-column ordering.
                    order_field = self.list_display[int(order_by)]
                    ordering = special_ordering[order_field]
                    if order_type == 'desc':
                        ordering = ['-' + field for field in ordering]
                else:
                    # No explicit column clicked: use the admin's default
                    # multi-column ordering.
                    ordering = special_ordering[self.model_admin.default_special_ordering]
                queryset = queryset.order_by(*ordering)
            except IndexError:
                # 'o' pointed outside list_display -- leave queryset unchanged.
                return queryset
            except KeyError:
                # Clicked column has no special ordering configured.
                return queryset
        return queryset
    def get_query_set(self):
        # NOTE(review): Django >= 1.6 renamed this hook to get_queryset();
        # on newer Django this override would never be called -- confirm the
        # target Django version.
        queryset = super(SpecialOrderingChangeList, self).get_query_set()
        queryset = self.apply_special_ordering(queryset)
        return queryset
| mit | Python | |
edca5c6332d8301da5473e204a10e82acf47c40f | Add mixin_demo.py | gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine | mixin_demo.py | mixin_demo.py | import time
class Mixin(object):
    """Context manager that temporarily grafts a mixin class onto *target*.

    Usage::

        with Mixin(obj, SomeMixin, key="somekey") as mixed_in:
            mixed_in.execute()

    On entry the mixin is appended to ``type(target).__bases__``; on exit it
    is removed again.  If *mixin* is itself a Mixin subclass (a "proxy"),
    the concrete mixin is resolved from the proxy's registry using the value
    of ``target.params["<KEY>_CALCULATION_MODE"]``.
    """
    # Class-level registry: key -> {'mixin': cls, 'order': int}.
    # Subclasses (e.g. a proxy) shadow this with their own dict.
    mixins = {}

    def __init__(self, target, mixin, key=""):
        # params key consulted when the mixin must be resolved by proxy.
        self.key = key.upper() + "_CALCULATION_MODE"
        self.target = target
        self.mixin = mixin

    def __enter__(self):
        return self._load()

    def __exit__(self, *args):
        self._unload()

    def _load(self):
        """Resolve a proxied mixin if needed, then graft it onto the target."""
        if issubclass(self.mixin, type(self)):
            self._proxied_mixin()
        self.target.__class__.__bases__ += (self.mixin,)
        return self.target

    def _unload(self):
        """Remove the previously grafted mixin from the target's bases."""
        bases = list(self.target.__class__.__bases__)
        bases.remove(self.mixin)
        self.target.__class__.__bases__ = tuple(bases)

    def _proxied_mixin(self):
        """Replace self.mixin with the concrete mixin named in target.params."""
        calculation_mode = self.target.params[self.key]
        self.mixin = self.mixin.mixins[calculation_mode]['mixin']

    @classmethod
    def ordered_mixins(cls):
        """Return [(key, mixin), ...] sorted by registration order."""
        return [(k, v['mixin'])
                for (k, v)
                in sorted(cls.mixins.items(), key=lambda x: x[1]['order'])]

    @classmethod
    def register(cls, key, mixin, order=0):
        """Register *mixin* under *key*; an existing key is left untouched."""
        if key not in cls.mixins:
            cls.mixins[key] = {'mixin': mixin, 'order': order}

    @classmethod
    def unregister(cls, key):
        """Remove the mixin registered under *key*.

        Bug fix: the original did ``del mixins[key]``, which raised
        NameError (there is no global ``mixins``); the registry lives on
        the class.
        """
        del cls.mixins[key]
class ProxyMixin(Mixin):
    # A Mixin subclass with its own registry; Mixin._load's issubclass
    # check detects it and resolves the concrete mixin from target.params
    # via _proxied_mixin().
    mixins = {}
class ProxiedMixin:
    # Concrete mixin that is only reachable through ProxyMixin's registry.
    @classmethod
    def execute(cls):
        print ' In Proxied Mixin'
class UnproxiedMixin:
    # Concrete mixin registered directly on the base Mixin registry.
    @classmethod
    def execute(cls):
        print ' In Unproxied Mixin'
class SomeClass(object):
    # params drives proxied-mixin resolution: the key is
    # "<KEY>_CALCULATION_MODE", the value names a mixin registered on the
    # proxy (here: ProxiedMixin).
    params = {"SOMEKEY_CALCULATION_MODE": "ProxiedMixin"}
    pass
# Wire up the registries: the proxy participates in Mixin's ordering, while
# the concrete ProxiedMixin is only reachable through the proxy.
Mixin.register("MixinProxy", ProxyMixin, order=1)
Mixin.register("UnproxiedMixin", UnproxiedMixin, order=2)
ProxyMixin.register("ProxiedMixin", ProxiedMixin)
some_object = SomeClass()
print "method resolution order: "
print some_object.__class__.__mro__
print "the execute method: "
# Before any mixin is loaded, execute() does not exist on SomeClass.
try:
    some_object.execute()
except AttributeError, e:
    print e
# Demonstrate each registered mixin: graft it in, show the MRO, call
# execute(), then drop it again on context exit.
for (key, mixin) in Mixin.ordered_mixins():
    print
    print "----"
    print
    time.sleep(1)
    print "key: %s" % key
    print "mixin: %s" % mixin
    with Mixin(some_object, mixin, key="SOMEKEY") as mixed_in:
        print "method resolution order: "
        print mixed_in.__class__.__mro__
        print "the execute method:"
        mixed_in.execute()
| agpl-3.0 | Python | |
95d855743e9f37e87397c9be7ffc9b888320a4ac | Add the init method to the note model. | yiyangyi/cc98-tornado | model/note.py | model/note.py | class NoteModel(Query):
def __init__(self, db):
self.db = db
self.table_name = "note"
super(NoteModel, self).__init__() | mit | Python | |
87a77ca8150c970ac9083894b67c3d73a8a73e7f | Add configuration.py | xthan/polyvore,xthan/polyvore | polyvore/configuration.py | polyvore/configuration.py | # Copyright 2017 Xintong Han. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bi-LSTM Polyvore model and training configurations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class ModelConfig(object):
  """Wrapper class for model hyperparameters.

  Holds every knob for the Bi-LSTM Polyvore model: input pipeline settings,
  feature names inside the SequenceExample protos, Inception image sizes,
  LSTM dimensions, and the loss-balancing factors.
  """
  def __init__(self):
    """Sets the default model hyperparameters."""
    # File pattern of sharded TFRecord file containing SequenceExample protos.
    # Must be provided in training and evaluation modes.
    self.input_file_pattern = None
    # Image format ("jpeg" or "png").
    self.image_format = "jpeg"
    # Approximate number of values per input shard. Used to ensure sufficient
    # mixing between shards in training.
    self.values_per_input_shard = 135
    # Minimum number of shards to keep in the input queue.
    self.input_queue_capacity_factor = 2
    # Number of threads for prefetching SequenceExample protos.
    self.num_input_reader_threads = 1
    # Name of the SequenceExample context feature containing set ids.
    self.set_id_name = "set_id"
    # Name of the SequenceExample feature list containing captions and images.
    self.image_feature_name = "images"
    self.image_index_name = "image_index"
    self.caption_feature_name = "caption_ids"
    # Number of unique words in the vocab (plus 1, for <UNK>).
    # The default value is larger than the expected actual vocab size to allow
    # for differences between tokenizer versions used in preprocessing. There is
    # no harm in using a value greater than the actual vocab size, but using a
    # value less than the actual vocab size will result in an error.
    self.vocab_size = 2757
    # Number of threads for image preprocessing.
    self.num_preprocess_threads = 1
    # Batch size.
    self.batch_size = 10
    # File containing an Inception v3 checkpoint to initialize the variables
    # of the Inception model. Must be provided when starting training for the
    # first time.
    self.inception_checkpoint_file = None
    # Dimensions of Inception v3 input images.
    self.image_height = 299
    self.image_width = 299
    # Scale used to initialize model variables.
    self.initializer_scale = 0.08
    # LSTM input and output dimensionality, respectively. embedding_size is also
    # the embedding size in the visual-semantic joint space.
    self.embedding_size = 512
    self.num_lstm_units = 512
    # If < 1.0, the dropout keep probability applied to LSTM variables.
    self.lstm_dropout_keep_prob = 0.7
    # Largest number of images in a fashion set.
    self.number_set_images = 8
    # Margin for the embedding (visual-semantic) loss.
    self.emb_margin = 0.2
    # Balance factor of all losses.
    self.emb_loss_factor = 0.1 # VSE loss
    self.f_rnn_loss_factor = 1.0 # Forward LSTM
    self.b_rnn_loss_factor = 1.0 # Backward LSTM, might give it a lower weight
    # because it is harder to predict backward than forward in our senario.
    # RNN type. "lstm", "gru", "rnn"
    self.rnn_type = "lstm"
class TrainingConfig(object):
  """Wrapper class for training hyperparameters (optimizer, LR schedule,
  checkpointing)."""
  def __init__(self):
    """Sets the default training hyperparameters."""
    # Number of examples per epoch of training data.
    self.num_examples_per_epoch = 17316
    # Optimizer for training the model.
    self.optimizer = "SGD"
    # Learning rate for the initial phase of training; presumably
    # overridable by the FLAGS in train.py (original comment was truncated
    # -- confirm against train.py).
    self.initial_learning_rate = 0.2
    # Exponential decay: multiply the LR by this factor...
    self.learning_rate_decay_factor = 0.5
    # ...every this many epochs.
    self.num_epochs_per_decay = 2.0
    # If not None, clip gradients to this value.
    self.clip_gradients = 5.0
    # How many model checkpoints to keep.
    self.max_checkpoints_to_keep = 10
| apache-2.0 | Python | |
5854334b3ed9b20886e0dd62e21094e7df0fdad0 | add basic test for schema | mylokin/schematec | tests/test_schema.py | tests/test_schema.py | from __future__ import absolute_import
# import pytest
import schematec.schema
# import schematec.converters as converters
# import schematec.validators as validators
# import schematec.exc as exc
def test_empty_schema_with_empty_value():
    """A schema with no fields maps an empty dict to an empty dict."""
    empty_schema = schematec.schema.Schema()
    result = empty_schema({})
    assert result == {}
| mit | Python | |
655fc717469c2f8fa49b4c55e4f0a1768b045758 | add quick sort | creativcoder/AlgorithmicProblems,creativcoder/AlgorithmicProblems,creativcoder/AlgorithmicProblems,creativcoder/AlgorithmicProblems | Python/quick_sort.py | Python/quick_sort.py | arr = [134,53,4,234,23,452,3,5,43,534,3,5,435,345]
def sort(arr):
    """Public entry point: sort *arr* in place with quicksort."""
    quick_sort(arr, 0, len(arr) - 1)

def get_pivot(arr, low, high):
    """Choose a pivot index for arr[low..high] (median-of-three heuristic)."""
    middle = (low + high) // 2
    if arr[low] < arr[middle]:
        return middle
    if arr[low] < arr[high]:
        return low
    return high

def quick_sort(arr, low, high):
    """Recursively sort the slice arr[low..high] in place."""
    if low >= high:
        return
    split = partition(arr, low, high)
    quick_sort(arr, low, split - 1)
    quick_sort(arr, split + 1, high)

def swap(arr, idx1, idx2):
    """Exchange two elements of *arr* in place."""
    tmp = arr[idx1]
    arr[idx1] = arr[idx2]
    arr[idx2] = tmp

def partition(arr, low, high):
    """Lomuto-style partition of arr[low..high]; return the pivot's final index."""
    if len(arr) < 2:
        return 0
    pivot_index = get_pivot(arr, low, high)
    pivot_value = arr[pivot_index]
    # Park the pivot at the left edge while partitioning.
    arr[pivot_index], arr[low] = arr[low], arr[pivot_index]
    boundary = low
    for cursor in range(low, high + 1):
        if arr[cursor] < pivot_value:
            boundary += 1
            arr[cursor], arr[boundary] = arr[boundary], arr[cursor]
    # Move the pivot between the two partitions.
    arr[low], arr[boundary] = arr[boundary], arr[low]
    return boundary
sort(arr)
print(arr)
| mit | Python | |
b75e72c5a0a8aa328afa03dee593daaa8400e96a | Add nosetest | wkentaro/utaskweb | test_utaskweb.py | test_utaskweb.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import requests
from bs4 import BeautifulSoup
import utaskweb
def assert_text_obj(obj):
    """Assert that *obj* converts to a unicode text object."""
    converted = unicode(obj)
    assert type(converted) == unicode
def test_get_text_if_exists():
    """Wrapped and bare markup with the same text must yield the same result."""
    wrapped_markup = BeautifulSoup('<tr>Hello World</tr>')
    bare_markup = BeautifulSoup('Hello World')
    wrapped_text = utaskweb.common.get_text_if_exists(wrapped_markup)
    bare_text = utaskweb.common.get_text_if_exists(bare_markup)
    assert wrapped_text == bare_text
def test_get_class_cancels():
    """Every field of every class cancellation must be valid text."""
    updated, cancels = utaskweb.get_class_cancels(date_fill=True)
    assert_text_obj(updated)
    for value in (v for cancel in cancels for v in cancel.values()):
        assert_text_obj(value)
def test_get_admin_announces():
    """Every field of every admin announcement must be valid text.

    Removed a leftover debug branch that printed ``dir(text)`` for
    non-string values -- assert_text_obj already fails loudly for those.
    """
    updated, announces = utaskweb.get_admin_announces()
    assert_text_obj(updated)
    for announce in announces:
        for text in announce.values():
            assert_text_obj(text)
def test_get_class_roomchanges():
    """Every field of every room change must be valid text."""
    updated, roomchanges = utaskweb.get_class_roomchanges(date_fill=True)
    assert_text_obj(updated)
    for value in (v for change in roomchanges for v in change.values()):
        assert_text_obj(value)
| mit | Python | |
2448b2608ab4d32c4d80c1bbd2a09063197524e6 | Create __init__.py | OdooCommunityWidgets/website_product_attachments | __init__.py | __init__.py | import product_attachments
| mit | Python | |
8e83b41a4796d62868a113ccb1e949a2d09bafac | Test throwing future callback | aldebaran/libqi,aldebaran/libqi-java,aldebaran/libqi-java,aldebaran/libqi,aldebaran/libqi,vbarbaresi/libqi,bsautron/libqi,aldebaran/libqi-java | python/test/test_servicedirectory.py | python/test/test_servicedirectory.py | import time
from qi import ServiceDirectory
from qi import Session
def main():
    # Regression check: a future callback that raises must not bring the
    # session/event machinery down with it.
    def raising(f):
        raise Exception("woops")
    local = "tcp://127.0.0.1:5555"
    sd = ServiceDirectory()
    sd.listen(local)
    s = Session()
    s.connect(local)
    # Asynchronous service lookup returns a future; attach the bad callback.
    f = s.service("ServiceDirectory", _async=True)
    f.add_callback(raising)
    # Give the callback a moment to fire before tearing everything down.
    time.sleep(0.01)
    s.close()
if __name__ == "__main__":
    main()
fcae40e5bbc5e593d4245747dd0d1d1ef78cad3a | Add a (hackish) feed wrapper for auto-subscribing and reading new MLs | Humbedooh/ponymail,quenda/ponymail,jimjag/ponymail,Humbedooh/ponymail,jimjag/ponymail,rbowen/ponymail,quenda/ponymail,Humbedooh/ponymail,jimjag/ponymail,rbowen/ponymail,rbowen/ponymail,quenda/ponymail,jimjag/ponymail | tools/feedwrapper.py | tools/feedwrapper.py | #!/usr/bin/env python3.4
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
#the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thie is feedwrapper - a mailing list auto-subscriber and/or feed passthrough program.
Activate it by adding "|/usr/bin/env python3.4 /path/to/ponymail/tools/feedwrapper.py localuser@thisdomain.abc"
Then subscribe to lists by running: python3.4 feedwrapper sub localuser@thisdomain.abc ml-subscribe@mldomain.foo"
"""
import sys, re, os, email, smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from subprocess import *
path = os.path.dirname(os.path.realpath(__file__))
if __name__ == '__main__':
    # No arguments: print usage and exit.
    if len(sys.argv) <= 1:
        print("Usage: feedwrapper [recipient email] OR")
        print(" feedwrapper sub [recipient] [ML-subscribe-address]")
        sys.exit(0)
    # "sub" mode: send a subscription request on behalf of the local user.
    if sys.argv[1] == "sub":
        sender = sys.argv[2]
        recip = sys.argv[3]
        smtpObj = smtplib.SMTP('localhost')
        smtpObj.sendmail(sender, [recip], """From: %s
To: %s
Subject: subscribe
subscribe
""" % (sender, recip)
        )
        print("Sent subscription request for %s to %s" % (sender, recip))
    # Pipe mode: an incoming message arrives on stdin (from the MTA alias).
    else:
        msg = email.message_from_file(sys.stdin)
        if msg.get('to') and msg.get('reply-to') and msg.get('subject'):
            # Detect a subscription-confirmation request from the ML software.
            # NOTE(review): ``A and B or (C and D)`` -- Python binds ``and``
            # tighter than ``or``, so the address check in A does NOT guard
            # the parenthesised ezmlm branch; confirm this is intended.
            if msg.get('to').find(sys.argv[1]) != -1 and \
                re.search(r"-request@", msg.get('reply-to')) or \
                (\
                re.match(r"confirm subscribe to", msg.get('subject'), flags=re.IGNORECASE) and \
                re.search(r"-sc\.", msg.get('reply-to')) \
                ):
                with open("%s/wrapper.log" % path, "a") as f:
                    f.write("%s - %s: %s\n" % (msg.get('to'), msg.get('reply-to'), msg.get('subject')))
                    f.write("We've got a subscription request for %s. \n" % msg.get('reply-to'))
                    # NOTE(review): redundant -- the with-block closes f.
                    f.close()
                # Auto-confirm by echoing the subject back to the reply-to.
                smtpObj = smtplib.SMTP('localhost')
                smtpObj.sendmail(sys.argv[1], [msg.get('reply-to')], """From: %s
To: %s
Subject: %s
%s
""" % (sys.argv[1], msg.get('reply-to'), msg.get('subject'), msg.get('subject'))
                )
            else:
                # Regular list mail: log it and hand it to the archiver plugin.
                with open("%s/wrapper.log" % path, "a") as f:
                    f.write("Got an email for %s\n" % (msg.get('list-id') or "??"))
                    f.write("%s - %s: %s\n" % (msg.get('to'), msg.get('reply-to'), msg.get('subject')))
                    p = Popen("/usr/bin/python3.4 %s/../mm3/plugin.py" % path, shell=True, stdin=PIPE, stderr=PIPE, stdout=sys.stdout)
                    # Feed the raw message to the plugin's stdin.
                    print(p.communicate(input=msg.as_string().encode('utf-8')))
                    p.stdin.close()
                    f.write("-----\n")
                    # NOTE(review): redundant -- the with-block closes f.
                    f.close()
| apache-2.0 | Python | |
d50d43854596522f7cef8712e0599b39c71b027b | Add initial jenkins python script for jenkins tests | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/jenkins.py | tests/jenkins.py | #!/usr/bin/env python
'''
This script is used to test salt from a jenkins server, specifically
jenkins.satstack.com.
This script is intended to be shell centric!!
'''
import subprocess
import hashlib
import random
import optparse
def run(platform, provider):
    '''
    Spin up a VM with salt-cloud, run the test states on it, then destroy it.

    platform -- OS/platform half of the cloud profile name
    provider -- provider half; combined as "<provider>_<platform>"
    '''
    # Random 6-char tag so concurrent jenkins runs get unique VM names.
    # NOTE(review): hashlib.md5 needs bytes on Python 3 -- this is
    # Python-2-era code (optparse import above); confirm target runtime.
    htag = hashlib.md5(str(random.randint(1, 100000000))).hexdigest()[:6]
    vm_name = '{0}{1}'.format(platform, htag)
    # Create the VM from the named salt-cloud profile.
    subprocess.call(
        'salt-cloud -p {0}_{1} {2}'.format(provider, platform, vm_name),
        shell=True)
    # Run tests here
    subprocess.call(
        'salt {0} state.sls testrun'.format(vm_name),
        shell=True)
    # Clean up the vm
    subprocess.call(
        'salt-cloud -d {0} -y'.format(vm_name),
        shell=True)
| apache-2.0 | Python | |
4cddb31cd5054ff146f4bab8471367dcc48297c4 | Create server2.py | khoteevnd/stepic | server2.py | server2.py | # -*- coding: UTF-8 -*-
import socket, threading, string
debug = True
_connector = None
_running = True
_host = '0.0.0.0'
_port = 2222
_maxClient = 10
_recvBuffer = 1024
def printd (aString):
    # Print only when the module-level debug flag is enabled.
    if debug:
        print aString
class talkToClient (threading.Thread):
    # One thread per accepted client: echoes every received chunk back
    # until the peer disconnects or sends "exit".
    def __init__(self, clientSock, addr):
        self.clientSock = clientSock
        self.addr = addr
        threading.Thread.__init__(self)
    def run (self):
        while True:
            recvData = self.clientSock.recv (_recvBuffer)
            if not recvData:
                # Peer closed the connection -- say goodbye and stop.
                self.clientSock.send ('bye')
                break
            printd('Client ' + str (self.addr) + ' say "' + str (recvData) + '"')
            # Echo the payload back to the sender.
            self.clientSock.send (recvData)
            if recvData == "exit":
                break
        self.clientSock.close ()
# Bind, listen, then hand every accepted connection to its own echo thread.
_connector = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
_connector.bind ((str(_host), int(_port)))
_connector.listen (int(_maxClient))
while _running:
    printd ('Running on ' + _host + ':' + str (_port) + '.')
    channel, details = _connector.accept ()
    printd ('Conect on : ' + str (details))
    talkToClient (channel, details).start ()
# NOTE(review): unreachable while _running stays True -- nothing ever
# flips the flag, so the listener socket is only closed on interpreter exit.
_connector.close ()
| mit | Python | |
13fd2335eb8b8b93e5330fe9bcc125557bffb198 | Add missing migration for verbose_name alter | edx/ecommerce,edx/ecommerce,eduNEXT/edunext-ecommerce,eduNEXT/edunext-ecommerce,eduNEXT/edunext-ecommerce,edx/ecommerce,edx/ecommerce,eduNEXT/edunext-ecommerce | ecommerce/extensions/payment/migrations/0012_auto_20161109_1456.py | ecommerce/extensions/payment/migrations/0012_auto_20161109_1456.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: only alters the verbose_name of
    # PaypalProcessorConfiguration.retry_attempts -- no database schema change.
    dependencies = [
        ('payment', '0011_paypalprocessorconfiguration'),
    ]
    operations = [
        migrations.AlterField(
            model_name='paypalprocessorconfiguration',
            name='retry_attempts',
            field=models.PositiveSmallIntegerField(default=0, verbose_name='Number of times to retry failing Paypal client actions (e.g., payment creation, payment execution)'),
        ),
    ]
| agpl-3.0 | Python | |
65c1ddc6837a36b87992304e2d364b5eb6e8d0d9 | add selenium test | otron/flask-travis-selenium-minimality,otron/flask-travis-selenium-minimality | tests/seltest.py | tests/seltest.py | from selenium import webdriver
import pytest
def test_selenium_basic():
    """Load the local dev server's front page and check for the greeting."""
    driver = webdriver.Firefox()
    try:
        # Fix: driver.get() requires a full URL including the scheme.
        driver.get("http://localhost:5000")
        # Fix: the WebDriver API is find_element_by_tag_name;
        # get_element_by_tag_name does not exist and raised AttributeError.
        bod = driver.find_element_by_tag_name('body')
        assert "Hello" in bod.text
    finally:
        # Always quit so a failed assertion doesn't leak a browser process.
        driver.quit()
| bsd-2-clause | Python | |
98262d909ad612684df9dfe6f98ee3c8217df2ce | Create __init__.py | robertclf/FAFT,robertclf/FAFT | FAFT_2048-points_C2C/__init__.py | FAFT_2048-points_C2C/__init__.py | bsd-3-clause | Python | ||
a301a90ed9cc570c3de1fdcd4c6908512cfd1183 | add require_billing_admin decorator | puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,gmimano/commcaretest,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,SEL-Columbia/commcare-hq,gmimano/commcaretest,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest | corehq/apps/accounting/decorators.py | corehq/apps/accounting/decorators.py | from django.http import Http404
from corehq import BillingAccountAdmin
def require_billing_admin():
    """
    Build a view decorator that 404s unless the requesting user is a billing
    admin for ``request.domain`` (superusers always pass).
    """
    def decorate(fn):
        """
        Decorator to require the current logged in user to be a billing admin to access the decorated view.
        """
        @wraps(fn)
        def wrapped(request, *args, **kwargs):
            # Both attributes are dereferenced below, so 404 if EITHER is
            # missing.  (Bug fix: the original used ``and``, which only
            # rejected requests missing both, then crashed with
            # AttributeError when exactly one was absent.)
            if not hasattr(request, 'couch_user') or not hasattr(request, 'domain'):
                raise Http404()
            # Removed a leftover debug ``print request.domain``.
            is_billing_admin = BillingAccountAdmin.get_admin_status_and_account(
                request.couch_user, request.domain)[0]
            if not (is_billing_admin or request.couch_user.is_superuser):
                raise Http404()
            return fn(request, *args, **kwargs)
        return wrapped
    return decorate
| bsd-3-clause | Python | |
c38d3695c0b056da3014951ee842bae4f817b657 | Add serialization unit-test | timstaley/chimenea | chimenea/tests/test_obsinfo.py | chimenea/tests/test_obsinfo.py | from __future__ import absolute_import
from unittest import TestCase
import json
from chimenea.obsinfo import ObsInfo
class TestObsInfoSerialization(TestCase):
    """Round-trip an ObsInfo through its custom JSON encoder/decoder."""
    def setUp(self):
        self.obs = ObsInfo(name='foo',
                           group='fooish',
                           metadata={'bar':'baz'})
        assert isinstance(self.obs, ObsInfo)
        # Populate a couple of attributes so the encoder has nested content.
        self.obs.uv_ms='uv_data.ms'
        self.obs.maps_dirty.ms.image='image.ms'
    def test_round_trip(self):
        # NOTE(review): this only verifies that encode->decode does not
        # raise; obs2 is never compared against self.obs. Consider asserting
        # equality (or key attributes) once ObsInfo supports comparison.
        rep = json.dumps(self.obs, cls=ObsInfo.Encoder)
        obs2 = json.loads(rep, cls=ObsInfo.Decoder)
        # print obs2
| bsd-3-clause | Python | |
0d7706db887bb5d1522f3de39b9fe1533f80fd8d | Add original script version, still not fit for general use | Vilkku/Dota-2-Hero-Parser | dota2parser.py | dota2parser.py | from bs4 import BeautifulSoup
import urllib.request
import MySQLdb
db = MySQLdb.connect(user="", passwd="", db="")  # credentials stripped before commit
c = db.cursor()
c.execute("SELECT id, name FROM heroes WHERE active=1")
heroes = c.fetchall()
for hero_id, hero_name in heroes:
    # Hero page URLs use underscores and drop apostrophes
    # (e.g. "Nature's Prophet" -> "Natures_Prophet").
    hero_url = 'https://www.dota2.com/hero/'+str(hero_name).replace(' ', '_').replace('\'', '')+'/'
    print(hero_url)
    response = urllib.request.urlopen(hero_url)
    html = response.read()
    soup = BeautifulSoup(html)
    # One row per ability on the hero's overview page.
    for overviewAbilityRow in soup.find_all('div', class_='overviewAbilityRow'):
        img = overviewAbilityRow.find('img').get('src')
        name = overviewAbilityRow.find('h2').string
        # NOTE(review): find('p') returns a Tag, not its text -- presumably
        # the DB driver stringifies it; confirm the stored description.
        description = overviewAbilityRow.find('p')
        # Parameterized insert -- safe against SQL injection.
        c.execute("INSERT INTO spells (hero_id, name, description, icon) VALUES (%s, %s, %s, %s)", (hero_id, name, description, img))
db.commit()
c.close()
db.close()
| mit | Python | |
a8e2f22bcc521aedc216d0d1849b6e4f58ede443 | Add old matrix file | WalrusCow/pyConsole | matrix.py | matrix.py | ''' Cover the screen in green ones and zeroes, as if in the Matrix. '''
import time, sys
import random
from console import getTerminalSize
ESC = '\033'
def getLine(cols):
    '''Return one row of the rain: *cols* random chars drawn from 0, 1 and
    space, where digits are three times as likely as a blank (the '000111 '
    weighting).
    '''
    alphabet = '000111 '
    return ''.join(random.choice(alphabet) for _ in range(cols))
def matrix():
    ''' Repaint the whole terminal with green rain forever (Ctrl-C to stop). '''
    delay = 0.05
    # ANSI escapes: bold green wrapper around each line, and cursor-home.
    styled_line = '{0}[1m{0}[32m{{0}}{0}[0m'.format(ESC)
    move_home = '{}[0;0H'.format(ESC)
    out = sys.stdout
    while True:
        out.write(move_home)
        # Re-query the size each frame so resizing the terminal works.
        rows, cols, x, y = getTerminalSize()
        lines = [styled_line.format(getLine(cols)) for _ in range(rows)]
        out.write('\n'.join(lines))
        out.write('\r')
        out.flush()
        time.sleep(delay)
# Run until interrupted; the bare except deliberately swallows
# KeyboardInterrupt (Ctrl-C) and prints a newline so the shell prompt
# lands on a clean line.
try:
    matrix()
except:
    print('')
| mit | Python | |
33155a213e1b31e8676a92c8e7a7f0330050b4c1 | Add base class for plugins. | lyft/pycollectd | pycollectd/plugin.py | pycollectd/plugin.py | # -*- coding: utf-8 -*-
#
# © 2013 Lyft, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common code for collectd python plugins.
"""
from __future__ import absolute_import
import collectd
class CollectDPlugin(object):
    """
    Base class for collectd plugins written in Python. Each plugin must
    have a unique name which must match the name used for the configuration
    block in the collectd configuration file.
    """
    def __init__(self, name):
        self.name = name
        # Register the plugin lifecycle hooks; subclasses override the
        # configure/initialize/shutdown methods below.
        collectd.register_config(self.configure, name=self.name)
        collectd.register_init(self.initialize)
        collectd.register_shutdown(self.shutdown)

    @staticmethod
    def config_to_dict(config):
        """
        Convert a collectd.Config object to a dictionary.

        Children become nested dictionaries; a node with several values maps
        to the tuple of values, and a single-value node maps to that value.
        """
        def config_to_tuple(config):
            """
            Convert a collectd.Config object to a (key, value) tuple.
            """
            if config.children:
                return (config.key, dict(config_to_tuple(child)
                                         for child in config.children))
            elif len(config.values) > 1:
                return (config.key, config.values)
            else:
                return (config.key, config.values[0])
        return dict([config_to_tuple(config)])

    def error(self, message):
        """
        Log an error message to the collectd logger.
        """
        collectd.error('%s plugin: %s' % (self.name, message))

    def warning(self, message):
        """
        Log a warning message to the collectd logger.
        """
        collectd.warning('%s plugin: %s' % (self.name, message))

    def notice(self, message):
        """
        Log a notice message to the collectd logger.
        """
        collectd.notice('%s plugin: %s' % (self.name, message))

    def info(self, message):
        """
        Log an info message to the collectd logger.
        """
        collectd.info('%s plugin: %s' % (self.name, message))

    def debug(self, message):
        """
        Log a debug message to the collectd logger.
        """
        collectd.debug('%s plugin: %s' % (self.name, message))

    def configure(self, config, **kwargs):
        """
        Configuration callback for the plugin, will be called by collectd with
        a collectd.Config object containing configuration data for this
        plugin from the collectd configuration file.
        """
        # The top level of the configuration is the 'Module' block, which
        # is entirely useless, so we set the config attribute to its value,
        # which should be the interesting stuff.
        self.config = CollectDPlugin.config_to_dict(config)['Module']

    def initialize(self):
        """
        Initialization callback for the plugin, will be called by collectd with
        no arguments.
        """
        pass

    def shutdown(self):
        """
        Shutdown callback for the plugin, will be called by collectd with no
        arguments.
        """
        pass

    def add_read_callback(self, callback, **kwargs):
        """
        Register a read callback with collectd. kwargs will be passed to
        collectd.register_read. The callback will be called by collectd
        without arguments.
        """
        collectd.register_read(callback, **kwargs)

    def add_write_callback(self, callback, **kwargs):
        """
        Register a write callback with collectd. kwargs will be passed to
        collectd.register_write. The callback will be called by collectd
        with a collectd.Values object as the only argument.
        """
        # Bug fix: kwargs were previously accepted but silently dropped.
        collectd.register_write(callback, **kwargs)

    def add_flush_callback(self, callback, **kwargs):
        """
        Register a flush callback with collectd. kwargs will be passed to
        collectd.register_flush. The callback will be called by collectd
        with two arguments, a timeout and an identifier.
        """
        collectd.register_flush(callback, **kwargs)

    def add_log_callback(self, callback, **kwargs):
        """
        Register a log callback with collectd. kwargs will be passed to
        collectd.register_log. The callback will be called by collectd with
        two arguments, the severity and the message (without a newline at
        the end)
        """
        collectd.register_log(callback, **kwargs)

    def add_notification_callback(self, callback, **kwargs):
        """
        Register a notification callback with collectd. kwargs will be passed
        to collectd.register_notification. The callback will be called by
        collectd with a collectd.Notification object as the only argument.
        """
        collectd.register_notification(callback, **kwargs)
class PluginError(Exception):
    """Base exception for collectd plugin errors.

    ``StandardError`` only exists on Python 2 and was removed in Python 3;
    ``Exception`` is its direct, 2/3-compatible replacement, so existing
    ``except PluginError`` clauses keep working unchanged.
    """
    pass
| apache-2.0 | Python | |
0f84eb57024bb856c10a6326a3827cb91e4d20c2 | Put the contents of a pyui file onto the clipboard | cclauss/Ten-lines-or-less | pyui_to_clipboard.py | pyui_to_clipboard.py | import clipboard
# Name of the .pyui file to copy; edit this line before running.
filename = 'put_your_filename_here.pyui'
with open(filename) as src:
    contents = src.read()
clipboard.set(contents)
print('The contents of {} are now on the clipboard.'.format(filename))
| apache-2.0 | Python | |
6126ef6028449abab49994bbbe555ecf591ad910 | Work on comets tracking | hadim/fiji_tools,hadim/fiji_tools,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_scripts | plugins/Scripts/Plugins/Test_In_Vivo_Comets_Tracking.py | plugins/Scripts/Plugins/Test_In_Vivo_Comets_Tracking.py | # @Float(label="Sigma 1", required=true, value=4.2) sigma1
# @Float(label="Sigma 2", required=true, value=1.25) sigma2
# @Boolean(label="Do thresholding ?", required=true, value=true) do_thresholding
# @Boolean(label="Show intermediates images (for debugging)", required=true, value=true) show_images
# @ImageJ ij
# @ImagePlus imp
from ij.plugin.filter import DifferenceOfGaussians
from ij.plugin import ContrastEnhancer
from ij.plugin import Duplicator
from ij.plugin import ZProjector
from ij.plugin import Thresholder
from ij.process import AutoThresholder
from ij import ImageStack
from ij import ImagePlus
from ij.process import ImageProcessor
from ij.plugin import ImageCalculator
from net.imagej.axis import Axes
from net.imagej.ops import Ops
from net.imglib2.img.display.imagej import ImageJFunctions
# Do Z Projection (maximum intensity) over the stack.
# NOTE(review): the stop slice is taken from getNChannels() — confirm this is
# intentional for a Z projection rather than getNSlices().
proj = ZProjector(imp)
proj.setMethod(ZProjector.MAX_METHOD)
proj.setStartSlice(1)
proj.setStopSlice(imp.getNChannels())
proj.doHyperStackProjection(True)
imp = proj.getProjection()

if show_images:
    imp.show()

# Apply DOG (difference-of-Gaussians) filtering in place, frame by frame.
f = DifferenceOfGaussians()
f.setup("", imp)
for i in range(1, imp.getNFrames()+1):
    imp.setPosition(i)
    f.run(imp.getProcessor(), sigma1, sigma2)

final_imp = imp

# Optional thresholding pass (Otsu) producing a binary stack.
if do_thresholding:
    binary_stack = ImageStack.create(imp.getWidth(), imp.getHeight(), imp.getType(), imp.getBitDepth())
    for i in range(1, imp.getNFrames() + 1):
        imp.setPosition(i)
        # Work on a copy so the DOG-filtered frame is not modified here.
        ip = imp.getProcessor().duplicate()
        ip = ip.convertToShort(False)
        ip.setAutoThreshold(AutoThresholder.Method.Otsu, False)
        ip.threshold(ip.getAutoThreshold())
        ip = ip.convertToByteProcessor(True)
        ip.invert()
        binary_stack.addSlice(ip)
    # ImageStack.create pre-fills one empty slice; drop it.
    binary_stack.deleteSlice(1)
    binary_imp = ImagePlus("Binary", binary_stack)
    if show_images:
        binary_imp.show()
    # Subtract binary stack from the DOG filtered stack.
    calc = ImageCalculator()
    subtracted_imp = calc.run("subtract create stack", imp, binary_imp)
    subtracted_imp.show()
    final_imp = binary_imp

final_imp.show()
| bsd-3-clause | Python | |
ae0ebcc4da3425539e067d4d6a611554327357ad | Add web-scraping | ioanacrant/Hackathon-Update,ioanacrant/Hackathon-Update,ioanacrant/Hackathon-Update | hackathonupdate-webscraping.py | hackathonupdate-webscraping.py | from lxml import html
import requests
# Fetch the MLH season-2015 events page (no error handling on the request).
page=requests.get('https://mlh.io/seasons/s2015/events.html')
tree=html.fromstring(page.text)
# Every <h3> element on the page holds one event title.
titles=tree.xpath('//*[name()="h3"]/text()')
#add dates and locations later
print(titles) | apache-2.0 | Python | |
a3706e1c743ef7ec7f38375b116538a71ccb8455 | Add utilities to convert from python2 to python3. | Quasimondo/RasterFairy | rasterfairy/utils.py | rasterfairy/utils.py | def cmp_to_key(mycmp):
"""
Convert `sorted` function from python2 to python3.
This function is used to convert `cmp` parameter of python2 sorted
function into `key` parameter of python3 sorted function.
This code is taken from here:
https://docs.python.org/2/howto/sorting.html#the-old-way-using-the-cmp-parameter
:param mycmp: compare function that compares 2 values
:return: key class that compares 2 values
"""
"Convert a cmp= function into a key= function"
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
| bsd-3-clause | Python | |
c0b53a195974173942b73d320248febd19b6788c | Add exercise 9 | zhaoshengshi/practicepython-exercise | exercise/9.py | exercise/9.py | import random
i = random.randint(1, 9)
def ask_for_input():
    """Prompt until the user enters an integer between 1 and 9; return it."""
    while True:
        s = input('Please guess what I got (an integer between 1 and 9) in hand?: ' )
        try:
            ii = int(s)
        except ValueError:
            # Bug fix: non-numeric input used to raise an uncaught
            # ValueError and crash the game instead of re-prompting.
            print('Wrong input!')
            continue
        if 1 <= ii <= 9:
            return ii
        print('Wrong input!')
def ask_for_again():
    """Ask whether to play again; True for 'y', False for 'e', else re-ask."""
    while True:
        choice = input('Wanna try again? (y)es or (e)xit : ')
        if choice == 'y':
            return True
        if choice == 'e':
            return False
        print('Wrong input!')
if __name__ == '__main__':
    # Game loop: `i` is the module-level random secret picked at import time.
    while True:
        a = ask_for_input()
        if a == i:
            print('You got it right, it is %s' % a)
            break
        else:
            print('Sorry, it\'s not right!')
            # Offer another round; exit when the user declines.
            r = ask_for_again()
            if not r:
                break
| apache-2.0 | Python | |
141bb79f45053e7bc5bfc4aa06e98d6e2788fc2c | Implement type reranker | filipdbrsk/NWRDomainModel | type_reranker.py | type_reranker.py | from dbpediaEnquirerPy import *
from KafNafParserPy import *
import os
def get_entity_sent(filename, parser, entity):
terms=[]
for ref in entity.get_references():
termbs=ref.get_span().get_span_ids()
if len(termbs)==0:
w.write(filename + "\t" + entity.get_id() + "\n")
return 100
print termbs, len(termbs)
start=termbs[0]
start_sent=parser.get_token(start).get_sent()
return start_sent
# Module-level log files: entities with empty spans, and files that failed
# to parse. Opened at import time and never explicitly closed.
w=open("empty_spans.txt", "w")
w2=open("empty_files.txt", "w")

if __name__=="__main__":
    my_dbpedia = Cdbpedia_enquirer()
    # Hard-coded corpus location (author's machine).
    path="/Users/filipilievski/annotations/"
    for root, dirs, files in os.walk(path):
        for filename in files:
            print filename
            if filename.endswith(".naf"):
                f=root + "/" + filename
                try:
                    parser=KafNafParser(f)
                except:
                    # Best-effort: record unparseable files and move on.
                    w2.write(filename + "\n")
                    continue
                for entity in parser.get_entities():
                    sent = get_entity_sent(filename, parser, entity)
                    print sent
                    # Only entities mentioned in the first six sentences.
                    if int(sent)<=6:
                        print entity.get_type()
                        ent_type=entity.get_type()
                        if ent_type!="MISC" and ent_type.strip()!="":
                            # Map NER labels to DBpedia ontology URIs.
                            if ent_type=="ORGANIZATION":
                                ent_type="http://dbpedia.org/ontology/Organisation"
                            elif ent_type=="PERSON":
                                ent_type="http://dbpedia.org/ontology/Person"
                            elif ent_type=="LOCATION":
                                ent_type="http://dbpedia.org/ontology/Place"
                            # Compare the mapped type against DBpedia's
                            # ontology labels for each external reference.
                            for extref in entity.get_external_references():
                                dblink=extref.get_reference()
                                link_types=my_dbpedia.get_dbpedia_ontology_labels_for_dblink(dblink)
                                print link_types
                                print (ent_type in link_types)
| apache-2.0 | Python | |
37e3380cbbef86f35f963ebfa3bdb07eb3d3ae3d | Add condor test | swift-lang/swift-e-lab,Parsl/parsl,swift-lang/swift-e-lab,Parsl/parsl,Parsl/parsl,Parsl/parsl | libsubmit/tests/test_integration/test_ssh/test_ssh_condor_earth.py | libsubmit/tests/test_integration/test_ssh/test_ssh_condor_earth.py | import os
import libsubmit
from libsubmit import SshChannel, Condor
import time
def test_1():
    """Integration smoke test: submit two sleep jobs to Condor over SSH,
    poll their status, then cancel them. Requires network access to the
    Notre Dame CRC head node and a valid $USER SSH identity."""
    # Site/provider configuration, including the Condor environment that
    # must be exported on the remote host before condor_* commands work.
    config = {
        "site": "T3_US_NotreDame",
        "execution": {
            "scriptDir": ".scripts",
            "environment": {
                'CONDOR_CONFIG': '/opt/condor/RedHat6/etc/condor_config',
                'CONDOR_LOCATION': '/opt/condor/RedHat6',
                'PATH': '/opt/condor/RedHat6/bin:${PATH}'
            },
            "block": {
                # Exercises quoting of environment values in submit files.
                "environment": {
                    'foo': 'spacey "quoted" value',
                    'bar': "this 'works' too",
                    'baz': 2
                },
                "nodes": 1,
                "walltime": "01:00:00",
                "options": {
                    "project": "cms.org.nd",
                    "condor_overrides": "",
                    "requirements": ""
                }
            }
        }
    }

    channel = SshChannel("earth.crc.nd.edu", os.environ['USER'])
    # Sanity checks: remote environment and condor_submit availability.
    ec, out, err = channel.execute_wait("printenv", envs=config['execution']['environment'])
    print("current env:", out)
    ec, out, err = channel.execute_wait("which condor_submit", envs=config['execution']['environment'])
    print('which condor_submit? ', out)

    provider = Condor(config=config, channel=channel)

    # Submit two identical two-minute sleep jobs a few seconds apart.
    ids = provider.submit('''echo "sleeping"
sleep 120
echo "Done sleeping" ''', 1)
    time.sleep(3)
    ids += provider.submit('''echo "sleeping"
sleep 120
echo "Done sleeping" ''', 1)
    time.sleep(3)

    stats = provider.status(ids)
    print(stats)

    # Clean up the queue regardless of the reported status.
    provider.cancel(ids)

if __name__ == "__main__":
    libsubmit.set_stream_logger()
    test_1()
| apache-2.0 | Python | |
ed31ebcd8c8058b3cc92a9fd4411577e9227605b | Add models.py with sqla settings and player, game classes | dropshot/dropshot-server | models.py | models.py | from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
import json
# Import-time side effect: opens/creates the SQLite file and echoes all SQL.
engine = create_engine('sqlite:///db.sqlite', echo=True)
Base = declarative_base()
class Player(Base):
    """A registered player; ``games`` collects every Game in which the
    player appears as either the winner or the loser (see primaryjoin)."""
    __tablename__ = 'players'
    id = Column(Integer, primary_key=True)
    username = Column(String, unique=True)
    # NOTE(review): nothing here hashes the password before storage — confirm
    # hashing happens at the application layer.
    password = Column(String)
    email = Column(String, unique=True)
    authToken = Column(String, unique=True)
    # Matches rows where this player is either the loser or the winner.
    games = relationship("Game", primaryjoin = "or_(Player.id==Game.loser_id, Player.id==Game.winner_id)")
    def to_json (self):
        """Serialize the public view: username and games-played count."""
        return json.dumps({ 'username' : self.username, 'gamesPlayed' : len(self.games)})
class Game(Base):
    """A finished game linking a winning and a losing Player."""
    __tablename__ = 'games'
    id = Column(Integer, primary_key=True)
    winner_id = Column(Integer, ForeignKey('players.id'))
    winner = relationship("Player", foreign_keys=[winner_id])
    loser_id = Column(Integer, ForeignKey('players.id'))
    loser = relationship("Player", foreign_keys=[loser_id])
    def to_json(self):
        """Serialize as id plus the winner's and loser's usernames."""
        return json.dumps({ 'id' : self.id , 'winner' : self.winner.username, 'loser' : self.loser.username })
# Import-time side effects: create all tables and open a module-level session.
Base.metadata.create_all(bind=engine)
Session = sessionmaker(bind=engine)
session=Session()
| mit | Python | |
546e9441b7dc6eb1575aab3c534f414aed0f0c3c | Create GC_content.py | chamkank/Python-DNA-Tool,zechamkank/Python-DNA-Tool | sequence_manipulation/GC_content.py | sequence_manipulation/GC_content.py | '''
Written by Cham K.
June 16th 2015
'''
from sequence_manipulation import nucleotide_count
def GC_content(sequence, percent=True):
    ''' (str, bool) -> (float)

    Return the GC-content of a DNA sequence as a non-rounded percentage
    (default) or as a ratio when *percent* is falsy.
    Can process upper-case and lower-case sequence input with or without
    whitespace characters (normalisation is delegated to nucleotide_count).

    >>> GC_content('G A T A C C')
    50.0
    >>> GC_content('G A A A T C A C C')
    44.44444444444444
    >>> GC_content('G A A A T C A C C', False)
    0.4444444444444444
    '''
    module = nucleotide_count
    A = module.nucleotide_count(sequence, 'A')
    T = module.nucleotide_count(sequence, 'T')
    C = module.nucleotide_count(sequence, 'C')
    G = module.nucleotide_count(sequence, 'G')
    # Raises ZeroDivisionError if the sequence contains no A/T/C/G
    # (unchanged behaviour).
    ratio = (G + C) / (A + T + C + G)
    # Bug fix: the original compared `percent == True` / `percent == False`
    # and silently returned None for any other truthy/falsy argument;
    # treat *percent* as a plain truth value instead.
    return ratio * 100 if percent else ratio
| mit | Python | |
150b1c07a55d2f3ce429cc0108fdaf653b9b7132 | Create models.py | gabimachado/cooktop-IoT,gabimachado/cooktop-IoT | models.py | models.py | from django.db import models
class Mode(models.Model):
    # Human-readable name of a cooktop mode.
    name = models.CharField(max_length=50)
class State(models.Model):
    # Human-readable name of a cooktop state.
    name = models.CharField(max_length=50)
| mit | Python | |
d26034963c0332346ea1b6b50b9ad3d637da7e36 | Add script to try and push stripe payment of unpaid invoices | SYNHAK/spiff,SYNHAK/spiff,SYNHAK/spiff | spiff/payment/management/commands/attempt_payment.py | spiff/payment/management/commands/attempt_payment.py | from django.core.management import BaseCommand
from spiff.payment.models import Invoice
import stripe
class Command(BaseCommand):
    """Management command: try to charge each unpaid invoice via Stripe."""
    help = 'Attempts to process an invoice via stripe'
    def handle(self, *args, **options):
        for invoice in Invoice.objects.unpaid().all():
            print invoice
            try:
                # Capture the balance before charging so we can report how
                # much was just paid.
                unpaid = invoice.unpaidBalance
                invoice.chargeStripe()
                print "Paid %s"%(unpaid)
            except stripe.error.CardError, e:
                # Card declines are expected; log and continue with the
                # remaining invoices. Other stripe errors propagate.
                print "Could not process card.", e
| agpl-3.0 | Python | |
c5ed01ce81b1c0e459d93bf26bf96cdeb80a0344 | Use specific notifications when possible. | typesupply/defconAppKit,typemytype/defconAppKit | Lib/defconAppKit/representationFactories/__init__.py | Lib/defconAppKit/representationFactories/__init__.py | from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
# Representation name -> (factory callable, destructive notifications).
# A destructiveNotifications value of None falls back to defcon's default
# invalidation behaviour for that representation.
_glyphFactories = {
    "defconAppKit.NSBezierPath" : (NSBezierPathFactory, None),
    "defconAppKit.NoComponentsNSBezierPath" : (NoComponentsNSBezierPathFactory, None),
    "defconAppKit.OnlyComponentsNSBezierPath" : (OnlyComponentsNSBezierPathFactory, None),
    "defconAppKit.GlyphCell" : (GlyphCellFactory, None),
    "defconAppKit.GlyphCellDetail" : (GlyphCellDetailFactory, None),
    "defconAppKit.OutlineInformation" : (OutlineInformationFactory, None),
    "defconAppKit.MenuImage" : (MenuImageRepresentationFactory, None),
}
# Image representations are invalidated on the specific image notifications.
_imageFactories = {
    "defconAppKit.NSImage" : (NSImageFactory, ["Image.FileNameChanged", "Image.ColorChanged", "Image.ImageDataChanged"])
}
def registerAllFactories():
    """Register every defconAppKit representation factory with defcon."""
    for cls, table in ((Glyph, _glyphFactories), (Image, _imageFactories)):
        for name, (factory, destructiveNotifications) in table.items():
            registerRepresentationFactory(
                cls, name, factory,
                destructiveNotifications=destructiveNotifications)
| from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : NSBezierPathFactory,
"defconAppKit.NoComponentsNSBezierPath" : NoComponentsNSBezierPathFactory,
"defconAppKit.OnlyComponentsNSBezierPath" : OnlyComponentsNSBezierPathFactory,
"defconAppKit.GlyphCell" : GlyphCellFactory,
"defconAppKit.GlyphCellDetail" : GlyphCellDetailFactory,
"defconAppKit.OutlineInformation" : OutlineInformationFactory,
"defconAppKit.MenuImage" : MenuImageRepresentationFactory,
}
_imageFactories = {
"defconAppKit.NSImage" : NSImageFactory
}
def registerAllFactories():
    """Register every defconAppKit representation factory with defcon.

    All factories are registered with destructiveNotifications=None, i.e.
    defcon's default invalidation behaviour.
    """
    for name, factory in _glyphFactories.items():
        registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=None)
    for name, factory in _imageFactories.items():
        registerRepresentationFactory(Image, name, factory, destructiveNotifications=None)
| mit | Python |
31d26cefd8f3d246437511c2b0852051d68cb2c8 | modify wod2vec | mathrho/word2vec,mathrho/word2vec,mathrho/word2vec,mathrho/word2vec | exampleWtoV.py | exampleWtoV.py | #/datastore/zhenyang/bin/python
import gensim, logging
import sys
import os
from xml.etree import ElementTree
def get_parentmap(tree):
    """Map every element in *tree* to the list of its parent elements.

    ElementTree elements do not keep a back-reference to their parent, so
    this walks the whole tree once and records, for each child, every
    element it appears under (normally exactly one).
    """
    parents = {}
    for node in tree.iter():
        for child in node:
            parents.setdefault(child, []).append(node)
    return parents
def main():
    """Load a pre-trained word2vec model and print a few word similarities."""
    ##############
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    # Alternative model files kept for quick switching:
    #pretrained_model = './vectors.bin'
    pretrained_model = '../freebase-vectors-skipgram1000-en.bin'
    #pretrained_model = '../GoogleNews-vectors-negative300.bin'
    # Freebase vectors use /en/<word> keys (not bare words).
    model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=True)
    #model['animal']
    #print model.similarity('/en/dog', '/en/cat')
    print model.similarity('/en/man', '/en/woman')
    print model.similarity('/en/man', '/en/male')

if __name__ == "__main__":
main() | apache-2.0 | Python | |
b68244965b4f69711f0c4d9d42f24e6b3f5742f4 | Add script to update images in the js code | richq/toggle-js-addon | update-images.py | update-images.py | #!/usr/bin/env python
import urllib
def img2base64(img):
    """Return the base64 encoding of the file at path *img* as one line.

    Bug fix: the original used ``bytes.encode("base64")``, which only exists
    on Python 2 (and inserted newlines that then had to be stripped), and it
    leaked the file handle. ``base64.b64encode`` produces the same digits
    with no newlines and works on both Python 2 and 3.
    """
    import base64
    with open(img, "rb") as fh:
        return base64.b64encode(fh.read()).decode("ascii")
# Encode both toolbar icons up front.
disabled_base64 = img2base64("assets/no-js.png")
enabled_base64 = img2base64("assets/jsenabled.png")

# Rewrite the two icon constants in bootstrap.js with fresh data URIs.
data = open('bootstrap.js')
output = []
for line in data.readlines():
    if line.startswith('const DISABLED_ICON'):
        line = 'const DISABLED_ICON = "data:image/png;base64,%s";\n' % disabled_base64
    if line.startswith('const ENABLED_ICON'):
        line = 'const ENABLED_ICON = "data:image/png;base64,%s";\n' % enabled_base64
    output.append(line)
data.close()

# Write the patched file back in place.
data = open('bootstrap.js', 'w')
for line in output:
    data.write(line)
data.close()

# Emit a small preview page showing both icons.
data = open('index.html', 'w')
data.write("<img src='data:image/png;base64,%s'>" % disabled_base64)
data.write("<img src='data:image/png;base64,%s'>" % enabled_base64)
data.close()
| bsd-2-clause | Python | |
ef526fe30b0bfcf82319195e76e4da01ab613ca3 | add initial spline | harmslab/epistasis,Zsailer/epistasis | epistasis/models/nonlinear/spline.py | epistasis/models/nonlinear/spline.py | import numpy as np
from .minimizer import Minimizer
from .ordinary import EpistasisNonlinearRegression
from epistasis.models import EpistasisLinearRegression
from epistasis.models.utils import (arghandler, FittingError)
from scipy.interpolate import UnivariateSpline
# -------------------- Minimizer object ------------------------
class SplineMinizer(Minimizer):
    """Minimizer that fits a univariate smoothing spline to (x, y) data.

    Parameters
    ----------
    k : int
        Degree of the spline (default cubic).
    s : float or None
        Smoothing factor passed through to scipy's UnivariateSpline.
    """
    def __init__(self, k=3, s=None):
        self.k = k
        self.s = s
        self.parameters = None

    def _sorter(self, x, y=None):
        """Return x (and y, if given) reordered by ascending x."""
        order = np.argsort(x)
        if y is None:
            return x[order]
        return x[order], y[order]

    def predict(self, x):
        """Evaluate the fitted spline at *x*. Requires a prior fit()."""
        return self._spline(x)

    def fit(self, x, y):
        """Fit a UnivariateSpline to the data (sorted by x first, as
        required by scipy)."""
        xs, ys = self._sorter(x, y)
        self._spline = UnivariateSpline(x=xs, y=ys, k=self.k, s=self.s)

    def transform(self, x, y):
        """Map observations back onto the linear scale: remove the spline's
        nonlinearity from y, relative to x."""
        return (y - self.predict(x)) + x
# -------------------- Minimizer object ------------------------
class EpistasisSpline(EpistasisNonlinearRegression):
    """Epistasis Spline method.

    Nonlinear epistasis model whose scale function is a smoothing spline of
    degree *k* with smoothing factor *s*.
    """
    def __init__(self, k=3, s=None, model_type="global"):
        # Set attributes.
        self.k = k
        self.s = s
        # Set up the function for fitting.
        self.minimizer = SplineMinizer(k=self.k, s=self.s)
        self.order = 1
        self.Xbuilt = {}
        # Construct parameters object (defines self.model_type used below).
        self.set_params(model_type=model_type)
        # Store model specs.
        self.model_specs = dict(model_type=self.model_type)
        # Set up additive and high-order linear model
        self.Additive = EpistasisLinearRegression(
            order=1, model_type=self.model_type)
| unlicense | Python | |
49cb9138a10e3fc9324f6c2e655cc4b8bd34276c | Add transliteration tool | Sakartu/excel-toolkit | extranslit.py | extranslit.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Usage:
translit_location.py INCOLUMN [--lang LANG] [--reverse] [--minlen MINLEN] INFILE...
Options:
INCOLUMN The number of the INCOLUMN to use, 1 based (A=1).
OUTCOLUMN The number of the OUTCOLUMN to put the result in (WILL OVERWRITE ALL VALUES), 1 based (A=1).
--minlen MINLEN The minimal length of values in the INCOLUMN to be a candidate for transliteration. [default: 0]
--lang LANG The language to use, Ukraine by default. [default: uk]
--reverse Go from foreign alphabet to latin, instead of the other way around
INFILE A list of infiles to process.
"""
from __future__ import absolute_import, unicode_literals
import os
from docopt import docopt
import openpyxl
from transliterate import translit
import util
__author__ = 'peter'
def main():
    """Transliterate one column of each input spreadsheet and save a copy
    with the transliterated value appended as a new last column."""
    args = docopt(__doc__)
    # Convert the 1-based column letter/number into a 0-based index.
    incol = util.col_index(args['INCOLUMN'])
    for f in args['INFILE']:
        print('Processing {0}...'.format(f))
        base, _ = os.path.splitext(f)
        wb = openpyxl.Workbook()
        w_sheet = wb.get_active_sheet()
        for idx, row in util.yield_rows(f, skipfirst=False):
            newval = translit(row[incol], args['--lang'], args['--reverse'])
            row.append(newval)
            w_sheet.append(row)
        # NOTE(review): openpyxl writes xlsx-format data; saving it with a
        # .xls extension makes Excel complain about the file type — confirm
        # whether '.xlsx' was intended.
        wb.save(base + '_transliterated.xls')
if __name__ == '__main__':
main() | mit | Python | |
0c50fc3838cd87f09e767727c41ee6c0771b396d | Create type.py | joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle | code/neominim/type.py | code/neominim/type.py | # -*- coding: utf-8 -*-
__author__ = "joshhartigan"
class NeoMinimError(Exception):
    """General-purpose error raised by neominim code."""
| bsd-2-clause | Python | |
b0c6bddfac0931de679972a4d8674269889e5cd2 | Correct set_node_options argument list | Tesora/tesora-project-config,Tesora/tesora-project-config,dongwenjuan/project-config,openstack-infra/project-config,noorul/os-project-config,openstack-infra/project-config,noorul/os-project-config,dongwenjuan/project-config | zuul/openstack_functions.py | zuul/openstack_functions.py | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
def set_log_url(item, job, params):
    """Set BASE_LOG_PATH and LOG_PATH in *params* for this change.

    Reviews key by change/patchset, ref updates by new revision, and
    anything else (e.g. periodic runs) by pipeline alone.
    """
    change = item.change
    if hasattr(change, 'refspec'):
        base = "%s/%s/%s/%s/" % (params['ZUUL_CHANGE'][-2:],
                                 params['ZUUL_CHANGE'],
                                 params['ZUUL_PATCHSET'],
                                 params['ZUUL_PIPELINE'])
    elif hasattr(change, 'ref'):
        base = "%s/%s/%s/" % (params['ZUUL_NEWREV'][:2],
                              params['ZUUL_NEWREV'],
                              params['ZUUL_PIPELINE'])
    else:
        base = params['ZUUL_PIPELINE'] + '/'
    params['BASE_LOG_PATH'] = base
    # Per-job path: job name plus a short (7-char) build UUID prefix.
    params['LOG_PATH'] = base + '%s/%s/' % (job.name, params['ZUUL_UUID'][:7])
def reusable_node(item, job, params):
    """Mark the node as reusable by dropping the single-use flag, if set."""
    params.pop('OFFLINE_NODE_WHEN_COMPLETE', None)
def set_node_options(item, job, params):
    """Per-job parameter hook: set log paths and node reuse policy."""
    # Set up log url parameter for all jobs.
    set_log_url(item, job, params)
    # Default to a single-use node; long-lived workers are re-enabled below.
    params['OFFLINE_NODE_WHEN_COMPLETE'] = '1'
    proposal_re = r'^.*(merge-release-tags|(propose|upstream)-(.*?)-(constraints-.*|updates?|update-liberty))$' # noqa
    release_re = r'^.*-(forge|jenkinsci|mavencentral|pypi-(both|wheel)|npm)-upload$'
    hook_re = r'^hook-(.*?)-(rtfd)$'
    # Jobs matching any of these run on the persistent proposal and
    # release workers, so keep the node online.
    if any(re.match(pattern, job.name)
           for pattern in (proposal_re, release_re, hook_re)):
        reusable_node(item, job, params)
| # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
def set_log_url(item, job, params):
    """Set BASE_LOG_PATH and LOG_PATH in *params*: reviews are keyed by
    change/patchset, ref updates by new revision, everything else by
    pipeline alone."""
    if hasattr(item.change, 'refspec'):
        path = "%s/%s/%s/%s/" % (
            params['ZUUL_CHANGE'][-2:], params['ZUUL_CHANGE'],
            params['ZUUL_PATCHSET'], params['ZUUL_PIPELINE'])
    elif hasattr(item.change, 'ref'):
        path = "%s/%s/%s/" % (
            params['ZUUL_NEWREV'][:2], params['ZUUL_NEWREV'],
            params['ZUUL_PIPELINE'])
    else:
        path = params['ZUUL_PIPELINE'] + '/'
    params['BASE_LOG_PATH'] = path
    # Per-job path: job name plus a short (7-char) build UUID prefix.
    params['LOG_PATH'] = path + '%s/%s/' % (job.name,
                                            params['ZUUL_UUID'][:7])
def reusable_node(item, job, params):
    """Mark the node as reusable by removing the single-use flag, if set."""
    if 'OFFLINE_NODE_WHEN_COMPLETE' in params:
        del params['OFFLINE_NODE_WHEN_COMPLETE']
def set_node_options(item, job, params, default):
    """Per-job parameter hook: set log paths and node reuse policy.

    ``default`` is unused here; presumably required by zuul's callback
    signature — confirm against the zuul configuration.
    """
    # Set up log url parameter for all jobs
    set_log_url(item, job, params)
    # Default to single use node. Potentially overriden below.
    # Select node to run job on.
    params['OFFLINE_NODE_WHEN_COMPLETE'] = '1'
    proposal_re = r'^.*(merge-release-tags|(propose|upstream)-(.*?)-(constraints-.*|updates?|update-liberty))$' # noqa
    release_re = r'^.*-(forge|jenkinsci|mavencentral|pypi-(both|wheel)|npm)-upload$'
    hook_re = r'^hook-(.*?)-(rtfd)$'
    # jobs run on the persistent proposal and release workers
    if (re.match(proposal_re, job.name) or re.match(release_re, job.name) or
        re.match(hook_re, job.name)):
        reusable_node(item, job, params)
| apache-2.0 | Python |
02d5370f09956077623ea76340e51d1cf6d10f93 | Add boolean primitive type support. | 4degrees/harmony | source/harmony/ui/widget/boolean.py | source/harmony/ui/widget/boolean.py | # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from PySide import QtGui
from .simple import Simple
class Boolean(Simple):
    '''Boolean control backed by a checkbox.'''

    def _constructControl(self, **kw):
        '''Build and return the underlying QCheckBox widget.'''
        return QtGui.QCheckBox()

    def _postConstruction(self, **kw):
        '''Hook checkbox state changes up to the value-changed notification
        after the base class finishes construction.'''
        super(Boolean, self)._postConstruction(**kw)
        self._control.stateChanged.connect(self._emitValueChanged)

    def value(self):
        '''Return the current checked state as a bool.'''
        return self._control.isChecked()

    def setValue(self, value):
        '''Check or uncheck the control to match *value*.'''
        self._control.setChecked(value)
| apache-2.0 | Python | |
2a93cd12a8e198cd6a09d2a077148999eb6c3739 | add source file collection_builder.py | ellenzinc/collection_builder | collection_builder.py | collection_builder.py | #!/usr/bin/python
#=====================================================================================
# conversion from .bib file to Jekyll collection files (ver 1.0)
#
# Copyright (c) <2015> <Haining Wang>
# https://github.com/ellenzinc/
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# usage example
# bib2jekyllcol("st_ref.bib","./pub/")
#=====================================================================================
import bibtexparser
import os
# function parse names for bib authors
def parse_authors(authorStr):
    """Convert a BibTeX ``author`` field into a display string.

    Each "Last, First" name is flipped to "First Last"; names without a
    single ", " separator are kept verbatim.  Multiple authors are joined
    as "A, B, and C" (a comma precedes "and" even for two authors).
    """
    flipped = []
    for raw_name in authorStr.split(" and "):
        parts = raw_name.split(", ")
        if len(parts) == 2:
            flipped.append(parts[1] + " " + parts[0])
        else:
            flipped.append(raw_name)
    if len(flipped) == 1:
        return flipped[0]
    return "".join(name + ", " for name in flipped[:-1]) + "and " + flipped[-1]
# function bib2jekyllcol
def bib2jekyllcol(inputFile, outputDir):
    """Convert a .bib file into a folder of Jekyll collection files.

    Parses *inputFile* with bibtexparser and writes one Markdown file per
    entry into *outputDir* (created if missing).  Each file is named after
    the entry's BibTeX key and contains YAML front matter listing the
    fields in ``type_list``; missing fields are emitted with empty values.
    """
    # read and parse bib file
    with open(inputFile) as bibtex_file:
        bibtex_str = bibtex_file.read()
    bib_database = bibtexparser.loads(bibtex_str)
    # front-matter keys, written in this fixed order for every entry
    type_list = ["type", "title", "author", "journal", "volume", "number",
                 "year", "month", "doi", "pages", "publisher", "booktitle"]
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)
    for entry in bib_database.entries:
        # os.path.join works whether or not outputDir has a trailing slash
        # (the original string concatenation required one).
        with open(os.path.join(outputDir, entry["id"] + '.md'), 'w') as f:
            f.write("---\n")
            for bib_type in type_list:
                # Fix: dict.has_key() exists only on Python 2; `in` works on both.
                if bib_type in entry:
                    if bib_type == "author":
                        f.write(bib_type + ": " + parse_authors(entry[bib_type]) + "\n")
                    else:
                        f.write(bib_type + ": " + entry[bib_type] + "\n")
                else:
                    f.write(bib_type + ":" + "\n")
            f.write("---")
| mit | Python | |
e3900a167b0f9bba731353bd8175b8a5ede9491b | add loggeduser migration | mercycorps/TolaActivity,toladata/TolaActivity,mercycorps/TolaActivity,toladata/TolaActivity,mercycorps/TolaActivity,mercycorps/TolaActivity,toladata/TolaActivity,toladata/TolaActivity | activitydb/migrations/0024_loggeduser.py | activitydb/migrations/0024_loggeduser.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-08-23 21:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the LoggedUser table (auto-generated by Django 1.9.4)."""
    dependencies = [
        ('activitydb', '0023_tolasites_tola_report_url'),
    ]
    operations = [
        migrations.CreateModel(
            name='LoggedUser',
            fields=[
                # username doubles as the primary key, so there is exactly
                # one row per logged-in user
                ('username', models.CharField(max_length=30, primary_key=True, serialize=False)),
                ('country', models.CharField(max_length=100)),
                ('email', models.CharField(default='user@mercycorps.com', max_length=100)),
            ],
        ),
    ]
| apache-2.0 | Python | |
c168efd883bcc1fc5ed8fe3c80de95db905bb468 | Add file for nontermianl adding when grammar is create | PatrikValkovic/grammpy | tests/grammar_creation_test/NonterminalAddingTest.py | tests/grammar_creation_test/NonterminalAddingTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NonterminalAddingTest(TestCase):
    """Placeholder for tests of nonterminal adding at grammar creation."""
    pass
if __name__ == '__main__':
main()
| mit | Python | |
307e4fda61f92e344bfd90c1a43f5a9076e7b832 | Add files for rule's invalid syntax validation | PatrikValkovic/grammpy | tests/rules_tests/isValid_tests/InvalidSyntaxTest.py | tests/rules_tests/isValid_tests/InvalidSyntaxTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class InvalidSyntaxTest(TestCase):
    """Placeholder for tests of a rule's invalid-syntax validation."""
    pass
if __name__ == '__main__':
main() | mit | Python | |
bb9a796abc1a1535c5113c260ac5a703c4cefb53 | Add Python source code | marcaube/prime-factors,marcaube/prime-factors,marcaube/prime-factors,marcaube/prime-factors,marcaube/prime-factors,marcaube/prime-factors | primes.py | primes.py | #!/usr/bin/env python
import sys
number = int(sys.argv[1])
candidate = 2
# Trial division: print every prime factor of `number`, with multiplicity,
# in ascending order.  NOTE: Python 2 syntax (print statement; `/=` is
# integer division here because both operands are ints).
while (number > 1):
    while (number % candidate == 0):
        print candidate
        number /= candidate
    candidate += 1
| mit | Python | |
8e48aec6d1e6aca9c2ce54108fface01d2cbde8f | add bottom Ekman layer test | tkarna/cofs | test/bottomFriction/test_ekman_bottom.py | test/bottomFriction/test_ekman_bottom.py | """
Bottom Ekman layer test
=======================
Steady state flow in a channel subject to bottom friction and rotation.
Vertical viscosity is assumed to be constant to allow simple analytical
solution.
"""
from thetis import *
import numpy
import pytest
def run_test(layers=25, tolerance=0.05, verify=True, **model_options):
    """Run the bottom Ekman layer case and optionally verify the result.

    Builds a doubly periodic channel of constant depth, drives the flow
    with a momentum source equivalent to a constant surface slope, and
    integrates for 4 hours (approximately steady state).  When *verify* is
    True, the 3D velocity is compared against the analytical Ekman spiral
    and an AssertionError is raised if the volume-normalized L2 error
    exceeds *tolerance*.  Extra keyword arguments override solver options.
    """
    physical_constants['z0_friction'].assign(1e-3)
    depth = 20.0
    surf_slope = -5.0e-6  # d elev/dx
    # set mesh resolution
    dx = 2500.0
    nx = 3
    lx = nx*dx
    ny = 3
    ly = ny*dx
    mesh2d = PeriodicRectangleMesh(nx, ny, lx, ly, direction='both',
                                   reorder=True)
    dt = 90.0
    t_end = 4 * 3600.0  # sufficient to reach ~steady state
    t_export = 450.0
    u_mag = 1.0
    f_coriolis = 1e-4
    # constant vertical viscosity, so the analytical solution below applies
    nu_v = 5e-4
    # bathymetry
    p1_2d = get_functionspace(mesh2d, 'CG', 1)
    bathymetry2d = Function(p1_2d, name='Bathymetry')
    bathymetry2d.assign(depth)
    # create solver
    solver_obj = solver.FlowSolver(mesh2d, bathymetry2d, layers)
    options = solver_obj.options
    options.element_family = 'dg-dg'
    options.timestepper_type = 'SSPRK22'
    options.solve_salinity = False
    options.solve_temperature = False
    options.use_implicit_vertical_diffusion = True
    options.use_bottom_friction = True
    options.use_turbulence = False
    options.coriolis_frequency = Constant(f_coriolis)
    options.vertical_viscosity = Constant(nu_v)
    options.vertical_diffusivity = Constant(nu_v)
    options.simulation_export_time = t_export
    options.timestepper_options.use_automatic_timestep = False
    options.timestep = dt
    options.simulation_end_time = t_end
    options.horizontal_velocity_scale = Constant(u_mag)
    options.no_exports = True
    # caller-supplied overrides (e.g. element_family, timestepper_type)
    options.update(model_options)
    solver_obj.create_function_spaces()
    # drive flow with momentum source term equivalent to constant surface slope
    pressure_grad = -physical_constants['g_grav'] * surf_slope
    options.momentum_source_2d = Constant((pressure_grad, 0))
    solver_obj.create_equations()
    v_init_2d = -0.49
    solver_obj.assign_initial_conditions(uv_2d=Constant((0, v_init_2d)))
    # x and y are unused; only z enters the analytical solution below
    x, y, z = SpatialCoordinate(solver_obj.mesh)
    solver_obj.iterate()
    if verify:
        # analytical solution (assuming no-slip bottom)
        v_max = 0.4905  # u = g/f d(elev)/dx
        d = sqrt(2*nu_v/f_coriolis)
        z_b = (depth + z)/d
        v_expr = -v_max * (1 - exp(-z_b)*cos(z_b))
        u_expr = v_max * exp(-z_b)*sin(z_b)
        uv_ana_expr = as_vector((u_expr, v_expr, 0))
        uv_ana = Function(solver_obj.function_spaces.P1DGv, name='solution')
        uv_ana.interpolate(uv_ana_expr)
        uv_p1_dg = Function(solver_obj.function_spaces.P1DGv, name='velocity p1dg')
        uv_p1_dg.project(solver_obj.fields.uv_3d + solver_obj.fields.uv_dav_3d)
        volume = lx*ly*depth
        uv_l2_err = errornorm(uv_ana_expr, uv_p1_dg)/numpy.sqrt(volume)
        assert uv_l2_err < tolerance, 'L2 error is too large: {:} > {:}'.format(uv_l2_err, tolerance)
        print_output('L2 error {:.4f} PASSED'.format(uv_l2_err))
    return solver_obj
@pytest.fixture(params=['dg-dg', 'rt-dg'])
def element_family(request):
    """Parametrized fixture: finite element family passed to the solver."""
    return request.param
@pytest.fixture(params=['LeapFrog', 'SSPRK22'])
def timestepper_type(request):
    """Parametrized fixture: time integrator passed to the solver."""
    return request.param
@pytest.mark.parametrize("nlayers,max_err",
                         [(25, 0.035), (5, 0.065)],
                         ids=['nz25', 'nz5'])
def test_bottom_friction(nlayers, max_err, element_family, timestepper_type):
    """Coarser vertical grids are allowed a larger L2 error tolerance."""
    run_test(nlayers, tolerance=max_err, verify=True,
             element_family=element_family, timestepper_type=timestepper_type)
| mit | Python | |
83470a90d8f765438ec77a61527e4d3d8963890f | add test for the fastq_count script | sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana | test/scripts/test_sequana_fastq_count.py | test/scripts/test_sequana_fastq_count.py | from sequana.scripts import fastq_count
from nose.plugins.attrib import attr
from sequana import sequana_data
#@attr("skip")
class TestPipeline(object):
    """Smoke test for the sequana_fastq_count command-line entry point."""
    @classmethod
    def setup_class(klass):
        """This method is run once for each class before any tests are run"""
        klass.prog = "sequana_fastq_count"
        klass.params = {'prog': klass.prog}
    def test_input(self):
        # Run the counter on a FASTQ sample bundled with sequana.
        # NOTE(review): the return value is not asserted; this only checks
        # that main() runs without raising.
        filename = sequana_data('Hm2_GTGAAA_L005_R2_001.fastq.gz')
        df = fastq_count.main([self.prog, '--input', filename])
| bsd-3-clause | Python | |
c6e2732575993f76657ba15b155e6cfe45aa60c4 | add static data generator | elly/starbase,eve-val/starbase,elly/starbase,shibdib/starbase,eve-val/starbase,eve-val/starbase,shibdib/starbase | gen-static.py | gen-static.py | #!/usr/bin/env python
import json
import sqlite3
class SDE:
    """Thin read-only wrapper around an EVE Static Data Export SQLite dump.

    NOTE: written for Python 2 -- the ``map()`` calls return lists there,
    and callers (e.g. ``dump_mods``) rely on list methods like remove().
    """
    def __init__(self, db):
        # db: an open sqlite3 connection to the SDE database
        self.db = db
    def groups_in_category(self, category):
        """Return the names of all groups in the named category."""
        c = self.db.cursor()
        c.execute("SELECT categoryID FROM invCategories WHERE categoryName = ?", (category,))
        category_id = c.fetchone()[0]
        c.execute("SELECT groupName FROM invGroups WHERE categoryID = ?", (category_id,))
        return map(lambda x: x[0], c.fetchall())
    def group_id(self, group):
        """Return the numeric groupID for a group name."""
        c = self.db.cursor()
        c.execute("SELECT groupID FROM invGroups WHERE groupName = ?", (group,))
        return c.fetchone()[0]
    def items_in_group(self, group):
        """Return the names of all published items in the named group."""
        group_id = self.group_id(group)
        # dirty hack here - some of the QA towers are marked published, so
        # filter them by name instead of published field.
        c = self.db.cursor()
        c.execute("""SELECT typeName FROM invTypes WHERE groupID = ?
            AND typeName NOT LIKE 'QA %' AND published = 1""", (group_id,))
        return map(lambda x: x[0], c.fetchall())
    def item_attribute(self, item_name, attribute_name):
        """Return a dogma attribute value for an item, or None if unset.

        The SDE stores each value in either valueInt or valueFloat;
        whichever is non-null (non-zero) is returned.
        """
        c = self.db.cursor()
        c.execute("SELECT typeID FROM invTypes WHERE typeName = ?", (item_name,))
        item_id = c.fetchone()[0]
        c.execute("SELECT attributeID FROM dgmAttributeTypes WHERE attributeName = ?", (attribute_name,))
        attribute_id = c.fetchone()[0]
        c.execute("SELECT valueInt, valueFloat FROM dgmTypeAttributes WHERE typeID = ? AND attributeID = ?",
            (item_id, attribute_id))
        values = c.fetchone()
        if not values:
            return None
        return values[0] or values[1]
    def item_volume(self, item_name):
        """Return the volume (m3) of the named item."""
        c = self.db.cursor()
        c.execute("SELECT volume FROM invTypes WHERE typeName = ?", (item_name,))
        return c.fetchone()[0]
def bonused_weapon_type(sde, tower_type):
    """Return the weapon category a control tower gives bonuses to.

    Checks the tower's control-tower bonus attributes and returns the
    first matching category ('energy', 'missile', 'hybrid' or
    'projectile'), or None when the tower has no weapon bonus.
    """
    def hasbonus(name):
        return sde.item_attribute(tower_type, 'controlTower%sBonus' % name)
    # Checked in the same order as the original if-chain.
    bonus_table = (
        ('energy', ('LaserDamage', 'LaserOptimal')),
        ('missile', ('MissileROF', 'MissileVelocity')),
        ('hybrid', ('HybridDamage', 'HybridOptimal')),
        ('projectile', ('ProjectileROF', 'ProjectileFalloff', 'ProjectileOptimal')),
    )
    for weapon, bonus_names in bonus_table:
        if any(hasbonus(name) for name in bonus_names):
            return weapon
    return None
def dump_towers(sde):
    """Return a list of dicts describing every control tower type.

    Each dict has 'name', 'power' and 'cpu'; towers whose attributes show
    a weapon bonus additionally get a 'weapon_type' key.
    """
    towers = []
    tower_types = sde.items_in_group('Control Tower')
    for ty in tower_types:
        t = {'name': ty}
        t['power'] = sde.item_attribute(ty, 'powerOutput')
        t['cpu'] = sde.item_attribute(ty, 'cpuOutput')
        wt = bonused_weapon_type(sde, ty)
        if wt:
            t['weapon_type'] = wt
        towers.append(t)
    return towers
def mod_weapon_type(sde, type_name):
    """Return the weapon category of a structure module, or None.

    The category is inferred from the module's first charge group
    (chargeGroup1 dogma attribute); modules taking no recognized charge
    yield None.
    """
    charge_group_id = sde.item_attribute(type_name, 'chargeGroup1')
    # Checked in the same order as the original if-chain.
    group_to_weapon = (
        ('Projectile Ammo', 'projectile'),
        ('Hybrid Charge', 'hybrid'),
        ('Frequency Crystal', 'energy'),
        ('Torpedo', 'missile'),
        ('Cruise Missile', 'missile'),
        ('Citadel Torpedo', 'missile'),
    )
    for group_name, weapon in group_to_weapon:
        if charge_group_id == sde.group_id(group_name):
            return weapon
    return None
def dump_mods(sde):
    """Return a list of dicts describing every tower module type.

    Walks all groups of the 'Structure' category (minus the towers
    themselves) and records each item's name, power and CPU needs, plus a
    'weapon_type' key when the module takes a recognized charge.
    """
    mods = []
    mod_groups = sde.groups_in_category('Structure')
    # hack: control towers are themselves under the structure group but aren't
    # tower modules. We remove that group here to avoid including the towers in the module array.
    mod_groups.remove('Control Tower')
    for gr in mod_groups:
        mod_types = sde.items_in_group(gr)
        for ty in mod_types:
            t = {'name': ty}
            # modules without fitting costs are treated as free (0)
            t['power'] = sde.item_attribute(ty, 'power') or 0
            t['cpu'] = sde.item_attribute(ty, 'cpu') or 0
            wt = mod_weapon_type(sde, ty)
            if wt:
                t['weapon_type'] = wt
            mods.append(t)
    return mods
# Build the static data set from a local copy of the EVE SDE dump and
# print it as pretty JSON on stdout (Python 2 print statement below).
conn = sqlite3.connect('sqlite-latest.sqlite')
sde = SDE(conn)
towers = dump_towers(sde)
mods = dump_mods(sde)
print json.dumps({'towers': towers, 'mods': mods}, indent=4, sort_keys=True)
1e15f0953076810c1ccd2d04c258d3fb0eba71e1 | Create sample.py | tonythms/project2,tonythms/project2 | sample.py | sample.py | apache-2.0 | Python | ||
2900f014bf7d8bdf5b7f4fe10d844cd516d0372a | Add basic views tests | ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo | web/web/tests.py | web/web/tests.py | from django.test import Client, TestCase
from django.core.urlresolvers import reverse
# Create your tests here.
class BasicViewsTestCase(TestCase):
    """Check that public views are reachable anonymously and private views
    redirect (302, presumably to the login page) for anonymous users."""
    fixtures = []
    def setUp(self):
        # (view name, reverse() kwargs) pairs; pk=1 values are dummies --
        # the private views are expected to redirect before resolving them.
        self.public_views = [('login', dict()), ('logout', dict())]
        self.private_views = [('homepage', dict()),
                              ('problem_set_detail', {'problem_set_pk': 1}),
                              ('problem_set_attempts', {'problem_set_pk': 1}),
                              ('problem_set_move', {'problem_set_pk': 1, 'shift': 1}),
                              ('course_detail', {'course_pk': 1}),
                              ('problem_solution', {'problem_pk': 1}),
                              ('problem_attempt_file', {'problem_pk': 1}),
                              ('problem_edit_file', {'problem_pk': 1}),
                              ('problem_move', {'problem_pk': 1, 'shift': 1}),
                              ]
        self.client = Client()
    def testPublicUnauthenticated(self):
        # Public views must render (200) without authentication.
        for view, args in self.public_views:
            response = self.client.get(reverse(view, kwargs=args))
            self.assertEqual(response.status_code, 200,
                             "Status code {0} instead of 200".format(response.status_code))
    def testPrivateUnauthenticated(self):
        # Private views must redirect (302) for anonymous users.
        for view, args in self.private_views:
            response = self.client.get(reverse(view, kwargs=args))
            self.assertEqual(response.status_code, 302,
                             "Status code {0} instead of 302".format(response.status_code))
| agpl-3.0 | Python | |
81c4f9300e0cef204aaf2a0205ebf21be23f9414 | add strings to return ehllo world as output | ctsit/J.O.B-Training-Repo-1 | hellomarly.py | hellomarly.py | #This is my hello marly.....
print 'Hello Marly'
| apache-2.0 | Python | |
b3962e17bbc0328faa928d1eaed57de40cc28ee0 | add heroku_api.py | Impactstory/oadoi,Impactstory/sherlockoa,Impactstory/oadoi,Impactstory/oadoi,Impactstory/sherlockoa | heroku_api.py | heroku_api.py | import heroku
import argparse
import os
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning, SNIMissingWarning, InsecurePlatformWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(SNIMissingWarning)
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
# to see all environment vavialbles: heroku run printenv
# to get my process: os.getenv("DYNO")
def run(app_name, process_name_start_to_restart, command):
    """Inspect or restart dynos of a Heroku app.

    command == "memory": scan recent logs of every process for R14/R15
    memory-quota errors and print the offending lines.
    command == "restart": restart every dyno whose name prefix (the part
    before the dot, e.g. "worker" in "worker.3") equals
    *process_name_start_to_restart*.

    Requires the HEROKU_API_KEY environment variable.  Python 2 syntax
    (print statements, u"" literals).
    """
    cloud = heroku.from_key(os.getenv("HEROKU_API_KEY"))
    app = cloud.apps[app_name]
    if command=="memory":
        # need to have done this for it to work: heroku labs:enable log-runtime-metrics
        for process in app.processes:
            process_name = process.process
            print process_name
            for line in app.logs(num=100000, ps=process_name).split("\n"):
                try:
                    if u"Error R14 (Memory quota exceeded)" in line or u"Error R15 (Memory quota vastly exceeded)" in line:
                        print line
                except Exception:
                    pass
    if command=="restart":
        print(u"restarting {app_name}, processes that start with {process_name}".format(
            app_name=app_name, process_name=process_name_start_to_restart))
        for process in app.processes:
            process_name = process.process
            process_name_start = process_name.split(".")[0]
            if process_name_start==process_name_start_to_restart:
                process.restart()
                print(u"upon request in heroku_api, restarted {process_name}".format(
                    process_name=process_name))
if __name__ == "__main__":
    # CLI wrapper: forwards --app/--process/--command straight to run().
    parser = argparse.ArgumentParser(description="Run stuff")
    parser.add_argument('--app', default=None, type=str, help="oadoi")
    parser.add_argument('--process', default=None, type=str, help="process")
    parser.add_argument('--command', default=None, type=str, help="restart")
    args = vars(parser.parse_args())
    print args
    print u"heroku_api.py starting."
    run(args["app"], args["process"], args["command"])
| mit | Python | |
257d0c9e8e6a8b571bfc896b2197f303251173df | Create p.py | pythoneiros/PyQuantio | webscraping/p.py | webscraping/p.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
t = urllib2.urlopen('http://www.gmasson.com.br/').read()
# TAG
# Naive scrape: print the raw contents of every <p ...>...</p> element.
# NOTE: string splitting breaks on nested/unclosed tags; Python 2 only
# (urllib2, print statement).
tags = t.split('<p')[1:]
tags = [ tag.split('</p>')[0] for tag in tags ]
for i in tags:
    print i
| mit | Python | |
01710f18efbe29dc5cf187726d5c686beec7e6e7 | Add helper script for getting plaso timeline in to timesketch | armuk/timesketch,armuk/timesketch,google/timesketch,armuk/timesketch,google/timesketch,armuk/timesketch,google/timesketch,lockhy/timesketch,google/timesketch,lockhy/timesketch,lockhy/timesketch,lockhy/timesketch | utils/add_plaso_timeline.py | utils/add_plaso_timeline.py | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add Plaso timeline to timesketch"""
from pyelasticsearch import ElasticSearch
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timesketch.settings")
from django.conf import settings
from django.contrib.auth.models import User
from timesketch.models import Sketch
from timesketch.models import Timeline
from timesketch.models import SketchTimeline
# NOTE(review): id=2 hard-codes the owning user -- confirm that this is
# the intended account on every deployment.
user = User.objects.get(id=2)
# CLI: <es_server> <es_port> <timeline name> <es index>
es_server = sys.argv[1]
es_port = sys.argv[2]
name = sys.argv[3]
index = sys.argv[4]
es = ElasticSearch("http://%s:%s" % (es_server, es_port))
# timesketch_label must be a nested field so per-sketch labels on events
# can be queried independently.
mapping = {
    "plaso_event": {
        u'properties': {
            u'timesketch_label': {
                "type": "nested"}
        }
    },
}
es.put_mapping(index, "plaso_event", mapping)
# Register the ES index as a (public) timeline in the Timesketch DB.
timeline = Timeline.objects.create(owner=user, acl_public=True, title=name, description=name, datastore_index=index)
| apache-2.0 | Python | |
2c0fc3387a6dbd54bbcd4c47952ce8739d0b2152 | Add super-simple deduplication filter that uses a dictionary | ryansb/zaqar-webscraper-demo | dedup_worker.py | dedup_worker.py | # Pull URL
# Strip query string
# Query exists
# IFN save to sqlite
# IFN push to queue
# IFY do nothing
# URL -> True once forwarded; grows without bound for the process lifetime.
seen = {}
if __name__ == '__main__':
    from helpers import client
    ingest = client.queue('ingest')
    scrape = client.queue('scrape')
    # Forward each distinct message body from 'ingest' to 'scrape' exactly
    # once (in-memory dedup).  Python 2 print statements.
    while True:
        claimed = ingest.claim(ttl=180, grace=60)
        send = []
        for msg in claimed:
            msg.delete()
            if seen.get(msg.body):
                print "skipping %s, seen %d pages" % (msg.body, len(seen.keys()))
                continue
            print "Sending along %s" % msg.body
            seen[msg.body] = True
            send.append({'body': msg.body, 'ttl': 180})
        if len(send): scrape.post(send)
| mit | Python | |
d62ffdce6df8cf848c1b1b198fc65d4dc0d70a1e | Add MergeSort.py | besirkurtulmus/AdvancedAlgorithms | Sorting/MergeSort.py | Sorting/MergeSort.py | # @auther Besir Kurtulmus
'''
The MIT License (MIT)
Copyright (c) 2014 Ahmet Besir Kurtulmus
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
def Merge(list1, list2):
    """
    Description: Merges two sorted lists into one sorted list.
        Stable: on equal keys, elements of list1 come first.
    Args:
        list1 (List type): The first sorted list.
        list2 (List type): The second sorted list.
    Examples:
        >>> a = [1,3,5,7,9]
        >>> b = [2,4,6,8,10]
        >>> Merge(a,b)
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    """
    # Fix: the original recursive version sliced both lists on every step
    # (O(n^2) work) and recursed once per element, overflowing Python's
    # recursion limit (~1000) on modest inputs.  This iterative two-pointer
    # merge is O(n) and produces the same (stable) ordering.
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list1[i] <= list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # At most one of these is non-empty.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
def MergeSort(l):
    """
    Description: Sorts a list with top-down merge sort (stable).
    Args:
        l (List type): The list to sort; not modified in place.
    Examples:
        >>> MergeSort([3, 1, 2])
        [1, 2, 3]
    """
    if len(l) > 1:
        # Fix: len(l)/2 is true division on Python 3 and makes the slice
        # raise TypeError; // is floor division on both Python 2 and 3.
        mid = len(l) // 2
        return Merge(MergeSort(l[:mid]), MergeSort(l[mid:]))
    else:
        return l
c8da605c3eef2cc205a47851e9c3e5b7c2a60f00 | Create nltk13.py | PythonProgramming/Natural-Language-Processing-NLTK-Python-2.7 | nltk13.py | nltk13.py | from __future__ import division
import sqlite3
import time
conn = sqlite3.connect('knowledgeBase.db')
# return plain byte strings instead of unicode (Python 2)
conn.text_factory = str
c = conn.cursor()
# filled by loadWordArrays() from the wordVals table (value -1 / +1)
negativeWords = []
positiveWords = []
sql = "SELECT * FROM wordVals WHERE value = ?"
def loadWordArrays():
    # Populate the module-level word lists from the knowledge base:
    # value == -1 rows are negative words, value == 1 rows are positive.
    # ([(-1)] is just [-1]; the parentheses are a no-op, not a tuple.)
    for negRow in c.execute(sql, [(-1)]):
        negativeWords.append(negRow[0])
    print 'negative words loaded'
    for posRow in c.execute(sql, [(1)]):
        positiveWords.append(posRow[0])
    print 'positive words loaded'
def testPositiveSentiment():
    # Score every line of positiveSentiment.txt by substring-matching the
    # word lists (+1 per positive word, -1 per negative word) and report
    # what fraction of lines come out with a positive score.
    # NOTE: `from __future__ import division` at the top of the file makes
    # the accuracy computation below a float division.
    readFile = open('positiveSentiment.txt','r').read()
    splitRead = readFile.split('\n')
    totalExamples = len(splitRead)
    posExamplesFound = 0
    for eachPosExample in splitRead:
        sentCounter = 0
        for eachPosWord in positiveWords:
            if eachPosWord in eachPosExample:
                sentCounter += 1
        for eachNegWord in negativeWords:
            if eachNegWord in eachPosExample:
                sentCounter -= 1
        #print eachPosExample
        #print sentCounter
        #print '____________________'
        #time.sleep(5)
        if sentCounter > 0:
            posExamplesFound += 1
    print ''
    print '________________________________'
    print ' Positive Sentiment Accuracy Results:'
    print 'found examples:', posExamplesFound
    print 'out of a total:', totalExamples
    print 'postivie accuracy:',posExamplesFound/totalExamples*100
    print '________________________________'
def testNegativeSentiment():
    # Mirror of testPositiveSentiment for negativeSentiment.txt, counting
    # how many lines score negative.
    # NOTE(review): negative words are weighted -8 here (vs. -1 in the
    # positive test), heavily biasing lines toward a negative score --
    # confirm this asymmetry is intentional.
    readFile2 = open('negativeSentiment.txt','r').read()
    splitRead2 = readFile2.split('\n')
    totalExamples = len(splitRead2)
    negExamplesFound = 0
    for eachNegExample in splitRead2:
        sentCounter2 = 0
        for eachPosWord in positiveWords:
            if eachPosWord in eachNegExample:
                sentCounter2 += 1
        for eachNegWord in negativeWords:
            if eachNegWord in eachNegExample:
                sentCounter2 -= 8
        #print eachNegExample
        #print sentCounter2
        #print '____________________'
        #time.sleep(5)
        if sentCounter2 < 0:
            negExamplesFound += 1
    print ''
    print ''
    print ''
    print '________________________________'
    print ' Negative Sentiment Accuracy Results:'
    print 'found negative examples:', negExamplesFound
    print 'out of a total of :', totalExamples
    print 'negative accuracy:',negExamplesFound/totalExamples*100
    print '________________________________'
# Load the lexicons, then evaluate both test corpora.
loadWordArrays()
testPositiveSentiment()
testNegativeSentiment()
| mit | Python | |
4ec6852368a79d272da145cdb3aa34620cbf0573 | Create a.py | y-sira/atcoder,y-sira/atcoder | abc006/a.py | abc006/a.py | n = int(input())
# Print "YES" when n is a multiple of 3 or has the digit 3 anywhere.
contains_three = '3' in str(n)
print('YES' if n % 3 == 0 or contains_three else 'NO')
| mit | Python | |
f25fe8cd315cfd08e5c717a2706bf85fa0fbbbe2 | Add LBFGS tomography example | aringh/odl,kohr-h/odl,aringh/odl,kohr-h/odl,odlgroup/odl,odlgroup/odl | examples/solvers/lbfgs_tomography.py | examples/solvers/lbfgs_tomography.py | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Tomography using the `bfgs_method` solver.
Solves the optimization problem
min_x ||A(x) - g||_2^2
Where ``A`` is a parallel beam forward projector, ``x`` the result and
``g`` is given noisy data.
"""
import numpy as np
import odl
# --- Set up the forward operator (ray transform) --- #
# Discrete reconstruction space: discretized functions on the rectangle
# [-20, 20]^2 with 200 samples per dimension.
reco_space = odl.uniform_discr(
    min_pt=[-20, -20], max_pt=[20, 20], shape=[200, 200], dtype='float32')
# Make a parallel beam geometry with flat detector
# Angles: uniformly spaced, n = 400, min = 0, max = 2 * pi
angle_partition = odl.uniform_partition(0, 2 * np.pi, 400)
# Detector: uniformly sampled, n = 400, min = -30, max = 30
detector_partition = odl.uniform_partition(-30, 30, 400)
geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)
# The implementation of the ray transform to use, options:
# 'scikit'                    Requires scikit-image (can be installed by
#                             running ``pip install scikit-image``).
# 'astra_cpu', 'astra_cuda'   Require astra tomography to be installed.
#                             Astra is much faster than scikit. Webpage:
#                             https://github.com/astra-toolbox/astra-toolbox
impl = 'astra_cuda'
# Ray transform aka forward projection.
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl)
# --- Generate artificial data --- #
# Create phantom
discr_phantom = odl.phantom.shepp_logan(reco_space, modified=True)
# Create sinogram of forward projected phantom with noise
# (noise line below is currently disabled, so the data is exact)
data = ray_trafo(discr_phantom)
# data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1
# Create objective functional: f(x) = ||A(x) - g||_2^2
obj_fun = odl.solvers.L2NormSquared(ray_trafo.range) * (ray_trafo - data)
# Create line search
line_search = odl.solvers.BacktrackingLineSearch(obj_fun)
# Optionally pass callback to the solver to display intermediate results
callback = (odl.solvers.CallbackPrintIteration() &
            odl.solvers.CallbackShow())
# Pick parameters
maxiter = 30
maxcor = 5  # only save some vectors (Limited memory)
# Choose a starting point
x = ray_trafo.domain.zero()
# Run the algorithm; x is updated in place
odl.solvers.bfgs_method(
    obj_fun, x, line_search=line_search, maxiter=maxiter, maxcor=maxcor,
    callback=callback)
# Display images
discr_phantom.show(title='original image')
data.show(title='sinogram')
x.show(title='reconstructed image', show=True)
| mpl-2.0 | Python | |
51c6335718e5aca75d1c8e7e1fa08e396aa8a557 | Create Valid_Parentheses.py | UmassJin/Leetcode | Array/Valid_Parentheses.py | Array/Valid_Parentheses.py | Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
class Solution:
    # @return a boolean
    def isValid(self, s):
        """Return True when every bracket in s is matched and properly nested."""
        pairs = {'(': ')', '[': ']', '{': '}'}
        pending = []
        for ch in s:
            if ch in pairs:
                pending.append(ch)
            elif not pending or pairs[pending.pop()] != ch:
                return False
        return not pending
    def isValid_1(self, s):
        """Alternative matcher kept for reference; same contract as isValid."""
        if len(s) == 1:
            return False
        if not s:
            return True
        opened = []
        closer_to_opener = {')': '(', ']': '[', '}': '{'}
        for ch in s:
            if ch in ('(', '{', '['):
                opened.append(ch)
            elif ch in closer_to_opener and opened and opened.pop() == closer_to_opener[ch]:
                continue
            else:
                return False
        return opened == []
| mit | Python | |
dbba2e9b541af2b95cbc3f9f306b3062be81460e | Create AnimationBase.py | gitoni/animation | AnimationBase.py | AnimationBase.py | # -*- coding: utf_8 -*-
"""
Created on 13.07.2014
@author: gitoni
"""
import subprocess as sp
import os
import webbrowser as wb
class Animation(object):
    """
    A class for creating animations with ImageMagick.
    Paths need to be adapted to your system.
    This here works on Windows, assuming that ImageMagick and Msys is installed and in the Sys Path.
    """
    def __init__(self, workdir, im_path = r"C:\Program Files (x86)\ImageMagick-6.8.7-Q16\convert.exe", browser = True):
        """
        Define workdir and path to ImageMagick.
        Set option for opening in browser when finished.

        workdir: directory containing the *.png frames (also receives output)
        im_path: full path to ImageMagick's convert.exe
        browser: open GIF results in the default web browser when done
        """
        self.workdir = workdir
        self.browser = browser
        self.im_path = im_path
        print("ready for animation...")
    def create_animation(self, aniname, gif = False):
        """Assemble workdir's *.png frames into aniname.mp4 (or .gif), then
        delete the frames.  Errors are printed and swallowed (best effort)."""
        output_dir = self.workdir
        try:
            print("creating animation...")
            aniformat = ".mp4"
            if gif == True:
                aniformat = ".gif"
            # convert expands the "*.png" glob itself (no shell involved)
            args = [self.im_path, "-delay", "20", "*.png", "-quality", "100%", "-compress", "None", "-loop", "0", aniname + aniformat]
            p = sp.Popen(args, cwd = output_dir, stdin=sp.PIPE, stdout=sp.PIPE)
            t = p.communicate()
            print("remove pngs...")
            # NOTE(review): without a shell, "*.png" is passed to rm.exe
            # literally; this relies on rm.exe (Msys) doing its own glob
            # expansion -- confirm frames are actually removed.
            args = ["rm.exe", "*.png"]
            p = sp.Popen(args,cwd = output_dir, stdin=sp.PIPE, stdout=sp.PIPE)
            t = p.communicate()
            print(t)
        except Exception as e:
            print(e)
        if gif == True and self.browser == True:
            pa = os.path.join(output_dir, aniname + aniformat)
            wb.open("file:///" + pa)
#...............................................................................
if __name__ == "__main__":
    # Demo: render a growing scatter plot frame-by-frame with matplotlib,
    # then stitch the frames into an animated GIF.  Paths are hard-coded
    # for a Windows machine (E:\tmp\ani).
    def create_pngframes():
        import pylab as pl
        liner = [3,5,6,7,8,9,4,7,3,4,6,8,5,3,2,3,2,3,5,7,9,3,5,6,7,8,9,4,7,3,4,6,8,5,3,2,3,2,3,5,7,9]
        for n, ele in enumerate(liner):
            pl.xlim(0,36)
            pl.ylim(0,10)
            pl.plot(range(n),liner[:n],"go", )
            pl.savefig(r"E:\tmp\ani\tmp_" + str(n).zfill(3) + ".png")
    create_pngframes()
    workdir=r"E:\tmp\ani"
    obj = Animation(workdir)
    obj.create_animation("testani1",gif = True)
| mit | Python | |
83632d537a033deb017dbfeab02ba4e1073e309a | add export filters | qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq | couchforms/filters.py | couchforms/filters.py | '''
Out of the box filters you can use to filter your exports
'''
def instances(doc):
    """
    Keep only real submissions: True when the document's doc_type is
    "XFormInstance" (i.e. not a duplicate or an error document).
    """
    doc_type = doc["doc_type"]
    return doc_type == "XFormInstance"
def duplicates(doc):
    """
    True when the document is a duplicate submission ("XFormDuplicate").
    """
    return "XFormDuplicate" == doc["doc_type"]
def problem_forms(doc):
    """
    True for any non-instance document (duplicates, errors, etc.).
    """
    doc_type = doc["doc_type"]
    return doc_type != "XFormInstance"
7e8ff3971c21335468a683edcb9efb260c49bd61 | Add packet class | umbc-hackafe/x10-controller | packet.py | packet.py | class Packet:
PACKET_LENGTH = 4
def decode(b):
"""Retuns a Packet object for the given string, or None if it isn't valid."""
if len(b) != Packet.PACKET_LENGTH:
return None
if b[0] ^ b[1] ^ b[2] != b[3] or ((b[0] & 0x80) >> 7) != 1:
return None
if (b[0] & 0x40) >> 6 == 1:
return ControlPacket(encoded=b)
else:
return DataPacket(encoded=b)
    def encode(self):
        """Return the wire representation of the packet.

        Abstract here: subclasses (ControlPacket, DataPacket) override this
        and return a 4-byte ``bytes`` value ending in an XOR checksum.
        """
        pass
class ControlPacket(Packet):
    """A control packet: a 4-bit opcode plus 16 bits of data."""
    def __init__(self, opcode=0, data=0, encoded=None):
        """Build from opcode/data, or parse them from raw bytes *encoded*."""
        if encoded:
            opcode = encoded[0] & 0x0f
            data = (encoded[1] << 8) | encoded[2]
        self.opcode = opcode
        self.data = data
    def encode(self):
        """Return the 4-byte wire form: header, data hi/lo, XOR checksum.

        0xc0 sets the start-of-packet bit (0x80) and the control bit
        (0x40) that decode() looks for.
        """
        # Fix: the original read bare names `opcode`/`data`, which raised
        # NameError at runtime; the values live on self.
        tmp = [0xc0 | (self.opcode & 0x0f),
               (self.data >> 8) & 0xff,
               self.data & 0xff]
        tmp.append(tmp[0] ^ tmp[1] ^ tmp[2])
        return bytes(tmp)
class DataPacket(Packet):
    """An addressed data packet: house code, unit number, command.

    NOTE(review): house codes are encoded as 4 bits relative to 'A'
    (ord - 65), which limits them to 'A'..'P' -- confirm against the
    protocol spec.
    """
    def __init__(self, house='A', unit=1, command=1, repetitions=1, encoded=None):
        # When raw bytes are supplied, they override the keyword defaults.
        if encoded:
            house = chr((encoded[0] & 0x0f) + 65)
            unit = ((encoded[1] & 0xf0) >> 4) + 1
            command = (encoded[1] & 0x0f)
            # a zero repetition count on the wire is normalized to 1
            repetitions = encoded[2] or 1
        self.house = house
        self.unit = unit
        self.command = command
        self.repetitions = repetitions
    def encode(self):
        """Return the 4-byte wire form: header/house, unit+command,
        repetitions, XOR checksum."""
        # This could definitely be more pythonic. Somehow.
        t0 = 0x80 | ((ord(self.house) - 65) & 0x0f)
        t1 = (((self.unit - 1) << 4) & 0xf0) | (self.command & 0x0f)
        t2 = self.repetitions
        tmp = [t0, t1, t2, t0 ^ t1 ^ t2]
        return bytes(tmp)
| unlicense | Python | |
38aaf30aa1d148bfa31e7856b399a735ba818c6b | Add test for accessing module docstring. | pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython | tests/basics/module_docstring.py | tests/basics/module_docstring.py | """
doc
string"""
try:
    # Builds without docstring support leave the module-level __doc__ name
    # undefined; in that case report SKIP and stop instead of failing.
    __doc__
except NameError:
    print("SKIP")
    raise SystemExit
# Docstrings are supported: print this module's own docstring.
print(__doc__)
| mit | Python | |
3a2bec63eff4a2657250e46a523b5f98b9d27aea | Add tests for validate models. | unt-libraries/coda,unt-libraries/coda,unt-libraries/coda,unt-libraries/coda | coda/coda_validate/tests/test_models.py | coda/coda_validate/tests/test_models.py | from .. import factories
class TestValidate:
    """Tests for the Validate model built via the package's factories."""
    def test_unicode(self):
        # The model's unicode representation should equal its identifier.
        # NOTE: `unicode` is Python-2-only, so this module targets Python 2.
        validate = factories.ValidateFactory.build()
        assert unicode(validate) == validate.identifier
| bsd-3-clause | Python | |
9d30c51aac7ca00b4f191270a82f24372687163c | Add Pandoc filter to convert SVG illustrations to PDF | alerque/casile,alerque/casile,alerque/casile,alerque/casile,alerque/casile | svg2pdf.py | svg2pdf.py | #!/usr/bin/env python
"""
Pandoc filter to convert svg files to pdf as suggested at:
https://github.com/jgm/pandoc/issues/265#issuecomment-27317316
"""
__author__ = "Jerome Robert"
import mimetypes
import subprocess
import os
import sys
from pandocfilters import toJSONFilter, Image
# Maps a pandoc output format to the inkscape export flag to use and the
# file extension of the converted image.
fmt_to_option = {
    "sile": ("--export-pdf","pdf"),
    "docx": ("--export-png", "png"),
}
def svg_to_any(key, value, fmt, meta):
    """Pandoc filter action: rewrite Image elements that point at SVG files.

    For output formats listed in fmt_to_option the SVG is converted with
    inkscape (skipped when the converted file is already newer) and the
    image source is swapped to the converted file.  Other elements are
    left untouched (implicit None return).
    """
    if key == 'Image':
        if len(value) == 2:
            # before pandoc 1.16
            alt, [src, title] = value
            attrs = None
        else:
            # pandoc >= 1.16 adds an attribute list to Image elements.
            attrs, alt, [src, title] = value
        mimet,_ = mimetypes.guess_type(src)
        option = fmt_to_option.get(fmt)
        if mimet == 'image/svg+xml' and option:
            base_name,_ = os.path.splitext(src)
            # NOTE: named eps_name for historical reasons; it actually
            # holds the pdf/png output path.
            eps_name = base_name + "." + option[1]
            try:
                mtime = os.path.getmtime(eps_name)
            except OSError:
                # Output file missing: force a conversion below.
                mtime = -1
            # Re-run inkscape only when the SVG is newer than the output.
            if mtime < os.path.getmtime(src):
                cmd_line = ['inkscape', option[0], eps_name, src]
                sys.stderr.write("Running %s\n" % " ".join(cmd_line))
                subprocess.call(cmd_line, stdout=sys.stderr.fileno())
            if attrs:
                return Image(attrs, alt, [eps_name, title])
            else:
                return Image(alt, [eps_name, title])
if __name__ == "__main__":
    # Run as a pandoc JSON filter: read the AST on stdin, apply svg_to_any.
    toJSONFilter(svg_to_any)
| agpl-3.0 | Python | |
6892e38e328508e05a349b0c4bc9a154dd854f4f | Create Maximal-Square.py | UmassJin/Leetcode | Array/Maximal-Square.py | Array/Maximal-Square.py | '''
Given a 2D binary matrix filled with 0's and 1's, find the largest square containing all 1's and return its area.
For example, given the following matrix:
1 0 1 0 0
1 0 1 1 1
1 1 1 1 1
1 0 0 1 0
Return 4.
'''
# Method 1: full 2-D DP table.
class Solution:
    # @param {character[][]} matrix
    # @return {integer}
    def maximalSquare(self, matrix):
        """Return the area of the largest all-'1' square in a binary matrix.

        dp[i][j] is the side length of the largest square whose
        bottom-right corner is at (i, j); a '1' cell extends the three
        neighboring squares (top, left, top-left) by one.
        O(m*n) time, O(m*n) space.

        Fixes: uses range() instead of the Python-2-only xrange(), drops
        the redundant `- 0` conversions, and omits the dead else-branch
        that rewrote already-zero cells.
        """
        if not matrix or not matrix[0]:
            return 0
        rows, cols = len(matrix), len(matrix[0])
        dp = [[0] * cols for _ in range(rows)]
        best = 0
        # First row and first column: a square there can only have side 1.
        for j in range(cols):
            dp[0][j] = int(matrix[0][j])
            best = max(best, dp[0][j])
        for i in range(rows):
            dp[i][0] = int(matrix[i][0])
            best = max(best, dp[i][0])
        for i in range(1, rows):
            for j in range(1, cols):
                if matrix[i][j] == "1":
                    dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1
                    best = max(best, dp[i][j])
        return best * best
# Best Solution: we only need one array
class Solution:
# @param {character[][]} matrix
# @return {integer}
def maximalSquare(self, matrix):
if not matrix or not matrix[0]: return 0
m = len(matrix); n = len(matrix[0])
maxsize = 0; last_lefttop = 0
dp = [0 for _ in xrange(n+1)]
for i in xrange(1, m+1):
for j in xrange(1, n+1):
if matrix[i-1][j-1] == '0':
dp[j] = 0
else:
tmp = dp[j]
dp[j] = min(dp[j-1], dp[j], last_lefttop) + 1
maxsize = max(maxsize, dp[j])
last_lefttop = tmp
return maxsize * maxsize
# https://leetcode.com/discuss/38489/easy-solution-with-detailed-explanations-8ms-time-and-space
| mit | Python | |
d9b03bf39a83a473f76aec045b3b182f25d1d7f5 | Teste de JSON | renzon/appengine-video,renzon/appengine-video,renzon/appengine-video,renzon/appengine-video | backend/test/curso_tests/rest_tests.py | backend/test/curso_tests/rest_tests.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from course_app.course_model import Course
from mommygae import mommy
from routes.courses import rest
class ListarTests(GAETestCase):
    """App Engine test: the course index endpoint must serialize as JSON."""
    def test_sucesso(self):
        # Persist one Course so the listing is non-empty, then check that
        # the rest.index() response can be serialized as JSON.
        mommy.save_one(Course)
        resposta = rest.index()
        self.assert_can_serialize_as_json(resposta)
286232f060f4268881105fdc7b08bdf3f2f276a9 | Add meeting.py | vasilvv/ndsc | meeting.py | meeting.py | #!/usr/bin/python
#
# A simple tool to manage the discuss meetings.
#
import argparse
import discuss
import sys
acl_flags = "acdorsw"
def die(text):
    """Write *text* to stderr and abort the process with exit status 1."""
    sys.stderr.write("{0}\n".format(text))
    raise SystemExit(1)
def get_user_realm(client):
    """Return the realm suffix (including the '@') of the invoking user."""
    principal = client.who_am_i()
    at = principal.find('@')
    return principal[at:]
def add_meeting():
    """Resolve the meeting named in the module-global `args` and record it
    in the user's .meetings file (discuss.RCFile)."""
    if ":" in args.meeting:
        # Explicit "server:path" form; relative paths are resolved under
        # the default discuss spool directory.
        # NOTE(review): maxsplit=2 yields three fields for a name with two
        # colons and would raise on unpacking; maxsplit=1 looks intended.
        server, path = args.meeting.split(":", 2)
        if not path.startswith("/"):
            path = "/var/spool/discuss/" + path
        client = discuss.Client(server, timeout = 5)
        mtg = discuss.Meeting(client, path)
    else:
        # Bare name: ask the discuss locate service.
        mtg = discuss.locate(args.meeting)
        if not mtg:
            die("Meeting %s was not found." % args.meeting)
    rcfile = discuss.RCFile()
    rcfile.add(mtg)
    rcfile.save()
def list_meetings():
    """Print the meetings from the user's .meetings file, grouped by server."""
    rcfile = discuss.RCFile()
    rcfile.load()
    # Distinct server hostnames, alphabetically.
    servers = list({ entry['hostname'] for entry in rcfile.entries.values() })
    servers.sort()
    for server in servers:
        # One "display name [alias, ...]" string per meeting on this server.
        meetings = [ "%s [%s]" % ( entry['displayname'], ", ".join(entry['names']) )
                for entry in rcfile.entries.values() if entry['hostname'] == server ]
        meetings.sort()
        print "--- Meetings on %s ---" % server
        for meeting in meetings:
            print "* %s" % meeting
        print ""
def get_meeting(name):
    """Look *name* up in the user's .meetings file and return a connected
    discuss.Meeting; exits the process if the name is unknown."""
    rcfile = discuss.RCFile()
    meeting_location = rcfile.lookup(name)
    if not meeting_location:
        sys.stderr.write("Meeting %s not found in .meetings file\n" % name)
        sys.exit(1)
    server, path = meeting_location
    client = discuss.Client(server, timeout = 5)
    return discuss.Meeting(client, path)
def list_acl():
    """Print the ACL of the meeting named in the module-global `args`,
    one line per principal with its access-mode letters."""
    meeting = get_meeting(args.meeting)
    acl = meeting.get_acl()
    acl.sort(key = lambda acl: acl[0])   # sort by principal name
    print "%s Principal" % acl_flags
    print "%s ---------" % ("-" * len(acl_flags))
    for principal, modes in acl:
        print "%s %s" % (modes, principal)
def set_acl():
    """Set the access bits for a principal on the meeting named in the
    module-global `args`; validates the bits against acl_flags and
    qualifies bare principals with the invoking user's realm."""
    meeting = get_meeting(args.meeting)
    # "null"/"none" clear all access bits.
    if args.bits == "null" or args.bits == "none":
        bits = ""
    else:
        bits = args.bits
    bits = bits.replace(" ", "")
    if not all(bit in acl_flags for bit in bits):
        wrong_bits = ", ".join(set(bits) - set(acl_flags))
        die("Invalid bits present in ACL: %s" % wrong_bits)
    principal = args.principal
    if "@" not in principal:
        # No realm given: use the realm of the user we authenticated as.
        principal += get_user_realm(meeting.client)
    meeting.set_access(principal, bits)
def parse_args():
    """Build the CLI (add / list / listacl / setacl subcommands), parse
    argv into the module-global `args`, and run the selected handler."""
    global args
    argparser = argparse.ArgumentParser(description = "Manage discuss meetings")
    subparsers = argparser.add_subparsers()
    parser_add = subparsers.add_parser('add', help = 'Add a meeting to the personal meetings list')
    parser_add.add_argument('meeting', help = 'The name of the meeting (may be prefixed by server name using a colon)')
    parser_list = subparsers.add_parser('list', help = 'Show all the meetings in the personal list')
    parser_listacl = subparsers.add_parser('listacl', help = 'Show the ACL of the specified discuss meeting')
    parser_listacl.add_argument('meeting', help = 'The meeting to display the ACL of')
    parser_setacl = subparsers.add_parser('setacl', help = 'Change the access bits of the specified discuss user')
    parser_setacl.add_argument('meeting', help = 'The meeting to modify the ACL of')
    parser_setacl.add_argument('principal', help = 'The name of the Kerberos principal in question')
    parser_setacl.add_argument('bits', help = 'The access modes to be set for the specified principal')
    # Each subcommand dispatches to its handler function via set_defaults.
    parser_add.set_defaults(handler = add_meeting)
    parser_list.set_defaults(handler = list_meetings)
    parser_listacl.set_defaults(handler = list_acl)
    parser_setacl.set_defaults(handler = set_acl)
    args = argparser.parse_args()
    args.handler()
try:
    parse_args()
except Exception as err:
    # Show any failure (bad meeting name, network error, ...) as a
    # one-line message instead of a traceback.  NOTE(review): this also
    # hides programming errors; consider narrowing the exception type.
    die(err)
| mit | Python | |
df09148f7c53177124e898de27f49a082afb86d6 | Create foursq_tips.py | chenyang03/Foursquare_Crawler,chenyang03/Foursquare_Crawler | foursq_tips.py | foursq_tips.py | # -*- coding: utf-8 -*-
import json
import guess_language
from textblob import TextBlob
from foursq_utils import *
def get_venue_category(venue_category_name):
    """Map a Foursquare venue category name onto one of the ten top-level
    category labels, or 'unknown' when it matches none of them."""
    buckets = (
        (category_Arts_Entertainment, 'Arts_Entertainment'),
        (category_College_University, 'College_University'),
        (category_Event, 'Event'),
        (category_Food, 'Food'),
        (category_Nightlife_Spot, 'Nightlife_Spot'),
        (category_Outdoors_Recreation, 'Outdoors_Recreation'),
        (category_Professional_Other_Places, 'Professional_Other_Places'),
        (category_Residence, 'Residence'),
        (category_Shop_Service, 'Shop_Service'),
        (category_Travel_Transport, 'Travel_Transport'),
    )
    for members, label in buckets:
        if venue_category_name in members:
            return label
    return 'unknown'
def fetch_usr_tips(user_id):
    """Download all tips of a Foursquare user and summarize them.

    Returns a dict with 'count' and a 'tips content' list (one entry per
    tip: length, text, venue name/country, timestamp, photo flag,
    top-level category, and a TextBlob sentiment polarity for English
    tips), or -2 after AUTO_RECONNECT_TIMES failed attempts.  On an API
    error the dict carries 'error_meta' (and 'user existence' = '-1' for
    unknown users) instead of tips.
    """
    success = 0
    retry = 0
    content = ''
    # NOTE(review): get_raw_info error returns (-1/-2) do not increment
    # `retry`, so a persistent API-side error loops forever here.
    while success == 0:
        try:
            # SECURITY NOTE: hardcoded OAuth token checked into source.
            super_token = 'QEJ4AQPTMMNB413HGNZ5YDMJSHTOHZHMLZCAQCCLXIX41OMP'
            fetch_url_str = 'https://api.foursquare.com/v2/users/' + str(user_id) + '/tips?oauth_token='+super_token + \
                            '&limit=5000&v=20141231'
            content = get_raw_info(fetch_url_str)
            if content != -1 and content != -2:
                success = 1
        except:
            # Transient failure: back off and retry a bounded number of times.
            time.sleep(3)
            retry += 1
            if retry == AUTO_RECONNECT_TIMES:
                return -2
    output_dict = {}
    content_json = json.loads(content)
    output_dict['tips content'] = []
    a = {}
    if content_json['meta']['code'] != 200:
        # API-level error: record the code; flag nonexistent users.
        output_dict['error_meta'] = str(content_json['meta']['code'])
        if str(content_json['meta']['errorDetail']) == "Must provide a valid user ID or 'self.'":
            output_dict['user existence'] = '-1'
        return output_dict
    output_dict['count'] = content_json['response']['tips']['count']
    for item in (content_json['response']['tips']['items']):
        # Country code is optional in the venue location.
        if 'cc' in item['venue']['location']:
            venue_country = item['venue']['location']['cc']
        else:
            venue_country = '-'
        a = {}
        a['len'] = len(item['text'])
        a['text'] = item['text'].encode('utf-8')
        a['venue name'] = item['venue']['name'].encode('utf-8')
        # (key intentionally kept as 'timespam' -- downstream readers use it)
        a['timespam'] = str(item['createdAt'])
        a['venue country'] = venue_country
        if 'photo' in item:
            a['photo'] = "y "
        else:
            a['photo'] = "n "
        cate_info = item['venue']['categories']
        if len(cate_info) > 0:
            # Keeps only the last category's top-level bucket.
            for xx in cate_info:
                a['category'] = get_venue_category(xx['name'])
        else:
            a['category'] = '-'
        tip_text = a['text']
        tip_language = guess_language.guessLanguage(tip_text)
        if tip_language == 'en':
            # Sentiment analysis only makes sense for English text here.
            testimonial = TextBlob(tip_text)
            polarity = testimonial.sentiment.polarity
            a['polarity'] = polarity
        else:
            a['polarity'] = '-'
        output_dict['tips content'].append(a)
    return output_dict
| mit | Python | |
e51431c5fb111fc86f1087184accc84d633590de | add first hash of assignee_disambiguatiion | funginstitute/patentprocessor,funginstitute/patentprocessor,nikken1/patentprocessor,nikken1/patentprocessor,funginstitute/patentprocessor,nikken1/patentprocessor,yngcan/patentprocessor,yngcan/patentprocessor,yngcan/patentprocessor | lib/assignee_disambiguation.py | lib/assignee_disambiguation.py | #!/usr/bin/env Python
"""
Performs a basic assignee disambiguation
"""
import redis
from collections import Counter
from Levenshtein import jaro_winkler
from alchemy import fetch_session # gives us the `session` variable
from alchemy.schema import *
# Jaro-Winkler similarity cutoff for putting two assignees in one block.
THRESHOLD = 0.95
# get alchemy.db from the directory above
s = fetch_session(path_to_sqlite='..')
# get redis session
r = redis.StrictRedis(host='localhost')
# delete all previous keys
# CAUTION: this runs at import time and wipes every key in the local
# redis database, not just keys created by this script.
for i in r.keys():
    r.delete(i)
# get all assignees in database
assignees = s.query(RawAssignee).all()
def get_assignee_id(obj):
    """
    Returns string representing an assignee object. Returns obj.organization if
    it exists, else returns concatenated obj.name_first + '|' + obj.name_last.
    Falls back to '' when either name part is missing or None.
    """
    if obj.organization:
        return obj.organization
    try:
        return obj.name_first + '|' + obj.name_last
    except (AttributeError, TypeError):
        # Missing attribute or a None name part: no usable identifier.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return ''
def create_assignee_blocks(assignees):
    """
    Iterates through all assignees. If the strings match within the THRESHOLD confidence,
    we put them into the same block, else put the current assignee in its own block. Blocks
    are stored as redis lists, named by the first ID we encounter for that block.

    NOTE(review): both loops iterate `assignees` while remove() is called
    on it, so elements are silently skipped and the pairing behavior is
    ill-defined; iterate over copies (assignees[:]) if this is revisited.
    Also O(n^2) in the number of assignees.
    """
    for current in assignees:
        for assignee in assignees:
            if current == assignee: continue
            current_id = get_assignee_id(current)
            assignee_id = get_assignee_id(assignee)
            # Jaro-Winkler string similarity between the two identifiers.
            if jaro_winkler(current_id, assignee_id) >= THRESHOLD:
                # block name is the first id we encountered
                r.lpush(current_id, assignee_id, current_id)
                assignees.remove(assignee)
                assignees.remove(current)
            else:
                r.lpush(current_id, current_id)
                #assignees.remove(current)
def disambiguate_by_frequency():
    """
    For block, find the most frequent assignee name and create a hash from each
    assignee organization/name to the most frequent name. Delete the old block
    during this process. This ensures that the only keys left in our database
    are the disambiguations.
    """
    for block in r.keys():
        assignees = r.lrange(block, 0, -1) # get all elements in list [block]
        # The canonical spelling is the most frequent member of the block.
        most_common_id = Counter(assignees).most_common()[0][0]
        r.delete(block)
        # Map every variant spelling to the canonical one.
        for assignee in assignees:
            r.set(assignee, most_common_id)
def create_assignee_table(assignees):
    """
    Given a list of assignees and the redis key-value disambiguation,
    populates the Assignee table in the database.  Each raw assignee is
    merged into an Assignee row keyed by its uuid, carrying the
    disambiguated organization or first/last name.
    """
    for assignee in assignees:
        record = {} # dict for insertion
        # Canonical spelling chosen earlier by disambiguate_by_frequency().
        disambiguated_name = r.get(get_assignee_id(assignee))
        if assignee.organization:
            record['organization'] = disambiguated_name
        else:
            # Person names are stored as "first|last"; split them back out.
            record['name_first'] = disambiguated_name.split('|')[0]
            record['name_last'] = disambiguated_name.split('|')[1]
        for key in ['residence', 'nationality', 'type']:
            record[key] = getattr(assignee, key)
        record['id'] = assignee.uuid
        assignee_obj = Assignee(**record)
        assignee_obj.rawassignees.append(assignee)
        s.merge(assignee_obj)
    try:
        s.commit()
    # NOTE: Python-2-only except syntax; a failed commit is rolled back
    # silently (the caught exception is discarded).
    except Exception, e:
        s.rollback()
def examine():
    """Debug helper: print every disambiguated Assignee and its raw rows."""
    assignees = s.query(Assignee).all()
    for a in assignees:
        print a.id, a.rawassignees
if __name__=='__main__':
    # Full pipeline: block similar names, pick the most frequent spelling
    # per block, then write the disambiguated Assignee rows.
    create_assignee_blocks(assignees)
    disambiguate_by_frequency()
    create_assignee_table(assignees)
| bsd-2-clause | Python | |
3ffe8dd8ed59cb5c03087844534a94b11bb73a8d | Add longest increasing subsequence | gsathya/dsalgo,gsathya/dsalgo | algo/lis.py | algo/lis.py | arr = [1, 6, 3, 5, 9, 7]
ans = [1]
for i in range(1, len(arr)):
t = []
for j in range(i):
if arr[i] > arr[j]:
t.append(ans[j]+1)
else:
t.append(ans[j])
ans.append(max(t))
print max(ans)
| mit | Python | |
b7c6b5115ce5aec129af64d6b85c672901a435d3 | Add a multiprocessor for particle learning. | probcomp/cgpm,probcomp/cgpm | gpmcc/experiments/particle_engine.py | gpmcc/experiments/particle_engine.py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from gpmcc.experiments.particle_dim import ParticleDim
import multiprocessing
def _particle_learn(args):
    """Worker entry point: unpack (X, cctype, distargs, seed), seed the
    RNG, shuffle the data in place, and run particle learning on a fresh
    ParticleDim.  Kept at module level so multiprocessing can pickle it."""
    X, cctype, distargs, seed = args
    np.random.seed(seed)
    np.random.shuffle(X)
    dim = ParticleDim(X, cctype, distargs)
    dim.particle_learn()
    return dim
class ParticleEngine(object):
    """Particle Engine: runs ParticleDim particle learning for several
    particles, optionally in parallel across all CPU cores."""
    def __init__(self, X, dist, distargs=None, multithread=True):
        # With multithread set, work is mapped onto a process pool (one
        # worker per CPU); otherwise the builtin map is used.
        self.multithread = multithread
        self.map = map
        if self.multithread:
            self.pool = multiprocessing.Pool(multiprocessing.cpu_count())
            self.map = self.pool.map
        self.X = X
        self.dist = dist
        self.distargs = distargs
        # Populated by particle_learn(): one learned ParticleDim per particle.
        self.dims = None
    def particle_learn(self, particles=1, seeds=None):
        """Do particle learning in parallel."""
        if seeds is None:
            seeds = range(particles)
        assert len(seeds) == particles
        # One (data, dist, distargs, seed) work item per particle.
        args = ((self.X, self.dist, self.distargs, seed) for (_, seed) in
            zip(xrange(particles), seeds))
        self.dims = self.map(_particle_learn, args)
    def get_dim(self, index):
        # Learned ParticleDim for the given particle index.
        return self.dims[index]
| apache-2.0 | Python | |
ac58f25c47c9954a694a98008f8c658ae3a0f840 | add a way to merge cut files together | kratsg/optimization,kratsg/optimization,kratsg/optimization | add-cuts.py | add-cuts.py | import os
import csv
import json
def add_cuts(cuts_left, cuts_right):
    """Merge two cut dictionaries hash-by-hash: for every hash present in
    *cuts_left*, sum its 'raw', 'scaled' and 'weighted' counters with the
    matching entry of *cuts_right* and return the merged mapping."""
    merged = {}
    for cut_hash, left_counts in cuts_left.items():
        right_counts = cuts_right[cut_hash]
        merged[cut_hash] = {
            field: left_counts[field] + right_counts[field]
            for field in ("raw", "scaled", "weighted")
        }
    return merged
if __name__ == '__main__':
    import argparse
    import subprocess
    class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter):
        pass
    # Version strings taken from the git checkout containing this script.
    __version__ = subprocess.check_output(["git", "describe", "--always"], cwd=os.path.dirname(os.path.realpath(__file__))).strip()
    __short_hash__ = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"], cwd=os.path.dirname(os.path.realpath(__file__))).strip()
    parser = argparse.ArgumentParser(description='Left-add multiple cut files together by hash. It will use the first cut file as the list of hashes. Author: G. Stark. v.{0}'.format(__version__),
                                     formatter_class=lambda prog: CustomFormatter(prog, max_help_position=30))
    parser.add_argument('cuts', type=str, nargs='+', metavar='<DID.json>', help='cut files to merge')
    parser.add_argument('-o', '--output', required=True, type=str, dest='output', help='name the output json')
    # parse the arguments, throw errors if missing any
    args = parser.parse_args()
    # Left-fold the remaining cut files onto the first one.
    # NOTE: `reduce` and `file()` as used here are Python-2 builtins.
    output_cuts = reduce(lambda x,y: add_cuts(x, json.load(file(y))), args.cuts[1:], json.load(file(args.cuts[0])))
    with open(args.output, 'w+') as f:
        f.write(json.dumps(output_cuts, sort_keys=True, indent=4))
| mit | Python | |
def read4bint(f, o):
    """Read a 4-byte little-endian unsigned integer from bytes *f* at
    offset *o* (replaces the hand-rolled byte composition with the
    standard int.from_bytes)."""
    return int.from_bytes(f[o:o + 4], 'little')
def read2bint(f, o):
    """Read a 2-byte little-endian unsigned integer from bytes *f* at
    offset *o* (replaces the hand-rolled byte composition with the
    standard int.from_bytes)."""
    return int.from_bytes(f[o:o + 2], 'little')
def loadBMP(filename):
    """Load an uncompressed 24-bit BMP file.

    Returns (width, height, rows) where rows[0] is the top scanline and
    each pixel is an (R, G, B) tuple, or the error string
    "Not a supported bitmap file" when the 'BM' magic is missing.

    Fixes: uses a `with` block so the file handle is closed even if the
    read raises (the original leaked it), and opens read-only ('rb')
    instead of the needless update mode 'b+r'.
    """
    with open(filename, 'rb') as handle:
        data = handle.read()
    if data[0:2] != b'BM':
        return "Not a supported bitmap file"
    # Header fields are little-endian; offsets follow the standard
    # BITMAPFILEHEADER / BITMAPINFOHEADER layout.
    pixel_offset = int.from_bytes(data[0x0a:0x0e], 'little')
    width = int.from_bytes(data[0x12:0x16], 'little')
    height = int.from_bytes(data[0x16:0x1a], 'little')
    bits_per_pixel = int.from_bytes(data[0x1c:0x1e], 'little')
    # Scanlines are padded to a multiple of four bytes.
    row_size = ((bits_per_pixel * width + 31) // 32) * 4
    rows = []
    for row in range(height):
        pixels = []
        for col in range(width):
            base = pixel_offset + col * 3 + row * row_size
            # Stored as B, G, R on disk; present pixels as (R, G, B).
            pixels.append((data[base + 2], data[base + 1], data[base]))
        rows.append(pixels)
    # BMP rows are stored bottom-up; flip so rows[0] is the top scanline.
    return width, height, rows[::-1]
def intTo4byte(a):
    """Return *a* as 4 little-endian bytes.

    The 32-bit mask reproduces the original modulo arithmetic for
    out-of-range (including negative) inputs.
    """
    return (a & 0xFFFFFFFF).to_bytes(4, 'little')
def saveBMP(filename, w, h, pixels):
    """Write *pixels* (h rows, top row first, of (R, G, B) tuples) to
    *filename* as an uncompressed 24-bit BMP of size w x h.

    Bug fix: pixel channels are now written in the B, G, R order the BMP
    format requires -- the original wrote R, G, B, which swapped red and
    blue for every reader (including loadBMP in this module).
    """
    import struct
    # Scanlines are padded to a multiple of four bytes.
    row_size = ((24 * w + 31) // 32) * 4
    padding = row_size - 3 * w
    # BMP stores rows bottom-up, so emit the last row of *pixels* first.
    body = bytearray()
    for row in range(h):
        for red, green, blue in pixels[h - row - 1]:
            body += bytes((blue, green, red))
        body += bytes(padding)
    # BITMAPINFOHEADER: size, width, height, planes, bpp, compression,
    # image size, x/y pixels-per-metre (2835 ~ 72 DPI), palette counts.
    dib_header = struct.pack('<IIIHHIIIIII',
                             40, w, h, 1, 24, 0, h * row_size,
                             2835, 2835, 0, 0)
    # File header: magic, total size, two reserved words, pixel-data offset.
    file_size = 14 + len(dib_header) + len(body)
    file_header = b'BM' + struct.pack('<IHHI', file_size, 0, 0, 54)
    with open(filename, 'wb') as handle:
        handle.write(file_header + dib_header + bytes(body))
| mit | Python | |
e5a0647862c179fb0840454fd6b827c46a05ecbc | Add check_scattering_factor.py to see atomic scattering factors for X-ray and electrons Commit 41956836 | keitaroyam/yamtbx,keitaroyam/yamtbx,keitaroyam/yamtbx,keitaroyam/yamtbx | cctbx_progs/check_scattering_factor.py | cctbx_progs/check_scattering_factor.py | import numpy
import cctbx.eltbx.xray_scattering
import cctbx.eltbx.e_scattering
def fetch_equation(table):
    """Format the Gaussian-sum scattering factor of *table* as a plottable
    equation string: a(i)*exp(-b(i)*s**2) terms joined by '+', plus the
    constant c."""
    terms = []
    for a, b in zip(table.array_of_a(), table.array_of_b()):
        terms.append("%f*exp(-%f*s**2)" % (a, b))
    return "+".join(terms) + "+%f" % table.c()
def run(elements, smin=0, smax=1, sstep=0.01):
    """Print a table of atomic scattering factors for the given element
    symbols: the IT1992 X-ray factor and the Peng electron factor,
    sampled at s = smin, smin+sstep, ... < smax.  The fitted equations
    are printed as '#' comment lines before the table."""
    #reg = cctbx.xray.scattering_type_registry()
    #reg.process(scatterers=flex.xray_scatterer([cctbx.xray.scatterer("S")]))
    #reg.assign_from_table("PENG1996")
    #reg.assign_from_table("IT1992")
    #print reg.unique_form_factors_at_d_star_sq(0.05**2)[0]
    print "element s xray electron"
    for el in elements:
        # Fitted Gaussian-sum tables for X-ray and electron scattering.
        xray = cctbx.eltbx.xray_scattering.it1992(el, True).fetch()
        elec = cctbx.eltbx.e_scattering.ito_vol_c_2011_table_4_3_2_2_entry_as_gaussian(label=el, exact=True)
        print "# Xray for %s :"%el, fetch_equation(xray)
        print "# electron for %s:"%el, fetch_equation(elec)
        for s in numpy.arange(smin, smax, sstep):
            print "%2s %.4f %.4f %.4f" % (el, s, xray.at_x(s), elec.at_x(s))
if __name__ == "__main__":
    import sys
    # Every command-line argument is an element symbol, e.g. "S Fe O".
    elements = sys.argv[1:]
    run(elements)
| bsd-3-clause | Python | |
3a9807fd14257c49490ec429d7365c902209508c | Add beginnings of a Python driver. Currently just prints out input file. | nostrademons/GumboStats,nostrademons/GumboStats | gumbo_stats.py | gumbo_stats.py | import ctypes
import sys
def parse_warc(filename):
    """Parse a .warc.gz web-archive file.  Not implemented yet (stub)."""
    pass
def parse_file(filename):
    """Read *filename* as text and echo its full contents to stdout."""
    with open(filename) as handle:
        contents = handle.read()
    print(contents)
if __name__ == '__main__':
    # Dispatch on extension: web archives go to parse_warc, anything else
    # is treated as a single file to echo.
    filename = sys.argv[1]
    if filename.endswith('.warc.gz'):
        parse_warc(filename)
    else:
        parse_file(filename)
| apache-2.0 | Python | |
36d7bc4719490b046d8782465ddeba6e8240233e | Split images into bf and bc bears. Defaults to images. | hypraptive/bearid,hypraptive/bearid,hypraptive/bearid | tools/xml_split_images_locale.py | tools/xml_split_images_locale.py | #! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
##
## write bc and bf face images to separate files
##------------------------------------------------------------
def main (argv) :
    """Parse the command line and split the given face-metadata XML files
    into per-locale outputs via xml_utils."""
    parser = argparse.ArgumentParser(description='write faces from different locale to different xmls.',
        formatter_class=RawTextHelpFormatter)
    parser.add_argument ('files', nargs='+')
    parser.add_argument ('-v', '--verbosity', type=int, default=1,
        choices=[0, 1, 2, 3], help='')
        # help="increase output verbosity"
    u.set_argv (argv)
    args = parser.parse_args()
    u.set_verbosity (args.verbosity)
    u.set_argv (argv)
    u.set_filetype ('faces')
    # NOTE(review): this local is hard-coded to 0 and shadows the parsed
    # --verbosity option, so the debug print below never fires.
    verbose = 0
    if verbose > 0:
        print("files: ", args.files)
    u.split_objects_by_locales (args.files)
if __name__ == "__main__":
    # Entry point: pass the full argv so xml_utils can record it.
    main (sys.argv)
| mit | Python | |
daba5cef48e2194a902c82e263fc6df3a279f826 | Add basic Linux LVM support | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/linux_lvm.py | salt/modules/linux_lvm.py | '''
Support for Linux LVM2
'''
# Import python libs
import re
# Import salt libs
import salt.utils
def __virtual__():
    '''
    Expose this module as `lvm` only when the lvm binary is on PATH
    '''
    return 'lvm' if salt.utils.which('lvm') else False
def version():
    '''
    Return LVM version from lvm version

    CLI Example::

        salt '*' lvm.version
    '''
    # The version number is the second field of the first output line,
    # e.g. "LVM version:     2.02.98(2) (2012-10-15)".
    first_line = __salt__['cmd.run']('lvm version').splitlines()[0]
    return first_line.split(': ')[1].strip()
def fullversion():
    '''
    Return all version info from lvm version

    CLI Example::

        salt '*' lvm.fullversion
    '''
    out = __salt__['cmd.run']('lvm version').splitlines()
    # One "Label:   value" pair per output line.
    pairs = (line.split(':') for line in out)
    return dict((fields[0].strip(), fields[1].strip()) for fields in pairs)
def pvdisplay(pvname=''):
    '''
    Return information about the physical volume(s)

    CLI Examples::

        salt '*' lvm.pvdisplay
        salt '*' lvm.pvdisplay /dev/md0
    '''
    ret = {}
    # -c produces one colon-separated record per physical volume.
    cmd = 'pvdisplay -c {0}'.format(pvname)
    out = __salt__['cmd.run'](cmd).splitlines()
    for line in out:
        comps = line.strip().split(':')
        # Keyed by the device path; field order per `pvdisplay -c`.
        ret[comps[0]] = {
            'Physical Volume Device': comps[0],
            'Volume Group Name': comps[1],
            'Physical Volume Size (kB)': comps[2],
            'Internal Physical Volume Number': comps[3],
            'Physical Volume Status': comps[4],
            'Physical Volume (not) Allocatable': comps[5],
            'Current Logical Volumes Here': comps[6],
            'Physical Extent Size (kB)': comps[7],
            'Total Physical Extents': comps[8],
            'Free Physical Extents': comps[9],
            'Allocated Physical Extents': comps[10],
            }
    return ret
def vgdisplay(vgname=''):
    '''
    Return information about the volume group(s)

    CLI Examples::

        salt '*' lvm.vgdisplay
        salt '*' lvm.vgdisplay nova-volumes
    '''
    ret = {}
    # -c produces one colon-separated record per volume group.
    cmd = 'vgdisplay -c {0}'.format(vgname)
    out = __salt__['cmd.run'](cmd).splitlines()
    for line in out:
        comps = line.strip().split(':')
        # Keyed by the volume group name; field order per `vgdisplay -c`.
        # NOTE(review): 'Maximum Phisical Volumes' is misspelled but is a
        # public return key -- renaming it would break consumers.
        ret[comps[0]] = {
            'Volume Group Name': comps[0],
            'Volume Group Access': comps[1],
            'Volume Group Status': comps[2],
            'Internal Volume Group Number': comps[3],
            'Maximum Logical Volumes': comps[4],
            'Current Logical Volumes': comps[5],
            'Open Logical Volumes': comps[6],
            'Maximum Logical Volume Size': comps[7],
            'Maximum Phisical Volumes': comps[8],
            'Current Physical Volumes': comps[9],
            'Actual Physical Volumes': comps[10],
            'Volume Group Size (kB)': comps[11],
            'Physical Extent Size (kB)': comps[12],
            'Total Physical Extents': comps[13],
            'Allocated Physical Extents': comps[14],
            'Free Physical Extents': comps[15],
            'UUID': comps[16],
            }
    return ret
def lvdisplay(lvname=''):
    '''
    Return information about the logical volume(s)

    CLI Examples::

        salt '*' lvm.lvdisplay
        salt '*' lvm.lvdisplay /dev/vg_myserver/root
    '''
    ret = {}
    # -c produces one colon-separated record per logical volume.
    cmd = 'lvdisplay -c {0}'.format(lvname)
    out = __salt__['cmd.run'](cmd).splitlines()
    for line in out:
        comps = line.strip().split(':')
        # Keyed by the logical volume path; field order per `lvdisplay -c`.
        ret[comps[0]] = {
            'Logical Volume Name': comps[0],
            'Volume Group Name': comps[1],
            'Logical Volume Access': comps[2],
            'Logical Volume Status': comps[3],
            'Internal Logical Volume Number': comps[4],
            'Open Logical Volumes': comps[5],
            'Logical Volume Size': comps[6],
            'Current Logical Extents Associated': comps[7],
            'Allocated Logical Extents': comps[8],
            'Allocation Policy': comps[9],
            'Read Ahead Sectors': comps[10],
            'Major Device Number': comps[11],
            'Minor Device Number': comps[12],
            }
    return ret
| apache-2.0 | Python | |
1077d1df94b207606a15a233182a6f6aa07c9625 | create module to support vendoring | ceph/remoto,alfredodeza/remoto | vendor.py | vendor.py | import subprocess
import os
from os import path
import traceback
import sys
import shutil
error_msg = """
This library depends on sources fetched when packaging that failed to be
retrieved.
This means that it will *not* work as expected. Errors encountered:
"""
def run(cmd):
    """Run *cmd* (an argv list), echoing it first.  On spawn failure the
    captured traceback is shown and the process exits; on a non-zero exit
    status the command's stdout/stderr are shown via print_error."""
    print '[vendoring] Running command: %s' % ' '.join(cmd)
    try:
        result = subprocess.Popen(
            cmd,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE
        )
    except Exception:
        # if building with python2.5 this makes it compatible
        _, error, _ = sys.exc_info()
        print_error([], traceback.format_exc(error).split('\n'))
        raise SystemExit(1)
    # Wait for completion; a truthy return code means the command failed.
    if result.wait():
        print_error(result.stdout.readlines(), result.stderr.readlines())
def print_error(stdout, stderr):
    """Print a framed warning (error_msg) followed by the captured stdout
    and stderr lines of a failed vendoring command."""
    print '*'*80
    print error_msg
    for line in stdout:
        print line
    for line in stderr:
        print line
    print '*'*80
def vendor_library(name, version, git_repo):
    """Vendor *name* at *version* from *git_repo* into remoto/lib/vendor.

    Removes any stale clone, re-vendors when the already-vendored copy's
    __version__ differs, then clones the repo, checks out the requested
    version and moves the package directory into place.  Restores the
    original working directory afterwards.
    """
    this_dir = path.dirname(path.abspath(__file__))
    vendor_dest = path.join(this_dir, 'remoto/lib/vendor/%s' % name)
    vendor_src = path.join(this_dir, name)
    vendor_module = path.join(vendor_src, name)
    current_dir = os.getcwd()
    # Drop any leftover clone from a previous (possibly failed) run.
    if path.exists(vendor_src):
        run(['rm', '-rf', vendor_src])
    # If already vendored at a different version, remove it so it is
    # fetched again below.
    if path.exists(vendor_dest):
        module = __import__('remoto.lib.vendor.%s' % name, globals(), locals(), ['__version__'])
        if module.__version__ != version:
            run(['rm', '-rf', vendor_dest])
    if not path.exists(vendor_dest):
        run(['git', 'clone', git_repo])
        os.chdir(vendor_src)
        run(['git', 'checkout', version])
        run(['mv', vendor_module, vendor_dest])
    os.chdir(current_dir)
def clean_vendor(name):
    """
    Remove the vendored copy of *name*, e.g. when packaging with the
    environment flag set to avoid vendoring.
    """
    here = path.dirname(path.abspath(__file__))
    target = path.join(here, 'remoto/lib/vendor/%s' % name)
    run(['rm', '-rf', target])
def vendorize(vendor_requirements):
    """
    Main entry point for vendoring requirements.  Takes a list of
    (name, version, git_repo) tuples, e.g.::

        vendor_requirements = [
            ('foo', '0.0.1', 'https://example.com/git_repo'),
        ]

    and vendors each library in turn.
    """
    for name, version, repo in vendor_requirements:
        vendor_library(name, version, repo)
| mit | Python | |
5f2c2e4736cfe141b1f717b688b290657e3ddef7 | Add script to install gsl-lite package | martinmoene/gsl-lite,martinmoene/gsl-lite,martinmoene/gsl-lite | script/install-gsl-pkg.py | script/install-gsl-pkg.py | #!/usr/bin/env python
#
# Copyright 2015-2018 by Martin Moene
#
# gsl-lite is based on GSL: Guideline Support Library,
# https://github.com/microsoft/gsl
#
# This code is licensed under the MIT License (MIT).
#
# script/install-gsl-pkg.py
#
from __future__ import print_function
import argparse
import os
import re
import sys
# gsl-lite version, updated by script/update-version.py:
gsl_lite_version = "0.28.0"
# Config:
# Defaults for the CMake compiler, generator and build folder; each can
# be overridden on the command line (see main()).
defCompiler = 'g++'
defGenerator = 'Unix Makefiles'
defBuildFolder = './cmake-pkg-install'
# End config.
def cmake( opt, arguments ):
    """Run `cmake` with the given argument string, echoing the full
    command line first unless opt.quiet is set."""
    command_line = "cmake {}".format( arguments )
    if not opt.quiet:
        print( command_line )
    os.system( command_line )
def installGslLitePackage( opt ):
    """Install gsl-lite package: run a CMake configure step followed by a
    `cmake --build ... --target install` step, using the options in *opt*
    (empty option strings are emitted for unset values)."""
    if not opt.quiet:
        print( "Installing gsl-lite package:\n")
    # configure:
    build_folder = opt.build_folder if opt.build_folder else '.'
    opt_build_folder = '-B"{folder}"' .format( folder=opt.build_folder ) if opt.build_folder else ''
    opt_generator = '-G"{generator}"' .format( generator=opt.generator ) if opt.generator else ''
    opt_build_type = '-DCMAKE_BUILD_TYPE="{config}"' .format( config=opt.config ) if opt.config else ''
    opt_compiler = '-DCMAKE_CXX_COMPILER="{compiler}"' .format( compiler=opt.compiler ) if opt.compiler else ''
    opt_install_pfx = '-DCMAKE_INSTALL_PREFIX="{install_pfx}"'.format( install_pfx=opt.install_pfx ) if opt.install_pfx else ''
    cmake( opt, '-H. {opt_build_folder} {opt_generator} {opt_build_type} {opt_compiler} {opt_install_pfx}'.format(
        opt_build_folder=opt_build_folder
        , opt_generator=opt_generator
        , opt_build_type=opt_build_type
        , opt_compiler=opt_compiler
        , opt_install_pfx=opt_install_pfx ) )
    # install:
    opt_build = '--build {folder} ' .format( folder=build_folder )
    opt_config = '--config "{config}"'.format( config=opt.config ) if opt.config else ''
    cmake( opt, '{opt_build} {opt_config} --target install'.format(
        opt_build=opt_build, opt_config=opt_config) )
def main():
    """Collect command line options and install gsl-lite package."""
    parser = argparse.ArgumentParser(
        description='Install gsl-lite package.',
        epilog="""""",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='report additional information')
    parser.add_argument(
        '-q', '--quiet',
        action='store_true',
        help='do not report the cmake commands issued')
    # The remaining options default to the module-level def* constants.
    parser.add_argument(
        '--build-folder',
        dest='build_folder',
        default=defBuildFolder,
        type=str,
        metavar='F',
        help='the CMake build folder [{deflt}]'.format( deflt=defBuildFolder ) )
    parser.add_argument(
        '--generator',
        dest='generator',
        default=defGenerator,
        type=str,
        metavar='G',
        help='the CMake generator [{deflt}]'.format( deflt=defGenerator ) )
    parser.add_argument(
        '--compiler',
        dest='compiler',
        default=defCompiler,
        type=str,
        metavar='C',
        help='the CMake compiler [{deflt}]'.format( deflt=defCompiler ) )
    parser.add_argument(
        '--config',
        dest='config',
        metavar='C',
        help='the CMake configuration (Debug, Release) if different from the default')
    parser.add_argument(
        '--install-prefix',
        dest='install_pfx',
        type=str,
        metavar='P',
        help='the CMake install prefix if different from the default')
    opt = parser.parse_args()
    installGslLitePackage( opt )
if __name__ == '__main__':
    # Script entry point.
    main()
# end of file
| mit | Python | |
62ed9524bc1c7ec5aafb44e3e1aea0c313083d64 | add script to retrieve catalog statistics | cvmfs/cvmfs,Gangbiao/cvmfs,alhowaidi/cvmfsNDN,Moliholy/cvmfs,Gangbiao/cvmfs,trshaffer/cvmfs,djw8605/cvmfs,MicBrain/cvmfs,MicBrain/cvmfs,trshaffer/cvmfs,cvmfs-testing/cvmfs,djw8605/cvmfs,DrDaveD/cvmfs,reneme/cvmfs,alhowaidi/cvmfsNDN,MicBrain/cvmfs,Gangbiao/cvmfs,trshaffer/cvmfs,DrDaveD/cvmfs,reneme/cvmfs,cvmfs-testing/cvmfs,Gangbiao/cvmfs,reneme/cvmfs,cvmfs-testing/cvmfs,cvmfs/cvmfs,reneme/cvmfs,djw8605/cvmfs,Moliholy/cvmfs,trshaffer/cvmfs,cvmfs-testing/cvmfs,DrDaveD/cvmfs,djw8605/cvmfs,cvmfs/cvmfs,cvmfs/cvmfs,cvmfs-testing/cvmfs,alhowaidi/cvmfsNDN,djw8605/cvmfs,Gangbiao/cvmfs,Moliholy/cvmfs,DrDaveD/cvmfs,cvmfs/cvmfs,alhowaidi/cvmfsNDN,Moliholy/cvmfs,DrDaveD/cvmfs,DrDaveD/cvmfs,trshaffer/cvmfs,alhowaidi/cvmfsNDN,reneme/cvmfs,cvmfs/cvmfs,cvmfs/cvmfs,MicBrain/cvmfs,MicBrain/cvmfs,DrDaveD/cvmfs,Moliholy/cvmfs | add-ons/tools/get_info.py | add-ons/tools/get_info.py | #!/usr/bin/env python
import sys
import cvmfs
def usage():
    """Print command-line usage and describe the output columns (Python 2)."""
    print sys.argv[0] + " <local repo name | remote repo url> [root catalog]"
    print "This script looks in the given root_catalog (or the repository HEAD) and"
    print "retrieves the contained statistics counters in CVMFS 2.1 catalogs."
    print
    print "Information is printed as a space separated list containing:"
    print "[revision] [regular files] [directories] [symlinks] [file volume] [chunked files] [chunked volume] [chunks] [nested catalogs]"
# Command line: <repo identifier> [root catalog hash]; anything else shows usage.
if len(sys.argv) != 2 and len(sys.argv) != 3:
    usage();
    sys.exit(1)
# local repository name or remote repository URL
repo_identifier = sys.argv[1]
# optional explicit root catalog hash; fall back to the repository HEAD
root_catalog_hash = sys.argv[2] if len(sys.argv) == 3 else None
repo = cvmfs.open_repository(repo_identifier)
root_catalog = repo.retrieve_catalog(root_catalog_hash) if root_catalog_hash else repo.retrieve_root_catalog()
statistics = root_catalog.get_statistics()
# one line: revision followed by all statistics counters, space separated
print ' '.join([str(root_catalog.revision)] + [ str(x) for x in statistics.get_all_fields() ])
| bsd-3-clause | Python | |
34e7a8d904b332a91a615043a629725100367d6f | Add ast.GlyphDefinition | googlefonts/fonttools,fonttools/fonttools | Lib/fontTools/voltLib/ast.py | Lib/fontTools/voltLib/ast.py | from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
import fontTools.feaLib.ast as ast
class VoltFile(ast.Block):
    """Root AST node for a parsed VOLT file: a statement block with no source location."""
    def __init__(self):
        ast.Block.__init__(self, location=None)
class GlyphDefinition(ast.Statement):
    """Statement recording one glyph definition: its name, glyph id,
    unicode value, glyph type and components."""

    def __init__(self, location, name, gid, gunicode, gtype, components):
        ast.Statement.__init__(self, location)
        self.name = name
        self.id = gid
        self.unicode = gunicode
        self.type = gtype
        self.components = components
| mit | Python | |
52c0006c7d26e821eeba6e4ad07949f8f59b9e86 | add wrapper | francois-berder/PyLetMeCreate | letmecreate/click/alcohol.py | letmecreate/click/alcohol.py | #!/usr/bin/env python3
"""Python binding of Alcohol Click wrapper of LetMeCreate library."""
import ctypes
_LIB = ctypes.CDLL('libletmecreate_click.so')
def get_measure(mikrobus_index):
    """Returns a 16-bit integer from the Alcohol Click.

    mikrobus_index: must be 0 (MIKROBUS_1) or 1 (MIKROBUS_2)

    Note: An exception is thrown if it fails to get a measure from the click.
    """
    # Output buffer filled in place by the C library call.
    measure = ctypes.c_uint16(0)
    ret = _LIB.alcohol_click_get_measure(mikrobus_index, ctypes.byref(measure))
    # The C wrapper signals failure with a negative return code.
    if ret < 0:
        raise Exception("alcohol click get measure failed")
    return measure.value
| bsd-3-clause | Python | |
a0d5c9aaf0f573ff11beacb6a30a91f90312dd08 | Create BitonicSort.py (#386) | TheAlgorithms/Python | sorts/BitonicSort.py | sorts/BitonicSort.py | # Python program for Bitonic Sort. Note that this program
# works only when size of input is a power of 2.
# The parameter dir indicates the sorting direction, ASCENDING
# or DESCENDING; if (a[i] > a[j]) agrees with the direction,
# then a[i] and a[j] are interchanged.*/
def compAndSwap(a, i, j, dire):
    """Swap a[i] and a[j] in place when their order disagrees with the
    requested direction ``dire`` (1 = ascending, 0 = descending)."""
    x, y = a[i], a[j]
    if (dire == 1 and x > y) or (dire == 0 and x < y):
        a[i], a[j] = y, x
# It recursively sorts a bitonic sequence in ascending order,
# if dir = 1, and in descending order otherwise (means dir=0).
# The sequence to be sorted starts at index position low,
# the parameter cnt is the number of elements to be sorted.
def bitonicMerge(a, low, cnt, dire):
    """Merge the bitonic sequence a[low:low+cnt] into sorted order in
    direction ``dire`` (1 = ascending, 0 = descending).

    ``cnt`` must be a power of two (see the module header comment).
    """
    if cnt > 1:
        # Floor division keeps the arithmetic in integers; int(cnt / 2)
        # round-trips through a float and can lose precision for huge cnt.
        k = cnt // 2
        for i in range(low, low + k):
            compAndSwap(a, i, i + k, dire)
        bitonicMerge(a, low, k, dire)
        bitonicMerge(a, low + k, k, dire)
# This funcion first produces a bitonic sequence by recursively
# sorting its two halves in opposite sorting orders, and then
# calls bitonicMerge to make them in the same order
def bitonicSort(a, low, cnt, dire):
    """Sort a[low:low+cnt] in place in direction ``dire`` (1 = ascending,
    0 = descending).  ``cnt`` must be a power of two.

    First builds a bitonic sequence by sorting the two halves in opposite
    directions, then merges them with bitonicMerge.
    """
    if cnt > 1:
        # Integer halving (see bitonicMerge) instead of int(cnt / 2).
        k = cnt // 2
        bitonicSort(a, low, k, 1)
        bitonicSort(a, low + k, k, 0)
        bitonicMerge(a, low, cnt, dire)
# Caller of bitonicSort for sorting the entire array of length N
# in ASCENDING order
def sort(a, N, up):
    """Sort the first N elements of list ``a`` in place; ``up`` = 1 for
    ascending, 0 for descending.  N must be a power of two."""
    bitonicSort(a, 0, N, up)
# Driver code to test above
a = []
# Number of elements; must be a power of 2 for the bitonic network to work.
n = int(input())
for i in range(n):
    a.append(int(input()))
# Sort ascending.
up = 1
sort(a, n, up)
print("\n\nSorted array is")
for i in range(n):
    print("%d" % a[i])
| mit | Python | |
7457275762214bef85ad53282eb0bb8bc9d6ddba | Create DetachReAttach.py | Ccantey/ArcGIS-Scripting,Ccantey/ArcGIS-Scripting | DetachReAttach.py | DetachReAttach.py | import csv
import arcpy
import os
import sys
# --- configuration -------------------------------------------------------
# NOTE(review): ``input`` shadows the Python builtin of the same name.
input = r"Database Connections\your.sde\pictures featureclass"
inputField = "NAME"
matchTable = r"C:\Users\<user>\Desktop\matchtable.csv"
matchField = "NAME"
pathField = r"picture Location"
rootdir = r"C:\Root Directory\A-Z pictures\picture"
#get subdirectories (os.walk yields rootdir itself first; it is skipped below)
subdirectories = [x[0] for x in os.walk(rootdir)]
for folders in subdirectories[1:]:
    print folders
    try:
        # create a new Match Table csv file
        # NOTE(review): the handle returned by open() is never closed;
        # ``del writer`` below only drops the csv writer object.
        writer = csv.writer(open(matchTable, "wb"), delimiter=",")
        # write a header row (the table will have two columns: ParcelID and Picture)
        writer.writerow([matchField, pathField])
        # iterate through each picture in the directory and write a row to the table
        for file in os.listdir(folders):
            if str(file).find(".pdf") > -1:
                writer.writerow([str(file).replace(".pdf", ""), file])
        del writer
        # the input feature class must first be GDB attachments enabled
        # arcpy.EnableAttachments_management(input)
        # use the match table with the Remove Attachments tool
        arcpy.RemoveAttachments_management(input, inputField, matchTable, matchField, pathField)
        # use the match table with the Add Attachments tool
        arcpy.AddAttachments_management(input, inputField, matchTable, matchField, pathField, folders)
        print "Finished Attaching Documents in " + folders
    except:
        # NOTE(review): bare except: hides unrelated failures and only
        # reports the last geoprocessing messages.
        print arcpy.GetMessages(2)
| unlicense | Python | |
76d2d97183d4e57f9f59f654fc1302bf99740c0b | add failing test | praekelt/molo,praekelt/molo,praekelt/molo,praekelt/molo | molo/core/tests/test_models.py | molo/core/tests/test_models.py | import pytest
from datetime import datetime, timedelta
from django.test import TestCase
from molo.core.models import ArticlePage
@pytest.mark.django_db
class TestModels(TestCase):
    """Ordering behaviour of live ArticlePage querysets."""

    fixtures = ['molo/core/tests/fixtures/test.json']

    def test_article_order(self):
        """Live articles come back most-recently-published first."""
        now = datetime.now()
        article1 = ArticlePage.objects.get(pk=7)
        article1.first_published_at = now
        article1.save()

        article2 = ArticlePage.objects.get(pk=8)
        article2.first_published_at = now + timedelta(hours=1)
        article2.save()

        # most recent first
        # (assertEqual: assertEquals is a deprecated alias of assertEqual)
        self.assertEqual(
            ArticlePage.objects.live()[0].title, article2.title)

        # swap published date
        article1.first_published_at = now + timedelta(hours=4)
        article1.save()

        self.assertEqual(
            ArticlePage.objects.live()[0].title, article1.title)
| bsd-2-clause | Python | |
1dbfcfd6558a3148ea2726898d65e1e8ef9115fc | Add a management command that imports bug data from YAML files | vipul-sharma20/oh-mainline,Changaco/oh-mainline,openhatch/oh-mainline,SnappleCap/oh-mainline,nirmeshk/oh-mainline,heeraj123/oh-mainline,SnappleCap/oh-mainline,campbe13/openhatch,heeraj123/oh-mainline,eeshangarg/oh-mainline,ehashman/oh-mainline,Changaco/oh-mainline,SnappleCap/oh-mainline,willingc/oh-mainline,sudheesh001/oh-mainline,nirmeshk/oh-mainline,nirmeshk/oh-mainline,sudheesh001/oh-mainline,eeshangarg/oh-mainline,onceuponatimeforever/oh-mainline,willingc/oh-mainline,heeraj123/oh-mainline,SnappleCap/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,ojengwa/oh-mainline,onceuponatimeforever/oh-mainline,heeraj123/oh-mainline,heeraj123/oh-mainline,campbe13/openhatch,openhatch/oh-mainline,onceuponatimeforever/oh-mainline,sudheesh001/oh-mainline,onceuponatimeforever/oh-mainline,eeshangarg/oh-mainline,eeshangarg/oh-mainline,onceuponatimeforever/oh-mainline,vipul-sharma20/oh-mainline,campbe13/openhatch,ojengwa/oh-mainline,willingc/oh-mainline,sudheesh001/oh-mainline,vipul-sharma20/oh-mainline,waseem18/oh-mainline,openhatch/oh-mainline,moijes12/oh-mainline,waseem18/oh-mainline,ojengwa/oh-mainline,openhatch/oh-mainline,campbe13/openhatch,moijes12/oh-mainline,moijes12/oh-mainline,waseem18/oh-mainline,willingc/oh-mainline,Changaco/oh-mainline,ehashman/oh-mainline,ojengwa/oh-mainline,nirmeshk/oh-mainline,SnappleCap/oh-mainline,ehashman/oh-mainline,ehashman/oh-mainline,Changaco/oh-mainline,moijes12/oh-mainline,waseem18/oh-mainline,Changaco/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,openhatch/oh-mainline,willingc/oh-mainline,waseem18/oh-mainline,sudheesh001/oh-mainline,moijes12/oh-mainline,eeshangarg/oh-mainline,ojengwa/oh-mainline,vipul-sharma20/oh-mainline | mysite/customs/management/commands/import_bugimporter_data.py | mysite/customs/management/commands/import_bugimporter_data.py | # This file is part of OpenHatch.
# Copyright (C) 2012 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
import mysite.customs.core_bugimporters
import yaml
class Command(BaseCommand):
    """Management command that loads bug data from YAML files into the
    Bug table, one bug dict at a time."""
    args = '<yaml_file yaml_file ...>'
    help = "Call this command and pass it YAML files to load into the Bug table"

    def handle(self, *args, **options):
        """Parse each YAML file into a list of bug dicts and import them."""
        for yaml_file in args:
            with open(yaml_file) as f:
                s = f.read()
            # NOTE(review): yaml.load can construct arbitrary Python
            # objects; prefer yaml.safe_load unless these files are
            # fully trusted.
            bug_dicts = yaml.load(s)
            for bug_dict in bug_dicts:
                mysite.customs.core_bugimporters.import_one_bug_item(bug_dict)
| agpl-3.0 | Python | |
1314da3ffbaa42aca4a917aef8a230478a22be68 | Add a script that uses the JSON metadata to create ordered symlinks. | webcomics/dosage,mbrandis/dosage,peterjanes/dosage,Freestila/dosage,wummel/dosage,wummel/dosage,blade2005/dosage,Freestila/dosage,mbrandis/dosage,peterjanes/dosage,webcomics/dosage,blade2005/dosage | scripts/order-symlinks.py | scripts/order-symlinks.py | #!/usr/bin/env python
# Copyright (C) 2013 Tobias Gruetzmacher
"""
This script takes the JSON file created by 'dosage -o json' and uses the
metadata to build a symlink farm in the deduced order of the comic. It creates
those in a subdirectory called 'inorder'.
"""
from __future__ import print_function
import sys
import os
import codecs
import json
def jsonFn(d):
    """Return the path of the dosage JSON metadata file inside directory *d*."""
    fn = os.path.join(d, 'dosage.json')
    return fn
def loadJson(d):
    """Read and parse the UTF-8 dosage JSON metadata stored in directory *d*."""
    with codecs.open(jsonFn(d), 'r', 'utf-8') as fp:
        return json.load(fp)
def prepare_output(d):
    """Ensure *d*/inorder exists, remove any stale symlinks from it, and
    return its path."""
    target = os.path.join(d, 'inorder')
    if not os.path.exists(target):
        os.mkdir(target)
    # Drop leftovers from a previous run; only symlinks are touched.
    for name in os.listdir(target):
        entry = os.path.join(target, name)
        if os.path.islink(entry):
            os.remove(entry)
    return target
def create_symlinks(d):
    """Create numbered symlinks in *d*/inorder reflecting the page order.

    Repeatedly picks an unseen page, follows its "prev" chain to consume
    the whole comic strand, then emits one ``NNNNN_<image>`` symlink per
    image from the earliest page to the latest.
    """
    data = loadJson(d)
    outDir = prepare_output(d)

    # list(...) makes the collection indexable and removable; in Python 3
    # dict.keys() is a view that supports neither.
    unseen = list(data["pages"].keys())
    while unseen:
        latest = work = unseen[0]
        # Consume this page and everything reachable through "prev" links.
        while work in unseen:
            unseen.remove(work)
            if "prev" in data["pages"][work]:
                work = data["pages"][work]["prev"]
        print("Latest page: %s" % (latest))

        # Collect images newest-to-oldest, then reverse into reading order.
        order = []
        work = latest
        while work in data["pages"]:
            order.extend(data["pages"][work]["images"].values())
            if "prev" in data["pages"][work]:
                work = data["pages"][work]["prev"]
            else:
                work = None
        order.reverse()
        for i, img in enumerate(order):
            os.symlink(os.path.join('..', img),
                       os.path.join(outDir, '%05i_%s' % (i, img)))
if __name__ == '__main__':
    # Each command line argument is a comic directory to process.
    dirs = sys.argv[1:]
    if not dirs:
        print("Usage: %s comic-dirs" % (os.path.basename(sys.argv[0])))
    else:
        for d in dirs:
            if os.path.exists(jsonFn(d)):
                create_symlinks(d)
            else:
                print("No JSON file found in '%s'." % (d))
| mit | Python | |
a756edd9da035b027e2538228da349592031412e | Create ASCII_Art.py | Alumet/Codingame | Easy/ASCII_Art.py | Easy/ASCII_Art.py | l = int(input())
h = int(input())
t = input()
# One input line per glyph row of the ASCII font.
rows = [input() for _ in range(h)]

# Map every character of the text to its glyph index; anything that is
# not a letter falls through to the extra glyph at index 26.
letters = "abcdefghijklmnopqrstuvwxyz"
columns = []
for ch in t:
    pos = letters.find(ch.lower())
    columns.append(pos if pos != -1 else len(letters))

# Emit the rendered text one glyph row at a time, each glyph being a
# slice of width l from the corresponding font row.
for r in rows:
    print("".join(r[c * l:c * l + l] for c in columns))
| mit | Python | |
3cf77344df5993428cab38f646303e6735ecefd3 | Implement shorthand type constructors for users | gdementen/numba,shiquanwang/numba,sklam/numba,ssarangi/numba,GaZ3ll3/numba,gmarkall/numba,numba/numba,pombredanne/numba,gmarkall/numba,pitrou/numba,cpcloud/numba,pombredanne/numba,gdementen/numba,stuartarchibald/numba,jriehl/numba,gmarkall/numba,GaZ3ll3/numba,numba/numba,ssarangi/numba,sklam/numba,IntelLabs/numba,sklam/numba,GaZ3ll3/numba,gmarkall/numba,IntelLabs/numba,pitrou/numba,shiquanwang/numba,jriehl/numba,cpcloud/numba,numba/numba,cpcloud/numba,seibert/numba,stonebig/numba,stuartarchibald/numba,numba/numba,stefanseefeld/numba,seibert/numba,numba/numba,stefanseefeld/numba,seibert/numba,seibert/numba,jriehl/numba,stuartarchibald/numba,ssarangi/numba,GaZ3ll3/numba,IntelLabs/numba,IntelLabs/numba,stefanseefeld/numba,stonebig/numba,stonebig/numba,pitrou/numba,sklam/numba,cpcloud/numba,stefanseefeld/numba,pitrou/numba,pitrou/numba,gdementen/numba,stonebig/numba,stuartarchibald/numba,IntelLabs/numba,stuartarchibald/numba,stefanseefeld/numba,pombredanne/numba,ssarangi/numba,pombredanne/numba,pombredanne/numba,cpcloud/numba,gdementen/numba,sklam/numba,shiquanwang/numba,stonebig/numba,jriehl/numba,GaZ3ll3/numba,jriehl/numba,gmarkall/numba,seibert/numba,gdementen/numba,ssarangi/numba | numba/typesystem/shorthands.py | numba/typesystem/shorthands.py | """
Shorthands for type constructing, promotions, etc.
"""
from numba.typesystem import *
from numba.minivect import minitypes
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
def is_obj(type):
    # True for object-like types: plain objects and arrays.
    # (the parameter shadows the builtin ``type``)
    return type.is_object or type.is_array
# Map (itemsize, signedness) -> native integral type of that width, used
# by promote_to_native below.
native_type_dict = {}
for native_type in minitypes.native_integral:
    native_type_dict[(native_type.itemsize, native_type.signed)] = native_type
def promote_to_native(int_type):
    """Return the native integral type with the same size and signedness
    as *int_type* (KeyError when there is no native match)."""
    return native_type_dict[int_type.itemsize, int_type.signed]
def promote_closest(context, int_type, candidates):
    """
    Return the first candidate that *int_type* promotes to without
    changing size or signedness, e.g.::

        promote_closest(Py_ssize_t, [int_, long_, longlong]) -> longlong

    Falls back to the last candidate when none matches exactly.
    """
    for cand in candidates:
        merged = context.promote_types(int_type, cand)
        if (merged.itemsize, merged.signed) == (cand.itemsize, cand.signed):
            return cand
    return candidates[-1]
def get_type(ast_node):
    """Return the type of *ast_node* (a Numba or Python AST expression
    node), as recorded on its ``variable`` attribute."""
    variable = ast_node.variable
    return variable.type
#------------------------------------------------------------------------
# Type shorthands
#------------------------------------------------------------------------
# Short aliases for the scalar types star-imported from numba.typesystem:
# O = object; bN/iN/uN/fN/cN = bool/int/uint/float/complex of N bytes.
O = object_
b1 = bool_
i1 = int8
i2 = int16
i4 = int32
i8 = int64
u1 = uint8
u2 = uint16
u4 = uint32
u8 = uint64
f4 = float32
f8 = float64
f16 = float128
c8 = complex64
c16 = complex128
c32 = complex256
#------------------------------------------------------------------------
# Type Constructor Shorthands
#------------------------------------------------------------------------
def from_numpy_dtype(np_dtype):
    """
    :param np_dtype: the NumPy dtype (e.g. np.dtype(np.double))
    :return: a dtype type representation wrapping the mapped minitype
    """
    minitype = minitypes.map_dtype(np_dtype)
    return dtype(minitype)
def dtype(dtype_type):
    """
    :param dtype_type: the Numba dtype type (e.g. double)
    :return: a dtype type representation
    """
    # NOTE: assert is stripped under ``python -O``; validation is best-effort.
    assert isinstance(dtype_type, minitypes.Type)
    return NumpyDtypeType(dtype_type)
def array(dtype, ndim):
    """
    :param dtype: the Numba dtype type (e.g. double)
    :param ndim: the array dimensionality (int)
    :return: an array type representation; a zero-dimensional "array" is
             just the element type itself
    """
    if ndim:
        return minitypes.ArrayType(dtype, ndim)
    return dtype
def tuple_(base_type, size=-1):
    """
    :param base_type: the element type of the tuple
    :param size: set to a value >= 0 if the size is known
    :return: a tuple type representation
    """
    return TupleType(base_type, size)
def list_(base_type, size=-1):
    """
    :param base_type: the element type of the list
    :param size: set to a value >= 0 if the size is known
    :return: a list type representation
    """
    return ListType(base_type, size)
| bsd-2-clause | Python | |
66591bd481a8c78f9563ef001f0d799a38130869 | add version file | PaloAltoNetworks-BD/autofocus-client-library | autofocus/version.py | autofocus/version.py | __version__ = "1.2.0"
| isc | Python | |
8d1d0719b2d43e49f9e3403d147201b34b526a81 | Add SubscriptionInfo class | CartoDB/cartoframes,CartoDB/cartoframes | cartoframes/data/observatory/subscription_info.py | cartoframes/data/observatory/subscription_info.py |
class SubscriptionInfo(object):
    """Read-only wrapper around a raw subscription-info payload.

    Every property looks its value up in the wrapped dictionary and
    returns ``None`` when the key is absent.
    """

    def __init__(self, raw_data):
        self._raw_data = raw_data

    def _field(self, key):
        # Shared dictionary lookup used by every property below.
        return self._raw_data.get(key)

    @property
    def id(self):
        return self._field('id')

    @property
    def estimated_delivery_days(self):
        return self._field('estimated_delivery_days')

    @property
    def subscription_list_price(self):
        return self._field('subscription_list_price')

    @property
    def tos(self):
        return self._field('tos')

    @property
    def tos_link(self):
        return self._field('tos_link')

    @property
    def licenses(self):
        return self._field('licenses')

    @property
    def licenses_link(self):
        return self._field('licenses_link')

    @property
    def rights(self):
        return self._field('rights')
| bsd-3-clause | Python | |
c8f868e24378eebd31a84b5da29d27361eb987de | Create inmoov3.minimalHead.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/hairygael/inmoov3.minimalHead.py | home/hairygael/inmoov3.minimalHead.py | #file : InMoov3.minimalHead.py
# this will run with versions of MRL above 1695
# a very minimal script for InMoov
# although this script is very short you can still
# do voice control of a right hand or finger box
# It uses WebkitSpeechRecognition, so you need to use Chrome as your default browser for this script to work
# Start the webgui service without starting the browser
webgui = Runtime.create("WebGui","WebGui")
webgui.autoStartBrowser(False)
webgui.startService()
# Then start the browsers and show the WebkitSpeechRecognition service named i01.ear
webgui.startBrowser("http://localhost:8888/#/service/i01.ear")
# As an alternative you can use the line below to show all services in the browser. In that case you should comment out all lines above that starts with webgui.
# webgui = Runtime.createAndStart("webgui","WebGui")
# Change to the port that you use
leftPort = "COM20"
i01 = Runtime.createAndStart("i01", "InMoov")
i01.startEar()
# starting parts
i01.startMouth()
mouth = i01.mouth
i01.startMouthControl(leftPort)
#to tweak the default voice
i01.mouth.setVoice("Ryan")
##############
i01.startHead(leftPort)
##############
# tweaking default settings of Head (uncomment to calibrate your servos)
#i01.head.jaw.setMinMax(43,101)
#i01.head.jaw.map(0,180,43,101)
#i01.mouthControl.setmouth(43,95)
#i01.head.jaw.setRest(43)
# tweaking default settings of eyes
#i01.head.eyeY.setMinMax(63,107)
#i01.head.eyeY.map(0,180,107,63)
#i01.head.eyeY.setRest(90)
#i01.head.eyeX.setMinMax(64,105)
#i01.head.eyeX.map(0,180,105,64)
#i01.head.eyeX.setRest(90)
#i01.head.neck.setMinMax(55,105)
#i01.head.neck.map(0,180,105,55)
#i01.head.neck.setRest(70)
#i01.head.rothead.setMinMax(45,135)
#i01.head.rothead.map(0,180,45,135)
#i01.head.rothead.setRest(86)
#################
i01.startEyesTracking(leftPort)
i01.startHeadTracking(leftPort)
############################################################
#to tweak the default PID values
i01.eyesTracking.xpid.setPID(25.0,5.0,0.1)
i01.eyesTracking.ypid.setPID(25.0,5.0,0.1)
i01.headTracking.xpid.setPID(15.0,5.0,0.2)
i01.headTracking.ypid.setPID(15.0,5.0,0.2)
############################################################
# verbal commands: each maps a spoken phrase to a service method or a
# python function defined later in this script
ear = i01.ear
ear.attach(mouth)
ear.addCommand("rest", "python", "rest")
ear.addCommand("attach head", "i01.head", "attach")
ear.addCommand("disconnect head", "i01.head", "detach")
ear.addCommand("attach eyes", "i01.head.eyeY", "attach")
ear.addCommand("disconnect eyes", "i01.head.eyeY", "detach")
ear.addCommand("capture gesture", ear.getName(), "captureGesture")
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addCommand("search humans", "python", "trackHumans")
ear.addCommand("quit search", "python", "stopTracking")
ear.addCommand("track", "python", "trackPoint")
ear.addCommand("freeze track", "python", "stopTracking")
ear.addCommand("look on your right side", "python", "lookrightside")
# NOTE(review): this command targets a python function named lookleftside --
# confirm such a function is defined in this script.
ear.addCommand("look on your left side", "python", "lookleftside")
ear.addCommand("look in the middle", "python", "lookinmiddle")
# Confirmations and Negations are not supported yet in WebkitSpeechRecognition
# So commands will execute immediately
ear.addComfirmations("yes","correct","yeah","ya")
ear.addNegations("no","wrong","nope","nah")
ear.startListening()
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", "python", "heard")
#inmoov.addTextListener(i01.mouth)
def lookrightside():
    # Voice command "look on your right side": turn the head to the right.
    i01.setHeadSpeed(0.70, 0.70)
    i01.moveHead(85,40)
def lookleftside():
    # Was a second ``def lookrightside`` with an identical body: it silently
    # shadowed the definition above and left the "look on your left side"
    # voice command without a target function.  Renamed and mirrored the
    # rotation angle about the rothead rest position.
    # TODO(review): confirm 130 suits your rothead min/max calibration.
    i01.setHeadSpeed(0.70, 0.70)
    i01.moveHead(85,130)
def lookinmiddle():
    # Voice command "look in the middle": move the head back to center.
    i01.setHeadSpeed(0.70, 0.70)
    i01.moveHead(85,86)
| apache-2.0 | Python | |
84edade82d83129afe88a6df7748aab551619c38 | Create examples.py | aikramer2/spaCy,aikramer2/spaCy,explosion/spaCy,aikramer2/spaCy,recognai/spaCy,spacy-io/spaCy,recognai/spaCy,aikramer2/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,honnibal/spaCy,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,aikramer2/spaCy,honnibal/spaCy,honnibal/spaCy,honnibal/spaCy,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy | spacy/lang/hi/examples.py | spacy/lang/hi/examples.py | # coding: utf8
from __future__ import unicode_literals
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.hi.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
# Hindi example sentences used to exercise spaCy and its language models.
sentences = [
    "एप्पल 1 अरब डॉलर के लिए यू.के. स्टार्टअप खरीदने पर विचार कर रहा है",
    "स्वायत्त कार निर्माताओं की ओर बीमा दायित्व रखती है",
    "सैन फ्रांसिस्को फुटवे डिलीवरी रोबोटों पर प्रतिबंध लगाने का विचार कर रहा है",
    "लंदन यूनाइटेड किंगडम का बड़ा शहर है।",
    "आप कहाँ हैं?",
    "फ्रांस के राष्ट्रपति कौन हैं?",
    "संयुक्त राज्य की राजधानी क्या है?",
    "बराक ओबामा का जन्म हुआ था?"
]
| mit | Python | |
8180c84a98bec11308afca884a4d7fed4738403b | Add tests for new Levenshtein alignment | explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,recognai/spaCy,recognai/spaCy,honnibal/spaCy,recognai/spaCy,spacy-io/spaCy,recognai/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,aikramer2/spaCy,honnibal/spaCy,aikramer2/spaCy,recognai/spaCy,aikramer2/spaCy,aikramer2/spaCy | spacy/tests/test_align.py | spacy/tests/test_align.py | import pytest
from .._align import align
@pytest.mark.parametrize('string1,string2,cost', [
    (b'hello', b'hell', 1),
    (b'rat', b'cat', 1),
    (b'rat', b'rat', 0),
    (b'rat', b'catsie', 4),
    (b't', b'catsie', 5),
])
def test_align_costs(string1, string2, cost):
    # align() returns (cost, i2j, j2i, matrix); only the edit cost is
    # checked here.
    output_cost, i2j, j2i, matrix = align(string1, string2)
    assert output_cost == cost
@pytest.mark.parametrize('string1,string2,i2j', [
    (b'hello', b'hell', [0,1,2,3,-1]),
    (b'rat', b'cat', [0,1,2]),
    (b'rat', b'rat', [0,1,2]),
    (b'rat', b'catsie', [0,1,2]),
    (b't', b'catsie', [2]),
])
def test_align_i2j(string1, string2, i2j):
    # i2j maps each position of string1 to its aligned position in
    # string2, with -1 for unaligned characters.
    output_cost, output_i2j, j2i, matrix = align(string1, string2)
    assert list(output_i2j) == i2j
@pytest.mark.parametrize('string1,string2,j2i', [
    (b'hello', b'hell', [0,1,2,3]),
    (b'rat', b'cat', [0,1,2]),
    (b'rat', b'rat', [0,1,2]),
    (b'rat', b'catsie', [0,1,2, -1, -1, -1]),
    (b't', b'catsie', [-1, -1, 0, -1, -1, -1]),
])
def test_align_j2i(string1, string2, j2i):
    # Renamed from test_align_i2j: the duplicate name shadowed the i2j
    # test above, so pytest never collected it.
    output_cost, output_i2j, output_j2i, matrix = align(string1, string2)
    assert list(output_j2i) == j2i
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.