commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
50a7b9bc262c98f4e387746f67a638f50f94ba38 | Add migration merge | portal/migrations/versions/e396fb1974ef_.py | portal/migrations/versions/e396fb1974ef_.py | from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: e396fb1974ef
Revises: ('773b1de060dd', '3271a78bbc8b')
Create Date: 2018-04-24 12:19:56.689921
"""
# revision identifiers, used by Alembic.
revision = 'e396fb1974ef'
down_revision = ('773b1de060dd', '3271a78bbc8b')
def upgrade():
    # Merge migration only; there are no schema changes to apply.
    pass
def downgrade():
    # Merge migration only; there is nothing to revert.
    pass
| Python | 0.000001 | |
85c67110db1fbb5a25faef36bdfe282952f5a034 | Create __init__.py | src/__init__.py | src/__init__.py | Python | 0.000429 | ||
d686f54aff87d4dea1266ee1fec8c1c320dee5b9 | add tests for fizz_buzz. | test_fizz_buzz.py | test_fizz_buzz.py | from cStringIO import StringIO
import sys
import fizz_buzz
class Capture(list):
    """Context manager for capturing stdout.

    On exit, everything written to stdout inside the ``with`` block is
    split into lines and extended onto this object (Capture is a list).
    """
    def __enter__(self):
        # Swap sys.stdout for an in-memory buffer; keep the real one to restore.
        self._stdout = sys.stdout
        sys.stdout = self._stringio = StringIO()
        return self
    def __exit__(self, *args):
        # Record the captured lines on self, then restore the real stdout.
        self.extend(self._stringio.getvalue().splitlines())
        sys.stdout = self._stdout
def test_fizz_buzz():
    """Run fizz_buzz.main on the bundled fixture and compare its stdout."""
    # NOTE(review): presumably F/B/FB mark Fizz/Buzz/FizzBuzz substitutions
    # for the cases in input_test.txt — confirm against fizz_buzz.main.
    expected = ['1 2 F 4 B F 7 8 F B', '1 F 3 F 5 F B F 9 F 11 F 13 FB 15']
    with Capture() as output:
        fizz_buzz.main("input_test.txt")
    # Same number of lines, and each line must match exactly.
    assert len(expected) == len(output)
    for ex, act in zip(expected, output):
        assert ex == act
| Python | 0 | |
cab46eb2323a062bff8bedbf1cc8dd036cd52044 | Solve Code Fights frequency analysis problem | CodeFights/frequencyAnalysis.py | CodeFights/frequencyAnalysis.py | #!/usr/local/bin/python
# Code Fights Frequency Analysis Problem
from collections import Counter
def frequencyAnalysis(encryptedText):
    """Return the single most frequent character in *encryptedText*.

    Ties are broken in favour of the character that appears first in the
    string (same behaviour as Counter.most_common(1)).
    """
    counts = Counter(encryptedText)
    return max(counts, key=counts.get)
def main():
    """Exercise frequencyAnalysis against bundled cipher-text fixtures."""
    # Each entry is [cipher_text, expected_most_frequent_character].
    tests = [
        ["$~NmiNmim$/NVeirp@dlzrCCCCfFfQQQ", "C"],
        ["Agoodglassinthebishop'shostelinthedevil'sseattwenty-onedegreesandthirteenminutesnortheastandbynorthmainbranchseventhlimbeastsideshootfromthelefteyeofthedeath's-headabeelinefromthetreethroughtheshotfiftyfeetout.", "e"],
        ["Q", "Q"],
        ["):<<}:BnUUKc=>~LKU><,;U><U=~BKc=>~}~jKB;UU~n== ~c=fS<c~}~:w~~Unc}=>Kw=~~ceKc*=~Uc<w=>~nU=nc}Lfc<w=>enKcLwncY>U~j~c=>BKeL~nU=UK}~U><<=mw<e=>~B~m=~f~<m=>~}~n=>;US>~n}nL~~BKc~mw<e=>~=w~~=>w<*:>=>~U><=mKm=fm~~=<*=k", "~"],
        ["(:c:@%aF;:NBo@o:'X:%CFCBoFB@X@iFCTPc@iFi::@o%;@a!PXCF:iTcCNbCFPoFCc;:YCo%a@a}Pcco@Cc:%@FF;::o%BYBo:bi@oT;=nFv:|i@o%`a%Ci:TFCBo<!PXCF:i%iBXaF;:bP|F;iBP|;Bo:: :aBT}:F@o%v:|i@o%X@T:aBPFFB@aXBF*;:i:F;:|iBPXb:|CoaFB%C|=$Co%Co|oBF;Co|F;:i:<v:|i@o%;@a!PXCF:iTcCNbF;:Fi::@|@Co@o%%iBXF;:bP|F;iBP|;F;:a}Pcc`aBF;:i: :dF;: T;BBa:@%CYY:i:oFaXBFFB%C|<F;CaFCN:YCo%Co|F*Ba}:c:FBoa@o%@T;:aFYCcc:%*CF;|Bc%TBCoa@o%D:*:ci =V;: :aFCN@F:F;:FBF@cE@cP:@Fj1=5NCccCBo<bPF:E:oF;@FYC|Pi:XiBE:aFBb:b:cB*F;:@TFP@c*BiF;*;:oF;: :E:oFP@cc a:ccF;:CF:Na=", ":"]
    ]
    # Report PASSED/FAILED per fixture rather than raising.
    for t in tests:
        res = frequencyAnalysis(t[0])
        if t[1] == res:
            print("PASSED: frequencyAnalysis({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: frequencyAnalysis({}) returned {},"
                   "answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
| Python | 0.000077 | |
4fa2ca578b7015bee68f9f2f7bc26df2f7ab01b4 | add test_cli.py module and initial test | tests/test_cli.py | tests/test_cli.py | #!/usr/bin/env python
# coding: utf8
"""
Unit tests for cli functionality
"""
# --- Imports
import subprocess
import geocoder
# --- Constants
_CLI_EX = './geocoder/cli.py' # CLI executable path
us_address = '595 Market St'
us_city = 'San Francisco'
us_state = 'CA'
us_zipcode = '94105'
location = ' '.join([us_address, us_city, us_state, us_zipcode])
# --- CLI tests. Each shell call should have return code 0 if successfull.
def test_cli_default():
    # default provider cli test
    # subprocess.call returns the process exit status; 0 (falsy) means the
    # CLI succeeded, so any non-zero status fails this assertion.
    assert not subprocess.call(['python', _CLI_EX, location])
| Python | 0 | |
8f4ac0b12c0f83ff892e16e312cc5edbfb089850 | add tests for no config startup | tests/test_cli.py | tests/test_cli.py |
from click.testing import CliRunner
from vaping import cli
import pytest
def test_start_no_home():
    """`vaping start` with no config should raise a descriptive ValueError."""
    runner = CliRunner()
    with pytest.raises(ValueError) as excinfo:
        # catch_exceptions=False lets the error propagate to pytest.raises.
        runner.invoke(cli.cli, ['start'], catch_exceptions=False)
    assert str(excinfo.value).startswith('no config specified')
| Python | 0 | |
1f2f4837e823ff6cc5c9cb961f4852753926f0d7 | Create __init__.py | tumor/__init__.py | tumor/__init__.py | Python | 0.000429 | ||
2939b4507b8cbacb3ab2e92e81efc81e5d7b9111 | Create Scraper_OOped.py | Scraper_OOped.py | Scraper_OOped.py | import requests
from bs4 import BeautifulSoup
from time import sleep
import datetime
import random
class CraigslistCity:
    """Lightweight record pairing a Craigslist city name with its search URL."""

    def __init__(self, Name, URL):
        # Stored under the lower-case attribute names used by the scraper.
        self.name, self.url = Name, URL
class CraigslistListing:
    """Record for one scraped listing: post time, id, title and link."""

    def __init__(self, TimePosted, ID, Title, Link):
        (self.TimePosted, self.ListingID,
         self.Title, self.Link) = TimePosted, ID, Title, Link
class Scraper:
    """Scrapes Craigslist 'computer gigs' pages for keyword-matching listings.

    Usage: call Scraper.FindActiveCities() once, construct an instance with
    the keywords to match, then ScrapeCity(index) followed by
    OutputListingsToFile(path).
    """

    # Shared across all instances; populated by FindActiveCities().
    ActiveCities = []
    ActiveCitiesFound = False

    @staticmethod
    def FindActiveCities():
        """Populate Scraper.ActiveCities from the Craigslist site directory.

        Returns -1 on failure (and leaves ActiveCitiesFound False).
        """
        WebResponse = requests.get("http://www.craigslist.org/about/sites")
        if WebResponse.status_code == 200:
            parsedHTML = BeautifulSoup(WebResponse.text, "html.parser")
            if parsedHTML:
                # The third <div> of the first section lists the US cities.
                usadiv = parsedHTML.body.section.find_all('div')[2]
                for cityItem in usadiv.find_all('li'):
                    # 'search/cpg' is the computer-gigs section of each city.
                    city_url = "https:" + cityItem.a['href'] + 'search/cpg'
                    Scraper.ActiveCities.append(
                        CraigslistCity(cityItem.string, city_url))
        if not Scraper.ActiveCities:
            print("Scraper failed to find any active cities")
            return -1
        Scraper.ActiveCitiesFound = True

    @staticmethod
    def SeedTimeout():
        """Seed the RNG that drives the polite inter-request delays."""
        random.seed()

    def __init__(self, Keywords, TimeoutRange=(10, 17), MaxListingAge=1):
        """Create a scraper.

        Keywords      -- lower-case keywords to look for in listing titles.
        TimeoutRange  -- (min, max) seconds slept between listings.
        MaxListingAge -- maximum listing age to consider, in days.

        The two former duplicate __init__ overloads are merged here: the
        second definition silently shadowed the first, so the one-argument
        form never worked. Defaults keep both call styles valid.
        """
        self.SearchKeywords = Keywords
        self.Timeout = TimeoutRange
        self.MaxListingAge = datetime.timedelta(days=MaxListingAge)

    def RefillKeywords(self, Keywords):
        """Replace the keyword list (fixes the old NameError on 'keywords')."""
        self.SearchKeywords = Keywords

    def ScrapeCity(self, Index):
        """Scrape one city (index into ActiveCities) into self.Listings.

        Returns 'Success', -1 when the city list is missing, or
        'Error Occured' on request/index errors (spelling kept for
        compatibility with existing callers).
        """
        self.Listings = []
        StartTime = datetime.datetime.today()
        if not Scraper.ActiveCitiesFound:
            print("Cannot scrape city, no active cities found or static class function FindActiveCities has not been called")
            return -1
        if 0 <= Index < len(Scraper.ActiveCities):
            CityURL = Scraper.ActiveCities[Index].url
            try:
                CityResponse = requests.get(CityURL)
                print(CityURL)
            except requests.RequestException as exc:
                # The old handler referenced undefined names and crashed.
                print("Request failed for index {}: {}".format(Index, exc))
                return "Error Occured"
            if CityResponse.status_code == 200:
                print('ok')
                parsedCityHTML = BeautifulSoup(CityResponse.text, 'html.parser')
                listing = parsedCityHTML.find('p')
                while listing is not None:
                    TimePostedData = listing.find('time')['datetime']
                    DateTimePosted = datetime.datetime.strptime(
                        TimePostedData, '%Y-%m-%d %H:%M')
                    if (StartTime - DateTimePosted) > self.MaxListingAge:
                        break  # listings are newest-first; stop at the cutoff
                    ValidListing = False
                    ListingContainers = listing.find_all('a')
                    ListingTitle = ListingContainers[1].text.lower()
                    ListingID = ListingContainers[1]['data-id']
                    ListingLinkSuffix = ListingContainers[1]['href']
                    # BeautifulSoup exposes 'class' as a list of names, so
                    # membership (not equality) must be tested here.
                    if 'hdrlnk' in ListingContainers[1]['class']:
                        ValidListing = True
                    else:
                        for container in ListingContainers:
                            if 'hdrlnk' in container.get('class', ()):
                                ListingTitle = container.text
                                ListingID = container['data-id']
                                ListingLinkSuffix = container['href']
                                ValidListing = True
                    if ValidListing:
                        # Match the instance keywords; the old code read the
                        # module-level KeyWords global instead.
                        if any(k in ListingTitle for k in self.SearchKeywords):
                            self.Listings.append(CraigslistListing(
                                TimePostedData, ListingID, ListingTitle,
                                CityURL + ListingLinkSuffix))
                    # Be nice to the servers, honouring the configured range
                    # (the old code hard-coded 10-17 seconds).
                    sleep(random.uniform(self.Timeout[0], self.Timeout[1]))
                    listing = listing.find_next_sibling('p')
                return 'Success'
        else:
            print('Invalid Index: {}'.format(Index))
            if Scraper.ActiveCities:
                print('Allowed range (inclusive) is (0, {})'.format(
                    len(Scraper.ActiveCities) - 1))
        return "Error Occured"

    @staticmethod
    def FindCityIndexByName(CityName):
        """Return the ActiveCities index matching CityName (case-insensitive).

        Returns -1 when the city list has not been populated, and None when
        the name is unknown (unchanged from the original contract).
        """
        if not Scraper.ActiveCitiesFound:
            print("Cannot scrape city, no active cities found or static class function FindActiveCities has not been called")
            return -1
        for CityIndex, city in enumerate(Scraper.ActiveCities):
            if city.name.lower() == CityName.lower():
                return CityIndex

    def OutputListingsToFile(self, FileName):
        """Write one 'time, title, link' line per scraped listing."""
        with open(FileName, 'w') as fouthandle:
            for Listing in self.Listings:
                NewLine = ', '.join(
                    part.encode('ascii', 'replace').decode('utf-8')
                    for part in (Listing.TimePosted, Listing.Title,
                                 Listing.Link)) + '\n'
                # str.replace returns a new string; the old code threw the
                # result away, leaving '?' placeholders in the output.
                NewLine = NewLine.replace('??', '').replace('?', '')
                fouthandle.write(NewLine)
# Script entry: scrape Chicago for programming/tutoring gigs and dump results.
KeyWords = ['javascript', 'c++', 'c/c++', 'programmer', 'coder', 'developer', 'python', 'html', 'css', 'tutoring', 'tutor', 'project', 'problem', 'help']
TimoutRange = [10,17]
MaxListingAge = 1
ScraperInstance = Scraper(KeyWords, TimoutRange, MaxListingAge)
Scraper.SeedTimeout()
# Runs a network request at import time to discover every US city page.
Scraper.FindActiveCities()
#if(Scraper.ActiveCitiesFound):
#    Successes = 0
#    Failures = 0
#    for CityIndex in range(0, 10):
#        Result = ScraperInstance.ScrapeCity(CityIndex)
#        if Result == 'Success':
#            print('ok')
#            Successes+=1
#        else:
#            print("Error occurred while scraping index: " + str(CityIndex) + '\n')
#            ErrorCity = Scraper.ActiveCities[Index].name
#            print("This is: " + ErrorCity.name + '\nURL is: ' + ErrorCity.url)
#            Failures+=1
#print("(Successes, Failures) : (" + str(Successes) + ', ' + str(Failures) + ')')
ChicagoIndex = Scraper.FindCityIndexByName("Chicago")
ScraperInstance.ScrapeCity(ChicagoIndex)
ScraperInstance.OutputListingsToFile("ScraperResults_OOP.txt")
print('Finished')
| Python | 0 | |
f146583961733feb90567fdf03a6a5ee122c550f | Create r34.py | r34.py | r34.py | # Aradiabot function for searching rule34.xxx
# As they don't have an API, this was easier to put in it's own file so I could organize everything.
import requests
from html.parser import HTMLParser
import random
import sys
counter = [10,9,8,7,6,5,4,3,2,1]
images = []
class booruparser(HTMLParser):
def handle_starttag(self, tag, attrs):
if tag == 'a':
if any('id' in pairs for pairs in attrs):
try:
images.append(str(attrs[1][1]))
except:
pass
class imageparser(HTMLParser):
def handle_starttag(self, tag, attrs):
if ('id', 'image') in attrs:
print("http:" + attrs[2][1])
parser = booruparser()
imgparser = imageparser()
tags = ""
for arg in sys.argv:
if arg == sys.argv[0]:
pass
else:
tags = tags + arg + "+"
count = 0
while len(images) < 1:
if count < 10:
parser.feed(requests.get('http://rule34.xxx/index.php?page=post&s=list&tags=' + tags + '&pid=' + str(counter[count])).text)
count = count + 1
else:
break
if count != 10:
image = requests.get('http://rule34.xxx/' + random.choice(images)).text
imgparser.feed(image)
else:
print("0")
| Python | 0.000001 | |
d0287d9deaa3eb03076cdd199414b772a291e2c5 | Add command for moving zips | calaccess_website/management/commands/mvzips.py | calaccess_website/management/commands/mvzips.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Move downloaded and cleaned zips to their proper place in the raw data archived dir.
"""
import boto3
from django.conf import settings
from calaccess_raw.management.commands import CalAccessCommand
from calaccess_raw.models.tracking import RawDataVersion
import logging
logger = logging.getLogger(__name__)
class Command(CalAccessCommand):
    """
    Move downloaded and cleaned zips to their proper place in the raw data archived dir.
    """
    help = 'Move downloaded and cleaned zips to their proper place in the raw data archived dir'

    def handle(self, *args, **options):
        """
        Make it happen.
        """
        super(Command, self).handle(*args, **options)
        # set up boto session
        self.session = boto3.Session(
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
            region_name=settings.AWS_S3_REGION_NAME
        )
        # and client
        self.client = self.session.client('s3')
        # loop over all the versions
        # NOTE(review): versions 33 and 34 are skipped — presumably known-bad
        # archives; confirm before removing the excludes.
        for v in RawDataVersion.objects.exclude(id=34).exclude(id=33):
            # archives are stored as '<datetime>/<name>.<ext>'; flatten each
            # to a single-key name so both live in the archive root.
            if v.download_zip_archive:
                download_datetime, download_fullname = \
                    v.download_zip_archive.name.split('/')
                download_filename, download_ext = download_fullname.split('.')
                new_download_path = '{fn}_{dt}.{fx}'.format(
                    fn=download_filename,
                    dt=download_datetime,
                    fx=download_ext
                )
                self._move_archive(v.download_zip_archive, new_download_path)
            if v.clean_zip_archive:
                clean_datetime, clean_fullname = v.clean_zip_archive.name.split('/')
                # the original filename is discarded: cleaned archives are
                # always renamed to 'clean_<datetime>.<ext>'
                _clean_filename, clean_ext = clean_fullname.split('.')
                new_clean_path = 'clean_{dt}.{fx}'.format(
                    dt=clean_datetime,
                    fx=clean_ext
                )
                self._move_archive(v.clean_zip_archive, new_clean_path)
            # save the version with its updated file field(s)
            v.save()

    def _move_archive(self, archive_field, new_path):
        """
        Copy the S3 object behind ``archive_field`` to ``new_path`` and point
        the field at the new key. (The old object is not deleted.)
        """
        initial_path = archive_field.name
        logger.debug('Move {0} to {1}'.format(initial_path, new_path))
        self.client.copy_object(
            Bucket=settings.AWS_STORAGE_BUCKET_NAME,
            Key=new_path,
            CopySource={
                'Bucket': settings.AWS_STORAGE_BUCKET_NAME,
                'Key': initial_path,
            },
        )
        # reset file name so the model now references the new key
        archive_field.name = new_path
| Python | 0.000001 | |
cdd1f3410b8ae304485f7992ac6048e1277cffe1 | Add local locale from file | parsedatetime/pdt_locales/__init__.py | parsedatetime/pdt_locales/__init__.py | # -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
import os
try:
import PyICU as pyicu
except:
pyicu = None
import yaml
def lcase(x):
    """Return *x* lower-cased."""
    return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
pdtLocales = [
'icu',
'en_US',
'en_AU',
'es_ES',
'de_DE',
'nl_NL',
'ru_RU',
]
def load_yaml(path):
    """
    Read yaml data from filepath
    :param path:
    :return: parsed data (dict for the locale files)
    """
    with open(path, 'r') as fio:
        # safe_load restricts parsing to plain data types: yaml.load without
        # an explicit Loader is unsafe and is an error under PyYAML >= 6.
        # The locale files only contain plain mappings, so this is lossless.
        return yaml.safe_load(fio.read())
def _get_yaml_path(locale):
"""
Return filepath of locale file
:param locale:
:return:
"""
return os.path.join(os.path.dirname(__file__), '%s.yaml' % locale)
def load_locale(locale):
    """
    Return data of locale: the base locale data overlaid with the
    locale-specific values.
    :param locale:
    :return: merged dict of locale data
    """
    assert locale in pdtLocales, "The locale '%s' is not supported" % locale
    _data_base = load_yaml(_get_yaml_path('base'))
    # dict.update() mutates in place and returns None, so the merged dict
    # must be returned explicitly (the original returned None). Passing the
    # dict directly also avoids **-expansion, which requires string keys.
    _data_base.update(load_yaml(_get_yaml_path(locale)))
    return _data_base
load_locale('ru_RU')
| # -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
try:
import PyICU as pyicu
except:
pyicu = None
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
| Python | 0.000001 |
c98039a25638db0c124efeaa394f89f2a84a2ede | Create aekjdfh.py | aekjdfh.py | aekjdfh.py | sdl;jfhlkjsdhfhdf
| Python | 0.000037 | |
7e754367f44d8a6764ec9062fce027520702b376 | move archive.py to top directory | archive.py | archive.py | #!/usr/bin/env python
# Copyright (c) 2010, Aaron DeVore
# Released under the Don't Be A Douchbag License.
# Use responsibly. Contribute changes if you feel like it. No CP!
import urllib2
import os
import posixpath
import json
import time
from optparse import OptionParser
from BeautifulSoup import BeautifulSoup, Tag
import htmlentitydefs
# Command-line interface definition for the thread archiver.
USAGE = "%prog [options] <thread ID> <base directory>"
options = OptionParser(usage=USAGE)
options.add_option("-b", "--board", dest="board",
    default='b', help="board name")
options.add_option("-o", "--overwrite-images", dest="overwriteImages",
    default=False, help="Overwrite non-empty images", action="store_true")
options.add_option("-u", "--update", dest="update", action="store_true",
    default=False, help="update the thread")
options.add_option("-p", "--pause-update", type="int", dest="pauseUpdate",
    default=100, help="Wait time between thread updates")
options.add_option("--pause-image", type="int", dest="pauseImage",
    default=1, help="Wait time between image downloads")
options.add_option("-n", "--no-pics", action="store_false", dest="pics", default=True,
    help="Do not download pictures")
class Post(object):
    """A single 4chan post: id, text, poster, timestamp and optional image."""

    def __init__(self, postID, text, poster, timestamp, image, imageTitle):
        self.id = postID
        self.text = text
        self.poster = poster
        self.timestamp = timestamp
        if not image:
            # No attached file: all image fields collapse to None.
            self.image = None
            self.imageURL = None
            self.imageTitle = None
        else:
            self.imageURL = image
            self.image = posixpath.basename(image)  # local filename only
            self.imageTitle = imageTitle

    def __repr__(self):
        suffix = u"with %(image)s" if self.image else u"with no image"
        return (u"%(id)s by %(poster)s " + suffix) % self.__dict__
def getSoup(board, thread):
    """Fetch the thread page and return it parsed as a BeautifulSoup tree."""
    url = "http://boards.4chan.org/%s/res/%s" % (board, thread)
    print "downloading thread %s for board %s at %s" % (thread, board, url)
    f = urllib2.urlopen(url)
    soup = BeautifulSoup(f)
    f.close()
    return soup
def getText(tag, seperator=u""):
    """
    Get all child text for a tag.

    <br> tags become newlines, named HTML entities (&amp; etc.) are decoded
    to their unicode characters, and the pieces are joined with *seperator*.
    """
    text = []
    for node in tag.recursiveChildGenerator():
        if isinstance(node, Tag) and node.name == "br":
            text.append(u"\n")
            continue
        elif not isinstance(node, unicode):
            # Skip nested tags themselves; their text arrives as child nodes.
            continue
        # Decode every named entity by brute force over the whole table.
        for find, replace in htmlentitydefs.name2codepoint.items():
            node = node.replace(u"&%s;" % find, unichr(replace))
        text.append(node)
    return seperator.join(text)
def getOP(soup):
    """Extract the opening post (which always carries an image) from the page."""
    threadNode = soup.find("form", {'name': "delform"})
    timestamp = threadNode.find("span", 'posttime').string
    poster = threadNode.find("span", "postername").string
    imageNode = threadNode.find("span", "filesize")
    imageURL = imageNode.a["href"]
    imageTitle = imageNode.findNext("span", "filetitle").string
    # The delete-checkbox name doubles as the post ID.
    postID = threadNode.find("input", type="checkbox", value="delete")["name"]
    text = getText(threadNode.blockquote, " ")
    return Post(postID, text, poster, timestamp, imageURL, imageTitle)
def getRegularPosts(soup, posts):
    """Append every reply in the thread to *posts*; return its image count."""
    # Each reply table is located via its 'doubledash' marker cell.
    postTables = (td.findParent('table')
        for td in soup.findAll('td', 'doubledash'))
    imageCount = 0
    for postTable in postTables:
        postID = postTable.find('td', id=True)['id']
        text = getText(postTable.find('blockquote'), u" ")
        posterSpan = postTable.find('span', 'commentpostername')
        poster = posterSpan.string
        # The timestamp is the bare text node right after the poster span.
        timestamp = posterSpan.findNextSibling(text=True)
        filespan = postTable.find('span', 'filesize')
        if filespan:
            imageCount += 1
            imageURL = filespan.find('a')['href']
            imageTitle = postTable.find('span', title=True).string
        else:
            imageURL = None
            imageTitle = None
        post = Post(postID, text, poster, timestamp, imageURL, imageTitle)
        print u"found %s" % post
        posts.append(post)
    return imageCount
def getPosts(soup):
    """Return every post in the thread, OP first, and log a summary line."""
    posts = [getOP(soup)]
    imageCount = 1 # Start at 1 for OP's image
    imageCount += getRegularPosts(soup, posts)
    print u"found %i posts with %i images" % (len(posts), imageCount)
    return posts
def downloadImages(posts, dest, overwriteImages, pauseImage):
imageDir = os.path.join(dest, "images")
if not os.path.exists(imageDir):
os.mkdir(imageDir)
print "pause time between image requests:", pauseImage
for post in posts:
if post.image:
localPath = os.path.join(imageDir, post.image)
if os.path.exists(localPath):
if not overwriteImages and os.path.getsize(localPath) != 0:
print u"Skip: image %s already exists" % post.image
continue
print u"downloading %s to %s" % (post.imageURL, post.image)
with open(localPath, 'w') as f:
try:
remote = urllib2.urlopen(post.imageURL)
except urllib2.HTTPError, e:
if e.code == 404:
print "image 404ed"
raise
f.write(remote.read())
time.sleep(pauseImage) # be nice to the servers
def writeData(thread, posts, dest):
    """Serialize the thread ID and all posts to <dest>/thread.js as JSON.

    Each post is stored with its id, poster, image filename, timestamp,
    text and image title.
    """
    target = os.path.join(dest, "thread.js")
    jsonCode = {
        'id': thread,
        'posts': [
            {
                'id': post.id,
                'poster': post.poster,
                'image': post.image,
                'timestamp': post.timestamp,
                'text': post.text,
                'imageTitle': post.imageTitle,
            }
            for post in posts
        ],
    }
    # Parenthesized single-argument print is identical under Python 2 and
    # valid Python 3, unlike the py2-only print statement used before.
    print(u"writing thread data for %s to %s" % (thread, target))
    with open(target, 'w') as f:
        json.dump(jsonCode, f, indent=4)
def main():
    """Parse CLI args and archive the requested thread (optionally looping)."""
    opts, args = options.parse_args()
    if len(args) != 2:
        options.print_usage()
    # NOTE(review): execution continues even when the argument count is
    # wrong, so the indexing below can raise IndexError — confirm intent.
    thread = args[0]
    baseDest = args[1]
    board = opts.board
    overwriteImages = opts.overwriteImages
    # updates == -1 never reaches 0, so --update loops until interrupted.
    if opts.update:
        updates = -1
    else:
        updates = 1
    dest = os.path.join(baseDest, u"%s-%s" % (board, thread))
    if not os.path.exists(dest):
        os.makedirs(dest)
    try:
        while updates != 0:
            updates -= 1
            soup = getSoup(opts.board, thread)
            posts = getPosts(soup)
            if opts.pics:
                downloadImages(posts, dest, overwriteImages, opts.pauseImage)
            writeData(thread, posts, dest)
            if updates != 0:
                print "waiting %i seconds for next update" % opts.pauseUpdate
                print "-" * 40
                time.sleep(opts.pauseUpdate)
    except KeyboardInterrupt:
        print "Keyboard Interrupt, ending archiving"
    except urllib2.HTTPError, e:
        if e.code == 404:
            print "Thread or image 404ed"
        else:
            raise
if __name__ == "__main__":
main()
| Python | 0 | |
c51651dba8ccd14be9e6fb9ee028d1d2940b3202 | Add parity test for simple RNN (#1351) | benchmarks/test_rnn_parity.py | benchmarks/test_rnn_parity.py | import time
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import Trainer, LightningModule
class AverageDataset(Dataset):
    """Synthetic regression dataset for the RNN parity benchmark.

    Each sample is a (sequence_len, 10) Gaussian-noise tensor; its target is
    the first 5 features plus the last 5 features rolled by one position
    along the feature axis.
    """

    def __init__(self, dataset_len=300, sequence_len=100):
        self.dataset_len = dataset_len
        self.sequence_len = sequence_len
        inputs = torch.randn(dataset_len, sequence_len, 10)
        first_half, second_half = inputs.chunk(2, -1)
        self.input_seq = inputs
        self.output_seq = first_half + second_half.roll(shifts=1, dims=-1)

    def __len__(self):
        return self.dataset_len

    def __getitem__(self, item):
        return self.input_seq[item], self.output_seq[item]
class ParityRNN(LightningModule):
    """Minimal LSTM regressor used for the PyTorch-vs-Lightning parity check."""
    def __init__(self):
        super(ParityRNN, self).__init__()
        self.rnn = nn.LSTM(10, 20, batch_first=True)
        self.linear_out = nn.Linear(in_features=20, out_features=5)
    def forward(self, x):
        # Project every timestep of the LSTM output; the final state is unused.
        seq, last = self.rnn(x)
        return self.linear_out(seq)
    def training_step(self, batch, batch_nb):
        x, y = batch
        y_hat = self(x)
        loss = F.mse_loss(y_hat, y)
        return {'loss': loss}
    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.02)
    def train_dataloader(self):
        return DataLoader(AverageDataset(), batch_size=30)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_pytorch_parity(tmpdir):
    """
    Verify that the same pytorch and lightning models achieve the same results
    :param tmpdir:
    :return:
    """
    num_epochs = 2
    num_rums = 3
    lightning_outs, pl_times = lightning_loop(ParityRNN, num_rums, num_epochs)
    manual_outs, pt_times = vanilla_loop(ParityRNN, num_rums, num_epochs)
    # make sure the losses match exactly to 5 decimal places
    # NOTE(review): assert_almost_equal is actually called with decimal=8,
    # which is stricter than the comment's "5 decimal places" — confirm.
    for pl_out, pt_out in zip(lightning_outs, manual_outs):
        np.testing.assert_almost_equal(pl_out, pt_out, 8)
def set_seed(seed):
    """Seed numpy and torch (and CUDA, when present) for reproducible runs."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed(seed)
def vanilla_loop(MODEL, num_runs=10, num_epochs=10):
    """Train MODEL with a plain PyTorch loop.

    Returns (errors, times): per seeded run, the loss of the final batch
    and the wall-clock duration.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
    errors = []
    times = []
    for i in range(num_runs):
        time_start = time.perf_counter()
        # seed every run identically to the lightning loop
        seed = i
        set_seed(seed)
        # init model parts
        model = MODEL()
        dl = model.train_dataloader()
        optimizer = model.configure_optimizers()
        model = model.to(device)
        epoch_losses = []
        for epoch in range(num_epochs):
            # run through full training set
            for j, batch in enumerate(dl):
                x, y = batch
                # move tensors to the selected device; the original called
                # .cuda(0) unconditionally, which crashed on the CPU
                # fallback path implied by the device selection above
                batch = (x.to(device), y.to(device))
                loss_dict = model.training_step(batch, j)
                loss = loss_dict['loss']
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()
                # track loss of every batch; the last entry is the run's error
                epoch_losses.append(loss.item())
        time_end = time.perf_counter()
        times.append(time_end - time_start)
        errors.append(epoch_losses[-1])
    return errors, times
def lightning_loop(MODEL, num_runs=10, num_epochs=10):
    """Train MODEL with a Lightning Trainer; mirror of vanilla_loop.

    Returns (errors, times): the trainer's final running loss and the
    wall-clock duration for each seeded run.
    """
    errors = []
    times = []
    for i in range(num_runs):
        time_start = time.perf_counter()
        # set seed (same per-run seed as vanilla_loop for parity)
        seed = i
        set_seed(seed)
        # init model parts
        model = MODEL()
        trainer = Trainer(
            max_epochs=num_epochs,
            show_progress_bar=False,
            weights_summary=None,
            gpus=1,
            early_stop_callback=False,
            checkpoint_callback=False,
            distributed_backend='dp',
        )
        trainer.fit(model)
        final_loss = trainer.running_loss.last().item()
        errors.append(final_loss)
        time_end = time.perf_counter()
        times.append(time_end - time_start)
    return errors, times
| Python | 0 | |
8a1dff9437a4f013a96369a1fe174c505e8636cb | Add missing migration (fix #130) | puput/migrations/0004_auto_20170912_0928.py | puput/migrations/0004_auto_20170912_0928.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-12 09:28
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    """Register the `extra` manager on BlogPage; no database schema changes."""
    dependencies = [
        ('puput', '0003_add_short_feed_description_to_blog_page'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='blogpage',
            managers=[
                ('extra', django.db.models.manager.Manager()),
            ],
        ),
    ]
| Python | 0 | |
8a3d757be17d395ba14ae7458036a78d10e3e212 | Test to find out how to remove a file a git repo. | holamundo.py | holamundo.py | #!/usr/bin/env python
def main():
    """Print the classic Spanish hello-world greeting."""
    greeting = "Hola mundo!!"
    print(greeting)
if __name__ == "__main__":
main()
| Python | 0 | |
17aefac614890b1fe7079a76b803707ae3fbb832 | Add perihelion | Project3/analysis/perihelion.py | Project3/analysis/perihelion.py | #!/bin/python3
# -*- coding: utf-8 -*-
from runner import Runner
import numpy as np
import matplotlib.pyplot as plt
class Perihelion(Runner):
    """Runner configuration for measuring Mercury's perihelion precession."""
    def setup(self):
        # Simulate Sun + Mercury only, relativistic correction enabled, so
        # the perihelion shift can be isolated from planetary perturbations.
        self['number of years'] = 100
        self['do save results'] = True
        self['do save any results'] = False
        self['use all planets'] = False
        self['save period'] = 1000
        self['use planets'] = ['Sun', 'Mercury']
        self['steps per year'] = 1e7
        self['freeze sun'] = True
        self['use two body approximation'] = False
        self['use relativistic correction'] = True
        self['method'] = 'verlet'
        # NOTE(review): 'Velocity' for the Sun is capitalised unlike
        # Mercury's 'velocity' — confirm the expected config key casing.
        self.get_planet('Sun')['position'] = [0.0, 0.0, 0.0]
        self.get_planet('Sun')['Velocity'] = [0.0, 0.0, 0.0]
        self.get_planet('Mercury')['velocity'] = [12.44, 0.0, 0.0]
        self.get_planet('Mercury')['position'] = [0.0, 0.3075, 0.0]
    def run(self):
        # Run the simulation, then plot the recorded precession angles.
        out, _ = self.run_simulation()
        print(out)
        # self.run_analysis()
        self.getPerihelion()
    def getPerihelion(self):
        """Plot precession angle vs. time from the simulator's output file."""
        timenangle = np.loadtxt("../data/precession.txt");
        print(timenangle.shape)
        print(timenangle[:, 1] - timenangle[0, 1])
        plt.plot(timenangle[:, 0], timenangle[:, 1], 'o')
        plt.xlabel("Time [yr]")
        plt.ylabel("Precession angle")
        plt.show()
    def findPerihelion(self):
        """Locate perihelion passages from the raw Sun-Mercury separation.

        NOTE(review): `interp1d` and `argrelextrema` are referenced but never
        imported in this module — calling this method would raise NameError;
        confirm the intended scipy imports.
        """
        position = self.get_position()
        r = np.linalg.norm(position[:, :, 1] - position[:, :, 0], axis=1)
        # plt.plot(r)
        # plt.show()
        # argrelextrema()
        rfunc = interp1d(np.linspace(0,len(r)-1,len(r)),r, kind='slinear')
        r = rfunc(np.linspace(0,len(r)-1, len(r)))
        # A point is a (near-)minimum when both neighbours sit higher.
        larger = np.roll(r, 1) - r > 1e-3
        smaller = np.roll(r, -1) - r > 1e-3
        minima = np.logical_and(larger, smaller)
        above_mean = r < r.mean()
        minima = np.logical_and(minima, above_mean)
        plt.plot(r)
        plt.plot(r*minima, 'o')
        plt.show()
        print(minima.sum())
        x, y = position[minima, 0:2, 1].T
        print(x.shape, y.shape)
        theta = np.arctan2(y, x)
        plt.plot(theta*180/np.pi)
        plt.show()
if __name__ == '__main__':
with Perihelion() as mercury:
mercury.run()
| Python | 0.999979 | |
41f68e14fe890cac3de391f7bc4cdd5c2e5b9d75 | test B07 | spec/Order_B07_spec.py | spec/Order_B07_spec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from primestg.order.orders import Order
from expects import expect, equal
# Mamba-style spec: build a B07 (set FTP IP) order and compare its XML output.
with description('Order B07 Generation'):
    with it('generates expected B07 xml'):
        expected_result = '<Order IdPet="1234" IdReq="B07" Version="3.1.c">\n ' \
                          '<Cnc Id="CIR000000000">\n ' \
                          '<B07 IPftp="10.1.5.206"/>\n ' \
                          '</Cnc>\n</Order>\n'
        # Header fields shared by every order: request id, type and target
        # concentrator.
        generic_values = {
            'id_pet': '1234',
            'id_req': 'B07',
            'cnc': 'CIR000000000',
        }
        # B07-specific payload: the FTP server IP to push to the meter.
        payload = {
            'IPftp': '10.1.5.206',
        }
        order = Order('B07')
        order = order.create(generic_values, payload)
        expect(order).to(equal(expected_result))
| Python | 0.000001 | |
1163b56cf3f7e6a651f239a45300604c2847d201 | Add a script to train TD3 | examples/mujoco/td3/train_td3.py | examples/mujoco/td3/train_td3.py | """A training script of DDPG on OpenAI Gym Mujoco environments.
This script follows the settings of http://arxiv.org/abs/1802.09477 as much
as possible.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import logging
import os
import sys
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
import gym
import gym.wrappers
import numpy as np
import chainerrl
from chainerrl import experiments
from chainerrl import explorers
from chainerrl import misc
from chainerrl import replay_buffer
def concat_obs_and_action(obs, action):
    """Concat observation and action to feed the critic.

    Joins along the last axis so batched inputs remain batched.
    """
    return F.concat((obs, action), axis=-1)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--env', type=str, default='Hopper-v2',
help='OpenAI Gym MuJoCo env to perform algorithm on.')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 32)')
parser.add_argument('--gpu', type=int, default=0,
help='GPU to use, set to -1 if no GPU.')
parser.add_argument('--load', type=str, default='',
help='Directory to load agent from.')
parser.add_argument('--steps', type=int, default=10 ** 6,
help='Total number of timesteps to train the agent.')
parser.add_argument('--eval-n-runs', type=int, default=10,
help='Number of episodes run for each evaluation.')
parser.add_argument('--eval-interval', type=int, default=5000,
help='Interval in timesteps between evaluations.')
parser.add_argument('--replay-start-size', type=int, default=10000,
help='Minimum replay buffer size before ' +
'performing gradient updates.')
parser.add_argument('--batch-size', type=int, default=100,
help='Minibatch size')
parser.add_argument('--render', action='store_true',
help='Render env states in a GUI window.')
parser.add_argument('--demo', action='store_true',
help='Just run evaluation, not training.')
parser.add_argument('--monitor', action='store_true',
help='Wrap env with gym.wrappers.Monitor.')
parser.add_argument('--logger-level', type=int, default=logging.INFO,
help='Level of the root logger.')
args = parser.parse_args()
logging.basicConfig(level=args.logger_level)
args.outdir = experiments.prepare_output_dir(
args, args.outdir, argv=sys.argv)
print('Output files are saved in {}'.format(args.outdir))
# Set a random seed used in ChainerRL
misc.set_random_seed(args.seed, gpus=(args.gpu,))
def make_env(test):
env = gym.make(args.env)
# Unwrap TimiLimit wrapper
assert isinstance(env, gym.wrappers.TimeLimit)
env = env.env
# Use different random seeds for train and test envs
env_seed = 2 ** 32 - 1 - args.seed if test else args.seed
env.seed(env_seed)
# Cast observations to float32 because our model uses float32
env = chainerrl.wrappers.CastObservationToFloat32(env)
if args.monitor:
env = gym.wrappers.Monitor(env, args.outdir)
if args.render and not test:
env = chainerrl.wrappers.Render(env)
return env
env = make_env(test=False)
timestep_limit = env.spec.tags.get(
'wrapper_config.TimeLimit.max_episode_steps')
obs_space = env.observation_space
action_space = env.action_space
print('Observation space:', obs_space)
print('Action space:', action_space)
action_size = action_space.low.size
winit = chainer.initializers.LeCunUniform(3 ** -0.5)
policy = chainer.Sequential(
L.Linear(None, 400, initialW=winit),
F.relu,
L.Linear(None, 300, initialW=winit),
F.relu,
L.Linear(None, action_size, initialW=winit),
F.tanh,
chainerrl.distribution.ContinuousDeterministicDistribution,
)
policy_optimizer = optimizers.Adam().setup(policy)
def make_q_func_with_optimizer():
q_func = chainer.Sequential(
concat_obs_and_action,
L.Linear(None, 400, initialW=winit),
F.relu,
L.Linear(None, 300, initialW=winit),
F.relu,
L.Linear(None, 1, initialW=winit),
)
q_func_optimizer = optimizers.Adam().setup(q_func)
return q_func, q_func_optimizer
q_func1, q_func1_optimizer = make_q_func_with_optimizer()
q_func2, q_func2_optimizer = make_q_func_with_optimizer()
# Draw the computational graph and save it in the output directory.
fake_obs = chainer.Variable(
policy.xp.zeros_like(obs_space.low, dtype=np.float32)[None],
name='observation')
fake_action = chainer.Variable(
policy.xp.zeros_like(action_space.low, dtype=np.float32)[None],
name='action')
chainerrl.misc.draw_computational_graph(
[policy(fake_obs)], os.path.join(args.outdir, 'policy'))
chainerrl.misc.draw_computational_graph(
[q_func1(fake_obs, fake_action)], os.path.join(args.outdir, 'q_func1'))
chainerrl.misc.draw_computational_graph(
[q_func2(fake_obs, fake_action)], os.path.join(args.outdir, 'q_func2'))
rbuf = replay_buffer.ReplayBuffer(10 ** 6)
explorer = explorers.AdditiveGaussian(
scale=0.1, low=action_space.low, high=action_space.high)
def burnin_action_func():
"""Select random actions until model is updated one or more times."""
return np.random.uniform(
action_space.low, action_space.high).astype(np.float32)
# Hyperparameters in http://arxiv.org/abs/1802.09477
agent = chainerrl.agents.TD3(
policy,
q_func1,
q_func2,
policy_optimizer,
q_func1_optimizer,
q_func2_optimizer,
rbuf,
gamma=0.99,
explorer=explorer,
replay_start_size=args.replay_start_size,
gpu=args.gpu,
minibatch_size=args.batch_size,
burnin_action_func=burnin_action_func,
)
if len(args.load) > 0:
agent.load(args.load)
eval_env = make_env(test=True)
if args.demo:
eval_stats = experiments.eval_performance(
env=eval_env,
agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
max_episode_len=timestep_limit)
print('n_runs: {} mean: {} median: {} stdev {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
eval_stats['stdev']))
else:
experiments.train_agent_with_evaluation(
agent=agent, env=env, steps=args.steps,
eval_env=eval_env, eval_n_steps=None,
eval_n_episodes=args.eval_n_runs, eval_interval=args.eval_interval,
outdir=args.outdir,
train_max_episode_len=timestep_limit)
if __name__ == '__main__':
main()
| Python | 0.000001 | |
e6f9f12d1c9faae8f718306bcd2862278a083351 | Test gnssUblox message processing (#24404) | selfdrive/locationd/test/test_ublox_processing.py | selfdrive/locationd/test/test_ublox_processing.py | import unittest
import numpy as np
from laika import AstroDog
from laika.helpers import UbloxGnssId
from laika.raw_gnss import calc_pos_fix, correct_measurements, process_measurements, read_raw_ublox
from selfdrive.test.openpilotci import get_url
from tools.lib.logreader import LogReader
def get_gnss_measurements(log_reader):
    """Extract raw GNSS measurement batches from a route log.

    Keeps only ``ubloxGnss`` messages whose ``measurementReport`` carries at
    least one measurement, and parses each report with laika's
    ``read_raw_ublox``.  Returns a list of per-report measurement lists.
    """
    gnss_measurements = []
    for msg in log_reader:
        if msg.which() == "ubloxGnss":
            ublox_msg = msg.ubloxGnss
            # Only measurement reports are of interest; other ubloxGnss
            # sub-messages (e.g. ephemeris) are skipped.
            if ublox_msg.which == 'measurementReport':
                report = ublox_msg.measurementReport
                if len(report.measurements) > 0:
                    gnss_measurements.append(read_raw_ublox(report))
    return gnss_measurements
class TestUbloxProcessing(unittest.TestCase):
    """Regression tests for ublox GNSS parsing and position-fix computation.

    Uses one fixed recorded segment, so the expected counts and the mean fix
    position below are exact properties of that segment.
    """
    # Number of measurement batches fed through the full fix pipeline.
    NUM_TEST_PROCESS_MEAS = 10
    @classmethod
    def setUpClass(cls):
        # Download one known segment and parse all its GNSS reports once.
        lr = LogReader(get_url("4cf7a6ad03080c90|2021-09-29--13-46-36", 0))
        cls.gnss_measurements = get_gnss_measurements(lr)
    def test_read_ublox_raw(self):
        # Counts are pinned to this exact segment's contents.
        count_gps = 0
        count_glonass = 0
        for measurements in self.gnss_measurements:
            for m in measurements:
                if m.ublox_gnss_id == UbloxGnssId.GPS:
                    count_gps += 1
                elif m.ublox_gnss_id == UbloxGnssId.GLONASS:
                    count_glonass += 1
        self.assertEqual(count_gps, 5036)
        self.assertEqual(count_glonass, 3651)
    def test_get_fix(self):
        dog = AstroDog()
        position_fix_found = 0
        count_processed_measurements = 0
        count_corrected_measurements = 0
        position_fix_found_after_correcting = 0
        pos_ests = []
        for measurements in self.gnss_measurements[:self.NUM_TEST_PROCESS_MEAS]:
            processed_meas = process_measurements(measurements, dog)
            count_processed_measurements += len(processed_meas)
            pos_fix = calc_pos_fix(processed_meas)
            # A fix of all zeros means no solution was found.
            if len(pos_fix) > 0 and all(pos_fix[0] != 0):
                position_fix_found += 1
                corrected_meas = correct_measurements(processed_meas, pos_fix[0][:3], dog)
                count_corrected_measurements += len(corrected_meas)
                # Re-solve with the corrected measurements.
                pos_fix = calc_pos_fix(corrected_meas)
                if len(pos_fix) > 0 and all(pos_fix[0] != 0):
                    pos_ests.append(pos_fix[0])
                    position_fix_found_after_correcting += 1
        mean_fix = np.mean(np.array(pos_ests)[:, :3], axis=0)
        # Mean ECEF position for this segment, tolerance 1 m per axis.
        np.testing.assert_allclose(mean_fix, [-2452306.662377, -4778343.136806, 3428550.090557], rtol=0, atol=1)
        # Note that can happen that there are less corrected measurements compared to processed when they are invalid.
        # However, not for the current segment
        self.assertEqual(position_fix_found, self.NUM_TEST_PROCESS_MEAS)
        self.assertEqual(position_fix_found_after_correcting, self.NUM_TEST_PROCESS_MEAS)
        self.assertEqual(count_processed_measurements, 69)
        self.assertEqual(count_corrected_measurements, 69)
if __name__ == "__main__":
unittest.main()
| Python | 0 | |
45a91a5c32227aabf17b52960d98851cd7608dd1 | add qha plot tool (function version) | workflows/tools/plot_quasiparticle_scan.py | workflows/tools/plot_quasiparticle_scan.py | from aiida import load_dbenv
load_dbenv()
from aiida.orm import load_node, load_workflow
from aiida.orm import Code, DataFactory
import matplotlib.pyplot as plt
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
ArrayData = DataFactory('array')
KpointsData = DataFactory('array.kpoints')
import numpy as np
#######################
# Hard-coded primary key of the AiiDA workflow to inspect.
# NOTE(review): pk 1086 is environment-specific -- consider making it a
# command-line argument.
wf = load_workflow(1086)
#######################
thermal_properties = wf.get_result('thermal_properties')
energy = thermal_properties.get_array('electronic_energies')
volumes = thermal_properties.get_array('volumes')
# Retrieved but not plotted below; kept for interactive use.
entropy = thermal_properties.get_array('entropy')
cv = thermal_properties.get_array('cv')
temperature = thermal_properties.get_array('temperature')
# Figure 1: electronic energy vs. cell volume.
plt.figure(1)
plt.plot(volumes, energy)
# Figure 2: mode frequency vs. volume, one curve per sub-workflow.
# NOTE(review): each curve samples keys 100..700 in steps of 100 from the
# quasiparticle data and is labelled with temperature[i] -- confirm that
# sub-workflow order matches the temperature array.
plt.figure(2)
for i, w in enumerate(wf.get_steps()[1].get_sub_workflows()):
    frequencies = [w.get_result('quasiparticle_data').get_dict()['{}'.format(k)]['q_point_0']['4']['frequency'] for k in range(100,800,100)]
    plt.plot(volumes, frequencies, label='{}'.format(temperature[i]))
plt.show()
| Python | 0 | |
082e7d63192c2e7eaa4210e0c559b145313ecc3a | Add files via upload | server/src/datasource/parse_indepexpends.py | server/src/datasource/parse_indepexpends.py | from datasource import fec
from datasource import propublica
import os
FEC_APIKEY = os.getenv('FEC_API_KEY', '')
ProPublica_APIKEY = os.getenv('PP_API_KEY', '')
# Fetch the committee list once, then pull independent-expenditure data
# for every committee active in the 2016 cycle.
FecApiObj = fec.FECAPI(FEC_APIKEY)
committees = FecApiObj.get_committees()
PPCampFinObj = propublica.CampaignFinanceAPI(ProPublica_APIKEY)
expenditures = []
for committee in committees:
    if 2016 in committee['cycles']:
        # Progress indicator -- the ProPublica call is slow.
        print(committee['committee_id'])
        expenditures.append(
            PPCampFinObj.get_indep_expends(str(committee['committee_id'])))
# Serialize as one JSON array.  The original wrote concatenated
# str(response) fragments, which produced a .json file no JSON parser
# could read; the context manager also guarantees the file is closed
# even if an API call raises mid-loop.
with open("IndepExpends.json", 'w') as datafile:
    json.dump(expenditures, datafile)
d8ba95ddb1e469600c735316a1aeafa115399b3c | Add an execution module called key to return minion public key finger | salt/modules/key.py | salt/modules/key.py | '''
Functions to view the minion's public key information
'''
# Import python libs
import os
# Import Salt libs
import salt.utils
def finger():
    '''
    Return the minion's public key fingerprint

    CLI Example::

        salt '*' key.finger
    '''
    # __opts__ is a module-level global supplied to execution modules at
    # load time (presumably by the Salt loader); 'pki_dir' locates the
    # minion's key pair on disk.
    return salt.utils.pem_finger(
            os.path.join(__opts__['pki_dir'], 'minion.pub')
            )
| Python | 0.000001 | |
ce8465e5f0f085bedcd1a84220316c8eab29a493 | Add Tensor Flow | python/src/algorithm/coding/setupdate.py | python/src/algorithm/coding/setupdate.py | n = int(input())
s = set(map(int, input().split()))
N = int(input())
for i in range(N):
cmd = input()
B = set(map(int, input().split()))
if "symmetric_difference_update" in cmd:
s.symmetric_difference_update(B)
elif "intersection_update" in cmd:
s.intersection_update(B)
elif "difference_update" in cmd:
s.difference_update(B)
elif "update" in cmd:
s.update(B)
print(sum(s))
| Python | 0.000019 | |
b3889f8ff6d66963d4253d6796c3bb20dc9adbb7 | Add external driver and parameter file | scripts/my_Param.py | scripts/my_Param.py | #=================================================
# Observation
#-------------------------------------------------
sstObsPath = '/clim_obs/obs/ocn/mo/tos/UKMETOFFICE-HadISST-v1-1/130122_HadISST_sst.nc'
tauxObsPath = '/clim_obs/obs/atm/mo/tauu/ERAINT/tauu_ERAINT_198901-200911.nc'
sstNameObs = 'sst'
tauxNameObs = 'tauu'
#=================================================
# Models
#-------------------------------------------------
modpath = '/work/cmip5/historical/atm/mo/VAR/cmip5.MOD.historical.r1i1p1.mo.atm.Amon.VAR.ver-1.latestX.xml'
modnames = ['ACCESS1-0', 'ACCESS1-3',
'BNU-ESM',
'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS',
'CSIRO-Mk3-6-0', 'CanCM4',
'GISS-E2-H-CC', 'GISS-E2-H', 'GISS-E2-R-CC', 'GISS-E2-R',
'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES',
'IPSL-CM5A-LR',
'MIROC-ESM-CHEM', 'MIROC-ESM', 'MIROC4h', 'MIROC5',
'MPI-ESM-LR', 'MPI-ESM-MR',
'inmcm4'
]
# NOTE(review): this single-model assignment overrides the full CMIP5 list
# above -- presumably a debugging leftover; confirm before production runs.
modnames = ['IPSL-CM5A-LR']
# Variables
sstName = 'ts'
tauxName= 'tauu'
#=================================================
# Output
#-------------------------------------------------
outpathdata = '.' # e.g. '/user/directory/output/nc'
outpathjsons = '.' # e.g. '/user/directory/output/json'
outnamejson = 'test.json'
#=================================================
# Output
#-------------------------------------------------
# Metrics
metrics = ['EnsoAmpl', 'EnsoMu']
# Variable name and nino box
ninoBox = 'nino3'
| Python | 0 | |
771fc766446e1610a0599102720dc7e0f358e0e6 | Add wsgi file | app.wsgi | app.wsgi | from app import app as application
| Python | 0.000001 | |
fc636dbaacb5d2d1ebba1ba7f577ee4ec4deb958 | Add synthtool scripts (#3765) | google-cloud-containeranalysis/synth.py | google-cloud-containeranalysis/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
# Generate the containeranalysis v1beta1 client with the GAPIC generator
# and copy the generated sources into this repo's layout.
gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()
library = gapic.java_library(
    service='container',
    version='v1beta1',
    config_path='/google/devtools/containeranalysis/artman_containeranalysis_v1beta1.yaml',
    artman_output_name='')
# GAPIC layer stays here; gRPC stubs and protos go to the sibling
# google-api-grpc artifacts.
s.copy(library / 'gapic-google-cloud-containeranalysis-v1beta1/src', 'src')
s.copy(library / 'grpc-google-cloud-containeranalysis-v1beta1/src', '../../google-api-grpc/grpc-google-cloud-containeranalysis-v1beta1/src')
s.copy(library / 'proto-google-cloud-containeranalysis-v1beta1/src', '../../google-api-grpc/proto-google-cloud-containeranalysis-v1beta1/src')
| Python | 0.000001 | |
faf9e73f44cfee0c637c006ad6c37e8dcef0d1df | Create arcEC.py | arcEC.py | arcEC.py | import arcpy
import sys
## Version 1.8 (8 functions) '130213/MaHvi
def SetMsg(msg, severity=0): # 0:Message, 1:Warning, 2:Error
    """Emit *msg* line by line through arcpy's geoprocessing messenger.

    Each line is prefixed with ":) " and routed to AddMessage/AddWarning/
    AddError depending on *severity*.
    """
    #print msg
    # NOTE(review): the bare except silently swallows every failure
    # (including typos in the body) -- deliberate best-effort logging,
    # but worth narrowing.
    try:
        for string in msg.split('\n'):
            string = ":) "+string
            if severity == 0:
                arcpy.AddMessage(string)
            elif severity == 1:
                arcpy.AddWarning(string)
            elif severity == 2:
                arcpy.AddError(string)
    except:
        pass
def ecMessage(strI,numI=0,severity=0):
    """ Neither message number nor severity is mandatory """
    # numI == 0 means "no message number": omit it from the output.
    # NOTE(review): the severity parameter is accepted but never used;
    # messages are always emitted at severity 0.
    if numI == 0:
        SetMsg(" Message: "+strI,0)
    else:
        SetMsg(" Message: "+str(numI)+" : "+strI,0)
def ecWarning(strI,numI,severity=0):
    """ Severity is not mandatory """
    # Always emitted as an arcpy warning (severity 1); the severity
    # parameter itself is unused.
    SetMsg(" ! Warning: "+str(numI)+" : "+strI,1)
def ecError(strI,numI,severity):
    """ Severity > 0 causes program termination """
    SetMsg("!!!Error: "+str(numI)+" : "+strI,2)
    # The error number doubles as the process exit code.
    if severity > 0:
        sys.exit(numI)
def Describe2String(desIn):
    """Render selected attributes of an arcpy Describe object as text.

    Only attributes actually present on *desIn* are reported; Describe
    objects expose different attribute sets per data type.
    """
    strReport = ""
    if hasattr(desIn, "Name"):
        strReport +="\n Name: "+desIn.Name
    if hasattr(desIn, "baseName"):
        strReport +="\n baseName: "+desIn.baseName
    if hasattr(desIn, "dataType"):
        strReport +="\n dataType: "+desIn.dataType
    #if hasattr(desIn, "dataElementType"):
    # strReport +="\n dataElementType: "+desIn.dataElementType
    if hasattr(desIn, "catalogPath"):
        strReport +="\n catalogPath: "+desIn.catalogPath
    if hasattr(desIn, "children"):
        strReport +="\n children: "+str(len(desIn.children))
    if hasattr(desIn, "fields"):
        strReport +="\n fields: "+str(len(desIn.fields))
        if len(desIn.fields) > 0:
            for fldX in desIn.fields:
                strReport +="\n field: "+fldX.name
    # NOTE(review): "pludder" is not a standard Describe attribute --
    # looks like a debugging probe; confirm whether it can be removed.
    if hasattr(desIn, "pludder"):
        strReport +="\n pludder: "+desIn.pludder
    return strReport
def Table2Ascii(tblIn):
    """Dump an arcpy table row-wise as pipe-separated text.

    Emits a header of "name <type>" cells, then one line per row, and a
    trailing row count.  Returns the empty string when *tblIn* does not
    describe as a Table.
    """
    strReport = ""
    desIn = arcpy.Describe(tblIn)
    if hasattr(desIn, "dataType"):
        if desIn.dataType == "Table":
            strReport +="\n Table2Ascii ::"
            if hasattr(desIn, "fields"):
                strReport +="\n fields: "+str(len(desIn.fields))+"\n"
                if len(desIn.fields) > 0:
                    # Header: one "|name <type>" cell per field.
                    for fldX in desIn.fields:
                        strReport +="|"+fldX.name+" <"+fldX.type+">"
                    rows = arcpy.SearchCursor(tblIn)
                    numRows = 0
                    for rowX in rows:
                        strReport += "\n "
                        for fldX in desIn.fields:
                            strReport += "|"+str(rowX.getValue(fldX.name))
                        numRows += 1
                    strReport += "\n Row count: "+str(numRows)
                else:
                    # NOTE(review): "tabel" is a typo in the output string;
                    # left untouched here since it is runtime text.
                    strReport +="No Fields in tabel ..."
    return strReport
def Table2Ascii_byFields(tblIn):
    """Dump an arcpy table column-wise: one line of values per field.

    Unlike Table2Ascii, this walks the table once per field (a fresh
    SearchCursor is opened for every column), so it is O(fields x rows)
    cursor traffic.  Returns the empty string when *tblIn* is not a Table.
    """
    strReport = ""
    desIn = arcpy.Describe(tblIn)
    if hasattr(desIn, "dataType"):
        if desIn.dataType == "Table":
            strReport +="Table2Ascii_ByFields"
            if hasattr(desIn, "fields"):
                strReport +="\n fields: "+str(len(desIn.fields))
                if len(desIn.fields) > 0:
                    for fldX in desIn.fields:
                        rows = arcpy.SearchCursor(tblIn)
                        strReport +="\n field: "+fldX.name+" <"+fldX.type+">"
                        strReport += "\n "
                        for rowX in rows:
                            strReport += "|"+str(rowX.getValue(fldX.name))
                        # NOTE(review): reset() on a cursor that is about to
                        # be discarded looks redundant -- confirm.
                        rows.reset()
    return strReport
def Dict2String(dicIn):
    """Render *dicIn* as one "key : value" line per entry, keys sorted."""
    lines = [str(key) + " : " + str(dicIn[key]) + "\n"
             for key in sorted(dicIn.keys())]
    return "".join(lines)
# Music that accompanied the coding of this script:
# Deep Forest - Savana Dance
| Python | 0 | |
3aed2efd64d38a78682d7ae4c55400763af22c63 | add avoid.py | avoid.py | avoid.py | #!/usr/bin/env python
import unicornhat as unicorn
import getch, random, time, colorsys
import numpy as np
unicorn.rotation(90)
unicorn.brightness(0.4)
screen = [[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0]]
score=0
carX=3
carY=6
def drawObstacles():
    """Paint the whole 8x8 obstacle field to the Unicorn HAT.

    screen[y][x] holds the green intensity (0 = empty, 64 = obstacle).
    """
    for y in range(8):
        for x in range(8):
            unicorn.set_pixel(x,y,0,screen[y][x],0)
    unicorn.show()
def addObstacle():
    """With 50% probability place an obstacle (value 64) somewhere in row 0.

    randrange's upper bound is exclusive, so (0, 8) covers all eight
    columns; the original (0, 7) could never spawn in the rightmost
    column, making column 7 permanently safe for the player.
    """
    if random.randrange(0, 2) == 1:
        screen[0][random.randrange(0, 8)] = 64
def moveObstacles():
    """Scroll the obstacle field down by one row and clear the top row.

    Rows are copied bottom-up so each row is read before it is
    overwritten.  The original looped y all the way to 0 (copying
    screen[-1] -- the bottom row -- into row 0, so obstacles wrapped
    around forever) and only copied columns 0-6, leaving column 7 frozen.
    """
    for y in range(7, 0, -1):
        for x in range(8):
            screen[y][x] = screen[y - 1][x]
    # Fresh empty top row; addObstacle() may repopulate it.
    screen[0] = [0, 0, 0, 0, 0, 0, 0, 0]
def drawCar(y, x):
    """Draw the 2x2 player "car" in blue with its top-left pixel at (x, y)."""
    unicorn.set_pixel((x),(y),0,0,64 )
    unicorn.set_pixel((x)+1,(y),0,0,64 )
    unicorn.set_pixel((x),(y)+1,0,0,64 )
    unicorn.set_pixel((x)+1,(y)+1,0,0,64)
    unicorn.show()
def undrawCar(y,x):
    """Erase the 2x2 car (pixels set to black) at top-left position (x, y)."""
    unicorn.set_pixel((x),(y),0,0,0)
    unicorn.set_pixel((x)+1,(y),0,0,0)
    unicorn.set_pixel((x),(y)+1,0,0,0)
    unicorn.set_pixel((x)+1,(y)+1,0,0,0)
    unicorn.show()
def checkHit():
    """Return True when any of the car's four pixels overlaps an obstacle."""
    return any(screen[carY + dy][carX + dx] == 64
               for dy in (0, 1)
               for dx in (0, 1))
def crashed():
    """Play a short random-sparkle crash animation (10 frames, 10 ms apart)."""
    for z in range(10):
        # One random brightness value per pixel for this frame.
        rand_mat = np.random.rand(8,8)
        for y in range(8):
            for x in range(8):
                # Reddish hues (h in [0, 0.1)), fixed saturation,
                # random value -> flickering fire effect.
                h = 0.1 * rand_mat[x, y]
                s = 0.8
                v = rand_mat[x, y]
                rgb = colorsys.hsv_to_rgb(h, s, v)
                r = int(rgb[0]*255.0)
                g = int(rgb[1]*255.0)
                b = int(rgb[2]*255.0)
                unicorn.set_pixel(x, y, r, g, b)
        unicorn.show()
        time.sleep(0.01)
# Main game loop (Python 2): scroll obstacles, redraw, read one key,
# then move the car.  'q' = left, 'w' = right, space = stay, 'x' = quit.
while True:
    moveObstacles()
    addObstacle()
    drawObstacles()
    drawCar(carY,carX)
    if (checkHit()==True):
        crashed()
        print "Crashed\nGame Over\nScore: ",score
        break
    else:
        # One point for every survived frame.
        score=score+1
    # Block until a recognised key is pressed.
    user_input=""
    while user_input=="":
        user_input = getch.getch().lower()
        if (user_input!="q") and (user_input!="w") and (user_input!=" ") and (user_input!="x"):
            user_input=""
    # 'x' quits without erasing, so the final frame stays visible.
    if user_input!="x":
        undrawCar(carY,carX)
    if user_input=="q":
        carX=carX-1
        if carX < 0:
            carX=0
    elif user_input=="w":
        # carX is the car's left column; the 2-wide car caps it at 6.
        carX=carX+1
        if carX > 6:
            carX=6
    elif user_input==" ":
        pass
    else:
        print "Game Over\nScore: ", score
        break
7f7effb6ec47c3714df5d6d9dbb403c6fda9cd89 | Add test to create repository on GitHub | selenium_github.py | selenium_github.py | import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class SeleniumGithub(unittest.TestCase):
    """End-to-end test: sign in to GitHub and create a public repository.

    Python 2 code (uses ``except X, e`` syntax) against the legacy
    ``find_element_by_*`` Selenium API.
    """
    def setUp(self):
        """ Setup
        """
        self.GITHUB_URL = "https://github.com/"
        # GitHub credentials
        # NOTE(review): credentials are hardcoded in source -- move them to
        # environment variables before sharing/committing this file.
        self.GITHUB_LOGIN = "developergithubnoreply"
        self.GITHUB_PASSWORD = "eRm-dpW-qkd-34f-!"
        # GitHub repository informations
        self.GITHUB_REPO_NAME = "selenium5"
        self.GITHUB_REPO_DESC = "Automated web test with selenium"
        self.driver = webdriver.Firefox()
        # Implicit wait applies to every element lookup below.
        self.driver.implicitly_wait(30)
        self.BASE_URL = self.GITHUB_URL
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_selenium_github(self):
        """ Login into Github account and create a new public repository
        and test if the repository is created
        """
        driver = self.driver
        driver.get(self.BASE_URL)
        driver.find_element_by_link_text("Sign in").click()
        # Login
        driver.find_element_by_id("login_field").clear()
        driver.find_element_by_id("login_field").send_keys(self.GITHUB_LOGIN)
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys(self.GITHUB_PASSWORD)
        driver.find_element_by_name("commit").click()
        # Create new repository
        driver.find_element_by_xpath("//ul[@id='user-links']/li[2]/a/span").click()
        driver.find_element_by_link_text("New repository").click()
        driver.find_element_by_id("repository_name").clear()
        driver.find_element_by_id("repository_name").send_keys(self.GITHUB_REPO_NAME)
        driver.find_element_by_id("repository_public_true").click()
        driver.find_element_by_id("repository_description").clear()
        driver.find_element_by_id("repository_description").send_keys(self.GITHUB_REPO_DESC)
        driver.find_element_by_xpath("//button[@type='submit']").click()
        # Check existence of the repository previously created
        # NOTE(review): assumes a HEAD request to the repo URL returns 200
        # exactly when the repository exists -- confirm (redirects would
        # change the status code).
        self.assertEqual(requests.head(self.BASE_URL + "/" + self.GITHUB_LOGIN + "/" + self.GITHUB_REPO_NAME).status_code, 200)
    def is_element_present(self, how, what):
        # True iff the element can be located; swallows only NoSuchElement.
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True
    def is_alert_present(self):
        # True iff a JavaScript alert is currently displayed.
        try: self.driver.switch_to_alert()
        except NoAlertPresentException, e: return False
        return True
    def close_alert_and_get_its_text(self):
        # Accept or dismiss the current alert (per accept_next_alert) and
        # return its text; the flag is reset for the next alert either way.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| Python | 0.000001 | |
1fe3fd59e4000216c4d6694690dc0ba866a66ecb | add bloom_count_intersection.py | scripts/bloom_count_intersection.py | scripts/bloom_count_intersection.py | ## using bloom filter to count intersection
import khmer
import sys
import screed
from screed.fasta import fasta_iter
# Count unique k-mers in two FASTA files with khmer bloom-filter hashtables
# and report how many of file 2's unique k-mers also occur in file 1.
# Python 2 script (print statements).
filename = sys.argv[1]
K = int(sys.argv[2]) # size of kmer
HT_SIZE= int(sys.argv[3])# size of hashtable
N_HT = int(sys.argv[4]) # number of hashtables
ht = khmer.new_hashbits(K, HT_SIZE, N_HT)
# Pass 1: count k-mers of the first file.
# NOTE(review): counts via a bloom-filter structure are presumably subject
# to false positives (slight undercount of uniques) -- confirm acceptable.
n_unique = 0
for n, record in enumerate(fasta_iter(open(filename))):
    sequence = record['sequence']
    seq_len = len(sequence)
    # NOTE(review): the inner loop variable shadows the enumerate index 'n'.
    for n in range(0,seq_len+1-K):
        kmer = sequence[n:n+K]
        if (not ht.get(kmer)):
            n_unique+=1
        ht.count(kmer)
print filename,'has been consumed.'
print '# of unique kmers:',n_unique
print '# of occupied bin:',ht.n_occupied()
# Pass 2: count the second file and tally overlap with the first table.
filename2 = sys.argv[5]
ht2 = khmer.new_hashbits(K, HT_SIZE, N_HT)
n_unique = 0
n_overlap = 0
for n, record in enumerate(fasta_iter(open(filename2))):
    sequence = record['sequence']
    seq_len = len(sequence)
    for n in range(0,seq_len+1-K):
        kmer = sequence[n:n+K]
        if (not ht2.get(kmer)):
            n_unique+=1
            if (ht.get(kmer)):
                n_overlap+=1
        ht2.count(kmer)
print filename2,'has been consumed.'
print '# of unique kmers:',n_unique
print '# of occupied bin:',ht2.n_occupied()
print n_overlap,'unique kmers also appears in ',filename2
| Python | 0.000019 | |
4722c73643cbf9cbd63f05736a8469afc4c03443 | test project: convert IPAddressField fields to GenericIPAddressField | test_django_admin_bootstrapped/test_django_admin_bootstrapped/models.py | test_django_admin_bootstrapped/test_django_admin_bootstrapped/models.py | from django.db import models
class TestMe(models.Model):
    """Test model exercising one field of (almost) every built-in type,
    used to verify admin widget rendering."""
    test_m2m = models.ManyToManyField('self', blank=True, help_text="Lorem dolor")
    test_ip = models.GenericIPAddressField(help_text="Lorem dolor")
    test_url = models.URLField(help_text="Lorem dolor")
    test_int = models.IntegerField(help_text="Lorem dolor")
    test_img = models.ImageField(upload_to='dummy', blank=True)
    test_file = models.FileField(upload_to='dummy', blank=True)
    test_date = models.DateField(help_text="Lorem dolor")
    test_char = models.CharField(max_length=50, help_text="Lorem dolor")
    test_bool = models.BooleanField(help_text="Lorem dolor", default=False)
    test_time = models.TimeField(help_text="Lorem dolor")
    test_slug = models.SlugField(help_text="Lorem dolor")
    test_text = models.TextField(help_text="Lorem dolor")
    test_email = models.EmailField(help_text="Lorem dolor")
    test_float = models.FloatField(help_text="Lorem dolor")
    test_bigint = models.BigIntegerField(help_text="Lorem dolor")
    test_positive_integer = models.PositiveIntegerField(help_text="Lorem dolor")
    test_decimal = models.DecimalField(max_digits=5, decimal_places=2, help_text="Lorem dolor")
    test_comma_separated_int = models.CommaSeparatedIntegerField(max_length=100, help_text="Lorem dolor")
    test_small_int = models.SmallIntegerField(help_text="Lorem dolor")
    test_nullbool = models.NullBooleanField(help_text="Lorem dolor")
    test_filepath = models.FilePathField(blank=True, help_text="Lorem dolor")
    test_positive_small_int = models.PositiveSmallIntegerField(help_text="Lorem dolor")
    def get_absolute_url(self):
        # Stub: admin shows a "view on site" link when this is defined.
        return ''
    class Meta:
        verbose_name = u'Test me'
        verbose_name_plural = u'Lot of Test me'
class TestMeProxyForFieldsets(TestMe):
    """Proxy of TestMe so the same data can be registered with a second
    admin class (one that uses fieldsets)."""
    class Meta:
        proxy = True
        verbose_name = u'Test me fieldsets'
        verbose_name_plural = u'Lot of Test me fieldsets'
class TestThat(models.Model):
    """Related model (FK to TestMe) for testing inline admin rendering."""
    that = models.ForeignKey(TestMe, help_text="Lorem dolor")
    test_ip = models.GenericIPAddressField(help_text="Lorem dolor")
    test_url = models.URLField(help_text="Lorem dolor")
    test_int = models.IntegerField(help_text="Lorem dolor")
    test_date = models.DateField(help_text="Lorem dolor")
    test_bool = models.BooleanField(help_text="Lorem dolor", default=True)
    class Meta:
        verbose_name = u'Test that'
        verbose_name_plural = u'Lot of Test that'
class TestSortable(models.Model):
    """Orderable child of TestMe; rows sort by their explicit position."""
    that = models.ForeignKey(TestMe)
    position = models.PositiveSmallIntegerField("Position")
    test_char = models.CharField(max_length=5)
    class Meta:
        ordering = ('position', )
| from django.db import models
class TestMe(models.Model):
test_m2m = models.ManyToManyField('self', blank=True, help_text="Lorem dolor")
test_ip = models.IPAddressField(help_text="Lorem dolor")
test_url = models.URLField(help_text="Lorem dolor")
test_int = models.IntegerField(help_text="Lorem dolor")
test_img = models.ImageField(upload_to='dummy', blank=True)
test_file = models.FileField(upload_to='dummy', blank=True)
test_date = models.DateField(help_text="Lorem dolor")
test_char = models.CharField(max_length=50, help_text="Lorem dolor")
test_bool = models.BooleanField(help_text="Lorem dolor", default=False)
test_time = models.TimeField(help_text="Lorem dolor")
test_slug = models.SlugField(help_text="Lorem dolor")
test_text = models.TextField(help_text="Lorem dolor")
test_email = models.EmailField(help_text="Lorem dolor")
test_float = models.FloatField(help_text="Lorem dolor")
test_bigint = models.BigIntegerField(help_text="Lorem dolor")
test_positive_integer = models.PositiveIntegerField(help_text="Lorem dolor")
test_decimal = models.DecimalField(max_digits=5, decimal_places=2, help_text="Lorem dolor")
test_comma_separated_int = models.CommaSeparatedIntegerField(max_length=100, help_text="Lorem dolor")
test_small_int = models.SmallIntegerField(help_text="Lorem dolor")
test_nullbool = models.NullBooleanField(help_text="Lorem dolor")
test_filepath = models.FilePathField(blank=True, help_text="Lorem dolor")
test_positive_small_int = models.PositiveSmallIntegerField(help_text="Lorem dolor")
def get_absolute_url(self):
return ''
class Meta:
verbose_name = u'Test me'
verbose_name_plural = u'Lot of Test me'
class TestMeProxyForFieldsets(TestMe):
class Meta:
proxy = True
verbose_name = u'Test me fieldsets'
verbose_name_plural = u'Lot of Test me fieldsets'
class TestThat(models.Model):
that = models.ForeignKey(TestMe, help_text="Lorem dolor")
test_ip = models.IPAddressField(help_text="Lorem dolor")
test_url = models.URLField(help_text="Lorem dolor")
test_int = models.IntegerField(help_text="Lorem dolor")
test_date = models.DateField(help_text="Lorem dolor")
test_bool = models.BooleanField(help_text="Lorem dolor", default=True)
class Meta:
verbose_name = u'Test that'
verbose_name_plural = u'Lot of Test that'
class TestSortable(models.Model):
that = models.ForeignKey(TestMe)
position = models.PositiveSmallIntegerField("Position")
test_char = models.CharField(max_length=5)
class Meta:
ordering = ('position', )
| Python | 0.000003 |
63f91c2459cb98cf0cfb1e60d298944212d9d639 | add missing file in symm | symm/addons.py | symm/addons.py | #
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
import pyscf.lib.logger
def label_orb_symm(mol, irrep_name, symm_orb, mo):
nmo = mo.shape[1]
s = mol.intor_symmetric('cint1e_ovlp_sph')
mo_s = numpy.dot(mo.T, s)
orbsym = [None] * nmo
for i,ir in enumerate(irrep_name):
moso = numpy.dot(mo_s, symm_orb[i])
for j in range(nmo):
if not numpy.allclose(moso[j], 0, atol=1e-6):
if orbsym[j] is None:
orbsym[j] = ir
else:
raise ValueError('orbital %d not symmetrized' % j)
pyscf.lib.logger.debug(mol, 'irreps of each MO %s', str(orbsym))
return orbsym
def symmetrize_orb(mol, irrep_name, symm_orb, mo):
s = mol.intor_symmetric('cint1e_ovlp_sph')
mo_s = numpy.dot(mo.T, s)
mo1 = 0
for csym in symm_orb:
ovlpso = reduce(numpy.dot, (csym.T, s, csym))
sc = numpy.linalg.solve(ovlpso, numpy.dot(mo_s, csym).T)
mo1 = mo1 + numpy.dot(csym, sc)
return mo1
if __name__ == "__main__":
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.build(
atom = [['H', (0,0,0)], ['H', (0,0,1)]],
basis = {'H': 'cc-pvdz'},
symmetry = 1
)
mf = scf.RHF(mol)
mf.scf()
print label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mf.mo_coeff)
| Python | 0.000001 | |
c1f2b25638f0653f6ad70271e7bcb3351d06162a | allow to pass input and output vector sizes | brain.py | brain.py | #!/usr/bin/env python2
# -*- coding : utf-8 -*-
import logging
import math
import random
__author__ = "Julien Rialland"
__copyright__ = "Copyright 2015, J.Rialland"
__license__ = "Apache License 2.0"
__version__ = "0.1"
__maintainer__ = __author__
__email__ = "julien.rialland@gmail.com"
__status__ = "Production"
def flatten(lst):
    """Yield the items of *lst* depth-first, recursively expanding
    nested lists (non-list items are yielded unchanged)."""
    for item in lst:
        if isinstance(item, list):
            for sub_item in flatten(item):
                yield sub_item
        else:
            yield item
class NeuralNetwork:
def __init__(self, learningRate=0.3, momentum=0.1, hiddenLayers=4, binaryThresh=0.5):
self.learningRate = learningRate
self.momentum = momentum
self.hiddenSizes = hiddenLayers
self.binaryThresh = binaryThresh
def _initialize(self, sizes):
self.sizes = sizes
self.outputLayer = len(self.sizes) - 1
self.biases = [] # weights for bias nodes
self.weights = []
self.outputs = []
# state for training
self.deltas = [[] for _ in xrange(self.outputLayer + 1)]
self.changes = [[] for _ in xrange(self.outputLayer + 1)]
self.errors = [[] for _ in xrange(self.outputLayer + 1)]
self.outputs = [[] for _ in xrange(self.outputLayer + 1)]
self.biases = [[] for _ in xrange(self.outputLayer + 1)]
self.weights = [[] for _ in xrange(self.outputLayer + 1)]
for layer in xrange(self.outputLayer + 1):
size = self.sizes[layer]
self.deltas[layer] = [0 for _ in xrange(size)]
self.errors[layer] = [0 for _ in xrange(size)]
self.outputs[layer] = [0 for _ in xrange(size)]
if layer > 0:
self.biases[layer] = [
random.random() * 0.4 - 0.2 for _ in xrange(size)]
self.weights[layer] = [0 for _ in xrange(size)]
self.changes[layer] = [0 for _ in xrange(size)]
for node in xrange(size):
prevSize = self.sizes[layer - 1]
self.weights[layer][node] = [
random.random() * 0.4 - 0.2 for _ in xrange(prevSize)]
self.changes[layer][node] = [0 for _ in xrange(prevSize)]
def train(self, data, inputSize=None, outputSize=None, iterations=20000, errorThresh=0.005, logLevel=logging.DEBUG, log=False, logPeriod=10, learningRate=0.3, callback=None, callbackPeriod=10):
if inputSize is None:
inputSize = len(data[0][0])
if outputSize is None:
outputSize = len(data[0][1])
hiddenSizes = self.hiddenSizes
if not hiddenSizes:
hiddenSizes = [math.max(3, math.floor(inputSize / 2))]
self._initialize(list(flatten([inputSize, hiddenSizes, outputSize])))
error = 1
done = 0
for i in xrange(iterations):
done = i
if error <= errorThresh:
break
sum = 0
for d in data:
err = self.trainPattern(d[0], d[1], learningRate)
sum = sum + err
error = sum / len(data)
if log and i % logPeriod == 0:
logging.log(logLevel, "iterations:{0}, training error: {1}".format(i, error))
if callback is not None and i % callbackPeriod == 0:
callback(error=error, iterations=i)
return (error, done)
def trainPattern(self, input, target, learningRate):
# forward propogate
self.run(input)
# back propogate
self._calculateDeltas(target)
self._adjustWeights(learningRate)
return self._mse(self.errors[self.outputLayer])
def run(self, input):
output = self.outputs[0] = input # set output state of input layer
for layer in xrange(1, self.outputLayer + 1):
for node in xrange(self.sizes[layer]):
weights = self.weights[layer][node]
sum = self.biases[layer][node]
for k in xrange(len(weights)):
sum += weights[k] * input[k]
self.outputs[layer][node] = 1 / (1 + math.exp(-sum))
input = self.outputs[layer]
output = input
return output
def _calculateDeltas(self, target):
layer = self.outputLayer
while layer >= 0:
for node in xrange(self.sizes[layer]):
output = self.outputs[layer][node]
error = 0
if layer == self.outputLayer:
error = target[node] - output
else:
deltas = self.deltas[layer + 1]
for k in xrange(len(deltas)):
error += deltas[k] * self.weights[layer + 1][k][node]
self.errors[layer][node] = error
self.deltas[layer][node] = error * output * (1 - output)
layer -= 1
def _adjustWeights(self, learningRate):
for layer in xrange(1, self.outputLayer + 1):
incoming = self.outputs[layer - 1]
for node in xrange(self.sizes[layer]):
delta = self.deltas[layer][node]
for k in xrange(len(incoming)):
change = self.changes[layer][node][k]
change = learningRate * delta * \
incoming[k] + (self.momentum * change)
self.changes[layer][node][k] = change
self.weights[layer][node][k] += change
self.biases[layer][node] += learningRate * delta
    def _mse(self, errors):
        """Return the mean of the squared values in *errors*."""
        sum = 0
        for err in errors:
            sum += math.pow(err, 2)
        return sum / len(errors)
__all__ = ['NeuralNetwork']
| Python | 0.000001 | |
005872ea37dfdd4b8ab8b16e3c5b0083fb86cdb9 | Add settings file | scripts/settings.py | scripts/settings.py | #!/usr/bin/env python
#===============================================================================
# GLOBAL CONSTANTS
#===============================================================================
# --- Set up GPIO referencing----
# When True the PIN_* constants hold Broadcom (BCM) GPIO numbers;
# otherwise they hold physical board pin numbers.
broadcom_ref = True
if broadcom_ref:
    PIN_11 = 17
    PIN_12 = 18
    PIN_13 = 27
    PIN_15 = 22
    PIN_37 = 26
    PIN_38 = 20
    PIN_40 = 21
else:
    PIN_11 = 11
    PIN_12 = 12
    PIN_13 = 13
    PIN_15 = 15
    PIN_37 = 37
    PIN_38 = 38
    PIN_40 = 40
# --- System set up ---
UPDATE_RATE = 300  # seconds
W1_DEVICE_PATH = '/sys/bus/w1/devices/'  # 1-wire sensor device tree
DEBOUNCE_MICROS = 0.250  # seconds
SYS_FOLDER = '/home/pi/weather'
DATA_FOLDER = '/data/'
TICK_DATA = 'tick_count'
# --- RRDTool set up ---
RRDTOOL_RRD_FILE = 'weather_data.rrd'
RRDTOOL_HEARTBEAT = 2  # multiplier
# XML filename: Consolidation type, Resolution (minutes), Recording Period (days)
RRDTOOL_RRA = {'wd_last_1d.xml': ('LAST', 5, 1.17),
               'wd_avg_2d.xml': ('AVERAGE', 30, 2),
               'wd_avg_1w.xml': ('AVERAGE', 120, 7),
               'wd_avg_1m.xml': ('AVERAGE', 240, 31),
               'wd_avg_3m.xml': ('AVERAGE', 720, 93),
               'wd_avg_1y.xml': ('AVERAGE', 1440, 365),
               'wd_min_1y.xml': ('MIN', 1440, 365),
               'wd_max_1y.xml': ('MAX', 1440, 365)}
# Sensor table: name -> (enabled, pin or 1-wire device id, unit,
# min valid, max valid, RRD data-source type) -- presumed layout, TODO confirm
SENSOR_SET= { 'inside_temp': (True, PIN_37, '*C', -50, 100, 'GAUGE'),
              'inside_hum': (True, PIN_37, '%', -1, 101, 'GAUGE'),
              'door_open': (True, PIN_40, '', -1, 2, 'GAUGE'),
              'precip_rate': (True, PIN_38, 'mm', -5, 50, 'GAUGE'),
              'precip_acc': (True, PIN_38, 'mm', -5, 500, 'GAUGE'),
              'outside_temp': (True, '28-0414705bceff',
                               '*C', -50, 50, 'GAUGE'),
              'sw_status': (True, '', '', -1, 2, 'GAUGE'),
              'sw_power': (True, '', 'W', -9999, 9999, 'GAUGE')}
| Python | 0.000001 | |
746dd90a17d756f5601ddcbbd6c2de6fed9c75d5 | add splitter script | scripts/splitter.py | scripts/splitter.py | import sys
import os
import json
import errno  # needed by the makedirs race-condition guard below
import pdb

# Read a JSON array from stdin and write each element to
# items_data/<_key>.json, pretty-printed.
content = ""
for line in sys.stdin:
    content += line

data = json.loads(content)
print('ok')

for item in data:
    filename = "items_data/{0}.json".format(item['_key'])
    print("creating {0}".format(filename))
    if not os.path.exists(os.path.dirname(filename)):
        try:
            os.makedirs(os.path.dirname(filename))
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    with open(filename, 'w') as file_:
        file_.write(json.dumps(item, indent=4))

print(len(data))
| Python | 0.000001 | |
7a49dfb41888b6afed4ff3dca3987f641e497056 | Add PageHandler | handler/page.py | handler/page.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio
from BaseHandler import BaseHandler
# 404 Page
class Page404Handler(BaseHandler):
    """Renders the 404 (not found) error page."""
    def get(self):
        self.render('page/404.html', title="404")
# 500 Page
class Page500Handler(BaseHandler):
    """Renders the 500 (internal server error) page."""
    def get(self):
        self.render('page/500.html', title="500")
# Blank Page
class BlankHandler(BaseHandler):
    """Renders an empty template, useful as a page scaffold."""
    def get(self):
        self.render('page/blank.html', title="Blank")
d488ba2daf3fe2530dd5e67394328cc904c3889d | add expect_column_values_bitcoin_tx_is_confirmed (#4778) | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_bitcoin_tx_is_confirmed.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_bitcoin_tx_is_confirmed.py | """
This is a template for creating custom ColumnMapExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations
"""
import json
from typing import Optional
import blockcypher
import requests
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_btc_tx_confirmed(tx: str) -> bool:
    """Return True if Bitcoin transaction *tx* has at least one confirmation.

    Args:
        tx: transaction hash, looked up via the BlockCypher API.

    Returns:
        True when the network reports one or more confirmations; False when
        it reports zero or when the lookup fails for any reason.
    """
    try:
        return blockcypher.get_num_confirmations(tx) > 0
    except Exception as e:
        # Best-effort: a failed lookup (network error, unknown hash, API
        # limit) is treated as "not confirmed" rather than aborting the run.
        print(e)
        return False
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesBitcoinTxIsConfirmed(ColumnMapMetricProvider):
    """Map metric: per-value flag telling whether a Bitcoin tx hash is confirmed."""

    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.bitcoin_tx_is_confirmed"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # One BlockCypher API call per value; expect this to be slow on
        # large columns.
        return column.apply(lambda x: is_btc_tx_confirmed(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesBitcoinTxIsConfirmed(ColumnMapExpectation):
    """Expect each column value to be a confirmed Bitcoin transaction hash."""

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {
                "all_valid": [
                    "7f0881d44ad2c5972457825c0846ba330a53b159251e3f53a7d0de4cd2727590",
                    "4eeeaf8216f8ba3c5258dc1d4fc489a35381dda8259e5160d885b851e21e319d",
                    "6d12598311328a63c324ee7164b6d97e8d38b6af42cd4017643602fd982cc40c",
                    "ac6080a633e4bcf39e437ba160c95c8edfbb9fabe3525058a7fca2d890cc4ef5",
                ],
                "some_other": [
                    "1BoatSLRHtKNngkdXEeobR76b53LETtpyT",
                    "n2nzi7xDTrMVK9stGpbK3BtrpBCJfH7LRQ",
                    "3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC",
                    "bc1qxneu85dnhx33asv8da45x55qyeu44ek9h3vngxdsare",
                ],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "all_valid"},
                    "out": {
                        "success": True,
                    },
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "some_other", "mostly": 1},
                    "out": {
                        "success": False,
                    },
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = "column_values.bitcoin_tx_is_confirmed"

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = ("mostly",)

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {}

    def validate_configuration(
        self, configuration: Optional[ExpectationConfiguration]
    ) -> None:
        """Validate that the supplied configuration is usable.

        Args:
            configuration: optional ExpectationConfiguration used to
                configure this expectation; falls back to
                self.configuration when None.

        Returns:
            True on success; raises InvalidExpectationConfigurationError
            when validation fails.
        """
        super().validate_configuration(configuration)
        if configuration is None:
            configuration = self.configuration
        # # Check other things in configuration.kwargs and raise Exceptions if needed
        # try:
        #     assert (
        #         ...
        #     ), "message"
        #     assert (
        #         ...
        #     ), "message"
        # except AssertionError as e:
        #     raise InvalidExpectationConfigurationError(str(e))
        # NOTE(review): returns True although annotated -> None; this
        # mirrors the upstream template.
        return True

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "maturity": "experimental",
        "tags": [
            "hackathon-22",
            "experimental",
            "typed-entities",
        ],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@szecsip",  # Don't forget to add your github handle here!
        ],
        "requirements": ["blockcypher"],
    }
if __name__ == "__main__":
ExpectColumnValuesBitcoinTxIsConfirmed().print_diagnostic_checklist()
| Python | 0.000001 | |
d3248cebcb1ef161dfc706d99b4d361205fc9fbe | Add wsgi file | t10server.wsgi | t10server.wsgi | from teeminus10_api import app as application | Python | 0.000001 | |
be249ac1ce39977e4448e320f80d3b642f2c9193 | Add original code | gitcontrib.py | gitcontrib.py | #!/usr/bin/env python3
# TODO
# Multiple source extensions
# Handle different name same email as one contributor
# Handle different email same name as one contributor
# Handle github-side edits being double-counted
import subprocess as sp
from sys import argv, exit
from os import chdir, devnull
def usage():
print("Usage:\ngitcontrib <Path> <File Extension>");
def pretty_output(loc, auth_loc, expected_contrib):
print("\033[37;1mPROJECT CONTRIBUTIONS:\033[0m")
print("\033[37mThe project has \033[34;1m%d\033[0;37m lines of code.\033[0m" % loc)
print()
print("\033[37mContributors (%d):\033[0m" % len(auth_loc.keys()))
print('', end=' ')
print('\n '.join(auth_loc.keys()))
print()
print("\033[37mContribution breakdown:\033[0m")
outs = []
for a in auth_loc:
outs.append((a, auth_loc[a]))
outs.sort(key = lambda u: u[1])
outs.reverse()
for a in outs:
if a[1] >= expected_contrib*loc:
print(' ', a[0], ' has contributed ', '\033[32;1m', a[1], '\033[0m', ' lines of code ', '(\033[032;1m%.2f%%\033[0m) ' % (a[1]*100/loc), sep="")
else:
print(' ', a[0], ' has contributed ', '\033[31;1m', a[1], '\033[0m', ' lines of code ', '(\033[031;1m%.2f%%\033[0m) ' % (a[1]*100/loc), sep="")
def git_contrib(location, ext):
try:
chdir(location)
except:
print("Error accessing %s (check file permissions?)" % location)
return 1
try:
sp.check_call(['ls', '.git'], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
except:
print("%s is not a git repository" % location)
return 1
(s, author_out) = sp.getstatusoutput("git log | grep Author | sort -u")
if s != 0:
print(author_out)
return 0
authors = author_out.split('\n')
authors = [a.replace("Author: ", "") for a in authors]
try:
assert len(authors) > 0
except AssertionError:
print("No git-commit authors found")
return 1
files = sp.getoutput("find . -iname \*.%s" % ext).replace('\n', ' ')
if len(files):
try:
loc = int(sp.getoutput("wc -l %s" % files).split("\n")[-1].split()[0]);
assert loc >= 0
except:
print("Error in parsing files (check file permissions?)")
return 1
else:
print("No files with extension '%s' in %s" % (ext, location))
return 1
auth_loc = {}
for a in authors:
aloc = 0
try:
name = a[0:a.index("<") - 1]
except:
name = a
for f in files.split():
aloc += sum([int(x) for x in sp.getoutput("git blame %s | grep \"%s\" | wc -l" % (f, name)).split('\n')])
auth_loc[a] = aloc
pretty_output(loc, auth_loc, 1 / len(authors))
return 0
def main():
if (len(argv) != 3):
usage()
return 1
return git_contrib(argv[1], argv[2])
if __name__ == '__main__':
exit(main())
| Python | 0.000222 | |
2866c8fbb3549ffd2405c5b13338a3fdf87a6c5d | add checks | dog/checks.py | dog/checks.py | from discord.ext import commands
owner_id = '97104885337575424'
def is_owner():
return commands.check(lambda ctx: ctx.message.author.id == owner_id)
| Python | 0.000001 | |
daa846ba7db9fe7ff080dd1b38f4cdde97b9c426 | Create นาดี.py | ai/นาดี.py | ai/นาดี.py | # Bot นาดี AI ผู้ช่วยต้นแบบ
# make2
#License: BSD
# เขียนโดย นาย วรรณพงษ์ ภัททิยไพบูลย์
# https://python3.wannaphong.com/2015/03/สั่งงานด้วยเสียงใน-python.html
__author__ = 'วรรณพงษ์'
#License: BSD
import PyICU #ใช้ในการตัดคำ
import re
import webbrowser #ใช้ในการค้นหาโดยเปิด web browser แล้วค้นผ่านกูเกิล
from gtts import gTTS #โมดูลระบบสังเคราะห์เสียงพูด
import pyglet #โมดูลงานระบบเล่นไฟล์เสียง
import speech_recognition as sr #โมดูลรับเสียง
import time #ดึงเวลา
#ระบบตัดคำไทย
def isThai(chr):
    """Return True when *chr* lies in the Thai Unicode block (U+0E00-U+0E7F)."""
    return 3584 <= ord(chr) <= 3711
def warp(txt):
bd = PyICU.BreakIterator.createWordInstance(PyICU.Locale("th"))
bd.setText(txt)
lastPos = bd.first()
retTxt = ""
try:
while(1):
currentPos = next(bd)
retTxt += txt[lastPos:currentPos]
#เฉพาะภาษาไทยเท่านั้น
if(isThai(txt[currentPos-1])):
if(currentPos < len(txt)):
if(isThai(txt[currentPos])):
#คั่นคำที่แบ่ง
retTxt += ','
lastPos = currentPos
except StopIteration:
pass
#retTxt = retTxt[:-1]
return retTxt
#ระบบพูดและรับเสียง
class speak:
def __init__(self,onetxt):
self.onetxt = onetxt
tts = gTTS(text=onetxt,lang='th') # text คือ ข้อความ lang คือ รหัสภาษา
tts.save('hello-thai.mp3')
#ระบบเล่นไฟล์เสียง
music = pyglet.resource.media('hello-thai.mp3') #ดึงไฟล์เสียงเข้ามา
music.play()
def exit_callback(dt):
return
pyglet.clock.schedule_once(exit_callback,music.duration) #เช็คการทำงานระบบเล่นเสียง ไม่ให้เล่นซ้ำกัน
class text:
def __init__(self,txtto):
self.txtto = txtto
#print(txtto)
speak(txtto)
hello = text("สวัสดีค่ะ ฉันชื่อ นาดี ยินดีที่ได้รับใช้ค่ะ") #เสียงต้อนรับ
while True:
r = sr.Recognizer()
r.energy_threshold = 4000
with sr.Microphone() as source:
audio = r.listen(source)
try:
say = r.recognize_google(audio,language = "th-TH") #กำหนดค่าภาษาเป็นภาษาไทย
except LookupError:
say = "ไม่สามารถรับเสียงได้ค่ะ"
txtcom = [warp(say)] #ตัดคำจากข้อความที่แปลงมาจากเสียง
#ส่วนคำสั่ง
txt = (','.join(str(x) for x in txtcom))
if txt.find("สวัสดี") == 0 or txt.find("ไง")==0 or txt.find("หวัดดี") == 0:
text("สวัสดีค่ะ ฉันชื่อ นาดี ค่ะ")
elif txt.find("ออก") == 0 & txt.find("ระบบ") == 0:
break
elif txt.find("ค้นหา") == 0 & txt.find("ว่า") == 0: #ถ้าค้นแล้วเจอคำเหล่านี้
s = txt.split("ว่า")
s = str(s)
a = s[1]
a = a.replace(',', '')
text("ค้นหาคำว่า %s" % a)
webbrowser.get('windows-default').open_new_tab('https://www.google.co.th/search?q=%s' % a)
elif txt.find("เวลา") == 0 & txt.find("เท่า") == 0 or txt.find("ตอน") == 0 & txt.find("นี้") == 0:
a = time.strftime("%H - %M:")
a = a.replace('-', 'นาฬิกา')
a = a.replace(':', 'นาที')
text("ขณะนี้เป็นเวลา %sค่ะ" % a)
elif txt.find("พิมพ์") == 0 & txt.find("ตาม") == 0:
import pyperclip
text("กรุณาพูดออกมาค่ะ")
with sr.Microphone() as source:
audio = r.listen(source)
try:
say = r.recognize(audio)
except LookupError:
say = "ไม่สามารถรับเสียงได้ค่ะ"
txtcom = [warp(say)]
txt = (','.join(str(x) for x in txtcom))
pyperclip.copy(txt)
text("ระบบได้คัดลอกที่พูดออกมาแล้ว")
else:
text("ขออภัยค่ะ ระบบไม่รู้จักคำสั่งนี้ค่ะ")
| Python | 0 | |
482859488865fe9b1e05a923e7aafeb7e090f049 | Create volumeBars.py | python/volumeBars.py | python/volumeBars.py | #!/usr/bin/env python
from rgbmatrix import RGBMatrix
from random import randint
import time
rows = 16
chains = 1
parallel = 1
ledMatrix = RGBMatrix(rows, chains, parallel)
height = ledMatrix.height
width = ledMatrix.width
nextFrame = ledMatrix.CreateFrameCanvas()
while True:
nextFrame.SetPixel(randint(0, width), randint(0, height), randint(0, 255), randint(0, 255), randint(0, 255))
nextFrame = ledMatrix.swapOnVSync(nextFrame) | Python | 0.000001 | |
15839dd4b37761e49599f6b278f6bd6e6d18b1e5 | Add initial rpc implementation example | examples/mailbox/rpc.py | examples/mailbox/rpc.py | import sys
sys.path.append('.') # NOQA
from xwing.mailbox import spawn, run, stop
class Server(object):
    """Toy RPC server answering method calls received over an xwing mailbox."""

    def hello_world(self):
        """The single remotely callable method."""
        return 'Hello World!'

    def run(self):
        """Spawn the mailbox loop that dispatches incoming calls."""
        async def rpc_server(mailbox, server):
            while True:
                method_name, caller_pid = await mailbox.recv()
                print('Got call from: ', caller_pid)
                outcome = getattr(server, method_name)()
                await mailbox.send(caller_pid, outcome)

        spawn(rpc_server, self, name='rpc_server')
class Client(object):
    """Toy RPC client that sends call requests to a server mailbox."""

    def __init__(self, server_pid):
        # Address of the remote rpc_server actor, e.g. 'rpc_server@127.0.0.1'.
        self.server_pid = server_pid

    def call(self, function):
        """Spawn an actor that sends the request, awaits the reply, prints it."""
        async def dispatch(mailbox, function):
            # Include our own pid so the server knows where to reply.
            await mailbox.send(self.server_pid, function, mailbox.pid)
            result = await mailbox.recv()
            print(result)

        spawn(dispatch, function)
if __name__ == '__main__':
# python examples/mailbox/rpc.py
server = Server()
server.run()
client = Client('rpc_server@127.0.0.1')
client.call('hello_world')
try:
run()
except KeyboardInterrupt:
print('Stopping...')
stop()
| Python | 0 | |
f09bddb89681fdd03ac190a1caa4847b3da7a61f | add script for reinserting unparsed sentences into the parser output | src/corex/save_unparsables.py | src/corex/save_unparsables.py | #!/usr/bin/python
# This script takes the input file (one-sentence-per-line) for the
# the Berkeleyparser (topological fields model) and compares it to the
# parser's output file. Sentences missing in the parser output (unparsables)
# in the output are inserted from the parsers input file, one-sentence-per-line)
#
import sys
import codecs
import re
original = codecs.open(sys.argv[1], 'r', 'utf-8')
parsed = codecs.open(sys.argv[2], 'r', 'utf-8')
pos_and_token = re.compile('\(([^ ]+ (?:[^ )]+|\)))\)')
# This takes a line of the Berkeley topological parser's
# output, returns a string of tokens separated by whitespace
def get_tokens(line):
    """Return the tokens of a Berkeley-parser output line, space-separated.

    Returns an empty string when the line contains no (POS token) pairs.
    """
    matches = pos_and_token.findall(line)
    if not matches:
        return ''
    return " ".join(pair.split(" ")[1] for pair in matches)
# Walk the original (one sentence per line) and the parser output in
# lockstep; when the parser skipped a sentence (unparsable), emit the
# original line instead, then resynchronise on the held parsed sentence.
for oline in original:
    oline = oline.strip()
    pline = parsed.readline().strip()
    pline_tokens = get_tokens(pline)
    if oline == pline_tokens:
        print(pline.encode('utf-8'))
    else:
        # Current original sentence was not parsed: emit originals until we
        # reach the one matching the parsed line we are holding.
        print(oline.encode('utf-8'))
        if not pline_tokens == "":
            for ooline in original:
                ooline = ooline.strip()
                if not ooline == pline_tokens:
                    print(ooline.encode('utf-8'))
                else:
                    print(pline.encode('utf-8'))
                    break
| Python | 0.000003 | |
7d52d1efaf5bb07bfbb66e78f7c51e92b6c531dd | Use BytesIO. Closes #17 | ajaximage/image.py | ajaximage/image.py | import os
from PIL import Image, ImageOps
try:
from StringIO import StringIO as IO
except ImportError:
from io import BytesIO as IO
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
def resize(file_, max_width=0, max_height=0, crop=0):
    """Resize an uploaded image, returning a JPEG SimpleUploadedFile.

    file_      -- the source image file object
    max_width  -- maximum width in pixels; 0 means unconstrained
    max_height -- maximum height in pixels; 0 means unconstrained
    crop       -- when 1, crop to exactly (max_width, max_height);
                  otherwise shrink to fit, keeping the aspect ratio

    When both dimensions are 0 the original file is returned untouched.
    """
    max_width = int(max_width)
    max_height = int(max_height)
    crop = int(crop)

    # Fixed: compare ints with ==, not the identity operator `is`, which
    # only works for small ints by CPython implementation accident.
    if max_width == 0 and max_height == 0:
        return file_

    # 0 means "no limit"; a huge bound makes thumbnail() ignore that axis.
    max_width = 9999 if max_width == 0 else max_width
    max_height = 9999 if max_height == 0 else max_height
    size = (max_width, max_height)

    image = Image.open(file_)
    if image.mode == 'RGBA':
        # JPEG has no alpha channel: composite onto a white background.
        image.load()
        background = Image.new('RGB', image.size, (255, 255, 255))
        background.paste(image, mask=image.split()[3])
        image = background

    temp = IO()
    if crop == 1:
        image = ImageOps.fit(image, size, Image.ANTIALIAS)
    else:
        image.thumbnail(size, Image.ANTIALIAS)
    image.save(temp, 'jpeg')
    temp.seek(0)

    return SimpleUploadedFile(file_.name,
                              temp.read(),
                              content_type='image/jpeg')
from PIL import Image, ImageOps
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
def resize(file_, max_width=0, max_height=0, crop=0):
max_width = int(max_width)
max_height = int(max_height)
crop = int(crop)
if(max_width is 0 and max_height is 0):
return file_
max_width = 9999 if max_width is 0 else max_width
max_height = 9999 if max_height is 0 else max_height
size = (max_width, max_height)
image = Image.open(file_)
if(image.mode == 'RGBA'):
image.load()
background = Image.new('RGB', image.size, (255, 255, 255))
background.paste(image, mask=image.split()[3])
image = background
temp = StringIO()
if(crop is 1):
image = ImageOps.fit(image, size, Image.ANTIALIAS)
else:
image.thumbnail(size, Image.ANTIALIAS)
image.save(temp, 'jpeg')
temp.seek(0)
return SimpleUploadedFile(file_.name,
temp.read(),
content_type='image/jpeg') | Python | 0 |
5f9c6e49597abe07a74cd2e7370216bd0fc57cd4 | add topology | scripts/topology.py | scripts/topology.py | #!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, OVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
import sys
def multiControllerNet( number ):
    """Build and run a linear Mininet topology under a single controller.

    number -- switch count (string or int); each switch gets two hosts and
    consecutive switches are chained together. Drops into the Mininet CLI
    before tearing the network down.
    """
    net = Mininet( controller=Controller, switch=OVSSwitch, build=False )
    print "*** Creating (reference) controllers"
    c0 = net.addController( 'c0' , port=(7700))
    s_count = int(number)
    h_count = s_count * 2
    # sys.exit("END");
    hosts = [0] * h_count
    switches = [0] * s_count
    for i in range(h_count):
        hosts[i] = net.addHost('h' + str(i))
    for i in range(s_count):
        switches[i] = net.addSwitch('s' + str(i))
    print "*** Creating links between hosts and #switch"
    for i in range(s_count):
        # Hosts 2i and 2i+1 hang off switch i.
        net.addLink( switches[i],hosts[i * 2] )
        net.addLink( switches[i],hosts[i * 2 + 1] )
    print "*** Creating links between switches"
    for i in range(s_count-1):
        net.addLink( switches[i],switches[i+1] )
    print "*** Starting network"
    net.build()
    c0.start()
    for i in range(s_count):
        switches[i].start( [c0] )
    print "*** Testing network"
    # net.pingAll()
    print "*** Running CLI"
    CLI( net )
    print "*** Stopping network"
    net.stop()
if __name__ == '__main__':
setLogLevel( 'info' ) # for CLI output
if len(sys.argv) < 3:
print "Usage: sudo ./topo1.py -s [switch number]\n"
sys.exit(1)
elif sys.argv[1] == "-s":
multiControllerNet(sys.argv[2])
| Python | 0.000018 | |
4ff6b846311a0f7bd6cfcf2e661a7c53061406fe | Add command to print vault info | glaciercmd/command_vault_info.py | glaciercmd/command_vault_info.py | import boto
class CommandVaultInfo(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[2])
print "Vault info:\n\tname={}\n\tarn={}\n\tcreation_date={}\n\tlast_inventory_date={}\n\tsize={}\n\tnumber_of_archives={}".format(vault.name, vault.arn, vault.creation_date, vault.last_inventory_date, vault.size, vault.number_of_archives)
except:
print "Vaule named '{}' does not exist.".format(args[2])
def accept(self, args):
return len(args) >= 3 and args[0] == 'vault' and args[1] == 'info'
def command_init():
    """Factory hook used by the command loader to register this command."""
    return CommandVaultInfo()
| Python | 0.000001 | |
adcbdc06f0c476bc4c24e8c69d06cffbb6726a9f | Add migration | ovp_organizations/migrations/0023_auto_20170712_1704.py | ovp_organizations/migrations/0023_auto_20170712_1704.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-12 17:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: redefines Organization.address as a
    # nullable OneToOneField to ovp_core.SimpleAddress. (AlterField only
    # records the new state; presumably the previous field differed in
    # relation type or options -- confirm against migration 0022.)

    dependencies = [
        ('ovp_organizations', '0022_auto_20170613_1424'),
    ]

    operations = [
        migrations.AlterField(
            model_name='organization',
            name='address',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='ovp_core.SimpleAddress', verbose_name='address'),
        ),
    ]
| Python | 0.000002 | |
582ebd448508625ed2c9f362aaafc3fc46e60df0 | Add unit tests for security_scan | functest/tests/unit/features/test_security_scan.py | functest/tests/unit/features/test_security_scan.py | #!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import security_scan
from functest.utils import constants
class SecurityScanTesting(unittest.TestCase):
    """Unit tests for the SecurityScan feature wrapper."""

    # NOTE(review): runs at class-definition time, silencing logging for the
    # whole test process, not just this case.
    logging.disable(logging.CRITICAL)

    def setUp(self):
        self.sscan = security_scan.SecurityScan()

    def test_init(self):
        # The constructor must wire up project/case names and assemble the
        # shell command from the configured directories.
        self.assertEqual(self.sscan.project_name, "securityscanning")
        self.assertEqual(self.sscan.case_name, "security_scan")
        self.assertEqual(
            self.sscan.repo,
            constants.CONST.__getattribute__("dir_repo_securityscan"))
        self.assertEqual(
            self.sscan.cmd, (
                '. {0}/stackrc && cd {1} && '
                'python security_scan.py --config config.ini && '
                'cd -'.format(
                    constants.CONST.__getattribute__("dir_functest_conf"),
                    self.sscan.repo)))
if __name__ == "__main__":
unittest.main(verbosity=2)
| Python | 0 | |
25f5ff62e1652e3293d12e3e73e44e7d7c21463c | upgrade incs | bin/upgrade_fortran_inc.py | bin/upgrade_fortran_inc.py | #!/usr/bin/env python3
# -*- coding: utf8 -*-
# py f:\dev\progs\bin\clean_fortran.py ricks.f90
# f:\f90ppr\moware\f90ppr.exe < tmp.f90 > out.txt
import sys, os, subprocess, shutil
sys.path.append(r'C:\msys64\mingw64\bin')
f90ppr_exe = r"F:\f90ppr\moware\f90ppr"
def main(fname):
    """Reformat Fortran file *fname* in place via the f90ppr pretty-printer.

    f90ppr's output is written to a temporary '<base>.ppr<ext>' file, which
    is then copied over the original and removed.

    Raises Exception when *fname* does not exist.
    """
    if not os.path.isfile(fname):
        raise Exception(f'{fname} not found!')
    base, ext = os.path.splitext(fname)
    outname = base + '.ppr' + ext
    # 'with' guarantees the output handle is closed even if f90ppr fails
    # (the original leaked it on any exception).
    with open(outname, 'wb') as outfile:
        cmd = [f90ppr_exe]
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=outfile)
        # maximum line length (2-132)
        p.stdin.write(b'$define FPPR_MAX_LINE 132\n')
        # keywords case: FPPR_LEAVE, FPPR_UPPER, FPPR_LOWER
        p.stdin.write(b'$define FPPR_KWD_CASE FPPR_LOWER\n')
        # variables case: FPPR_LEAVE, FPPR_UPPER, FPPR_LOWER
        p.stdin.write(b'$define FPPR_USR_CASE FPPR_LEAVE\n')
        # indentation (0-60)
        p.stdin.write(b'$define FPPR_STP_INDENT 4\n')
        # input format: 0=free format
        p.stdin.write(b'$define FPPR_FXD_IN 1\n')
        # output format: 0=free format
        p.stdin.write(b'$define FPPR_FXD_OUT 0\n')
        # Stream the source file into the pretty-printer.
        with open(fname, 'rb') as infile:
            for line in infile:
                p.stdin.write(line)
        p.stdin.close()
        retcode = p.wait()
    print(f'retcode={retcode}')
    # overwrite the original file, then drop the temporary
    shutil.copy(outname, fname)
    if os.path.isfile(outname):
        os.remove(outname)
if __name__=="__main__":
f = sys.argv[1]
main(f)
| Python | 0.000001 | |
0fe4a3c3a1d31230c9b5c931ff1e33584f1ccd4e | Create maximum-length-of-pair-chain.py | Python/maximum-length-of-pair-chain.py | Python/maximum-length-of-pair-chain.py | # Time: O(nlogn)
# Space: O(1)
# You are given n pairs of numbers.
# In every pair, the first number is always smaller than the second number.
#
# Now, we define a pair (c, d) can follow another pair (a, b)
# if and only if b < c. Chain of pairs can be formed in this fashion.
#
# Given a set of pairs, find the length longest chain which can be formed.
# You needn't use up all the given pairs. You can select pairs in any order.
#
# Example 1:
# Input: [[1,2], [2,3], [3,4]]
# Output: 2
# Explanation: The longest chain is [1,2] -> [3,4]
# Note:
# The number of given pairs will be in the range [1, 1000].
class Solution(object):
    def findLongestChain(self, pairs):
        """
        :type pairs: List[List[int]]
        :rtype: int
        """
        # Greedy: sort by right endpoint (in place, as before), then take
        # every pair that starts strictly after the current chain's tail.
        pairs.sort(key=lambda p: p[1])
        chain_len = 0
        tail = None
        for pair in pairs:
            if tail is None or tail < pair[0]:
                chain_len += 1
                tail = pair[1]
        return chain_len
| Python | 0.998344 | |
0a0f17044b90b6897cd931a2e9d002b764b27b00 | Add pytorch-lightning bf16 training example (#5526) | python/nano/tutorial/training/pytorch-lightning/lightning_train_bf16.py | python/nano/tutorial/training/pytorch-lightning/lightning_train_bf16.py | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torchvision import transforms
from torchvision.datasets import OxfordIIITPet
from torch.utils.data.dataloader import DataLoader
import torch
from torchvision.models import resnet18
from bigdl.nano.pytorch import Trainer
import pytorch_lightning as pl
class MyLightningModule(pl.LightningModule):
    """ResNet18 fine-tuned for the 37-class Oxford-IIIT Pet dataset."""

    def __init__(self):
        super().__init__()
        self.model = resnet18(pretrained=True)
        num_ftrs = self.model.fc.in_features
        # Here the size of each output sample is set to 37.
        self.model.fc = torch.nn.Linear(num_ftrs, 37)
        self.criterion = torch.nn.CrossEntropyLoss()

    def forward(self, x):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        output = self.model(x)
        loss = self.criterion(output, y)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        output = self.forward(x)
        loss = self.criterion(output, y)
        pred = torch.argmax(output, dim=1)
        acc = torch.sum(y == pred).item() / (len(y) * 1.0)
        # NOTE(review): metrics are named 'test_*' although this is the
        # validation step -- confirm the naming is intentional.
        metrics = {'test_acc': acc, 'test_loss': loss}
        self.log_dict(metrics)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
def create_dataloaders():
    """Return (train, val) DataLoaders for Oxford-IIIT Pet.

    Training data gets augmentation (random crop, flip, color jitter);
    validation uses a deterministic center crop. A random quarter of the
    training set is held out for validation.
    """
    train_transform = transforms.Compose([transforms.Resize(256),
                                          transforms.RandomCrop(224),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ColorJitter(brightness=.5, hue=.3),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])
    val_transform = transforms.Compose([transforms.Resize(256),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor(),
                                        transforms.Normalize([0.485, 0.456, 0.406],
                                                             [0.229, 0.224, 0.225])])
    # Apply data augmentation only to the train_dataset
    train_dataset = OxfordIIITPet(root="/tmp/data", transform=train_transform, download=True)
    val_dataset = OxfordIIITPet(root="/tmp/data", transform=val_transform)
    # obtain training indices that will be used for validation
    indices = torch.randperm(len(train_dataset))
    val_size = len(train_dataset) // 4
    train_dataset = torch.utils.data.Subset(train_dataset, indices[:-val_size])
    val_dataset = torch.utils.data.Subset(val_dataset, indices[-val_size:])
    # prepare data loaders
    train_dataloader = DataLoader(train_dataset, batch_size=32)
    val_dataloader = DataLoader(val_dataset, batch_size=32)
    return train_dataloader, val_dataloader
if __name__ == "__main__":
model = MyLightningModule()
train_loader, val_loader = create_dataloaders()
# Bfloat16 Training
#
# BFloat16 is a custom 16-bit floating point format for machine learning
# that’s comprised of one sign bit, eight exponent bits, and seven mantissa bits.
# BFloat16 has a greater "dynamic range" than FP16. This means it is able to
# improve numerical stability than FP16 while delivering increased performance
# and reducing memory usage.
#
# In BigDL-Nano, you can easily enable BFloat16 Mixed precision by setting precision='bf16'
#
# Note: Using BFloat16 precision with torch < 1.12 may result in extremely slow training.
trainer = Trainer(max_epochs=5, precision='bf16')
trainer.fit(model, train_dataloaders=train_loader)
trainer.validate(model, dataloaders=val_loader)
# You can also set use_ipex=True and precision='bf16' to enable ipex optimizer fusion
# for bf16 to gain more acceleration from BFloat16 data type.
trainer = Trainer(max_epochs=5, use_ipex=True, precision='bf16')
trainer.fit(model, train_dataloaders=train_loader)
trainer.validate(model, dataloaders=val_loader)
| Python | 0 | |
08a6963b1e7b93929b2fcb6bf27e10e2c9d2e7c5 | Add server stats | server_stats.py | server_stats.py | #*-* coding: utf-8 *-*
from flask import Flask
from flask import jsonify
from flask import request
import requests
import json
app = Flask(__name__)
CHARTS_DIR = "static"
def get_chart_URL(filename="myChart.jpeg"):
    """Ask the chart service to regenerate the chart and return its URL.

    NOTE(review): *filename* is currently unused and nothing is saved
    locally, despite the original "download and save" description --
    confirm whether saving was intended.
    ref: https://core.telegram.org/blackberry/chat-media-send
    """
    r = requests.get('https://getcharts.herokuapp.com/updateChart')
    result = r.json()
    return result['url']
def get_ticker(currency):
    """Get the bitcoin exchange rate for the selected currency.

    Params:
        currency (str): one of 'dollar', 'euro', 'hkd'

    Returns:
        (str) The response string

    Raises KeyError for an unsupported currency name.
    """
    CURRENCY_TYPE = {
        'dollar': "USD",
        'euro': "EUR",
        'hkd': "HKD"
    }
    result = requests.get('https://blockchain.info/ticker')
    result = result.json()
    # Fixed user-facing typo: "bitchoin" -> "bitcoin".
    return "The bitcoin exchange rate in {} is : {}".format(
        CURRENCY_TYPE[currency],
        result[CURRENCY_TYPE[currency]]['last']
    )
def get_stats():
    """Get the general stats about blockchains.

    Returns:
        (str) The response string summarising market and volume figures.
    """
    stats = requests.get('https://api.blockchain.info/stats').json()
    # Order of fields mirrors the placeholders in the message template below.
    fields = (
        'market_price_usd',
        'hash_rate',
        'n_blocks_mined',
        'n_blocks_total',
        'estimated_transaction_volume_usd',
        'trade_volume_btc',
        'trade_volume_usd',
    )
    return "The market price in USD is {}. The hash rate is {}. The number of blocks mined is {}. The number of total blocks is {}. The estimated transaction volume is {}. The bitcoin trade volume is {}. The USD trade volumes is {} ".format(
        *(stats[key] for key in fields)
    )
@app.route("/chainBot", methods=['POST'])
def chainBot():
    """The Chain Bot service: api.ai (Dialogflow v1) fulfillment webhook.

    Reads the fulfillment JSON from the POST body, dispatches on the first
    context's name, and answers with the api.ai response schema
    (speech / displayText / data / contextOut / source).
    Doc: https://api.ai/docs/fulfillment
    Doc on error responses: https://api.ai/docs/fulfillment#errors
    """
    ##
    # Convert the request data string into JSON obj
    req = json.loads(request.data)
    # Debug aid: dump the full incoming request to the server log.
    print(json.dumps(req, indent=2))
    ##
    # Check if the action is completed
    if not req['result']['actionIncomplete']:
        ##
        # INFO CRYPTOCURRENCY EXCHANGE
        if req['result']['contexts'][0]['name'] == "info-cryptocurrency-exchange":
            ##
            # Call the reponse for the "info-bitcoin-exchange" context
            if req['result']['contexts'][0]['parameters']['cryptocurrency'] == "bitcoin":
                response = get_ticker(req['result']['contexts'][
                    0]['parameters']['currency'])
                ##
                # Return the response
                return jsonify({
                    "speech": response,
                    "displayText": response,
                    "data": {},
                    "contextOut": [],
                    "source": ""
                }), 200, {'Content-Type': 'application/json; charset=utf-8'}
            else:
                return jsonify({
                    "speech": "I don't know the exchange rate for that cryptocurrency...",
                    "displayText": "I don't know the exchange rate for that cryptocurrency...",
                    "data": {},
                    "contextOut": [],
                    "source": ""
                }), 200, {'Content-Type': 'application/json; charset=utf-8'}
        ##
        # INFO MARKET
        elif req['result']['contexts'][0]['name'] == "info-market":
            chart_url = get_chart_URL()
            print(chart_url)
            # Telegram-specific payload (type 3 = image) plus a plain-text
            # fallback message (type 0) for other platforms.
            return jsonify({
                "speech": chart_url,
                "messages": [
                    {
                        "type": 3,
                        "platform": "telegram",
                        "imageUrl": chart_url
                    },
                    {
                        "type": 0,
                        "speech": chart_url
                    }
                ]
            }), 200, {'Content-Type': 'application/json; charset=utf-8'}
        ##
        #INFO STATS
        elif req['result']['contexts'][0]['name'] == "info-stats":
            response = get_stats()
            ##
            # Return the response
            return jsonify({
                "speech": response,
                "displayText": response,
                "data": {},
                "contextOut": [],
                "source": ""
            }), 200, {'Content-Type': 'application/json; charset=utf-8'}
        else:
            # Unknown context name.
            return jsonify({
                "speech": "Sorry, can't understand your request...",
                "displayText": "Sorry, can't understand your request...",
                "data": {},
                "contextOut": [],
                "source": ""
            }), 404, {'Content-Type': 'application/json; charset=utf-8'}
    else:
        # api.ai reported the action as incomplete (missing slots).
        return jsonify({
            "speech": "Sorry, your request is not complete...",
            "displayText": "Sorry, your request is not complete...",
            "data": {},
            "contextOut": [],
            "source": ""
        }), 400, {'Content-Type': 'application/json; charset=utf-8'}
if __name__ == "__main__":
    # NOTE(review): binds on all interfaces with debug=True — do not expose
    # the Werkzeug debugger on a public host; confirm deployment setup.
    app.run("0.0.0.0", 80, debug=True
            ##
            # Only for HTTPS
            # , ssl_context=(
            #     '/etc/letsencrypt/live/chain.vector3d.xyz/fullchain.pem',
            #     '/etc/letsencrypt/live/chain.vector3d.xyz/privkey.pem'
            # )
            )
4f586f16eaf3e06d347bf9976a02005c70cd7e13 | Create installTests.py | installTests.py | installTests.py | import unittest
import install
class TestOperationWrapperMethods(unittest.TestCase):
    """Test scaffold for the installer's operation wrappers (no tests yet)."""
    def setUp(self):
        """Create an empty dataset fixture before each test."""
        # TODO: Write Tests
        self.test_dataset = ""
| Python | 0.000001 | |
95182581beebbd181b20b23ee02657cb18347dd6 | update spec: update read_spectrum.py: add read_spectrum for elodie | bopy/spec/read_spectrum.py | bopy/spec/read_spectrum.py | # -*- coding: utf-8 -*-
"""
Author
------
Bo Zhang
Email
-----
bozhang@nao.cas.cn
Created on
----------
- Tue Mar 8 15:26:00 2016 read_spectrum
Modifications
-------------
-
Aims
----
- read various kinds of spectra
"""
import os
import numpy as np
from astropy.io import fits
from .spec import Spec
def reconstruct_wcs_coord_from_fits_header(hdr, dim=1):
    """Rebuild the WCS coordinate array (e.g. wavelength grid) for one axis.

    hdr : FITS-header-like mapping with CRVAL/CDELT/CRPIX/NAXIS keywords.
    dim : 1-based axis number; must not exceed ``hdr['NAXIS']``.
    """
    assert dim <= hdr['NAXIS']
    # Pull the four WCS keywords for the requested axis.
    crval, cdelt, crpix, naxis = (
        hdr[key % dim] for key in ('CRVAL%d', 'CDELT%d', 'CRPIX%d', 'NAXIS%d'))
    # FITS pixels run 1..NAXIS; CRPIX is the reference pixel located at CRVAL.
    pixel_offsets = np.arange(1 - crpix, naxis + 1 - crpix)
    return pixel_offsets * cdelt + crval
def read_spectrum_elodie_r42000(fp):
    """Read one spectrum from the ELODIE library (R42000).

    Parameters
    ----------
    fp : str
        Path to the ELODIE FITS file.

    Returns
    -------
    Spec
        Table with 'wave', 'flux', 'flux_ivar' and 'flux_err' columns.
    """
    # assert the file exists (fail fast with a clear condition)
    assert os.path.exists(fp)
    # Use a context manager so the FITS handle is always closed
    # (the original left the HDU list open -> file-handle leak).
    with fits.open(fp) as hl:
        # reconstruct wave array from the WCS keywords of the primary HDU
        wave = reconstruct_wcs_coord_from_fits_header(hl[0].header, dim=1)
        # Copy the data out of the (possibly memory-mapped) HDUs so the
        # arrays stay valid after the file is closed.
        flux = np.array(hl[0].data)
        flux_err = np.array(hl[2].data)
        # inverse variance, convenient as a weight downstream
        flux_ivar = 1 / flux_err ** 2.
        sp = Spec(data=[wave, flux, flux_ivar, flux_err],
                  names=['wave', 'flux', 'flux_ivar', 'flux_err'])
    return sp
| Python | 0 | |
39bb6cd51ce5351bfd93adac7b083a52b25590f8 | Create 6kyu_vending_machine.py | Solutions/6kyu/6kyu_vending_machine.py | Solutions/6kyu/6kyu_vending_machine.py | class VendingMachine():
def __init__(self, items, money):
self.items = dict(enumerate(items))
self.money = money
def vend(self, selection, item_money):
try:
n,v = [(n,self.items[n]) for n in self.items
if (self.items[n]['code']).lower() == selection.lower()][0]
except:
return "Invalid selection! : Money in vending machine = {:.2f}".format(self.money)
if item_money < v['price']:
return "Not enough money!"
if v['quantity'] <= 0:
return "{}: Out of stock!".format(v['name'])
if item_money > v['price']:
change = item_money - v['price']
v['quantity'] = v['quantity'] - 1
self.money += (-change + item_money)
self.items[n] = v
return "Vending {} with {:.2f} change.".format(v['name'], change)
else:
v['quantity'] = v['quantity'] - 1
self.money += item_money
self.items[n] = v
return "Vending {}".format(v['name'])
| Python | 0.000057 | |
5e96dd2846660f14e1d7b691ba928da63b699f19 | Add support for Spotify | services/spotify.py | services/spotify.py | from oauthlib.common import add_params_to_uri
import foauth.providers
class Spotify(foauth.providers.OAuth2):
    """foauth.org provider definition for the Spotify Web API (OAuth 2)."""
    # General info about the provider
    provider_url = 'https://spotify.com/'
    docs_url = 'https://developer.spotify.com/web-api/endpoint-reference/'
    category = 'Music'
    # URLs to interact with the API
    authorize_url = 'https://accounts.spotify.com/authorize'
    access_token_url = 'https://accounts.spotify.com/api/token'
    api_domain = 'api.spotify.com'
    # (scope, human-readable description) pairs; None = no extra scope needed.
    available_permissions = [
        (None, 'Read your publicly available information'),
        ('playlist-modify', 'Manage your public playlists'),
        ('playlist-modify-private', 'Manage all your playlists (even private)'),
        ('playlist-read-private', 'Access your private playlists'),
        ('user-read-private', 'Access your name, image and subscription details'),
        ('user-read-email', 'Get your real email address'),
    ]
    def get_user_id(self, key):
        """Return the Spotify user id for the account behind ``key``.

        Uses the base class's authenticated ``api()`` helper against /v1/me.
        """
        r = self.api(key, self.api_domain, u'/v1/me')
        return r.json()[u'id']
| Python | 0 | |
062c4bc134f77f9279d18774b954a06566f99c5a | Add logger | src/acquisition/covidcast/logger.py | src/acquisition/covidcast/logger.py | """Structured logger utility for creating JSON logs in Delphi pipelines."""
import logging
import sys
import threading
import structlog
def handle_exceptions(logger):
    """Route uncaught exceptions (main thread and workers) to `logger`."""
    def _log_uncaught(etype, value, traceback):
        # Emit the full traceback through the structured logger.
        logger.exception("Top-level exception occurred",
                         exc_info=(etype, value, traceback))
    # Uncaught exceptions on the main thread.
    sys.excepthook = _log_uncaught
    # Uncaught exceptions inside threading.Thread targets (Python 3.8+).
    threading.excepthook = lambda args: _log_uncaught(
        args.exc_type, args.exc_value, args.exc_traceback)
def get_structured_logger(name=__name__,
                          filename=None,
                          log_exceptions=True):
    """Create a new structlog logger.

    Use the logger returned from this in indicator code using the standard
    wrapper calls, e.g.:

    logger = get_structured_logger(__name__)
    logger.warning("Error", type="Signal too low").

    The output will be rendered as JSON which can easily be consumed by logs
    processors.

    See the structlog documentation for details.

    Parameters
    ---------
    name: Name to use for logger (included in log lines), __name__ from caller
        is a good choice.
    filename: An (optional) file to write log output, in addition to stderr.
    log_exceptions: If True (default), also install top-level exception hooks
        (see handle_exceptions) so uncaught errors are logged via this logger.
    """
    # Configure the underlying logging configuration
    handlers = [logging.StreamHandler()]
    if filename:
        handlers.append(logging.FileHandler(filename))

    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO,
        handlers=handlers
    )

    # Configure structlog. This uses many of the standard suggestions from
    # the structlog documentation. NOTE: the processor order below matters —
    # JSONRenderer must come last.
    structlog.configure(
        processors=[
            # Filter out log levels we are not tracking.
            structlog.stdlib.filter_by_level,
            # Include logger name in output.
            structlog.stdlib.add_logger_name,
            # Include log level in output.
            structlog.stdlib.add_log_level,
            # Allow formatting into arguments e.g., logger.info("Hello, %s",
            # name)
            structlog.stdlib.PositionalArgumentsFormatter(),
            # Add timestamps.
            structlog.processors.TimeStamper(fmt="iso"),
            # Match support for exception logging in the standard logger.
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            # Decode unicode characters
            structlog.processors.UnicodeDecoder(),
            # Render as JSON
            structlog.processors.JSONRenderer()
        ],
        # Use a dict class for keeping track of data.
        context_class=dict,
        # Use a standard logger for the actual log call.
        logger_factory=structlog.stdlib.LoggerFactory(),
        # Use a standard wrapper class for utilities like log.warning()
        wrapper_class=structlog.stdlib.BoundLogger,
        # Cache the logger
        cache_logger_on_first_use=True,
    )

    logger = structlog.get_logger(name)
    if log_exceptions:
        handle_exceptions(logger)
    return logger
| Python | 0.00002 | |
934f4ccfc4e34c5486c3d5a57b429742eb9b5915 | add algorithms.ml to make format for machine learning | algorithms/ml.py | algorithms/ml.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Machine learning algorithms.
"""
import sys
from optparse import OptionParser
from jcvi.apps.base import ActionDispatcher, debug
debug()
def main():
    """Dispatch this module's sub-commands through ActionDispatcher."""
    dispatcher = ActionDispatcher((
        ('libsvm', 'convert csv file to LIBSVM format'),
    ))
    dispatcher.dispatch(globals())
def libsvm(args):
    """
    %prog libsvm csvfile prefix.ids

    Convert csv file to LIBSVM format. `prefix.ids` contains the prefix mapping.
    Ga -1
    Gr 1

    So the feature in the first column of csvfile get scanned with the prefix
    and mapped to different classes. Formatting spec:

    http://svmlight.joachims.org/
    """
    # NOTE(review): this function is Python 2 only (`fp.next()` and the
    # print statement below) — confirm the interpreter this project targets.
    from jcvi.formats.base import DictFile

    p = OptionParser(libsvm.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    csvfile, prefixids = args
    d = DictFile(prefixids)
    fp = open(csvfile)
    # Skip the header row of the CSV file.
    fp.next()
    for row in fp:
        atoms = row.split()
        klass = atoms[0]
        # Map the prefix (text before '_') to its class label; "0" = unknown.
        kp = klass.split("_")[0]
        klass = d.get(kp, "0")
        # LIBSVM features are 1-based "index:value" pairs.
        feats = ["{0}:{1}".format(i + 1, x) for i, x in enumerate(atoms[1:])]
        print " ".join([klass] + feats)
if __name__ == '__main__':
    main()
| Python | 0.000004 | |
2ca07d4a8893196bbf304bcdac16688505e6123a | Add a management command to register webhooks | shopify/webhooks/management/commands/webhookregister.py | shopify/webhooks/management/commands/webhookregister.py | from django.core.management.base import NoArgsCommand
from webhooks.models import Webhook
class Command(NoArgsCommand):
    """Management command: register every created webhook with Shopify.

    NOTE(review): NoArgsCommand was deprecated/removed in newer Django
    releases — fine for the Django version this project pins; confirm.
    """
    help = 'Register all created Shopify webhooks'

    def handle_noargs(self, **options):
        # Delegate to the custom manager, which performs the registration.
        Webhook.objects.register()
| Python | 0.000001 | |
dbde6b78e0abc47473246abe0a11e2dd445134a6 | Implement WSGI loader. | helper-scripts/wsgi-loader.py | helper-scripts/wsgi-loader.py | #!/usr/bin/env python
# Phusion Passenger - http://www.modrails.com/
# Copyright (c) 2010, 2011, 2012 Phusion
#
# "Phusion Passenger" is a trademark of Hongli Lai & Ninh Bui.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os, re, imp, traceback, socket, select, struct
from socket import _fileobject
options = {}
def abort(message):
    """Write `message` (plus a newline) to stderr and exit with status 1."""
    sys.stderr.write(message + "\n")
    raise SystemExit(1)
def readline():
    """Read one line from stdin; raise EOFError at end-of-stream."""
    line = sys.stdin.readline()
    if not line:
        raise EOFError
    return line
def handshake_and_read_startup_request():
    """Perform the control handshake with the parent process, then read
    "Name: value" option lines into the module-level `options` dict until
    a blank line terminates the header block."""
    global options
    print("I have control 1.0")
    if readline() != "You have control 1.0\n":
        abort("Invalid initialization header")
    while True:
        line = readline()
        if line == "\n":
            break
        parts = re.split(': *', line.strip(), 2)
        options[parts[0]] = parts[1]
def load_app():
    """Load `passenger_wsgi.py` from the current working directory as the
    module `passenger_wsgi` and return it.

    NOTE(review): `imp.load_source` is deprecated (removed in Python 3.12);
    this loader targets Python 2, where it is still available.
    """
    return imp.load_source('passenger_wsgi', 'passenger_wsgi.py')
def create_server_socket(filename='/tmp/foo'):
    """Create a listening Unix-domain server socket.

    Args:
        filename: path for the socket file. Parameterized (it used to be a
            hard-coded '/tmp/foo'); the default preserves the old behavior.

    Returns:
        (filename, socket) tuple for the bound, listening socket.
    """
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # Remove a stale socket file left over from a previous run, if any.
    try:
        os.remove(filename)
    except OSError:
        pass
    s.bind(filename)
    # Generous backlog: many requests may queue up while the app starts.
    s.listen(1000)
    return (filename, s)
def advertise_sockets(socket_filename):
    """Announce the session socket to the parent process, terminated by a
    blank line, in Passenger's "socket: ..." advertisement format."""
    sys.stdout.write("socket: main;unix:%s;session;1\n" % socket_filename)
    sys.stdout.write("\n")
class RequestHandler:
	"""Accepts session connections, parses Passenger's NUL-delimited request
	headers, and runs each request through the loaded WSGI application.

	NOTE(review): this file is Python 2 only (`except Exception, e` and the
	three-argument `raise` below are not valid Python 3 syntax).
	"""
	def __init__(self, server_socket, owner_pipe, app):
		# server_socket: listening Unix socket; owner_pipe: stdin from the
		# parent (readable => parent died, time to shut down); app: WSGI app.
		self.server = server_socket
		self.owner_pipe = owner_pipe
		self.app = app
	def main_loop(self):
		"""Serve requests until the owner pipe closes or we are interrupted."""
		done = False
		try:
			while not done:
				client, address = self.accept_connection()
				if not client:
					# Owner pipe became readable: parent is gone.
					done = True
					break
				try:
					try:
						env, input_stream = self.parse_request(client)
						if env:
							if env['REQUEST_METHOD'] == 'ping':
								self.process_ping(env, input_stream, client)
							else:
								self.process_request(env, input_stream, client)
						else:
							done = True
					except KeyboardInterrupt:
						done = True
					except Exception, e:
						# Log, but keep serving subsequent requests.
						traceback.print_tb(sys.exc_info()[2])
						sys.stderr.write(str(e.__class__) + ": " + str(e) + "\n")
				finally:
					# Always close the client connection, even on errors.
					try:
						client.close()
					except:
						pass
		except KeyboardInterrupt:
			pass
	def accept_connection(self):
		"""Block until a client connects or the owner pipe becomes readable.

		Returns (client, address), or (None, None) when the owner pipe fired.
		"""
		result = select.select([self.owner_pipe, self.server.fileno()], [], [])[0]
		if self.server.fileno() in result:
			return self.server.accept()
		else:
			return (None, None)
	def parse_request(self, client):
		"""Read one request: a 4-byte big-endian header size, then that many
		bytes of NUL-separated key/value pairs. Returns (env, client), or
		(None, None) if the peer disconnected mid-read."""
		buf = ''
		while len(buf) < 4:
			tmp = client.recv(4 - len(buf))
			if len(tmp) == 0:
				return (None, None)
			buf += tmp
		header_size = struct.unpack('>I', buf)[0]
		buf = ''
		while len(buf) < header_size:
			tmp = client.recv(header_size - len(buf))
			if len(tmp) == 0:
				return (None, None)
			buf += tmp
		headers = buf.split("\0")
		headers.pop() # Remove trailing "\0"
		env = {}
		i = 0
		while i < len(headers):
			env[headers[i]] = headers[i + 1]
			i += 2
		return (env, client)
	def process_request(self, env, input_stream, output_stream):
		"""Run one WSGI request/response cycle over the client socket."""
		# The WSGI speculation says that the input paramter object passed needs to
		# implement a few file-like methods. This is the reason why we "wrap" the socket._socket
		# into the _fileobject to solve this.
		#
		# Otherwise, the POST data won't be correctly retrieved by Django.
		#
		# See: http://www.python.org/dev/peps/pep-0333/#input-and-error-streams
		env['wsgi.input'] = _fileobject(input_stream, 'r', 512)
		env['wsgi.errors'] = sys.stderr
		env['wsgi.version'] = (1, 0)
		env['wsgi.multithread'] = False
		env['wsgi.multiprocess'] = True
		env['wsgi.run_once'] = True
		if env.get('HTTPS','off') in ('on', '1', 'true', 'yes'):
			env['wsgi.url_scheme'] = 'https'
		else:
			env['wsgi.url_scheme'] = 'http'
		# PEP 333 boilerplate: buffer the status/headers until first body write.
		headers_set = []
		headers_sent = []
		def write(data):
			if not headers_set:
				raise AssertionError("write() before start_response()")
			elif not headers_sent:
				# Before the first output, send the stored headers.
				status, response_headers = headers_sent[:] = headers_set
				output_stream.send('Status: %s\r\n' % status)
				for header in response_headers:
					output_stream.send('%s: %s\r\n' % header)
				output_stream.send('\r\n')
			output_stream.send(data)
		def start_response(status, response_headers, exc_info = None):
			if exc_info:
				try:
					if headers_sent:
						# Re-raise original exception if headers sent.
						raise exc_info[0], exc_info[1], exc_info[2]
				finally:
					# Avoid dangling circular ref.
					exc_info = None
			elif headers_set:
				raise AssertionError("Headers already set!")
			headers_set[:] = [status, response_headers]
			return write
		result = self.app(env, start_response)
		try:
			for data in result:
				# Don't send headers until body appears.
				if data:
					write(data)
			if not headers_sent:
				# Send headers now if body was empty.
				write('')
		finally:
			if hasattr(result, 'close'):
				result.close()
	def process_ping(self, env, input_stream, output_stream):
		"""Liveness probe: answer the parent's ping with "pong"."""
		output_stream.send("pong")
if __name__ == "__main__":
	# Startup sequence: handshake, load the app, open the session socket,
	# announce readiness, then serve requests until shutdown.
	handshake_and_read_startup_request()
	app_module = load_app()
	socket_filename, server_socket = create_server_socket()
	handler = RequestHandler(server_socket, sys.stdin, app_module.application)
	print("Ready")
	advertise_sockets(socket_filename)
	handler.main_loop()
| Python | 0 | |
4aafeac9c238ffb8dc448c87f18abfd7f1f0c9d7 | store data dir info | gemini/anno_info.py | gemini/anno_info.py | #!/usr/bin/env python
"""
Store the path for GEMINI data-dir
"""
from gemini.config import read_gemini_config
# Load the GEMINI configuration and report where annotation files live.
config = read_gemini_config()
anno_dirname = config["annotation_dir"]
# Python 2 print statement: emit the annotation directory path on stdout.
print anno_dirname
| Python | 0 | |
9d6a053441505fae600915e24a263de798843fbb | Add test_weapon class | test_weapon.py | test_weapon.py | import unittest
import weapon
class TestWeapon(unittest.TestCase):
    """Unit tests for weapon.Weapon construction, printing and crits."""
    def setUp(self):
        # w: always-crit weapon; w2: constructed with an out-of-range crit
        # chance (2.0), which Weapon apparently clamps to 0.0 (see the
        # assertions in test_weapon_init2) — confirm against weapon.py.
        self.w = weapon.Weapon('bow', 30, 1.0, 1)
        self.w2 = weapon.Weapon('bow', 30, 2.0, 1)
    def test_weapon_init(self):
        self.assertEqual('bow', self.w.type)
        self.assertEqual(30, self.w.damage)
        self.assertEqual(1.0, self.w.critical_strike_percent)
        self.assertEqual(1, self.w.tier)
    def test_weapon_init2(self):
        self.assertEqual('bow', self.w2.type)
        self.assertEqual(30, self.w2.damage)
        self.assertEqual(0.0, self.w2.critical_strike_percent)
        self.assertEqual(1, self.w2.tier)
    def test_weapon_init_with_incorrect_argument(self):
        # NOTE(review): duplicates an assertion from test_weapon_init2.
        self.assertEqual(0.0, self.w2.critical_strike_percent)
    def test_weapon_to_string(self):
        self.assertEqual('bow\n30 damage\n100% critical strike percent',
                         str(self.w))
    def test_critical_hit(self):
        # 100% crit chance always hits; 0% never does.
        self.assertTrue(self.w.critical_hit())
        self.assertFalse(self.w2.critical_hit())
if __name__ == '__main__':
    unittest.main()
| Python | 0.000001 | |
7a4b4a116a10f389f6d14321547fa1966b262c0d | Add Hacker News | sources/misc.py | sources/misc.py | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Clarence Ho (clarenceho at gmail dot com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import datetime
from lxml import html
from lxml import etree
import traceback
from logger import logger
from fetcher import read_http_page
from .base import BaseSource
from .base import RSSBase
from .base import RDFBase
class HackerNews(BaseSource):
    """News source for the daily Hacker News digest (daemonology.net)."""
    def get_id(self):
        return 'hackernews'
    def get_desc(self):
        return 'Hacker News'
    def get_articles(self):
        """Fetch the daily digest feed and flatten it into sections/articles.

        Returns a (possibly empty) list; fetch/parse errors are logged and
        swallowed so one bad source never breaks the aggregator.
        """
        # Although the source is in RSS, the daily items are consolidated as CDATA.
        # Parse and break them down instead of using RSSBase
        rss_url = 'http://www.daemonology.net/hn-daily/index.rss'
        resultList = []
        try:
            doc = html.document_fromstring(read_http_page(rss_url))
            for item in doc.xpath('//rss/channel/item'):
                # One section per daily digest, titled after the item.
                title = item.xpath('title')[0].text if len(item.xpath('title')) > 0 else 'Daily Hacker News'
                resultList.append(self.create_section(title))
                description = item.xpath('description')[0] if len(item.xpath('description')) > 0 else None
                if description is not None:
                    # Each story link inside the digest becomes an article.
                    for article in description.xpath('ul/li/span[@class="storylink"]/a'):
                        if article.text and article.get('href'):
                            resultList.append(self.create_article(article.text.strip(), article.get('href')))
        except Exception as e:
            logger.exception('Problem processing Hacker News: ' + str(e))
            # NOTE(review): traceback.format_exception's `etype` keyword was
            # removed in Python 3.10 — confirm the targeted Python version.
            logger.exception(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
        return resultList
| Python | 0 | |
cd48829eb08df62d8222128b33a7c00b9ca2ed8a | Add files via upload | interpro_go_extraction_direct.py | interpro_go_extraction_direct.py | #!/usr/bin/env python2
# Daniel Elsner
# 26.09.2016
# Take the GO ID directly from Interproscan, without the need of previous cutting and grepping.
# Input: The interproscan-output.tsv file
# NOTE(review): Python 2 only script (print statement, dict .keys()[i] below).
import sys
with open(sys.argv[1], 'r') as readfile:
    id_list_content = list(readfile)
outdict={}
# make a dict, this prevents duplicate entries and makes access easy
# First pass: create an (empty) GO-ID set for every gene that has a GO entry.
for i in range(len(id_list_content)):
    if "GO" in id_list_content[i]:
        # only if there is a GO entry, otherwise there is nothing to split
        inputs = id_list_content[i].split('\t')
        p, j = inputs[0], inputs[13]
        #from the entry line, get the Gene Name and the GO IDs
        outdict[p] = set()
        # create a set, this spares us from checking for duplicates and just keeps everything once
    else:
        pass
        #if there is no GO entry, pass the line
# Second pass: fill each gene's set with its GO IDs (column 14, '|'-separated).
for i in range(len(id_list_content)):
    if "GO" in id_list_content[i]:
        # only if there is a GO entry, otherwise there is nothing to split
        inputs = id_list_content[i].split('\t')
        p, j = inputs[0], inputs[13]
        #from the entry line, get the Gene Name and the GO IDs
        if '|' in str(j):
            for n in str(j).split('|'):
                outdict[p].add(n.strip())
                # individual GOs are separated by "|", for each of them add them to the set, automatically checking if it is already there.
        else:
            outdict[p].add(str(j.strip()))
            # create a set, this spares us from checking for duplicates and just keeps everything once
    else:
        pass
        #if there is no GO entry, pass the line
# Emit one "gene<TAB>GO1, GO2, ..." line per gene.
for i in range(len(outdict)):
    print str(outdict.keys()[i]) + "\t" + ', '.join(outdict.values()[i])
| Python | 0 | |
ed578177781ff1d4aeb0b7abb7d5f11fc5a7c626 | Create copy of WeakList and set it to raise exception instead of removing item from list | grammpy/WeakList.py | grammpy/WeakList.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 12:11
:Licence GNUv3
Part of grammpy
Original implementation: https://github.com/apieum/weakreflist
"""
import weakref
from .exceptions import TreeDeletedException
__all__ = ["WeakList"]
def is_slice(index):
return isinstance(index, slice)
class WeakList(list):
def __init__(self, items=list()):
list.__init__(self, self._refs(items))
def value(self, item):
if isinstance(item, weakref.ReferenceType):
if item() is None:
raise TreeDeletedException()
return item()
return item
def ref(self, item):
try:
item = weakref.ref(item)
finally:
return item
def __contains__(self, item):
return list.__contains__(self, self.ref(item))
def __getitem__(self, index):
items = list.__getitem__(self, index)
return type(self)(self._values(items)) if is_slice(index) else self.value(items)
def __setitem__(self, index, item):
items = self._refs(item) if is_slice(index) else self.ref(item)
return list.__setitem__(self, index, items)
def __iter__(self):
return iter(self[index] for index in range(len(self)))
def __reversed__(self):
reversed_self = type(self)(self)
reversed_self.reverse()
return reversed_self
def append(self, item):
list.append(self, self.ref(item))
def remove(self, item):
return list.remove(self, self.ref(item))
def remove_all(self, item):
item = self.ref(item)
while list.__contains__(self, item):
list.remove(self, item)
def index(self, item, start=None, stop=None):
return list.index(self, self.ref(item), start=start, stop=stop)
def count(self, item):
return list.count(self, self.ref(item))
def pop(self, index=-1):
return self.value(list.pop(self, self.ref(index)))
def insert(self, index, item):
return list.insert(self, index, self.ref(item))
def extend(self, items):
return list.extend(self, self._refs(items))
def __iadd__(self, other):
return list.__iadd__(self, self._refs(other))
def _refs(self, items):
return map(self.ref, items)
def _values(self, items):
return map(self.value, items)
def _sort_key(self, key=None):
return self.value if key is None else lambda item: key(self.value(item))
def sort(self, *, key=None, reverse=False):
return list.sort(self, key=self._sort_key(key), reverse=reverse)
| Python | 0 | |
ee859881af0633d4d2d88015c907cfa856516dbe | Create TwoSum II for Lint | lintcode/000-000-Two-Sum-II/TwoSumII.py | lintcode/000-000-Two-Sum-II/TwoSumII.py | class Solution:
# @param nums, an array of integer
# @param target, an integer
# @return an integer
def twoSum2(self, nums, target):
# Write your code here
nums.sort()
i, j = 0, len(nums) - 1
res = 0
while i < j:
if nums[i] + nums[j] <= target:
i += 1
else:
res += j - i
j -= 1
return res
| Python | 0 | |
52e8a378d8a31989c9d93ef83eabbe6df339f915 | Add data migration to add category components for VPC. | src/waldur_mastermind/marketplace/migrations/0083_offering_component.py | src/waldur_mastermind/marketplace/migrations/0083_offering_component.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from waldur_mastermind.marketplace_openstack import STORAGE_TYPE, RAM_TYPE, CORES_TYPE, PACKAGE_TYPE
def create_category_components(apps, schema_editor):
    """Data migration: attach VPC offering components to category components.

    Creates Storage/RAM/Cores CategoryComponents under the 'Private clouds'
    category (if that category exists) and sets them as parents of the
    corresponding un-parented OpenStack package OfferingComponents.
    Uses apps.get_model() so it runs against the historical model state.
    """
    CATEGORY_TITLE = 'Private clouds'

    Category = apps.get_model('marketplace', 'Category')
    CategoryComponent = apps.get_model('marketplace', 'CategoryComponent')
    OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')

    # No-op on deployments that don't have the VPC category.
    try:
        vpc_category = Category.objects.get(title=CATEGORY_TITLE)
    except Category.DoesNotExist:
        return

    # get_or_create keeps the migration idempotent on re-runs.
    storage_gb_cc, _ = CategoryComponent.objects.get_or_create(
        category=vpc_category,
        type=STORAGE_TYPE,
        name='Storage',
        measured_unit='GB'
    )

    ram_gb_cc, _ = CategoryComponent.objects.get_or_create(
        category=vpc_category,
        type=RAM_TYPE,
        name='RAM',
        measured_unit='GB'
    )

    cores_cc, _ = CategoryComponent.objects.get_or_create(
        category=vpc_category,
        type=CORES_TYPE,
        name='Cores',
        measured_unit='cores'
    )

    # Only touch components that have no parent yet.
    components = OfferingComponent.objects.filter(offering__type=PACKAGE_TYPE, parent=None)
    components.filter(type=STORAGE_TYPE).update(parent=storage_gb_cc)
    components.filter(type=RAM_TYPE).update(parent=ram_gb_cc)
    components.filter(type=CORES_TYPE).update(parent=cores_cc)
class Migration(migrations.Migration):
    """Forward-only data migration (no reverse_code, so not reversible)."""

    dependencies = [
        ('marketplace', '0082_orderitem_activated'),
    ]

    operations = [
        migrations.RunPython(create_category_components),
    ]
| Python | 0 | |
df0772b3ae02ff0180f18410cf4350b493db9cb4 | Create fold_stereonet_fisher_mean.py | fold_stereonet_fisher_mean.py | fold_stereonet_fisher_mean.py | #Definition of inputs and outputs
#==================================
##[Mes scripts GEOL]=group
##entree=vector
##dip_dir=field entree
##dip=field entree
#Algorithm body
#==================================
from qgis.core import *
from apsg import *
layer = processing.getObject(entree)
dipdir = layer.fieldNameIndex(dip_dir)
dip = layer.fieldNameIndex(dip)
if layer.selectedFeatureCount():
print "ok", layer.selectedFeatureCount()
g= Group([Vec3(Fol(elem.attributes()[dipdir],elem.attributes()[dip])) for elem in layer.selectedFeatures()],name='plis')
else:
g= Group([Vec3(Fol(elem.attributes()[dipdir],elem.attributes()[dip])) for elem in layer.getFeatures()],name='plis')
# mean vector
resultat= "mean vector: " + str(int(round(g.R.aslin.dd[1]))) + " - " + str(int(round(g.R.aslin.dd[0])))
s = StereoNet()
s.line(g.aslin, 'b.',markersize=18)
s.line(g.R.aslin,'g^',markersize=18)
s.cone(g.R.aslin, g.fisher_stats['a95'], 'r')
s.cone(g.R.aslin, g.fisher_stats['csd'], 'k')
a = s.ax
a.set_title(resultat, y=1.06, size=14, color='g')
s.show()
| Python | 0.003638 | |
8ce4b91c9f1eca911809bc2e8c315ea24eac10ae | Add scheduler example | schedule.py | schedule.py | import schedule
import time
def job():
    """Dummy task used to demonstrate the scheduler."""
    print("I'm working...")

# Demonstrate the `schedule` API: interval, fixed-time and ranged jobs.
schedule.every(10).minutes.do(job)
schedule.every().hour.do(job)
schedule.every().day.at("10:30").do(job)
schedule.every(5).to(10).minutes.do(job)
schedule.every().monday.do(job)
schedule.every().wednesday.at("13:15").do(job)

# Busy-wait loop: check for due jobs once a second, forever.
while True:
    schedule.run_pending()
    time.sleep(1)
| Python | 0.000001 | |
9ab752bc96c1ad8d6e718cbf87f247aba4ab76a9 | Create MiddleCharacter.py | Edabit/MiddleCharacter.py | Edabit/MiddleCharacter.py | #!/usr/bin/env python3
'''
Create a function that takes a string and returns the middle character(s). With conditions.
'''
def get_middle(word):
    """Return the middle character(s): one for odd lengths, two for even;
    strings of length <= 2 are returned unchanged."""
    length = len(word)
    if length <= 2:
        return word
    mid = length // 2
    if length % 2 == 0:
        return word[mid - 1] + word[mid]
    return word[mid]
#Alternative Solutions
def get_middle(word):
    """Slice-based variant: the slice covers one or two middle characters."""
    start = (len(word) - 1) // 2
    stop = (len(word) + 2) // 2
    return word[start:stop]
def get_middle(word):
    """Trimming variant: peel one character off each end until <= 2 remain."""
    trimmed = word
    while len(trimmed) > 2:
        trimmed = trimmed[1:-1]
    return trimmed
| Python | 0.000001 | |
4e7a074fdfb0b5df300c2b4f1c2109ffb47e89cc | Create t3_muptiplot.py | t3_muptiplot.py | t3_muptiplot.py | '''
Created on 2017年8月20日
@author: rob
'''
# Learn about API authentication here: https://plot.ly/python/getting-started
# Find your api_key here: https://plot.ly/settings/api
import plotly.plotly as py
import plotly.graph_objs as go
import plotly
import numpy as np
import pandas as pd
#py.sign_in('matterphiz', '3eu3YK0Rjn56EzoOWvgx') #Streaming API Tokens:g5sptjri5x
# Build 60 daily timestamps starting 2016-01-01 for the x axis.
dates = pd.date_range('20160101',periods=60)
print(dates)
df = pd.DataFrame(np.random.rand(60,4),index=dates,columns=list('ABCD'))
# "DataFrame" must keep this capitalization (it is the class name).
# np.random.rand(60, 4) generates a random 60x4 matrix with values in [0, 1).
# index=dates: use the dates above as the row index.
# columns=list('ABCD'): name the columns A, B, C, D.
print(df)
# Four traces on four different y axes, scaled by powers of ten so the
# separate-axis effect is visible.
trace1 = go.Scatter(
    x=df.index,
    y=df['A'],
    name='yaxis1 data'
)
trace2 = go.Scatter(
    x=df.index,
    y=10*df['B'],
    name='yaxis2 data',
    yaxis='y2'
)
trace3 = go.Scatter(
    x=df.index,
    y=100*df['C'],
    name='yaxis3 data',
    yaxis='y3'
)
trace4 = go.Scatter(
    x=df.index,
    y=1000*df['D'],
    name='yaxis4 data',
    yaxis='y4'
)
data = [trace1, trace2, trace3, trace4]
layout = go.Layout(
    title='多轴数据示范',
    width=1280,
    # NOTE(review): plotly expects `domain` as fractions in [0, 1]; these
    # date-like values look wrong — confirm intent.
    xaxis=dict(
        domain=[20160101, 20160301]
    ),
    yaxis=dict(
        title='yaxis title',
        titlefont=dict(
            color='#1f77b4'
        ),
        tickfont=dict(
            color='#1f77b4'
        )
    ),
    # Extra axes overlay the main 'y' axis; 'free' anchors use `position`
    # to place them inside the plotting area.
    yaxis2=dict(
        title='yaxis2 title',
        titlefont=dict(
            color='#ff7f0e'
        ),
        tickfont=dict(
            color='#ff7f0e'
        ),
        anchor='free',
        overlaying='y',
        side='left',
        position=0.15
    ),
    yaxis3=dict(
        title='yaxis4 title',
        titlefont=dict(
            color='#d62728'
        ),
        tickfont=dict(
            color='#d62728'
        ),
        anchor='x',
        overlaying='y',
        side='right'
    ),
    yaxis4=dict(
        title='yaxis5 title',
        titlefont=dict(
            color='#9467bd'
        ),
        tickfont=dict(
            color='#9467bd'
        ),
        anchor='free',
        overlaying='y',
        side='right',
        position=0.85
    )
)
fig = go.Figure(data=data, layout=layout)
#plot_url = py.plot(fig, filename='multiple-axes-multiple')
# Render offline (no plotly.com account needed).
plot_url = plotly.offline.plot(fig)
| Python | 0.000005 | |
2e7058a9b48154ad205b6f53e07a224574a2e125 | add command-line script to fix missing exposure times in MMIRS data | scripts/fix_mmirs_exposure_time.py | scripts/fix_mmirs_exposure_time.py | #!/usr/bin/env python
import sys
import math
import argparse
from datetime import datetime
from pathlib import Path
from astropy.io import fits
import logging
log = logging.getLogger('Fix MMIRS')
log.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
def main():
    """Fill in missing (zero) EXPTIME keywords in MMIRS WFS FITS files.

    The exposure time of each frame is inferred from the DATE-OBS gap to the
    next frame, rounded down to a multiple of 5 s; gaps of 200 s or more are
    treated as track boundaries and the previous estimate is reused.
    With --dryrun the new values are logged but no file is rewritten.
    """
    parser = argparse.ArgumentParser(description='Utility for fixing missing exposure times in MMIRS WFS images.')
    parser.add_argument(
        'rootdir',
        metavar="<WFS data directory>",
        help="Directory containing MMIRS WFS data to fix.",
        default="."
    )
    parser.add_argument(
        '--dryrun',
        help="Calculate new exposure times, but don't rewrite files.",
        action="store_true"
    )
    args = parser.parse_args()
    rootdir = Path(args.rootdir)
    # Sorted so consecutive list entries are consecutive exposures in time.
    files = sorted(list(rootdir.glob("mmirs*.fits")))
    if len(files) < 1:
        log.error(f"No MMIRS WFS data found in {str(rootdir)}")
        return
    # Map each file path to its observation timestamp (last HDU's header).
    timedict = {}
    for f in files:
        with fits.open(f) as hdulist:
            hdr = hdulist[-1].header
            data = hdulist[-1].data  # NOTE(review): read but never used
            timedict[str(f)] = hdr['DATE-OBS']
    log.debug(timedict)
    sec = 0.
    for i in range(0, len(files)):
        # Use the gap to the next frame; for the last frame reuse the gap
        # from the previous one (same value as the previous iteration).
        if i < len(files)-1:
            t1 = datetime.strptime(timedict[str(files[i])], "%Y-%m-%dT%H:%M:%S")
            t2 = datetime.strptime(timedict[str(files[i+1])], "%Y-%m-%dT%H:%M:%S")
        else: # handle last file
            t1 = datetime.strptime(timedict[str(files[i-1])], "%Y-%m-%dT%H:%M:%S")
            t2 = datetime.strptime(timedict[str(files[i])], "%Y-%m-%dT%H:%M:%S")
        diff = t2-t1
        # exposure times are almost always in multiples of 5 sec unless the exposures are very short
        # NOTE(review): diff.seconds ignores diff.days, so a multi-day gap
        # wraps -- presumably impossible for consecutive frames, but confirm.
        diff_sec = 5 * math.floor(diff.seconds/5)
        # mmirs wfs exposures should almost never be more than 3 min during normal operations.
        # large gaps are assumed to be the end of a track so 200 seems a good cutoff to reject
        # those and use the previous time diff instead.
        if diff_sec < 200:
            sec = diff_sec
        f = files[i]
        with fits.open(f) as hdulist:
            changed = False
            for h in hdulist:
                if 'EXPTIME' in h.header:
                    if h.header['EXPTIME'] == 0.0:
                        if args.dryrun:
                            log.info(f"DRYRUN -- Setting EXPTIME to {sec} in {str(f)}..")
                        else:
                            log.info(f"Setting EXPTIME to {sec} in {str(f)}..")
                        # Header is updated in memory either way; only the
                        # writeto below is skipped under --dryrun.
                        h.header['EXPTIME'] = sec
                        changed = True
                    else:
                        log.info(f"EXPTIME already set to {h.header['EXPTIME']} for {str(f)}")
            if changed and not args.dryrun:
                hdulist.writeto(f, overwrite=True)
if __name__ == "__main__":
main()
| Python | 0 | |
0cff0d69f0d2f52f950be37f95c8f261a9741ae7 | Create KAKAO_DATA_PREPARE_NEW.py | KAKAO_DATA_PREPARE_NEW.py | KAKAO_DATA_PREPARE_NEW.py | import h5py
from scipy.spatial import distance
import scipy.misc
import numpy as np
path = './Desktop/COVER_SONG/chroma_data_training/CP_1000ms_training_s2113_d2113_170106223452.h5'
f1 = h5py.File(path)
datasetNames=[n for n in f1.keys()]
X = f1['X']
idxDis_train = f1['idxDis_train']
idxDis_validate = f1['idxDis_validate']
idxSim_train = f1['idxSim_train']
idxSim_validate = f1['idxSim_validate']
def oti(cover1,cover2,chroma_dim):
cover1_mean = np.sum(cover1,axis=0)/np.max(np.sum(cover1,axis=0))
cover2_mean = np.sum(cover2,axis=0)/np.max(np.sum(cover2,axis=0))
dist_store = np.zeros(chroma_dim)
for i in range(0,chroma_dim):
cover2_mean_shifted = np.roll(cover2_mean, i)
dist = np.dot(cover1_mean,cover2_mean_shifted)
dist_store[i] = dist
oti = np.argmax(dist_store)
cover2_shifted = np.roll(cover2, oti, axis=1)
return cover1, cover2_shifted
def simple_matrix(X,Y):
XX = oti(X,Y,12)[0]
YY = oti(X,Y,12)[1]
M = [[0 for col in range(180)] for row in range(180)]
for i in range(180):
for j in range(180):
M[i][j] = distance.euclidean(XX[i,:],YY[j,:])
return np.asarray(M)
# np.shape(idxSim_train)[0]
for i in range(np.shape(idxSim_train)[0]):
a=[idxSim_train[i][0], idxSim_train[i][1]]
scipy.misc.imsave('./Desktop/KAKAO_ALL_PAIR_TRAIN/'+'{:0=4}'.format((int)(min(a)))+'_'+'{:0=4}'.format((int)(max(a)))+'_S.jpg',simple_matrix(X[min(a)-1],X[max(a)-1]))
print((str)(i)+'th complete')
# np.shape(idxDis_train)[0]
for i in range(np.shape(idxDis_train)[0]):
a=[idxDis_train[i][0], idxDis_train[i][1]]
scipy.misc.imsave('./Desktop/KAKAO_ALL_PAIR_TRAIN/'+'{:0=4}'.format((int)(min(a)))+'_'+'{:0=4}'.format((int)(max(a)))+'_D.jpg',simple_matrix(X[min(a)-1],X[max(a)-1]))
print((str)(i)+'th complete')
# 1175 x 1175 pair (180 by 180 matrix) complete
| Python | 0 | |
3de9ab07b67bd37e418cba16318aa813326793bb | Create createREFgenomesForPhasing.py | createREFgenomesForPhasing.py | createREFgenomesForPhasing.py | Python | 0 | ||
c87779ed6e0163503c01efd3a3913b547954d73d | Create convcsv.py | convcsv.py | convcsv.py | #!/usr/bin/python
#
# convert spreadsheet data, removing multiple spaces
#
import os, sys, getopt, shutil, glob, re, traceback, json, csv
def handle_exception():
traceback.print_exc()
os._exit(1)
def addRow(lst,row):
key = row[9]
if key in lst:
setlst = lst[key]
setlst.append(row)
else:
setlst=[row]
lst[key]=setlst
return lst
def getRow(filename):
try:
lst = {}
with open(filename,"rb") as csvfile:
rdr = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in rdr:
hdr=row
break
for row in rdr:
row=re.sub("\s{2,}" , " ", row)
key = row[1].lower()
if "almaden" in key:
lst=addRow(lst,row)
elif "san jose" in key:
lst=addRow(lst,row)
elif "arc" in key:
lst=addRow(lst,row)
csvfile.close()
return lst
except:
traceback.print_exc()
#
# argv[0] = NAME
# argv[1] = IP
#
def main(argv):
try:
if len(argv)<1:
print '{"STATUS":"FAIL", "MSG":"MISSING ARGS" }'
os._exit(2)
lst=getRow(argv[0])
for name in lst:
#print name
machines=lst[name]
for machine in machines:
print machine[9]+","+machine[13]+","+machine[11]+","+machine[12]
break
for machine in machines:
print " "+machine[3]+","+machine[2]
#print lst
except:
handle_exception()
if __name__ == "__main__":
main(sys.argv[1:])
| Python | 0.000412 | |
1c81643eaed91b4171a4e68699d930e5ef3688db | Add negative API tests for policy validation | senlin/tests/tempest/api/policies/test_policy_validate_negative.py | senlin/tests/tempest/api/policies/test_policy_validate_negative.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidateNegativeBadRequest(base.BaseSenlinAPITest):
    """Negative tests: policy validation requests that must return 400."""

    @test.attr(type=['negative'])
    @decorators.idempotent_id('4b55bb3e-12d6-4728-9b53-9db5094ac8b5')
    def test_policy_validate_with_empty_body(self):
        """An empty request body is rejected."""
        params = {
        }
        # Verify badrequest exception(400) is raised.
        self.assertRaises(exceptions.BadRequest,
                          self.client.validate_obj,
                          'policies', params)

    @test.attr(type=['negative'])
    @decorators.idempotent_id('a1c35d93-2d19-4a72-919f-cfd70f5cbf06')
    def test_policy_validate_no_spec(self):
        """A 'policy' body without a spec is rejected."""
        params = {
            'policy': {
            }
        }
        # Verify badrequest exception(400) is raised.
        self.assertRaises(exceptions.BadRequest,
                          self.client.validate_obj,
                          'policies', params)

    @test.attr(type=['negative'])
    @decorators.idempotent_id('6073da36-ee3e-4925-bce1-6c9a158e710d')
    def test_policy_validate_policy_type_incorrect(self):
        """A spec naming an unknown policy type is rejected."""
        spec = copy.deepcopy(constants.spec_scaling_policy)
        spec['type'] = 'senlin.policy.bogus'
        params = {
            'policy': {
                # BUG FIX: the key was misspelled 'spce', so the request was
                # rejected for *missing* a spec rather than for the bogus type.
                'spec': spec
            }
        }
        # Verify badrequest exception(400) is raised.
        self.assertRaises(exceptions.BadRequest,
                          self.client.validate_obj,
                          'policies', params)

    @test.attr(type=['negative'])
    @decorators.idempotent_id('1e1833ea-4a67-4ac1-b6e2-f9afff51c945')
    def test_policy_validate_spec_validation_failed(self):
        """A spec carrying an unrecognized property is rejected."""
        spec = copy.deepcopy(constants.spec_scaling_policy)
        spec['properties']['bogus'] = 'foo'
        params = {
            'policy': {
                # BUG FIX: 'spce' -> 'spec' (same typo as above).
                'spec': spec
            }
        }
        # Verify badrequest exception(400) is raised.
        self.assertRaises(exceptions.BadRequest,
                          self.client.validate_obj,
                          'policies', params)
f040351dd3397ba7297b69b2468b2b37589c0d8f | Add task to get stats about files | games/management/commands/get_installer_urls.py | games/management/commands/get_installer_urls.py | import json
from collections import defaultdict
from django.core.management.base import BaseCommand
from common.util import load_yaml
from games import models
class Command(BaseCommand):
    """Collect per-URL installer-slug stats and dump them to installer-files.json.

    Side effect: installers whose YAML content is malformed (non-dict
    content, an explicit files=None, or non-subscriptable file entries)
    are DELETED while scanning.
    """
    def handle(self, *args, **kwargs):
        self.stdout.write("Installer stats\n")
        installers = models.Installer.objects.all()
        # url -> list of installer slugs referencing that download URL.
        url_stats = defaultdict(list)
        for installer in installers:
            slug = installer.slug
            installer_content = load_yaml(installer.content)
            try:
                files = installer_content.get("files", [])
            except AttributeError:
                # load_yaml returned a non-dict (broken YAML): drop the installer.
                print("Deleting installer %s" % installer)
                installer.delete()
                continue
            if files is None:
                # Explicit "files:" key with no value.
                print("Deleting installer %s" % installer)
                installer.delete()
                continue
            for url_dict in files:
                # Each entry is expected to be a one-key mapping {fileid: url-or-dict}.
                fileid = next(iter(url_dict))
                try:
                    url = url_dict[fileid]
                except TypeError:
                    print("Deleting installer %s" % installer)
                    installer.delete()
                    # NOTE(review): this continues with the *next file entry*
                    # of an already-deleted installer, not the next installer.
                    continue
                # Skip "N/A" placeholders; record the slug under the URL.
                if isinstance(url, str):
                    if url.startswith("N/A"):
                        continue
                    url_stats[url].append(slug)
                elif isinstance(url, dict):
                    if url["url"].startswith("N/A"):
                        continue
                    url_stats[url["url"]].append(slug)
        with open("installer-files.json", "w") as installer_files:
            json.dump(url_stats, installer_files, indent=2)
cff5035ad469adc46ed9cf446bb95d9a1e07bd77 | Fix inline template | judge/templatetags/smart_math.py | judge/templatetags/smart_math.py | from HTMLParser import HTMLParser
from django.template import Library
from django.conf import settings
import re
register = Library()
MATHTEX_CGI = 'http://www.forkosh.com/mathtex.cgi'#settings.get('MATHTEX_CGI', 'http://www.forkosh.com/mathtex.cgi')
inlinemath = re.compile(r'~(.*?)~|\\\((.*?)\\\)')
def inline_template(match):
math = match.group(1) or match.group(2)
return r'''
<span>
<img class="tex-image" src="%s?\textstyle %s"/>
<span class="tex-text" style="display:none">\( %s \)</span>
</span>
''' % (MATHTEX_CGI, math, math)
displaymath = re.compile(r'\$\$(.*?)\$\$|\\\[(.*?)\\\]')
def display_template(match):
math = match.group(1) or match.group(2)
return r'''
<span>
<img class="tex-image" src="%s?\displaystyle %s" alt="%s"/>
<div class="tex-text" style="display:none">\[ %s \]</div>
</span>
''' % (MATHTEX_CGI, math, math, math)
class MathHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.new_page = []
self.data_buffer = []
def purge_buffer(self):
if self.data_buffer:
buffer = ''.join(self.data_buffer)
buffer = inlinemath.sub(inline_template, buffer)
buffer = displaymath.sub(display_template, buffer)
self.new_page.append(buffer)
del self.data_buffer[:]
def handle_starttag(self, tag, attrs):
self.purge_buffer()
self.new_page.append('<%s%s>' % (tag, ' '.join([''] + ['%s="%s"' % p for p in attrs])))
def handle_endtag(self, tag):
self.purge_buffer()
self.new_page.append('</%s>' % tag)
def handle_data(self, data):
self.data_buffer.append(data)
def handle_entityref(self, name):
self.data_buffer.append('&%s;' % name)
def handle_charref(self, name):
self.data_buffer.append('&#%s;' % name)
@register.filter(name='smart_math', is_safe=True)
def math(page):
parser = MathHTMLParser()
parser.feed(page)
return ''.join(parser.new_page)
| from HTMLParser import HTMLParser
from django.template import Library
from django.conf import settings
import re
register = Library()
MATHTEX_CGI = 'http://www.forkosh.com/mathtex.cgi'#settings.get('MATHTEX_CGI', 'http://www.forkosh.com/mathtex.cgi')
inlinemath = re.compile(r'~(.*?)~|\\\((.*?)\\\)')
def inline_template(match):
math = match.group(1) or match.group(2)
return r'''
<span>
<img src="%s?\textstyle %s"/>
<span style="display:none">\( %s \)</span>
</span>
''' % (MATHTEX_CGI, math, math)
displaymath = re.compile(r'\$\$(.*?)\$\$|\\\[(.*?)\\\]')
def display_template(match):
math = match.group(1) or match.group(2)
return r'''
<span>
<img class="tex-image" src="%s?\displaystyle %s" alt="%s"/>
<div class="tex-text" style="display:none">\[ %s \]</div>
</span>
''' % (MATHTEX_CGI, math, math, math)
class MathHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.new_page = []
self.data_buffer = []
def purge_buffer(self):
if self.data_buffer:
buffer = ''.join(self.data_buffer)
buffer = inlinemath.sub(inline_template, buffer)
buffer = displaymath.sub(display_template, buffer)
self.new_page.append(buffer)
del self.data_buffer[:]
def handle_starttag(self, tag, attrs):
self.purge_buffer()
self.new_page.append('<%s%s>' % (tag, ' '.join([''] + ['%s="%s"' % p for p in attrs])))
def handle_endtag(self, tag):
self.purge_buffer()
self.new_page.append('</%s>' % tag)
def handle_data(self, data):
self.data_buffer.append(data)
def handle_entityref(self, name):
self.data_buffer.append('&%s;' % name)
def handle_charref(self, name):
self.data_buffer.append('&#%s;' % name)
@register.filter(name='smart_math', is_safe=True)
def math(page):
parser = MathHTMLParser()
parser.feed(page)
return ''.join(parser.new_page)
| Python | 0.000001 |
12445164d5a7651ddcc381f5e602577d8372fe6a | Add is_eq_size script | is_eq_size.py | is_eq_size.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from PIL import Image
from utils import get_file_list
@click.command()
@click.argument('path', type=click.Path(exists=True))
def is_eq_size(path):
"""
Test all pictures in folder (recursive) for size equality.
"""
files = get_file_list(path)
sizes = [Image.open(f).size for f in files]
if all(s == sizes[0] for s in sizes):
print 'all pictures have same size'
else:
print 'not all pictures have same size'
if __name__ == '__main__':
is_eq_size()
| Python | 0.000077 | |
ad052e71145296897c1510752c0f3403b9cb45a4 | add 1st py file | hello.py | hello.py | print('Hello, Python!');
name = input();
print(name); | Python | 0 | |
73f2260e0e5ae3534f13664063808abbe73b1d72 | add a new extractor, from json files | bin/extract_json.py | bin/extract_json.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2015 Pablo Santiago Blum de Aguiar <scorphus@gmail.com>. All rights
# reserved. Use of this source code is governed by Apache License, Version 2.0,
# that can be found on https://opensource.org/licenses/Apache-2.0
import json
import sys
def main(argv):
'''Usage: extract_json.py <json-file> <key>[.<key>...]'''
def usage(json_dict=None):
print main.__doc__
if json_dict:
print 'Available keys:\n {}'.format(
'\n '.join(sorted(json_dict.keys()))
)
if len(argv) < 1:
usage()
return 1
if not sys.stdin.isatty():
try:
json_dict = json.load(sys.stdin)
except Exception as e:
print 'Could not read from STDIN: {}'.format(e)
return 2
keys = [x for x in argv[1].split('.')] if len(argv) > 1 else []
else:
with open(sys.argv[1]) as json_file:
try:
json_dict = json.load(json_file)
except Exception as e:
print 'Could not read {}: {}'.format(sys.argv[1], e)
return 2
keys = [x for x in argv[2].split('.')] if len(argv) > 2 else []
if not keys:
usage(json_dict)
return 3
for key in keys:
try:
idx = int(key)
key = idx
except:
pass
try:
json_dict = json_dict[key]
except:
json_dict = ''
print(json_dict.encode('utf8') if json_dict else '')
if __name__ == '__main__':
status = main(sys.argv)
sys.exit(status)
| Python | 0 | |
81a38564379af16f4ea2d64572e517a6657f4450 | add first test for NormalIndPower and normal_power | statsmodels/stats/tests/test_power.py | statsmodels/stats/tests/test_power.py | # -*- coding: utf-8 -*-
"""Tests for statistical power calculations
Note:
test for ttest power are in test_weightstats.py
tests for chisquare power are in test_gof.py
Created on Sat Mar 09 08:44:49 2013
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_almost_equal
import statsmodels.stats.power as smp
def test_normal_power_explicit():
# a few initial test cases for NormalIndPower
sigma = 1
d = 0.3
nobs = 80
alpha = 0.05
res1 = smp.normal_power(d, nobs/2., 0.05)
res2 = smp.NormalIndPower().power(d, nobs, 0.05)
res3 = smp.NormalIndPower().solve_power(effect_size=0.3, nobs1=80, alpha=0.05, beta=None)
res_R = 0.475100870572638
assert_almost_equal(res1, res_R, decimal=13)
assert_almost_equal(res2, res_R, decimal=13)
assert_almost_equal(res3, res_R, decimal=13)
norm_pow = smp.normal_power(-0.01, nobs/2., 0.05)
norm_pow_R = 0.05045832927039234
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="two.sided")
assert_almost_equal(norm_pow, norm_pow_R, decimal=13)
norm_pow = smp.NormalIndPower().power(0.01, nobs, 0.05, alternative="1s")
norm_pow_R = 0.056869534873146124
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="greater")
assert_almost_equal(norm_pow, norm_pow_R, decimal=13)
# Note: negative effect size is same as switching one-sided alternative
# TODO: should I switch to larger/smaller instead of "one-sided" options
norm_pow = smp.NormalIndPower().power(-0.01, nobs, 0.05, alternative="1s")
norm_pow_R = 0.0438089705093578
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
assert_almost_equal(norm_pow, norm_pow_R, decimal=13)
| Python | 0 | |
f24c8376847b0226f3d3f674af2f568367f15234 | add data structure for parens problem | src/linked_list.py | src/linked_list.py | """Singly-Linked List in Python."""
class Node(object):
"""docstring for LinkedList."""
def __init__(self, data, next_item=None):
"""Init for instance of a node."""
self.data = data
self.next_item = next_item
class LinkedList(object):
"""Class for head of Linked List."""
def __init__(self, data=None):
"""Initialize the head node."""
self.head = None
if data:
try:
for item in data:
self.push(item)
except TypeError:
self.head = Node(data)
def push(self, data=None):
"""Create new node in front of head."""
new_head = Node(data, self.head)
self.head = new_head
def pop(self):
"""Remove the first value off the head of the list and return it."""
if self.head is None:
raise IndexError('Cannot pop from an empty list.')
new_head = self.head.next_item
old_head = self.head.data
self.head = new_head
return old_head
def size(self):
"""Count the objects in linked list."""
count = 0
curr = self.head
while curr:
count += 1
curr = curr.next_item
return count
def search(self, val):
"""Iterate through the linked list to find instance containing val."""
curr = self.head
result = None
try:
while val != curr.data:
curr = curr.next_item
else:
result = curr
except AttributeError:
pass
return result
def remove(self, node):
"""Remove a given node in the list."""
curr = self.head
previous = None
while curr:
if curr == node:
break
previous = curr
curr = previous.next_item
if previous is None:
self.head = curr.next_item
else:
previous.next_item = curr.next_item
def display(self):
"""Return a string of the linked list."""
curr = self.head
return_tuple = ()
while curr:
return_tuple = return_tuple + (curr.data, )
curr = curr.next_item
return return_tuple
| Python | 0.000002 | |
9992a4ff90156a1c5678303530c2feeaecf700d6 | Create a_deco.py | src/misc/a_deco.py | src/misc/a_deco.py | import os
import sys
import linecache
def trace(func):
"""
A trace decorator
from: https://zhuanlan.zhihu.com/p/20175869
:param func:
:return:
"""
def globaltrace(frame, why, arg):
if why == "call":
return localtrace
return None
def localtrace(frame, why, arg):
if why == "line":
filename = frame.f_code.co_filename
line_no = frame.f_lineno
b_name = os.path.basename(filename)
tmp = linecache.getline(filename, line_no)
print("{0}({1}):{2}".format(b_name, line_no, tmp), end='')
return localtrace
def _func(*args, **kwargs):
sys.settrace(globaltrace)
result = func(*args, **kwargs)
sys.settrace(None)
return result
return _func
@trace
def foo(i):
string = "Hello world!"
print(string)
print(string[i])
os.system("cls")
if __name__ == "__main__":
foo(-1)
| Python | 0.00033 | |
463502a251111199da130e508929a35b2f126f4e | Add columns to User model | bookmarks/models.py | bookmarks/models.py | from sqlalchemy import Column, Integer, String
from bookmarks.database import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True, nullable=False)
name = Column(String(120))
email = Column(String(256), unique=True, nullable=False)
def __init__(self, name=None, username=None, email=None):
self.username = username
self.name = name
self.email = email
def __repr__(self):
return '<User %r>' % (self.name)
| Python | 0.000001 | |
0c77666c259ba78899863bbbe482a857102c19be | add settings module | hackerearth/settings.py | hackerearth/settings.py |
# v3 API endpoints of HackerEarth Code Checker API
COMPILE_API_ENDPOINT = 'https://api.hackerearth.com/v3/code/compile'
RUN_API_ENDPOINT = 'https://api.hackerearth.com/v3/code/run'
# Max run time of a program in seconds
RUN_TIME_UPPER_LIMIT = 5
# Max memory consumption allowed for a program
MEMORY_UPPER_LIMIT = 1024*256
# please keep this secret
CLIENT_SECRET = ''
| Python | 0.000034 | |
36af113eb363ddf25f96ab53e41db0ea7f3bb481 | add a python scripts to generate user weibo file from weibo.txt | src/main/python/data_aggregation.py | src/main/python/data_aggregation.py | import sys, os
def generateData(inputData, outputDir, userLimit):
print "Generate person weibo to folder: " + outputDir
if not os.path.isdir(outputDir):
os.mkdir(outputDir)
print 'Directory created at: ' + outputDir
currentID = ""
userNum = 0
outputFile = None
l = inputData.readline()
while l:
line = l.strip()
if line:
fields = line.split("\t")
if len(fields) < 6:
print "Broken line found: " + line
l = inputData.readline()
continue
if fields[1] != currentID:
userNum += 1
if userNum > userLimit:
break
print "Find weibo for " + str(userNum) + " user: " + fields[1]
currentID = fields[1]
fileName = outputDir + "/" + currentID
print "Create a new file: " + fileName
outputFile = file(fileName,"w")
outputFile.write(fields[5] + "\n")
l = inputData.readline()
print "Generate user weibo " + str(userNum - 1)
def usage():
print "Two parameter is required to run the scripts: input file and output folder\n"
print "One parameter is optional: the limited number of user need generate, default will generate all the user weibo data in the input file\n"
if __name__ == "__main__":
if len(sys.argv) < 3: # Expect more then two argument: the input data file and output folder
usage()
sys.exit(2)
try:
inputData = file(sys.argv[1],"r")
except IOError:
sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % arg)
sys.exit(1)
userCount = sys.maxint
if len(sys.argv) >= 4:
userCount = int(sys.argv[3])
print "Generate weibo user: " + str(userCount)
generateData(inputData, sys.argv[2], userCount)
| Python | 0 | |
8c78679bc9875c698f639a0c45a5208b43162f4e | comment obsolete ff prefs script. | setup/nodes/review/set_ff_prefs.py | setup/nodes/review/set_ff_prefs.py | #!/usr/bin/python
"""
Allow a web page to access local files.
This makes it easier to preview title screens and video files.
FF stores profiles in ~/.mozilla/firefox/profiles.ini
FF settings are set by creating a .js file that sets things on startup
1. count number of FF profiles.
If more than 1, give up.
2. get profile dir
3. create user.js that sets custom settings.
"""
import os
import ConfigParser
home_dir = os.path.expanduser('~')
print "home dir:", home_dir
profiles_path= os.path.join(home_dir, ".mozilla","firefox","profiles.ini")
print "profiles_path:", profiles_path
# read ini file
config = ConfigParser.RawConfigParser()
config.read([profiles_path])
profiles = [s for s in config.sections() if s !='General']
if len(profiles)>1:
print "more than one profile, you fix it."
print profiles
else:
d=dict(config.items(profiles[0]))
settings_path= os.path.join(home_dir, ".mozilla","firefox",d['path'],"user.js")
config="""
user_pref("capability.policy.policynames", "localfilelinks");
user_pref("capability.policy.localfilelinks.sites", "http://localhost:8080","http://veyepar.nextdayvideo.com:8080");
user_pref("capability.policy.localfilelinks.checkloaduri.enabled", "allAccess");
"""
print "writing to", settings_path
open(settings_path,'w').write(config)
| #!/usr/bin/python
import os
import ConfigParser
home_dir = os.path.expanduser('~')
print "home dir:", home_dir
profiles_path= os.path.join(home_dir, ".mozilla","firefox","profiles.ini")
print "profiles_path:", profiles_path
# read ini file
config = ConfigParser.RawConfigParser()
config.read([profiles_path])
profiles = [s for s in config.sections() if s !='General']
if len(profiles)>1:
print "more than one profile, you fix it."
print profiles
else:
d=dict(config.items(profiles[0]))
settings_path= os.path.join(home_dir, ".mozilla","firefox",d['path'],"user.js")
config="""
user_pref("capability.policy.policynames", "localfilelinks");
user_pref("capability.policy.localfilelinks.sites", "http://localhost:8080","http://veyepar.nextdayvideo.com:8080");
user_pref("capability.policy.localfilelinks.checkloaduri.enabled", "allAccess");
"""
print "writing to", settings_path
open(settings_path,'w').write(config)
| Python | 0 |
bb11ab050fe9a7bb0ffe83419eb0e87390f7deac | Add registration method for TB | hopsutil/tensorboard.py | hopsutil/tensorboard.py | """
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import socket
import subprocess
import os
import hdfs
def register(logdir):
#find free port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('',0))
addr, port = s.getsockname()
s.close()
#let tb bind to port
subprocess.Popen([os.getenv("PYSPARK_PYTHON"), "tensorboard", "--logdir=%s"%logdir, "--port=%d"%port, "--debug"])
tb_url = "http://{0}:{1}".format(addr, port)
#dump tb host:port to hdfs
hops_user = os.environ["USER"];
hops_user_split = hops_user.split("__");
project = hops_user_split[0];
hdfs_handle = hdfs.get()
hdfs_handle.dump(tb_url, "hdfs:///Projects/" + project + "/Resources/.jupyter.tensorboard", user=hops_user)
| Python | 0 | |
b22cfb4c6b8c0c0c3751078b720313d0e2baff1d | Test API call | src/filmyBot.py | src/filmyBot.py | import time,json,requests
import os
from slackclient import SlackClient
# get the Slack API token as an environment variable
SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
CHANNEL_NAME = "test2"
BOT_ID = "U53TE8XSS"
SLACK_BOT_NAME = "<@" + BOT_ID + ">"
def main():
print(SLACK_BOT_NAME)
# Create the slackclient instance
sc = SlackClient(SLACK_BOT_TOKEN)
response = requests.get("http://www.omdbapi.com/?t=The+Dark+Knight&plot=full")
data = response.json()
# Connect to slack
if sc.rtm_connect():
# Send first message
#sc.rtm_send_message(CHANNEL_NAME, "I'm ALIVE!!!")
while True:
# Read latest messages
for slack_message in sc.rtm_read():
message = slack_message.get("text")
user = slack_message.get("user")
print(message, user)
if(message and user):
if(SLACK_BOT_NAME in message):
print("done!")
sc.rtm_send_message(CHANNEL_NAME, data["Plot"])
sc.rtm_send_message(CHANNEL_NAME, sc.api_call("users.list"))
else:
sc.rtm_send_message(CHANNEL_NAME, "")
if __name__ == '__main__':
main() | Python | 0.000001 | |
6f2ab55d0b83c33fad322101e7214425efd10829 | add colors to module | comoving_rv/plot.py | comoving_rv/plot.py | colors = dict()
colors['line_marker'] = '#3182bd'
colors['gp_model'] = '#ff7f0e'
colors['not_black'] = '#333333'
colors['fit'] = '#2ca25f'
| Python | 0 | |
963b1ab24767acb5253b9fe2f29749d8656b2918 | index file added | index.py | index.py | #!/usr/bin/env python
import web
import page
import upload
import utils
#from google.appengine.ext import db
import logging
from Cheetah.Template import Template
import os
urls = (
'/page', page.app_page,
'/upload', upload.app_upload,
'/login', "login",
'/(\d+)-(?:[\w|-]+)\.html', "index",
"/(.*)", "index"
)
class login:
def GET(self):
#utils.login()
#return '<form action="/login" method="POST"><input type="text" name="email" value="ryan" /><input type="submit" /></form>'
path = os.path.join(os.path.dirname(__file__), 'templates/login.html')
template_values = { 'user':'test',}
tmpl = Template( file = path, searchList = (template_values,) )
return tmpl
def POST(self):
if (utils.login() is None):
raise web.seeother('/login')
else:
raise web.seeother('/index.html')
#class Page(db.Model):
# id = db.IntegerProperty()
# title = db.StringProperty()
# tags = db.StringListProperty()
# content = db.TextProperty()
# owner = db.IntegerProperty(default=666)
class redirect:
def GET(self,page_name):
if utils.set_page_id(page_name):
web.redirect("/index.html")
else:
return "FAIL"
class index:
def GET(self,page_name):
if page_name == "w":
return 'test'
#page = Page()
#page.id = 1
#page.title = "Random Stuff"
#page.tags = ["test","ryan","links"]
#page.content = '{"name": "Untitled", "order": "", "components": {}, "last_id":0 }'
#page.put()
else:
#path = os.path.join(os.path.dirname(__file__), 'static/index.html')
path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
template_values = { 'page_name':page_name,}
tmpl = Template( file = path, searchList = (template_values,) )
return tmpl
app = web.application(urls, globals())
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
app.run()
| Python | 0.000001 | |
93c1bf2808087d1fd6e082e55d74180a445ebf91 | Add migrations for names | pyBuchaktion/migrations/0002_auto_20170404_1716.py | pyBuchaktion/migrations/0002_auto_20170404_1716.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-04-04 15:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pyBuchaktion', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='book',
options={'verbose_name': 'book', 'verbose_name_plural': 'books'},
),
migrations.AlterModelOptions(
name='order',
options={'verbose_name': 'order', 'verbose_name_plural': 'orders'},
),
migrations.AlterModelOptions(
name='ordertimeframe',
options={'verbose_name': 'order timeframe', 'verbose_name_plural': 'order timeframes'},
),
migrations.AlterModelOptions(
name='semester',
options={'verbose_name': 'semester', 'verbose_name_plural': 'semesters'},
),
migrations.AlterModelOptions(
name='student',
options={'verbose_name': 'student', 'verbose_name_plural': 'students'},
),
migrations.AlterModelOptions(
name='tucanmodule',
options={'verbose_name': 'TUCaN module', 'verbose_name_plural': 'TUCaN modules'},
),
migrations.AlterField(
model_name='book',
name='author',
field=models.CharField(max_length=64, verbose_name='author'),
),
migrations.AlterField(
model_name='book',
name='isbn_13',
field=models.CharField(max_length=13, unique=True, verbose_name='ISBN-13'),
),
migrations.AlterField(
model_name='book',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=6, null=True, verbose_name='price'),
),
migrations.AlterField(
model_name='book',
name='state',
field=models.CharField(choices=[('AC', 'Accepted'), ('RJ', 'Rejected'), ('PP', 'Proposed'), ('OL', 'Obsolete')], default='AC', max_length=2, verbose_name='status'),
),
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(max_length=42, verbose_name='title'),
),
migrations.AlterField(
model_name='order',
name='book',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='pyBuchaktion.Book', verbose_name='book'),
),
migrations.AlterField(
model_name='order',
name='hint',
field=models.TextField(verbose_name='hint'),
),
migrations.AlterField(
model_name='order',
name='order_timeframe',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pyBuchaktion.OrderTimeframe', verbose_name='order timeframe'),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(choices=[('PD', 'Pending'), ('OD', 'Ordered'), ('RJ', 'Rejected'), ('AR', 'Arrived')], default='PD', max_length=2, verbose_name='status'),
),
migrations.AlterField(
model_name='order',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='pyBuchaktion.Student', verbose_name='student'),
),
migrations.AlterField(
model_name='ordertimeframe',
name='end_date',
field=models.DateField(verbose_name='end date'),
),
migrations.AlterField(
model_name='ordertimeframe',
name='semester',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pyBuchaktion.Semester', verbose_name='semester'),
),
migrations.AlterField(
model_name='ordertimeframe',
name='spendings',
field=models.DecimalField(decimal_places=2, max_digits=7, verbose_name='spendings'),
),
migrations.AlterField(
model_name='ordertimeframe',
name='start_date',
field=models.DateField(verbose_name='start date'),
),
migrations.AlterField(
model_name='semester',
name='budget',
field=models.DecimalField(decimal_places=2, max_digits=7, verbose_name='budget'),
),
migrations.AlterField(
model_name='semester',
name='season',
field=models.CharField(choices=[('W', 'Winter term'), ('S', 'Summer term')], default='W', max_length=1, verbose_name='season'),
),
migrations.AlterField(
model_name='semester',
name='year',
field=models.DecimalField(decimal_places=0, max_digits=2, verbose_name='year'),
),
migrations.AlterField(
model_name='tucanmodule',
name='last_offered',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pyBuchaktion.Semester', verbose_name='last offered'),
),
migrations.AlterField(
model_name='tucanmodule',
name='literature',
field=models.ManyToManyField(to='pyBuchaktion.Book', verbose_name='literature'),
),
migrations.AlterField(
model_name='tucanmodule',
name='module_id',
field=models.CharField(max_length=13, unique=True, verbose_name='module id'),
),
migrations.AlterField(
model_name='tucanmodule',
name='name',
field=models.CharField(max_length=128, verbose_name='name'),
),
]
| Python | 0.000001 | |
71f717b2e65228633f35acd5addab9464c2d6f6d | Add test_add_model | tests/test_add_model.py | tests/test_add_model.py | import datetime
from unittest.mock import patch
import pytest
from flask import Flask, redirect
from flask_restful import Api, Resource, abort, fields, marshal_with, reqparse
from flask_restful_swagger import registry, swagger
class MockBasicObject:
    # Deliberately bare fixture: no docstring, no resource_fields and no
    # custom __init__. Per the parametrize table below, add_model should
    # register it with no properties, no required fields and no defaults.
    # (No docstring on purpose -- flask-restful-swagger copies docstrings
    # into the registered model's description/notes.)
    pass
class TodoItem:
    """This is an example of a model class that has parameters in its constructor
    and the fields in the swagger spec are derived from the parameters
    to __init__.
    In this case we would have args, arg2 as required parameters and arg3 as
    optional parameter."""

    # NOTE: the docstring above is runtime-relevant -- the expected registry
    # entry in test_integration_test_add_model's docstring shows it copied
    # into the model's 'description'/'notes', so do not edit it casually.
    def __init__(self, arg1, arg2, arg3="123"):
        # Body intentionally empty: only the signature matters. swagger
        # derives properties (arg1/arg2/arg3), required (arg1/arg2) and the
        # default ("123" for arg3) from these parameters.
        pass
class ModelWithResourceFields:
    # Minimal resource_fields-driven model. Used both standalone (one row of
    # the parametrize table) and as the nested type referenced by the
    # @swagger.nested decorator on TodoItemWithResourceFields below.
    resource_fields = {"a_string": fields.String()}
@swagger.nested(
    a_nested_attribute=ModelWithResourceFields.__name__,
    a_list_of_nested_types=ModelWithResourceFields.__name__,
)
class TodoItemWithResourceFields:
    """This is an example of how Output Fields work
    (http://flask-restful.readthedocs.org/en/latest/fields.html).
    Output Fields lets you add resource_fields to your model in which you specify
    the output of the model when it gets sent as an HTTP response.
    flask-restful-swagger takes advantage of this to specify the fields in
    the model"""

    # One entry per flask-restful field type, so the integration test covers
    # the full mapping from fields.* classes to swagger property specs.
    resource_fields = {
        "a_string": fields.String(attribute="a_string_field_name"),
        "a_formatted_string": fields.FormattedString,
        "an_enum": fields.String,
        "an_int": fields.Integer,
        "a_bool": fields.Boolean,
        "a_url": fields.Url,
        "a_float": fields.Float,
        "an_float_with_arbitrary_precision": fields.Arbitrary,
        "a_fixed_point_decimal": fields.Fixed,
        "a_datetime": fields.DateTime,
        "a_list_of_strings": fields.List(fields.String),
        "a_nested_attribute": fields.Nested(
            ModelWithResourceFields.resource_fields
        ),
        "a_list_of_nested_types": fields.List(
            fields.Nested(ModelWithResourceFields.resource_fields)
        ),
    }
    # Specify which of the resource fields are required
    required = ["a_string"]
    # Additional per-field swagger metadata (here: the allowed enum values
    # for the 'an_enum' field).
    swagger_metadata = {"an_enum": {"enum": ["one", "two", "three"]}}
@pytest.mark.parametrize(
    "test_input,properties,required,defaults",
    [
        (MockBasicObject, [], [], []),
        (TodoItem, ["arg1", "arg2", "arg3"], ["arg1", "arg2"], ["arg3"]),
        (ModelWithResourceFields, ["a_string"], [], []),
        (
            TodoItemWithResourceFields,
            [
                "a_string",
                "a_formatted_string",
                "an_enum",
                "an_int",
                "a_bool",
                "a_url",
                "a_float",
                "an_float_with_arbitrary_precision",
                "a_fixed_point_decimal",
                "a_datetime",
                "a_list_of_strings",
                "a_nested_attribute",
                "a_list_of_nested_types",
            ],
            ["a_string"],
            [],
        ),
    ],
)
def test_integration_test_add_model(test_input, properties, required, defaults):
    """Integration test for `swagger.add_model(...)`.

    Ensures each model is added to `registry["models"]` with the expected
    top-level structure: every registered model carries 'description',
    'id', 'notes' and 'properties' keys, plus 'required' where the model
    declares (or implies) required fields.

    Example entry in `registry["models"]`:

        'TodoItem': {'description': '...from the class docstring...',
                     'id': 'TodoItem',
                     'notes': '...',
                     'properties': {'arg1': {'type': 'string'},
                                    'arg2': {'type': 'string'},
                                    'arg3': {'default': '123',
                                             'type': 'string'}},
                     'required': ['arg1', 'arg2']}
    """
    swagger.add_model(test_input)

    assert test_input.__name__ in registry["models"]
    # Single lookup instead of repeating the registry indexing per assert.
    model_spec = registry["models"][test_input.__name__]

    assert "description" in model_spec
    assert "notes" in model_spec
    assert "properties" in model_spec

    if hasattr(test_input, "resource_fields"):
        # resource_fields-driven models only expose 'required' when they
        # declare it explicitly (e.g. TodoItemWithResourceFields.required).
        if hasattr(test_input, "required"):
            assert "required" in model_spec
    else:
        # Models introspected via __init__ always get a 'required' list
        # derived from the non-defaulted constructor parameters.
        assert "required" in model_spec
b91eb0b8b5bd66ea0bf090e6c6e71232c81d6e7a | Add mount.py | kiwi/mount.py | kiwi/mount.py | def mountpoint(path):
try:
subprocess.check_call(['mountpoint', path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
return False
return True
def unmount(path):
subprocess.check_call(['umount', path])
def mount(src, dst, mkdir=False, force=False):
if mkdir: subprocess.check_call(['mkdir', '-p', dst])
if mountpoint(dst):
logger.warning('Destination %s is already a mountpoint' % dst)
if force: unmount(dst)
else: return
subprocess.check_call(['mount', src, dst])
| Python | 0.000002 | |
cc1a799da671fbbbdd0406eeebc8c5a801a099d5 | Add extension test | tests/test_extension.py | tests/test_extension.py | """
tests.test_extension
====================
Tests for extension
"""
import json
from flask import Flask
from flask_swag import Swag
def test_extension():
"""Basic test for flask extension."""
app = Flask(__name__)
app.config['SWAG_TITLE'] = "Test application."
app.config['SWAG_API_VERSION'] = '1.0.1'
swag = Swag(app)
with app.test_request_context('/swagger/swagger.json'):
swagger_json = app.generate_swagger()
client = app.test_client()
response = client.get('/swagger/swagger.json')
assert 200 == response.status_code
assert swagger_json == json.loads(response.data.decode('utf-8'))
| Python | 0.000001 | |
4181f69bda52c4cbec7ac1d7529d44e26ede61d1 | create object base classes. | pygeobase/object_base.py | pygeobase/object_base.py | # Copyright (c) 2015, Vienna University of Technology, Department of Geodesy
# and Geoinformation. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology, Department of
# Geodesy and Geoinformation nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class TS(object):
    """Base container for a single time series read from a dataset.

    The constructor arguments are stored verbatim as attributes of the
    same name (``gpi``, ``data``, ``metadata``).
    """

    def __init__(self, gpi, data, metadata):
        """Store the time series payload.

        Parameters
        ----------
        gpi : int
            Grid point index associated with the time series.
        data : pandas.DataFrame
            Pandas DataFrame that holds data for each variable of the time
            series.
        metadata : dict
            Dictionary of numpy arrays that hold the metadata.
        """
        self.gpi, self.data, self.metadata = gpi, data, metadata
class Image(object):
    """Base container for a single image (one timestamp of a dataset).

    The constructor arguments are stored verbatim as attributes of the
    same name (``data``, ``metadata``, ``lon``, ``lat``, ``timestamp``).
    """

    def __init__(self, data, metadata, lon, lat, timestamp):
        """Store the image payload and its geolocation/time information.

        Parameters
        ----------
        data : dict
            Dictionary of numpy arrays that holds the image data for each
            variable of the dataset.
        metadata : dict
            Dictionary of numpy arrays that hold the metadata.
        lon : numpy.array or None
            Array of longitudes; if None self.grid will be assumed.
        lat : numpy.array or None
            Array of latitudes; if None self.grid will be assumed.
        timestamp : datetime.datetime
            Exact timestamp of the image.
        """
        self.data = data
        self.metadata = metadata
        self.timestamp = timestamp
        self.lon = lon
        self.lat = lat
| Python | 0 | |
f1a56c1f5a043703f3dc049ebb363e3faf85f3aa | Add PARSEC tests | tests/gem5/parsec-benchmarks/test_parsec.py | tests/gem5/parsec-benchmarks/test_parsec.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from testlib import *
if config.bin_path:
resource_path = config.bin_path
else:
resource_path = joinpath(absdirpath(__file__), "..", "resources")
def test_parsec(
boot_cpu: str,
detailed_cpu: str,
num_cpus: int,
mem_system: str,
benchmark: str,
size: str,
length: str,
):
if (boot_cpu == "kvm" or detailed_cpu == "kvm") and not os.access(
"/dev/kvm", mode=os.R_OK | os.W_OK
):
# Don't run the tests if KVM is unavailable.
return
if mem_system == "mesi_two_level":
protocol_to_use="MESI_Two_Level"
else:
protocol_to_use=None
gem5_verify_config(
name="{}-boot-cpu_{}-detailed-cpu_{}-cores_{}_{}_{}_parsec-test"\
.format(
boot_cpu,
detailed_cpu,
str(num_cpus),
mem_system,
benchmark,
size,
),
verifiers=(),
fixtures=(),
config=joinpath(
config.base_dir,
"configs",
"example",
"components-library",
"parsec_disk_run.py",
),
config_args=[
"--cpu",
detailed_cpu,
"--boot-cpu",
boot_cpu,
"--num-cpus",
str(num_cpus),
"--mem-system",
mem_system,
"--benchmark",
benchmark,
"--size",
size,
"--override-download",
"--resource-directory",
resource_path,
],
valid_isas=(constants.x86_tag,),
valid_hosts=constants.supported_hosts,
protocol=protocol_to_use,
length=length,
)
#### The very long (Weekly) tests ####
# Note: The cross product of all possible PARSEC tests is huge, and there is
# little value in doing all. Therefore a sensible selection covering all
# benchmarks have been chosen.
#
# Note: At present the MESI_Two_Level protocol does not appear to work
# correctly with the SwitchableProcessor. As such they are commented out. This
# issue is documented here: https://gem5.atlassian.net/browse/GEM5-1085.
#
# Each call below registers exactly one benchmark/CPU/memory-system
# combination via test_parsec() above.
test_parsec(
    boot_cpu="kvm",
    detailed_cpu="atomic",
    num_cpus=2,
    mem_system="classic",
    benchmark="blackscholes",
    size="simsmall",
    length=constants.very_long_tag,
)
#test_parsec(
#    boot_cpu="kvm",
#    detailed_cpu="timing",
#    num_cpus=1,
#    mem_system="mesi_two_level",
#    benchmark="bodytrack",
#    size="simsmall",
#    length=constants.very_long_tag,
#)
test_parsec(
    boot_cpu="kvm",
    detailed_cpu="o3",
    num_cpus=1,
    mem_system="classic",
    benchmark="canneal",
    size="simsmall",
    length=constants.very_long_tag,
)
#test_parsec(
#    boot_cpu="kvm",
#    detailed_cpu="kvm",
#    num_cpus=8,
#    mem_system="mesi_two_level",
#    benchmark="dedup",
#    size="simsmall",
#    length=constants.very_long_tag,
#)
test_parsec(
    boot_cpu="kvm",
    detailed_cpu="atomic",
    num_cpus=2,
    mem_system="classic",
    benchmark="facesim",
    size="simsmall",
    length=constants.very_long_tag,
)
#test_parsec(
#    boot_cpu="kvm",
#    detailed_cpu="timing",
#    num_cpus=1,
#    mem_system="mesi_two_level",
#    benchmark="ferret",
#    size="simsmall",
#    length=constants.very_long_tag,
#)
test_parsec(
    boot_cpu="kvm",
    detailed_cpu="o3",
    num_cpus=1,
    mem_system="classic",
    benchmark="fluidanimate",
    size="simsmall",
    length=constants.very_long_tag,
)
#test_parsec(
#    boot_cpu="kvm",
#    detailed_cpu="kvm",
#    num_cpus=8,
#    mem_system="mesi_two_level",
#    benchmark="freqmine",
#    size="simsmall",
#    length=constants.very_long_tag,
#)
test_parsec(
    boot_cpu="kvm",
    detailed_cpu="atomic",
    num_cpus=2,
    mem_system="classic",
    benchmark="raytrace",
    size="simsmall",
    length=constants.very_long_tag,
)
#test_parsec(
#    boot_cpu="kvm",
#    detailed_cpu="timing",
#    num_cpus=1,
#    mem_system="mesi_two_level",
#    benchmark="streamcluster",
#    size="simsmall",
#    length=constants.very_long_tag,
#)
test_parsec(
    boot_cpu="kvm",
    detailed_cpu="o3",
    num_cpus=1,
    mem_system="classic",
    benchmark="swaptions",
    size="simsmall",
    length=constants.very_long_tag,
)
#test_parsec(
#    boot_cpu="kvm",
#    detailed_cpu="kvm",
#    num_cpus=8,
#    mem_system="mesi_two_level",
#    benchmark="vips",
#    size="simsmall",
#    length=constants.very_long_tag,
#)
#test_parsec(
#    boot_cpu="kvm",
#    detailed_cpu="timing",
#    num_cpus=1,
#    mem_system="mesi_two_level",
#    benchmark="x264",
#    size="simsmall",
#    length=constants.very_long_tag,
#)
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.