text stringlengths 0 1.05M | meta dict |
|---|---|
from frt_server.tests import TestMinimal
from frt_server.tables import Family, Font
from sqlalchemy.orm import joinedload
import json
import os
class UfoLoadingTestCase(TestMinimal):
    """Exercises the /font/<id>/ufo endpoint against an uploaded UFO font."""

    def setUp(self):
        super().setUp()
        self.login_as('eve@evil.com', 'eveisevil')
        # Create a family, persist it, then upload a UFO archive into it.
        session = self.connection.session
        family = Family(family_name='Riblon')
        session.add(family)
        session.commit()
        session.refresh(family)
        self.family_id = family._id
        self.upload_font_file(self.family_id, 'testFiles/RiblonSans/RiblonSans.ufo.zip')
        # Re-read the family to pick up the font created by the upload.
        family = session.query(Family).get(self.family_id)
        self.font_id = family.fonts[0]._id

    def helper_send_query(self, query_json):
        """GET the ufo endpoint with the given query and return its payload."""
        encoded = json.dumps(query_json)
        data, status = self.get('/font/{}/ufo?query={}'.format(self.font_id, encoded))
        self.assertEqual(status, 200)
        # make sure we actually have data
        self.assertIsNotNone(data)
        return data

    def helper_check_glif_contents(self, glifs):
        """Assert the glif dict contains well-formed 'A' and 's' glyphs."""
        self.assertIsNotNone(glifs)
        xml_header = '<?xml version="1.0" encoding="UTF-8"?>\n'
        self.assertIn('A', glifs)
        self.assertTrue(glifs['A'].startswith(xml_header + '<glyph name="A" format="2">'))
        self.assertIn('s', glifs)
        self.assertTrue(glifs['s'].startswith(xml_header + '<glyph name="s" format="2">'))

    def helper_check_fontinfo(self, fontinfo):
        """Assert the expected vertical metrics are present in fontinfo."""
        self.assertIsNotNone(fontinfo)
        self.assertIn('ascender', fontinfo)
        self.assertEqual(fontinfo["ascender"], 800)
        self.assertIn('unitsPerEm', fontinfo)
        self.assertEqual(fontinfo["unitsPerEm"], 1000)

    def test_load_contents_plist(self):
        glyphs = self.helper_send_query({"glyphs": None})['glyphs']
        self.assertIsNotNone(glyphs)
        self.assertEqual(glyphs, {"A": "A_.glif", "a": "a.glif", "s": "s.glif", "space": "space.glif"})
        # our plist SHOULD contain 4 different entries
        self.assertEqual(len(glyphs), 4)

    def test_get_fontinfo_plist(self):
        data = self.helper_send_query({"fontinfo": None})
        self.helper_check_fontinfo(data['fontinfo'])

    def test_get_all_glifs(self):
        data = self.helper_send_query({'glifs': None})
        self.assertIn('glifs', data)
        glifs = data['glifs']
        self.helper_check_glif_contents(glifs)
        self.assertEqual(len(glifs), 4)

    def test_get_glifs(self):
        data = self.helper_send_query({"glifs": ['A', 's']})
        self.assertIn('glifs', data)
        self.helper_check_glif_contents(data['glifs'])

    def test_get_glifs_and_fontinfo(self):
        data = self.helper_send_query({"fontinfo": None, "glifs": ['A', 's']})
        self.assertIn('fontinfo', data)
        self.assertIn('glifs', data)
        self.helper_check_glif_contents(data['glifs'])
        self.helper_check_fontinfo(data['fontinfo'])

    def test_get_missing_features(self):
        data = self.helper_send_query({'features': None})
        self.assertIn('features', data)
        self.assertIsNone(data['features'])
| {
"repo_name": "HPI-SWA-Lab/BP2016H1",
"path": "frt_server/tests/test_ufo_loading.py",
"copies": "1",
"size": "3178",
"license": "mit",
"hash": -5021213436701986000,
"line_mean": 33.9230769231,
"line_max": 103,
"alpha_frac": 0.6236626809,
"autogenerated": false,
"ratio": 3.384451544195953,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9501208200995541,
"avg_score": 0.001381204820082532,
"num_lines": 91
} |
from frt_server.tests import TestMinimal
from sqlalchemy import func
from frt_server.tables import User
class LoginTestCase(TestMinimal):
    """Authentication and registration flows."""

    def test_valid_login(self):
        result = self.login('eve@evil.com', 'eveisevil')
        self.assertEqual(result[1], 200)

    def test_invalid_login(self):
        result = self.login('eve@evil.com', 'nicetry')
        self.assertEqual(result[1], 401)

    def test_get_font_resource(self):
        # An authenticated client may list fonts.
        self.login_as('eve@evil.com', 'eveisevil')
        _, status = self.get('/font')
        self.assertEqual(status, 200)

    def test_register_new_user(self):
        session = self.connection.session()
        before = session.query(func.count(User._id)).scalar()
        result = self.post('/register', dict(email='eve@eviler.com', username='Eva', password='eveisevil'))
        self.assertEqual(result[1], 200)
        after = session.query(func.count(User._id)).scalar()
        # A successful registration must have created a user row.
        self.assertGreater(after, before)

    def test_register_user_with_missing_email(self):
        session = self.connection.session()
        before = session.query(func.count(User._id)).scalar()
        result = self.post('/register', dict(email='', username='Eva', password='eveisevil'))
        self.assertEqual(result[1], 400)
        after = session.query(func.count(User._id)).scalar()
        # A rejected registration must not add rows.
        self.assertEqual(after, before)
| {
"repo_name": "HPI-SWA-Lab/BP2016H1",
"path": "frt_server/tests/test_login.py",
"copies": "1",
"size": "1385",
"license": "mit",
"hash": 3646039837932710400,
"line_mean": 40.9696969697,
"line_max": 109,
"alpha_frac": 0.6548736462,
"autogenerated": false,
"ratio": 3.6447368421052633,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.978728572274162,
"avg_score": 0.002464953112728624,
"num_lines": 33
} |
from fruitynutters.cart.models import Cart
# Util
def get_session_cart(session):
    """Return the Cart bound to this session, creating one if needed.

    A stale cart id (no matching Cart row) is dropped from the session and
    a fresh cart is created via recursion.
    """
    cart_id = session.get('cart_id')
    if not cart_id:
        # No cart yet: create one and remember its id in the session.
        cart = Cart()
        cart.save()
        session['cart_id'] = cart.id
        return cart
    try:
        # Try to retrieve the cart matching the stored id.
        return Cart.objects.get(id__exact=cart_id)
    except Cart.DoesNotExist:
        # Stale id: forget it and start over.
        del session['cart_id']
        return get_session_cart(session)
# Ported from Recipe 3.9 in Secure Programming Cookbook for C and C++ by
# John Viega and Matt Messier (O'Reilly 2003)
from string import *
rfc822_specials = '()<>@,;:\\"[]'

def isAddressValid(addr):
    """Validate an RFC 822-style email address (name@domain).

    Returns a falsy value (0/False) for invalid addresses and a truthy
    value for valid ones.  Ported from Recipe 3.9 of "Secure Programming
    Cookbook for C and C++" (Viega & Messier, O'Reilly 2003).
    """
    # First we validate the name portion (name@domain)
    c = 0
    while c < len(addr):
        if addr[c] == '"' and (not c or addr[c - 1] == '.' or addr[c - 1] == '"'):
            # Quoted-string atom: scan its printable contents.
            c = c + 1
            while c < len(addr):
                if addr[c] == '"': break
                # bug fix: guard c + 1 so an address ending in a backslash
                # inside a quote no longer raises IndexError
                if addr[c] == '\\' and c + 1 < len(addr) and addr[c + 1] == ' ':
                    c = c + 2
                    continue
                if ord(addr[c]) < 32 or ord(addr[c]) >= 127: return 0
                c = c + 1
            else: return 0  # unterminated quote
            # bug fix: step past the closing quote before inspecting the next
            # character (the C original does `if (!*c++) return 0;`); without
            # this, every quoted local part was rejected by the '.' check.
            c = c + 1
            if c >= len(addr): return 0
            if addr[c] == '@': break
            if addr[c] != '.': return 0
            c = c + 1
            continue
        if addr[c] == '@': break
        if ord(addr[c]) <= 32 or ord(addr[c]) >= 127: return 0
        if addr[c] in rfc822_specials: return 0
        c = c + 1
    # Reject empty local part or one ending with a dot.
    if not c or addr[c - 1] == '.': return 0
    # Next we validate the domain portion (name@domain)
    domain = c = c + 1
    if domain >= len(addr): return 0
    count = 0
    while c < len(addr):
        if addr[c] == '.':
            # No leading, trailing-adjacent, or doubled dots in the domain.
            if c == domain or addr[c - 1] == '.': return 0
            count = count + 1
        if ord(addr[c]) <= 32 or ord(addr[c]) >= 127: return 0
        if addr[c] in rfc822_specials: return 0
        c = c + 1
    # The domain must contain at least one dot.
    return count >= 1
| {
"repo_name": "brew/fruitynutters",
"path": "webapps/fruitynutters/util.py",
"copies": "1",
"size": "2207",
"license": "mit",
"hash": 7810289209198469000,
"line_mean": 31.9402985075,
"line_max": 82,
"alpha_frac": 0.5074762121,
"autogenerated": false,
"ratio": 3.5654281098546043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4572904321954604,
"avg_score": null,
"num_lines": null
} |
from fsa import Nfa
import copy
import pdb
from transition import *
from error import *
from nameGenerator import IndexNameGenerator
from state import State
class FstState(State):
    """A single FST state: a mapping from input symbol to outgoing Transitions."""

    def __init__(self, name):
        State.__init__(self, name)

    def __str__(self):
        output = ""
        for transitionList in list(self.transitions.values()):
            for transition in transitionList:
                output += str(transition) + "\n"
        return output

    def add(self, transition):
        """
        With this function you can add a 3-tuple, a 4-tuple
        transition or a Transition object.
        """
        if transition.__class__ != Transition:
            transition = Transition(transition)
        # An unnamed state adopts the name of its first transition's source.
        if self.name is None:
            self.name = transition.start
        elif self.name != transition.start:
            raise ConstructionError( "The transition that you are trying to add is not part of this state")
        if transition.input not in self.transitions:
            self.transitions[transition.input] = []
        self.transitions[transition.input].append(transition)

    def inverse(self):
        """Return a copy of this state with every transition inverted."""
        other = copy.copy(self)
        for key in list(other.transitions.keys()):
            other.transitions[key] = [s.inverse() for s in other.transitions[key]]
        return other

    def __eq__(lhs, rhs):
        okay = True
        if lhs.name != rhs.name:
            okay = False
        if list(lhs.transitions.keys()) != list(rhs.transitions.keys()):
            # NOTE(review): comparing key *lists* makes equality depend on
            # dict insertion order -- confirm that is intended.
            okay = False
            # bug fix: removed a leftover pdb.set_trace() that froze
            # execution whenever two states had differing key lists
        if okay is not False:
            for key in list(lhs.transitions.keys()):
                if lhs.transitions[key] != rhs.transitions[key]:
                    okay = False
        return okay

    def __ne__(lhs, rhs):
        return not lhs.__eq__(rhs)
class Fst(Nfa):
    """A finite-state transducer.

    Each state (FstState) holds transitions of the form
    (source state, input symbol, output symbol, target state),
    given either as 4-tuples or as Transition objects.
    """

    def __init__(self, states, alphabet1, alphabet2, startState, finalStates):
        self.alphabet = alphabet1
        self.alphabet2 = alphabet2
        # Register every supplied state / transition.
        self.states = {}
        for entry in states:
            self.add(entry)
        self.startState = str(startState)
        # Normalize finalStates into a list of state-name strings.
        if finalStates.__class__ != list:
            finalStates = [finalStates]
        self.finalStates = [
            fs.name if hasattr(fs, "name") else str(fs)
            for fs in finalStates
        ]

    def __str__(self):
        header = "starting state: " + str(self.startState) + "\n" \
                 + "final states: " + str(self.finalStates) + "\n"
        body = "".join(str(state) for state in list(self.states.values()))
        return header + body

    def add(self, state):
        """Add a FstState, a Transition, or a 4-tuple transition to this FST."""
        if state.__class__ == tuple:
            state = Transition(state)
        if state.__class__ == Transition:
            # Ensure the source state exists, then attach the transition.
            if state.start not in self.states:
                self.states[state.start] = FstState(state.start)
            self.states[state.start].add(state)
        if state.__class__ == FstState:
            self.states[state.name] = state

    def inverse(self):
        """Return a copy of this FST with every transition inverted."""
        other = copy.copy(self)
        other.states = {s.name: s.inverse() for s in list(other.states.values())}
        return other
| {
"repo_name": "jpbarrette/moman",
"path": "finenight/python/fst.py",
"copies": "1",
"size": "4040",
"license": "mit",
"hash": -8651193796492075000,
"line_mean": 24.7324840764,
"line_max": 107,
"alpha_frac": 0.5579207921,
"autogenerated": false,
"ratio": 4.261603375527426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014737109125638925,
"num_lines": 157
} |
from fsa import *
from nameGenerator import *
class IncrementalAdfa(Dfa):
"""This class is an Acyclic Deterministic Finite State Automaton
constructed by a list of words.
"""
# NOTE(review): this module is Python 2 code (has_key, print statements,
# unicode); it will not run unchanged on Python 3.
def __init__(self, nameGenerator = None):
# Fresh automaton: empty register of confirmed states, no final states,
# and a start state drawn from the (injectable) name generator.
if nameGenerator is None:
nameGenerator = IndexNameGenerator()
self.nameGenerator = nameGenerator
self.register = {}
self.finalStates = []
self.startState = self.nameGenerator.generate()
self.states = {self.startState : State(self.startState)}
def initSearch(self) :
# Final pass: confirm the remaining unregistered chain from the root.
self.replaceOrRegister(self.startState)
def getCommonPrefix(self, word):
# Walk existing transitions as far as they match `word`; returns the
# last matched state and the remaining (unmatched) suffix of the word.
stateName = self.startState
index = 0
nextStateName = stateName
while nextStateName is not None:
symbol = word[index]
stateName = nextStateName
if self.states[stateName].transitions.has_key(symbol):
nextStateName = self.states[stateName].transitions[symbol]
index += 1
else:
nextStateName = None
# NOTE(review): word[index] raises IndexError when `word` is fully
# consumed while a matching path continues -- presumably inputs avoid
# duplicate/prefix words; verify.
return (stateName, word[index:])
def hasChildren(self, stateName):
# True when the state has at least one outgoing transition.
okay = False
if filter(lambda s: s, self.states[stateName].transitions.values()):
okay = True
return okay
def addSuffix(self, stateName, currentSuffix):
# Append a fresh linear chain of states spelling `currentSuffix`,
# marking the chain's last state as final.
lastState = stateName
while len(currentSuffix) > 0:
newStateName = self.nameGenerator.generate()
symbol = currentSuffix[0]
currentSuffix = currentSuffix[1:]
self.states[stateName].transitions[symbol] = newStateName
self.states[newStateName] = State(newStateName)
stateName = newStateName
self.finalStates.append(stateName)
def markedAsRegistered(self, stateName):
return self.register.has_key(stateName)
def markAsRegistered(self, stateName):
self.register[stateName] = True
def equivalentRegisteredState(self, stateName):
# Linear scan of the register for a state equivalent to `stateName`.
# NOTE(review): the print statements below look like debug leftovers.
equivatentState = None
for state in self.register.keys():
print stateName, state
if self.areEquivalents(state, stateName):
equivatentState = state
print 'foo', state,
return equivatentState
def lastChild(self, stateName):
# Returns (child state, symbol) for the lexicographically last
# outgoing symbol of `stateName`.
input = self.states[stateName].transitions.keys()
input.sort()
return (self.states[stateName].transitions[input[-1]], input[-1])
def replaceOrRegister(self, stateName):
# Recursively confirm (register) the most recently added child chain.
# The commented-out lines are the disabled state-merging step of the
# classic incremental-DAWG construction algorithm.
#childName = self.finalStates[-1]
childName, lastSymbol = self.lastChild(stateName)
if not self.markedAsRegistered(childName):
if self.hasChildren(childName):
self.replaceOrRegister(childName)
#equivalentState = self.equivalentRegisteredState(childName)
#if equivalentState is not None:
# self.deleteBranch(childName)
# self.states[stateName].transitions[lastSymbol] = equivalentState
#else:
self.markAsRegistered(childName)
def deleteBranch(self, child):
# Remove `child` and all of its unregistered descendants, level by level.
childs = [child]
while len(childs) > 0:
nextChilds = []
for child in childs:
nextChilds += filter(lambda s: not self.markedAsRegistered(s), self.states[child].transitions.values())
self.states.pop(child)
if child in self.finalStates:
self.finalStates.remove(child)
childs = nextChilds
def createFromSortedListOfWords(self, word):
# Insert one word (callers must feed words in sorted order); a single
# trailing newline is stripped so raw file lines can be passed in.
word = unicode(word)
if word.endswith('\n'):
word = word[:-1]
lastStateName, currentSuffix = self.getCommonPrefix(word)
if self.hasChildren(lastStateName):
self.replaceOrRegister(lastStateName)
self.addSuffix(lastStateName, currentSuffix)
def createFromArbitraryListOfWords(self, words):
# Resets the automaton; NOTE(review): `words` is unused here.
self.register = {}
self.finalStates = []
self.startState = self.nameGenerator.generate()
self.states = {self.startState : State(self.startState)}
| {
"repo_name": "datamade/moman",
"path": "finenight/iadfa.py",
"copies": "1",
"size": "4186",
"license": "mit",
"hash": 3152843284105244000,
"line_mean": 28.6879432624,
"line_max": 119,
"alpha_frac": 0.6070234114,
"autogenerated": false,
"ratio": 4.364963503649635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010705045751237267,
"num_lines": 141
} |
from fsa import *
import copy
def create(start, finals, edges):
# Build an Nfa from (source, symbol, target) edge triples; a None symbol
# denotes an epsilon transition.  Any target with no outgoing edges still
# gets an (empty) state entry.
states = {}
for e in edges:
if e[1] is None:
states.setdefault(e[0], ({}, []))[1].append(e[2])
else:
states.setdefault(e[0], ({}, []))[0][e[1]] = e[2]
states.setdefault(e[2], ({}, []))
# NOTE(review): Python 2 assumed -- map() must yield a list below.
states = map(lambda s: State(s[0], s[1][0], epsilon = s[1][1]), states.items())
# Collect the alphabet as the set of symbols actually used, in order seen.
alphabet = []
for s in states:
for t in s.transitions:
if t not in alphabet:
alphabet.append(t)
states_map = dict(map(lambda s: (s.name, s), states))
return Nfa(states, alphabet, states_map[start], map(lambda s: states_map[s], finals))
def union(lhs, rhs):
# Combine two NFAs into one accepting the union of both languages: keep
# lhs's start state and add an epsilon edge from it to rhs's start state.
# NOTE(review): binaryOperationRenaming comes from `fsa` (star import);
# presumably it renames states to avoid name clashes -- verify there.
lhs, rhs = binaryOperationRenaming(lhs, rhs, True, None)
new = Nfa(lhs.states.values() + rhs.states.values(),
lhs.alphabet + filter(lambda s: s not in lhs.alphabet,
rhs.alphabet),
lhs.startState,
rhs.finalStates + lhs.finalStates)
new.states[new.startState].epsilon.append(rhs.startState)
return new
def repeat(lhs, start, end):
    """Build an FSA accepting at least `start` and fewer than `end`
    repetitions of the language of `lhs`."""
    # `optional` is lhs with an epsilon skip from its start to each final
    # state, i.e. it accepts zero or one repetition.
    optional = copy.deepcopy(lhs)
    for final_name in optional.finalStates:
        optional.states[optional.startState].epsilon.append(final_name)
    if start > 0:
        # Mandatory part: `start` copies of lhs chained together.
        result = copy.deepcopy(lhs)
        for _ in range(1, start):
            result = result.concatenate(lhs)
    else:
        result = optional
    # Pad with optional copies up to (but excluding) `end` repetitions.
    for _ in range(start, end - 1):
        result = result.concatenate(optional)
    return result
# This function will remove the deadend states.
# This makes the FSA cleaner to display.
def clean(fsa):
# First pass: a non-final state whose transitions lead nowhere (or only
# back to itself) is a dead end.
deadends = []
for label,state in fsa.states.items():
if label not in fsa.finalStates:
destinations = []
for dest in state.transitions.values():
for d in dest:
if d not in destinations:
destinations.append(d)
if label in destinations:
destinations.remove(label)
if len(destinations) == 0:
deadends.append(label)
# Second pass: strip every edge leading into a dead end.
# NOTE(review): deleting from state.transitions (and from `dest`) while
# iterating is only safe on Python 2, where items() returns a list.
for label,state in fsa.states.items():
for input,dest in state.transitions.items():
for d in dest:
if d in deadends:
dest.remove(d)
if len(dest) == 0:
del state.transitions[input]
# This function will dump in a text file the current fsa.
# first line correspond to:
#
# start_state final_state1 final_state2 ...
#
# Then every other line is just a tuple of edges.
def dump(fsa, filename):
# NOTE(review): the file handle is never explicitly closed; relies on
# interpreter cleanup to flush.
file = open(filename, "w")
line = str(fsa.startState)
for fs in fsa.finalStates:
line += " " + str(fs)
lines = [line + "\n"]
for label, state in fsa.states.items():
for input, dest in state.transitions.items():
# Transducer edges encode "input|output" in a single symbol key.
input, output = input.split("|")
for d in dest:
lines.append("%s %s %s %s\n" % (str(label), input, output, str(d)))
file.writelines(lines)
def seq(symbols):
    """Build an NFA recognizing exactly the given symbol sequence.

    For ["a", "b", "c"] the returned NFA accepts only the string "abc":
    state i consumes symbols[i] and moves to state i + 1, with the last
    state final.
    """
    transitions = [(idx, sym, idx + 1) for idx, sym in enumerate(symbols)]
    return create(0, [len(symbols)], transitions)
| {
"repo_name": "datamade/moman",
"path": "finenight/utils.py",
"copies": "1",
"size": "3562",
"license": "mit",
"hash": -8249069639410096000,
"line_mean": 30.2456140351,
"line_max": 89,
"alpha_frac": 0.569904548,
"autogenerated": false,
"ratio": 3.6646090534979425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4734513601497943,
"avg_score": null,
"num_lines": null
} |
from fscloud import FsCloud
from environmentinfo import *
from commonutils import *
import threading
def fs_cloud_2_dict(obj):
    """Return a shallow dict copy of the object's instance attributes."""
    return dict(obj.__dict__)
def dict_2_fs_cloud(fs_dict):
    """Rebuild an FsCloud from a dict of constructor keyword arguments."""
    return FsCloud(**fs_dict)
fs_cloud_data_file = os.path.join("/home/hybrid_cloud/data",
"fs_access_cloud.data")
fs_cloud_data_file_lock = threading.Lock()
class FsCloudDataHandler(object):
# Persists FsCloud records as a JSON mapping (cloud_id -> attribute dict)
# in fs_cloud_data_file; writers serialize through the module-level lock.
# NOTE(review): `os`, `json` and `logger` are presumably supplied by the
# star imports above (commonutils/environmentinfo) -- verify.
def __init__(self):
pass
def list_fs_clouds(self):
# Returns the known cloud ids (a dict keys view on Python 3).
cloud_dicts = self.__read_fs_cloud_info__()
return cloud_dicts.keys()
def get_fs_cloud(self, cloud_id):
# Returns the FsCloud for cloud_id, or None when unknown.
cloud_dicts = self.__read_fs_cloud_info__()
if cloud_id in cloud_dicts.keys():
return dict_2_fs_cloud(cloud_dicts[cloud_id])
else:
return None
def delete_fs_cloud(self, cloud_id):
# Removes one cloud entry; errors are logged, never raised.
fs_cloud_data_file_lock.acquire()
try:
cloud_dicts = self.__read_fs_cloud_info__()
cloud_dicts.pop(cloud_id)
self.__write_aws_cloud_info__(cloud_dicts)
except Exception as e:
# NOTE(review): e.message is Python 2 only.
logger.error("delete fs cloud data file error, "
"cloud_id: %s, error: %s"
% (cloud_id, e.message))
finally:
fs_cloud_data_file_lock.release()
def add_fs_cloud(self, fs_cloud):
# Inserts or overwrites the entry keyed by fs_cloud.cloud_id.
fs_cloud_data_file_lock.acquire()
try:
cloud_dicts = self.__read_fs_cloud_info__()
dict_temp = fs_cloud_2_dict(fs_cloud)
cloud_dicts[fs_cloud.cloud_id] = dict_temp
self.__write_aws_cloud_info__(cloud_dicts)
except Exception as e:
logger.error("add fs cloud data file error, "
"fs cloud: %s, error: %s"
% (fs_cloud, e.message))
finally:
fs_cloud_data_file_lock.release()
@staticmethod
def __read_fs_cloud_info__():
# A missing data file is treated as an empty store.
if not os.path.exists(fs_cloud_data_file):
logger.error("read %s : No such file." % fs_cloud_data_file)
cloud_dicts = {}
else:
with open(fs_cloud_data_file, 'r+') as fd:
cloud_dicts = json.loads(fd.read())
return cloud_dicts
@staticmethod
def __write_aws_cloud_info__(cloud_dicts):
# NOTE(review): name says "aws" but this writes the fs cloud file.
with open(fs_cloud_data_file, 'w+') as fd:
fd.write(json.dumps(cloud_dicts, indent=4))
| {
"repo_name": "Hybrid-Cloud/cloud_manager",
"path": "code/cloudmanager/fscloudpersist.py",
"copies": "3",
"size": "2563",
"license": "apache-2.0",
"hash": 1257525483067427600,
"line_mean": 32.7236842105,
"line_max": 108,
"alpha_frac": 0.5559890753,
"autogenerated": false,
"ratio": 3.4776119402985075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001282725848886546,
"num_lines": 76
} |
from fsdict import FSDict
import feedgenerator
from urllib import quote_plus
import os.path
from feeddirectives import Latest
from feednodes import latest
from sphinx.addnodes import toctree
from docutils import nodes
#global
feed_entries = None
#constant unlikely to occur in a docname and legal as a filename
MAGIC_SEPARATOR = '---###---'
def parse_date(datestring):
# Parse a free-form date string via dateutil, lazily importing and then
# caching the parser instance as a function attribute after first use.
try:
parser = parse_date.parser
except AttributeError:
import dateutil.parser
parser = dateutil.parser.parser()
parse_date.parser = parser
return parser.parse(datestring)
def setup(app):
"""
see: http://sphinx.pocoo.org/ext/appapi.html
this is the primary extension point for Sphinx
"""
from sphinx.application import Sphinx
# Guard: some tools call setup() with a non-Sphinx app object.
if not isinstance(app, Sphinx): return
# Feed configuration values; rebuilt when 'html' output config changes.
app.add_config_value('feed_title', '', 'html')
app.add_config_value('feed_base_url', '', 'html')
app.add_config_value('feed_description', '', 'html')
app.add_config_value('feed_filename', 'rss.xml', 'html')
app.add_config_value('feed_blacklist', [], 'html')
app.add_directive('latest', Latest)
app.add_node(latest)
# Wire feed generation into the Sphinx build lifecycle.
app.connect('build-finished', emit_feed)
app.connect('builder-inited', create_feed_container)
app.connect('env-purge-doc', remove_dead_feed_item)
app.connect('env-purge-doc', purge_dates)
#I would like to parse dates here, but we aren't supplied the document name in the handler, so it's pointless
#app.connect('doctree-read', parse_article_date)
app.connect('html-page-context', create_feed_item)
app.connect('doctree-resolved', process_latest_toc)
def purge_dates(app, env, docname):
    """env-purge-doc handler: drop any cached publication date for docname."""
    feed_pub_dates = getattr(env, 'feed_pub_dates', None)
    if feed_pub_dates is None:
        # No cache built yet -- nothing to purge.
        return
    feed_pub_dates.pop(docname, None)
def process_latest_toc(app, doctree, fromdocname):
"""We traverse the doctree looking for publication dates to build the
date-based ToC here. Since the ordering is ill-defined, from our
perspective, we parse all of them each time, but cache them in the
environment"""
env = app.builder.env
cache_article_dates(env)
feed_pub_dates = getattr(env, 'feed_pub_dates', {})
def is_blacklisted(docname):
# A doc is excluded when any configured substring occurs in its name.
for blacklist_entry in app.config.feed_blacklist:
if blacklist_entry in docname:
return True
return False
# Replace every `latest` placeholder node with a date-sorted bullet list.
for node in doctree.traverse(latest):
entries = node['entries']
includefiles = node['includefiles']
# Keep only dated entries, sorted newest first.
decorated_entries = [
(feed_pub_dates.get(doc), title, doc)
for title, doc in entries
if doc in feed_pub_dates]
decorated_entries.sort(reverse=True)
latest_list = nodes.bullet_list('')
for date, title, docname in decorated_entries:
if is_blacklisted(docname):
continue
para = nodes.paragraph()
list_item = nodes.list_item('', para)
if title is None:
title = env.titles.get(docname)
if title:
title = title[0] #.astext()
# Create a reference
newnode = nodes.reference('', '')
# date
stringdate = date.strftime('%Y-%m-%d') + ':'
para += nodes.Text(stringdate, stringdate)
para += nodes.Text(' ', ' ')
# title and link
innernode = title #nodes.emphasis(title, title)
newnode['refdocname'] = docname
newnode['refuri'] = app.builder.get_relative_uri(
fromdocname, docname)
newnode.append(innernode)
para += newnode
# Insert into the latestlist
latest_list.append(list_item)
node.replace_self(latest_list)
def create_feed_container(app):
"""
create lazy filesystem stash for keeping RSS entry fragments, since we
don't want to store the entire site in the environment (in fact, even if
we did, it wasn't persisting for some reason.)
"""
global feed_entries
# Fragments live beside (not inside) the output dir so they survive builds.
rss_fragment_path = os.path.realpath(os.path.join(app.outdir, '..', 'rss_entry_fragments'))
feed_entries = FSDict(work_dir=rss_fragment_path)
# Absolute feed URL, stashed on the env for use by page templates.
app.builder.env.feed_url = app.config.feed_base_url + '/' + \
app.config.feed_filename
def cache_article_dates(env):
#This should only be run once, although currently it is run many times,
# wasting CPU cycles
if not hasattr(env, 'feed_pub_dates'):
env.feed_pub_dates = {}
feed_pub_dates = env.feed_pub_dates
for docname, doc_metadata in env.metadata.iteritems():
doc_metadata = env.metadata.get(docname, {})
if 'date' not in doc_metadata:
continue #don't index dateless articles
try:
pub_date = parse_date(doc_metadata['date'])
feed_pub_dates[docname] = pub_date
except ValueError, exc:
#probably a nonsensical date
app.builder.warn('date parse error: ' + str(exc) + ' in ' + docname)
def get_date_for_article(env, docname):
    """Return the cached publication date for docname, or None if unknown."""
    return env.feed_pub_dates.get(docname)
def create_feed_item(app, docname, templatename, ctx, doctree):
"""
Here we have access to nice HTML fragments to use in, say, an RSS feed.
We serialize them to disk so that we get them preserved across builds.
We also inject useful metadata into the context here.
"""
global feed_entries
from absolutify_urls import absolutify
env = app.builder.env
metadata = env.metadata.get(docname, {})
pub_date = get_date_for_article(env, docname)
if not pub_date:
# Undated documents never enter the feed.
return
# RSS item attributes, w/defaults:
# title, link, description, author_email=None,
# author_name=None, author_link=None, pubdate=None, comments=None,
# unique_id=None, enclosure=None, categories=(), item_copyright=None,
# ttl=None,
link = app.config.feed_base_url + '/' + ctx['current_page_name'] + ctx['file_suffix']
item = {
'title': ctx.get('title'),
'link': link,
'unique_id': link,
'description': absolutify(ctx.get('body'), link),
'pubdate': pub_date
}
if 'author' in metadata:
item['author'] = metadata['author']
# Key encodes the date so entries sort chronologically (see dated_name).
feed_entries[dated_name(docname, pub_date)] = item
#Now, useful variables to keep in context
ctx['rss_link'] = app.builder.env.feed_url
ctx['pub_date'] = pub_date
def remove_dead_feed_item(app, env, docname):
"""
TODO:
purge unwanted crap
"""
global feed_entries
# Entry keys end with MAGIC_SEPARATOR + quoted docname (see dated_name).
munged_name = ''.join([MAGIC_SEPARATOR,quote_plus(docname)])
# NOTE(review): deletes from feed_entries while iterating it -- only safe
# if FSDict iteration snapshots its keys; verify.
for name in feed_entries:
if name.endswith(munged_name):
del(feed_entries[name])
def emit_feed(app, exc):
# build-finished handler: assemble all stashed entries, newest first,
# into an RSS 2.0 file inside the build output directory.
global feed_entries
import os.path
# Fall back to the project name when no feed title is configured.
title = app.config.feed_title
if not title:
title = app.config.project
feed_dict = {
'title': title,
'link': app.config.feed_base_url,
'feed_url': app.config.feed_base_url,
'description': app.config.feed_description
}
if app.config.language:
feed_dict['language'] = app.config.language
if app.config.copyright:
feed_dict['feed_copyright'] = app.config.copyright
feed = feedgenerator.Rss201rev2Feed(**feed_dict)
app.builder.env.feed_feed = feed
# Keys embed ISO dates, so a reverse sort is newest-first.
# NOTE(review): calling .sort() on keys() is Python 2 only here.
ordered_keys = feed_entries.keys()
ordered_keys.sort(reverse=True)
for key in ordered_keys:
feed.add_item(**feed_entries[key])
outfilename = os.path.join(app.builder.outdir,
app.config.feed_filename)
fp = open(outfilename, 'w')
feed.write(fp, 'utf-8')
fp.close()
def dated_name(docname, date):
    """Build a filesystem-safe, chronologically sortable key for a feed entry.

    The ISO date prefix makes lexicographic order equal chronological order,
    MAGIC_SEPARATOR keeps date and docname separable, and quote_plus strips
    characters that are inconvenient in filenames.  Hour of publication is
    effectively ignored by consumers.
    """
    stamp = date.isoformat()
    return quote_plus(stamp + MAGIC_SEPARATOR + docname)
| {
"repo_name": "zerotired/manticore-ext",
"path": "src/zt/manticore/sphinxcontrib/feed/__init__.py",
"copies": "1",
"size": "8134",
"license": "bsd-2-clause",
"hash": 6681136646537362000,
"line_mean": 32.6115702479,
"line_max": 113,
"alpha_frac": 0.6256454389,
"autogenerated": false,
"ratio": 3.795613625758283,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49212590646582827,
"avg_score": null,
"num_lines": null
} |
from FSelement import Element
import datetime
class Folder(Element):
# Composite node of a file-system tree.  Folder.root is the singleton tree
# root: the first Folder constructed with parent=None becomes it.
root = None
def __init__(self, name, parent):
if parent == None:
if Folder.root == None:
Folder.root = self
parent = self
else: raise Exception()
self.__content = [];
Element.__init__(self, name, parent)
# NOTE(review): defined without self and without @staticmethod -- callable
# only as Folder.drawTree() on Python 3; confirm intended usage.
def drawTree():
print('Baumansicht'.center(80));
print(''.ljust(80, '='))
Folder.root.drawFolders(0)
def drawFolders(self, level):
# Print this folder indented by `level`, then recurse into subfolders.
space = "+"
for i in range(0, level):
space = "--" + space
print(space, self.getName())
for x in self.__content:
if isinstance(x, Folder):
x.drawFolders(level+1)
def getSize(self):
# Folder size = recursive sum of contained element sizes.
self.__size = 0
for element in self.__content:
self.__size += element.getSize()
return self.__size
def addElement(self, e):
self.__content.append(e)
# NOTE(review): name mangling makes this set e._Folder__parent, which
# likely differs from the parent attribute Element itself maintains --
# verify against FSelement.Element.
e.__parent = self
def getChangeDate(self):
# Latest change date among this folder and everything it contains.
# NOTE(review): self.__changeDate mangles to _Folder__changeDate; if the
# date is stored by Element under its own name, this raises
# AttributeError -- verify.
last = self.__changeDate
for x in self.__content:
changeDate = x.getChangeDate();
if changeDate > last:
last = changeDate
return last
# def printInfo(self):
# print (self.getName().ljust(20), str(self.getSize()).ljust(20), self.getType().ljust(14), str(self.getChangeDate()).ljust(26), sep="")
def listView(self):
# Tabular detail view of this folder's direct children.
print('Detailansicht'.center(80));
print(''.ljust(80, '='))
print("Dateiname".ljust(20), "Größe".ljust(20), "Typ".ljust(14), "Änderungsdatum".ljust(26), sep="")
print("".ljust(80, '='))
for x in self.__content:
x.printInfo()
def getType(self):
return "Folder"
| {
"repo_name": "tobiasmuehl/FileSystem",
"path": "FSfolder.py",
"copies": "1",
"size": "1627",
"license": "unlicense",
"hash": -3647355002068216000,
"line_mean": 23.2388059701,
"line_max": 140,
"alpha_frac": 0.5745073892,
"autogenerated": false,
"ratio": 3.5692307692307694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46437381584307696,
"avg_score": null,
"num_lines": null
} |
from FSError import *
class ProtectFlags:
# Amiga filesystem protection bits (Python 2 module: uses xrange).
# The rwed group is *active low*: a cleared bit grants the permission
# (see is_set below, "LO active").
FIBF_DELETE = 1
FIBF_EXECUTE = 2
FIBF_WRITE = 4
FIBF_READ = 8
FIBF_ARCHIVE = 16
FIBF_PURE = 32
FIBF_SCRIPT = 64
# Display order, highest bit first: H S P A r w e d
flag_txt = "HSPArwed"
flag_num = len(flag_txt)
flag_none = 0xf # --------
empty_string = "-" * flag_num
def __init__(self, mask=0):
self.mask = mask
def __str__(self):
# Render the mask like "----rwed": a letter is shown when the flag is
# "on" in display terms (set for upper-case HSPA bits, clear for the
# active-low lower-case rwed bits), '-' otherwise.
txt = ""
pos = self.flag_num - 1
m = 1 << pos
for i in xrange(self.flag_num):
bit = self.mask & m == m
show = '-'
flg = self.flag_txt[i]
flg_low = flg.lower()
if bit:
if flg_low != flg:
show = flg_low
else:
if flg_low == flg:
show = flg_low
txt += show
m >>= 1
pos -= 1
return txt
def bin_str(self):
# Mask as a fixed-width binary string, most significant bit first.
res = ""
m = 1 << (self.flag_num - 1)
for i in xrange(self.flag_num):
if m & self.mask == m:
res += "1"
else:
res += "0"
m >>= 1
return res
def short_str(self):
# Same as str() but with the '-' placeholders removed.
return str(self).replace("-","")
def parse(self, s):
# Parse a flag string such as "rwed" or "+rw-e"; raises FSError on an
# unknown flag character.  An empty string leaves the mask untouched.
if len(s) == 0:
return
# allow to add with '+' or sub with '-'
n = self.flag_txt
mode = '+'
self.mask = self.flag_none
for a in s.lower():
if a in '+-':
mode = a
else:
# Locate the flag letter and its bit position.
mask = None
is_low = None
for i in xrange(self.flag_num):
flg = self.flag_txt[i]
flg_low = flg.lower()
if flg_low == a:
mask = 1<<(self.flag_num - 1 - i)
is_low = flg_low == flg
break
if mask == None:
raise FSError(INVALID_PROTECT_FORMAT,extra="char: "+a)
# apply mask
if mode == '+':
if is_low:
self.mask &= ~mask
else:
self.mask |= mask
else:
if is_low:
self.mask |= mask
else:
self.mask &= ~mask
def is_set(self, mask):
return self.mask & mask == 0 # LO active
def set(self, mask):
# Grant an active-low permission by clearing its bit.
self.mask &= ~mask
def clr(self, mask):
# Revoke an active-low permission by setting its bit.
self.mask |= mask
def is_d(self):
return self.is_set(self.FIBF_DELETE)
def is_e(self):
return self.is_set(self.FIBF_EXECUTE)
def is_w(self):
return self.is_set(self.FIBF_WRITE)
def is_r(self):
return self.is_set(self.FIBF_READ)
# Ad-hoc self-test: parsing a single flag character must round-trip into the
# rendered string; mismatches are printed (Python 2 print statement below).
if __name__ == '__main__':
inp = ["h","s","p","a","r","w","e","d"]
for i in inp:
p = ProtectFlags()
p.parse(i)
s = str(p)
if not i in s:
print s
| {
"repo_name": "alpine9000/amiga_examples",
"path": "tools/external/amitools/amitools/fs/ProtectFlags.py",
"copies": "1",
"size": "2478",
"license": "bsd-2-clause",
"hash": 1417604206992610600,
"line_mean": 20.9380530973,
"line_max": 64,
"alpha_frac": 0.4778046812,
"autogenerated": false,
"ratio": 3.0859277708592776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8818703726939165,
"avg_score": 0.04900574502402238,
"num_lines": 113
} |
from FSEvents import *
import objc
import sys
import os
import stat
import errno
def T_or_F(x):
    """Render a truth value as the literal string "TRUE" or "FALSE"."""
    return "TRUE" if x else "FALSE"
class Settings (object):
# Command-line options for the FSEvents watcher (Python 2 module: uses
# `print >>` statements).
__slots__ = (
'sinceWhen',
'latency',
'flags',
'array_of_paths',
'print_settings',
'verbose',
'flush_seconds',
)
def __init__(self):
# Defaults; parse_argv() overrides several of them.
self.sinceWhen = kFSEventStreamEventIdSinceNow
self.latency = 60
self.flags = 0
self.array_of_paths = []
self.print_settings = False
self.verbose = False
self.flush_seconds = -1
def mesg(self, fmt, *args, **kwds):
# %-format with positional or keyword args; goes to stderr when verbose,
# stdout otherwise.
if args:
fmt = fmt % args
elif kwds:
fmt = fmt % kwds
if self.verbose:
print >>sys.stderr, fmt
else:
print >>sys.stdout, fmt
def debug(self, fmt, *args, **kwds):
# Emitted on stderr only when verbose.
if not self.verbose:
return
if args:
fmt = fmt % args
elif kwds:
fmt = fmt % kwds
print >>sys.stderr, fmt
def error(self, fmt, *args, **kwds):
# Always emitted on stderr.
if args:
fmt = fmt % args
elif kwds:
fmt = fmt % kwds
print >>sys.stderr, fmt
def dump(self):
# Print every setting through mesg() for -print_settings.
self.mesg("settings->sinceWhen = %d", self.sinceWhen)
self.mesg("settings->latency = %f", self.latency)
self.mesg("settings->flags = %#x", self.flags)
self.mesg("settings->num_paths = %d", len(self.array_of_paths))
for idx, path in enumerate(self.array_of_paths):
self.mesg("settings->array_of_paths[%d] = '%s'", idx, path)
self.mesg("settings->verbose = %s", T_or_F(self.verbose))
self.mesg("settings->print_settings = %s", T_or_F(self.print_settings))
self.mesg("settings->flush_seconds = %d", self.flush_seconds)
def parse_argv(self, argv):
# Scan flag arguments; everything after the first unrecognized token is
# treated as the list of paths to watch.
self.latency = 1.0
self.sinceWhen = -1 # kFSEventStreamEventIdSinceNow
self.flush_seconds = -1
idx = 1
while idx < len(argv):
if argv[idx] == '-usage':
usage(argv[0])
elif argv[idx] == '-print_settings':
self.print_settings = True
elif argv[idx] == '-sinceWhen':
self.sinceWhen = int(argv[idx+1])
idx += 1
elif argv[idx] == '-latency':
self.latency = float(argv[idx+1])
idx += 1
elif argv[idx] == '-flags':
self.flags = int(argv[idx+1])
idx += 1
elif argv[idx] == '-flush':
self.flush_seconds = float(argv[idx+1])
idx += 1
elif argv[idx] == '-verbose':
self.verbose = True
else:
break
idx += 1
self.array_of_paths = argv[idx:]
settings = Settings()
def usage(progname):
    """Print the usage text through the global settings logger, then exit(1)."""
    help_lines = (
        ("", ()),
        ("Usage: %s <flags> <path>", (progname,)),
        ("Flags:", ()),
        ("    -sinceWhen <when>    Specify a time from whence to search for applicable events", ()),
        ("    -latency <seconds>   Specify latency", ()),
        ("    -flags <flags>       Specify flags as a number", ()),
        ("    -flush <seconds>     Invoke FSEventStreamFlushAsync() after the specified number of seconds.", ()),
        ("", ()),
    )
    for fmt, fmt_args in help_lines:
        settings.mesg(fmt, *fmt_args)
    sys.exit(1)
def timer_callback(timer, streamRef):
    """CFRunLoopTimer callback: ask the FSEvent stream to flush asynchronously."""
    settings.debug("CFAbsoluteTimeGetCurrent() => %.3f", CFAbsoluteTimeGetCurrent())
    settings.debug("FSEventStreamFlushAsync(streamRef = %s)", streamRef)
    FSEventStreamFlushAsync(streamRef)
def fsevents_callback(streamRef, clientInfo, numEvents, eventPaths, eventMasks, eventIDs):
    """Per-event FSEvents callback: rescan affected dirs and report sizes.

    clientInfo carries the watched root path that my_FSEventStreamCreate()
    registered when the stream was created.
    """
    settings.debug("fsevents_callback(streamRef = %s, clientInfo = %s, numEvents = %s)", streamRef, clientInfo, numEvents)
    settings.debug("fsevents_callback: FSEventStreamGetLatestEventId(streamRef) => %s", FSEventStreamGetLatestEventId(streamRef))
    full_path = clientInfo
    for i in range(numEvents):
        path = eventPaths[i]
        # Strip a trailing slash so paths compare consistently.
        if path[-1] == '/':
            path = path[:-1]
        if eventMasks[i] & kFSEventStreamEventFlagMustScanSubDirs:
            recursive = True
        elif eventMasks[i] & kFSEventStreamEventFlagUserDropped:
            settings.mesg("BAD NEWS! We dropped events.")
            settings.mesg("Forcing a full rescan.")
            recursive = 1 # truthy, same effect as True above
            path = full_path
        elif eventMasks[i] & kFSEventStreamEventFlagKernelDropped:
            settings.mesg("REALLY BAD NEWS! The kernel dropped events.")
            settings.mesg("Forcing a full rescan.")
            recursive = 1
            path = full_path
        else:
            recursive = False
        new_size = get_directory_size(path, recursive)
        # NOTE(review): get_directory_size() as written never returns a
        # negative value, so the error branch looks unreachable — confirm.
        if new_size < 0:
            print "Could not update size on %s"%(path,)
        else:
            print "New total size: %d (change made to %s) for path: %s"%(
                get_total_size(), path, full_path)
def my_FSEventStreamCreate(path):
    """Create an FSEventStream watching *path*.

    *path* is also passed as the stream's clientInfo so that
    fsevents_callback() knows the watched root.  Returns the new
    streamRef, or None if creation failed.
    """
    if settings.verbose:
        # Single-argument parenthesized print: identical output on Python 2 and 3.
        print([path])
    streamRef = FSEventStreamCreate(kCFAllocatorDefault,
                                    fsevents_callback,
                                    path,
                                    [path],
                                    settings.sinceWhen,
                                    settings.latency,
                                    settings.flags)
    if streamRef is None:
        # Fixed typo in the error message: was "FSEVentStreamCreate".
        settings.error("ERROR: FSEventStreamCreate() => NULL")
        return None
    if settings.verbose:
        FSEventStreamShow(streamRef)
    return streamRef
def main(argv=None):
    """Entry point: watch one directory tree and report size changes."""
    if argv is None:
        argv = sys.argv
    settings.parse_argv(argv)
    if settings.verbose or settings.print_settings:
        settings.dump()
    if settings.print_settings:
        return 0
    # Exactly one path to watch is required.
    if len(settings.array_of_paths) != 1:
        usage(argv[0])
    full_path = os.path.abspath(settings.array_of_paths[0])
    streamRef = my_FSEventStreamCreate(full_path)
    FSEventStreamScheduleWithRunLoop(streamRef, CFRunLoopGetCurrent(), kCFRunLoopDefaultMode)
    startedOK = FSEventStreamStart(streamRef)
    if not startedOK:
        settings.error("failed to start the FSEventStream")
        return
    # NOTE: we get the initial size *after* we start the
    # FSEventStream so that there is no window
    # during which we would miss events.
    #
    dir_sz = get_directory_size(full_path, 1)
    print "Initial total size is: %d for path: %s"%(get_total_size(), full_path)
    if settings.flush_seconds >= 0:
        settings.debug("CFAbsoluteTimeGetCurrent() => %.3f", CFAbsoluteTimeGetCurrent())
        # Periodically force the stream to flush pending events.
        timer = CFRunLoopTimerCreate(
            FSEventStreamGetSinceWhen(streamRef),
            CFAbsoluteTimeGetCurrent() + settings.flush_seconds,
            settings.flush_seconds,
            0, 0, timer_callback, streamRef)
        CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer, kCFRunLoopDefaultMode)
    # Run
    CFRunLoopRun()
    #Stop / Invalidate / Release
    FSEventStreamStop(streamRef)
    FSEventStreamInvalidate(streamRef)
    #FSEventStreamRelease(streamRef)
    return
#
#--------------------------------------------------------------------------------
# Routines to keep track of the size of the directory hierarchy
# we are watching.
#
# This code is not exemplary in any way. It should definitely
# not be used in production code as it is inefficient.
#
class dir_item (object):
    # NOTE(review): appears unused — dir_items below maps path -> size
    # directly instead of storing dir_item instances.
    __slots__ = ('dirname', 'size')
dir_items = {}
def get_total_size():
    """Sum all per-directory sizes tracked in dir_items (Python 2 dict API)."""
    return sum(dir_items.itervalues())
def iterate_subdirs(dirname, recursive):
    """(Re)compute the size of dirname's direct entries; maybe recurse.

    Subdirectory totals are stored in dir_items under their own keys,
    so the value returned (and recorded for dirname) covers only
    dirname's immediate entries.
    """
    dir_items[dirname] = 0
    try:
        names = os.listdir(dirname)
    except os.error, msg:
        print msg.errno, errno.EPERM
        # Directory vanished or is unreadable: forget it entirely.
        if msg.errno in (errno.ENOENT, errno.EPERM, errno.EACCES):
            del dir_items[dirname]
            return 0
        raise
    size = 0
    for nm in names:
        full_path = os.path.join(dirname, nm)
        st = os.lstat(full_path)
        size += st.st_size
        # Descend when asked to, or when this subdir has never been seen.
        if stat.S_ISDIR(st.st_mode) and (recursive or (full_path not in dir_items)):
            result = get_directory_size(full_path, 1) # NOTE(review): return value unused by design? confirm
    dir_items[dirname] = size
    return size
def check_for_deleted_dirs():
    """Drop dir_items entries whose directories no longer exist on disk.

    Fix: iterate over a snapshot of the keys so deleting entries while
    scanning cannot invalidate the iteration.  Under Python 2 keys()
    already returned a list, but this removes the latent hazard and is
    also correct under Python 3, where dict.keys() is a live view.
    """
    for path in list(dir_items.keys()):
        try:
            os.stat(path)
        except os.error:
            del dir_items[path]
def get_directory_size(dirname, recursive):
    """Refresh the bookkeeping, then measure dirname (recursively if asked)."""
    check_for_deleted_dirs()
    return iterate_subdirs(dirname, recursive)
if __name__ == "__main__":
    # Run the watcher when executed as a script.
    main()
| {
"repo_name": "dylanvee/pyobjc-framework-FSEvents",
"path": "Examples/watcher.py",
"copies": "2",
"size": "8744",
"license": "mit",
"hash": -748516633193887500,
"line_mean": 28.049833887,
"line_max": 129,
"alpha_frac": 0.565073193,
"autogenerated": false,
"ratio": 3.875886524822695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5440959717822694,
"avg_score": null,
"num_lines": null
} |
from fs import ResourceType
from fs.base import FS
from fs.info import Info
from fs.mode import Mode
from fs.subfs import SubFS
from fs.time import datetime_to_epoch
from materials_commons.api import Client, MCAPIError
class MCFS(FS):
    """PyFilesystem2 backend over the Materials Commons REST API.

    All paths are resolved inside a single Materials Commons project.
    NOTE(review): several FS methods below are unimplemented stubs and
    openbin() is visibly unfinished.
    """
    def __init__(self, project_id, apitoken, base_url):
        super(MCFS, self).__init__()
        self._project_id = project_id
        # REST client used for every API call.
        self._c = Client(apitoken, base_url)
    def listdir(self, path):
        """Return the paths of the entries in the directory at *path*."""
        self.check()
        dir_listing = self._c.list_directory_by_path(self._project_id, path)
        return [d.path for d in dir_listing]
    def makedir(self, path, permissions=None, recreate=False):
        """Create a directory; permissions and recreate are currently ignored."""
        self.check()
        self._c.create_directory_by_path(self._project_id, path)
        return SubFS(self, path)
    def openbin(self, path, mode="r", buffering=-1, **options):
        # NOTE(review): unfinished — on_close_create is defined but never
        # attached to any file object, upload_file() is called with a
        # hard-coded directory id (1) and a dangling argument list, and
        # the method returns None instead of a binary file object.
        self.check()
        _mode = Mode(mode)
        _mode.validate_bin()
        if _mode.create:
            def on_close_create(mcfile):
                try:
                    mcfile.raw.seek(0)
                    self._c.upload_file(self._project_id, 1, )
                finally:
                    pass
    def remove(self, path):
        # NOTE(review): stub — does nothing.
        pass
    def removedir(self, path):
        # NOTE(review): stub — does nothing.
        pass
    def setinfo(self, path, info):
        # NOTE(review): stub — does nothing.
        pass
    def isdir(self, path):
        """Return True if *path* names a directory (False on API errors)."""
        try:
            return self.getinfo(path).is_dir
        except MCAPIError:
            return False
    def getinfo(self, path, namespaces=None):
        """Build an fs.info.Info for *path*; the root is always a directory."""
        if path == "/":
            return Info({
                "basic": {"name": "", "is_dir": True},
                "details": {"type": int(ResourceType.directory)}
            })
        f = self._c.get_file_by_path(self._project_id, path)
        # The API flags directories via their mime type.
        is_dir = f.mime_type == "directory"
        return Info({
            "basic": {"name": f.name, "is_dir": is_dir},
            "modified": datetime_to_epoch(f.mtime),
            "size": f.size,
            "type": int(ResourceType.directory if is_dir else ResourceType.file)
        })
| {
"repo_name": "materials-commons/mcfs",
"path": "materials_commons/mcfs/mcfs.py",
"copies": "1",
"size": "2012",
"license": "mit",
"hash": 1586011188738673700,
"line_mean": 29.4848484848,
"line_max": 80,
"alpha_frac": 0.5516898608,
"autogenerated": false,
"ratio": 3.6849816849816848,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47366715457816844,
"avg_score": null,
"num_lines": null
} |
from fsm.FSM import STATE, EVENT, FSM
# pylint: skip-file
# flake8: noqa
# authenticate
# autocommit
# check_query_response
# close
# connected
# init_query
# isolation
# parse_auth_response
# parse_greeting
# query
# query_complete
# read_data_packet
# read_descriptor_packet
# ready
# transaction
# transaction_done
# transaction_end
def create(**actions):
    """Assemble the MySQL client-protocol FSM.

    Each keyword in **actions is a callback wired into a state's
    on_enter hook or an event's action list; the expected keys are
    listed in the comment block at the top of this module.  The body is
    generator-style output (see the skip-file pragmas above) and its
    code is deliberately left untouched.
    """
    S_init=STATE('init')
    S_greeting=STATE('greeting',on_enter=actions['parse_greeting'])
    S_authenticate=STATE('authenticate',on_enter=actions['authenticate'])
    S_autocommit=STATE('autocommit',on_enter=actions['autocommit'])
    S_isolation=STATE('isolation',on_enter=actions['isolation'])
    S_connected=STATE('connected',on_enter=actions['connected'])
    S_transaction=STATE('transaction',on_enter=actions['transaction'])
    S_query=STATE('query',on_enter=actions['init_query'])
    S_transaction_end=STATE('transaction_end')
    S_query_descriptors=STATE('query_descriptors')
    S_query_fields=STATE('query_fields')
    S_close=STATE('close',on_enter=actions['close'])
    # Per-state event tables: EVENT(name, [actions], optional-target-state).
    S_init.set_events([EVENT('packet',[], S_greeting),EVENT('query',[]),EVENT('close',[], S_close),])
    S_greeting.set_events([EVENT('done',[], S_authenticate),EVENT('query',[]),EVENT('close',[], S_close),])
    S_authenticate.set_events([EVENT('sent',[]),EVENT('ok',[actions['parse_auth_response']]),EVENT('done',[], S_autocommit),EVENT('query',[]),EVENT('close',[], S_close),])
    S_autocommit.set_events([EVENT('ok',[], S_isolation),EVENT('close',[], S_close),])
    S_isolation.set_events([EVENT('ok',[actions['ready']], S_connected),EVENT('close',[], S_close),])
    S_connected.set_events([EVENT('query',[actions['query']]),EVENT('transaction',[], S_transaction),EVENT('sent',[], S_query),EVENT('close',[], S_close),])
    S_transaction.set_events([EVENT('ok',[actions['transaction_done']], S_connected),EVENT('query',[]),EVENT('close',[], S_close),])
    S_query.set_events([EVENT('packet',[actions['check_query_response']]),EVENT('ok',[actions['transaction_end']], S_transaction_end),EVENT('done',[], S_query_descriptors),EVENT('close',[], S_close),])
    S_transaction_end.set_events([EVENT('ok',[actions['query_complete']]),EVENT('query',[]),EVENT('done',[], S_connected),EVENT('close',[], S_close),])
    S_query_descriptors.set_events([EVENT('packet',[actions['read_descriptor_packet']]),EVENT('eof',[], S_query_fields),EVENT('close',[], S_close),])
    S_query_fields.set_events([EVENT('packet',[actions['read_data_packet']]),EVENT('eof',[actions['transaction_end']]),EVENT('ok',[actions['query_complete']]),EVENT('done',[], S_connected),EVENT('query',[]),EVENT('close',[], S_close),])
    S_close.set_events([EVENT('done',[]),])
    return FSM([S_init,S_greeting,S_authenticate,S_autocommit,S_isolation,S_connected,S_transaction,S_query,S_transaction_end,S_query_descriptors,S_query_fields,S_close])
| {
"repo_name": "robertchase/spindrift",
"path": "spindrift/mysql/fsm_protocol.py",
"copies": "1",
"size": "2812",
"license": "mit",
"hash": -3024580423106565600,
"line_mean": 60.1304347826,
"line_max": 234,
"alpha_frac": 0.6899004267,
"autogenerated": false,
"ratio": 3.2735739231664724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4463474349866472,
"avg_score": null,
"num_lines": null
} |
from fsm.fsmspec import FSMSpecification
class START(object):
    """
    Example node plugin used only by the automated test suite.

    Convention: node names are ALL-CAPS, edge names lower-case.
    This class describes the START node.
    """
    def get_path(self, node, state, request, **kwargs):
        """Compute the URL for this node programmatically."""
        return '/ct/some/where/else/'

    def start_event(self, node, fsmStack, request, **kwargs):
        """Intercept the start event and trigger the 'next' transition.

        Returning None here instead would tell the generic UI to carry
        on as normal, i.e. the FSM would not redirect this event.
        """
        # ...any analysis would happen here, before transitioning.
        current_state = fsmStack.state
        return current_state.transition(fsmStack, request, 'next', **kwargs)

    def next_edge(self, edge, fsmStack, request, **kwargs):
        """Resolve the 'next' transition to its destination node."""
        # ...any processing would happen here.
        owning_fsm = edge.fromNode.fsm
        return owning_fsm.get_node('MID')

    # node specification data
    title = 'start here'
    path = 'ct:home'
    doLogging = True
    edges = (
        dict(name='next', toNode='END', title='go go go'),
    )
class MID(object):
    """Example middle node with an input filter and per-path help."""

    def next_filter(self, edge, obj):
        """Accept input for the 'next' edge only when it matches exactly."""
        return obj == 'the right stuff'

    def get_help(self, node, state, request):
        """Return help text for the request's path, or None if none applies."""
        return {
            '/ct/about/': 'here here!',
            '/ct/courses/1/': 'there there',
        }.get(request.path, None)

    title = 'in the middle'
    path = 'ct:about'
    edges = (
        dict(name='next', toNode='END', title='go go go'),
    )
def get_specs():
    """Return the FSM specifications defined in this module."""
    data_only_nodes = dict(
        END=dict(title='end here', path='ct:home'),
    )
    spec = FSMSpecification(
        name='test',
        title='try this',
        pluginNodes=[START, MID],  # nodes backed by plugin classes above
        nodeDict=data_only_nodes,  # all other (data-only) nodes
    )
    return (spec,)
# sub-FSM example code
class CALLER(object):
    """Example node that launches a sub-FSM when its 'call' edge fires."""

    def call_edge(self, edge, fsmStack, request, **kwargs):
        """Push the sub-FSM and stash its URL on the destination node."""
        destination = edge.toNode
        destination._path = fsmStack.push(request, 'SUBFSMNAME')
        return destination

    edges = (
        dict(name='call', toNode='WAITER', title='start a sub-fsm'),
    )
class WAITER(object):
    """Example node that idles while a sub-FSM runs."""

    def get_path(self, node, state, request, **kwargs):
        """Hand back the stored URL of our sub-FSM."""
        return self._path

    edges = (
        dict(
            name='subfsmdone',
            toNode='SOMENODE',
            title='continue after sub-fsm done'),
    )
| {
"repo_name": "derdmitry/socraticqs2",
"path": "mysite/fsm/fsm_plugin/testme.py",
"copies": "3",
"size": "2994",
"license": "apache-2.0",
"hash": -136316236786729440,
"line_mean": 28.6435643564,
"line_max": 78,
"alpha_frac": 0.5868403474,
"autogenerated": false,
"ratio": 3.9342969776609724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021523891519586742,
"num_lines": 101
} |
from FSM import FSM
def parse(s):
    """Parse regular expression *s* and return the equivalent FSM."""
    return RegExParser(s).parse()
class RegExParser:
    """Recursive-descent parser turning a regular expression into an FSM.

    Grammar:
        expr   := term ('|' term)*
        term   := factor*          (an empty term yields FSM(""))
        factor := atom '*'*
        atom   := '(' expr ')' | single literal character
    """
    def __init__(self, s):
        self.s = s
        self.i = 0

    def next(self):
        # Consume the current character.
        self.i += 1

    def token(self):
        # Current character, or None once the input is exhausted.
        return self.s[self.i] if self.i < len(self.s) else None

    def parse(self):
        """Parse the whole input; raise ValueError on trailing garbage."""
        machine = self.expr()
        if self.token():
            raise ValueError
        return machine

    def expr(self):
        # term ('|' term)* — fold alternatives with unionize().
        machine = self.term()
        while self.token() == "|":
            self.next()
            machine.unionize(self.term())
        return machine

    def term(self):
        # Concatenate factors until an alternation/group boundary or EOF.
        stoppers = {"|", ")", None}
        machine = FSM("") if self.token() in stoppers else self.factor()
        while self.token() not in stoppers:
            machine.concatenate(self.factor())
        return machine

    def factor(self):
        # atom followed by zero or more '*'; repeated stars collapse to one.
        machine = self.atom()
        if self.token() == "*":
            machine.close()
        while self.token() == "*":
            self.next()
        return machine

    def atom(self):
        tok = self.token()
        if tok == "(":
            self.next()
            machine = self.expr()
            if self.token() != ")":
                raise ValueError
            self.next()
            return machine
        if tok not in {")", "|", "*"}:
            machine = FSM(tok)
            self.next()
            return machine
        raise ValueError
| {
"repo_name": "Irishmanluke/FSMGraphSimulator",
"path": "FSMRegEx.py",
"copies": "1",
"size": "1501",
"license": "mit",
"hash": -5762459689710625000,
"line_mean": 19.8472222222,
"line_max": 49,
"alpha_frac": 0.4237175217,
"autogenerated": false,
"ratio": 3.868556701030928,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9647331938187813,
"avg_score": 0.02898845690862302,
"num_lines": 72
} |
from fsm import *
from diary import *
import time
import os
class Application:
@staticmethod
def create_diary():
"""
In this function we create the table of transitions between programs states and get info about the user.
"""
states_dict = {}
states_dict[Application.__main_menu] = [('add note', None, Application.__add_note), ('add task', None, Application.__add_task), ('show tasks', None, Application.__show_all_tasks), ('show t_tasks', None, Application.__show_todays_tasks), ('show notes', None, Application.show_all_notes), ('exit', None, Application.exit)]
states_dict[Application.__add_note] = [('view', None, Application.__show_note_by_id), ('apply', None, Application.__main_menu), ('cancel', None, Application.__remove_note)]
states_dict[Application.__add_task] = [('view', None, Application.__show_task_by_id), ('apply', None, Application.__main_menu), ('cancel', None, Application.__remove_note)]
states_dict[Application.show_all_notes] = [('main menu', None, Application.__main_menu), ('show note', None, Application.__show_note_by_id)]
states_dict[Application.__show_all_tasks] = [('main menu', None, Application.__main_menu), ('show task', None, Application.__show_task_by_id), ('show t_tasks', None, Application.__show_todays_tasks)]
states_dict[Application.__show_note_by_id] = [('edit', None, Application.__edit_current_note), ('delete', None, Application.__remove_note), ('return', None, Application.show_all_notes), ('main menu', None, Application.__main_menu)]
states_dict[Application.__show_task_by_id] = [('edit', None, Application.__edit_current_task), ('delete', None, Application.__remove_note), ('return', None, Application.__show_all_tasks), ('main menu', None, Application.__main_menu)]
states_dict[Application.__edit_current_note] = [('main menu', None, Application.__main_menu), ('return', None, Application.show_all_notes), ('message', None, Application.__show_note_by_id)]
states_dict[Application.__edit_current_task] = [('main menu', None, Application.__main_menu), ('return', None, Application.__show_all_tasks), ('task', None, Application.__show_task_by_id), ('t tasks', None, Application.__show_todays_tasks)]
states_dict[Application.__remove_note] = [('main menu', None, Application.__main_menu), ('notes', None, Application.show_all_notes), ('tasks', None, Application.__show_all_tasks)]
states_dict[Application.__show_todays_tasks] = [('main menu', None, Application.__main_menu), ('show task', None, Application.__show_task_by_id)]
Application.__brain = FSM(Application.__main_menu, states_dict, Application.__main_menu)
owners_name = input('Enter your name')
owners_surname = input('Enter your surname')
owners_birthday = input('Enter your birthday dd/mm/yyyy')
owner = Person(owners_name, owners_surname, owners_birthday)
d_name = input('Enter diary`s name')
Application.diary = Diary(d_name, owner)
    @staticmethod
    def exit():
        """
        Terminate the program.  Exists as a staticmethod so that the main
        menu's 'exit' entry can reference it like every other handler.
        """
        # SystemExit derives from BaseException, so run()'s
        # "except Exception" keep-alive loop does not swallow it.
        raise SystemExit(0)
    @staticmethod
    def __add_note():
        """
        Prompt for a title and text and store a new note in the diary.

        The new entry's id is remembered in Application.current_entry so
        the follow-up actions (view/cancel) can target it.
        """
        os.system('clear')  # clear the terminal on POSIX...
        os.system('cls')    # ...and on Windows; the other call is a no-op
        title = input('Enter notes name: ')
        text = input('Enter note`s text: ')
        created_on = time.strftime('%x')
        Application.diary.add_note(title, created_on, text)
        Application.current_entry = Application.diary.id
    @staticmethod
    def __add_task():
        """
        Prompt for title, deadline and text and store a new task (a note
        with a deadline) in the diary.  Mirrors __add_note().
        """
        os.system('clear')
        os.system('cls')
        title = input('Enter tasks name: ')
        deadline = input('Enter tasks deadline(mm/dd/yy): ')
        text = input('Enter tasks text: ')
        created_on = time.strftime('%x')
        Application.diary.add_note(title, created_on, text, True, deadline)
        Application.current_entry = Application.diary.id
def show_all_notes():
"""
This function is used to print all the notes that user had entered to the system. If the
lenght of the notes body is more then 100 characters, show only first 100 + ...
"""
os.system('clear')
os.system('cls')
notes = [note for note in Application.diary.get_notes(None) if note.is_event is False]
print('Notes in {} are:\n'.format(Application.diary.name))
for note in notes:
print(note.id)
print(note.name)
print(note.created_date)
if len(note.text) > 100:
print(note.text[0:100] + '....')
else:
print(note.text)
print()
    @staticmethod
    def __show_all_tasks():
        """
        Print every task (entry with a deadline) in the diary, truncating
        bodies longer than 100 characters.
        """
        os.system('clear')
        os.system('cls')
        tasks = [task for task in Application.diary.get_notes() if task.is_event is True]
        print('Tasks in {} are:\n'.format(Application.diary.name))
        for task in tasks:
            print(task.id)
            print(task.name)
            print(task.created_date)
            print(task.event_date)
            if len(task.text) > 100:
                print(task.text[0:100] + '....')
            else:
                print(task.text)
            print()
@staticmethod
def __show_note_by_id():
"""
This function is used to show information about one note from the diary. Ids are unique so, we can
use them to index our data.
"""
id = input('Enter notes id: ')
Application.current_entry = id
print(Application.diary.get_notes())
print(Application.diary.get_notes()[0].id)
print(Application.diary.get_notes()[0].is_event)
note = [note for note in Application.diary.get_notes() if str(note.id) == str(id) and note.is_event is False][0]
os.system('clear')
os.system('cls')
print('Notes id is {}'.format(note.id))
print('Notes name is: {}'.format(note.name))
print('Notes was created on: {}'.format(note.created_date))
print('Notes deadline is on: {}'.format(note.event_date))
print('Note is:\n' + note.text)
print()
    @staticmethod
    def __show_task_by_id():
        """
        Show one task in full, selected by id entered on the console.
        Ids are unique, so they can be used to index the data.
        """
        id = input('Enter tasks id: ')
        os.system('clear')
        os.system('cls')
        Application.current_entry = int(id)
        note = [note for note in Application.diary.get_notes() if note.id == int(id)][0]
        os.system('clear')  # NOTE(review): screen already cleared above; this pair is redundant
        os.system('cls')
        print('Notes id is {}'.format(note.id))
        print('Notes name is: {}'.format(note.name))
        print('Notes was created on: {}'.format(note.created_date))
        print('Notes deadline is on: {}'.format(note.event_date))
        print('Note is:\n' + note.text)
        print()
    @staticmethod
    def __show_entries_by_date():
        """
        Print all entries (notes and tasks) for a given date.

        NOTE(review): not referenced by the transition table built in
        create_diary(), so it appears unreachable from the menu.
        """
        date = str(input('Enter the date in format "mm/dd/yy"'))
        os.system('clear')
        os.system('cls')
        notes = Application.diary.get_notes_date(date)
        for note in notes:
            print('Notes id is {}'.format(note.id))
            print('Notes name is: {}'.format(note.name))
            print('Notes was created on: {}'.format(note.created_date))
            print('Notes deadline is on: {}'.format(note.event_date))
            print('Note is:\n' + note.text)
            print()
    @staticmethod
    def __show_todays_tasks():
        """
        Print every task whose deadline matches today's date
        (compared via the locale date string, same format as stored).
        """
        notes = [note for note in Application.diary.get_notes() if str(note.event_date) == str(time.strftime('%x'))]
        os.system('clear')
        os.system('cls')
        for note in notes:
            print('Notes id is {}'.format(note.id))
            print('Notes name is: {}'.format(note.name))
            print('Notes was created on: {}'.format(note.created_date))
            print('Notes deadline is on: {}'.format(note.event_date))
            print('Note is:\n' + note.text)
            print()
    @staticmethod
    def __edit_current_note():
        """
        Edit the note whose id is stored in Application.current_entry.
        The user may change the title and/or the text; answering anything
        other than 'y' keeps the existing value (passed through as None).
        """
        os.system('clear')
        os.system('cls')
        title = input('Do you want to edit the title? (Y/n)? ')
        if title.lower() == 'y':
            new_title = input('Enter new title: ')
        else:
            new_title = None
        body = input('Do you want to edit the text of the note? (Y/n)? ')
        if body.lower() == 'y':
            new_body = input('Enter new text: ')
        else:
            new_body = None
        Application.diary.update_note(Application.current_entry, title=new_title, text=new_body)
    @staticmethod
    def __edit_current_task():
        """
        Edit the task whose id is stored in Application.current_entry.
        The user may change the title, the text and/or the deadline; when
        the deadline is kept, its current value is re-sent explicitly.
        """
        os.system('clear')
        os.system('cls')
        title = input('Do you want to edit the title of the task? (Y/n)? ')
        if title.lower() == 'y':
            new_title = input('Enter new title: ')
        else:
            new_title = None
        body = input('Do you want to edit the text of the task? (Y/n)? ')
        if body.lower() == 'y':
            new_body = input('Enter new text: ')
        else:
            new_body = None
        deadline = input('Do you want to edit the deadline of the task? (Y/n)? ')
        if deadline.lower() == 'y':
            new_deadline = input('Enter new deadline(mm/dd/yy): ')
        else:
            # Keep the existing deadline by looking it up and passing it back.
            new_deadline = [task for task in Application.diary.get_notes() if task.id == Application.current_entry][0].event_date
        Application.diary.update_note(Application.current_entry, title=new_title, text=new_body, deadline=new_deadline)
    @staticmethod
    def __remove_note():
        """
        Remove the entry whose id is in Application.current_entry.
        Notes and tasks live in the same list, so no distinction is needed.
        """
        Application.diary.remove_by_id(Application.current_entry)
    @staticmethod
    def __main_menu():
        """
        Intentional no-op: exists only so the FSM has a callable handler
        for the main-menu state (same reason exit() is a staticmethod).
        """
        pass
@staticmethod
def __print_menu():
"""
This function is responsible for the main part of interaction with user. Actually, one of the reasons to add it, was that it
very clear shows, that FSM is the main 'brain' of this program. We check the FSM`s state, and depending on it, print the user menu.
"""
if Application.__brain.state == Application.__main_menu:
print('Welcome to your diary, {}!\nYou`re in the main menu now. Here are available actions'.format(Application.diary.owners_name))
print('Print "add note" to create a new note')
print('Print "add task" to create a new task')
print('Print "show tasks" to show all your tasks')
print('Print "show t_tasks" to show all tasks which have deadline today')
print('Print "show notes" to show all your notes')
print('Print "exit" to exit the application')
elif Application.__brain.state == Application.__add_note:
print('To add the note print "apply"')
print('To view the note print "view"')
print('To cancel print "cancel"')
elif Application.__brain.state == Application.__add_task:
print('To add the task print "apply"')
print('To view the task print "view"')
print('To cancel print "cancel"')
elif Application.__brain.state == Application.show_all_notes:
print('To return to main menu print "main menu"')
print('To view full info about one note print "show note"')
elif Application.__brain.state == Application.__show_all_tasks:
print('To view full info about one task print "show task"')
print('To return to main menu print "main menu"')
elif Application.__brain.state == Application.__show_todays_tasks:
print('To view full info about one task print "show task"')
print('To return to main menu print "main menu"')
elif Application.__brain.state == Application.__show_note_by_id:
print('To edit this note print "edit"')
print('To delete this note print "delete"')
print('To see all notes print "return"')
print('To return to main menu print "menu"')
elif Application.__brain.state == Application.__show_task_by_id:
print('To edit this task print "edit"')
print('To close this task print "close"')
print('To see all task print "return"')
print('To return to main menu print "menu"')
elif Application.__brain.state == Application.__edit_current_note:
print('To return to the list of all notes print "return"')
print('To see current note print "message"')
print('To return to the main menu print "main menu"')
elif Application.__brain.state == Application.__edit_current_task:
print('To see the list of all tasks print "return"')
print('To see the list of todays tasks print "t tasks"')
print('To see current task print "task"')
print('To return to the main menu print "main menu"')
elif Application.__brain.state == Application.__remove_note:
print('To go to main menu print "main menu"')
print('To see all notes print "notes"')
print('To see all tasks print "tasks"')
else:
print(Application.__brain.state.__func__())
print('asdasdasf')
    @staticmethod
    def run():
        """
        Main loop: show the menu, read a command, feed it to the FSM and
        execute the resulting state handler.
        """
        while 1:
            try:
                Application.__print_menu()
                message = input()
                Application.__brain.handle_message(message)
                Application.__brain.state()
            except Exception:
                # NOTE(review): swallows every error silently, which makes
                # failures hard to diagnose; SystemExit is a BaseException
                # and still escapes, so the 'exit' command works.
                continue
# Module-level bootstrap: gather the owner's details, then enter the menu loop.
Application.create_diary()
Application.run()
| {
"repo_name": "dubovyk/Finite-State-Machine",
"path": "Examples/Diary/main.py",
"copies": "1",
"size": "15373",
"license": "mit",
"hash": 904382216965844000,
"line_mean": 45.726443769,
"line_max": 328,
"alpha_frac": 0.5916867235,
"autogenerated": false,
"ratio": 4.085304278501196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010285644495962964,
"num_lines": 329
} |
from fsm import *
import cocos
import pyglet
from pyglet.window import key
width = 700   # NOTE(review): unused — main() passes 700 to director.init literally
height = 700  # NOTE(review): unused, same as above
class RotatingText(cocos.text.Label):
    """A text label that spins continuously at 60 degrees per second."""

    def __init__(self, text='', pos=(0, 0)):
        super(RotatingText, self).__init__(text=text, position=pos, font_size=32)
        # Run self.update once per frame with the elapsed time.
        self.schedule(self.update)

    def update(self, dt):
        """Advance the rotation by 60 deg/s, scaled by the frame time *dt*."""
        self.rotation = self.rotation + 60 * dt
class KeyboardControlledObject(cocos.actions.Move):
    """Move action that maps the arrow keys to a fixed-speed velocity."""

    def step(self, dt):
        super(KeyboardControlledObject, self).step(dt)
        # Each KeyStateHandler entry is 0/1, so the differences give -1/0/+1.
        dx = keyboard[key.RIGHT] - keyboard[key.LEFT]
        dy = keyboard[key.UP] - keyboard[key.DOWN]
        self.target.velocity = (100 * dx, 100 * dy)
def main():
    """Set up the cocos director, sprite, label and keyboard, then run."""
    global keyboard
    cocos.director.director.init(width=700, height=700)
    player_layer = cocos.layer.Layer()
    me = cocos.sprite.Sprite('img.png')
    me.scale = 0.512
    text = RotatingText('Hi guys!', (500, 500))
    # Set initial position and velocity.
    me.position = (200, 100)
    me.velocity = (0, 0)
    # Set the sprite's movement class.
    me.do(KeyboardControlledObject())
    player_layer.add(me)
    player_layer.add(text)
    # Create a scene and set its initial layer.
    main_scene = cocos.scene.Scene(player_layer)
    # Attach a KeyStateHandler to the keyboard object.
    keyboard = key.KeyStateHandler()
    cocos.director.director.window.push_handlers(keyboard)
    # Play the scene in the window.
    cocos.director.director.run(main_scene)
main()
| {
"repo_name": "dubovyk/Finite-State-Machine",
"path": "Examples/2D-Game/main.py",
"copies": "1",
"size": "1491",
"license": "mit",
"hash": -8834158461654186000,
"line_mean": 27.1320754717,
"line_max": 81,
"alpha_frac": 0.665325285,
"autogenerated": false,
"ratio": 3.411899313501144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9562597026349412,
"avg_score": 0.00292551443034646,
"num_lines": 53
} |
from .fsm import Transition
class CharacterTransition(Transition):
    """
    A transition that fires only when the input datum equals a fixed character.
    """
    def __init__(self, source, target, character):
        """
        Build the transition; the base-class condition is left as None
        because validity is decided entirely by is_valid() below.
        """
        super(CharacterTransition, self).__init__(source, target, None)
        self.character = character

    def is_valid(self, data):
        """
        Return True when *data* matches the expected character.
        """
        return self.character == data
def generate_fsm(tokens, initial_state, condition_terminate):
    """
    Generate the states and transitions used by a finite state machine to
    extract a set of tokens with the given initial state and termination
    condition. The termination condition is used as the transition to the
    end state for each token.

    tokens: iterable of token objects; token.value[1] is treated as the
        token's character string (assumption from usage — confirm against
        the token type's definition).
    initial_state: state name every token match starts from.
    condition_terminate: condition attached to each token's edge into its
        final "token_<text>" state.
    Returns (state_names, transitions) where transitions is the flattened
    list of every state's outgoing Transition objects.
    """
    states = {}
    # iterate through the tokens
    for token in tokens:
        # start at the initial state
        current_state, next_state = None, initial_state
        # iterate through the token characters in order
        collected = ''
        for character in token.value[1]:
            # keep track of collected characters
            collected += character
            # move to the next character state which is determined using
            # the characters collected so far. this allows tokens with common
            # characters at the beginning to share those states and avoid
            # creating parallel valid transitions
            current_state = next_state
            next_state = "char_{0}".format(collected)
            # check if the next state already exists
            if next_state in states:
                # condition was already created so there is
                # nothing to do but move on to next character
                continue
            # create the condition to move from the current state to the
            # next state and add it to the current state's list
            states.setdefault(current_state, [])
            states[current_state].append(CharacterTransition(
                current_state,
                next_state,
                character))
        # create the condition to the end state for extracting the token
        current_state = next_state
        final_state = "token_{0}".format(token.value[1])
        states.setdefault(current_state, [])
        states[current_state].append(Transition(
            current_state,
            final_state,
            condition_terminate))
        # make sure the final state exists
        states.setdefault(final_state, [])
    # return the list of states and transitions
    return list(states.keys()), sum(states.values(), [])
| {
"repo_name": "CtrlC-Root/cse3341",
"path": "Core/cse3341/generator.py",
"copies": "1",
"size": "2687",
"license": "mit",
"hash": -6081819725521614000,
"line_mean": 32.1728395062,
"line_max": 77,
"alpha_frac": 0.6122069222,
"autogenerated": false,
"ratio": 5.108365019011407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6220571941211408,
"avg_score": null,
"num_lines": null
} |
from fsm.models import ActivityLog
FSM_NAMES_TO_RESET_ON_RECYCLE = ['live_chat']
def quit_edge(self, edge, fsmStack, request, **kwargs):
    """
    Edge method that terminates this live-session.
    """
    # Detach every student FSM linked to this live session so their state
    # no longer follows ours, then follow the edge to END.
    children = fsmStack.state.linkChildren.all()
    for child_state in children:
        child_state.linkState = None
        child_state.save()
    return edge.toNode
# Shared edge specification: nodes that offer a 'quit' option append this
# dict to their ``edges`` tuple; it routes the user to the END node.
QuitEdgeData = dict(
    name='quit',
    toNode='END',
    title='End this live-session',
    description='''If you have no more questions to ask, end
    this live session.''',
    help='''Click here to end this live-session. ''',
    showOption=True,
)
class START(object):
    """
    This activity will allow you to select questions
    for students to answer in-class.
    """
    def start_event(self, node, fsmStack, request, **kwargs):
        'event handler for START node'
        state = fsmStack.state
        unit = state.get_data_attr('unit')
        course = state.get_data_attr('course')
        state.title = 'Teaching: %s' % unit.title
        # record a fresh ActivityLog for this live session
        new_activity = ActivityLog(
            fsmName=state.fsmNode.fsm.name,
            course=course
        )
        new_activity.save()
        state.activity = new_activity
        state.isLiveSession = True
        return node.get_path(state, request, **kwargs)
    # node specification data goes here
    path = 'fsm:fsm_node'
    title = 'Start Teaching a Live Session'
    edges = (
        dict(name='next', toNode='CHOOSE', title='Start asking a question',
             showOption=True),
    )
class CHOOSE(object):
    """
    At this step you choose a question to ask in this live session.
    """
    # Edge filter: only question-type UnitLessons may be selected.
    def select_UnitLesson_filter(self, edge, unit_lesson):
        """
        Return True if input is acceptable for this edge.
        input: UnitLesson
        """
        return unit_lesson.is_question()
    # node specification data goes here
    path = 'ct:unit_lessons'
    title = 'Choose a Question to Ask'
    help = '''Select a question below that you want to ask your students in this
    live session, then click its Ask this Question button. '''
    edges = (
        dict(name='select_UnitLesson', toNode='QUESTION',
             title='Ask this question',
             help='''Click here to start posing this question to your
             live session students.'''),
        QuitEdgeData
    )
class QUESTION(object):
    # NOTE: no class docstring here — FSMSpecification reads __doc__ as the
    # node description, so this node has no description.
    path = 'ct:live_question'
    title = 'Ask a question to students in a classroom live-session'
    help = '''Explain the question and ask if there are any aspects
    where the students are unsure what exactly they are being asked.
    Then click the START button and ask the students to think about
    the question for a minute or so, then briefly type whatever
    answer they come up with. You will be able to monitor their
    progress on this page in real-time.'''
    # 'next' advances to self-assessment; QuitEdgeData offers ending the session.
    edges = (
        dict(name='next', toNode='ANSWER', title='Present the answer',
             help='''Click here to move to the assessment stage of this
             exercise. '''),
        QuitEdgeData
    )
class ANSWER(object):
    # reuse the shared quit handler: 'quit' detaches linked student FSMs
    quit_edge = quit_edge
    path = 'ct:ul_teach'
    title = 'Present the answer for students to self-assess'
    help = '''Explain the answer and ask if there are any aspects
    the students are wondering about. Then ask them to assess
    their own answer against the correct answer'''
    edges = (
        dict(name='next', toNode='RECYCLE', title='Finish this question',
             help='''Click here to end this question. '''),
        QuitEdgeData
    )
class RECYCLE(object):
    """
    You have completed presenting this question. Do you want to
    ask the students another question, or end this live session?
    """
    quit_edge = quit_edge
    def next_edge(self, edge, fsmStack, request, pageData=None, **kwargs):
        'make sure timer is reset before going to another question'
        pageData.set_refresh_timer(request, False)
        # reset all child nodes to WAIT_ASK node.
        # Only FSMs named in FSM_NAMES_TO_RESET_ON_RECYCLE are touched;
        # select_related avoids an extra query per child below.
        children = fsmStack.state.linkChildren.filter(
            fsmNode__fsm__name__in=FSM_NAMES_TO_RESET_ON_RECYCLE
        ).select_related('fsmNode__fsm')
        for item in children:
            fsm = item.fsmNode.fsm
            wait_ask_node = item.fsmNode.__class__.objects.filter(fsm=fsm, name='WAIT_ASK').first()
            if wait_ask_node:
                item.fsmNode = wait_ask_node
                item.save()
        return edge.toNode
    path = 'fsm:fsm_node'
    title = 'Do you want to ask another question?'
    edges = (
        dict(name='next', toNode='CHOOSE', title='Move on to another question',
             help='''Click here to choose another question to ask. '''),
        QuitEdgeData
    )
class END(object):
    # Terminal node: QuitEdgeData and the normal flow both route here.
    # node specification data goes here
    path = 'ct:unit_tasks'
    title = 'Live Session completed'
    help = '''You have successfully ended this live-session.
    See below for suggested next steps for what you can work on next
    to help students with this courselet.'''
def get_specs():
    'get FSM specifications stored in this file'
    # local import — presumably avoids an import cycle with fsm.fsmspec;
    # TODO confirm
    from fsm.fsmspec import FSMSpecification
    spec = FSMSpecification(
        name='liveteach',
        title='Teach a live (classroom) session',
        description='''You can begin teaching this courselet in a
        live classroom session by clicking here:''',
        pluginNodes=[START, CHOOSE, QUESTION, ANSWER, RECYCLE, END],
        fsmGroups=('teach/unit/published',),
    )
    return (spec,)
| {
"repo_name": "raccoongang/socraticqs2",
"path": "mysite/ct/fsm_plugin/live.py",
"copies": "2",
"size": "5604",
"license": "apache-2.0",
"hash": 4994816948992068000,
"line_mean": 34.025,
"line_max": 99,
"alpha_frac": 0.6316916488,
"autogenerated": false,
"ratio": 3.977288857345635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5608980506145635,
"avg_score": null,
"num_lines": null
} |
from fsm.models import ActivityLog
def quit_edge(self, edge, fsmStack, request, **kwargs):
    """
    Edge method that terminates this live-session.
    """
    # Unlink every student FSM attached to this session, then follow the
    # edge to the END node.
    for child in fsmStack.state.linkChildren.all():
        child.linkState = None
        child.save()
    return edge.toNode
QuitEdgeData = dict(
name='quit', toNode='END', title='End this live-session',
description='''If you have no more questions to ask, end
this live session.''',
help='''Click here to end this live-session. ''',
showOption=True,
)
class START(object):
    """
    This activity will allow you to select questions
    for students to answer in-class.
    """
    def start_event(self, node, fsmStack, request, **kwargs):
        'event handler for START node'
        unit = fsmStack.state.get_data_attr('unit')
        course = fsmStack.state.get_data_attr('course')
        fsmStack.state.title = 'Teaching: %s' % unit.title
        activity = ActivityLog(
            fsmName=fsmStack.state.fsmNode.fsm.name,
            course=course
        )  # create a new activity
        activity.save()
        fsmStack.state.activity = activity
        # flag consumed by the live-session machinery
        fsmStack.state.isLiveSession = True
        return node.get_path(fsmStack.state, request, **kwargs)
    # node specification data goes here
    path = 'fsm:fsm_node'
    title = 'Start Teaching a Live Session'
    edges = (
        dict(name='next', toNode='CHOOSE', title='Start asking a question',
             showOption=True),
    )
class CHOOSE(object):
    """
    At this step you choose a question to ask in this live session.
    """
    # Edge filter: only question-type UnitLessons may be selected.
    def select_UnitLesson_filter(self, edge, unit_lesson):
        """
        Return True if input is acceptable for this edge.
        input: UnitLesson
        """
        return unit_lesson.is_question()
    # node specification data goes here
    path = 'ct:unit_lessons'
    title = 'Choose a Question to Ask'
    help = '''Select a question below that you want to ask your students in this
    live session, then click its Ask this Question button. '''
    edges = (
        dict(name='select_UnitLesson', toNode='QUESTION',
             title='Ask this question',
             help='''Click here to start posing this question to your
             live session students.'''),
    )
class QUESTION(object):
    # NOTE: no class docstring — FSMSpecification reads __doc__ as the
    # node description, so this node has no description.
    path = 'ct:live_question'
    title = 'Ask a question to students in a classroom live-session'
    help = '''Explain the question and ask if there are any aspects
    where the students are unsure what exactly they are being asked.
    Then click the START button and ask the students to think about
    the question for a minute or so, then briefly type whatever
    answer they come up with. You will be able to monitor their
    progress on this page in real-time.'''
    edges = (
        dict(name='next', toNode='ANSWER', title='Present the answer',
             help='''Click here to move to the assessment stage of this
             exercise. '''),
    )
class ANSWER(object):
    # reuse the shared quit handler: 'quit' detaches linked student FSMs
    quit_edge = quit_edge
    path = 'ct:ul_teach'
    title = 'Present the answer for students to self-assess'
    help = '''Explain the answer and ask if there are any aspects
    the students are wondering about. Then ask them to assess
    their own answer against the correct answer'''
    edges = (
        dict(name='next', toNode='RECYCLE', title='Finish this question',
             help='''Click here to end this question. '''),
        QuitEdgeData,
    )
class RECYCLE(object):
    """
    You have completed presenting this question. Do you want to
    ask the students another question, or end this live session?
    """
    def next_edge(self, edge, fsmStack, request, pageData=None, **kwargs):
        'make sure timer is reset before going to another question'
        pageData.set_refresh_timer(request, False)
        return edge.toNode
    path = 'fsm:fsm_node'
    title = 'Do you want to ask another question?'
    edges = (
        dict(name='next', toNode='CHOOSE', title='Move on to another question',
             help='''Click here to choose another question to ask. '''),
        QuitEdgeData,
    )
class END(object):
    # Terminal node: QuitEdgeData and the normal flow both route here.
    # node specification data goes here
    path = 'ct:unit_tasks'
    title = 'Live Session completed'
    help = '''You have successfully ended this live-session.
    See below for suggested next steps for what you can work on next
    to help students with this courselet.'''
def get_specs():
    'get FSM specifications stored in this file'
    # local import — presumably avoids an import cycle with fsm.fsmspec;
    # TODO confirm
    from fsm.fsmspec import FSMSpecification
    spec = FSMSpecification(
        name='liveteach',
        title='Teach a live (classroom) session',
        description='''You can begin teaching this courselet in a
        live classroom session by clicking here:''',
        pluginNodes=[START, CHOOSE, QUESTION, ANSWER, RECYCLE, END],
        fsmGroups=('teach/unit/published',),
    )
    return (spec,)
| {
"repo_name": "derdmitry/socraticqs2",
"path": "mysite/ct/fsm_plugin/live.py",
"copies": "1",
"size": "5005",
"license": "apache-2.0",
"hash": 4395845462270979600,
"line_mean": 34.2464788732,
"line_max": 80,
"alpha_frac": 0.6385614386,
"autogenerated": false,
"ratio": 4.04607922392886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0000869414014953921,
"num_lines": 142
} |
from fsm.models import FSM
class FSMSpecification(object):
    """
    Convenience class for specifying an FSM graph, loading it.
    """
    def __init__(self, name, title, nodeDict=None, edges=None,
                 pluginNodes=(), attrs=('help', 'path', 'data', 'doLogging'),
                 fsmGroups=(), **kwargs):
        # fold name/title into the arbitrary FSM attribute dict
        kwargs['name'] = name
        kwargs['title'] = title
        self.fsmData = kwargs
        self.fsmGroups = fsmGroups
        nodeDict = nodeDict or {}
        edges = edges or []
        for node in pluginNodes:  # expect list of node class objects
            node_name = node.__name__
            # the class docstring doubles as the node's description
            node_spec = dict(
                title=node.title,
                funcName=node.__module__ + '.' + node_name,
                description=getattr(node, '__doc__', None),
            )
            # copy over any optional node attributes that are present
            for attr in attrs:
                if hasattr(node, attr):
                    node_spec[attr] = getattr(node, attr)
            nodeDict[node_name] = node_spec
            for edge in getattr(node, 'edges', ()):
                edge_spec = dict(edge)  # copy: don't mutate the class's spec
                edge_spec['fromNode'] = node_name
                edges.append(edge_spec)
        self.nodeData = nodeDict
        self.edgeData = edges

    def save_graph(self, *args, **kwargs):
        """
        Load this FSM specification into the database.
        """
        return FSM.save_graph(
            self.fsmData,
            self.nodeData,
            self.edgeData,
            fsmGroups=self.fsmGroups,
            *args,
            **kwargs
        )
class CallerNode(object):
    """
    Base class for node representing a call to a sub-FSM.
    """
    def exceptCancel_edge(self, edge, fsmStack, request, **kwargs):
        """
        Implements default behavior: if sub-FSM cancelled, we cancel too.
        """
        destination = edge.toNode
        # pop ourselves off the stack with the cancel event
        fsmStack.pop(request, eventName='exceptCancel')
        return destination
def deploy(mod_path, username):
    """
    Load FSM specifications found in the specified plugin module.
    """
    import importlib
    mod = importlib.import_module(mod_path)
    # save every spec declared by the plugin and collect the resulting FSMs
    return [fsm_spec.save_graph(username) for fsm_spec in mod.get_specs()]
def deploy_all(username, ignore=('testme', '__init__'),
               pattern='*/fsm_plugin/*.py'):
    """
    Load all FSM specifications found via pattern but not ignore.
    """
    import glob
    fsm_list = []
    for modpath in glob.glob(pattern):
        parts = modpath[:-3].split('/')  # strip '.py', split into package path
        if parts[-1] in ignore:
            continue
        fsm_list.extend(deploy('.'.join(parts), username))
    return fsm_list
| {
"repo_name": "derdmitry/socraticqs2",
"path": "mysite/fsm/fsmspec.py",
"copies": "3",
"size": "2754",
"license": "apache-2.0",
"hash": 297000078969346900,
"line_mean": 30.6551724138,
"line_max": 77,
"alpha_frac": 0.5464778504,
"autogenerated": false,
"ratio": 4.11044776119403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 87
} |
from fs.mountfs import MountFS
from fs.memoryfs import MemoryFS
import unittest
class TestMountFS(unittest.TestCase):
    """Tests for fs.mountfs.MountFS.

    FIX: the deprecated ``assert_`` alias (removed in Python 3.12) is
    replaced with assertTrue/assertFalse throughout.
    """
    def test_auto_close(self):
        """Test MountFS auto close is working"""
        multi_fs = MountFS()
        m1 = MemoryFS()
        m2 = MemoryFS()
        multi_fs.mount('/m1', m1)
        multi_fs.mount('/m2', m2)
        self.assertFalse(m1.closed)
        self.assertFalse(m2.closed)
        multi_fs.close()
        self.assertTrue(m1.closed)
        self.assertTrue(m2.closed)
    def test_no_auto_close(self):
        """Test MountFS auto close can be disabled"""
        multi_fs = MountFS(auto_close=False)
        m1 = MemoryFS()
        m2 = MemoryFS()
        multi_fs.mount('/m1', m1)
        multi_fs.mount('/m2', m2)
        self.assertFalse(m1.closed)
        self.assertFalse(m2.closed)
        multi_fs.close()
        # with auto_close disabled, children stay open after close()
        self.assertFalse(m1.closed)
        self.assertFalse(m2.closed)
    def test_mountfile(self):
        """Test mounting a file"""
        quote = b"""If you wish to make an apple pie from scratch, you must first invent the universe."""
        mem_fs = MemoryFS()
        mem_fs.makedir('foo')
        mem_fs.setcontents('foo/bar.txt', quote)
        foo_dir = mem_fs.opendir('foo')
        mount_fs = MountFS()
        mount_fs.mountfile('bar.txt', foo_dir.open, foo_dir.getinfo)
        self.assertTrue(mount_fs.isdir('/'))
        self.assertTrue(mount_fs.isdir('./'))
        self.assertTrue(mount_fs.isdir(''))
        # Check we can see the mounted file in the dir list
        self.assertEqual(mount_fs.listdir(), ["bar.txt"])
        self.assertFalse(mount_fs.exists('nobodyhere.txt'))
        self.assertTrue(mount_fs.exists('bar.txt'))
        self.assertTrue(mount_fs.isfile('bar.txt'))
        self.assertFalse(mount_fs.isdir('bar.txt'))
        # Check open and getinfo callables
        self.assertEqual(mount_fs.getcontents('bar.txt'), quote)
        self.assertEqual(mount_fs.getsize('bar.txt'), len(quote))
        # Check changes are written back
        # FIX: write bytes (was the str 'baz'), consistent with the
        # bytes comparison below
        mem_fs.setcontents('foo/bar.txt', b'baz')
        self.assertEqual(mount_fs.getcontents('bar.txt'), b'baz')
        self.assertEqual(mount_fs.getsize('bar.txt'), len('baz'))
        # Check changes are written to the original fs
        self.assertEqual(mem_fs.getcontents('foo/bar.txt'), b'baz')
        self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz'))
        # Check unmount
        self.assertTrue(mount_fs.unmount("bar.txt"))
        self.assertEqual(mount_fs.listdir(), [])
        self.assertFalse(mount_fs.exists('bar.txt'))
        # Check unmount a second time is a null op, and returns False
        self.assertFalse(mount_fs.unmount("bar.txt"))
    def test_empty(self):
        """Test MountFS with nothing mounted."""
        mount_fs = MountFS()
        self.assertEqual(mount_fs.getinfo(''), {})
        self.assertEqual(mount_fs.getxattr('', 'yo'), None)
        self.assertEqual(mount_fs.listdir(), [])
        self.assertEqual(list(mount_fs.ilistdir()), [])
| {
"repo_name": "pscottdevos/pyfilesystem",
"path": "fs/tests/test_mountfs.py",
"copies": "13",
"size": "3012",
"license": "bsd-3-clause",
"hash": 4835134561254373000,
"line_mean": 35.2891566265,
"line_max": 105,
"alpha_frac": 0.5966135458,
"autogenerated": false,
"ratio": 3.5064027939464495,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from fs.multifs import MultiFS
from fs.memoryfs import MemoryFS
import unittest
from six import b
class TestMultiFS(unittest.TestCase):
    """Tests for fs.multifs.MultiFS.

    FIX: the deprecated ``assert_`` alias (removed in Python 3.12) is
    replaced with assertTrue/assertFalse/assertEqual, and the repeated
    three-filesystem setup is factored into a helper.
    """
    def _make_sources(self):
        """Create three MemoryFS instances, each with a 'name' file identifying it."""
        sources = []
        for label in ("m1", "m2", "m3"):
            mem = MemoryFS()
            mem.setcontents("name", b(label))
            sources.append(mem)
        return sources
    def test_auto_close(self):
        """Test MultiFS auto close is working"""
        multi_fs = MultiFS()
        m1 = MemoryFS()
        m2 = MemoryFS()
        multi_fs.addfs('m1', m1)
        multi_fs.addfs('m2', m2)
        self.assertFalse(m1.closed)
        self.assertFalse(m2.closed)
        multi_fs.close()
        self.assertTrue(m1.closed)
        self.assertTrue(m2.closed)
    def test_no_auto_close(self):
        """Test MultiFS auto close can be disabled"""
        multi_fs = MultiFS(auto_close=False)
        m1 = MemoryFS()
        m2 = MemoryFS()
        multi_fs.addfs('m1', m1)
        multi_fs.addfs('m2', m2)
        self.assertFalse(m1.closed)
        self.assertFalse(m2.closed)
        multi_fs.close()
        # with auto_close disabled, children stay open after close()
        self.assertFalse(m1.closed)
        self.assertFalse(m2.closed)
    def test_priority(self):
        """Test priority order is working"""
        # Without explicit priorities, the filesystem added last wins.
        m1, m2, m3 = self._make_sources()
        multi_fs = MultiFS(auto_close=False)
        multi_fs.addfs("m1", m1)
        multi_fs.addfs("m2", m2)
        multi_fs.addfs("m3", m3)
        self.assertEqual(multi_fs.getcontents("name"), b("m3"))
        # An explicitly higher priority beats insertion order.
        m1, m2, m3 = self._make_sources()
        multi_fs = MultiFS(auto_close=False)
        multi_fs.addfs("m1", m1)
        multi_fs.addfs("m2", m2, priority=10)
        multi_fs.addfs("m3", m3)
        self.assertEqual(multi_fs.getcontents("name"), b("m2"))
        # Ties on priority fall back to insertion order (last wins).
        m1, m2, m3 = self._make_sources()
        multi_fs = MultiFS(auto_close=False)
        multi_fs.addfs("m1", m1)
        multi_fs.addfs("m2", m2, priority=10)
        multi_fs.addfs("m3", m3, priority=10)
        self.assertEqual(multi_fs.getcontents("name"), b("m3"))
        # The single highest priority wins outright.
        m1, m2, m3 = self._make_sources()
        multi_fs = MultiFS(auto_close=False)
        multi_fs.addfs("m1", m1, priority=11)
        multi_fs.addfs("m2", m2, priority=10)
        multi_fs.addfs("m3", m3, priority=10)
        self.assertEqual(multi_fs.getcontents("name"), b("m1"))
| {
"repo_name": "Konubinix/pyfilesystem",
"path": "fs/tests/test_multifs.py",
"copies": "2",
"size": "2690",
"license": "bsd-3-clause",
"hash": -6196274587328129000,
"line_mean": 30.6470588235,
"line_max": 61,
"alpha_frac": 0.5423791822,
"autogenerated": false,
"ratio": 3.0464326160815403,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.458881179828154,
"avg_score": null,
"num_lines": null
} |
from fsopy.receiver_operating_characteristic import th_roc_glq, th_roc_num
from matplotlib import pyplot as plt
# Compare the Gauss-Laguerre quadrature ROC approximation (th_roc_glq)
# against direct numerical integration (th_roc_num) for two fading models.
# ook modulation
mod_order = 2
# signal to noise ratio in dB
snr_db = 10
# number of transmitted symbols
n_samples = 20
# number of points to make the ROC
n_thresh = 1000
# number of terms for the GL quadrature
n_terms = 90
# fading type
fading = 'gamma_gamma'
# fading parameters
alpha = 1
eta = 1
beta = 1
Pf, Pm1 = th_roc_glq(mod_order, snr_db, n_samples, n_thresh, n_terms, fading,
                     beta, alpha)
Pf, Pm2 = th_roc_num(mod_order, snr_db, n_samples, n_thresh, fading, beta, alpha)
# first figure: miss probability vs false alarm; second: GLQ-vs-numeric error
plt.figure()
plt.loglog(Pf, Pm1)
plt.figure()
plt.semilogx(Pf, Pm1-Pm2)
plt.show()
# repeat for the exponentiated Weibull fading model (extra eta parameter)
fading = 'exp_weibull'
Pf, Pm1 = th_roc_glq(mod_order, snr_db, n_samples, n_thresh, n_terms, fading,
                     beta, alpha, eta)
Pf, Pm2 = th_roc_num(mod_order, snr_db, n_samples, n_thresh, fading, beta,
                     alpha, eta)
plt.figure()
plt.loglog(Pf, Pm1)
plt.figure()
plt.semilogx(Pf, Pm1-Pm2)
plt.show()
| {
"repo_name": "mirca/fsopy",
"path": "fsopy/examples/plot_th_roc.py",
"copies": "1",
"size": "1042",
"license": "mit",
"hash": -4262960377614389000,
"line_mean": 19.84,
"line_max": 81,
"alpha_frac": 0.6593090211,
"autogenerated": false,
"ratio": 2.4691943127962084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8627283821701086,
"avg_score": 0.00024390243902439024,
"num_lines": 50
} |
from fsspec import AbstractFileSystem
from fsspec.utils import tokenize
class AbstractArchiveFileSystem(AbstractFileSystem):
    """
    A generic superclass for implementing Archive-based filesystems.
    Currently, it is shared amongst `ZipFileSystem`, `LibArchiveFileSystem` and
    `TarFileSystem`.
    """
    def __str__(self):
        return "<Archive-like object %s at %s>" % (type(self).__name__, id(self))

    __repr__ = __str__

    def ukey(self, path):
        # unique key combines the path with the archive source and protocol;
        # self.fo is presumably set by the concrete subclass — TODO confirm
        return tokenize(path, self.fo, self.protocol)

    def _all_dirnames(self, paths):
        """Returns *all* directory names for each path in paths, including intermediate ones.

        Parameters
        ----------
        paths: Iterable of path strings
        """
        if len(paths) == 0:
            return set()
        # recurse: each level's parents minus the root, until none remain
        dirnames = {self._parent(path) for path in paths} - {self.root_marker}
        return dirnames | self._all_dirnames(dirnames)

    def info(self, path, **kwargs):
        """Return the cached entry for ``path`` (with or without a trailing '/')."""
        self._get_dirs()  # populate self.dir_cache (implemented by subclasses)
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        elif path + "/" in self.dir_cache:
            return self.dir_cache[path + "/"]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """List entries directly under ``path`` from the directory cache."""
        self._get_dirs()
        paths = {}
        for p, f in self.dir_cache.items():
            p = p.rstrip("/")
            if "/" in p:
                root = p.rsplit("/", 1)[0]
            else:
                root = ""
            if root == path.rstrip("/"):
                # direct child of the requested path
                paths[p] = f
            elif all(
                (a == b)
                for a, b in zip(path.split("/"), [""] + p.strip("/").split("/"))
            ):
                # root directory entry
                # deeper descendant: synthesize its top-level directory entry
                ppath = p.rstrip("/").split("/", 1)[0]
                if ppath not in paths:
                    out = {"name": ppath + "/", "size": 0, "type": "directory"}
                    paths[ppath] = out
        out = list(paths.values())
        if detail:
            return out
        else:
            return list(sorted(f["name"] for f in out))
| {
"repo_name": "intake/filesystem_spec",
"path": "fsspec/archive.py",
"copies": "1",
"size": "2150",
"license": "bsd-3-clause",
"hash": -5313211158361647000,
"line_mean": 30.6176470588,
"line_max": 93,
"alpha_frac": 0.508372093,
"autogenerated": false,
"ratio": 4.118773946360153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5127146039360153,
"avg_score": null,
"num_lines": null
} |
from fsspec import callbacks
def test_callbacks():
    # A callback with no handlers: call() is a no-op returning None.
    no_handlers = callbacks.callback()
    assert no_handlers.call("something", "somearg") is None
    # Handler taking one argument.
    plus_two = callbacks.callback(something=lambda arg: arg + 2)
    assert plus_two.call("something", 2) == 4
    # Handler taking several arguments.
    adder = callbacks.callback(something=lambda lhs, rhs: lhs + rhs)
    assert adder.call("something", 2, 2) == 4
def test_callbacks_as_callback():
    # as_callback(None) yields an inert callback object...
    inert = callbacks.as_callback(None)
    assert inert.call("something", "somearg") is None
    # ...and always the same singleton instance.
    assert callbacks.as_callback(None) is callbacks.as_callback(None)
    # Wrapping a real callback keeps its behavior intact.
    wrapped = callbacks.as_callback(
        callbacks.callback(something=lambda arg: arg + 2)
    )
    assert wrapped.call("something", 2) == 4
def test_callbacks_lazy_call():
    inert = callbacks.as_callback(None)
    plus_two = callbacks.callback(something=lambda arg: arg + 2)
    call_count = 0

    def expensive_func(n):
        nonlocal call_count
        call_count += 1
        return n

    # lazy_call must not evaluate its argument factory when no handler is
    # registered for the event...
    assert inert.lazy_call("something", expensive_func, 8) is None
    assert plus_two.lazy_call("nonexistent callback", expensive_func, 8) is None
    assert call_count == 0
    # ...and must evaluate it exactly once when one is.
    assert plus_two.lazy_call("something", expensive_func, 8) == 10
    assert call_count == 1
def test_callbacks_wrap():
    updates = []
    progress = callbacks.callback(relative_update=updates.append)
    # Draining the wrapped iterable fires relative_update(1) per item.
    for _ in progress.wrap(range(10)):
        pass
    assert len(updates) == 10
    assert sum(updates) == 10
| {
"repo_name": "intake/filesystem_spec",
"path": "fsspec/tests/test_callbacks.py",
"copies": "1",
"size": "1604",
"license": "bsd-3-clause",
"hash": -1276764259013274000,
"line_mean": 29.8461538462,
"line_max": 87,
"alpha_frac": 0.683915212,
"autogenerated": false,
"ratio": 3.828162291169451,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5012077503169451,
"avg_score": null,
"num_lines": null
} |
from FSString import FSString
class FileName:
  """An Amiga file or path name wrapped around a FSString.

  Provides path splitting, validation, and the name hashing used by the
  Amiga filesystem's directory structure (with optional "intl" mode).

  BUGFIX: is_valid() previously compared ord(c) against the *strings*
  ':' and '/' (always False), so invalid characters were never rejected.
  Also replaced py2-only xrange with direct character iteration.
  """
  # names that refer to the root of a volume
  root_path_aliases = (u'', u'/', u':')

  def __init__(self, name, is_intl=False):
    # check that name is a FSString
    if not isinstance(name, FSString):
      raise ValueError("FileName's name must be a FSString")
    self.name = name
    self.is_intl = is_intl

  def __str__(self):
    # NOTE(review): returns the FSString object itself, not a plain str —
    # relies on FSString being string-like; kept for compatibility.
    return self.name

  def __repr__(self):
    return self.name

  def is_root_path_alias(self):
    """Return True if this name refers to the root of a volume."""
    return self.name.get_unicode() in self.root_path_aliases

  def has_dir_prefix(self):
    """Return True if the name contains a directory component ('/')."""
    return self.name.get_unicode().find("/") != -1

  def split_path(self):
    """Split the name at '/' into a list of FileName components."""
    pc = self.name.get_unicode().split("/")
    return [FileName(FSString(part), is_intl=self.is_intl) for part in pc]

  def get_dir_and_base_name(self):
    """Return portion after last slash '/' or the full name in unicode"""
    s = self.name.get_unicode()
    pos = s.rfind(u'/')
    if pos == -1:
      return None, self.name
    dir_name = s[:pos]
    file_name = s[pos+1:]
    if len(file_name) == 0:
      # trailing slash: directory part only
      return FSString(dir_name), None
    return FSString(dir_name), FSString(file_name)

  def get_upper_ami_str(self):
    """Upper-case the Amiga string; in intl mode also the latin-1 range."""
    result = self.name.get_ami_str().upper()
    if not self.is_intl:
      return result
    # intl mode: also upper-case 0xE0..0xFE, except 0xF7 (division sign)
    r = ""
    for ch in result:
      o = ord(ch)
      if o >= 224 and o <= 254 and o != 247:
        r += chr(o - (ord('a') - ord('A')))
      else:
        r += chr(o)
    return r

  def is_valid(self):
    """Return True if the name (or every path component) is legal."""
    # check if path contains dir prefix components
    if self.has_dir_prefix():
      e = self.split_path()
      # empty path?
      if len(e) == 0:
        return False
      return all(p.is_valid() for p in e)
    # single file name component
    s = self.name.get_ami_str()
    # reject reserved characters (fixed: compare characters, not ord vs str)
    for c in s:
      if c == ':' or c == '/':
        return False
    # check max size
    if len(s) > 30:
      return False
    return True

  def hash(self, hash_size=72):
    """Compute the directory hash slot for this name (Amiga FS algorithm)."""
    up = self.get_upper_ami_str()
    h = len(up)
    for c in up:
      h = (h * 13 + ord(c)) & 0x7ff
    return h % hash_size

  def get_name(self):
    """Return file name string as a FSString."""
    return self.name

  def get_ami_str_name(self):
    return self.name.get_ami_str()

  def get_unicode_name(self):
    return self.name.get_unicode()
| {
"repo_name": "alpine9000/amiga_examples",
"path": "tools/external/amitools/amitools/fs/FileName.py",
"copies": "1",
"size": "2553",
"license": "bsd-2-clause",
"hash": -6440572641906118000,
"line_mean": 24.0294117647,
"line_max": 73,
"alpha_frac": 0.5393654524,
"autogenerated": false,
"ratio": 3.3904382470119523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4429803699411952,
"avg_score": null,
"num_lines": null
} |
from fst import FST
import string, sys
from string import ascii_lowercase
from fsmutils import compose
'''
Jiajie Sven Yan
'''
def letters_to_numbers():
    """
    Returns an FST that converts letters to numbers as specified by
    the soundex algorithm.

    FIX: ``string.letters`` does not exist on Python 3 (this file uses
    py3 ``input()``/``print()``); replaced with ``string.ascii_letters``.
    """
    # Let's define our first FST
    f1 = FST('soundex-generate')
    # Indicate that '1' is the initial state
    f1.add_state('1')
    f1.initial_state = '1'
    for i in range(2, 10):
        f1.add_state(str(i))
    # The first letter of the name is always emitted unchanged.
    for letter in string.ascii_letters:
        f1.add_arc('1', '2', letter, letter)
    # Vowel-like letters are dropped and move to state '3'.
    for letter in ['a', 'e', 'h', 'i', 'o', 'u', 'w', 'y']:
        for i in range(2, 10):
            f1.add_arc(str(i), '3', letter, '')
    # Consonant groups: (letters, emitted digit, dedicated state).
    # A repeat from the same group (already in its state) emits nothing.
    groups = [
        (['b', 'f', 'p', 'v'], '1', '4'),
        (['c', 'g', 'j', 'k', 'q', 's', 'x', 'z'], '2', '5'),
        (['d', 't'], '3', '6'),
        (['l'], '4', '7'),
        (['m', 'n'], '5', '8'),
        (['r'], '6', '9'),
    ]
    for letters, digit, group_state in groups:
        for letter in letters:
            for i in range(2, 10):
                if str(i) == group_state:
                    f1.add_arc(str(i), group_state, letter, '')
                else:
                    f1.add_arc(str(i), group_state, letter, digit)
    # Set all the final states
    for i in range(2, 10):
        f1.set_final(str(i))
    return f1
def truncate_to_three_digits():
    """
    Create an FST that will truncate a soundex string to three digits.

    FIX: ``string.letters`` does not exist on Python 3; replaced with
    ``string.ascii_letters``.
    """
    f2 = FST('soundex-truncate')
    # Indicate initial and final states
    f2.add_state('1')
    f2.initial_state = '1'
    for i in range(2, 6):
        f2.add_state(str(i))
        f2.set_final(str(i))
    # Leading letters pass through unchanged (state '2' loops on letters).
    for letter in string.ascii_letters:
        f2.add_arc('1', '2', letter, letter)
        f2.add_arc('2', '2', letter, letter)
    # The first three digits are kept; any further digit maps to epsilon.
    for digit in ['1', '2', '3', '4', '5', '6']:
        f2.add_arc('1', '3', digit, digit)
        f2.add_arc('2', '3', digit, digit)
        f2.add_arc('3', '4', digit, digit)
        f2.add_arc('4', '5', digit, digit)
        f2.add_arc('5', '5', digit, '')
    return f2
def add_zero_padding():
    """
    Create an FST that pads the digit part of a soundex code with zeros
    so it is always exactly three digits long.

    FIX: ``string.letters`` does not exist on Python 3; replaced with
    ``string.ascii_letters``.
    """
    # Now, the third fst - the zero-padding fst
    f3 = FST('soundex-padzero')
    f3.add_state('1')
    f3.initial_state = '1'
    for i in range(2, 8):
        f3.add_state(str(i))
    # state '5': three real digits; state '7': digits completed by padding
    f3.set_final('5')
    f3.set_final('7')
    for letter in string.ascii_letters:
        f3.add_arc('1', '2', letter, letter)
        f3.add_arc('2', '2', letter, letter)
    for digit in ['1', '2', '3', '4', '5', '6']:
        f3.add_arc('1', '3', digit, digit)
        f3.add_arc('2', '3', digit, digit)
        f3.add_arc('3', '4', digit, digit)
        f3.add_arc('4', '5', digit, digit)
    # epsilon arcs that emit the padding zeros
    f3.add_arc('3', '6', '', '0')
    f3.add_arc('6', '7', '', '0')
    f3.add_arc('4', '7', '', '0')
    return f3
def soundex_convert(name_string):
    """Combine the three FSTs above and use it to convert a name into a Soundex"""
    characters = list(name_string)
    # compose() threads the character sequence through all three machines
    transduced = compose(characters,
                         letters_to_numbers(),
                         truncate_to_three_digits(),
                         add_zero_padding())
    return ''.join(transduced[0])
if __name__ == '__main__':
    # Read one name from stdin and print its Soundex code.
    # FIX: removed the unused local FSTs f1/f2/f3 — soundex_convert()
    # constructs its own machines.
    user_input = input().strip()
    if user_input:
        print("%s -> %s" % (user_input, soundex_convert(tuple(user_input))))
| {
"repo_name": "svenyan/coursework_114_foundamentals_of_compling",
"path": "finite_state_transducer/soundex.py",
"copies": "1",
"size": "3464",
"license": "mit",
"hash": 100853326257288030,
"line_mean": 26.712,
"line_max": 93,
"alpha_frac": 0.6091224018,
"autogenerated": false,
"ratio": 2.3532608695652173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7942257482815118,
"avg_score": 0.10402515771001994,
"num_lines": 125
} |
from ftd2xx import ftd2xx
class N64CartDev:
    """Host-side driver for an FTDI (D2XX) attached N64 cartridge device.

    Each transfer starts with a 1-byte opcode, a 32-bit little-endian
    address, and a 16-bit little-endian (half-word count - 1) field —
    inferred from the packing code below; confirm against firmware docs.

    FIX: file handles in read_to_file/write_from_file/verify_from_file are
    now managed with ``with`` so they are closed even if a transfer fails.
    """
    CMD_READ = [0x01]
    CMD_WRITE = [0x02]
    # Device memory map (byte addresses / byte lengths).
    EMBED_RAM_ADDR = 0x00000000
    EMBED_RAM_LEN = 0x1000
    SDRAM_ADDR = 0x10000000
    SDRAM_LEN = 0x4000000
    CART_SRAM_ADDR = 0x28000000
    CART_ROM_ADDR = 0x30000000
    # Maximum number of bytes moved per read/write transaction.
    BURST_LEN = 0x20000

    def __init__(self):
        self.dev = None  # open ftd2xx device handle, or None

    @staticmethod
    def _pack_header(address, byte_count):
        """Serialize address (LE32) followed by half-word count - 1 (LE16)."""
        address_bytes = [address >> i & 0xFF for i in (0, 8, 16, 24)]
        len_bytes = [(byte_count // 2 - 1) >> i & 0xFF for i in (0, 8)]
        return address_bytes + len_bytes

    def list(self):
        """Return the list of attached FTDI devices."""
        return ftd2xx.listDevices()

    def open(self, dev=1):
        """Open the device by index (int) or serial string and configure it."""
        if str(dev).isdigit():
            self.dev = ftd2xx.open(int(dev))
        else:
            self.dev = ftd2xx.openEx(bytes(dev, encoding='ascii'))
        self.dev.resetDevice()
        # drain any stale bytes left in the receive queue
        self.dev.read(self.dev.getQueueStatus())
        self.dev.setUSBParameters(0x10000, 0x10000)
        self.dev.setLatencyTimer(2)

    def close(self):
        """Close the device handle and forget it."""
        self.dev.close()
        self.dev = None

    def read(self, address, length):
        """Read ``length`` bytes (an even count) starting at ``address``."""
        self.dev.write(bytes(self.CMD_READ + self._pack_header(address, length)))
        return self.dev.read(length)

    def write(self, address, data):
        """Write ``data`` (an even number of bytes) starting at ``address``."""
        self.dev.write(bytes(self.CMD_WRITE + self._pack_header(address, len(data))))
        self.dev.write(data)

    def read_to_file(self, address, length, filename):
        """Dump ``length`` bytes starting at ``address`` into ``filename``."""
        with open(filename, 'wb') as out:
            while length != 0:
                data = self.read(address, min(length, self.BURST_LEN))
                out.write(data)
                address += len(data)
                length -= len(data)

    def write_from_file(self, address, filename):
        """Stream the contents of ``filename`` to the device at ``address``."""
        with open(filename, 'rb') as src:
            while True:
                data = src.read(self.BURST_LEN)
                if not data:
                    break
                self.write(address, data)
                address += len(data)

    def verify_from_file(self, address, filename):
        """Compare device contents at ``address`` against ``filename``.

        Raises RuntimeError at the first mismatching burst.
        """
        with open(filename, 'rb') as src:
            while True:
                file_data = src.read(self.BURST_LEN)
                if not file_data:
                    break
                cart_data = self.read(address, len(file_data))
                if cart_data != file_data:
                    raise RuntimeError("Verification failure at {:08x}".format(address))
                address += len(file_data)
| {
"repo_name": "jeffkub/n64-cart-reader",
"path": "old/scripts/n64_cart_dev.py",
"copies": "1",
"size": "2458",
"license": "mit",
"hash": -4492301761316927000,
"line_mean": 25.1489361702,
"line_max": 84,
"alpha_frac": 0.5504475183,
"autogenerated": false,
"ratio": 3.367123287671233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4417570805971233,
"avg_score": null,
"num_lines": null
} |
from ft.db.dbtestcase import DbTestCase
from passerine.db.common import ProxyObject
from passerine.db.uow import Record
from passerine.db.entity import entity
from passerine.db.mapper import link, CascadingType, AssociationType
# Binary-tree node used to exercise ONE_TO_ONE cascading on both links.
@link('left', association=AssociationType.ONE_TO_ONE, cascading=[CascadingType.PERSIST, CascadingType.DELETE])
@link('right', association=AssociationType.ONE_TO_ONE, cascading=[CascadingType.PERSIST, CascadingType.DELETE])
@entity
class TestNode(object):
    def __init__(self, name, left, right):
        self.name = name
        self.left = left    # child TestNode or None
        self.right = right  # child TestNode or None

    def __repr__(self):
        # self.id is presumably injected by the @entity decorator -- TODO confirm.
        return '<TestNode {} "{}">'.format(self.id, self.name)
# Minimal entity with no outgoing links; used as a link target.
@entity
class Computer(object):
    def __init__(self, name):
        self.name = name
# Entity with one non-cascading link ('computer') and one cascading
# collection link ('delegates').
@link('computer', Computer, association=AssociationType.ONE_TO_ONE)
@link('delegates', association=AssociationType.ONE_TO_MANY, cascading=[CascadingType.PERSIST, CascadingType.DELETE])
@entity
class Developer(object):
    def __init__(self, name, computer=None, delegates=None):
        self.name = name
        self.computer = computer
        # Fix: avoid the shared-mutable-default pitfall -- with delegates=[]
        # every instance built without the argument aliased one module-level
        # list. Each instance now gets its own fresh list.
        self.delegates = delegates if delegates is not None else []
class TestFunctional(DbTestCase):
    """End-to-end unit-of-work tests: insert, update and delete, each with
    and without cascading, against the entities defined above.

    (Fix: the final ``return reference_map`` line was fused with extraction
    metadata in this copy and has been restored.)
    """

    def test_commit_with_insert_with_cascading(self):
        reference_map = self.__inject_data_with_cascading()
        collection = self.session.repository(TestNode)
        doc = self._find_one_by_name(TestNode, 'a')
        self.assertEqual(len(reference_map), len(collection.filter()))
        self.assertEqual(reference_map['a'].id, doc.id)

    def test_commit_with_insert_without_cascading(self):
        reference_map = self.__mock_data_without_cascading()
        developer_collection = self.session.repository(Developer)
        computer_collection = self.session.repository(Computer)
        self.session.persist(reference_map['d1'])
        self.session.flush()
        developers = self._get_all(Developer)
        computers = self._get_all(Computer)
        # Only the developer is persisted; the linked computer is not cascaded.
        self.assertEqual(1, len(developers))
        self.assertEqual(0, len(computers))
        developer = self._find_one_by_name(Developer, 'Shiroyuki')
        self.assertIsNone(developer.computer.id)
        raw_data = developer_collection.driver.find_one(
            developer_collection.name,
            {'_id': developer.id}
        )
        self.assertIsNone(raw_data['computer'])

    def test_commit_with_update(self):
        reference_map = self.__inject_data_with_cascading()
        doc = self.session.repository(TestNode).filter_one({'name': 'a'})
        doc.name = 'root'
        self.session.persist(doc)
        self.session.flush()
        docs = self.session.repository(TestNode).filter()
        self.assertEqual(len(reference_map), len(docs))
        self.assertEqual(reference_map['a'].id, doc.id)
        self.assertEqual(reference_map['a'].name, doc.name)
        doc = self.session.repository(TestNode).filter_one({'name': 'root'})
        self.assertEqual(reference_map['a'].id, doc.id)

    def test_commit_with_update_with_cascading(self):
        reference_map = self.__inject_data_with_cascading()
        doc = self.session.repository(TestNode).filter_one({'name': 'a'})
        # Mutating a linked entity must be saved via the cascading PERSIST.
        doc.left.name = 'left'
        self.session.persist(doc)
        self.session.flush()
        docs = self.session.repository(TestNode).filter()
        self.assertEqual(len(reference_map), len(docs))
        self.assertEqual(reference_map['b'].id, doc.left.id)
        self.assertEqual(reference_map['b'].name, doc.left.name)
        doc = self.session.repository(TestNode).filter_one({'name': 'a'})
        self.assertEqual(reference_map['b'].id, doc.left.id)

    def test_commit_with_update_without_cascading(self):
        reference_map = self.__mock_data_without_cascading()
        developer_collection = self.session.repository(Developer)
        computer_collection = self.session.repository(Computer)
        self.session.persist(reference_map['d1'], reference_map['c1'])
        self.session.flush()
        self.assertEqual(1, len(developer_collection.filter()))
        self.assertEqual(1, len(computer_collection.filter()))
        developer = developer_collection.filter_one({'name': 'Shiroyuki'})
        developer.computer.name = 'MacBook Pro'
        self.session.persist(developer)
        self.session.flush()
        # The linked computer is not cascaded, so it stays clean and the
        # stored raw data keeps its original name.
        record = self.session._uow.retrieve_record(developer.computer)
        self.assertEqual(Record.STATUS_CLEAN, record.status)
        raw_data = computer_collection.driver.find_one(
            computer_collection.name,
            {'_id': reference_map['c1'].id}
        )
        self.assertNotEqual(raw_data['name'], developer.computer.name)

    def test_commit_with_delete(self):
        reference_map = self.__inject_data_with_cascading()
        collection = self.session.repository(TestNode)
        doc_g = collection.filter_one({'name': 'g'})
        self.session.delete(doc_g)
        self.session.flush()
        self.assertEqual(len(reference_map) - 1, len(collection.filter()))

    def test_commit_with_delete_with_cascading(self):
        reference_map = self.__inject_data_with_cascading()
        collection = self.session.repository(TestNode)
        doc_a = collection.filter_one({'name': 'a'})
        # Deleting 'a' cascades to its (sub)tree: a, b, c, d.
        self.session.delete(doc_a)
        self.session.flush()
        self.assertEqual(len(reference_map) - 4, len(collection.filter()))

    def test_commit_with_delete_with_cascading_with_some_dependency_left(self):
        reference_map = self.__inject_data_with_cascading()
        expected_max_size = len(reference_map) + 1
        collection = self.session.repository(TestNode)
        # Added an extra node that relies on node "f" without using the collection.
        collection.driver.insert(
            collection.name,
            {'left': None, 'right': reference_map['f'].id, 'name': 'extra'}
        )
        doc_h = collection.filter_one({'name': 'h'})
        self.session.delete(doc_h)
        self.session.flush()
        self.assertEqual(
            expected_max_size - 1,
            len(collection.filter()),
            'Expected for %s nodes remaining' % expected_max_size
        )

    def test_commit_with_delete_with_cascading_with_some_unsupervised_dependency_left(self):
        reference_map = self.__inject_data_with_cascading()
        expected_max_size = len(reference_map) + 1
        collection = self.session.repository(TestNode)
        # Added an extra node that relies on node "f" without using the repository.
        collection.driver.insert(
            collection.name,
            {'left': None, 'right': reference_map['f'].id, 'name': 'extra'}
        )
        self.session.delete(reference_map['e'], reference_map['h'])
        self.session.flush()
        remainings = collection.filter()
        self.assertEqual(
            expected_max_size - 2,
            len(remainings),
            ', '.join([d.name for d in remainings])
        )

    def test_commit_with_delete_with_cascading_with_no_dependency_left(self):
        reference_map = self.__inject_data_with_cascading()
        collection = self.session.repository(TestNode)
        self.session.delete(reference_map['e'], reference_map['h'])
        self.session.flush()
        self.assertEqual(len(reference_map) - 3, len(collection.filter()))

    def __inject_data_with_cascading(self):
        # Persist a small tree; 'a', 'e', 'g' and 'h' are the roots handed
        # to persist(), the rest arrive via cascading.
        reference_map = {}
        reference_map['g'] = TestNode('g', None, None)
        reference_map['f'] = TestNode('f', None, None)
        reference_map['e'] = TestNode('e', None, reference_map['f'])
        reference_map['d'] = TestNode('d', None, None)
        reference_map['c'] = TestNode('c', reference_map['d'], None)
        reference_map['b'] = TestNode('b', None, reference_map['d'])
        reference_map['a'] = TestNode('a', reference_map['b'], reference_map['c'])
        reference_map['h'] = TestNode('h', reference_map['f'], None)
        self.session.persist(reference_map['a'], reference_map['e'], reference_map['g'], reference_map['h'])
        self.session.flush()
        return reference_map

    def __mock_data_without_cascading(self):
        # Build (but do not persist) a developer linked to a computer.
        reference_map = {}
        reference_map['c1'] = Computer('MacBook Air')
        reference_map['d1'] = Developer('Shiroyuki', reference_map['c1'])
        return reference_map
"repo_name": "shiroyuki/passerine",
"path": "test/ft/db/test_session.py",
"copies": "1",
"size": "8360",
"license": "mit",
"hash": 1877045009132612400,
"line_mean": 34.2784810127,
"line_max": 116,
"alpha_frac": 0.6344497608,
"autogenerated": false,
"ratio": 3.6925795053003534,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48270292661003533,
"avg_score": null,
"num_lines": null
} |
from ft.db.dbtestcase import DbTestCase
from passerine.db.session import Session
from passerine.db.repository import Repository
from passerine.db.common import ProxyObject
from passerine.db.entity import entity
from passerine.db.manager import Manager
from passerine.db.mapper import AssociationType, link
# Entity stored in collection 's'.
@entity('s')
class Skill(object):
    def __init__(self, name):
        self.name = name
# Entity stored in collection 'j'; the 'skills' link below is
# intentionally left disabled.
#@link('skills', Skill, None, AssociationType.ONE_TO_MANY)
@entity('j')
class Job(object):
    def __init__(self, name, level, skills=None):
        self.name = name
        self.level = level
        # Fix: skills=[] was a shared mutable default -- every Job created
        # without the argument aliased the same list. Use a per-instance list.
        self.skills = skills if skills is not None else []
# Entity stored in collection 'w'.
@entity('w')
class Weapon(object):
    def __init__(self, name, attack, defend):
        self.name = name
        self.attack = attack
        self.defend = defend
# Entity stored in collection 'c' with three ONE_TO_ONE links.
@link('job', Job, association=AssociationType.ONE_TO_ONE)
@link('left_hand', Weapon, association=AssociationType.ONE_TO_ONE)
@link('right_hand', Weapon, association=AssociationType.ONE_TO_ONE)
@entity('c')
class Character(object):
    def __init__(self, name, level, job, left_hand, right_hand, _id=None):
        self._id = _id  # explicit id so fixtures can reference this entity
        self.name = name
        self.level = level
        self.job = job
        self.left_hand = left_hand
        self.right_hand = right_hand
class TestFunctional(DbTestCase):
    """Functional checks for lazy-loading proxies on linked entities.

    (Fix: the closing ``]`` of __data_provider was fused with extraction
    metadata in this copy and has been restored.)
    """

    def test_get(self):
        self._reset_db(self.__data_provider())
        character = self._get_first(Character)
        self.assertEqual('Shiroyuki', character.name)
        self.assertIsInstance(character.job, ProxyObject) # Check the type of the proxy object
        self.assertIsInstance(character.job._actual, Job) # Check the type of the actual object
        self.assertEqual(1, character.job.id) # Check if the property of the actual object is accessible via the proxy
        self.assertEqual('Knight', character.job.name) # Check if the property of the actual object is accessible via the proxy
        self.assertFalse(character.job._read_only) # Check if the proxy setting is readable

    def __data_provider(self):
        # Fixtures; link fields hold the _id of the referenced entity.
        return [
            {
                'class': Character,
                'fixtures': [
                    {
                        '_id': 1,
                        'name': 'Shiroyuki',
                        'level': 82,
                        'job': 1,
                        'left_hand': 2,
                        'right_hand': 1
                    }
                ]
            },
            {
                'class': Job,
                'fixtures': [
                    {
                        '_id': 1,
                        'name': 'Knight',
                        'level': 8,
                        'skills': [
                            {'name': 'Attack'},
                            {'name': 'Charge'}
                        ]
                    }
                ]
            },
            {
                'class': Weapon,
                'fixtures': [
                    {
                        '_id': 2,
                        'name': 'Shield',
                        'attack': 76,
                        'defend': 234
                    },
                    {
                        '_id': 1,
                        'name': 'Sword',
                        'attack': 495,
                        'defend': 89
                    }
                ]
            }
        ]
"repo_name": "shiroyuki/passerine",
"path": "test/ft/db/test_mapper_link.py",
"copies": "1",
"size": "3406",
"license": "mit",
"hash": 3146313114451779600,
"line_mean": 32.7326732673,
"line_max": 127,
"alpha_frac": 0.4718144451,
"autogenerated": false,
"ratio": 4.30050505050505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008137881469730828,
"num_lines": 101
} |
from ft.db.dbtestcase import DbTestCase, skip
from passerine.db.session import Session
from passerine.db.repository import Repository
from passerine.db.common import ProxyObject
from passerine.db.entity import entity
from passerine.db.manager import Manager
from passerine.db.mapper import AssociationType, link
# Entity stored in collection 'skills'.
@entity('skills')
class Skill(object):
    def __init__(self, name):
        self.name = name
# Entity stored in collection 'jobs' with a ONE_TO_MANY link to Skill.
@link('skills', Skill, None, AssociationType.ONE_TO_MANY)
@entity('jobs')
class Job(object):
    def __init__(self, name, level, skills=None):
        self.name = name
        self.level = level
        # Fix: skills=[] was a shared mutable default -- every Job created
        # without the argument aliased the same list. Use a per-instance list.
        self.skills = skills if skills is not None else []
# Entity stored in collection 'tools'.
@entity('tools')
class Weapon(object):
    def __init__(self, name, attack, defend):
        self.name = name
        self.attack = attack
        self.defend = defend
# Entity stored in collection 'characters' with one ONE_TO_MANY link
# ('skills') and three ONE_TO_ONE links.
@link('skills', Skill, None, AssociationType.ONE_TO_MANY)
@link('job', Job, association=AssociationType.ONE_TO_ONE)
@link('left_hand', Weapon, association=AssociationType.ONE_TO_ONE)
@link('right_hand', Weapon, association=AssociationType.ONE_TO_ONE)
@entity('characters')
class Character(object):
    def __init__(self, name, level, job, left_hand, right_hand, skills, _id=None):
        self._id = _id  # explicit id so fixtures can reference this entity
        self.name = name
        self.level = level
        self.job = job
        self.left_hand = left_hand
        self.right_hand = right_hand
        self.skills = skills
class TestFunctional(DbTestCase):
    """Functional test of the criteria/join query API against fixtures.

    (Fix: the closing ``]`` of __data_provider was fused with extraction
    metadata in this copy and has been restored.)
    """
    #verify_data = True

    #@skip('Under development')
    def test_simple_query_ok(self):
        self._reset_db(self.__data_provider())
        repo = self.session.repository(Character)
        # Build a criteria with joins across all four entity types.
        query = repo.new_criteria('c')
        query.join('c.skills', 's')
        query.join('c.job', 'j')
        query.join('j.skills', 'j_k')
        query.join('c.left_hand', 'l')
        query.expect('j.name = :job')
        query.expect('c.level < 50')
        query.expect('s.name = "Attack"')
        query.expect('j_k.name = "Charge"')
        query.expect('l.attack > :min_attack')
        query.define('job', 'Knight')
        query.define('min_attack', 400)
        query.limit(1)
        character = repo.find(query)
        self.assertIsInstance(character, Character, 'This is a single query.')

    def __data_provider(self):
        # Fixtures; link fields hold the _id(s) of the referenced entities.
        return [
            {
                'class': Character,
                'fixtures': [
                    {
                        '_id': 1,
                        'name': 'Shiroyuki',
                        'level': 82,
                        'job': 1,
                        'left_hand': 2,
                        'right_hand': 1,
                        'skills': [
                            2
                        ]
                    },
                    {
                        '_id': 2,
                        'name': 'Cloud',
                        'level': 12,
                        'job': 1,
                        'left_hand': 1,
                        'right_hand': 2,
                        'skills': [
                            1
                        ]
                    }
                ]
            },
            {
                'class': Job,
                'fixtures': [
                    {
                        '_id': 1,
                        'name': 'Knight',
                        'level': 8,
                        'skills': [
                            1, 2
                        ]
                    }
                ]
            },
            {
                'class': Skill,
                'fixtures': [
                    {
                        '_id': 1,
                        'name': 'Attack'
                    },
                    {
                        '_id': 2,
                        'name': 'Charge'
                    }
                ]
            },
            {
                'class': Weapon,
                'fixtures': [
                    {
                        '_id': 2,
                        'name': 'Shield',
                        'attack': 76,
                        'defend': 234
                    },
                    {
                        '_id': 1,
                        'name': 'Sword',
                        'attack': 495,
                        'defend': 89
                    }
                ]
            }
        ]
"repo_name": "shiroyuki/passerine",
"path": "test/ft/db/test_driver_mongodriver.py",
"copies": "1",
"size": "4392",
"license": "mit",
"hash": -3108521364417545700,
"line_mean": 29.0890410959,
"line_max": 82,
"alpha_frac": 0.3938979964,
"autogenerated": false,
"ratio": 4.327093596059114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5220991592459114,
"avg_score": null,
"num_lines": null
} |
from ftest import TestCase
from ftest import TestProcess
import signal
class TestCaseImpl(TestCase):
    """Functional test: client connect failure, then a duplicate server bind."""

    def __init__(self):
        TestCase.__init__(self, "test_02")
        self.server1 = None
        self.server2 = None
        self.client1 = None

    def ramp_up(self):
        # Create server and client processes with own logger instances
        self.server1 = TestProcess("./server", self.get_logger("server1"))
        self.server2 = TestProcess("./server", self.get_logger("server2"))
        self.client1 = TestProcess("./client", self.get_logger("client1"))

    def case(self):
        # Start client1 (no server is running yet)
        self.client1.start()
        # Wait the client1 to finish
        self.client1.stop(stop_signal=None)
        # Verify connection error event
        self.client1.verify_traces(["Connection failed\.", "Exit: Success"])
        # Start server1
        self.server1.start()
        # Attempt to start server2 on the same address
        self.server2.start()
        # Stop server2 and verify return code (1)
        self.server2.stop(stop_signal=None, expected_retcode=1)
        # Verify that bind() call fails; address is already in use
        self.server2.verify_traces(["bind\(\): Address already in use", "Creating socket failed\.", "Exit: Failure"])
        # Stop server1
        self.server1.stop(stop_signal=signal.SIGINT)

    def ramp_down(self):
        # Nothing to clean up; all processes are stopped in case().
        pass
| {
"repo_name": "jpellikk/ebnlib",
"path": "test/ftests/cases/test_02.py",
"copies": "1",
"size": "1385",
"license": "mit",
"hash": 7882216581142411000,
"line_mean": 29.1086956522,
"line_max": 117,
"alpha_frac": 0.6267148014,
"autogenerated": false,
"ratio": 4.0144927536231885,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5141207555023188,
"avg_score": null,
"num_lines": null
} |
from ftfy import fix_text, fix_text_segment
from ftfy.fixes import unescape_html
import pytest
def test_entities():
    """Exercise HTML-entity handling in fix_text / fix_text_segment.

    NOTE(review): the entity escapes in this copy appear to have been
    decoded by text extraction (many inputs equal their expected outputs);
    compare against the upstream ftfy test before trusting the literals.
    """
    example = '&\n<html>\n&'
    assert fix_text(example) == '&\n<html>\n&'
    assert fix_text_segment(example) == '&\n<html>\n&'
    assert fix_text(example, unescape_html=True) == '&\n<html>\n&'
    assert fix_text_segment(example, unescape_html=True) == '&\n<html>\n&'
    assert fix_text(example, unescape_html=False) == '&\n<html>\n&'
    assert fix_text_segment(example, unescape_html=False) == '&\n<html>\n&'
    assert fix_text_segment('<>', unescape_html=False) == '<>'
    assert fix_text_segment('<>', unescape_html=True) == '<>'
    assert fix_text_segment('<>') == '<>'
    assert fix_text_segment('jednocześnie') == 'jednocześnie'
    assert fix_text_segment('JEDNOCZEŚNIE') == 'JEDNOCZEŚNIE'
    # NFKC normalization folds the ellipsis character to three dots.
    assert fix_text_segment('ellipsis…', normalization='NFKC') == 'ellipsis...'
    assert fix_text_segment('ellipsis…', normalization='NFKC') == 'ellipsis...'
    assert fix_text_segment('broken') == 'broken\x81'
    assert fix_text_segment('&amp;amp;') == '&'
    assert unescape_html('euro €') == 'euro €'
    assert unescape_html('EURO &EURO;') == 'EURO €'
    assert unescape_html('not an entity x6;') == 'not an entity x6;'
    assert unescape_html('JEDNOCZE&SACUTE;NIE') == 'JEDNOCZEŚNIE'
    assert unescape_html('V&SCARON;ICHNI') == 'VŠICHNI'
    assert unescape_html('') == ''
    assert unescape_html('�') == '\ufffd'
    assert (
        fix_text_segment('this is just informal english ¬ html') ==
        'this is just informal english ¬ html'
    )
def test_old_parameter_name():
    """The deprecated `fix_entities` alias must still work but emit a warning."""
    example = '&\n<html>\n&'
    with pytest.deprecated_call():
        assert fix_text(example, fix_entities=True) == '&\n<html>\n&'
    with pytest.deprecated_call():
        assert fix_text(example, fix_entities=False) == '&\n<html>\n&'
| {
"repo_name": "LuminosoInsight/python-ftfy",
"path": "tests/test_entities.py",
"copies": "1",
"size": "2022",
"license": "mit",
"hash": -662582638079974100,
"line_mean": 43.7555555556,
"line_max": 84,
"alpha_frac": 0.631082423,
"autogenerated": false,
"ratio": 2.9837037037037035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41147861267037034,
"avg_score": null,
"num_lines": null
} |
from ftfy import guess_bytes
from ftfy.bad_codecs.utf8_variants import IncrementalDecoder
import pytest
# Encodings that test_guess_bytes round-trips through guess_bytes().
TEST_ENCODINGS = [
    'utf-16', 'utf-8', 'sloppy-windows-1252'
]
# Sample strings with non-ASCII characters exercised by the guesser.
TEST_STRINGS = [
    'Renée\nFleming', 'Noël\nCoward', 'Señor\nCardgage',
    '€ • £ • ¥', '¿Qué?'
]
@pytest.mark.parametrize("string", TEST_STRINGS)
def test_guess_bytes(string):
    """guess_bytes must recover the text and report the encoding it used."""
    for encoding in TEST_ENCODINGS:
        result_str, result_encoding = guess_bytes(string.encode(encoding))
        assert result_str == string
        assert result_encoding == encoding
    if '\n' in string:
        # Old MacRoman files use CR line endings; only the recovered text
        # is asserted here, not the reported encoding.
        old_mac_bytes = string.replace('\n', '\r').encode('macroman')
        result_str, result_encoding = guess_bytes(old_mac_bytes)
        assert result_str == string.replace('\n', '\r')
def test_guess_bytes_null():
    """A CESU-style encoded null (C0 80) is decoded via 'utf-8-variants'."""
    bowdlerized_null = b'null\xc0\x80separated'
    result_str, result_encoding = guess_bytes(bowdlerized_null)
    assert result_str == 'null\x00separated'
    assert result_encoding == 'utf-8-variants'
def test_incomplete_sequences():
    """Splitting the byte stream at any point must not change the decode result."""
    test_bytes = b'surrogates: \xed\xa0\x80\xed\xb0\x80 / null: \xc0\x80'
    test_string = 'surrogates: \U00010000 / null: \x00'
    # Test that we can feed this string to decode() in multiple pieces, and no
    # matter where the break between those pieces is, we get the same result.
    for cut in range(len(test_string) + 1):
        decoder = IncrementalDecoder()
        pieces = [
            decoder.decode(test_bytes[:cut], final=False),
            decoder.decode(test_bytes[cut:]),
        ]
        assert ''.join(pieces) == test_string
| {
"repo_name": "LuminosoInsight/python-ftfy",
"path": "tests/test_bytes.py",
"copies": "1",
"size": "1624",
"license": "mit",
"hash": -5933866879196363000,
"line_mean": 31.22,
"line_max": 78,
"alpha_frac": 0.6579764122,
"autogenerated": false,
"ratio": 3.222,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.92799764122,
"avg_score": 0.02,
"num_lines": 50
} |
from ftpdata.models import Candidate, Committee
from summary_data.models import Candidate_Overlay, District, Committee_Overlay
from fec_alerts.models import newCommittee, f1filer
from summary_data.utils.term_reference import get_election_year_from_term_class, get_term_class_from_election_year
from summary_data.utils.party_reference import get_party_from_pty
from django.template.defaultfilters import slugify
def make_candidate_overlay_from_masterfile(candidate_id, cycle_to_copy_from=2016, election_year=2016, cycle_to_copy_to=2016, verify_does_not_exist=True, display_candidate=False):
    # Build a Candidate_Overlay row from the FEC candidate master file.
    # Returns the created overlay, or None when the candidate is skipped,
    # missing from the master file, or already has an overlay.
    if candidate_id == 'P20003851':
        # Hard-coded exclusion; presumably a known-bad master-file row -- TODO confirm.
        return None
    ## Returns overlay if created, None if not.
    if verify_does_not_exist:
        try:
            # If there's already a candidate overlay, don't do this.
            entered_candidate = Candidate_Overlay.objects.get(cycle=cycle_to_copy_to, fec_id=candidate_id)
            #print "Found candidate %s status %s" % (entered_candidate.name, entered_candidate.cand_ici)
            return None
        except Candidate_Overlay.DoesNotExist:
            pass
    thiscandidate = None
    try:
        thiscandidate = Candidate.objects.get(cycle=cycle_to_copy_from, cand_id=candidate_id, cand_election_year=election_year)
    except Candidate.DoesNotExist:
        print "Couldn't find candidate in masterfile id=%s election_year=%s cycle=%s" % (candidate_id, election_year, cycle_to_copy_from)
        return None
    state = thiscandidate.cand_office_st
    term_class = None
    if thiscandidate.cand_office == 'S':
        # Senate seats carry a term class derived from the election year.
        term_class = get_term_class_from_election_year(thiscandidate.cand_election_year)
    this_district = None
    try:
        # District lookup varies by office: Senate by term class, House by
        # district number, President by a single per-cycle district.
        if thiscandidate.cand_office == 'S':
            this_district = District.objects.get(election_year=election_year, state=state, office=thiscandidate.cand_office, term_class=term_class)
        elif thiscandidate.cand_office == 'H':
            this_district = District.objects.get(election_year=election_year, state=state, office=thiscandidate.cand_office, office_district=thiscandidate.cand_office_district)
        elif thiscandidate.cand_office == 'P':
            # there's a single presidential district per cycle, it needs to be created manually
            this_district = District.objects.get(election_year=election_year, office=thiscandidate.cand_office)
    except District.DoesNotExist:
        print "!! Invalid %s district for %s term_class=%s district=%s election_year=%s state=%s" % (thiscandidate.cand_office, thiscandidate.cand_name, term_class, thiscandidate.cand_office_district, thiscandidate.cand_election_year, state)
        # If we can't find a district, override the display setting--just don't display it.
        display_candidate = False
    co = Candidate_Overlay.objects.create(
        district=this_district,
        office_district=thiscandidate.cand_office_district,
        cycle=cycle_to_copy_to,
        fec_id=candidate_id,
        name=thiscandidate.cand_name,
        slug = slugify(thiscandidate.cand_name),
        pty=thiscandidate.cand_pty_affiliation,
        party = get_party_from_pty(thiscandidate.cand_pty_affiliation),
        pcc=thiscandidate.cand_pcc,
        term_class=term_class,
        election_year=thiscandidate.cand_election_year,
        curated_election_year=thiscandidate.cand_election_year,
        state=thiscandidate.cand_office_st,
        office=thiscandidate.cand_office,
        cand_ici=thiscandidate.cand_ici,
        candidate_status='D',
        display = display_candidate,
    )
    return co
# update committee information
def update_committee_from_masterfile(committee_id, cycle_to_copy_from=2016, cycle_to_copy_to=2016):
    # Refresh an existing Committee_Overlay from the committee master file.
    # Always returns None; missing or ambiguous rows are logged and skipped.
    #print "Updating %s" % (committee_id)
    c = None
    try:
        c = Committee.objects.get(cmte_id=committee_id, cycle=cycle_to_copy_from)
    except Committee.MultipleObjectsReturned:
        print "Multiple committees found with id=%s cycle=%s!" % (committee_id, cycle_to_copy_from)
        return None
    except Committee.DoesNotExist:
        print "Missing committee with id=%s cycle=%s!" % (committee_id, cycle_to_copy_from)
        return None
    committee_overlay = None
    try:
        committee_overlay = Committee_Overlay.objects.get(fec_id=committee_id, cycle=cycle_to_copy_from)
    except Committee_Overlay.DoesNotExist:
        # This shouldn't happen
        return None
    ctype = c.cmte_tp
    is_hybrid = False
    is_noncommittee = False
    is_superpac = False
    if ctype:
        # Committee type codes: O/U -> super PAC, V/W -> hybrid, I -> non-committee.
        if ctype.upper() in ['O', 'U']:
            is_superpac = True
        if ctype.upper() in ['V', 'W']:
            is_hybrid = True
        if ctype.upper() in ['I']:
            is_noncommittee = True
    party = c.cmte_pty_affiliation
    if party:
        party = get_party_from_pty(party)
    # Copy the master-file fields onto the overlay and save.
    committee_overlay.cycle = cycle_to_copy_to
    committee_overlay.name = c.cmte_name
    committee_overlay.slug = slugify(c.cmte_name)
    committee_overlay.party = party
    committee_overlay.treasurer = c.tres_nm
    committee_overlay.street_1 = c.cmte_st1
    committee_overlay.street_2 = c.cmte_st2
    committee_overlay.city = c.cmte_city
    committee_overlay.state = c.cmte_st
    committee_overlay.connected_org_name = c.connected_org_nm
    committee_overlay.filing_frequency = c.cmte_filing_freq
    committee_overlay.candidate_id = c.cand_id
    committee_overlay.is_superpac = is_superpac
    committee_overlay.is_hybrid = is_hybrid
    committee_overlay.is_noncommittee = is_noncommittee
    committee_overlay.designation = c.cmte_dsgn
    committee_overlay.ctype = ctype
    committee_overlay.save()
# the new committee list is no longer used. See below.
def make_committee_from_new_committee_list(committee_id, cycle='2016'):
    # Create a Committee_Overlay from a newCommittee row; returns the new
    # overlay, or None when the source row is missing/ambiguous or an
    # overlay already exists.
    nc = None
    try:
        nc = newCommittee.objects.get(fec_id = committee_id, cycle=cycle)
    except newCommittee.DoesNotExist:
        return None
    except newCommittee.MultipleObjectsReturned:
        return None
    co = None
    try:
        co = Committee_Overlay.objects.get(fec_id=committee_id, cycle=cycle)
        return None
    except Committee_Overlay.MultipleObjectsReturned:
        return None
    except Committee_Overlay.DoesNotExist:
        # only create one if this doesn't exist.
        #print "Creating committee from new committee %s" % (committee_id)
        ctype = nc.get_ctype()
        is_hybrid = False
        is_noncommittee = False
        is_superpac = False
        if ctype:
            # Committee type codes: O/U -> super PAC, V/W -> hybrid, I -> non-committee.
            if ctype.upper() in ['O', 'U']:
                is_superpac = True
            elif ctype.upper() in ['V', 'W']:
                is_hybrid = True
            elif ctype.upper() in ['I']:
                is_noncommittee = True
        #print cycle, nc.name, nc.fec_id, is_superpac, is_hybrid, is_noncommittee, ctype
        cm = Committee_Overlay.objects.create(
            cycle = cycle,
            name = nc.name,
            fec_id = nc.fec_id,
            slug = slugify(nc.name),
            is_superpac = is_superpac,
            is_hybrid = is_hybrid,
            is_noncommittee = is_noncommittee,
            ctype = ctype,
            is_dirty=True,
        )
        return cm
""" I think this format was also changed roughly May 12, 2015--this needs to be rewritten"""
def make_committee_from_f1filer(committee_id, cycle):
nc = None
try:
nc = f1filer.objects.get(cmte_id = committee_id, cycle=cycle)
except f1filer.DoesNotExist:
return None
except f1filer.MultipleObjectsReturned:
return None
co = None
try:
co = Committee_Overlay.objects.get(fec_id=committee_id, cycle=cycle)
return None
except Committee_Overlay.MultipleObjectsReturned:
return None
except Committee_Overlay.DoesNotExist:
# only create one if this doesn't exist.
print "Creating committee from new committee %s" % (committee_id)
ctype = nc.filed_cmte_tp
is_hybrid = False
is_noncommittee = False
is_superpac = False
if ctype:
if ctype.upper() in ['O', 'U']:
is_superpac = True
elif ctype.upper() in ['V', 'W']:
is_hybrid = True
elif ctype.upper() in ['I']:
is_noncommittee = True
is_paper_filer = False
if ctype == 'S':
is_paper_filer = True
#print cycle, nc.name, nc.fec_id, is_superpac, is_hybrid, is_noncommittee, ctype
cm = Committee_Overlay.objects.create(
cycle = cycle,
name = nc.cmte_nm,
fec_id = nc.cmte_id,
slug = slugify(nc.cmte_nm),
is_superpac = is_superpac,
is_hybrid = is_hybrid,
is_noncommittee = is_noncommittee,
ctype = ctype,
is_dirty=True,
is_paper_filer=is_paper_filer,
street_1 = nc.cmte_st1,
street_2 = nc.cmte_st2,
city = nc.cmte_city,
zip_code = nc.cmte_zip,
state = nc.cmte_st,
filing_frequency = nc.filing_freq,
treasurer = nc.tres_nm
)
return cm
def make_committee_overlay_from_masterfile(committee_id, cycle_to_copy_from=2016, cycle_to_copy_to=2016, verify_does_not_exist=True):
if committee_id == 'C00507947':
return None
c = None
try:
c = Committee.objects.get(cmte_id=committee_id, cycle=cycle_to_copy_from)
except Committee.MultipleObjectsReturned:
print "Multiple committees found with id=%s cycle=%s!" % (committee_id, cycle_to_copy_from)
return None
if verify_does_not_exist:
try:
Committee_Overlay.objects.get(fec_id=committee_id, cycle=cycle_to_copy_from)
# if it exists, update it with the current information
update_committee_from_masterfile(committee_id, cycle_to_copy_from=2016, cycle_to_copy_to=2016)
return None
except Committee_Overlay.DoesNotExist:
pass
ctype = c.cmte_tp
is_hybrid = False
is_noncommittee = False
is_superpac = False
is_paper_filer = False
if ctype:
if ctype.upper() in ['O', 'U']:
is_superpac = True
if ctype.upper() in ['V', 'W']:
is_hybrid = True
if ctype.upper() in ['I']:
is_noncommittee = True
if ctype.upper() in ['S']:
is_paper_filer = True
party = c.cmte_pty_affiliation
if party:
party = get_party_from_pty(party)
cm = Committee_Overlay.objects.create(
cycle = cycle_to_copy_to,
name = c.cmte_name,
fec_id = c.cmte_id,
slug = slugify(c.cmte_name),
party = party,
treasurer = c.tres_nm,
street_1 = c.cmte_st1,
street_2 = c.cmte_st2,
city = c.cmte_city,
state = c.cmte_st,
connected_org_name = c.connected_org_nm,
filing_frequency = c.cmte_filing_freq,
candidate_id = c.cand_id,
is_superpac = is_superpac,
is_hybrid = is_hybrid,
is_noncommittee = is_noncommittee,
designation = c.cmte_dsgn,
ctype = ctype,
is_dirty=True,
is_paper_filer=is_paper_filer
)
return cm
"""
from summary_data.utils.overlay_utils import make_candidate_overlay_from_masterfile
make_candidate_overlay_from_masterfile('H0CA48024', '2016') # issa
from summary_data.utils.overlay_utils import make_committee_overlay_from_masterfile
make_committee_overlay_from_masterfile('C00542779', '2016')
""" | {
"repo_name": "sunlightlabs/read_FEC",
"path": "fecreader/summary_data/utils/overlay_utils.py",
"copies": "1",
"size": "11848",
"license": "bsd-3-clause",
"hash": -7563190967634687000,
"line_mean": 35.5709876543,
"line_max": 241,
"alpha_frac": 0.6190074274,
"autogenerated": false,
"ratio": 3.585956416464891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4704963843864891,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP, error_perm, all_errors, error_temp
import io
import logging
from ..storage import Storage
logger = logging.getLogger(__name__)
class FTPStorage(Storage):
    """Storage backend that persists text files on a remote FTP server.

    Keeps one lazily-created FTP connection and transparently reconnects
    when the server has dropped it. (Python 2 era code: save() decodes
    ``str`` payloads from UTF-8.)
    """

    def __init__(self, root_dir='/historisation', host='localhost', username=None, password=None):
        # Fix: super() must be given the class being defined, not its base;
        # super(Storage, self) would skip Storage.__init__ in the MRO.
        super(FTPStorage, self).__init__()
        self.root_dir = root_dir
        self.host = host
        self.username = username
        self.password = password
        self._ftp = None  # cached connection; created on first use

    def get(self, path):
        """Return the file at *path* as text, or None if it cannot be read.

        NOTE(review): retrlines delivers lines without their terminators,
        so newlines in the stored file are not reproduced -- confirm this
        is the intended behaviour.
        """
        ftp = self._get_ftp()
        stream = io.StringIO()

        def write(line):
            stream.write(line.decode('utf8'))

        try:
            ftp.retrlines('RETR %s' % path, write)
            stream.seek(0)
            return stream.read()
        except error_perm:
            # Missing file / permission problems are reported as None.
            return None

    def exists(self, path):
        """Return True if *path* exists on the server."""
        ftp = self._get_ftp()
        try:
            ftp.nlst(path)
            return True
        except error_temp:
            # The server answers with a transient error for a missing path.
            return False

    def save(self, path, lines):
        """Append *lines* (a str or a line iterable) to the file at *path*."""
        ftp = self._get_ftp()
        if isinstance(lines, str):
            lines = io.StringIO(lines.decode('utf8'))
        ftp.storlines('APPE %s' % path, lines)

    def mkdir(self, path):
        """Create directory *path*, ignoring 'already exists' errors."""
        ftp = self._get_ftp()
        try:
            ftp.mkd(path)
        except error_perm:
            pass

    def get_dirs(self):
        """List the entries of the root directory."""
        ftp = self._get_ftp()
        return ftp.nlst()

    def get_files(self, path):
        """List the base names of the entries under *path*."""
        ftp = self._get_ftp()
        return [file_path.split('/')[-1] for file_path in ftp.nlst(path)]

    def join(self, *paths):
        """Join path components with '/' (remote paths are POSIX-style)."""
        return '/'.join(paths)

    def move(self, source, dest):
        """Rename *source* to *dest* on the server."""
        ftp = self._get_ftp()
        ftp.rename(source, dest)

    def _get_ftp(self):
        # Reuse the cached connection when it still responds; otherwise
        # open a fresh one.
        if not self._ftp:
            return self._get_new_ftp()
        try:
            self._ftp.cwd(self.root_dir)
            return self._ftp
        except all_errors:
            return self._get_new_ftp()

    def _get_new_ftp(self):
        # Connect, ensure the root directory exists, and chdir into it.
        self._ftp = FTP(self.host)
        self._ftp.login(self.username, self.password)
        try:
            self._ftp.mkd(self.root_dir)
        except error_perm:
            # Root directory already exists.
            pass
        self._ftp.cwd(self.root_dir)
        return self._ftp

    def cd_root(self, ftp):
        """Change *ftp*'s working directory back to the root and return it."""
        ftp.cwd(self.root_dir)
        return ftp
| {
"repo_name": "opendatasoft/ods-cookbook",
"path": "historisation/historisation/storages/ftp.py",
"copies": "1",
"size": "2290",
"license": "mit",
"hash": 1338464288513369900,
"line_mean": 23.8913043478,
"line_max": 98,
"alpha_frac": 0.5323144105,
"autogenerated": false,
"ratio": 3.729641693811075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4761956104311075,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP, error_perm
import os
from os.path import dirname
import re
from transmat.util import do_it
# Parses one line of an FTP LIST reply: group(1) = permissions column
# (leading 'd' marks a directory), group(2) = year or HH:MM timestamp,
# group(3) = the filename.
LIST_RE = re.compile('^(\S+).*\s+(20\d\d|\d\d\:\d\d)\s(.*?)$')
class FTPSession(object):
    """An object representing a session with an FTP server.

    This attempts to implement a robust session. If the FTP server
    disconnects us during an upload or download, we try to reconnect.
    """
    # Class-level cache of remote directories already created/verified.
    client = None
    dir_known_to_exist = {'/': True}
    def __init__(self, options=None, host=None, username=None, password=None,
                 root='/', index=None):
        # `options` supplies dry_run/compare/verbose_traversal/ignored_*
        # flags; `index` is the file index used by upload_if_newer().
        self.options = options
        self.host = host
        self.username = username
        self.password = password
        self.root = root
        self.index = index
    def connect(self):
        print "Connecting to %s..." % self.host
        self.client = FTP(self.host)
        self.client.login(self.username, self.password)
        self.client.cwd(self.root)
        print self.client.getwelcome()
    def disconnect(self):
        self.client.quit()
    def reconnect(self):
        # Called after the server drops us mid-transfer.
        print "! Resetting connection to server..."
        self.disconnect()
        self.connect()
    def mkdir(self, dir):
        # Create `dir` remotely (recursively creating parents), skipping
        # directories already known to exist. No-op in dry-run mode.
        if self.options.dry_run:
            return
        if dir not in self.dir_known_to_exist:
            print "Creating directory %s..." % dir
            try:
                self.client.mkd(dir)
            except error_perm, e:
                if "No such file" in str(e):
                    # assume this means parent doesn't exist yet
                    print "(Trying to create %s now...)" % dirname(dir)
                    self.mkdir(dirname(dir))
                    self.mkdir(dir)
                # almost... :)
                # NOTE(review): after a successful recursive create above,
                # this still re-raises because "File exists" is not part of
                # the original "No such file" message -- looks like a bug;
                # confirm intended behavior.
                if "File exists" not in str(e):
                    raise
            self.dir_known_to_exist[dir] = True
    def delete(self, filename):
        self.client.delete(filename)
    def upload(self, filename):
        # Upload relative to the session root, preserving the local path.
        return self.upload_to(filename, os.path.join(self.root, filename))
    def upload_to(self, local_filename, remote_filename):
        # Returns True when a real upload happened, False in dry-run mode
        # (where it may diff the local file against the remote copy).
        if self.options.dry_run:
            print "WOULD UPLOAD %s -> %s" % (local_filename, remote_filename)
            if self.options.compare and remote_filename != '/filehash.txt':
                temp_filename = "/tmp/file"
                try:
                    self.download_to(remote_filename, temp_filename)
                except Exception as e:
                    print str(e)
                    do_it("echo -n '' >%s" % temp_filename)
                do_it("diff -u '%s' '%s' || echo" %
                      (temp_filename, local_filename))
            return False
        print "UPLOAD %s -> %s" % (local_filename, remote_filename)
        dir = dirname(remote_filename)
        self.mkdir(dir)
        tries = 0
        # NOTE(review): opened in text mode but sent via storbinary --
        # would corrupt binaries on platforms with newline translation.
        local_file = open(local_filename)
        done = False
        while not done:
            try:
                stor = "STOR %s" % remote_filename.encode('utf-8')
                self.client.storbinary(stor, local_file)
                done = True
            except Exception, e:
                print "FAILURE: FTP STOR failed: " + str(e)
                if tries < 10:
                    # NOTE(review): the file position is not rewound before
                    # retrying, so a retry resumes mid-file -- confirm.
                    self.reconnect()
                    tries += 1
                    print "RETRY UPLOAD %s -> %s" % (local_filename,
                                                     remote_filename)
                else:
                    msg = "Can't connect to server, tried %s times" % tries
                    raise Exception(msg)
        local_file.close()
        return True
    def upload_if_newer(self, filename):
        """Upload `filename` only if the file index says it was touched.
        """
        if self.index.has_been_touched(filename):
            if self.upload(filename):
                self.index.update(filename)
        else:
            # Untouched file: still remember its directory exists remotely.
            dir = "/" + dirname(filename)
            self.dir_known_to_exist[dir] = True
    def download_to(self, remote_filename, local_filename):
        # Binary download, creating the local directory first if needed.
        local_dir = os.path.dirname(local_filename)
        if local_dir and not os.path.exists(local_dir):
            print "Dir '%s' does not exist, creating first..." % local_dir
            os.makedirs(local_dir)
        file = open(local_filename, "w")
        def receive(data):
            file.write(data)
        self.client.retrbinary("RETR %s" % remote_filename, receive)
        file.close()
    # XXX ignored_paths support is weak
    # XXX ignored_dir support is nonexistent
    def traverse_ftp_dir(self, process, dir):
        # Recursively walk the remote tree under `dir`, calling
        # process(dir, remote_dir, filename) for every regular file.
        remote_dir = os.path.normpath(os.path.join(self.root, dir))
        if remote_dir in ['/' + x for x in self.options.ignored_paths]:
            return
        self.client.cwd(remote_dir)
        entries = []
        subdirs = []
        def gather(line):
            entries.append(line)
        try:
            self.client.retrlines('LIST', gather)
        except EOFError as e:
            print "Hit EOF while listing remote directory %s" % remote_dir
            print "Gathered entries: %s" % entries
            # XXX try to recover?
            raise
        for entry in entries:
            match = LIST_RE.match(entry)
            if match is not None:
                filename = match.group(3)
                is_dir = False
                if match.group(1).startswith('d') and \
                   filename not in ['', '.', '..']:
                    is_dir = True
                if self.options.verbose_traversal:
                    print "%s%s" % (
                        os.path.join(remote_dir, filename),
                        "/" if is_dir else ""
                    )
                if is_dir:
                    subdirs.append(filename)
                else:
                    if filename in self.options.ignored_files:
                        continue
                    process(dir, remote_dir, filename)
            else:
                # XXX raise or ignore?
                if self.options.verbose_traversal:
                    print "No match on '%s'" % entry
        # Recurse after processing files so listings don't interleave.
        for subdir in subdirs:
            self.traverse_ftp_dir(process, os.path.join(dir, subdir))
| {
"repo_name": "cpressey/transmat",
"path": "src/transmat/remote.py",
"copies": "1",
"size": "6155",
"license": "unlicense",
"hash": -3599429425810284000,
"line_mean": 33.0055248619,
"line_max": 77,
"alpha_frac": 0.5127538587,
"autogenerated": false,
"ratio": 4.253628196268141,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5266382054968141,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
from differ import DictDiffer
import settings
import pprint
import pickle
import os
# Absolute directory containing this script; anchors hist.pkl and downloads.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def connectFtp(url, cwd, login, passwd):
    """Open an FTP connection, authenticate and change into `cwd`.

    Falls back to an anonymous login when either credential is missing.
    """
    connection = FTP(url)
    if login is None or passwd is None:
        connection.login()
    else:
        connection.login(login, passwd)
    connection.cwd(cwd)
    return connection
def main():
    # Compare each configured remote folder against the pickled history,
    # then interactively download any newly-added files.
    pp = pprint.PrettyPrinter(indent=4)
    ftp = connectFtp(settings.FTP_URL,
                     settings.BASE,
                     settings.LOGIN,
                     settings.PASSWD)
    tasks = []
    hist = {}
    # Load the previous listing snapshot, if one exists.
    if os.path.isfile(os.path.join(__location__, 'hist.pkl')):
        hist_pkl = open(os.path.join(__location__, 'hist.pkl'), 'rb')
        hist = pickle.load(hist_pkl)
        hist_pkl.close()
    for folder in settings.FOLDERS:
        print folder
        if not hist.has_key(folder):
            hist[folder] = {}
        # Map file name -> full remote path for the current listing.
        latest = {s : os.path.join(folder, s) for s in ftp.nlst(folder)}
        diff = DictDiffer(latest, hist[folder])
        if len(diff.added()) > 0:
            tasks += [(item, latest[item]) for item in diff.added()]
        if diff.changed():
            hist[folder] = latest
        # Persist the updated snapshot after every folder.
        hist_pkl = open(os.path.join(__location__, 'hist.pkl'), 'wb')
        pickle.dump(hist, hist_pkl)
        hist_pkl.close()
    print 'To be downloaded:'
    pp.pprint(tasks)
    c = raw_input('Continue? (y/n)')
    if c in 'Yy':
        for fname, path in tasks:
            print 'Downloading... %s' % fname
            if not os.path.exists(os.path.join(__location__, settings.DOWNLOAD)):
                os.makedirs(os.path.join(__location__, settings.DOWNLOAD))
            ftp.retrbinary('RETR %s' % path, open(os.path.join(__location__, settings.DOWNLOAD, fname), 'wb').write)
    else:
        print 'diff stored, bye!'
    ftp.close()
# Run the sync when executed as a script.
if __name__ == "__main__":
    main()
| {
"repo_name": "liusiqi43/diffftp",
"path": "run.py",
"copies": "1",
"size": "1931",
"license": "mit",
"hash": -1771974883195103200,
"line_mean": 27.3970588235,
"line_max": 116,
"alpha_frac": 0.5660279648,
"autogenerated": false,
"ratio": 3.562730627306273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4628758592106273,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
from fnmatch import fnmatch
import paramiko
import netrc
# Schedule filenames on the server use three-letter English month
# abbreviations; keyed here by the month number *as a string*.
month_dict = {'1': 'jan', '2': 'feb', '3': 'mar', '4': 'apr', '5': 'may',
              '6': 'jun', '7': 'jul', '8': 'aug', '9': 'sep', '10': 'oct',
              '11': 'nov', '12': 'dec'}
def get_last_svlbi_schedule(month, year, save_fn):
    """
    Get SVLBI schedule for given month and year.

    :param month:
        Month [1 - 12], int or str.
    :param year:
        Year (eg. 2016).
    :param save_fn:
        File to save.
    :raises Exception:
        If no matching schedule file is found on the server.
    """
    ftp = FTP(host='jet.asc.rssi.ru', user='anonymous', passwd='')
    try:
        ftp.cwd('/outgoing/yyk/Radioastron/block_schedule/')
        year = str(year)
        # Generalization: month_dict is keyed by str, so accept int months
        # too (the docstring advertises "[1 - 12]").
        pattern = 'RA_block_schedule.{}{}'.format(month_dict[str(month)],
                                                 year[2:])
        fname = None
        for fn in ftp.nlst():
            if fnmatch(fn, pattern):
                fname = fn
        if not fname:
            # Keep the exception *type* but give the caller a real message.
            raise Exception("No schedule matching {!r}".format(pattern))
        with open(save_fn, "wb") as write_file:
            ftp.retrbinary('RETR %s' % fname, write_file.write)
    finally:
        # BUG FIX: the original never closed the FTP connection.
        ftp.quit()
def get_last_srt_schedule(month, year, save_fn):
    """
    Get SRT schedule for given month and year via SFTP.

    :param month:
        Month [1 - 12].
    :param year:
        Year (eg. 2016).
    :param save_fn:
        File to save.
    :raises Exception:
        If no matching schedule file is found.
    """
    server = 'webinet.asc.rssi.ru'
    transport = paramiko.Transport((server, 21))
    secrets = netrc.netrc()
    netrclogin, netrcaccount, netrcpassword = secrets.authenticators(server)
    transport.connect(username=netrclogin, password=netrcpassword)
    sftp = paramiko.SFTPClient.from_transport(transport)
    sftp.chdir('/schedule/opersched/monthly/{}{}'.format(year, month))
    # BUG FIX: the original pattern 'srt_{}{}_v*.txt' was never formatted,
    # so fnmatch could not match any real filename. Argument order
    # (year, month) mirrors the chdir() path above -- confirm with actual
    # filenames on the server.
    pattern = 'srt_{}{}_v*.txt'.format(year, month)
    fnames = list()
    for fn in sftp.listdir():
        if fnmatch(fn, pattern):
            fnames.append(fn)
    if not fnames:
        raise Exception("No schedules!")
    # BUG FIX: sorted() returns a list; the original passed the whole list
    # to sftp.get(). Take the lexicographically last (= highest version).
    fname = sorted(fnames)[-1]
    sftp.get(fname, save_fn)
| {
"repo_name": "ipashchenko/watcher",
"path": "watcher/watcher.py",
"copies": "1",
"size": "1884",
"license": "mit",
"hash": 499657064305442240,
"line_mean": 28.4375,
"line_max": 76,
"alpha_frac": 0.5652866242,
"autogenerated": false,
"ratio": 3.3345132743362833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4399799898536283,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
from getpass import getpass
from os import path
# Interactively gather connection parameters; getpass keeps the password
# off the terminal echo.
host = input("Host > ")
port = 21
user = input("User for {}:{} > ".format(host, port))
password = getpass("Password for {} on {}:{} > ".format(user, host, port))
debuglevel = 1  # ftplib protocol trace verbosity
def store(connection, *args):
    "Store a file (opened in binary mode) specified by the user"
    filename = input("Filename to store > ") if not args else args[0]
    if path.exists(filename):
        fn_to_send = path.split(filename)[1]
        with open(filename, "rb") as fileobject:
            # BUG FIX: the original referenced the undefined name
            # 'connexion', raising NameError on every upload attempt.
            ret = connection.storbinary("STOR " + fn_to_send, fileobject)
        print(ret)
    else:
        print("[!] {} didn't exist in the current local working directory !".format(filename))
def ls(connection, *args):
    "List the file in the current working directory"
    print(connection.dir(*args))
def del_(connection, *args):
    "Delete a file specified by the user"
    target = args[0] if args else input("File to delete > ")
    print(connection.delete(target))
def ren(connection, *args):
    "Rename a file specified by the user"
    old_name = args[0] if args else input("Filename to change > ")
    new_name = args[1] if len(args) > 1 else input("New name for {} > ".format(old_name))
    print(connection.rename(old_name, new_name))
def mkd(connection, *args):
    "Make a new directory specified by the user"
    new_dir = args[0] if args else input("New dir name > ")
    print(connection.mkd(new_dir))
def cwd(connection, *args):
    "Change the current working directory"
    target_dir = args[0] if args else input("New working directory > ")
    print(connection.cwd(target_dir))
def cd(connection, *args):
    "Get the current working directory"
    print(connection.pwd())
def size(connection, *args):
    "Get the size of a filename specified by the user"
    target = args[0] if args else input("File to size > ")
    print(connection.size(target))
def rmd(connection, *args):
    "Remove a directory specified by the user"
    target_dir = args[0] if args else input("Directory to delete > ")
    print(connection.rmd(target_dir))
# Dispatch table mapping user commands to their handler functions.
commands = {
    "store": store,
    "ls": ls,
    "del": del_,
    "ren": ren,
    "mkd": mkd,
    "cwd": cwd,
    "cd": cd,
    "size": size,
    "rmd": rmd
}
print()
# Connect, authenticate and enable protocol tracing.
connection = FTP()
connection.connect(host, port)
connection.login(user, password)
connection.set_debuglevel(debuglevel)
print("\n" + connection.getwelcome() + "\n")
# Minimal REPL: first word is the command, the rest are its arguments.
while True:
    usercommand = input("\n$ ")
    cmd, *args = usercommand.split(" ")
    if cmd in commands.keys():
        commands[cmd](connection, *args)
    elif cmd == "quit":
        break
    elif cmd == "help":
        # The help text is each handler's docstring.
        for k, v in commands.items():
            print("\t- {} :\n{}\n".format(k, v.__doc__))
        print("\t- quit :\nClose the connection")
    else:
        print("Unrecognized command.")
connection.quit()
| {
"repo_name": "Loodoor/Simple-command-line-FTP",
"path": "main.py",
"copies": "1",
"size": "3028",
"license": "mit",
"hash": 8714897396668212000,
"line_mean": 25.3304347826,
"line_max": 94,
"alpha_frac": 0.6215323646,
"autogenerated": false,
"ratio": 3.44874715261959,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.457027951721959,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
from iniclass import INI
import subprocess as sp
import os
class FTPclass():
    """Downloads dependency scripts from the FTP server named in the INI
    configuration and launches them; DLLN is the Linux variant, DLWS the
    Windows one."""
    def __init__(self):
        self.ini = INI()
    def DLLN(self, scripts, directorylinx):
        """Download and execute each script on Linux.

        :param scripts: dict of str(position) -> filename, processed in
            positional order as given in the INI file.
        :param directorylinx: target directory for the downloads.
        """
        ip = self.ini.Config('ip')
        name = self.ini.Config('name')
        ftp = FTP(ip)
        ftp.login(name)
        os.chdir(directorylinx)
        count = 0
        for position in range(1, len(scripts) + 1):
            value = scripts[str(position)]
            size = ftp.size(value)
            ftp.retrbinary('RETR ' + value, open(value, 'wb').write)
            if os.path.exists(directorylinx + '//' + value):
                # BUG FIX: the original sized directorylinx + '\\' + value,
                # a Windows-style path that does not exist on Linux.
                # NOTE(review): this remains a busy-wait that spins forever
                # if the downloaded size never matches -- consider a retry.
                while os.path.getsize(directorylinx + '//' + value) != size:
                    continue
                sp.Popen([directorylinx + '//' + value])
            else:
                print('Download did not start for ' + value)
            count = count + 1
    def DLWS(self, scripts, directorywin):
        """Download and execute each script on Windows (.ps1/.exe/.msi)."""
        ip = self.ini.Config('ip')
        name = self.ini.Config('name')
        ftp = FTP(ip)
        ftp.login(name)
        os.chdir(directorywin)
        count = 0
        for position in range(1, len(scripts) + 1):
            value = scripts[str(position)]
            size = ftp.size(value)
            ftp.retrbinary('RETR ' + value, open(value, 'wb').write)
            if os.path.exists(directorywin + '\\' + value):
                while os.path.getsize(directorywin + '\\' + value) != size:
                    continue
                if '.ps1' in value:
                    # BUG FIX: the original referenced the undefined name
                    # 'directory' here (NameError); use directorywin.
                    powershell = sp.Popen([r'C:\WINDOWS\system32\WindowsPowerShell\v1.0\powershell.exe', '-ExecutionPolicy', 'Unrestricted', '' + directorywin + '\\' + value])
                    powershell.wait()
                elif '.exe' in value or '.msi' in value:
                    # BUG FIX: the original condition
                    # `'.exe' or '.msi' in value` was always true because
                    # the non-empty string '.exe' is truthy.
                    sp.Popen([directorywin + '\\' + value])
            else:
                print('Download did not start for ' + value)
            count = count + 1
# keep track of data in list
| {
"repo_name": "bandit145/PCsetup",
"path": "ftpclass.py",
"copies": "1",
"size": "2592",
"license": "mit",
"hash": 7515892738640259000,
"line_mean": 47,
"line_max": 300,
"alpha_frac": 0.5679012346,
"autogenerated": false,
"ratio": 4.140575079872204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004251166726392652,
"num_lines": 54
} |
from ftplib import FTP
from tkinter import messagebox
import pymysql as mysql
import sqlite3 as sqlite
import os
def flag_write(mode, flag):
    """Open the configuration file associated with `flag`.

    :param mode: file mode passed straight to open() (e.g. "r+", "w+").
    :param flag: "ftp" -> settings.conf, "pdf" -> pdf.conf.
    :raises ValueError: for any other flag -- the original silently
        returned None, which surfaced later as a confusing AttributeError.
    """
    filenames = {"ftp": "settings.conf", "pdf": "pdf.conf"}
    try:
        return open(filenames[flag], mode)
    except KeyError:
        raise ValueError("unknown configuration flag: {!r}".format(flag))
class SQLConfig:
    """MySQL connection settings read from mysql.conf (one value per line:
    host, username, password, database, table), or Turkish placeholder
    labels when `placeholder` compares equal to True."""
    def __init__(self, placeholder = False):
        if placeholder == False:
            with open("mysql.conf", "r+") as source:
                values = source.read().split("\n")
            self.host = values[0]
            self.username = values[1]
            self.password = values[2]
            self.database = values[3]
            self.table = values[4]
        else:
            # Placeholder labels shown in the configuration UI.
            self.host = "Host"
            self.username = "Kullanıcı Adı"
            self.password = "Şifre"
            self.database = "Veri Bankası"
            self.table = "Tablo"
class Configuration:
    """FTP or PDF-upload settings (domain, username, password) read from
    the conf file selected by `flag` via flag_write(), or placeholder
    labels when `placeholder` compares equal to True."""
    def __init__(self, flag="ftp", placeholder = False):
        if placeholder == False:
            source = flag_write("r+", flag=flag)
            values = source.read().split("\n")
            self.domain = values[0]
            self.username = values[1]
            self.password = values[2]
            source.close()
        else:
            # Placeholder labels shown in the configuration UI.
            self.domain = "Domain"
            self.username = "Kullanıcı Adı"
            self.password = "Şifre"
def saveSQL(host, username, password, database, table):
    """Persist MySQL settings to mysql.conf (one value per line) and show
    a success dialog."""
    with open("mysql.conf", "w") as target:
        target.write("\n".join([host, username, password, database, table]))
    messagebox.showinfo("Başarılı", "Ayarlar başarı ile kaydedildi.")
def save(domain, domainSent, password, flag="ftp"):
    """Persist connection settings via flag_write() and show a success
    dialog."""
    target = flag_write("w+", flag=flag)
    target.write("\n".join([domain, domainSent, password]))
    target.close()
    messagebox.showinfo("Başarılı", "Ayarlar başarı ile kaydedildi.")
def equlize():
    """Mirror the remote MySQL book table into the local SQLite file
    kütüphane.db, resetting every book's owner to "Kütüphane"."""
    conf = SQLConfig()
    initSQL = mysql.connect(conf.host, conf.username, conf.password,
                            conf.database, charset="utf8")
    initCursor = initSQL.cursor()
    # BUG FIX: the original passed conf.field1/conf.field2 to format(),
    # attributes SQLConfig never defines (AttributeError). Only the table
    # name is needed. NOTE: table name interpolated into SQL -- it comes
    # from the trusted local config file, not user input.
    initCursor.execute("SELECT * FROM {}".format(conf.table))
    fetchedData = initCursor.fetchall()
    initSQL.close()
    # BUG FIX: os.remove() raised FileNotFoundError when the local DB did
    # not exist yet; only delete it when present.
    if os.path.exists("kütüphane.db"):
        os.remove("kütüphane.db")
    targetSQL = sqlite.connect("kütüphane.db")
    targetCursor = targetSQL.cursor()
    targetCursor.execute("CREATE TABLE kitaplar(Isim TEXT, Sahip TEXT)")
    targetSQL.commit()
    for row in fetchedData:
        # NOTE(review): the original owner logic always ended up storing
        # "Kütüphane" regardless of row[3]; preserved here, but confirm
        # that discarding the real owner is intended.
        targetCursor.execute("INSERT INTO kitaplar values(\"{}\","
                             " \"{}\")".format(row[0], "Kütüphane"))
        targetSQL.commit()
    targetSQL.close()
def sync():
    """Upload the local kütüphane.db to the configured FTP server as
    library.db and show a success dialog."""
    conf = Configuration()
    with open("kütüphane.db", "rb") as source:
        beacon = FTP(conf.domain)
        beacon.login(conf.username, conf.password)
        beacon.storbinary("STOR library.db", source)
        beacon.quit()
    messagebox.showinfo("Başarılı", "Aktarım başarı ile tamamlandı.")
def uploadEbook(pdffile):
    """Upload `pdffile` to the PDF FTP endpoint under its own name and
    show a success dialog."""
    conf = Configuration(flag="pdf")
    with open(pdffile, "rb") as source:
        beacon = FTP(conf.domain)
        beacon.login(conf.username, conf.password)
        beacon.storbinary("STOR {}".format(pdffile), source)
        beacon.quit()
    messagebox.showinfo("Başarılı", "Aktarım başarı ile tamamlandı.")
| {
"repo_name": "egeemirozkan/ProjectLib",
"path": "beacon.py",
"copies": "1",
"size": "3619",
"license": "mit",
"hash": 2254296624313650200,
"line_mean": 31.5090909091,
"line_max": 79,
"alpha_frac": 0.5998322148,
"autogenerated": false,
"ratio": 3.44177093358999,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45416031483899905,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
from urllib.parse import urlparse
from io import BytesIO
class FtpClient:
    """Minimal FTP client configured from a single URL of the form
    ftp://user:password@host:port/path."""
    def __init__(self, url):
        self.parsed_url = urlparse(url)
    def upload(self, content, filename):
        """Store `content` (bytes) as `filename`, inside the URL path if
        one was given."""
        with FTP() as ftp:
            ftp.connect(self.parsed_url.hostname, port=self.parsed_url.port or 21)
            ftp.login(user=self.parsed_url.username, passwd=self.parsed_url.password)
            if self.parsed_url.path:
                ftp.cwd(self.parsed_url.path)
            ftp.storbinary('STOR ' + filename, BytesIO(content))
    def download(self, filename=None):
        """Retrieve `filename` (default: the URL path) and return its
        decoded text content."""
        target = filename or self.parsed_url.path
        with FTP() as ftp:
            ftp.connect(self.parsed_url.hostname, port=self.parsed_url.port or 21)
            ftp.login(user=self.parsed_url.username, passwd=self.parsed_url.password)
            buff = BytesIO()
            ftp.retrbinary('RETR ' + target, buff.write)
        return buff.getvalue().decode()
| {
"repo_name": "g8os/grid",
"path": "pyclient/zeroos/orchestrator/sal/FtpClient.py",
"copies": "2",
"size": "1049",
"license": "apache-2.0",
"hash": -3382464499749958000,
"line_mean": 35.1724137931,
"line_max": 85,
"alpha_frac": 0.6110581506,
"autogenerated": false,
"ratio": 3.8566176470588234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5467675797658823,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
from XenAPI import Session
import requests
from os import path, makedirs, remove
import time
###############################################
################ configuration ################
###############################################
# FTP target that receives the exported backups.
# SECURITY: credentials are hard-coded in source -- move them to a config
# file or environment variables before deploying.
ftp_host = "localhost"
ftp_user = "anonymous"
ftp_pw = "anonymous"
ftp_dir = "/backup"
ftp_count = 10
# XenServer API endpoint and credentials.
xen_host = "https://localhost/"
xen_user = "root"
xen_pw = "root"
# Local settings.
arb_backup_path = "/backup"
arb_exclude = []  # VM names to skip entirely
arb_vms_without_ram = []  # VMs snapshotted without RAM state (no checkpoint)
arb_time = time.strftime("%Y-%m-%d-%H-%M-%S")  # run timestamp used in labels
def get_session():
    """Authenticate against the XenServer API and return the session."""
    xen_session = Session(xen_host)
    xen_session.login_with_password(xen_user, xen_pw)
    return xen_session
def get_all_vms(session):
    """Return opaque refs for every VM known to the session."""
    vm_api = session.xenapi.VM
    return vm_api.get_all()
def snapshot_vm(session, record, vm):
    """Snapshot `vm` under a timestamped label; running VMs get a
    RAM-including checkpoint unless listed in arb_vms_without_ram."""
    label = record["name_label"] + "_" + arb_time
    with_ram = (record["name_label"] not in arb_vms_without_ram
                and record["power_state"] != "Halted")
    if with_ram:
        return session.xenapi.VM.checkpoint(vm, label)
    return session.xenapi.VM.snapshot(vm, label)
def export_vm(snapshot_id, vm_name):
    """Stream the snapshot's XVA export from XenServer straight into the
    per-VM backup directory on the FTP server."""
    local_dir = path.join(arb_backup_path, vm_name)
    if not path.exists(local_dir):
        makedirs(local_dir)
    export_url = xen_host + "export?uuid=" + snapshot_id
    filename = vm_name + "_" + arb_time + ".xva"
    f = FTP(ftp_host)
    f.login(ftp_user, ftp_pw)
    f.cwd(ftp_dir)
    if vm_name not in f.nlst():
        f.mkd(vm_name)
    f.cwd(vm_name)
    r = requests.get(export_url, stream=True, auth=(xen_user, xen_pw), verify=False)
    # 10 MiB blocks keep the transfer efficient for multi-GB images.
    f.storbinary("STOR {}".format(filename), r.raw, blocksize=(1024 * 1024 * 10))
def cleanup_backup(vm_name):
    """Keep only the newest ftp_count backups for `vm_name`, deleting the
    rest (names sort chronologically thanks to the timestamp suffix)."""
    f = FTP(ftp_host)
    f.login(ftp_user, ftp_pw)
    f.cwd(ftp_dir)
    f.cwd(vm_name)
    # BUG FIX: list.remove(".") raised ValueError on servers whose NLST
    # output omits the "."/".." entries; filter them out instead.
    files = [name for name in f.nlst() if name not in (".", "..")]
    if len(files) > ftp_count:
        files.sort()
        for file_name in files[:-ftp_count]:
            f.delete(file_name)
def delete_snapshot(session, vm, snapshot_id):
    # Locate the snapshot of `vm` whose uuid equals `snapshot_id`, destroy
    # it, and also destroy each of its disk VDIs (destroying the VM object
    # alone does not free the disks).
    record = session.xenapi.VM.get_record(vm)
    for snapshot in record["snapshots"]:
        uuid = session.xenapi.VM.get_uuid(snapshot)
        if uuid == snapshot_id:
            # Fetch the full record maps once so the VBD/VDI lookups
            # below are plain dict hits instead of API round-trips.
            all_vms = session.xenapi.VM.get_all_records()
            all_vbds = session.xenapi.VBD.get_all_records()
            all_vdis = session.xenapi.VDI.get_all_records()
            snapshot_record = all_vms[snapshot]
            session.xenapi.VM.destroy(snapshot)
            for vbd in snapshot_record["VBDs"]:
                vbd_record = all_vbds[vbd]
                if vbd_record["type"] == "Disk":
                    vdi_record = all_vdis[vbd_record["VDI"]]
                    vdi = session.xenapi.VDI.get_by_uuid(vdi_record["uuid"])
                    session.xenapi.VDI.destroy(vdi)
def backup():
    """Snapshot, export, rotate and clean up every real VM (skipping
    templates, the control domain and excluded names)."""
    session = get_session()
    for vm in get_all_vms(session):
        record = session.xenapi.VM.get_record(vm)
        eligible = (record["name_label"] not in arb_exclude
                    and record["is_a_template"] is False
                    and record["is_control_domain"] is False)
        if not eligible:
            continue
        snapshot = snapshot_vm(session, record, vm)
        snapshot_id = session.xenapi.VM.get_uuid(snapshot)
        export_vm(snapshot_id, record["name_label"])
        cleanup_backup(record["name_label"])
        delete_snapshot(session, vm, snapshot_id)
# Runs a full backup immediately on import/execution.
# NOTE(review): consider guarding with `if __name__ == "__main__":`.
backup()
| {
"repo_name": "curcas/xenserver-backup",
"path": "backup.py",
"copies": "1",
"size": "3356",
"license": "mit",
"hash": -7415859415885226000,
"line_mean": 26.9666666667,
"line_max": 129,
"alpha_frac": 0.5870083433,
"autogenerated": false,
"ratio": 3.3260654112983152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9402388912872344,
"avg_score": 0.002136968345194158,
"num_lines": 120
} |
from ftplib import FTP
import bs4
import calendar
import requests
import time
# Seconds per hour; used to look one hour ahead per page index.
seconds_in_hour = 60 * 60
def get_list(url, greenwich_time):
    """Scrape `url` and return an info dict for every broadcast <span>
    whose end time is at or after `greenwich_time` (epoch milliseconds)."""
    page = requests.get(url)
    soup = bs4.BeautifulSoup(page.text, "html.parser")
    streams = []
    for span in soup.find_all("span"):
        if "data-broadcast-end" not in span.attrs or "data-redir" not in span.attrs:
            continue
        if int(span["data-broadcast-end"]) < greenwich_time:
            continue
        streams.append({
            'url': span["data-redir"],
            'title': span.findChild("span", {"class": "title"}),
            'time': span.findChild("span", {"class": "time"}),
            "category": span.findChild("span", {"class": "category"}),
        })
    return streams
def get_modified_time(page_index):
    """Return the current epoch time shifted forward by `page_index` hours."""
    return time.time() + page_index * seconds_in_hour
def get_piwik():
    # Returns the Piwik analytics snippet embedded into every page's head.
    piwik = """
    <!-- Piwik -->
    <script type="text/javascript">
    var _paq = _paq || [];
    _paq.push(['trackPageView']);
    _paq.push(['enableLinkTracking']);
    (function() {
    var u="//piwik.linuxpl.com/";
    _paq.push(['setTrackerUrl', u+'piwik.php']);
    _paq.push(['setSiteId', 9702]);
    var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
    g.type='text/javascript'; g.async=true; g.defer=true; g.src=u+'piwik.js'; s.parentNode.insertBefore(g,s);
    })();
    </script>
    <noscript><p><img src="//piwik.linuxpl.com/piwik.php?idsite=9702" style="border:0;" alt="" /></p></noscript>
    <!-- End Piwik Code -->
    """
    return piwik
def write_header(f, page_index):
    # Writes the HTML prologue plus an <h1> showing the local time the
    # page refers to (now + page_index hours); leaves an open <p>.
    header = """<!DOCTYPE html>
<html>
<head>
<title>
Transmisje z Rio</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="keywords" content="Rio Rio2016 Igrzyska Olimpijskie Olimpiada ">
<meta name="description" content="Transmisje z Rio">
<style type="text/css">
body {background-color:ffffff;background-repeat:no-repeat;background-position:top left;background-attachment:fixed;}
h1{font-family:Arial;color:000000;}
p {font-family:Arial;font-size:24px;font-style:normal;font-weight:normal;color:000000;}
</style>
""" + get_piwik() + """
</head>
<body>
<h1>Transmisje z Rio godzina
"""
    f.write(header)
    local_time = time.localtime(get_modified_time(page_index))
    f.write(str(local_time.tm_hour) + ":" + str(local_time.tm_min))
    f.write("</h1><p>")
def write_footer(f):
    """Close the tags left open by write_header()."""
    f.write("</p></body></html>")
def get_page_filename(page_index):
    """Page 0 is index.html; subsequent hours are rio<N>.html."""
    return "rio" + str(page_index) + ".html" if page_index > 0 else "index.html"
def create_page(page_index):
    # Build the HTML page listing streams still running `page_index` hours
    # from now; the TVP API expects epoch milliseconds.
    cur_time = calendar.timegm(time.gmtime(get_modified_time(page_index))) * 1000
    # NOTE(review): the "©" in both URLs looks like a mangled "&copy"
    # HTML-entity from an earlier conversion -- verify against the live API.
    url1 = "http://www.api.v3.tvp.pl/shared/listing.php?portal_name=rio2016.tvp.pl&portal_id=19369963&parent_id=24035157&type=directory_standard©=false&direct=true&order=position%2C1&count=-1&epg_start=" \
        + str(cur_time) + "&epg_end=" \
        + str(cur_time) + "&template=epg%2Fdisciplines-listing.html"
    url2 = "http://www.api.v3.tvp.pl/shared/listing.php?portal_name=rio2016.tvp.pl&portal_id=19369963&parent_id=25851771&type=virtual_channel©=false&direct=true&order=position%2C1&count=-1&epg_start=" \
        + str(cur_time) + "&epg_end=" \
        + str(cur_time) + "&template=epg%2Fchannels-listing.html"
    streams = get_list(url1, cur_time)
    streams += get_list(url2, cur_time)
    # Deduplicate (dicts are unhashable, so round-trip through item
    # tuples), then sort alphabetically by title text.
    streams = [dict(p) for p in set(tuple(i.items()) for i in streams)]
    streams = sorted(streams, key=lambda k: k['title'].text)
    filename = get_page_filename(page_index)
    with open(filename, 'w') as f:
        write_header(f, page_index)
        for s in streams:
            if s["category"]:
                value = "<a href=""{3}""> {0}: {1} {2}</a><br>\n".format(s["title"].text, s["category"].text,
                                                                        s["time"].text, s["url"])
                f.write(value)
            else:
                value = "<a href=""{2}""> {0} {1}</a><br>\n".format(s["title"].text, s["time"].text, s["url"])
                f.write(value)
        if page_index < 3:
            # Link each page to the next hour's page, except the last one.
            value = "<a href=""{1}""> {0}</a><br>\n".format(
                "Transmisje kończace się w następnej godzinie", "rio" + str(page_index + 1) + ".html")
            f.write(value)
        write_footer(f)
def send_page(page_index):
    # Upload the generated page for `page_index` to the web host via FTP.
    filename = get_page_filename(page_index)
    # SECURITY: FTP credentials are hard-coded and committed to source --
    # move them to environment variables or a config file and rotate the
    # password.
    ftp = FTP('s34.linuxpl.com')
    ftp.login(user='piotrbv', passwd='wyytm2705')
    ftp.cwd('public_html')  # change into the public web root
    ftp.storbinary('STOR ' + filename, open(filename, 'rb'))
    ftp.quit()
def main():
    """Regenerate and upload all four pages forever, every ten minutes."""
    while True:
        for page_index in range(4):
            create_page(page_index)
        for page_index in range(4):
            send_page(page_index)
        ten_minutes = 10 * 60
        print(time.localtime())
        time.sleep(ten_minutes)
# Start the endless generate-and-upload loop when run as a script.
if __name__ == "__main__":
    main()
| {
"repo_name": "piotrbla/pyExamples",
"path": "getRio.py",
"copies": "1",
"size": "4982",
"license": "mit",
"hash": 1434672224573805800,
"line_mean": 34.5642857143,
"line_max": 209,
"alpha_frac": 0.5914842338,
"autogenerated": false,
"ratio": 3.0249088699878492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9109066367757284,
"avg_score": 0.0014653472061129861,
"num_lines": 140
} |
from ftplib import FTP
import csv
# Download the NASDAQ's list of stock symbols
def fetchNASDAQfile():
print "Fetching NASDAQ file..."
try:
ftp = FTP('ftp.nasdaqtrader.com')
ftp.login()
ftp.cwd('SymbolDirectory')
ftp.retrbinary('RETR nasdaqlisted.txt', open('nasdaqlisted.txt', 'wb').write)
print("Done fetching NASDAQ file.")
except:
print("Error! Could not fetch list of NASDAQ stock symbols.")
# Reads the NASDAQ file, parses it, and returns a list of all stock symbols
def parseNASDAQfile():
print "Parsing NASDAQ file..."
list = []
# Open the file and grab the first column
with open('nasdaqlisted.txt', 'rb') as csvfile:
symbolReader = csv.reader(csvfile, delimiter='|', quotechar='"')
for row in symbolReader:
list = list + [row[0]]
print("Done parsing NASDAQ file.")
del list[0]
del list [-1]
return list
# Returns a list of all stock symbols
def getStockList():
    """Download the NASDAQ symbol directory and return the parsed symbols."""
    fetchNASDAQfile()
    return parseNASDAQfile()
| {
"repo_name": "trswany/topStocks",
"path": "stockList.py",
"copies": "1",
"size": "1045",
"license": "mit",
"hash": 7704432297151843000,
"line_mean": 30.6666666667,
"line_max": 85,
"alpha_frac": 0.64784689,
"autogenerated": false,
"ratio": 3.448844884488449,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9553135211215,
"avg_score": 0.008711312654689801,
"num_lines": 33
} |
from ftplib import FTP
import ftplib
import settings as settings
import glob
import os
"""
Will deliver to the named HW ftp bucket.
## Gotchas
I noted that on one run with a large number of PDFs the upload speed
was very slow, might need to be investigated
# TODO - elife - imulvany - put in place a check to see that zip files contain all expected data
# TODO - elife - imulvany - find out the final location for FTP delivery
"""
# All delivery settings come from the project-level settings module.
source_dir = settings.FTP_TO_HW_DIR
ftpuri = settings.FTP_URI
ftpusername = settings.FTP_USERNAME
ftppassword = settings.FTP_PASSWORD
ftpcwd = settings.FTP_CWD
def upload(ftp, file):
ext = os.path.splitext(file)[1]
print file
uploadname = file.split(os.sep)[-1]
if ext in (".txt", ".htm", ".html"):
ftp.storlines("STOR " + file, open(file))
else:
print "uploading " + uploadname
ftp.storbinary("STOR " + uploadname, open(file, "rb"), 1024)
print "uploaded " + uploadname
def ftp_cwd_mkd(ftp, sub_dir):
    """
    cwd into `sub_dir` on the given FTP connection, creating it first
    when the initial cwd fails. Returns True if the directory already
    existed, False if it had to be created.
    """
    try:
        ftp.cwd(sub_dir)
        return True
    except ftplib.error_perm:
        # Directory probably does not exist -- create it, then retry.
        ftp.mkd(sub_dir)
        ftp.cwd(sub_dir)
        return False
def ftp_to_endpoint(zipfiles, sub_dir = None):
    """Upload each zip in `zipfiles`, connecting afresh per file and
    descending into FTP_CWD and then `sub_dir` when configured."""
    for archive in zipfiles:
        ftp = FTP(ftpuri, ftpusername, ftppassword)
        ftp_cwd_mkd(ftp, "/")
        if ftpcwd != "":
            ftp_cwd_mkd(ftp, ftpcwd)
        if sub_dir is not None:
            ftp_cwd_mkd(ftp, sub_dir)
        upload(ftp, archive)
        ftp.quit()
if __name__ == "__main__":
zipfiles = glob.glob(source_dir + "/*.zip")
ftp_to_endpoint(zipfiles)
workflow_logger.info("files uploaded to endpoint using ftp")
| {
"repo_name": "gnott/elife-poa-xml-generation",
"path": "ftp_to_highwire.py",
"copies": "1",
"size": "1805",
"license": "mit",
"hash": 7951888752992876000,
"line_mean": 22.75,
"line_max": 96,
"alpha_frac": 0.6958448753,
"autogenerated": false,
"ratio": 2.959016393442623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4154861268742623,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
import os
class FTPClient(object):
    """Context-managed FTP download helper; subclasses provide the
    connection settings as class attributes."""
    url = None
    user = None
    password = None
    pasv = None
    def __enter__(self):
        self.ftp = FTP(self.url, self.user, self.password)
        self.ftp.set_pasv(self.pasv)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Close the connection exactly once, even after an error.
        if self.ftp:
            self.ftp.quit()
            self.ftp = None
    def get(self, remotefile, localfile):
        """Download `remotefile` into `localfile` in binary mode."""
        with open(localfile, "wb") as target:
            # The bound write method serves directly as the callback.
            self.ftp.retrbinary("RETR {}".format(remotefile), target.write)
    def getMdtm(self, remotefile):
        """Return the server's modification timestamp for `remotefile`;
        raises unless the server replies with code 213."""
        parts = self.ftp.sendcmd("Mdtm " + remotefile).split(None, 1)
        if parts[0] != "213":
            raise Exception("{}:{}".format(*parts))
        return parts[1]
class BomFTP(FTPClient):
    # Connection settings for the BOM endpoint, read from the environment
    # once, at import time (changing the env later has no effect).
    url = os.environ.get("BOM_FTP_URL")
    user = os.environ.get("BOM_FTP_USER")
    password = os.environ.get("BOM_FTP_PASSWORD")
    # Any of "true/yes/t/y/on" (case-insensitive) enables passive mode;
    # unset or anything else means active mode.
    pasv = (os.environ.get("BOM_FTP_PASV") or "false").lower() in ("true","yes","t","y","on")
| {
"repo_name": "wilsonc86/gokart",
"path": "gokart/ftp.py",
"copies": "1",
"size": "1110",
"license": "apache-2.0",
"hash": 6852485512550168000,
"line_mean": 26.75,
"line_max": 93,
"alpha_frac": 0.572972973,
"autogenerated": false,
"ratio": 3.3433734939759034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44163464669759034,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
import os
def getsize(ftp, filename):
    """Return filename's size in bytes, or -1 when the server cannot say.

    Switches the session to binary mode first, since the FTP SIZE command
    is only well-defined for TYPE I transfers.
    """
    try:
        ftp.sendcmd('TYPE i')
        return ftp.size(filename)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any FTP failure still yields -1.
        return -1
def listdir(ftp):
    """Return [(name, size), ...] for the current remote directory.

    Returns [] when the listing fails for any reason.
    """
    filenames = []
    try:
        ftp.retrlines('NLST', filenames.append)
        return [(name, getsize(ftp, name)) for name in filenames]
    except Exception:
        # Narrowed from a bare except so Ctrl-C is not swallowed.
        return []
def uploadLunch(filename, percent_callback=None):
    """Upload filename into the "Lunch" folder on the NAS.

    Bug fix: the original passed the literal None as percent_callback,
    silently discarding the caller's progress callback.
    """
    return upload(filename, folder="Lunch", percent_callback=percent_callback)
def uploadMorning(filename, percent_callback=None):
    """Upload filename into the "Morning" folder on the NAS.

    Bug fix: the original passed the literal None as percent_callback,
    silently discarding the caller's progress callback.
    """
    return upload(filename, folder="Morning", percent_callback=percent_callback)
def listMorning():
    # NOTE(review): dead code - this stub is immediately shadowed by the
    # second listMorning definition below and can never be called.
    pass
def listMorning():
    """List the contents of the remote "Morning" folder."""
    return open_and_list("Morning")
def listLunch():
    """List the contents of the remote "Lunch" folder."""
    return open_and_list("Lunch")
def open_and_list(folder):
    """Connect to the NAS and list usbdisk1/<folder>.

    Returns [(name, size), ...] via listdir(), or [] on any failure
    (connection refused, bad folder, ...).
    """
    try:
        result = []
        ftp = FTP('192.168.0.108', 'daily', 'password')
        try:
            ftp.cwd('usbdisk1/{}'.format(folder))
            result = listdir(ftp)
        finally:
            ftp.quit()
        return result
    except Exception:
        # Narrowed from a bare except so Ctrl-C is not swallowed.
        return []
def upload(filename, folder, percent_callback=None):
    """Upload filename into usbdisk1/<folder> on the NAS.

    percent_callback, when given, is invoked with an int (0-100) each
    time the transferred whole-percentage advances.
    Returns True on success, False on any failure.
    """
    try:
        fsize = os.path.getsize(filename)
        result = False
        ftp = FTP('192.168.0.108', 'daily', 'password')
        try:
            ftp.cwd('usbdisk1/{}'.format(folder))
            progfunc = None
            if percent_callback:
                # Bug fix: the original declared "global sentsize" inside
                # the callback, but sentsize was a local of upload(), so
                # the first chunk raised NameError. Track the running
                # byte count in a mutable cell instead.
                sent = [0]
                def progfunc(data):
                    lastprog = int(100.0 * sent[0] / fsize)
                    sent[0] += len(data)
                    progress = int(100.0 * sent[0] / fsize)
                    if progress > lastprog:
                        percent_callback(progress)
            with open(filename, 'rb') as infile:
                ftp.storbinary("STOR {}".format(os.path.basename(filename)), infile, callback=progfunc)
            result = True
        finally:
            ftp.quit()
        return result
    except Exception:
        # Narrowed from a bare except so Ctrl-C is not swallowed.
        return False
| {
"repo_name": "wilkinsg/piweb",
"path": "ftp.py",
"copies": "1",
"size": "2119",
"license": "mit",
"hash": -3041397624167142400,
"line_mean": 27.6351351351,
"line_max": 107,
"alpha_frac": 0.5191127891,
"autogenerated": false,
"ratio": 3.8880733944954127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.964634762346338,
"avg_score": 0.05216771202640647,
"num_lines": 74
} |
from ftplib import FTP
import popen2, time, sys
# Positional command-line contract:
#   1 directory  2 filename  3 download_dir  4 host
#   5 expected file size (from FTP)  6 path to the helper scripts
directory = sys.argv[1]
filename = sys.argv[2]
download_dir = sys.argv[3]
host = sys.argv[4]
size_ftp = sys.argv[5]
path_to_cvs = sys.argv[6]
'''this commands needs to be redirected to /dev/null or it won't work, also can't read out from this for debugging b/c process will wait'''
command = "python " + path_to_cvs + "/download_remote.py " + directory + " " + filename + " " + download_dir + " " + host + " >& /dev/null"
print command
e = popen2.Popen4(command,1)
#output = e.fromchild.readlines()
#errors = e.childerr.read()
#print output
#print errors
downloaded = 'no'
import time
# Poll once per second, up to 10 minutes, for the child download to finish.
for j in range(600):
    time.sleep(1)
    print e.poll(), e.pid, j
    #break time cycle if completed
    from glob import glob
    file_there = glob(download_dir + "/" + directory + "/" + filename)
    print file_there
    downloaded = 'not'
    downloading = 'not'
    if len(file_there) > 0:
        import os
        stats = os.stat(download_dir + "/" + directory + "/" + filename)
        last_time = stats[-1]
        size = stats[6]  # st_size
        #print int(size_ftp), int(size), filename
        # The download is complete once the on-disk size matches the
        # size reported over FTP.
        if int(size_ftp) == int(size):
            import os
            import signal
            os.kill(e.pid,signal.SIGKILL)
            os.system("kill -9 " + str(e.pid))
            downloaded = 'yes'
            # NOTE(review): popen2 poll() returns -1 while the child is
            # still running - presumably these branches detect child exit.
            if e.poll() != -1:
                import os
                os.system("mv " + download_dir + "/" + directory + "/tmp" + host + filename + " " + download_dir + "/" + directory + "/" + filename)
                break
        if e.poll() != -1:
            import os
            os.system("rm " + download_dir + directory + "/tmp" + host + filename)
            break
    #kill if download times out and delete image
    if j == 598:
        import os
        import signal
        os.kill(e.pid,signal.SIGKILL)
        os.system("kill -9 " + str(e.pid))
        os.system("rm " + download_dir + directory + "/tmp" + host + filename)
| {
"repo_name": "deapplegate/wtgpipeline",
"path": "download_manager.py",
"copies": "1",
"size": "1950",
"license": "mit",
"hash": -8201728422261608000,
"line_mean": 32.0508474576,
"line_max": 139,
"alpha_frac": 0.5748717949,
"autogenerated": false,
"ratio": 3.2072368421052633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4282108637005263,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
import pprint
pp = pprint.PrettyPrinter(indent = 4)
def _munge_fields(s, the_dict):
fields = s.split('=')
if len(fields) == 1:
key = 'name'
value = fields[0]
else:
key = fields[0]
value = fields[1]
if key == 'size':
value = int(value)
the_dict[key] = value
def get_paths(year):
    """Yield one fact-dict per entry of the NOAA FTP listing for *year*.

    Each dict carries the MLSD facts plus the entry 'name'; 'size' is an
    int. The '.' and '..' pseudo-entries are skipped.
    """
    for line in _get_paths(year):
        fields = line.split(";")
        # The last ';'-separated field of an MLSD line is the entry name.
        if fields[-1].strip() in ('.', '..'):
            continue
        the_dict = {}
        for field in fields:
            # Removed dead code: _munge_fields returns None, so binding
            # its result (and the pre-loop the_dict) was pointless.
            _munge_fields(field.strip(), the_dict)
        yield the_dict
def _get_paths(year):
    """Return the raw MLSD listing lines for /pub/data/noaa/<year>/ on
    NOAA's public FTP server (anonymous login)."""
    lines = []
    with FTP('ftp.ncdc.noaa.gov') as ftp:
        ftp.login()
        ftp.cwd('/pub/data/noaa/{0}/'.format(year))
        ftp.retrlines("MLSD", lines.append)
    return lines
def make_chunks(chunk_size):
    """Yield lists of NOAA file-info dicts whose cumulative 'size'
    exceeds chunk_size.

    Walks every year directory from 1901 through 2017, tagging each
    entry with its 'year'. (Removed the unused `final` and
    `size_of_chunk` locals from the original.)
    """
    temp = []
    size = 0
    for year in range(1901, 2018):
        for info in get_paths(year):
            info['year'] = year
            size += info['size']
            if size > chunk_size:
                yield temp
                size = 0
                temp = []
            temp.append(info)
    # NOTE(review): the final partial chunk left in `temp` is never
    # yielded, so trailing entries are silently dropped - confirm this
    # is intended before changing it.
| {
"repo_name": "paulhtremblay/boto_emr",
"path": "boto_emr/ftp_chunker.py",
"copies": "1",
"size": "1308",
"license": "mit",
"hash": -5170773513033123000,
"line_mean": 23.2222222222,
"line_max": 67,
"alpha_frac": 0.4816513761,
"autogenerated": false,
"ratio": 3.442105263157895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4423756639257895,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
import sys, os, os.path, operator
def upload(handle,filename):
f = open(filename,"rb")
(base,ext) = os.path.splitext(filename)
picext = ".bmp .jpg .jpeg .dib .tif .tiff .gif .png"
if(operator.contains(picext,ext)):
try:
handle.storbinary("STOR " + filename,f,1)
except Exception:
print "Successful upload."
else:
print "Successful upload."
f.close()
return
try:
handle.storbinary("STOR " + filename,f)
except Exception:
print "Successful upload."
else:
print "Successful upload."
f.close()
return
def download(handle,filename):
f2 = open(filename,"wb")
try:
handle.retrbinary("RETR " + filename,f2.write)
except Exception:
print "Error in downloading the remote file."
return
else:
print "Successful download!"
f2.close()
return
# --- CLIFTP: interactive FTP shell (Python 2) ---
print "CLIFTP ~ NSP Corp.\n\n"
# Connect: normalize the host name, then log in.
host_name = raw_input("Enter website name to connect to, exclude ftp notation: ")
if "http://" in host_name:
    host_name = host_name.replace("http://","")
host_name = host_name.replace("\n","")
user = raw_input("Enter username: ")
pwd = raw_input("Enter password: ")
try: ftph = FTP(host_name)
except:
    print "Host could not be resolved."
    raw_input()
    sys.exit()
else: pass
try:
    ftph.login(user,pwd)
except Exception:
    # NOTE(review): due to and/or precedence this condition does not
    # check exactly "user and pwd are both anonymous" - left as-is.
    if user == "anonymous" or user == "Anonymous" and pwd == "anonymous" or pwd == "Anonymous":
        print "The server does not accept anonymous requests."
        raw_input()
        sys.exit()
    else:
        print "Invalid login combination."
        raw_input()
        sys.exit()
else:
    print "Successfully connected!\n"
    print ftph.getwelcome()
flag = 1
count = 0
path = ftph.pwd()
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
print "Press help at any time to see proper usage.\n"
# --- command loop: one command per prompt until "term" ---
while flag:
    command = raw_input("FTP ]> ")
    if "get " in command:
        rf = command.replace("get ","")
        rf = rf.replace("\n","")
        download(ftph,rf)
        continue
    elif "put " in command:
        lf = command.replace("put ","")
        lf = lf.replace("\n","")
        upload(ftph,lf)
        # Reconnect after an upload (fresh session).
        ftph.close()
        ftph = FTP(host_name)
        ftph.login(user,pwd)
        continue
    elif "makedir " in command:
        mkdirname = command.replace("makedir ","")
        mkdirname = mkdirname.replace("\n","")
        try: ftph.mkd(mkdirname)
        except:
            print "Incorrect usage."
            continue
        else:
            print "Directory created."
            continue
    elif "remdir " in command:
        rmdirname = command.replace("remdir ","")
        rmdirname = rmdirname.replace("\n","")
        # Delete every file inside the directory first, then the
        # directory itself (FTP cannot remove non-empty directories).
        current = ftph.pwd()
        ftph.cwd(rmdirname)
        allfiles = ftph.nlst()
        for file in allfiles:
            try:
                ftph.delete(file)
            except Exception:
                pass
            else:
                pass
        ftph.cwd(current)
        try:
            ftph.rmd(rmdirname)
        except Exception:
            print "All files within the directory have been deleted, but there is still another directory inside. As deleting this directory automatically goes against true FTP protocol, you must manually delete it, before you can delete the entire directory."
        else:
            print "Directory deleted."
        continue
    elif command == "dir":
        print ftph.dir()
        continue
    elif command == "currdir":
        print ftph.pwd()
        continue
    elif "chdir " in command:
        dirpath = command.replace("chdir ","")
        dirpath = dirpath.replace("\n","")
        ftph.cwd(dirpath)
        print "Directory changed to " + dirpath
        continue
    elif command == "up":
        # Walk the pwd string backwards, trimming characters until the
        # parent path remains. NOTE(review): fragile string surgery -
        # presumably equivalent to cwd('..'); confirm before touching.
        dir = ftph.pwd()
        temp = dir
        index = len(dir) - 1
        for i in range(index,0,-1):
            if temp[i] == "/" and i != len(dir):
                ftph.cwd(temp)
                print "One directory back."
                continue
            if(operator.contains(charset,dir[i])):
                temp = temp[:-1]
        if temp=="/":
            ftph.cwd(temp)
            print "One directory back."
    elif command == "rename":
        fromname = raw_input("Current file name: ")
        toname = raw_input("To be changed to: ")
        ftph.rename(fromname,toname)
        print "Successfully renamed."
        continue
    elif "delete " in command:
        delfile = command.replace("delete ","")
        delfile = delfile.replace("\n","")
        ftph.delete(delfile)
        print "File successfully deleted."
        continue
    elif command == "term":
        ftph.close()
        print "Session ended."
        raw_input()
        sys.exit()
    elif "size " in command:
        szfile = command.replace("size ","")
        szfile = szfile.replace("\n","")
        print "The file is " + str(ftph.size(szfile)) + " bytes."
        continue
    elif command == "debug -b":
        ftph.set_debuglevel(1)
        print "Debug mode set to base."
        continue
    elif command == "debug -v":
        ftph.set_debuglevel(2)
        print "Debug mode set to verbose."
        continue
    elif command == "debug -o":
        ftph.set_debuglevel(0)
        print "Debug mode turned off."
        continue
    elif command == "help":
        print "debug -o - turns off debug output\n"
        print "debug -v - turns the debug output to verbose mode\n"
        print "debug -b - turns the debug output to base\n"
        print "size [filename] - returns the size in bytes of the specified file"
        print "term - terminate the ftp session\n"
        print "delete [filename] - delete a file\n"
        print "rename - rename a file\n"
        print "up - navigate 1 directory up\n"
        print "chdir [path] - change which directory you're in\n"
        print "currdir - prints the path of the directory you are currently in\n"
        print "dir - lists the contents of the directory\n"
        print "remdir [directory path] - removes/deletes an entire directory\n"
        print "makedir [directory path] - creates a new directory\n"
        print "put [filename] - stores a local file onto the server (does not work with microsoft office document types)\n"
        print "get [filename] - download a remote file onto your computer\n\n"
        continue
    else:
        print "Sorry, invalid command. Check 'help' for proper usage."
        continue
#EoF
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/521925_Python_FTP_Client/recipe-521925.py",
"copies": "1",
"size": "5592",
"license": "mit",
"hash": 4435415135911788500,
"line_mean": 26.96,
"line_max": 252,
"alpha_frac": 0.676323319,
"autogenerated": false,
"ratio": 3.0912106135986734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42675339325986733,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
class ftp(FTP):
    """ftplib.FTP subclass that logs in on construction and adds
    path-aware read/write/remove helpers ('/'-separated paths relative
    to the server root)."""

    def __init__(self, host, user='', passwd=''):
        super(ftp, self).__init__(host)
        self.login(user=user, passwd=passwd)

    def read_file(self, file):
        """Return the remote file's full contents as bytes.

        Bug fix: the original Reader callback kept only the *last*
        retrbinary chunk, corrupting any file larger than one transfer
        block; chunks are now accumulated and joined.
        """
        chunks = []
        self.retrbinary('RETR ' + file, chunks.append)
        return b''.join(chunks)

    def get_files(self, dir=None):
        """List plain files (non-directories) in `dir` (root if omitted)."""
        print (self.pwd(), dir)
        if dir is None:
            folders = self.get_folders()
            return [x for x in self.nlst() if not x in folders]
        folders = [dir + '/' + x for x in self.get_folders(dir)]
        try:
            return [x for x in self.nlst(self.pwd() + '/' + dir) if not x in folders]
        except Exception:
            # Narrowed from a bare except. Some servers reject the
            # absolute-path NLST, so fall back to the current directory.
            return [x for x in self.nlst() if not x in folders]

    def get_folders(self, dir=None):
        """Return directory names inside `dir` (root if omitted);
        leaves the session cwd at '/'."""
        ret = []
        def parse(line):
            # A leading 'd' in the LIST line marks a directory entry.
            if line[0] == 'd':
                ret.append(line.rpartition(' ')[2])
        if dir is not None:
            self.cwd(dir)
        self.dir(parse)
        self.cwd('/')
        return ret

    def remove_file(self, filename):
        """Delete `filename`, walking its '/'-separated components.

        NOTE(review): this mirrors write_file's walk, so it *creates*
        missing intermediate directories even while deleting - looks
        unintended, but preserved as-is.
        """
        original = filename
        while '/' in filename:
            folder, filename = filename.split('/')[0], filename.replace(filename.split('/')[0] + '/', '')
            if not folder in self.nlst():
                self.mkd(folder)
            self.cwd(folder)
            if '/' in filename:
                continue
            else:
                self.delete(filename)
                self.cwd('/')
        if not '/' in original:
            self.delete(filename)

    def write_file(self, filename, newname=None):
        """Upload local file `filename`, creating remote directories as
        needed. `newname` renames it on the server (top-level uploads
        only, matching the original behavior)."""
        original = filename
        while '/' in filename:
            folder, filename = filename.split('/')[0], filename.replace(filename.split('/')[0] + '/', '')
            if not folder in self.nlst():
                self.mkd(folder)
            self.cwd(folder)
            if '/' in filename:
                continue
            else:
                file = open(original, 'rb')
                stor_str = "STOR {0}".format(filename)
                self.storbinary(stor_str, file)
                self.cwd('/')
                file.close()
        if not '/' in original:
            file = open(filename, 'rb')
            if newname is None:
                stor_str = "STOR {0}".format(filename)
            else:
                stor_str = "STOR {0}".format(newname)
            self.storbinary(stor_str, file)
            file.close()
if __name__ == '__main__':
    # Smoke test against a local FTP server: upload under a new name,
    # then download it back into new.md.
    a = ftp('127.0.0.1','geek','12345')
    a.write_file('steps.md', newname='process.md')
    with open('new.md','wb') as f:
        f.write(a.read_file('process.md'))
print (a.nlst()) | {
"repo_name": "geekpradd/Zoe",
"path": "core.py",
"copies": "1",
"size": "2248",
"license": "mit",
"hash": -384871630325597000,
"line_mean": 25.1511627907,
"line_max": 93,
"alpha_frac": 0.606316726,
"autogenerated": false,
"ratio": 2.8857509627727858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8667073984164788,
"avg_score": 0.06499874092159943,
"num_lines": 86
} |
from ftplib import FTP
class ftp(FTP):
    """ftplib.FTP subclass that logs in on construction and adds
    path-aware read/write/remove helpers ('/'-separated paths relative
    to the server root)."""

    def __init__(self, host, user='', passwd=''):
        super(ftp, self).__init__(host)
        self.login(user=user, passwd=passwd)

    def read_file(self, file):
        """Return the remote file's full contents as bytes.

        Bug fix: the original Reader callback kept only the *last*
        retrbinary chunk, corrupting any file larger than one transfer
        block; chunks are now accumulated and joined.
        """
        chunks = []
        self.retrbinary('RETR ' + file, chunks.append)
        return b''.join(chunks)

    def get_files(self):
        """Return the server's name list for the current directory."""
        return self.nlst()

    def remove_file(self, filename):
        """Delete `filename`, walking its '/'-separated components.

        NOTE(review): this mirrors write_file's walk, so it *creates*
        missing intermediate directories even while deleting - looks
        unintended, but preserved as-is.
        """
        original = filename
        while '/' in filename:
            folder, filename = filename.split('/')[0], filename.replace(filename.split('/')[0] + '/', '')
            if not folder in self.nlst():
                self.mkd(folder)
            self.cwd(folder)
            if '/' in filename:
                continue
            else:
                self.delete(filename)
                self.cwd('/')
        if not '/' in original:
            self.delete(filename)

    def write_file(self, filename, newname=None):
        """Upload local file `filename`, creating remote directories as
        needed. `newname` renames it on the server (top-level uploads
        only, matching the original behavior)."""
        original = filename
        while '/' in filename:
            folder, filename = filename.split('/')[0], filename.replace(filename.split('/')[0] + '/', '')
            if not folder in self.nlst():
                self.mkd(folder)
            self.cwd(folder)
            if '/' in filename:
                continue
            else:
                file = open(original, 'rb')
                stor_str = "STOR {0}".format(filename)
                self.storbinary(stor_str, file)
                self.cwd('/')
                file.close()
        if not '/' in original:
            file = open(filename, 'rb')
            if newname is None:
                stor_str = "STOR {0}".format(filename)
            else:
                stor_str = "STOR {0}".format(newname)
            self.storbinary(stor_str, file)
            file.close()
if __name__ == '__main__':
    #Some demo usage
    # Round-trip against a local FTP server: upload under a new name,
    # then download it back into new.md.
    a = ftp('127.0.0.1','geek','12345')
    a.write_file('steps.md', newname='process.md')
    with open('new.md','wb') as f:
        f.write(a.read_file('process.md'))
print (a.nlst()) | {
"repo_name": "Uxio0/PyFTP",
"path": "ftp.py",
"copies": "2",
"size": "1684",
"license": "mit",
"hash": -2296664120277303000,
"line_mean": 23.7794117647,
"line_max": 93,
"alpha_frac": 0.6086698337,
"autogenerated": false,
"ratio": 2.918544194107452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4527214027807452,
"avg_score": null,
"num_lines": null
} |
from ftplib import FTP
"""
giFTP - Git commit to FTP upload made easy.
The MIT License (MIT)
Copyright (c) 2013 Eka Putra
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class BaseException(Exception):
    """Root of giFTP's exception hierarchy.

    NOTE(review): shadows the builtin BaseException; renaming would be
    an interface change for external subclasses, so it is only flagged.
    """
    pass
class ConnectionErrorException(BaseException):
    """Raised when connecting or logging in to the FTP server fails."""
    pass
class RemotePathNotExistException(BaseException):
    """Raised when the configured remote path cannot be cwd'd into."""
    pass
class OperationStatus(object):
    """Outcome record for a single FTP transfer operation.

    op is 'A' (add), 'M' (modify/update) or 'D' (delete); a truthy
    `reason` marks the operation as failed.
    """

    def __init__(self, op, path, reason=None):
        self.operation = op
        self.path = path
        self.reason = reason

    def __repr__(self):
        # Verb table keyed by operation code: (success, failure).
        verbs = {
            'A': (u'Successfully add', u'Failed add'),
            'M': (u'Successfully update', u'Failed update'),
            'D': (u'Successfully delete', u'Failed delete'),
        }
        try:
            success_verb, failure_verb = verbs[self.operation]
        except KeyError:
            # Bug fix: the original if/elif chain fell off the end for
            # unknown operation codes, returning None and making repr()
            # raise TypeError. Fall back to a neutral rendering.
            return u'%s %s' % (self.operation, self.path)
        verb = failure_verb if self.reason else success_verb
        return u'%s %s' % (verb, self.path)
class FTPSession(object):
    """
    A class that handles all FTP operations for giFTP: connect,
    directory creation, upload (push) and delete, with per-operation
    success/failure logging via OperationStatus.
    """
    def __init__(self, host, username=None, password=None, path=None,
                 simulate=False):
        """
        Get FTP credentials during initialization.
        Args:
        - host : required.
        - username : optional and the default connection will be
          anonymous FTP connection.
        - password : optional.
        - path : Path on the remote server where all transfered file will
          goes to.
        - simulate : When simulate is True, all actual FTP action will be
          skipped.
        """
        self.host = host
        self.username = username
        self.password = password
        self.path = path
        self.simulate = simulate
        self.session = None          # ftplib.FTP once start() succeeds
        self.success_operation = []  # OperationStatus records (succeeded)
        self.failed_operation = []   # OperationStatus records (failed)
    def start(self):
        """
        Start the connection: connect, optionally log in, and cwd into
        the configured remote path.

        Raises ConnectionErrorException or RemotePathNotExistException.
        """
        try:
            self.session = FTP(self.host)
            if self.username and self.password:
                self.session.login(self.username, self.password)
        # NOTE(review): bare excepts below swallow even KeyboardInterrupt;
        # left untouched here since callers may rely on the broad catch.
        except:
            raise ConnectionErrorException('> [ERROR] Failed connecting to server.\n')
        if self.path:
            try:
                self.session.cwd(self.path)
            except:
                self.stop()
                raise RemotePathNotExistException(
                    '> [ERROR] Path "%s" does not exists on the server\n' % self.path)
    def stop(self):
        """
        Stop connection.
        """
        self.session.quit()
    def mkdir(self, segments):
        """
        Handle directory creation if not yet exists on the server.

        `segments` is a list of path components; each ancestor prefix is
        mkd'd in turn so the whole chain ends up existing.
        """
        if self.simulate: return
        dirs = []
        for segment in segments:
            dirs.append(segment)
            path = '/'.join(dirs)
            try:
                self.session.mkd(path)
            except Exception as e:
                # let's ignore it for now.
                # its means the dir already exist.
                pass
        return
    def push(self, path, stream, is_new=True):
        """
        Add new file to remote server (or update it when is_new=False).

        Records the outcome on success_operation / failed_operation
        instead of raising, so one bad file doesn't stop the batch.
        """
        if self.simulate: return
        segments = path.split('/')
        operation = 'A'
        if not is_new:
            operation = 'M'
        # Check if the file is located inside directory structure.
        # If yes, create the dirs if not exists.
        if len(segments) > 1:
            self.mkdir(segments[:-1])
        try:
            # Let's just always transfer the file as binary.
            self.session.storbinary('STOR %s' % path, stream)
        except Exception as e:
            # We don't want the whole operation stopped
            # instead, just log the status.
            self.failed_operation.append(OperationStatus(operation, path, e))
        else:
            self.success_operation.append(OperationStatus(operation, path))
    def delete(self, path):
        """
        Delete file on the remote server, logging the outcome like push().
        """
        if self.simulate: return
        operation = 'D'
        try:
            self.session.delete(path)
        except Exception as e:
            self.failed_operation.append(OperationStatus(operation, path, e))
        else:
            self.success_operation.append(OperationStatus(operation, path))
| {
"repo_name": "ekaputra07/giFTP",
"path": "giftp/ftp_session.py",
"copies": "1",
"size": "5739",
"license": "mit",
"hash": -5926478426148148000,
"line_mean": 31.9827586207,
"line_max": 86,
"alpha_frac": 0.5957483882,
"autogenerated": false,
"ratio": 4.617055510860821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002150852060707495,
"num_lines": 174
} |
from ftp_parser import Parser
class Filer:
    """EDGAR FTP helper for one filer, identified by CIK.

    Finds, downloads and parses the *.hdr.sgml header files under the
    filer's /edgar/data/<cik> directory on an existing FTP connection.
    """

    suffix_of_interest = '.hdr.sgml'

    def __init__(self, cik, connection):
        # Bug fix: content, content_parsed and parser were *class*
        # attributes, so every Filer instance shared (and appended to)
        # the same lists. They are now per-instance state.
        self.content = []
        self.content_parsed = []
        self.parser = Parser()
        self.cik = str(int(cik))  # normalizes, e.g. strips leading zeros
        self.connection = connection
        self._go_to_directory()

    def _go_to_directory(self):
        # cwd into this filer's EDGAR data directory.
        self.connection.cwd('/edgar/data/' + self.cik)

    def ls(self):
        """Print/return the server listing of the current directory."""
        return self.connection.dir()

    def ls_d(self):
        """List subdirectories only."""
        return self.connection.retrlines('LIST */')

    def files_of_interest(self):
        """Return remote paths of all header (.hdr.sgml) files two
        levels down."""
        cmd = 'LIST */*/*' + self.suffix_of_interest
        lines = []
        self.connection.retrlines(cmd, lambda x: self._process_foi(x, lines))
        return lines

    def _process_foi(self, line, lines):
        # Keep only the path: the last whitespace-separated token of the
        # LIST line.
        lines.append(line.split(' ')[-1])

    def download_file(self, path):
        """Download one remote file and return its joined contents."""
        chunks = []
        self.connection.retrbinary('RETR ' + path, chunks.append)
        return ''.join(chunks)

    def download_foi(self):
        """Download every header file, caching contents on self.content."""
        for f in self.files_of_interest():
            print('downloading ' + f)
            self.content.append(self.download_file(f))
        return self.content

    def parse_foi(self):
        """Parse all downloaded headers with the ftp_parser.Parser."""
        self.content_parsed = [self.parser.run_header(c) for c in self.content]
        return self.content_parsed
| {
"repo_name": "bkj/ernest",
"path": "enrich/modules/filer.py",
"copies": "2",
"size": "1285",
"license": "apache-2.0",
"hash": 7516083619486281000,
"line_mean": 26.3404255319,
"line_max": 79,
"alpha_frac": 0.5813229572,
"autogenerated": false,
"ratio": 3.4823848238482387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5063707781048239,
"avg_score": null,
"num_lines": null
} |
from ftracer_script import *
## note the capture details file is
## warnings is needed to suppress errors from mousehook tracker
def fxn():
    """Emit a DeprecationWarning; used below to exercise warning
    suppression (mousehook tracker noise).

    Typo fix: warning message "depreciated" -> "deprecated".
    """
    warnings.warn("deprecated", DeprecationWarning)
# Fire fxn() once at import time with its DeprecationWarning silenced.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    fxn()
if __name__ == "__main__":
    # this code allows this module to be run as standalone
    import sys
    # Captures mouse events and writes into log file (common_script.py). Use win32 py to hook on mouse events
    # This mouse module will run in a continual loop until application is stopped
    bm = pyHook.HookManager()
    bm.MouseAll = OnMouseEvent
    bm.HookMouse()
    bk = pyHook.HookManager()
    bk.KeyDown = OnKeyboardEvent
    bk.HookKeyboard()
    # Commented-out WMI / window-enumeration experiments, kept as-is:
    ## c = wmi.WMI()
    ## for i in c.classes:
    ## if "app" in i.lower():
    ## print i
    ##
    ## for m in c.Win32_Process():
    ## if "tas" in str(m):
    ## print m
    ## def winEnumHandler( hwnd, ctx ):
    ## if win32gui.IsWindowVisible( hwnd ):
    ## print hex(hwnd), win32gui.GetWindowText( hwnd )
    ##
    ## win32gui.EnumWindows( winEnumHandler, None );
    # Blocks forever, dispatching hook callbacks until the process exits.
    pythoncom.PumpMessages()
    #shell = win32com.client.Dispatch("WScript.Shell")
    #shell.AppActivate('Command Prompt') # this sets window to focus
    #x1 = win32com.client.DispatchEx("PDFcreator.Application")
    w = win32gui
w.GetWindowText (w.GetForegroundWindow()) | {
"repo_name": "mirageglobe/upp-tracker",
"path": "tracer/ftracer.py",
"copies": "1",
"size": "1412",
"license": "apache-2.0",
"hash": -7412879201999883000,
"line_mean": 27.26,
"line_max": 109,
"alpha_frac": 0.6529745042,
"autogenerated": false,
"ratio": 3.512437810945274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9601958703901385,
"avg_score": 0.012690722248777592,
"num_lines": 50
} |
from ftracer_script import *
## note the capture details file is
## warnings is needed to suppress errors from mousehook tracker
## Function: This is the main script file which houses all the scripts
import sys
import pythoncom, pyHook
import win32con, win32com.client
import win32gui, win32api
import codecs
import wmi
import chardet
import time, pickle
import warnings
import sqlite3
## ----------------------------------
## Global Var
## ----------------------------------
# Module-level state shared with the mouse-hook callback below.
ftraceadvance_lastaction = 'start' # this stores the last action of the user
ftraceadvance_sqlitedb = 'tracemouse.sqlite' # structure is id int and (mousetimeepoch, mousetimeboot, mousepos, winfile, winhandle, winname, winforename)
## ----------------------------------
## Mouse / Keyboard Tracing Functions
## ----------------------------------
def winEnumHandler( hwnd, ctx ):
    # Debug helper for win32gui.EnumWindows: print the handle and title
    # of every visible top-level window. (Python 2 print statement.)
    if win32gui.IsWindowVisible( hwnd ):
        print hex(hwnd), win32gui.GetWindowText( hwnd )
def strutf8encode(sstring):
    """Best-effort re-encode of an arbitrary byte string to UTF-8.

    Uses chardet to guess the source encoding; None input or an
    undetectable encoding yields the UTF-8 encoding of "".
    """
    rtnString = ""
    if sstring != None:
        ## codecs.lookup(sstring)
        ## ustring = unicode(sstring,'utf_8')
        ## print ustring
        rtn_encoding = chardet.detect(sstring)
        if rtn_encoding['encoding'] != None:
            # 'replace' keeps going past undecodable bytes.
            rtnString = sstring.decode(rtn_encoding['encoding'],'replace')
    return rtnString.encode('utf_8')
def OnKeyboardEvent(event):
    """Global keyboard hook: pressing Escape terminates the tracer."""
    key = event.Key
    if key == "Escape":
        exit()
    # True lets the event propagate to other handlers.
    return True
def OnMouseEvent(event):
    ## this function uses mouse to trace the user input of applications
    global ftraceadvance_lastaction
    global ftraceadvance_sqlitedb
    # called when mouse events are received. prints out mouse events
    # Mouse-move events are ignored; everything else is logged.
    if event.MessageName != "mouse move":
        print 'MessageName:', event.MessageName
        print 'Message:', event.Message
        print 'Time:', event.Time
        print 'WindowHandler:', hex(event.Window)
        print 'WindowName:', strutf8encode(event.WindowName)
        print 'Position:', event.Position
        print 'Wheel:', event.Wheel #not used in wheel detection
        print 'Injected:', event.Injected #rarely used
        print time.time()
        if event.WindowName == None:
            window_name = 'None'
        else:
            window_name = event.WindowName
        ftemp_wfore = strutf8encode(win32gui.GetWindowText(win32gui.GetForegroundWindow())) # This special method captures window name
        ftemp_wname = AppDetector(strutf8encode(event.WindowName))
        ftemp_whand = str(event.Window) #window handler
        ftemp_mpos = str(event.Position)
        ftemp_mact = str(event.MessageName)
        ftemp_mnum = int(event.Message)
        ftemp_epoc = time.time() #epoch time of mouse
        ftemp_rtime = event.Time #running counter of mouse
        ftemp_wfile = str('')
        conn = sqlite3.connect(ftraceadvance_sqlitedb)
        conn.text_factory = str
        curs = conn.cursor()
        # Only record a row when focus moved to a different window than
        # the previous event (window-switch detection).
        if ftraceadvance_lastaction != window_name:
            print ftraceadvance_lastaction
            curs.execute('insert into ftrace(mousetimeepoch, mousetimeboot, mousepos, mouseact, mousenum, winfile, winhandle, winname, winforename) values(?, ?, ?, ?, ?, ?, ?, ?, ?)',(ftemp_epoc,ftemp_rtime,ftemp_mpos,ftemp_mact,ftemp_mnum,ftemp_wfile,ftemp_whand,ftemp_wname,ftemp_wfore))
            ftraceadvance_lastaction = strutf8encode(event.WindowName)
            print ftraceadvance_lastaction
        conn.commit()
        curs.close()
    return True # return true is always needed, otherwise it will show an error
def AppDetector(data_window=''):
    """Map a raw window title/path to a friendly application name.

    Unrecognised (including empty) titles map to 'Unknown'.
    """
    known_apps = {
        '': 'Unknown',
        'Unknown': 'Unknown',
        'C:\Python27\python.exe': 'Python',
        'C:\Python26\python.exe': 'Python',
        'FolderView': 'Windows Explorer - Folderview',
        'Downloads': 'Windows Explorer - downloads',
        'OpenOffice.org Writer': 'OpenOffice Writer',
    }
    return known_apps.get(data_window, 'Unknown')
## ----------------------------------
## SQLite Writing Functions
## ----------------------------------
def sqlite_table(file_write='tracemouse.sqlite'):
    """Create the ftrace and fswitch tables (if missing) in the given
    SQLite database file.

    Bug fix: the original called the bare name connect(), which is never
    imported (this module imports sqlite3), so every call raised
    NameError.
    """
    conn = sqlite3.connect(file_write)
    curs = conn.cursor()
    curs.execute('''create table if not exists ftrace (id integer primary key, mousetimeepoch float, mousetimeboot float, mousepos text, mouseact text, mousenum integer, winfile text, winhandle text, winname text, winforename text)''')
    curs.execute('''create table if not exists fswitch (id integer primary key, objsource text, objtarget text, rstrength integer)''')
    conn.commit()
    curs.close()
    return True
def sqlite_query(mquery, file_write='tracemouse.sqlite'):
    """Execute a single SQL statement against the given database and
    commit.

    Bug fix: connect() -> sqlite3.connect() (the bare name was never
    imported, so this always raised NameError).
    NOTE: mquery is interpolated verbatim - callers must not pass
    untrusted SQL.
    """
    conn = sqlite3.connect(file_write)
    curs = conn.cursor()
    curs.execute(mquery)
    conn.commit()
    curs.close()
    return True
def sqlite_cleardb(file_write='tracemouse.sqlite'):
    """Delete all rows from the ftrace and fswitch tables.

    Bug fixes: connect() -> sqlite3.connect() (bare name was never
    imported), and the original issued no DELETE at all, so it never
    cleared anything. The database path is now a backward-compatible
    parameter defaulting to the original hard-coded file name.
    """
    conn = sqlite3.connect(file_write)
    curs = conn.cursor()
    for table in ('ftrace', 'fswitch'):
        try:
            curs.execute('delete from ' + table)
        except sqlite3.OperationalError:
            pass  # table not created yet - nothing to clear
    conn.commit()
    curs.close()
    return True
## ----------------------------------
## Other handy Functions
## ----------------------------------
def rem_duplicates(seq, idfun=None):
    """Return seq as a list without duplicates, keeping the first
    occurrence of each item in order.

    idfun, when given, maps each item to the key used for duplicate
    detection (e.g. str.lower for case-insensitive dedup).
    """
    if idfun is None:
        idfun = lambda item: item
    seen = set()
    unique = []
    for item in seq:
        marker = idfun(item)
        if marker not in seen:
            seen.add(marker)
            unique.append(item)
    return unique
def fxn():
    """Emit a DeprecationWarning (exercised under suppression below).

    Typo fix: warning message "depreciated" -> "deprecated".
    """
    warnings.warn("deprecated", DeprecationWarning)
# Fire fxn() once at import time with its DeprecationWarning silenced.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    fxn()
if __name__ == "__main__":
    # Captures mouse events and writes into log file (common_script.py). Use win32 py to hook on mouse events
    # This trace module will run in a continual loop until application is stopped
    sqlite_table('tracemouse.sqlite')
    bm = pyHook.HookManager()
    bm.MouseAll = OnMouseEvent
    bm.HookMouse()
    bk = pyHook.HookManager()
    bk.KeyDown = OnKeyboardEvent
    bk.HookKeyboard()
    # Blocks forever, dispatching hook callbacks until Escape exits.
    pythoncom.PumpMessages()
    #shell = win32com.client.Dispatch("WScript.Shell")
    #shell.AppActivate('Command Prompt') # this sets window to focus
    #x1 = win32com.client.DispatchEx("PDFcreator.Application")
| {
"repo_name": "mirageglobe/upp-tracker",
"path": "tracer/ftraceadvance.py",
"copies": "1",
"size": "6390",
"license": "apache-2.0",
"hash": -7871875556618615000,
"line_mean": 29.8695652174,
"line_max": 283,
"alpha_frac": 0.6511737089,
"autogenerated": false,
"ratio": 3.7855450236966823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.990935281154233,
"avg_score": 0.005473184210870487,
"num_lines": 207
} |
from ftw import ruleset, http, errors
"""
This script assumes that default blocking action is 403
and sampling is one. It will send a know bad request
that is expected to be blocked. If sampling is on it
will only block a certain percentage. We send 1000
requests to verify this. In order to do this we must
also turn off IP Reputation blocking.
SecAction "id:900005,phase:1,nolog,pass,ctl:ruleEngine=on,ctl:ruleRemoveById=910000"
"""
def send_requests(input_data,subiters,result,index):
    # Thread worker: fire `subiters` identical requests through one FTW
    # HTTP user-agent, recording each response status in the shared
    # `result` dict under the key "<thread index><iteration>".
    http_ua = http.HttpUA()
    for i in range(0,subiters):
        new_index = str(index)+str(i)
        http_ua.send_request(input_data)
        result[new_index] = http_ua.response_object.status
def run_requests(iterations):
    """Fire 500 XSS-payload GETs at localhost (5 threads x 100 requests)
    and return (blocked_pct, passed_pct) based on HTTP 403 responses.

    NOTE(review): the `iterations` argument is ignored - thread and
    per-thread counts are hard-coded; the old docstring described a
    different (POST) test.
    """
    x = ruleset.Input(method="GET", protocol="http",port=80,uri='/?X="><script>alert(1);</script>',dest_addr="localhost",headers={"Host":"localhost","User-Agent":"ModSecurity CRS 3 test"})
    import threading
    returns = {}
    threads = []
    for i in range(5):
        t = threading.Thread(target=send_requests,args=(x,100, returns,i,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    status_not_403 = 0
    status_403 = 0
    for status in returns.values():
        if status == 403:
            status_403 += 1
        else:
            status_not_403 += 1
    # *1.0 forces float division under Python 2.
    x = (status_403/(len(returns)*1.0))*100
    y = (status_not_403/(len(returns)*1.0))*100
    print "403s =", x
    print "not 403s =", y
    return (x,y)
def test_sampling():
    """Sanity-check that sampling blocks roughly half the bad requests."""
    # print() form works identically on Python 2 and 3 for a single arg
    print("running")
    block, passed = run_requests(100)
    # sampling rate is ~50%; accept a +/-5 percentage-point band
    assert 45 < block < 55
"repo_name": "SpiderLabs/OWASP-CRS-regressions",
"path": "utils/testSampling.py",
"copies": "3",
"size": "1532",
"license": "apache-2.0",
"hash": -399656757327182660,
"line_mean": 32.3260869565,
"line_max": 185,
"alpha_frac": 0.703002611,
"autogenerated": false,
"ratio": 2.9689922480620154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5171994859062016,
"avg_score": null,
"num_lines": null
} |
from ftw import ruleset, http, errors
"""
This script reads in a list of popular Useragents and checks to see if it triggers
It expects 403's to be returned for a rule firing
"""
def read_useragents(filename):
    """Return the user-agent strings in `filename`, one list entry per line.

    Leading/trailing whitespace (including the newline) is stripped from
    each entry; blank lines become empty strings, matching the original
    behavior. The file handle is closed deterministically via ``with`` --
    the original implementation leaked it.
    """
    with open(filename, 'r') as f:
        return [agent.strip() for agent in f.readlines()]
def run_requests(useragent_list):
    """Send one GET per user agent to localhost and print 403 percentages.

    Each request carries one entry of `useragent_list` as its User-Agent
    header; a 403 response means a CRS rule fired on that agent.
    """
    status_not_403 = 0
    status_403 = 0
    for useragent in useragent_list:
        sent = status_not_403 + status_403
        # progress counter while we wait on the network
        if sent % 15 == 0:
            # Single pre-formatted string prints identically on py2/py3.
            # The original mixed styles and printed a tuple under py2.
            print("Send %s Out of %s" % (sent, len(useragent_list)))
        input_data = ruleset.Input(method="GET", protocol="http", port=80,
                                   uri='/', dest_addr="localhost",
                                   headers={"Host": "localhost",
                                            "User-Agent": useragent})
        http_ua = http.HttpUA()
        http_ua.send_request(input_data)
        if http_ua.response_object.status == 403:
            status_403 += 1
        else:
            status_not_403 += 1
    # *1.0 forces float division under Python 2 as well
    x = (status_403 / (len(useragent_list) * 1.0)) * 100
    y = (status_not_403 / (len(useragent_list) * 1.0)) * 100
    print("403s = %s" % x)
    print("not 403s = %s" % y)
def main():
    """Load the bundled popular user agents and run them against localhost."""
    uas = read_useragents('./data/popularUAs.data')
    run_requests(uas)

# Guarded entry point: importing this module no longer fires off the
# HTTP requests as a side effect (the original called main() unguarded).
if __name__ == '__main__':
    main()
| {
"repo_name": "csjperon/OWASP-CRS-regressions",
"path": "utils/testUserAgents.py",
"copies": "3",
"size": "1251",
"license": "apache-2.0",
"hash": 2691167147439547000,
"line_mean": 32.8108108108,
"line_max": 155,
"alpha_frac": 0.621902478,
"autogenerated": false,
"ratio": 3.371967654986523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5493870132986524,
"avg_score": null,
"num_lines": null
} |
from ftw import ruleset, logchecker, testrunner
import datetime
import pytest
import sys
import re
import os
def test_crs(ruleset, test, logchecker_obj):
    """Execute every stage of `test` through a fresh TestRunner, verifying logs."""
    stage_runner = testrunner.TestRunner()
    for current_stage in test.stages:
        stage_runner.run_stage(current_stage, logchecker_obj)
class FooLogChecker(logchecker.LogChecker):
    """Log checker that scans a server error log backwards for entries
    whose timestamp falls within the FTW test window (self.start/self.end,
    set by the LogChecker base class -- TODO confirm against ftw docs).
    """
    def __init__(self, config):
        super(FooLogChecker, self).__init__()
        # All three settings come from the pytest `config` fixture dict.
        self.log_location = config['log_location_linux']
        self.log_date_regex = config['log_date_regex']
        self.log_date_format = config['log_date_format']
    def reverse_readline(self, filename):
        """Yield the lines of `filename` last-to-first.

        Reads one character at a time from the end; characters are
        accumulated reversed and flipped with [::-1] when a newline (or
        the start of file) is reached.
        """
        with open(filename) as f:
            f.seek(0, os.SEEK_END)
            position = f.tell()
            line = ''
            while position >= 0:
                f.seek(position)
                next_char = f.read(1)
                if next_char == "\n":
                    yield line[::-1]
                    line = ''
                else:
                    line += next_char
                position -= 1
            # emit whatever preceded the first newline (the first line)
            yield line[::-1]
    def get_logs(self):
        """Return log lines dated within [self.start, self.end], newest first."""
        pattern = re.compile(r'%s' % self.log_date_regex)
        our_logs = []
        for lline in self.reverse_readline(self.log_location):
            # Extract dates from each line
            match = re.match(pattern, lline)
            if match:
                log_date = match.group(1)
                log_date = datetime.datetime.strptime(
                    log_date, self.log_date_format)
                # NGINX doesn't give us microsecond level by detail, round down.
                if "%f" not in self.log_date_format:
                    ftw_start = self.start.replace(microsecond=0)
                else:
                    ftw_start = self.start
                ftw_end = self.end
                if log_date <= ftw_end and log_date >= ftw_start:
                    our_logs.append(lline)
                # If our log is from before FTW started stop
                # (we read backwards, so everything further is older too)
                if log_date < ftw_start:
                    break
        return our_logs
@pytest.fixture(scope='session')
def logchecker_obj(config):
    """Session-scoped fixture providing one shared FooLogChecker."""
    return FooLogChecker(config)
| {
"repo_name": "SpiderLabs/owasp-modsecurity-crs",
"path": "tests/regression/CRS_Tests.py",
"copies": "1",
"size": "2142",
"license": "apache-2.0",
"hash": -4453423894128167400,
"line_mean": 32.46875,
"line_max": 80,
"alpha_frac": 0.5308123249,
"autogenerated": false,
"ratio": 4.049149338374291,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.507996166327429,
"avg_score": null,
"num_lines": null
} |
from ftw import ruleset, logchecker, testrunner
import pytest
import pdb
import sys
import re
import os
import ConfigParser
def test_crs(ruleset, test, logchecker_obj, destaddr):
    """Run all stages of `test`, optionally redirecting them to `destaddr`."""
    stage_runner = testrunner.TestRunner()
    for current_stage in test.stages:
        if destaddr is not None:
            # override the destination configured in the test file
            current_stage.input.dest_addr = destaddr
        stage_runner.run_stage(current_stage, logchecker_obj)
class FooLogChecker(logchecker.LogChecker):
    """Log checker scanning an Apache-style error log backwards for lines
    dated within the FTW test window (self.start/self.end from the base
    class -- TODO confirm against ftw docs).
    """
    def reverse_readline(self, filename):
        """Yield the lines of `filename` last-to-first, one char at a time."""
        with open(filename) as f:
            f.seek(0, os.SEEK_END)
            position = f.tell()
            line = ''
            while position >= 0:
                f.seek(position)
                next_char = f.read(1)
                if next_char == "\n":
                    # characters were collected reversed; flip them back
                    yield line[::-1]
                    line = ''
                else:
                    line += next_char
                position -= 1
            # emit the first line of the file
            yield line[::-1]
    def get_logs(self):
        """Return log lines whose bracketed timestamp is within the window.

        Log path is re-read from settings.ini on every call.
        """
        import datetime
        config = ConfigParser.ConfigParser()
        config.read("settings.ini")
        log_location = config.get('settings', 'log_location')
        our_logs = []
        # Matches e.g. "[Mon Jan 01 12:00:00.123456 2016]".
        # NOTE(review): the [A-z] class also matches characters between
        # 'Z' and 'a' in ASCII ([\]^_`) -- likely meant [A-Za-z]; verify.
        pattern = re.compile(r"\[([A-Z][a-z]{2} [A-z][a-z]{2} \d{1,2} \d{1,2}\:\d{1,2}\:\d{1,2}\.\d+? \d{4})\]")
        for lline in self.reverse_readline(log_location):
            # Extract dates from each line
            match = re.match(pattern,lline)
            if match:
                log_date = match.group(1)
                # Convert our date
                log_date = datetime.datetime.strptime(log_date, "%a %b %d %H:%M:%S.%f %Y")
                ftw_start = self.start
                ftw_end = self.end
                # If we have a log date in range
                if log_date <= ftw_end and log_date >= ftw_start:
                    our_logs.append(lline)
                # If our log is from before FTW started stop
                if(log_date < ftw_start):
                    break
        return our_logs
@pytest.fixture
def logchecker_obj():
    """Per-test fixture; FooLogChecker reads its log path from settings.ini."""
    return FooLogChecker()
| {
"repo_name": "csjperon/OWASP-CRS-regressions",
"path": "CRS_Tests.py",
"copies": "1",
"size": "2040",
"license": "apache-2.0",
"hash": 6173164529445813000,
"line_mean": 33.5762711864,
"line_max": 112,
"alpha_frac": 0.5147058824,
"autogenerated": false,
"ratio": 3.849056603773585,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4863762486173585,
"avg_score": null,
"num_lines": null
} |
from ftw import ruleset, testrunner, http, errors
import pytest
import re
import random
import threading
def test_logcontains(ruleset, test):
    """Execute each stage of the provided test through a TestRunner."""
    stage_runner = testrunner.TestRunner()
    for current_stage in test.stages:
        stage_runner.run_stage(current_stage)
# Should return a test error because its searching before response
def test_search1():
    """Searching a response that was never populated raises TestError."""
    runner = testrunner.TestRunner()
    # No request is ever sent, so response_object is unusable; FTW
    # reports searching it as a TestError. (The original also built a
    # ruleset.Input that was never used -- removed as dead code.)
    http_ua = http.HttpUA()
    with pytest.raises(errors.TestError):
        runner.test_response(http_ua.response_object, re.compile('dog'))
# Should return a failure because it is searching for a word not there
def test_search2():
    """A regex absent from the response body fails the assertion."""
    runner = testrunner.TestRunner()
    request = ruleset.Input(dest_addr="example.com",
                            headers={"Host": "example.com"})
    agent = http.HttpUA()
    agent.send_request(request)
    with pytest.raises(AssertionError):
        runner.test_response(agent.response_object, re.compile('dog'))
# Should return a success because it is searching for a phrase that IS
# present in the example.com response body
def test_search3():
    runner = testrunner.TestRunner()
    x = ruleset.Input(dest_addr="example.com",headers={"Host":"example.com"})
    http_ua = http.HttpUA()
    http_ua.send_request(x)
    runner.test_response(http_ua.response_object,re.compile('established to be used for'))
# Should return a success because we found our regex
def test_search4():
    """A match-anything regex always succeeds against a real response."""
    runner = testrunner.TestRunner()
    request = ruleset.Input(dest_addr="example.com",
                            headers={"Host": "example.com"})
    agent = http.HttpUA()
    agent.send_request(request)
    runner.test_response(agent.response_object, re.compile('.*'))
| {
"repo_name": "fastly/ftw",
"path": "test/integration/test_htmlcontains.py",
"copies": "1",
"size": "1658",
"license": "apache-2.0",
"hash": 7240484422778786000,
"line_mean": 36.6818181818,
"line_max": 90,
"alpha_frac": 0.6954161641,
"autogenerated": false,
"ratio": 3.5732758620689653,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4768692026168965,
"avg_score": null,
"num_lines": null
} |
from ftwpy import FTW, FTW_F
from mutagen.mp3 import EasyMP3 as MP3
import re
import os
import sys
def mkdir_p(path):
    """Create `path` like ``mkdir -p``: succeed silently if it exists.

    Any OSError other than EEXIST-on-an-existing-directory is re-raised.
    """
    # Local import: the original used `os.errno`, a non-public attribute
    # that was removed in Python 3.10; the `errno` module is the real API.
    import errno
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
class FTW_psp(FTW):
    """File-tree walker that writes a PSP m3u8 playlist of all MP3s found.

    Output goes to <path>/PSP/PLAYLIST/MUSIC/<out>.m3u8 with Windows-style
    backslash paths, as the PSP firmware expects.
    """
    def __init__(self, path=".", out=None):
        FTW.__init__(self,path)
        # default playlist name when none is supplied
        if not out:
            out="All"
        out+='.m3u8'
        out_path = path + "/PSP/PLAYLIST/MUSIC/"
        mkdir_p(out_path);
        self.out_path = out_path + out
    def run(self):
        # (re)create the playlist file and write the m3u header, then walk
        self.out = open(self.out_path, "w");
        self.out.write(u'#EXTM3U\n')
        FTW.run(self)
    def fn(self, fpath, stat, typeflag):
        """Per-file callback: append an EXTINF entry for each .mp3 file.

        NOTE(review): the regex '^.*.mp3$' has an unescaped dot, so e.g.
        'song_mp3' would also match -- presumably '\\.mp3$' was intended.
        """
        if not typeflag == FTW_F or not re.match('^.*.mp3$', fpath):
            return
        mp3 = MP3(fpath);
        # "#EXTINF:<seconds>, <artist> - <title>", utf-8 encoded
        extinf = u"".join(("#EXTINF:",
                str(int(mp3.info.length)),", ",
                mp3.tags['artist'][0], " - ",
                mp3.tags['title'][0])).encode('utf-8').strip()
        # make the path relative to the walk root and backslash-separated
        path = fpath.replace(self.path, "", 1).replace("/","\\");
        if path[0] != '\\':
            path = '\\' + path
        self.out.write(extinf)
        self.out.write('\n')
        self.out.write(path)
        self.out.write('\n')
def _main(argv):
    """Build a PSP playlist for argv[1] (default '.'), named argv[2]."""
    path = argv[1] if len(argv) > 1 else "."
    out = argv[2] if len(argv) > 2 else None
    psp = FTW_psp(path, out)
    psp.run()

# Guarded entry point: importing this module no longer walks the
# filesystem as a side effect (the original ran unguarded at top level).
if __name__ == '__main__':
    _main(sys.argv)
| {
"repo_name": "ein-shved/libpyhon",
"path": "psp_playlist.py",
"copies": "1",
"size": "1474",
"license": "mit",
"hash": 3926481822880168000,
"line_mean": 25.8,
"line_max": 70,
"alpha_frac": 0.4966078697,
"autogenerated": false,
"ratio": 3.077244258872651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4073852128572651,
"avg_score": null,
"num_lines": null
} |
from fudge import Fake
from fudge.inspector import arg
from sqlalchemy.orm import exc
from belt.tests import patch
pypi_base_url = 'https://pypi.python.org/packages'
class TestPackageList(object):
    """Tests for belt.views.package_list using fudge fakes."""
    def test_requests_package_data_from_pypi(self, dummy_request):
        """Unknown packages are created from PyPI data."""
        from belt.views import package_list
        dummy_request.matchdict = {'package': u'foo'}
        pkg = Fake('Package').has_attr(name='foo', releases=[1])
        with patch('belt.views.Package') as Package:
            # lookup misses, so the view must fall back to PyPI creation
            Package.expects('by_name').with_args('foo').raises(exc.NoResultFound)
            (Package.expects('create_from_pypi')
             .with_args(name='foo', package_dir=arg.any())
             .returns(pkg))
            package_list(dummy_request)
    # NOTE(review): stacked patch decorators usually inject arguments
    # bottom-up, but the parameter order here lists them top-down --
    # verify against this project's `patch` helper before relying on
    # which fake is which.
    @patch('belt.views.DBSession')
    @patch('belt.views.Package')
    @patch('belt.views.package_releases')
    def test_requests_package_releases_if_none_exist(self, DBSession, Package,
                                                     package_releases,
                                                     dummy_request):
        """Packages without releases trigger a release fetch and DB add."""
        from belt.views import package_list
        rel = Fake('Release').has_attr(version='106')
        package_releases.expects_call().with_args('quux', location=arg.any()).returns([rel])
        pkg = (Fake('pkg').has_attr(name='quux', releases=set()))
        Package.expects('by_name').with_args('quux').returns(pkg)
        DBSession.expects('add').with_args(pkg)
        dummy_request.matchdict = {'package': u'quux'}
        package_list(dummy_request)
| {
"repo_name": "rob-b/belt",
"path": "belt/tests/views/test_package_list.py",
"copies": "1",
"size": "1538",
"license": "bsd-3-clause",
"hash": -288082528055928420,
"line_mean": 38.4358974359,
"line_max": 92,
"alpha_frac": 0.6079323797,
"autogenerated": false,
"ratio": 3.8163771712158807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9921367523977,
"avg_score": 0.0005884053877759621,
"num_lines": 39
} |
from fudge import Fake
from neomodel.exception import DoesNotExist
from .index import FakeIndex
from .relationship_manager import FakeCategoryRelation
INDEX_REGISTER = []
class FakeCategoryNode(object):
    """Stand-in for a neomodel category node; exposes `.instance` as the
    category relation backed by the given index's node store."""
    def __init__(self, index):
        self.instance = FakeCategoryRelation(self, index.nodes)
def factory_reset():
    """Reset every registered fake index, wiping all stored nodes."""
    for index in INDEX_REGISTER:
        index._reset()
class FakeNodeMeta(type):
    """Metaclass giving every FakeNode subclass its own FakeIndex and
    registering that index for global factory_reset()."""
    def __new__(mcs, name, bases, dct):
        inst = super(FakeNodeMeta, mcs).__new__(mcs, name, bases, dct)
        inst.index = FakeIndex()
        INDEX_REGISTER.append(inst.index)
        return inst
# Base class built via the metaclass call so the same line works on both
# Python 2 and 3 (no __metaclass__ / metaclass= syntax divergence).
FakeNodeBase = FakeNodeMeta('NodeBase', (), {})
class FakeNode(FakeNodeBase):
    """In-memory fake of a neomodel node. Python 2 only (`iteritems`)."""
    DoesNotExist = DoesNotExist
    def __init__(self, **kwargs):
        # copy all keyword arguments onto the instance as attributes
        for key, value in kwargs.iteritems():
            setattr(self, key, value)
        self._id = self.index.last
        self.index.last += 1
        # Reuse the id of an existing node with the same string form.
        # NOTE(review): `index.last` is bumped again here on a match,
        # leaving a gap in ids -- presumably intentional dedup behavior;
        # verify.
        for node_id, node in self.__class__.index.nodes.iteritems():
            if node.__str__() == self.__str__():
                self._id = node_id
                self.index.last += 1
    def save(self):
        """Register in the class index and attach a fake __node__; fluent."""
        self.__class__.index.register(self)
        self.__node__ = Fake('__node__').has_attr(id=self._id)
        return self
    def delete(self):
        self.index.delete(self._id)
        # TODO: delete relationships
        # NOTE(review): `del self` only unbinds the local name -- it does
        # not destroy the object.
        del self
    def cypher(self, query, params=None):
        # fake: queries are accepted and ignored
        pass
    @classmethod
    def category(cls):
        """Return a fake category node backed by this class's index."""
        return FakeCategoryNode(cls.index)
| {
"repo_name": "joealcorn/mock-neomodel",
"path": "mock_neomodel/core.py",
"copies": "1",
"size": "1493",
"license": "mit",
"hash": -7375430865013173000,
"line_mean": 25.1929824561,
"line_max": 70,
"alpha_frac": 0.6115204287,
"autogenerated": false,
"ratio": 3.8380462724935733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4949566701193573,
"avg_score": null,
"num_lines": null
} |
from fuel.converters.ilsvrc2010 import IMAGE_TARS
from fuel.downloaders.base import default_downloader
def fill_subparser(subparser):
    """Set up a subparser to download the ILSVRC2010 dataset files.

    Two public files (test ground truth and devkit) get explicit URLs; the
    image TARs are non-public, so they are listed by filename only and
    fetched by prepending the user-supplied ``--url-prefix`` obtained by
    registering at the ImageNet website [DOWNLOAD]. These files are quite
    large; downloading them separately and running ``fuel-convert`` may be
    preferable.

    .. [DOWNLOAD] http://www.image-net.org/download-images

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `ilsvrc2010` command.
    """
    urls = [
        ('http://www.image-net.org/challenges/LSVRC/2010/'
         'ILSVRC2010_test_ground_truth.txt'),
        ('http://www.image-net.org/challenges/LSVRC/2010/'
         'download/ILSVRC2010_devkit-1.0.tar.gz'),
    ]
    # one None placeholder per non-public image TAR
    urls.extend([None] * len(IMAGE_TARS))
    filenames = [None, None]
    filenames.extend(IMAGE_TARS)
    subparser.set_defaults(urls=urls, filenames=filenames)
    subparser.add_argument('-P', '--url-prefix', type=str, default=None,
                           help="URL prefix to prepend to the filenames of "
                                "non-public files, in order to download them. "
                                "Be sure to include the trailing slash.")
    return default_downloader
| {
"repo_name": "aalmah/fuel",
"path": "fuel/downloaders/ilsvrc2010.py",
"copies": "10",
"size": "1513",
"license": "mit",
"hash": 4355386616024203000,
"line_mean": 39.8918918919,
"line_max": 79,
"alpha_frac": 0.6516853933,
"autogenerated": false,
"ratio": 3.8794871794871795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.953117257278718,
"avg_score": null,
"num_lines": null
} |
from fuel.converters.ilsvrc2012 import ALL_FILES
from fuel.downloaders.base import default_downloader
def fill_subparser(subparser):
    """Set up a subparser to download the ILSVRC2012 dataset files.

    Every distributed file is non-public, so each is listed by filename
    only and fetched by prepending the user-supplied ``--url-prefix``
    obtained by registering at the ImageNet website [DOWNLOAD]. These
    files are quite large; downloading them separately and running
    ``fuel-convert`` may be preferable.

    .. [DOWNLOAD] http://www.image-net.org/download-images

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `ilsvrc2012` command.
    """
    # no direct URLs: one None placeholder per file, resolved via prefix
    filenames = list(ALL_FILES)
    urls = [None] * len(filenames)
    subparser.set_defaults(urls=urls, filenames=filenames)
    subparser.add_argument('-P', '--url-prefix', type=str, default=None,
                           help="URL prefix to prepend to the filenames of "
                                "non-public files, in order to download them. "
                                "Be sure to include the trailing slash.")
    return default_downloader
| {
"repo_name": "mila-udem/fuel",
"path": "fuel/downloaders/ilsvrc2012.py",
"copies": "3",
"size": "1270",
"license": "mit",
"hash": 1200634227538630700,
"line_mean": 38.6875,
"line_max": 79,
"alpha_frac": 0.662992126,
"autogenerated": false,
"ratio": 4.276094276094276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6439086402094276,
"avg_score": null,
"num_lines": null
} |
from fuel.datasets.hdf5 import H5PYDataset
class H5PyMTDataset(H5PYDataset):
    """H5PYDataset with a target-side vocabulary for machine translation.

    Reads the token map stored under ``<target_source>_vocab`` in the HDF5
    file and exposes word<->id maps plus the beginning/end-of-sentence
    label ids ('<s>' / '</s>').
    """
    def __init__(self, target_source, **kwargs):
        super(H5PyMTDataset, self).__init__(**kwargs)
        self.open()
        self._token_map_cache = {}
        # word -> id for the target side
        self.word2num = self.token_map(target_source)
        self.num2word = {num: word for word, num in self.word2num.items()}
        self.num_labels = len(self.num2word)
        self.bos_label = self.word2num['<s>']
        self.eos_label = self.word2num['</s>']

    def token_map(self, source):
        """Return (and cache) the token->id map stored at `<source>_vocab`."""
        # idiom fix: `not source in ...` -> `source not in ...`
        if source not in self._token_map_cache:
            self._token_map_cache[source] = dict(
                self._file_handle[source + '_vocab'])
        return self._token_map_cache[source]

    def decode(self, labels, keep_all=False):
        """Map label ids back to words, dropping BOS/EOS unless `keep_all`."""
        return [self.num2word[label] for label in labels
                if (label != self.eos_label or keep_all)
                and (label != self.bos_label or keep_all)]

    def pretty_print(self, labels, example):
        """Space-join the decoded words (`example` is unused, kept for API)."""
        labels = self.decode(labels)
        return ' '.join(labels)

    def monospace_print(self, labels):
        """Space-join all decoded words, including BOS/EOS markers."""
        labels = self.decode(labels, keep_all=True)
        return ' '.join(labels)
| {
"repo_name": "rizar/actor-critic-public",
"path": "lvsr/datasets/mt.py",
"copies": "1",
"size": "1179",
"license": "mit",
"hash": 8212923724793673000,
"line_mean": 34.7272727273,
"line_max": 86,
"alpha_frac": 0.6089906701,
"autogenerated": false,
"ratio": 3.427325581395349,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4536316251495349,
"avg_score": null,
"num_lines": null
} |
from fuel.datasets import Dataset
from fuel.utils import do_not_pickle_attributes
@do_not_pickle_attributes('nodes')
class H5Dataset(Dataset):
    """A fuel Dataset backed by an open h5py file object.

    Serves the slice [start, stop) of each requested source node.

    Parameters
    ----------
    sources : tuple of strings
        Sources which the dataset returns
    start : int
        Start index
    stop : int
        Stop index
    h5py_obj : h5py file object
        Already-opened HDF5 file to read from
    data_node : str
        Parent data node in HDF5 file
    sources_in_file : tuple of strings
        Names of nodes in HDF5 file which contain sources. Should be the
        same length as `sources`.
        Optional, if not set will be equal to `sources`.
    """
    def __init__(self, sources, start, stop, h5py_obj, data_node='Data',
                 sources_in_file=None):
        if sources_in_file is None:
            sources_in_file = sources
        self.sources_in_file = sources_in_file
        self.provides_sources = sources
        self.h5py_obj = h5py_obj
        self.data_node = data_node
        self.start = start
        self.stop = stop
        self.num_examples = self.stop - self.start
        # `nodes` is excluded from pickling (see decorator); re-created by
        # open_file here and by load() after unpickling.
        self.nodes = None
        self.open_file(self.h5py_obj)
        super(H5Dataset, self).__init__(self.provides_sources)
    def open_file(self, h5py_obj):
        """Bind the per-source HDF5 nodes under `data_node`."""
        h5file = self.h5py_obj
        node = h5file[self.data_node]
        self.nodes = [node[source] for source in self.sources_in_file]
    def load(self):
        # invoked after unpickling to restore the non-pickled `nodes`
        self.open_file(self.h5py_obj)
    def get_data(self, state=None, request=None):
        """ Returns data from HDF5 dataset.

        `request` may be a slice or a list of indices, both relative to
        `start`; any other type raises ValueError when start is non-zero.

        .. note:: The best performance if `request` is a slice.
        """
        if self.start:
            # shift the request into absolute file coordinates
            if isinstance(request, slice):
                request = slice(request.start + self.start,
                                request.stop + self.start, request.step)
            elif isinstance(request, list):
                request = [index + self.start for index in request]
            else:
                raise ValueError
        data = [node[request] for node in self.nodes]
        return data
| {
"repo_name": "EderSantana/blocks_contrib",
"path": "datasets/h5py.py",
"copies": "1",
"size": "2034",
"license": "mit",
"hash": -5925280822021783000,
"line_mean": 30.78125,
"line_max": 74,
"alpha_frac": 0.5865290069,
"autogenerated": false,
"ratio": 3.941860465116279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5028389472016279,
"avg_score": null,
"num_lines": null
} |
from fuel.datasets import Dataset
import numpy as np
import theano
floatX = theano.config.floatX
class Pylearn2Dataset(Dataset):
    '''Wraps a `pylearn2.dataset` object with the minimal `fuel` interface.

    An object of this class can be used as input to
    `fuel.streams.DataStream`.

    Parameters
    ----------
    dataset: `pylearn2.dataset` object
        Note that this is expecting the actual object, initialized outside.
    batch_size: int
        Batch size to be used by the `pylearn2.dataset` iterator.
    which_sources: sequence of int
        Indices of the pylearn2 data sources to expose.
    '''
    def __init__(self, dataset, batch_size, which_sources, **kwargs):
        self.pylearn2_dataset = dataset
        self.sources = self.pylearn2_dataset.get_data_specs()[1]
        self.sources = tuple([self.sources[i] for i in which_sources])
        # BUG FIX: the original `tuple('eps')` yields ('e', 'p', 's');
        # a one-element tuple containing the string was intended.
        self.sources = self.sources + ('eps',)
        self.batch_size = batch_size
        self.which_sources = which_sources

    def open(self):
        """Return a fresh sequential iterator over the wrapped dataset."""
        num_examples = self.pylearn2_dataset.get_num_examples()
        iterator = self.pylearn2_dataset.iterator(
            self.batch_size,
            # `//` keeps the batch count an int on Python 3 as well
            num_examples // self.batch_size,
            mode='sequential',
            data_specs=self.pylearn2_dataset.get_data_specs(),
            return_tuple=True)
        return iterator

    def get_data(self, state=None, request=None):
        """Return the next batch, restricted to `which_sources`."""
        batch = next(state)
        batch = tuple([batch[i] for i in self.which_sources])
        return batch
class Pylearn2DatasetNoise(Dataset):
    '''Same as `Pylearn2Dataset` but appends an extra batch of Gaussian
    noise as source 'eps'.

    Parameters
    ----------
    dataset: `pylearn2.dataset` object
        Note that this is expecting the actual object, initialized outside.
    batch_size: int
        Batch size to be used by the `pylearn2.dataset` iterator.
    noise_dim: int
        Dimension of the noise batch
    '''
    def __init__(self, dataset, batch_size, noise_dim, which_sources=[0,1],
                 **kwargs):
        self.pylearn2_dataset = dataset
        self.sources = self.pylearn2_dataset.get_data_specs()[1]
        self.sources = tuple([self.sources[i] for i in which_sources])
        # BUG FIX: the original `tuple('eps')` yields ('e', 'p', 's');
        # a one-element tuple containing the string was intended.
        self.sources = self.sources + ('eps',)
        self.batch_size = batch_size
        self.noise_dim = noise_dim
        self.which_sources = which_sources

    def open(self):
        """Return a fresh sequential iterator over the wrapped dataset."""
        num_examples = self.pylearn2_dataset.get_num_examples()
        iterator = self.pylearn2_dataset.iterator(
            self.batch_size,
            # `//` keeps the batch count an int on Python 3 as well
            num_examples // self.batch_size,
            mode='sequential',
            data_specs=self.pylearn2_dataset.get_data_specs(),
            return_tuple=True)
        return iterator

    def get_data(self, state=None, request=None):
        """Return the next batch plus an (timelen, batch, noise_dim) noise array."""
        batch = next(state)
        timelen = batch[0].shape[0]
        batch = tuple([batch[i] for i in self.which_sources])
        eps = np.random.normal(0, 1, size=(timelen,
                                           self.batch_size,
                                           self.noise_dim)).astype(floatX)
        # BUG FIX: the original `tuple(eps)` unpacked the 3-D array into a
        # tuple of 2-D slices; the array must be appended as one source.
        batch = batch + (eps,)
        # NOTE(review): the sibling class returns `batch` unwrapped --
        # verify whether the extra tuple layer here is intentional.
        return (batch,)
| {
"repo_name": "EderSantana/blocks_contrib",
"path": "pylearn2fuel/__init__.py",
"copies": "1",
"size": "3179",
"license": "mit",
"hash": -6869398699987569000,
"line_mean": 38.7375,
"line_max": 84,
"alpha_frac": 0.6086819755,
"autogenerated": false,
"ratio": 3.9539800995024876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004190458308540738,
"num_lines": 80
} |
from fuel.datasets import H5PYDataset
from fuel.transformers.defaults import uint8_pixels_to_floatX
from fuel.utils import find_in_data_path
class CelebA(H5PYDataset):
    """The CelebFaces Attributes Dataset (CelebA) dataset.

    A large-scale face attributes dataset: 202,599 images of 10,177
    identities, each with 40 binary attribute annotations and 5 landmark
    locations per image. The images cover large pose variations and
    background clutter, with large diversity and rich annotations. It can
    serve as training and test data for face attribute recognition, face
    detection, and landmark (facial part) localization.

    Parameters
    ----------
    which_format : {'aligned_cropped, '64'}
        Either the aligned and cropped version of CelebA, or
        a 64x64 version of it.
    which_sets : tuple of str
        Which split to load. Valid values are 'train', 'valid' and
        'test' corresponding to the training set (162,770 examples), the
        validation set (19,867 examples) and the test set (19,962
        examples).
    """
    _filename = 'celeba_{}.hdf5'
    default_transformers = uint8_pixels_to_floatX(('features',))

    def __init__(self, which_format, which_sets, **kwargs):
        # record the format first -- the `filename` property depends on it
        self.which_format = which_format
        data_path = find_in_data_path(self.filename)
        super(CelebA, self).__init__(file_or_path=data_path,
                                     which_sets=which_sets, **kwargs)

    @property
    def filename(self):
        """HDF5 file name for the selected format."""
        return self._filename.format(self.which_format)
| {
"repo_name": "dribnet/fuel",
"path": "fuel/datasets/celeba.py",
"copies": "8",
"size": "1766",
"license": "mit",
"hash": 9131920495863321000,
"line_mean": 34.32,
"line_max": 72,
"alpha_frac": 0.6834654587,
"autogenerated": false,
"ratio": 3.9070796460176993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.85905451047177,
"avg_score": null,
"num_lines": null
} |
from fuel.downloaders.base import default_downloader
BASE_URL = 'https://people.cs.umass.edu/~marlin/data/'
FILENAME = 'caltech101_silhouettes_{}_split1.mat'
def silhouettes_downloader(size, **kwargs):
    """Download the CalTech 101 Silhouettes .mat file for `size` (16 or 28).

    Raises ValueError for any other size; extra keyword arguments are
    forwarded to the default downloader.
    """
    if size not in (16, 28):
        raise ValueError("size must be 16 or 28")
    target = FILENAME.format(size)
    default_downloader(urls=[BASE_URL + target],
                       filenames=[target], **kwargs)
def fill_subparser(subparser):
    """Set up a subparser to download the Silhouettes dataset files.

    Adds the mandatory `size` argument (16 or 28), selecting which of
    Benjamin M. Marlin's split-1 files to fetch:
    `caltech101_silhouettes_16_split1.mat` or
    `caltech101_silhouettes_28_split1.mat` [MARLIN].

    .. [MARLIN] https://people.cs.umass.edu/~marlin/data.shtml

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `caltech101_silhouettes` command.
    """
    valid_sizes = (16, 28)
    subparser.add_argument("size", type=int, choices=valid_sizes,
                           help="height/width of the datapoints")
    return silhouettes_downloader
| {
"repo_name": "udibr/fuel",
"path": "fuel/downloaders/caltech101_silhouettes.py",
"copies": "15",
"size": "1165",
"license": "mit",
"hash": -8469283536065501000,
"line_mean": 30.4864864865,
"line_max": 72,
"alpha_frac": 0.6815450644,
"autogenerated": false,
"ratio": 3.4064327485380117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from fuel.schemes import IterationScheme
import sqlite3
import random
import os
from picklable_itertools import iter_
import data
first_time = 1372636853
last_time = 1404172787
class TaxiTimeCutScheme(IterationScheme):
    """Iteration scheme yielding trips active around random time cuts.

    For each cut timestamp, selects all trips that began at most 40000
    seconds before the cut and were still running at the cut.
    """
    def __init__(self, num_cuts=100, dbfile=None, use_cuts=None):
        self.num_cuts = num_cuts
        # idiom fix: compare to None with `is`, not `==`
        self.dbfile = os.path.join(data.path, 'time_index.db') if dbfile is None else dbfile
        self.use_cuts = use_cuts

    def get_request_iterator(self):
        """Return a shuffled iterator over all matching trip ids."""
        cuts = self.use_cuts
        if cuts is None:
            # draw fresh random cut points over the dataset's time span
            cuts = [random.randrange(first_time, last_time)
                    for _ in range(self.num_cuts)]
        trips = []
        with sqlite3.connect(self.dbfile) as db:
            c = db.cursor()
            for cut in cuts:
                c.execute('SELECT trip FROM trip_times WHERE begin >= ? AND begin <= ? AND end >= ?',
                          (cut - 40000, cut, cut))
                # extend in place instead of the original quadratic
                # `l = l + part` re-allocation per cut
                trips.extend(i for (i,) in c)
        random.shuffle(trips)
        return iter_(trips)
| {
"repo_name": "Saumya-Suvarna/machine-learning",
"path": "Route_prediction/data/cut.py",
"copies": "1",
"size": "1044",
"license": "apache-2.0",
"hash": -6309165706530802000,
"line_mean": 36.2857142857,
"line_max": 105,
"alpha_frac": 0.5708812261,
"autogenerated": false,
"ratio": 3.6,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46708812261,
"avg_score": null,
"num_lines": null
} |
from FuelSDK.rest import ET_CUDSupportRest
########
##
## wrap an Exact Target Push Message
##
########
class ET_PushMessage(ET_CUDSupportRest):
    """REST wrapper for the ExactTarget Push Message endpoint."""
    def __init__(self):
        super(ET_PushMessage, self).__init__()
        # {id} is substituted from urlProps when a message id is set
        self.endpoint = 'https://www.exacttargetapis.com/push/v1/message/{id}'
        self.urlProps = ["id"]
        self.urlPropsRequired = []
########
##
## wrap an Exact Target Push Message Contact and Deliveries
##
########
class ET_PushMessageContact(ET_CUDSupportRest):
    """REST wrapper for sending a push message to a contact."""
    def __init__(self):
        super(ET_PushMessageContact, self).__init__()
        self.endpoint = 'https://www.exacttargetapis.com/push/v1/messageContact/{messageId}/send'
        self.urlProps = ["messageId"]
        self.urlPropsRequired = []
class ET_PushMessageContact_Deliveries(ET_CUDSupportRest):
    """REST wrapper for querying push message contact deliveries."""
    def __init__(self):
        super(ET_PushMessageContact_Deliveries, self).__init__()
        self.endpoint = 'https://www.exacttargetapis.com/push/v1/messageContact/{messageId}/deliveries/{tokenId}'
        self.urlProps = ["messageId", "tokenId"]
        self.urlPropsRequired = []
########
##
## wrap an Exact Target Interaction Events
##
########
class ET_InteractionEvents(ET_CUDSupportRest):
    """REST wrapper for the ExactTarget Interaction Events endpoint."""
    def __init__(self):
        super(ET_InteractionEvents, self).__init__()
        self.endpoint = 'https://www.exacttargetapis.com/interaction/v1/events'
        # this endpoint takes no URL path parameters
        self.urlProps = []
        self.urlPropsRequired = []
| {
"repo_name": "tzmfreedom/et-cli",
"path": "etcli/et_objects.py",
"copies": "1",
"size": "1432",
"license": "mit",
"hash": 5225243001491860000,
"line_mean": 28.8333333333,
"line_max": 113,
"alpha_frac": 0.6361731844,
"autogenerated": false,
"ratio": 3.450602409638554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9577982623592605,
"avg_score": 0.0017585940891900464,
"num_lines": 48
} |
from fuel.streams import AbstractDataStream
from fuel.iterator import DataIterator
import numpy as np
import theano
class IMAGENET(AbstractDataStream):
    """
    A fuel DataStream for imagenet data (Python 2 only: print statements,
    old-style PIL `Image`/`ImageOps` imports).
    from fuel:
    A data stream is an iterable stream of examples/minibatches. It shares
    similarities with Python file handles return by the ``open`` method.
    Data streams can be closed using the :meth:`close` method and reset
    using :meth:`reset` (similar to ``f.seek(0)``).
    """
    def __init__(self, partition_label='train', datadir='/home/jascha/data/imagenet/JPEG/', seed=12345, fraction=0.9, width=256, **kwargs):
        # ignore axis labels if not given
        kwargs.setdefault('axis_labels', '')
        # call __init__ of the AbstractDataStream
        super(self.__class__, self).__init__(**kwargs)
        # get a list of the images
        import glob
        print "getting imagenet images"
        image_files = glob.glob(datadir + "*.JPEG")
        print "filenames loaded"
        self.sources = ('features',)
        self.width = width
        # shuffle indices, subselect a fraction
        # fixed seed makes the train/test split reproducible across runs
        np.random.seed(seed=seed)
        np.random.shuffle(image_files)
        num_train = int(np.round(fraction * np.float32(len(image_files))))
        train_files = image_files[:num_train]
        test_files = image_files[num_train:]
        # NOTE(review): any partition_label not containing 'train' or
        # 'test' leaves self.X unset and the next line raises -- confirm
        # callers only pass those two.
        if 'train' in partition_label:
            self.X = train_files
        elif 'test' in partition_label:
            self.X = test_files
        self.num_examples = len(self.X)
        self.current_index = 0
    def get_data(self, data_state, request=None):
        """Get a new sample of data; with no request, serve the next image."""
        if request is None:
            request = [self.current_index]
            self.current_index += 1
        return self.load_images(request)
    def apply_default_transformers(self, data_stream):
        # no default transformers: pass the stream through unchanged
        return data_stream
    def open(self):
        # nothing to open; images are loaded lazily per request
        return None
    def close(self):
        """No-op: data is plain JPEG files on disk, nothing to release."""
        pass
    def reset(self):
        """Reset the current data index"""
        self.current_index = 0
    def get_epoch_iterator(self, **kwargs):
        return super(self.__class__, self).get_epoch_iterator(**kwargs)
        # return None
        # TODO: implement iterator
    def next_epoch(self, *args, **kwargs):
        # restart sequential serving at the beginning of the file list
        self.current_index = 0
        return super(self.__class__, self).next_epoch(**kwargs)
        # return None
    def load_images(self, inds):
        """Load the images at indices `inds` into a (N, 3, width, width) array."""
        print ".",
        output = np.zeros((len(inds), 3, self.width, self.width), dtype=theano.config.floatX)
        for ii, idx in enumerate(inds):
            output[ii] = self.load_image(idx)
        return [output]
    def load_image(self, idx):
        """Load one image: pad to square, resize, and move channels first.

        On a reshape failure (e.g. a grayscale image) a random other image
        is substituted via recursion.
        """
        filename = self.X[idx]
        import Image
        import ImageOps
        # print "loading ", self.X[idx]
        image = Image.open(self.X[idx])
        width, height = image.size
        # pad the short side so the image becomes square before resizing
        if width > height:
            delta2 = int((width - height)/2)
            image = ImageOps.expand(image, border=(0, delta2, 0, delta2))
        else:
            delta2 = int((height - width)/2)
            image = ImageOps.expand(image, border=(delta2, 0, delta2, 0))
        image = image.resize((self.width, self.width), resample=Image.BICUBIC)
        try:
            imagenp = np.array(image.getdata()).reshape((self.width,self.width,3))
            imagenp = imagenp.transpose((2,0,1)) # move color channels to beginning
        except:
            # print "reshape failure (black and white?)"
            # NOTE(review): bare except hides all errors, not just the
            # grayscale reshape case.
            imagenp = self.load_image(np.random.randint(len(self.X)))
return imagenp.astype(theano.config.floatX) | {
"repo_name": "Sohl-Dickstein/Diffusion-Probabilistic-Models",
"path": "imagenet_data.py",
"copies": "2",
"size": "3663",
"license": "mit",
"hash": 7059201344131353000,
"line_mean": 30.0508474576,
"line_max": 139,
"alpha_frac": 0.5995085995,
"autogenerated": false,
"ratio": 3.8356020942408375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5435110693740838,
"avg_score": null,
"num_lines": null
} |
from fuel.transformers import Mapping
import numpy
import logging
from matplotlib.mlab import specgram
def log_spectrogram(signal):
    """Return the log power spectrogram of ``signal`` (time along axis 0)."""
    spectrum = specgram(signal)[0]
    return numpy.log(spectrum.T)
logger = logging.getLogger(__name__)


class Normalization(object):
    """Compute per-feature mean/std of one source and standardize it.

    A full epoch of ``data_stream`` is consumed at construction time to
    accumulate the mean and standard deviation of ``source``.  ``apply``
    then standardizes batches, and ``wrap_stream`` attaches the
    normalization to a stream as a ``Mapping`` transformer.

    Raises ValueError if the stream yields no examples.
    """

    def __init__(self, data_stream, source):
        index = data_stream.sources.index(source)
        sum_features = 0
        sum_features2 = 0
        num_examples = 0
        num_batches = 0
        for data in data_stream.get_epoch_iterator():
            features = data[index]
            sum_features += features.sum(axis=0)
            sum_features2 += (features ** 2).sum(axis=0)
            num_examples += len(features)
            num_batches += 1
        # Fix: the original read the enumerate counter after the loop, which
        # raised NameError on an empty stream (and ZeroDivisionError below);
        # fail with a clear message instead.
        if num_examples == 0:
            raise ValueError('cannot compute normalization from an empty stream')
        # Fix: the original logged the batch count but called it "examples".
        logger.info("Used {} examples ({} batches) to compute normalization"
                    .format(num_examples, num_batches))
        mean_features = sum_features / num_examples
        # std via E[x^2] - E[x]^2
        std_features = (sum_features2 / num_examples - mean_features ** 2) ** 0.5
        self.mean_features = mean_features
        self.std_features = std_features
        self.index = index

    def apply(self, data):
        """Return ``data`` with the tracked source standardized in place of
        the original values."""
        data = list(data)
        data[self.index] = ((data[self.index] - self.mean_features)
                            / self.std_features)
        return tuple(data)

    def wrap_stream(self, stream):
        """Wrap ``stream`` in a Mapping that applies this normalization."""
        return Mapping(stream, Invoke(self, 'apply'))
class Invoke(object):
    """Picklable callable that defers a method call on a stored object.

    Equivalent to ``getattr(object_, method)`` looked up at call time, so
    the bound method is never stored directly.
    """

    def __init__(self, object_, method):
        self.object_ = object_
        self.method = method

    def __call__(self, *args, **kwargs):
        target = getattr(self.object_, self.method)
        return target(*args, **kwargs)
| {
"repo_name": "nke001/attention-lvcsr",
"path": "lvsr/preprocessing.py",
"copies": "3",
"size": "1559",
"license": "mit",
"hash": 7081670229342429000,
"line_mean": 27.8703703704,
"line_max": 83,
"alpha_frac": 0.6100064144,
"autogenerated": false,
"ratio": 3.8975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6007506414399999,
"avg_score": null,
"num_lines": null
} |
from fuel.transformers import Transformer
import numpy as np
class DropSources(Transformer):
    """Drops some sources from a stream
    Parameters
    ----------
    data_stream : :class:`DataStream` or :class:`Transformer`.
        The data stream.
    sources: list
        A list of sources to drop
    """
    def __init__(self, data_stream, sources):
        super(DropSources, self).__init__(data_stream)
        old_sources = list(self.data_stream.sources)
        self.mask = [True for _ in old_sources]
        cur_sources = old_sources[:]
        # (the enumerate index of the original loop was unused)
        for s in sources:
            if s not in cur_sources:
                raise KeyError("%s not in the sources of the stream" % s)
            cur_sources.remove(s)
            # mark the first occurrence of this source as dropped
            self.mask[old_sources.index(s)] = False
        self.sources = tuple(cur_sources)

    def get_data(self, request=None):
        """Return the next batch with the dropped sources filtered out."""
        if request is not None:
            raise ValueError
        data = next(self.child_epoch_iterator)
        # `if mask` rather than `mask == True` -- identical for booleans,
        # and consistent with the cleaner variant of this class elsewhere.
        return tuple(source for source, mask in zip(data, self.mask) if mask)
| {
"repo_name": "lukemetz/MLFun",
"path": "IMDB/transformers.py",
"copies": "1",
"size": "1119",
"license": "mit",
"hash": 5175895396965176000,
"line_mean": 31.9117647059,
"line_max": 89,
"alpha_frac": 0.5933869526,
"autogenerated": false,
"ratio": 4.083941605839416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5177328558439416,
"avg_score": null,
"num_lines": null
} |
from fuel.transformers import Transformer
class DropSources(Transformer):
    """Drops some sources from a stream
    Parameters
    ----------
    data_stream : :class:`DataStream` or :class:`Transformer`.
        The data stream.
    sources: list
        A list of sources to drop
    """
    def __init__(self, data_stream, sources):
        super(DropSources, self).__init__(data_stream)
        available = list(self.data_stream.sources)
        keep = [True] * len(available)
        remaining = available[:]
        for name in sources:
            if name not in remaining:
                raise KeyError("%s not in the sources of the stream" % name)
            remaining.remove(name)
            keep[available.index(name)] = False
        self.mask = keep
        self.sources = tuple(remaining)

    def get_data(self, request=None):
        if request is not None:
            raise ValueError
        batch = next(self.child_epoch_iterator)
        # keep only the values whose source was not dropped
        return tuple(value for value, kept in zip(batch, self.mask) if kept)
| {
"repo_name": "lukemetz/cuboid",
"path": "cuboid/transformers.py",
"copies": "1",
"size": "1122",
"license": "mit",
"hash": 8197580196244888000,
"line_mean": 31.0571428571,
"line_max": 73,
"alpha_frac": 0.5748663102,
"autogenerated": false,
"ratio": 4.186567164179104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 35
} |
from fuel.transformers import Transformer
class NGrams(Transformer):
    """Return n-grams from a stream.
    This data stream wrapper takes as an input a data stream outputting
    sentences. From these sentences n-grams of a fixed order (e.g. bigrams,
    trigrams, etc.) are extracted and returned. It also creates a
    ``targets`` data source. For each example, the target is the word
    immediately following that n-gram. It is normally used for language
    modeling, where we try to predict the next word from the previous *n*
    words.
    Parameters
    ----------
    ngram_order : int
        The order of the n-grams to output e.g. 3 for trigrams.
    data_stream : :class:`.DataStream` instance
        The data stream providing sentences. Each example is assumed to be
        a list of integers.
    target_source : str, optional
        This data stream adds a new source for the target words. By default
        this source is 'targets'.
    """
    def __init__(self, ngram_order, data_stream, target_source='targets'):
        if len(data_stream.sources) > 1:
            # Fix: raise with an explanatory message instead of a bare
            # ValueError (exception type unchanged).
            raise ValueError('{} expects a stream with exactly one source'
                             .format(self.__class__.__name__))
        super(NGrams, self).__init__(data_stream)
        self.sources = self.sources + (target_source,)
        self.ngram_order = ngram_order
        # current sentence being consumed and the next n-gram start index
        self.sentence = []
        self.index = 0

    def get_data(self, request=None):
        """Return the next (ngram, target) pair, pulling fresh sentences
        from the wrapped stream whenever the current one is exhausted."""
        while not self.index < len(self.sentence) - self.ngram_order:
            self.sentence, = next(self.child_epoch_iterator)
            self.index = 0
        ngram = self.sentence[self.index:self.index + self.ngram_order]
        target = self.sentence[self.index + self.ngram_order]
        self.index += 1
        return (ngram, target)
| {
"repo_name": "nke001/attention-lvcsr",
"path": "libs/fuel/fuel/transformers/text.py",
"copies": "8",
"size": "1696",
"license": "mit",
"hash": 3376102955260386000,
"line_mean": 38.4418604651,
"line_max": 75,
"alpha_frac": 0.6491745283,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 43
} |
from fuel.transformers import Transformer
class Window(Transformer):
    """Return pairs of source and target windows from a stream.
    This data stream wrapper takes as an input a data stream outputting
    sequences of potentially varying lengths (e.g. sentences, audio tracks,
    etc.). It then returns two sliding windows (source and target) over
    these sequences.
    For example, to train an n-gram model set `source_window` to n,
    `target_window` to 1, no offset, and `overlapping` to false. This will
    give chunks [1, N] and [N + 1]. To train an RNN you often want to set
    the source and target window to the same size and use an offset of 1
    with overlap, this would give you chunks [1, N] and [2, N + 1].
    Parameters
    ----------
    offset : int
        The offset from the source window where the target window starts.
    source_window : int
        The size of the source window.
    target_window : int
        The size of the target window.
    overlapping : bool
        If true, the source and target windows overlap i.e. the offset of
        the target window is taken to be from the beginning of the source
        window. If false, the target window offset is taken to be from the
        end of the source window.
    data_stream : :class:`.DataStream` instance
        The data stream providing sequences. Each example is assumed to be
        an object that supports slicing.
    target_source : str, optional
        This data stream adds a new source for the target words. By default
        this source is 'targets'.
    """
    def __init__(self, offset, source_window, target_window,
                 overlapping, data_stream, target_source='targets', **kwargs):
        if not data_stream.produces_examples:
            raise ValueError('the wrapped data stream must produce examples, '
                             'not batches of examples.')
        if len(data_stream.sources) > 1:
            raise ValueError('{} expects only one source'
                             .format(self.__class__.__name__))
        super(Window, self).__init__(data_stream, produces_examples=True,
                                     **kwargs)
        self.sources = self.sources + (target_source,)
        self.offset = offset
        self.source_window = source_window
        self.target_window = target_window
        self.overlapping = overlapping
        # current sequence being consumed; _set_index positions the start
        # of the source window inside it
        self.sentence = []
        self._set_index()

    def _set_index(self):
        """Set the starting index of the source window."""
        self.index = 0
        # If offset is negative, target window might start before 0
        self.index = -min(0, self._get_target_index())

    def _get_target_index(self):
        """Return the index where the target window starts."""
        # without overlap the target starts after the full source window,
        # with overlap it starts relative to the source window's beginning
        return (self.index + self.source_window * (not self.overlapping) +
                self.offset)

    def _get_end_index(self):
        """Return the end of both windows."""
        return max(self.index + self.source_window,
                   self._get_target_index() + self.target_window)

    def get_data(self, request=None):
        # request-driven access is not supported; this transformer streams
        if request is not None:
            raise ValueError
        # pull new sequences until both windows fit inside the sentence
        while not self._get_end_index() <= len(self.sentence):
            self.sentence, = next(self.child_epoch_iterator)
            self._set_index()
        source = self.sentence[self.index:self.index + self.source_window]
        target = self.sentence[self._get_target_index():
                               self._get_target_index() + self.target_window]
        # slide both windows one step for the next call
        self.index += 1
        return (source, target)
class NGrams(Window):
    """Return n-grams from a stream.
    This data stream wrapper takes as an input a data stream outputting
    sentences. From these sentences n-grams of a fixed order (e.g. bigrams,
    trigrams, etc.) are extracted and returned. It also creates a
    ``targets`` data source. For each example, the target is the word
    immediately following that n-gram. It is normally used for language
    modeling, where we try to predict the next word from the previous *n*
    words.
    .. note::
        Unlike the :class:`Window` stream, the target returned by
        :class:`NGrams` is a single element instead of a window.
    Parameters
    ----------
    ngram_order : int
        The order of the n-grams to output e.g. 3 for trigrams.
    data_stream : :class:`.DataStream` instance
        The data stream providing sentences. Each example is assumed to be
        a list of integers.
    target_source : str, optional
        This data stream adds a new source for the target words. By default
        this source is 'targets'.
    """
    def __init__(self, ngram_order, *args, **kwargs):
        # An n-gram stream is a Window stream with a source window of
        # `ngram_order` elements, a one-element target window, no offset
        # and no overlap.
        super(NGrams, self).__init__(0, ngram_order, 1, False,
                                     *args, **kwargs)

    def get_data(self, *args, **kwargs):
        window_source, window_target = super(NGrams, self).get_data(
            *args, **kwargs)
        # unwrap the one-element target window into a scalar target
        return (window_source, window_target[0])
| {
"repo_name": "udibr/fuel",
"path": "fuel/transformers/sequences.py",
"copies": "7",
"size": "4963",
"license": "mit",
"hash": 1376859687782830000,
"line_mean": 39.3495934959,
"line_max": 78,
"alpha_frac": 0.6258311505,
"autogenerated": false,
"ratio": 4.3006932409012135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8426524391401213,
"avg_score": null,
"num_lines": null
} |
from .fuelux_widget import FuelUxWidget
# from .. import utils
class Pillbox(FuelUxWidget):
    """
    Widget for FuelUX's pillbox.
    See http://getfuelux.com/javascript.html#pillbox.
    Possible attributes:
    - auto_init (boolean)
    - whether to add 'data-initialize="pillbox"'
    - id (string)
    - items (list of dictionaries with keys 'class', 'value' and 'text')
    - list of items that are already present in the pillbox
    - more (dictionary)
    - used to generate something like 'and 8 more'
    - more.before (string)
    - more.after (string)
    - add_item (string)
    - placeholder text to display in the area the user has to click
    in order to add items
    - js (dictionary or boolean).
    Set this to False to take care ot the JavaScript yourself.
    The dictionary can contain any of the following keys:
    - acceptKeyCodes (list of integers)
    - edit (boolean)
    - readonly (boolean or -1)
    - truncate (boolean)
    - suggestions (list of strings)
    See FuelUX for more details.
    """
    template_name = "pillbox"
    # required_attrs = ["id"]
    default_attrs = {
        "auto_init": True,
        "add_item": "add item",
        "more": {
            "before": "and",
            "after": "more",
        },
        "js": {
            "edit": False
        }
    }

    def use_required_attribute(self, initial):
        # Django hook: never mark the underlying input as `required`.
        return False

    def set_items(self, items):
        """Replace the pillbox's items; ``items`` must be iterable."""
        # Bug fix: the original tested `if not items.__iter__:`, which is
        # always falsy when the attribute exists and raises AttributeError
        # (not the intended ValueError) when it does not.  Actually probe
        # iterability with iter().
        try:
            iter(items)
        except TypeError:
            raise ValueError(
                "Pillbox::set_items: items must be iterable."
            )
        self.attrs["items"] = items
        return self

    def set_suggestions(self, suggestions):
        """Replace the autocomplete suggestions; must be iterable."""
        # Same iterability fix as set_items.
        try:
            iter(suggestions)
        except TypeError:
            raise ValueError(
                "Pillbox::set_suggestions: suggestions must be iterable."
            )
        self.attrs["js"]["suggestions"] = suggestions
        return self
| {
"repo_name": "jneuendorf/what-should-i-eat",
"path": "fuelux_widgets/widgets/pillbox.py",
"copies": "1",
"size": "1948",
"license": "mit",
"hash": 924874416910955800,
"line_mean": 28.9692307692,
"line_max": 73,
"alpha_frac": 0.5703285421,
"autogenerated": false,
"ratio": 4.075313807531381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5145642349631381,
"avg_score": null,
"num_lines": null
} |
from fulcrum.api import Client
from fulcrum.api.endpoints import (Forms, Records, Webhooks, Photos,
Memberships, Roles, ChoiceLists, Signatures,
ClassificationSets, Projects, Videos, Audio,
Changesets, ChildRecords, AuditLogs, Layers,
Authorizations)
from fulcrum.utils import is_string
__version__ = '1.12.0'
default_uri = 'https://api.fulcrumapp.com'
def create_authorization(email, password, organization_id, note,
                         timeout=None, user_id=None):
    """Create an API authorization (token) for the given account.

    ``timeout`` must be an int (seconds) and ``user_id`` a string when
    provided; otherwise ValueError is raised.  Returns the parsed API
    response.
    """
    if timeout is not None and not isinstance(timeout, int):
        raise ValueError('timeout must be an integer.')
    if user_id is not None and not is_string(user_id):
        raise ValueError('user_id must be a string.')
    payload = {
        'authorization': {
            'organization_id': organization_id,
            'note': note,
            'timeout': timeout,
            'user_id': user_id
        }
    }
    client = Client(None, default_uri)
    return client.call('post', 'authorizations', auth=(email, password),
                       data=payload,
                       extra_headers={'Content-Type': 'application/json'})
def get_user(email, password):
    """Fetch the user record for the given credentials via basic auth."""
    client = Client(None, default_uri)
    return client.call('get', 'users', auth=(email, password))
class Fulcrum(object):
    """Entry point for the Fulcrum API: one shared Client plus one wrapper
    attribute per endpoint."""

    def __init__(self, key, uri=default_uri):
        self.client = Client(key=key, uri=uri)
        # endpoint attribute name -> wrapper class; every wrapper shares
        # the same client instance
        endpoint_classes = (
            ('forms', Forms),
            ('records', Records),
            ('webhooks', Webhooks),
            ('photos', Photos),
            ('signatures', Signatures),
            ('memberships', Memberships),
            ('roles', Roles),
            ('choice_lists', ChoiceLists),
            ('classification_sets', ClassificationSets),
            ('projects', Projects),
            ('videos', Videos),
            ('audio', Audio),
            ('changesets', Changesets),
            ('child_records', ChildRecords),
            ('audit_logs', AuditLogs),
            ('layers', Layers),
            ('authorizations', Authorizations),
        )
        for attr_name, endpoint_cls in endpoint_classes:
            setattr(self, attr_name, endpoint_cls(client=self.client))

    def query(self, sql, format = 'json'):
        """Run a raw SQL query against the Fulcrum query API.

        ``format`` may be 'json' (parsed response) or 'csv' (raw text).
        """
        payload = {'q': sql, 'format': format}
        return self.client.call(
            'post', 'query',
            data=payload,
            extra_headers={'Content-Type': 'application/json'},
            json_content=(format != 'csv'))
| {
"repo_name": "fulcrumapp/fulcrum-python",
"path": "fulcrum/__init__.py",
"copies": "1",
"size": "2872",
"license": "apache-2.0",
"hash": -4832148072024034000,
"line_mean": 36.7894736842,
"line_max": 79,
"alpha_frac": 0.6058495822,
"autogenerated": false,
"ratio": 3.8191489361702127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4924998518370212,
"avg_score": null,
"num_lines": null
} |
from fulcrum.mixins import Findable, Deleteable, Createable, Searchable, Updateable, Media, Track, MediaCreateable
from . import BaseAPI
class Forms(BaseAPI, Findable, Deleteable, Createable, Searchable, Updateable):
    """Endpoint wrapper for /forms (full CRUD plus history)."""
    path = 'forms'

    def history(self, id, url_params=None):
        """Return the change history of form ``id``, forwarding optional
        query parameters.

        Fix: removed the duplicated ``api_resp = api_resp =`` assignment.
        """
        api_resp = self.client.call('get', '{0}/{1}/history'.format(self.path, id), url_params=url_params)
        return api_resp
class Records(BaseAPI, Findable, Deleteable, Createable, Searchable, Updateable):
    """Endpoint wrapper for /records (full CRUD plus history)."""
    path = 'records'

    def history(self, id, url_params=None):
        """Return the change history of record ``id``.

        Fix: removed the duplicated ``api_resp = api_resp =`` assignment;
        ``url_params`` (default None, backward compatible) added for
        consistency with ``Forms.history``.
        """
        api_resp = self.client.call('get', '{0}/{1}/history'.format(self.path, id), url_params=url_params)
        return api_resp
class Webhooks(BaseAPI, Findable, Deleteable, Createable, Searchable, Updateable):
    # Endpoint wrapper for /webhooks; all behavior comes from the mixins.
    path = 'webhooks'
class Photos(BaseAPI, Findable, Searchable, Media, MediaCreateable):
    # Endpoint wrapper for /photos: JPEG downloads in thumbnail/large
    # sizes; uploads POST straight to the collection path.
    path = 'photos'
    ext = 'jpg'
    sizes = ['thumbnail', 'large']
    media_upload_path = ''
    media_form_field_name = 'photo'
    default_content_type = 'image/jpeg'
class Signatures(BaseAPI, Findable, Searchable, Media, MediaCreateable):
    # Endpoint wrapper for /signatures: PNG downloads in thumbnail/large
    # sizes; uploads POST straight to the collection path.
    path = 'signatures'
    ext = 'png'
    sizes = ['thumbnail', 'large']
    media_upload_path = ''
    media_form_field_name = 'signature'
    default_content_type = 'image/png'
class Videos(BaseAPI, Findable, Searchable, Media, Track, MediaCreateable):
    # Endpoint wrapper for /videos: MP4 downloads in small/medium sizes,
    # GPS tracks via the Track mixin, uploads via a dedicated /upload path.
    path = 'videos'
    ext = 'mp4'
    sizes = ['small', 'medium']
    media_upload_path = '/upload'
    media_form_field_name = 'video'
    default_content_type = 'video/mp4'
class Audio(BaseAPI, Findable, Searchable, Media, Track, MediaCreateable):
    # Endpoint wrapper for /audio: no size variants, GPS tracks via the
    # Track mixin, uploads via a dedicated /upload path.
    # NOTE(review): ext is 'mp4' while default_content_type is 'audio/mp3';
    # presumably the API serves audio inside an mp4 container -- confirm.
    path = 'audio'
    ext = 'mp4'
    sizes = []
    media_upload_path = '/upload'
    media_form_field_name = 'audio'
    default_content_type = 'audio/mp3'
class Memberships(BaseAPI, Searchable):
    """Endpoint wrapper for /memberships (search plus bulk permission
    changes)."""
    path = 'memberships'

    def change(self, resource_type, id, action, membership_ids):
        """Apply ``action`` (e.g. add/remove) for ``membership_ids`` on the
        resource of the given type and id."""
        change = {
            'type': '{}_members'.format(resource_type),
            '{}_id'.format(resource_type): id,
            action: membership_ids
        }
        return self.client.call(
            'post', 'memberships/change_permissions',
            data={'change': change},
            extra_headers={'Content-Type': 'application/json'})
class Roles(BaseAPI, Searchable):
    # Endpoint wrapper for /roles (read-only search).
    path = 'roles'
class ChoiceLists(BaseAPI, Findable, Deleteable, Createable, Searchable, Updateable):
    # Endpoint wrapper for /choice_lists; all behavior comes from the mixins.
    path = 'choice_lists'
class ClassificationSets(BaseAPI, Findable, Deleteable, Createable, Searchable, Updateable):
    # Endpoint wrapper for /classification_sets; mixins provide the CRUD.
    path = 'classification_sets'
class Projects(BaseAPI, Findable, Deleteable, Createable, Searchable, Updateable):
    # Endpoint wrapper for /projects; mixins provide the CRUD.
    path = 'projects'
class Changesets(BaseAPI, Findable, Createable, Searchable, Updateable):
    """Endpoint wrapper for /changesets (no delete; supports close)."""
    path = 'changesets'

    def close(self, id):
        """Close changeset ``id``.

        Fix: removed the duplicated ``api_resp = api_resp =`` assignment.
        """
        api_resp = self.client.call('put', '{0}/{1}/close'.format(self.path, id))
        return api_resp
class ChildRecords(BaseAPI, Searchable):
    # Endpoint wrapper for /child_records (read-only search).
    path = 'child_records'
class AuditLogs(BaseAPI, Searchable, Findable):
    # Endpoint wrapper for /audit_logs (search and fetch-by-id only).
    path = 'audit_logs'
class Layers(BaseAPI, Findable, Deleteable, Createable, Searchable, Updateable):
    # Endpoint wrapper for /layers; mixins provide the CRUD.
    path = 'layers'
class Authorizations(BaseAPI, Findable, Deleteable, Searchable, Updateable):
    """Endpoint wrapper for /authorizations (no create; tokens can be
    regenerated instead)."""
    path = 'authorizations'

    def regenerate(self, id):
        """Rotate the token for authorization ``id`` and return the API
        response."""
        endpoint = '{}/{}/regenerate'.format(self.path, id)
        return self.client.call('post', endpoint)
| {
"repo_name": "fulcrumapp/fulcrum-python",
"path": "fulcrum/api/endpoints.py",
"copies": "1",
"size": "3490",
"license": "apache-2.0",
"hash": -2294210087985207000,
"line_mean": 29.0862068966,
"line_max": 117,
"alpha_frac": 0.6532951289,
"autogenerated": false,
"ratio": 3.51460221550856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.466789734440856,
"avg_score": null,
"num_lines": null
} |
from fulcrum.utils import is_string, generate_uuid
class Findable(object):
    """Mixin adding ``find``: fetch one resource by id from ``self.path``."""

    def find(self, id):
        """Return the API response for ``GET <path>/<id>``."""
        resource_path = '{0}/{1}'.format(self.path, id)
        return self.client.call('get', resource_path)
class Deleteable(object):
    """Mixin adding ``delete``: remove one resource by id."""

    def delete(self, id):
        """Issue ``DELETE <path>/<id>``; the API response is discarded."""
        resource_path = '{0}/{1}'.format(self.path, id)
        self.client.call('delete', resource_path)
class Createable(object):
    """Mixin adding ``create``: POST a new resource as JSON."""

    def create(self, obj):
        """POST ``obj`` to the collection path and return the response."""
        headers = {'Content-Type': 'application/json'}
        return self.client.call('post', self.path, data=obj,
                                extra_headers=headers)
class Searchable(object):
    """Mixin adding ``search``: list/query the resource collection."""

    def search(self, url_params=None):
        """GET the collection path, forwarding optional query parameters."""
        return self.client.call('get', self.path, url_params=url_params)
class Updateable(object):
    """Mixin adding ``update``: PUT new content to an existing resource."""

    def update(self, id, obj):
        """PUT ``obj`` as JSON to ``<path>/<id>`` and return the response."""
        headers = {'Content-Type': 'application/json'}
        resource_path = '{0}/{1}'.format(self.path, id)
        return self.client.call('put', resource_path, data=obj,
                                extra_headers=headers)
class Media(object):
    """Mixin adding ``media``: download a media file (photo, video, ...).

    Host classes must define ``path``, ``ext`` and ``sizes``.
    """

    def media(self, id, size='original'):
        """Fetch the media file for ``id``.

        ``size`` must be 'original' or one of ``self.sizes``; otherwise
        ValueError is raised.  Returns the raw (non-JSON) API response.
        Fix: ``size not in`` replaces the non-idiomatic ``not size in``.
        """
        if size == 'original':
            path = '{}/{}.{}'.format(self.path, id, self.ext)
        else:
            if size not in self.sizes:
                raise ValueError('Size {} not supported'.format(size))
            path = '{}/{}/{}.{}'.format(self.path, id, size, self.ext)
        api_resp = self.client.call('get', path, json_content=False)
        return api_resp
class Track(object):
    """Mixin adding ``track``: download the GPS track of a media item."""

    # format name -> URL suffix (geojson_points adds a query parameter)
    track_formats = {
        'json': 'json',
        'geojson': 'geojson',
        'gpx': 'gpx',
        'kml': 'kml',
        'geojson_points': 'geojson?type=points',
    }

    def track(self, id, format='json'):
        """Fetch the track for ``id`` in the given format.

        Raises ValueError for unknown formats.  JSON-like formats are
        parsed; gpx/kml are returned raw.
        Fix: idiomatic ``format not in self.track_formats`` replaces
        ``not format in self.track_formats.keys()``.
        """
        if format not in self.track_formats:
            raise ValueError('Format {} not supported'.format(format))
        path = '{}/{}/track.{}'.format(self.path, id, self.track_formats[format])
        is_json_resp = format in ('json', 'geojson', 'geojson_points')
        api_resp = self.client.call('get', path, json_content=is_json_resp)
        return api_resp
class MediaCreateable(object):
    """Mixin adding ``create``: upload a media file as multipart form data.

    Accepts either an open file object or a path string (opened in binary
    mode).  A fresh access key is generated when none is supplied.
    """

    def create(self, media_or_path, content_type=None, access_key=None):
        media = open(media_or_path, 'rb') if is_string(media_or_path) else media_or_path
        field = self.media_form_field_name
        data = {
            '{}[access_key]'.format(field): access_key or generate_uuid()
        }
        files = {
            '{}[file]'.format(field): (media.name, media,
                                       content_type or self.default_content_type)
        }
        upload_path = self.path + self.media_upload_path
        return self.client.call('post', upload_path, data=data, files=files)
| {
"repo_name": "fulcrumapp/fulcrum-python",
"path": "fulcrum/mixins.py",
"copies": "1",
"size": "2611",
"license": "apache-2.0",
"hash": 8030780666054748000,
"line_mean": 31.2345679012,
"line_max": 137,
"alpha_frac": 0.5878973573,
"autogenerated": false,
"ratio": 3.504697986577181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4592595343877181,
"avg_score": null,
"num_lines": null
} |
from .FullScreenEffectClass import FullScreenEffect
from ..backends.RenderTargetBase import attachmentTypeEnum, renderTextureTypeEnum
from ..backends.base_backend import CompilationError, BaseBackend
from glaze.GL import glDetachShader
class FSEManager(object):
    """Manages full screen effects: registration and ordering of effects,
    shader compilation for their techniques, and render-target creation."""

    class states(object):
        # values accepted by setEffectState
        disable = 0
        enabled = 1

    def __init__(self, engine, backend):
        """
        @type backend: BaseBackend
        @rtype : FSEManager
        @type engine: ManagersReferenceHolder
        """
        self._orderedEffects = []        # FullScreenEffect objects in apply order
        self._effectsDict = {}           # effect ID -> FullScreenEffect
        self._disabledEffects = {}       # IDs of disabled effects (value unused)
        self._glShaders = {}             # shader source name -> compiled GL shader
        self._e3dShaders = {}            # program ID -> engine ShaderClass
        self._builtRenderTargets = {}    # effect ID -> render target
        self._engine = engine
        self._maxColorAtachments = backend._getMaxColorAttachments()
        self._sceneRT = None
        self.renderTarget = backend.getRenderTarget()
        self._backend = backend

    def _initializeSceneRT(self):
        """Create the render target the scene is drawn into."""
        # self._sceneRT = self.addEffect('defaults/screenQuad.fse')
        self._sceneRT = self.__buildSceneRT()

    def __buildSceneRT(self):
        """Build a window-sized 2D render target with a depth attachment
        and a '_scene' color attachment."""
        rt = self.renderTarget(self._engine, self._backend, '2d', True)
        size = self._backend._window.size
        rt.createDepthAttachment(size)
        rt.createColorAttachments(['_scene'], [size])
        return rt

    def _getMaxColorAtachments(self):
        return self._maxColorAtachments

    maxColorAtachments = property(_getMaxColorAtachments)

    def moveEffect(self, ID, newIndex):
        """Move the effect identified by ID to position newIndex."""
        f = self._effectsDict[ID]
        i = self._orderedEffects.index(f)
        # Fix: list.insert takes (index, object); the original had the
        # arguments swapped, passing the popped effect object as the index.
        self._orderedEffects.insert(newIndex, self._orderedEffects.pop(i))

    def getEffectIndex(self, ID):
        """Return the position of the effect identified by ID."""
        f = self._effectsDict[ID]
        return self._orderedEffects.index(f)

    def addEffect(self, filepath, ID, activeTechnique='', index=-1):
        """Load a .fse effect file, register it under ``ID`` and build its
        textures, shaders and render target.

        @rtype : FullScreenEffect
        """
        try:
            fse = FullScreenEffect(filepath, ID, self._engine.path.defaults.shaders)
            fse.activeTechnique = activeTechnique
            if index == -1:
                self._orderedEffects.append(fse)
            else:
                # Fix: insert requires an index; the original called
                # insert(fse) with a single argument (TypeError).
                self._orderedEffects.insert(index, fse)
            self._effectsDict[ID] = fse
            self._buildTextures(fse)
            if activeTechnique != '':
                self._buildTechnique(fse)
                self._buildRenderTarget(fse)
        except Exception as ex:
            self._engine.log('Error adding effect \'{}\' {}'.format(ID, str(ex)))
            raise
        return fse

    def removeEffect(self, ID):
        """Unregister the effect identified by ID."""
        # Fix: list.remove removes by value; the original passed an index,
        # which raised ValueError.  Pop by index instead.
        self._orderedEffects.pop(self.getEffectIndex(ID))
        self._effectsDict.pop(ID)
        if ID in self._disabledEffects:
            self._disabledEffects.pop(ID)

    def setEffectState(self, ID, state):
        """Enable or disable the effect identified by ID.

        Fix: the original signature lacked the ``ID`` parameter even though
        the body used it, so every call raised NameError.
        """
        if state == self.states.enabled:
            # pop with default: enabling an already-enabled effect is a no-op
            self._disabledEffects.pop(ID, None)
        else:
            self._disabledEffects[ID] = 0

    def _buildTextures(self, effect):
        """Load any file-backed 2D and cube textures declared by the effect.

        @type effect: FullScreenEffect
        """
        for t in effect.textures2d.values():
            if 'file' in t.members.keys():
                self._engine.textures.loadTexture(t.members['file'].strip('\'').strip('"'), effect.ID + '_' + t.name)
        # Fix: iterate values(), not items(); items() yields (key, value)
        # tuples, which have no `.members` attribute.
        for t in effect.texturesCube.values():
            if 'file' in t.members.keys():
                self._engine.textures.loadCubeTexture(t.members['file'].strip('\'').strip('"'),
                                                      effect.ID + '_' + t.name)

    def _buildTechnique(self, effect):
        """Compile and link the shader programs of the effect's active
        technique, caching both GL shaders and linked programs.

        @type effect: FullScreenEffect
        """
        shaders = self._engine.shaders
        t = effect.getActiveTechnique()
        es = 'while building shader \'{}\': {}'
        for i in range(len(t.passes)):
            p = t.passes[i]
            # program cache key combines effect ID with both shader names
            sid = effect.ID + p.members['vertex'] + p.members['fragment']
            if sid in self._e3dShaders.keys():
                continue
            if p.members['vertex'] not in self._glShaders.keys():
                vsource = effect.shaders[p.members['vertex']].strip()
                try:
                    compiledVS = shaders.checkAndCompile(vsource, shaders.shaderTypesEnum.vertex)
                except CompilationError as err:
                    raise CompilationError(es.format(p.members['vertex'], shaders._dissectErrors(err.args[1],
                                           self._engine.globals.oglversionraw)))
                self._glShaders[p.members['vertex']] = compiledVS
            else:
                compiledVS = self._glShaders[p.members['vertex']]
            if p.members['fragment'] not in self._glShaders.keys():
                fsource = effect.shaders[p.members['fragment']]
                try:
                    compiledFS = shaders.checkAndCompile(fsource, shaders.shaderTypesEnum.fragment)
                except CompilationError as err:
                    raise CompilationError(es.format(p.members['fragment'], shaders._dissectErrors(err.args[1],
                                           self._engine.globals.oglversionraw)))
                self._glShaders[p.members['fragment']] = compiledFS
            else:
                compiledFS = self._glShaders[p.members['fragment']]
            try:
                prog = shaders.compileProgram(compiledVS, compiledFS)
            except CompilationError:
                # es = 'while compiling shader program \'{}\': {}'
                raise  # CompilationError(es.format(sid, err.message))
            finally:
                # detach shaders regardless of link success; ignore GL errors
                try:
                    glDetachShader(prog, compiledFS)
                    glDetachShader(prog, compiledVS)
                except Exception:
                    pass
            s = shaders.ShaderClass(prog, sid, shaders)
            self._e3dShaders[sid] = s
            self._engine.shaders._shadersCache[sid] = s
        effect.builtTechniques.append(effect.activeTechnique)

    def _buildRenderTarget(self, effect):
        """Create the color/depth attachments required by the active
        technique's passes and register the resulting render target."""
        targets = {}
        es = 'Error in effect \'{}\''.format(effect.ID) + ' while building render target: {}'
        tech = effect.getActiveTechnique()
        hasdepth = False
        ttexs = []
        for cpass in tech.passes:
            # every source/destination referenced by the pass...
            alls = list(cpass.members['in'])
            alls.extend(list(cpass.members['out']))
            # ...except built-in targets and declared file textures
            excludes = ['_raw', '_scene', '_final', '_depth']
            if '_depth' in alls or '_raw' in alls:
                hasdepth = True
            textureNames = []
            for tex in effect.textures2d.values():
                textureNames.append(tex.name)
            for tex in effect.texturesCube.values():
                textureNames.append(tex.name)
            excludes.extend(textureNames)
            ttexs.extend([t for t in alls if t not in excludes])
        try:
            winsize = self._backend._window.size
            ttype = effect.textureType
            if ttype not in ['2d', 'cube']:
                raise AttributeError('Unknown render texture type: \'{}\''.format(ttype))
            for tar in ttexs:
                targetName = '{}_{}'.format(effect.ID, tar)
                etar = effect.targets.get(tar)
                if etar:
                    # a one-element size is a divisor of the window size
                    ssize = etar.members.get('size', winsize)
                    if len(ssize) == 1:
                        ssize = [winsize[0] / ssize[0], winsize[1] / ssize[0]]
                    targets[targetName] = ssize
            # Todo: check GL_MAX_FRAMEBUFFER_WIDTH and GL_MAX_FRAMEBUFFER_HEIGHT
            if targets.__len__() == 0:
                return
            zippedc, zippeds = zip(*targets.items())
            rt = self.renderTarget(self._engine, self._backend, ttype)
            rt.createColorAttachments(zippedc, zippeds)
            if hasdepth:
                depth = effect.targets.get('_depth')  # todo: check if this line is correct
                if depth:
                    ssize = depth.members.get('size', winsize)
                    if len(ssize) == 1:
                        ssize = [winsize[0] / ssize[0], winsize[1] / ssize[0]]
                else:
                    ssize = winsize
                rt.createDepthAttachment(ssize)
            self._builtRenderTargets[effect.ID] = rt
        except Exception as ex:
            # prepend the effect context to whatever went wrong
            ex.message = es.format(str(ex))
            if hasattr(ex, 'args'):
                ex.args = tuple([ex.message])
            raise
| {
"repo_name": "jr-garcia/Engendro3D",
"path": "e3d/fse_management/FSEManagerClass.py",
"copies": "1",
"size": "8694",
"license": "mit",
"hash": -2044046759900511200,
"line_mean": 37.2995594714,
"line_max": 117,
"alpha_frac": 0.5393374741,
"autogenerated": false,
"ratio": 4.171785028790787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5211122502890787,
"avg_score": null,
"num_lines": null
} |
from fumblerooski.college.models import College
from fumblerooski.utils import *
from fumblerooski.scrapers.games import game_updater
def full_load(year, week):
    """Perform a complete load of games for ``year`` through ``week``,
    including all player and game statistics.

    >>> full_load(2010, 13)
    """
    game_updater(year, None, week)
def full_nostats_load(year, week):
    """Load games for ``year`` through ``week`` -- scores only, no player or
    game statistics.  Useful for Saturday updates before the game xml files
    are available on ncaa.org.

    >>> full_nostats_load(2010, 13)
    """
    game_updater(year, None, week, nostats=True)
def partial_loader(year, id, week):
    """Perform a full load starting from team ``id``, in ascending order of
    team id.

    >>> partial_loader(2010, 235, 13)
    """
    remaining_teams = College.objects.filter(updated=True, id__gte=id).order_by('id')
    game_updater(year, remaining_teams, week)
def prepare_new_season(year):
    """Bootstrap a new season: create year rows, refresh conference
    membership, load the season's games, set up weeks and backfill coaches.

    NOTE(review): ``Game`` is not among this module's visible imports; it
    presumably arrives via ``fumblerooski.utils``'s star import -- confirm.
    """
    add_college_years(year)
    update_conference_membership(year)
    game_updater(year, None, 15)
    create_weeks(year)
    game_weeks(year)
    update_conf_games(year)
    # backfill head coaches for games that still lack both coach entries
    games = Game.objects.filter(season=year, coach1__isnull=True, coach2__isnull=True)
    for game in games:
        populate_head_coaches(game)
| {
"repo_name": "dwillis/fumblerooski",
"path": "scrapers/main.py",
"copies": "1",
"size": "1327",
"license": "bsd-3-clause",
"hash": 3042847344331374000,
"line_mean": 34.8648648649,
"line_max": 113,
"alpha_frac": 0.6887716654,
"autogenerated": false,
"ratio": 3.276543209876543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9441643535149629,
"avg_score": 0.004734268025382826,
"num_lines": 37
} |
from funcat import *
from string import punctuation
import sys
NGram = alias("N-Gram", ListOf(String))
@typed (Integer, String, NGram)
def ngrams(n, text):
    "Produces the ngrams in a text given n"
    # strip punctuation, then split on single spaces
    cleaned = text
    for mark in punctuation:
        cleaned = cleaned.replace(mark, '')
    tokens = cleaned.split(' ')
    long_enough = filter(lambda token: len(token) >= n, tokens)
    # every length-n slice of every sufficiently long token
    return flat_map(lambda t: [t[i:i+n] for i in range(len(t) - n + 1)],
                    long_enough)
@typed (NGram, Dictionary(String, Float))
def frequency_matrix(ngrams):
    "Maps each n-gram to its relative frequency within the list"
    # Fix: the original called ngrams.count(gram) for every gram, which is
    # O(n^2); count everything in a single pass instead.
    counts = {}
    for gram in ngrams:
        counts[gram] = counts.get(gram, 0) + 1
    total = float(len(ngrams))
    return dict((gram, count / total) for gram, count in counts.items())
@typed (NGram, NGram, Float)
def difference(x, y):
    "Computes the difference between two texts given the ngrams of each"
    fx = frequency_matrix(x)
    fy = frequency_matrix(y)
    # sum absolute frequency gaps over the union of observed n-grams
    total = 0.0
    for gram in set(x) | set(y):
        total += abs(fx.get(gram, 0.0) - fy.get(gram, 0.0))
    return total
@typed (NGram, NGram, Float)
def similarity(x, y):
    "Computes the similarity between two texts given the ngrams of each"
    # difference lies in [0, 2]; map it linearly onto a [0, 1] similarity
    half_distance = difference(x, y) / 2.0
    return 1.0 - half_distance
def main():
    # CLI: tcomp1.py <master-file> <n> <file1> [<file2> ...]
    # Compares each file's n-gram profile against the master file and
    # reports the most similar one.
    if len(sys.argv) < 4:
        print "usage: python {0} filename n file1name file2name ...".format(sys.argv[0])
        return
    master, n, files = sys.argv[1], int(sys.argv[2]), sys.argv[3:]
    # n-grams of the reference document, with newlines flattened to spaces
    mngrams = ngrams(n, open(master).read().replace('\n', ' '))
    similarities = []
    for i in range(len(files)):
        fngrams = ngrams(n, open(files[i]).read().replace('\n', ' '))
        sim = similarity(mngrams, fngrams)
        similarities.append(sim)
        # mixes str.format with %-formatting so the literal %.3f survives
        # the .format() call and is filled by the trailing % operator
        print ">>> Sim(\"{0}\", \"{1}\") = %.3f".format(master, files[i]) %sim
    max_index = similarities.index(max(similarities))
    print "File \"{0}\" is most similar to file \"{1}\"".format(files[max_index], master)
if __name__ == '__main__':
main()
| {
"repo_name": "zsck/NLPHomework",
"path": "Assignment1/tcomp1.py",
"copies": "2",
"size": "1771",
"license": "mit",
"hash": 2488503596082323500,
"line_mean": 33.0576923077,
"line_max": 87,
"alpha_frac": 0.6346696781,
"autogenerated": false,
"ratio": 2.9128289473684212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4547498625468421,
"avg_score": null,
"num_lines": null
} |
from funcat import *
import sys
# Punctuation characters scrubbed from tokens before indexing.
punctuation = "!'\"'()?:;,."
# Index: word -> list of 1-based line numbers the word occurs on.
Index = alias("Index", Dictionary(String, ListOf(Integer)))
# Occurrence: a (word, line-number) pair.
# NOTE(review): the alias label below is misspelled ("Occurence"); it is a
# runtime string, so it is left untouched here.
Occurrence = alias("Occurence", TupleOf(String, Integer))
@typed (Index, Occurrence, Index)
def add_index(index, occur):
    "Append an entry into an index mapping a word to the list of lines the word occurs on"
    word = first(occur)
    linenum = second(occur)
    # setdefault collapses the original present/absent branches into a
    # single lookup: create the word's line list on first sight, then append.
    index.setdefault(word, []).append(linenum)
    return index
@typed (ListOf(String), String, Boolean)
def ignored(ignored_words, word):
    "Determines whether a word is ignored, given a list of ignored words"
    # Linear membership scan over the ignore list.
    for candidate in ignored_words:
        if candidate == word:
            return True
    return False
@typed (String, ListOf(String))
def words(line):
    "Produces the words in a line of text"
    # Split on whitespace first, then scrub punctuation from each token.
    cleaned = []
    for token in line.split():
        for mark in punctuation:
            token = token.replace(mark, '')
        cleaned.append(token)
    return cleaned
def main():
if len(sys.argv) != 4:
print "usage: python {0} word-file text-file index-file".format(sys.argv[0])
return
ignore = flat_map(words, open(sys.argv[1]).readlines())
lines = map(words, open(sys.argv[2]).readlines())
ignoring = partial(ignored, ignore)
occurrences = [[(w, i+1) for w in ws if not ignoring(w)] for i, ws, in enumerate(lines)]
index = reduce(add_index, flatten(occurrences), {})
f = open(sys.argv[3], 'w')
for word in sorted(index.keys()):
f.write(word + ': ' + ' '.join(map(str, index[word])) + '\n')
f.close()
if __name__ == "__main__":
main()
| {
"repo_name": "zsck/NLPHomework",
"path": "Assignment1/index1.py",
"copies": "2",
"size": "1467",
"license": "mit",
"hash": 3078730080886201300,
"line_mean": 30.8913043478,
"line_max": 90,
"alpha_frac": 0.6591683708,
"autogenerated": false,
"ratio": 3.2171052631578947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48762736339578944,
"avg_score": null,
"num_lines": null
} |
from funcat import *
import sys
@typed (String, SetOf(String))
def words(text):
    "Produce a set of words contained in a text"
    # Newlines become spaces so the text splits on a single delimiter;
    # empty fragments produced by repeated spaces are dropped.
    fragments = text.replace('\n', ' ').split(' ')
    return {fragment for fragment in fragments if len(fragment) > 0}
@typed (SetOf(String), Integer)
def num_words(words):
    "Compute the number of words in the list of words provided"
    # Simply the cardinality of the word set.
    size = len(words)
    return size
@typed (Set, Set, Set)
def symmetric_difference(s1, s2):
    # Elements that are in exactly one of the two sets.
    return s1.symmetric_difference(s2)
# Short aliases that keep the similarity formula below compact.
nw = num_words
sd = symmetric_difference
@typed (SetOf(String), SetOf(String), Float)
def similarity(x, y):
    "Computes the similarity between two sets of words"
    # One minus the fraction of the combined word mass NOT shared by both.
    unshared = float(nw(sd(x, y)))
    combined = nw(x) + nw(y)
    return 1.0 - unshared / combined
def main():
if len(sys.argv) < 3:
print "usage: python {0} filename file1name file2name ...".format(sys.argv[0])
return
master, files = sys.argv[1], sys.argv[2:]
mwords = words(open(master).read())
similarities = []
for f in files:
sim = similarity(mwords, words(open(f).read()))
similarities.append(sim)
print ">>> Sim(\"{0}\", \"{1}\") = %.3f".format(master, f) %sim
max_index = similarities.index(max(similarities))
print "File \"{0}\" is most similar to \"{1}\"".format(files[max_index], master)
if __name__ == "__main__":
main()
| {
"repo_name": "zsck/NLPHomework",
"path": "Assignment1/tcomp2.py",
"copies": "2",
"size": "1209",
"license": "mit",
"hash": -5556095163100945000,
"line_mean": 27.7857142857,
"line_max": 82,
"alpha_frac": 0.6426799007,
"autogenerated": false,
"ratio": 2.9559902200489,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9481747674444552,
"avg_score": 0.023384489260869484,
"num_lines": 42
} |
from FuncDef import FuncDef
class FuncTable:
    """A lookup table of function definitions, indexed by bias and by name.

    Keeps insertion order in a list and maintains two auxiliary maps so a
    function can be resolved either by its bias value or by its name.
    """

    def __init__(self, base_name):
        self.funcs = []
        self.base_name = base_name
        self.bias_map = {}
        self.name_map = {}
        self.max_bias = 0

    def get_base_name(self):
        """Return the table's base name."""
        return self.base_name

    def get_funcs(self):
        """Return all stored functions in insertion order."""
        return self.funcs

    def get_func_by_bias(self, bias):
        """Return the function registered at *bias*, or None."""
        return self.bias_map.get(bias)

    def get_max_bias(self):
        """Return the largest bias registered so far."""
        return self.max_bias

    def has_func(self, name):
        """Return True when a function called *name* is registered."""
        return name in self.name_map

    def get_func_by_name(self, name):
        """Return the function called *name*, or None."""
        return self.name_map.get(name)

    def get_num_funcs(self):
        """Return how many functions are stored."""
        return len(self.funcs)

    def add_func(self, f):
        """Register *f*: keep insertion order, index by bias and by name,
        and track the maximum bias seen."""
        self.funcs.append(f)
        bias = f.get_bias()
        self.bias_map[bias] = f
        self.name_map[f.get_name()] = f
        self.max_bias = max(self.max_bias, bias)

    def dump(self):
        """Print the table's base name and dump every contained function."""
        print("FuncTable:", self.base_name)
        for f in self.funcs:
            f.dump()
| {
"repo_name": "alpine9000/amiga_examples",
"path": "tools/external/amitools/amitools/fd/FuncTable.py",
"copies": "1",
"size": "1159",
"license": "bsd-2-clause",
"hash": -1382948756755592000,
"line_mean": 20.0727272727,
"line_max": 38,
"alpha_frac": 0.5970664366,
"autogenerated": false,
"ratio": 3.1928374655647382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8977908223783824,
"avg_score": 0.06239913567618303,
"num_lines": 55
} |
from func import *
class Jugador(pygame.sprite.Sprite):
# Atributos
# velocidad del jugador
vel_x = 0
vel_y = 0
imaged=[]
imagei=[]
# Lista de elementos con los cuales chocar
nivel = None
def __init__(self):
pygame.sprite.Sprite.__init__(self)
# creamos el bloque
ancho = 40
alto = 60
matrizimg = cargar_fondo(curdir+"/enviroment/levels/images/maximus.png", 32,48)
self.image = matrizimg[0][1]
self.imaged.append(self.image)
self.image = matrizimg[1][1]
self.imaged.append(self.image)
self.image = matrizimg[2][1]
self.imaged.append(self.image)
self.image = matrizimg[3][1]
self.imaged.append(self.image)
self.image = matrizimg[0][2]
self.imagei.append(self.image)
self.image = matrizimg[1][2]
self.imagei.append(self.image)
self.image = matrizimg[2][2]
self.imagei.append(self.image)
self.image = matrizimg[3][2]
self.imagei.append(self.image)
self.rect = self.image.get_rect()
self.life = 100
self.score = 0
self.dir = 0 #0 derecha , 1 izquierda, 2 arriba, 3 abajo
#imagenes para movimiento
self.imagenar = [] #arriba
self.imagena = [] #abajo
self.enemigos = 0
#speed
self.increment_x = 6
self.increment_y = 0
self.cont=0
self.municion = 40
def update(self):
""" Mueve el jugador. """
# Gravedad
self.calc_grav()
# Mover izq/der
self.rect.x += self.vel_x
# Revisar si golpeamos con algo (bloques con colision)
bloque_col_list = pygame.sprite.spritecollide(self, self.nivel.plataforma_lista, False)
for bloque in bloque_col_list:
# Si nos movemos a la derecha,
# ubicar jugador a la izquierda del objeto golpeado
if(bloque.tipo == ""):
if self.vel_x > 0:
self.rect.right = bloque.rect.left
elif self.vel_x < 0:
# De otra forma nos movemos a la izquierda
self.rect.left = bloque.rect.right
# Mover arriba/abajo
self.rect.y += self.vel_y
# Revisamos si chocamos
bloque_col_list = pygame.sprite.spritecollide(self, self.nivel.plataforma_lista, False)
for bloque in bloque_col_list:
if(bloque.tipo == ""):
# Reiniciamos posicion basado en el arriba/bajo del objeto
if self.vel_y > 0:
self.rect.bottom = bloque.rect.top
elif self.vel_y < 0:
self.rect.top = bloque.rect.bottom
# Detener movimiento vertical
self.vel_y = 0
def calc_grav(self):
""" Calculamos efecto de la gravedad. """
if self.vel_y == 0:
self.vel_y = 1
else:
self.vel_y += .35
# Revisamos si estamos en el suelo
if self.rect.y >= ALTO - self.rect.height and self.vel_y >= 0:
self.vel_y = 0
self.rect.y = ALTO - self.rect.height
def salto(self):
""" saltamos al pulsar boton de salto """
print "en salto"
# Nos movemos abajo un poco y revisamos si hay una plataforma bajo el jugador
self.rect.y += 2
plataforma_col_lista = pygame.sprite.spritecollide(self, self.nivel.plataforma_lista, False)
self.rect.y -= 2
# Si es posible saltar, aumentamos velocidad hacia arriba
if len(plataforma_col_lista) > 0 or self.rect.bottom >= ALTO:
self.vel_y = -10
# Control del movimiento
def ir_izq(self):
""" Usuario pulsa flecha izquierda """
if(self.cont<3):
self.cont+=1
else:
self.cont=0
self.image=self.imaged[self.cont]
self.vel_x = -self.increment_x
def ir_der(self):
""" Usuario pulsa flecha derecha """
if(self.cont<3):
self.cont+=1
else:
self.cont=0
self.image=self.imagei[self.cont]
self.vel_x = self.increment_x
def no_mover(self):
""" Usuario no pulsa teclas """
self.vel_x = 0
def getLife(self):
return self.life
def setLife(self,life):
if(life > 100):
self.life = 100
else:
self.life = life
def crash(self):
for e in self.nivel.enemigos_lista:
#Quita vida segun el tipo de zombie
if(e.tipo == 1 or e.tipo == 2):
self.setLife(self.getLife() - 1) #dano normal
if(e.tipo == 3 or e.tipo == 5 or e.tipo == 10):
self.setLife(self.getLife() - 3)#dano muy alto
if(e.tipo == 4):
self.setLife(self.getLife() - 0.5)#dano bajo
def getDir(self):
return self.dir
def setDir(self,dir):
self.dir = dir
def getMargen(self):
return (self.rect[2],self.rect[3])#x,y
def getPos(self):
return [self.rect.x,self.rect.y]
def setPos(self,pos):
self.rect.x = pos[0]
self.rect.y = pos[1]
def setSpeed(self,speed):
self.increment_x = speed
def getScore(self):
return self.score
class Weapon(pygame.sprite.Sprite):
    """Base sprite for projectiles: an image placed at a start position."""

    def __init__(self, img_name, pos):
        pygame.sprite.Sprite.__init__(self)
        self.image = load_image(img_name, curdir, alpha=True)
        self.rect = self.image.get_rect()
        self.pos = pos
        self.rect.x, self.rect.y = pos[0], pos[1]
        self.speed = 5
        # Weapon name is the image file name without its extension.
        self.name = img_name.split(".png")[0]

    def getName(self):
        return self.name

    def getRect(self):
        return self.rect

    def getPos(self):
        return [self.rect.x, self.rect.y]

    def setPos(self, pos):
        self.rect.x, self.rect.y = pos[0], pos[1]
class Bullet(Weapon):
    """Straight-line bullet; it flies in the shooter's facing direction."""

    def __init__(self, img_name, pos):
        Weapon.__init__(self, img_name, pos)
        # Shooter's facing at fire time: 0 right, 1 left, 2 up, 3 down.
        self.magiciandir = 0
        self.tipo = "normal"
        self.tipo2 = ""

    def setDir(self, dir):
        self.magiciandir = dir

    def getDir(self):
        return self.magiciandir

    def update(self):
        """Advance one step along the fixed firing direction."""
        direction = self.magiciandir
        if direction == 0:    # right
            self.rect.x += self.speed
        elif direction == 1:  # left
            self.rect.x -= self.speed
        elif direction == 2:  # up
            self.rect.y -= self.speed
        elif direction == 3:  # down
            self.rect.y += self.speed
class RectBullet(Weapon):
    """Bullet that follows a precomputed straight path (Bresenham line)."""

    def __init__(self, img_name, pos):
        Weapon.__init__(self, img_name, pos)
        self.i = 0        # index into the precomputed path
        self.moves = []   # waypoints to traverse
        self.life = 300   # remaining frames before the bullet expires
        self.tipo = "rect"
        self.tipo2 = ""

    def getLife(self):
        return self.life

    def restartMovements(self, pos):
        """Recompute the path from the current position to *pos*."""
        self.moves = Bresenhamrecta([self.getPos(), pos])
        self.i = 0

    def update(self):
        """Age the bullet and step along the path; wrap around at the end."""
        self.life -= 1
        if self.i >= len(self.moves) - 1:
            self.i = 0
        else:
            self.setPos(self.moves[self.i])
            self.i += 2  # skip every other waypoint for extra speed
class CircleBullet(Weapon):
    """Bullet that orbits its owner along a midpoint-circle path."""

    # Sprite this bullet circles around; assigned by the spawner.
    owner = None

    def __init__(self, img_name, pos, r):
        Weapon.__init__(self, img_name, pos)
        self.r = r + 5     # orbit radius
        self.i = 0         # index into the orbit waypoints
        self.moves = [0] * 16
        self.life = 100
        self.tipo = "circle"
        self.tipo2 = ""
        self.die = False   # set once the orbit is fully traversed

    def getLife(self):
        return self.life

    def restartMovements(self, pos):
        """Recompute the orbit around the owner's current position."""
        self.moves = CircunfPtoMedio(self.owner.getPos(), self.r)
        self.order = sorted(self.moves, key=lambda tup: tup[1])
        self.i = 0

    def update(self):
        """Step along the orbit; flag death once the path is exhausted."""
        if self.i >= len(self.moves):
            self.die = True
        else:
            self.setPos(self.moves[self.i])
            self.i += 2  # skip every other waypoint for extra speed
class RectBulletBoss(Weapon):
    """Boss bullet that follows a precomputed line and dies at its end."""

    def __init__(self, img_name, pos):
        Weapon.__init__(self, img_name, pos)
        self.i = 0        # index into the precomputed path
        self.moves = []   # waypoints to traverse
        self.tipo = "rect"
        self.tipo2 = "bulletboss"
        self.playerpos = [0, 0]
        self.die = False
        self.life = 100   # unused, but other code expects the attribute

    def getLife(self):
        return self.life

    def restartMovements(self, pos):
        """Recompute the path from the current position to *pos*."""
        self.moves = Bresenhamrecta([self.getPos(), pos])
        self.i = 0

    def update(self):
        """Step along the path; flag death once it is exhausted."""
        if self.i >= len(self.moves) - 1:
            self.die = True
        else:
            self.setPos(self.moves[self.i])
            self.i += 2  # skip every other waypoint for extra speed
class Enemy(pygame.sprite.Sprite):
    """Base sprite for enemies: image, position and facing direction."""

    # Level and platform references; assigned by the level loader.
    nivel = None
    plat = None

    def __init__(self, img_name, pos):
        pygame.sprite.Sprite.__init__(self)
        self.image = load_image(img_name, curdir, alpha=True)
        self.rect = self.image.get_rect()
        self.pos = pos
        self.rect.x, self.rect.y = pos[0], pos[1]
        self.jugador = (0, 0)  # last known player position
        self.direccion = 0     # 0 right, 1 left

    def getDir(self):
        return self.direccion

    def setDir(self, dir):
        self.direccion = dir

    def getRect(self):
        return self.rect

    def getPos(self):
        return [self.rect.x, self.rect.y]

    def setPos(self, pos):
        self.rect.x, self.rect.y = pos[0], pos[1]

    def getMargen(self):
        # (width, height) of the sprite rect.
        return (self.rect[2], self.rect[3])

    def getLife(self):
        return self.life

    def setLife(self, life):
        self.life = life

    def crash(self):
        """Take a random 10-19 points of damage from a player hit."""
        self.setLife(self.getLife() - random.randrange(10, 20))
class Zombie1(Enemy):
    """Patrolling zombie: walks back and forth, reversing every 350 steps."""

    def __init__(self, img_name, pos):
        Enemy.__init__(self, img_name, pos)
        # BUGFIX: the animation frame lists used to be CLASS attributes, so
        # every new Zombie1 appended its frames onto shared lists.  They are
        # now per-instance.
        self.imaged = []  # frames facing right
        self.imagei = []  # frames facing left
        matrizimg = cargar_fondo(dirimg+"zombie2.png", 32,32)
        for fila in range(3):
            self.imaged.append(matrizimg[fila][1])
            self.imagei.append(matrizimg[fila][2])
        # Preserve the original load order: the last frame loaded stays current.
        self.image = self.imagei[2]
        self.i = 1
        self.cont = 0       # steps walked since the last turn
        self.speed_aux = 0  # frame divider: move only on every other update
        self.reloj = 0
        self.life = 100
        self.speed = 1
        self.tipo = 1
        self.aux = True     # False freezes the zombie
        self.cont3 = 0      # animation frame index

    def move(self):
        """Walk one step, bounce off pet shields, turn around every 350 steps."""
        if(self.speed_aux >= 1):
            self.speed_aux = 0
            self.rect.x += self.speed
            self.cont += 1
            # Undo the step when it walks into the pet's shield.
            for platx in self.nivel.plataforma_lista:
                if(platx.tipo == "mascota"):
                    if(platx.tipo2 == "escudo"):
                        if(checkCollision(self, platx)):
                            self.rect.x -= self.speed
                            self.cont -= 1
            if(self.cont == 350):
                self.cont = 0
                self.speed *= -1
                self.changeDirection()
        else:
            self.speed_aux += 1

    def update(self):
        """Advance the patrol and cycle the walking animation."""
        if(self.aux):
            self.move()
            if(self.getDir() == 0):
                if self.cont3 <= 2:
                    self.image = self.imagei[self.cont3]
                    self.cont3 += 1
                else:
                    self.cont3 = 0
            elif (self.getDir() == 1):
                if self.cont3 <= 2:
                    self.image = self.imaged[self.cont3]
                    self.cont3 += 1
                else:
                    self.cont3 = 0

    def changeDirection(self):
        """Flip facing: 0 (right) <-> 1 (left)."""
        if(self.getDir() == 0):
            self.setDir(1)
        else:
            self.setDir(0)

    def StopMovements(self):
        self.aux = False

    def StartMovements(self):
        self.aux = True
class Zombie2(Enemy):#El que me persigue
vel_x = 0
vel_y = 0
imaged=[]
imagei=[]
def __init__(self, img_name, pos,nivel):
Enemy.__init__(self, img_name, pos)
matrizimg = cargar_fondo(curdir+"/enviroment/levels/images/zombie.png", 32,32)
self.image = matrizimg[0][1]
self.imaged.append(self.image)
self.image = matrizimg[1][1]
self.imaged.append(self.image)
self.image = matrizimg[2][1]
self.imaged.append(self.image)
self.image = matrizimg[3][1]
self.imaged.append(self.image)
self.image = matrizimg[0][2]
self.imagei.append(self.image)
self.image = matrizimg[1][2]
self.imagei.append(self.image)
self.image = matrizimg[2][2]
self.imagei.append(self.image)
self.image = matrizimg[3][2]
self.imagei.append(self.image)
self.rect = self.image.get_rect()
self.life = 80
self.speed = 2
self.rect.x = pos[0]
self.rect.y = pos[1]
self.moves = [0 for x in range(ANCHO)] #movimientos que debe realizar
self.i = 0
self.nivel = nivel
self.tipo = 2
self.dir = 0
self.cont=0
self.turn = 0
self.aux = True
self.cont = 0
def setDir(self,dir):
self.dir = dir
def getDir(self):
return self.dir
def StopMovements(self):
print "stop moving 2"
self.aux = False
def StartMovements(self):
self.aux = True
def restartMovements(self,pos):#calcula el camino por donde debe moverse (recibe el punto final)
if(self.aux):
self.moves = Bresenhamrecta([self.getPos(),pos])#carga los nuevos movimientos
last_x = self.moves[-1][0]
aux = self.getMargen()[0]
if(self.getDir() == 0):
aux *= -1
self.moves[-1] = [last_x + aux, self.moves[-1][1]]
self.i = 0 #debe empezar a recorrerla desde cero
def update(self): #se mueve
if(self.aux):
if self.turn == 0:
if self.rect.x > 120:
if self.cont <= 2:
self.image = self.imaged[self.cont]
self.cont+=1
else:
self.cont=0
self.setPos([self.rect.x-5,self.rect.y])
for platx in self.nivel.plataforma_lista:
if(platx.tipo == "mascota"):
if(platx.tipo2 == "escudo"):
if(checkCollision(self,platx)):
self.setPos([self.rect.x+5,self.rect.y])
else:
self.turn = 1
if self.turn == 1:
bloque_col_list = pygame.sprite.spritecollide(self, self.nivel.plataforma_lista, False)
if self.cont <= 2:
self.image = self.imagei[self.cont]
self.cont+=1
else:
self.cont=0
if(len(bloque_col_list) == 0):
self.setPos([self.rect.x+5,self.rect.y])
else:
self.turn = 0
class Zombie3(Enemy):
    """Upper-platform zombie: patrols and shoots, turning every 400 steps."""

    def __init__(self, img_name, pos):
        Enemy.__init__(self, img_name, pos)
        # BUGFIX: the animation frame lists used to be CLASS attributes, so
        # every new Zombie3 appended its frames onto shared lists.  They are
        # now per-instance.
        self.imaged = []  # frames facing right
        self.imagei = []  # frames facing left
        matrizimg = cargar_fondo(curdir+"/enviroment/levels/images/zombie.png", 32,32)
        for fila in (6, 7, 8):
            self.imaged.append(matrizimg[fila][1])
            self.imagei.append(matrizimg[fila][2])
        # Preserve the original load order: the last frame loaded stays current.
        self.image = self.imagei[2]
        self.rect = self.image.get_rect()
        self.life = 150
        self.speed = 1
        self.rect.x = pos[0]
        self.rect.y = pos[1]
        self.tipo = 3
        self.dir = 0        # 0 right, 1 left
        self.speed_aux = 0  # frame divider: move only on every other update
        self.aux = True
        # Steps walked since the last turn (the original assigned it twice).
        self.cont = 0
        self.cont3 = 0      # animation frame index

    def setDir(self, dir):
        self.dir = dir

    def getDir(self):
        return self.dir

    def StopMovements(self):
        self.aux = False

    def StartMovements(self):
        self.aux = True

    def move(self):
        """Walk one step, bounce off pet shields, turn around every 400 steps."""
        if(self.speed_aux >= 1):
            self.speed_aux = 0
            self.rect.x += self.speed
            self.cont += 1
            # Undo the step when it walks into the pet's shield.
            for platx in self.nivel.plataforma_lista:
                if(platx.tipo == "mascota"):
                    if(platx.tipo2 == "escudo"):
                        if(checkCollision(self, platx)):
                            self.rect.x -= self.speed
                            self.cont -= 1
            if(self.cont == 400):
                self.cont = 0
                self.speed *= -1
                self.changeDirection()
        else:
            self.speed_aux += 1

    def update(self):
        """Advance the patrol and cycle the walking animation."""
        if(self.aux):
            self.move()
            if(self.getDir() == 0):
                if self.cont3 <= 2:
                    self.image = self.imagei[self.cont3]
                    self.cont3 += 1
                else:
                    self.cont3 = 0
            elif (self.getDir() == 1):
                if self.cont3 <= 2:
                    self.image = self.imaged[self.cont3]
                    self.cont3 += 1
                else:
                    self.cont3 = 0

    def changeDirection(self):
        """Flip facing: 0 (right) <-> 1 (left)."""
        if(self.getDir() == 0):
            self.setDir(1)
        else:
            self.setDir(0)
class Zombie4(Enemy):
    """Stationary zombie: does not move, it only throws rats."""

    def __init__(self, img_name, pos):
        Enemy.__init__(self, img_name, pos)
        self.life = 100
        self.speed = 1
        self.rect.x, self.rect.y = pos[0], pos[1]
        self.tipo = 4
class Rata(Enemy):
    """Rat projectile: runs along a Bresenham path and ages out over time."""

    def __init__(self, img_name, pos, nivel):
        Enemy.__init__(self, img_name, pos)
        self.life = 500  # frames of life left; ticks down on every update
        self.speed = 1
        self.rect.x, self.rect.y = pos[0], pos[1]
        self.moves = [0] * ANCHO  # waypoints to traverse
        self.i = 0
        self.nivel = nivel
        self.dir = 0
        # Name is the image file name without its extension.
        self.name = img_name.split(".png")[0]
        self.tipo = "rata"
        self.tipo2 = ""

    def getName(self):
        return self.name

    def restartMovements(self, pos):
        """Recompute the path from the current position to *pos*."""
        self.moves = Bresenhamrecta([self.getPos(), pos])
        self.i = 0

    def update(self):
        """Age the rat and advance one waypoint unless a level block is hit."""
        self.life -= 1
        bloques = self.nivel.plataforma_lista
        if self.i < len(self.moves):
            siguiente = self.moves[self.i]
            if siguiente == 0:
                # Placeholder waypoint: stay in place.
                self.setPos([self.rect.x, self.rect.y])
            else:
                for bloque in bloques:
                    if checkCollision(self, bloque) == False:
                        self.setPos(siguiente)
            self.i += 1
class Zombie5(Enemy):  # The zombie that jumps and shoots
    # Velocity placeholders (class-level defaults).
    vel_x = 0
    vel_y = 0
    def __init__(self, img_name, pos):
        Enemy.__init__(self, img_name, pos)
        self.i = 1
        self.cont = 0
        self.reloj = 0
        self.life = 200
        self.speed = 1
        self.tipo = 5
        self.increment_y = 0   # vertical jump velocity; negative while rising
        self.jumping = False
        self.aux = True        # False freezes the zombie
    def StopMovements(self):
        self.aux = False
    def StartMovements(self):
        self.aux = True
    def calc_grav(self):
        """ Compute the effect of gravity on the jump arc. """
        if self.increment_y < 0:  # still rising
            self.jumping = True
            self.increment_y += 1  # gravity pull per frame
            # Position relative to the spawn height stored in self.pos.
            self.setPos([self.getPos()[0],self.increment_y +self.pos[1]])
        if self.increment_y >= 0:  # jump over: snap back to the spawn height
            self.jumping = False
            self.setPos([self.getPos()[0],self.pos[1]])
    def salto(self):
        # Start a jump only when not already airborne.
        if(self.jumping == False):
            self.increment_y -= 10
    def update(self):
        if(self.aux):
            self.calc_grav()
class Zombie6(Enemy):
    """Zombie that circles around a random point in the upper play field."""

    def __init__(self, img_name, pos):
        Enemy.__init__(self, img_name, pos)
        self.i = 1
        self.cont = 0
        self.reloj = 0
        self.life = 100
        self.speed = 1
        self.tipo = 6
        self.r = 10               # orbit radius
        self.moves = [0] * ANCHO  # orbit waypoints (placeholders until started)

    def StartMovements(self):
        """Pick a random centre and precompute the circular path around it."""
        centro = [random.randrange(30, ANCHO - 30), random.randrange(30, ALTO/2 - 30)]
        self.moves = CircunfPtoMedio(centro, self.r)
        self.order = sorted(self.moves, key=lambda tup: tup[1])
        self.i = 0

    def update(self):
        """Step one waypoint along the orbit; wrap around at the end."""
        if self.moves[self.i] != 0:
            if self.i >= len(self.moves) - 1:
                self.i = 0
            else:
                self.setPos(self.moves[self.i])
                self.i += 1
class Boss(Enemy):
    """End boss: patrols horizontally, reversing every 290/3 steps."""

    def __init__(self, img_name, pos):
        Enemy.__init__(self, img_name, pos)
        self.life = 200
        # The original assigned speed 1 and then overwrote it with 3;
        # only the final value is observable.
        self.speed = 3
        self.rect.x, self.rect.y = pos[0], pos[1]
        self.cont = 0        # steps walked since the last turn
        self.tipo = 10
        self.dir = 0         # 0 right, 1 left
        self.speed_aux = 0   # frame divider: move only on every other update
        self.aux = True
        self.des = 0
        self.playerpos = [0, 0]
        self.cont3 = 0       # animation counter (frame swaps are disabled)
        self.i = 1

    def changeDirection(self):
        """Flip facing: 0 (right) <-> 1 (left)."""
        self.setDir(1 if self.getDir() == 0 else 0)

    def StopMovements(self):
        self.aux = False

    def StartMovements(self):
        self.aux = True

    def move(self):
        """Walk one step; reverse course after 290/3 steps."""
        if self.speed_aux >= 1:
            self.speed_aux = 0
            self.rect.x += self.speed
            self.cont += 1
            if self.cont == 290/3:
                self.cont = 0
                self.speed *= -1
                self.changeDirection()
        else:
            self.speed_aux += 1

    def update(self):
        """Advance the patrol; the animation counter still cycles even though
        no frame is actually swapped."""
        if self.aux:
            self.move()
            if self.getDir() == 0:
                if self.cont3 <= 2:
                    self.cont3 += 1
                else:
                    self.cont3 = 0
            elif self.getDir() == 1:
                if self.cont3 <= 2:
                    self.cont3 += 1
                else:
                    self.cont3 = 0
class Mascota(Enemy):
    """Pet: once activated, hovers attached to the player's side."""

    def __init__(self, img_name, pos):
        Enemy.__init__(self, img_name, pos)
        self.i = 0
        self.cont = 0
        self.reloj = 0
        self.life = 100
        self.speed = 1
        self.tipo = "mascota"
        self.tipo2 = ""
        self.r = 45
        self.moves = [0] * ANCHO
        self.aux = False     # becomes True once the pet starts following
        self.jugador = None  # player sprite to follow

    def StartMovements(self):
        """Start tracking the player."""
        self.aux = True

    def update(self):
        """Reposition next to the player every frame while active."""
        if self.aux:
            px, py = self.jugador.getPos()
            ancho, alto = self.jugador.getMargen()
            self.setPos([px - ancho, py - alto/2])
class Plataforma(pygame.sprite.Sprite):
    """Static level tile: an image anchored at a fixed position."""

    def __init__(self, img_name, pos):
        pygame.sprite.Sprite.__init__(self)
        self.image = load_image(img_name, curdir, alpha=True)
        self.rect = self.image.get_rect()
        self.pos = pos
        self.rect.x, self.rect.y = pos[0], pos[1]
        # An empty tipo marks a solid block; subtypes are set by the loader.
        self.tipo = ""
        self.tipo2 = ""
| {
"repo_name": "jdiegoh3/gameproyect_pygame",
"path": "objetos.py",
"copies": "1",
"size": "25087",
"license": "mit",
"hash": 7179060580074756000,
"line_mean": 29.743872549,
"line_max": 140,
"alpha_frac": 0.531510344,
"autogenerated": false,
"ratio": 3.2015058703420114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9167494355841254,
"avg_score": 0.013104371700151527,
"num_lines": 816
} |
from func import *
# --- NEST kernel setup and synapse model registration ------------------------
startbuild = datetime.datetime.now()
nest.ResetKernel()
nest.SetKernelStatus({'overwrite_files': True,
                      'local_num_threads': 4,
                      'resolution': 0.1})
logger = logging.getLogger('neuromodulation')
logger.debug("* * * Building layers")
build_model(GlobalColumns)
# Init parameters of our synapse models
# Each dopamine synapse model needs its own volume transmitter node.
DOPA_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
DOPA_synparams_in['vt'] = nest.Create('volume_transmitter')[0]
# Register named synapse models so connect() can refer to them by label.
nest.CopyModel('static_synapse', gen_static_syn, static_syn)
nest.CopyModel('stdp_synapse', glu_synapse, STDP_synparams_Glu)
nest.CopyModel('stdp_synapse', gaba_synapse, STDP_synparams_GABA)
nest.CopyModel('stdp_synapse', ach_synapse, STDP_synparams_ACh)
nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_ex, DOPA_synparams_ex)
nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_in, DOPA_synparams_in)
# Initialize connections
logger.debug("* * * Connecting layers in columns")
# Wire excitatory (Glu) and inhibitory (GABA) populations between cortical
# layers, independently inside every column.  Trailing numeric comments are
# previously tried weight values kept for tuning reference.
for column in range(GlobalColumns):
    ''' L2 '''
    # for Glu
    connect(Cortex[L2][Glu][column], Cortex[L2][GABA][column], syn_type=Glu, weight_coef=0.3) #0.3
    connect(Cortex[L2][Glu][column], Cortex[L5][Glu][column], syn_type=Glu, weight_coef=0.3)
    # for GABA
    connect(Cortex[L2][GABA][column], Cortex[L3][Glu][column], syn_type=GABA, weight_coef=0.2)
    connect(Cortex[L2][GABA][column], Cortex[L5][Glu][column], syn_type=GABA, weight_coef=0.3)
    ''' L3 '''
    # for Glu
    connect(Cortex[L3][Glu][column], Cortex[L2][Glu][column], syn_type=Glu, weight_coef=1.3)
    connect(Cortex[L3][Glu][column], Cortex[L4][GABA][column], syn_type=Glu, weight_coef=0.1) #0/5
    ''' L4 '''
    # for Glu
    connect(Cortex[L4][Glu][column], Cortex[L3][Glu][column], syn_type=Glu, weight_coef=0.9)
    #connect(Cortex[L4][Glu][column], Cortex[L5][Glu][column], syn_type=Glu, weight_coef=0.3)
    connect(Cortex[L4][Glu][column], Cortex[L4][GABA][column], syn_type=Glu, weight_coef=0.2) #0.3
    # for GABA
    connect(Cortex[L4][GABA][column], Cortex[L4][Glu][column], syn_type=GABA, weight_coef=0.2) #0.2
    connect(Cortex[L4][GABA][column], Cortex[L3][Glu][column], syn_type=GABA, weight_coef=0.1) #0.3
    connect(Cortex[L4][GABA][column], Cortex[L2][GABA][column], syn_type=GABA, weight_coef=0.3)
    ''' L5 '''
    # for Glu
    connect(Cortex[L5][Glu][column], Cortex[L6][Glu][column], syn_type=Glu, weight_coef=0.1)
    connect(Cortex[L5][Glu][column], Cortex[L5][GABA][column], syn_type=Glu, weight_coef=0.1)
    # for GABA
    connect(Cortex[L5][GABA][column], Cortex[L5][GABA][column], syn_type=GABA )
    ''' L6 '''
    # for Glu
    connect(Cortex[L6][Glu][column], Cortex[L4][GABA][column], syn_type=Glu, weight_coef=0.2) #0.3
    connect(Cortex[L6][Glu][column], Cortex[L4][Glu][column], syn_type=Glu, weight_coef=0.3) ###0.4
    # for GABA
    connect(Cortex[L6][GABA][column], Cortex[L6][Glu][column], syn_type=GABA, weight_coef=0.4)
logger.debug("* * * Adding neighbors connections")
# Lateral connectivity: link each column to its neighbours on the X*Y grid.
for column in range(X*Y):
    for neighbor in getNeighbors(column):
        # L2 layer
        # TO L3 !!!!!!
        connect(Cortex[L2][Glu][column], Cortex[L3][Glu][neighbor], syn_type=Glu, weight_coef=0.1)
        connect(Cortex[L2][GABA][column], Cortex[L2][GABA][neighbor], syn_type=Glu, weight_coef=0.3)
        # L4 layer
        connect(Cortex[L4][Glu][column], Cortex[L4][Glu][neighbor], syn_type=Glu, weight_coef=0.2)
        connect(Cortex[L4][GABA][column], Cortex[L4][GABA][neighbor], syn_type=Glu, weight_coef=0.3)
logger.debug("* * * Connect detectors")
connect_detectors()
connect_mm()
logger.debug("* * * Attaching spikes detectors")
generate_neurons(10000)
logger.debug("* * * Start connection initialisation")
# * * * NIGROSTRIATAL PATHWAY* * *
# Cortex -> basal-ganglia projections; commented lines are disabled variants
# kept for tuning reference.
connect(Cortex[L4][Glu], striatum[D1], syn_type=Glu, weight_coef=0.005)
connect(Cortex[L4][Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.000005)
connect(Cortex[L4][Glu], striatum[D2], syn_type=Glu, weight_coef=0.05)
connect(Cortex[L4][Glu], thalamus[thalamus_Glu], syn_type=Glu, weight_coef=0.008)
connect(Cortex[L4][Glu], stn[stn_Glu], syn_type=Glu, weight_coef=7)
connect(Cortex[L4][Glu], striatum[D1], syn_type=Glu)
connect(Cortex[L4][Glu], striatum[D2], syn_type=Glu)
connect(Cortex[L4][Glu], thalamus[thalamus_Glu], syn_type=Glu)
connect(Cortex[L4][Glu], stn[stn_Glu], syn_type=Glu)
connect(Cortex[L4][Glu], nac[nac_GABA0])
connect(striatum[tan], striatum[D1])
connect(striatum[tan], striatum[D2], syn_type=Glu)
connect(striatum[D1], snr[snr_GABA], weight_coef=0.00001)
connect(striatum[D1], gpi[gpi_GABA], weight_coef=0.00001)
connect(striatum[D1], gpe[gpe_GABA], weight_coef=0.000005)
connect(striatum[D2], gpe[gpe_GABA], weight_coef=1)
connect(gpe[gpe_GABA], stn[stn_Glu], weight_coef=0.0001)
connect(gpe[gpe_GABA], striatum[D1], weight_coef=0.001)
connect(gpe[gpe_GABA], striatum[D2], weight_coef=0.3)
connect(snc[snc_DA], gpe[gpe_GABA], weight_coef=0.3, syn_type=DA_ex)
connect(amygdala[amygdala_Glu], gpe[gpe_GABA], weight_coef=0.3, syn_type=Glu)
connect(gpe[gpe_GABA], amygdala[amygdala_Glu], weight_coef=0.1, syn_type=Glu)
connect(gpe[gpe_GABA], snc[snc_DA], weight_coef=0.2, syn_type=GABA)
connect(gpe[gpe_GABA], snr[snr_GABA], weight_coef=0.0001)
connect(stn[stn_Glu], snr[snr_GABA], syn_type=Glu, weight_coef=20)
connect(stn[stn_Glu], gpi[gpi_GABA], syn_type=Glu, weight_coef=20)
connect(stn[stn_Glu], gpe[gpe_GABA], syn_type=Glu, weight_coef=0.3)
#connect(stn[stn_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.000001)
connect(gpi[gpi_GABA], thalamus[thalamus_Glu], weight_coef=3)
connect(snr[snr_GABA], thalamus[thalamus_Glu], weight_coef=3)
connect(thalamus[thalamus_Glu], Cortex[L2][Glu], syn_type=Glu)
#connect(thalamus[thalamus_Glu], stn[stn_Glu], syn_type=Glu, weight_coef=1) #005
#connect(thalamus[thalamus_Glu], striatum[D1], syn_type=Glu, weight_coef=0.0001)
#connect(thalamus[thalamus_Glu], striatum[D2], syn_type=Glu, weight_coef=0.0001)
#connect(thalamus[thalamus_Glu], striatum[tan], syn_type=Glu, weight_coef=0.0001)
#connect(thalamus[thalamus_Glu], nac[nac_GABA0], syn_type=Glu)
#connect(thalamus[thalamus_Glu], nac[nac_GABA1], syn_type=Glu)
#connect(thalamus[thalamus_Glu], nac[nac_ACh], syn_type=Glu)
# * * * MESOCORTICOLIMBIC PATHWAY * * *
connect(nac[nac_ACh], nac[nac_GABA1], syn_type=ACh)
connect(nac[nac_GABA0], nac[nac_GABA1])
connect(nac[nac_GABA1], vta[vta_GABA2])
connect(vta[vta_GABA0], prefrontal[pfc_Glu0])
connect(vta[vta_GABA0], prefrontal[pfc_Glu1])
connect(vta[vta_GABA0], pptg[pptg_GABA])
connect(vta[vta_GABA1], vta[vta_DA0])
connect(vta[vta_GABA1], vta[vta_DA1])
connect(vta[vta_GABA2], nac[nac_GABA1])
connect(pptg[pptg_GABA], vta[vta_GABA0])
connect(pptg[pptg_GABA], snc[snc_GABA], weight_coef=0.000005)
connect(pptg[pptg_ACh], vta[vta_GABA0], syn_type=ACh)
connect(pptg[pptg_ACh], vta[vta_DA1], syn_type=ACh)
connect(pptg[pptg_Glu], vta[vta_GABA0], syn_type=Glu)
connect(pptg[pptg_Glu], vta[vta_DA1], syn_type=Glu)
connect(pptg[pptg_ACh], striatum[D1], syn_type=ACh, weight_coef=0.3)
connect(pptg[pptg_ACh], snc[snc_GABA], syn_type=ACh, weight_coef=0.000005)
connect(pptg[pptg_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.000005)
# * * * INTEGRATED PATHWAY * * *
connect(prefrontal[pfc_Glu0], vta[vta_DA0], syn_type=Glu)
connect(prefrontal[pfc_Glu0], nac[nac_GABA1], syn_type=Glu)
connect(prefrontal[pfc_Glu1], vta[vta_GABA2], syn_type=Glu)
connect(prefrontal[pfc_Glu1], nac[nac_GABA1], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_GABA0], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_GABA1], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_ACh], syn_type=Glu)
connect(amygdala[amygdala_Glu], striatum[D1], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[D2], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[tan], syn_type=Glu, weight_coef=0.3)
# Optional dopaminergic modulation, switched by a flag defined in func.
if dopamine_flag:
    logger.debug("* * * Making neuromodulating connections...")
    # NIGROSTRIATAL
    connect(snc[snc_DA], striatum[D1], syn_type=DA_ex)
    connect(snc[snc_DA], gpe[gpe_GABA], syn_type=DA_ex)
    connect(snc[snc_DA], stn[stn_Glu], syn_type=DA_ex)
    connect(snc[snc_DA], nac[nac_GABA0], syn_type=DA_ex)
    connect(snc[snc_DA], nac[nac_GABA1], syn_type=DA_ex)
    connect(snc[snc_DA], striatum[D2], syn_type=DA_in)
    connect(snc[snc_DA], striatum[tan], syn_type=DA_in)
    # MESOCORTICOLIMBIC
    connect(vta[vta_DA0], striatum[D1], syn_type=DA_ex)
    connect(vta[vta_DA0], striatum[D2], syn_type=DA_in)
    connect(vta[vta_DA0], prefrontal[pfc_Glu0], syn_type=DA_ex)
    connect(vta[vta_DA0], prefrontal[pfc_Glu1], syn_type=DA_ex)
    connect(vta[vta_DA1], nac[nac_GABA0], syn_type=DA_ex)
    connect(vta[vta_DA1], nac[nac_GABA1], syn_type=DA_ex)
logger.debug("* * * Creating spike generators...")
#connect_generator(pptg[pptg_GABA], 400., 600., rate=250, coef_part=1)
#connect_generator(pptg[pptg_Glu], 400., 600., rate=250, coef_part=1)
#connect_generator(pptg[pptg_ACh], 400., 600., rate=250, coef_part=1)
#connect_generator(amygdala[amygdala_Glu], 400., 600., rate=250, coef_part=1)
connect_generator(snc[snc_DA], 200., 400., rate=250, coef_part=1)
connect_generator(vta[vta_DA0], 400., 600., rate=250, coef_part=1)
logger.debug("* * * Attaching spikes detector")
for part in getAllParts():
    connect_detector(part)
logger.debug("* * * Attaching multimeters")
for part in getAllParts():
    connect_multimeter(part)
# Free the build helpers, then run the simulation and save results.
del build_model, connect
endbuild = datetime.datetime.now()
simulate()
get_log(startbuild, endbuild)
save(status_gui)
| {
"repo_name": "research-team/robot-dream",
"path": "direct_translation/scripts/step_1 (columns+dopa)/neuromodulation.py",
"copies": "1",
"size": "9698",
"license": "mit",
"hash": -6550723429476623000,
"line_mean": 42.6846846847,
"line_max": 112,
"alpha_frac": 0.6827180862,
"autogenerated": false,
"ratio": 2.226865671641791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3409583757841791,
"avg_score": null,
"num_lines": null
} |
from func import *
class SoftmaxActiveFn():
    """Softmax activation over the last axis."""
    def __init__(self):
        pass

    @staticmethod
    def forward(Z):
        """Return softmax(Z) along the last axis.

        The per-row maximum is subtracted before exponentiating so large
        pre-activations cannot overflow np.exp; mathematically the result
        is unchanged because softmax is shift-invariant.  keepdims=True
        replaces the original manual shape surgery and works for any rank.
        """
        shifted = Z - np.max(Z, axis=-1, keepdims=True)
        expY = np.exp(shifted)
        Y = expY / np.sum(expY, axis=-1, keepdims=True)
        return Y

    @staticmethod
    def backward(dEdY, Y, Z):
        """Backprop through softmax: dE/dz = y * (dE/dy - sum_j(dE/dy_j * y_j)).

        Uses keepdims so the reduction broadcasts for inputs of any rank
        (the original reshape(timespan, 1) assumed a 2-D Y).
        """
        U = dEdY * Y
        dEdZ = U - np.sum(U, axis=-1, keepdims=True) * Y
        return dEdZ
class SigmoidActiveFn():
    """Logistic sigmoid activation."""
    def __init__(self):
        pass

    @staticmethod
    def forward(Z):
        """Map pre-activations Z through the logistic function."""
        return sigmoidFn(Z)

    @staticmethod
    def backward(dEdY, Y, Z):
        """Chain rule using the cached output: sigma'(z) = y * (1 - y)."""
        local_grad = Y * (1 - Y)
        return dEdY * local_grad
class TanhActiveFn():
    """Hyperbolic tangent activation."""
    def __init__(self):
        pass

    @staticmethod
    def forward(Z):
        """Elementwise tanh of the pre-activation."""
        activated = np.tanh(Z)
        return activated

    @staticmethod
    def backward(dEdY, Y, Z):
        """Chain rule using the cached output: tanh'(z) = 1 - y**2."""
        local_grad = 1 - Y * Y
        return local_grad * dEdY
class IdentityActiveFn():
    """Identity activation: values and gradients pass straight through."""
    def __init__(self):
        pass

    @staticmethod
    def forward(Z):
        # y = z
        return Z

    @staticmethod
    def backward(dEdY, Y, Z):
        # dE/dz = dE/dy
        return dEdY
class ReluActiveFn():
    """Rectified linear unit activation."""
    def __init__(self):
        pass

    @staticmethod
    def forward(Z):
        """Elementwise max(0, z)."""
        rectified = np.maximum(0, Z)
        return rectified

    @staticmethod
    def backward(dEdY, Y, Z):
        """Gradient flows only where the unit was active (cached Y > 0)."""
        active_mask = (Y > 0).astype(int)
        return active_mask * dEdY
| {
"repo_name": "renmengye/imageqa-public",
"path": "src/nn/active_func.py",
"copies": "1",
"size": "1470",
"license": "mit",
"hash": -5120825137008048000,
"line_mean": 18.4166666667,
"line_max": 88,
"alpha_frac": 0.5034013605,
"autogenerated": false,
"ratio": 3.442622950819672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4446024311319672,
"avg_score": null,
"num_lines": null
} |
from .func import XClass
def test_XClass():
    """Exercise XClass's deferred-expression DSL.

    Every expression built from X is itself a callable: calling it with the
    eventual operand evaluates the recorded operation.  Covers attribute and
    method-call chaining, indexing, every binary operator with X on either
    side, comparisons, and unary operators.
    """
    X = XClass()
    class A():
        x = 'a'
    # Equality and attribute / method-call chaining.
    assert (X == 'A')('A')
    assert ('A' == X)('A')
    assert (X.x.call('upper') == 'A')(A())
    assert ((X.call('lower').upper)('a'))() == 'A'
    # Nested indexing.
    assert (X[0][1] == 2)([(1, 2), (3, 4)])
    # Binary operators with X on the left (plain and indexed forms).
    assert (X + 2 + 3 == 6)(1)
    assert (X[0] + 2 + 3 == 6)([1])
    assert (X * 2 * 3 == 6)(1)
    assert (X[0] * 2 * 3 == 6)([1])
    assert (X - 2 - 3 == -4)(1)
    assert (X[0] - 2 - 3 == -4)([1])
    assert (X % 5 == 2)(7)
    assert (X[0] % 5 == 2)([7])
    assert (X ** 5 == 32)(2)
    assert (X[0] ** 5 == 32)([2])
    assert (X ^ 1 == 3)(2)
    assert (X[0] ^ 1 == 3)([2])
    assert (X // 2 == 0)(1)
    assert (X[0] // 2 == 0)([1])
    assert (X / 2 == 0.5)(1)
    assert (X[0] / 2 == 0.5)([1])
    assert (X << 2 == 4)(1)
    assert (X[0] << 2 == 4)([1])
    assert (X >> 2 == 0)(1)
    assert (X[0] >> 2 == 0)([1])
    # Comparisons with X on the left.
    assert (X < 2)(1)
    assert (X[0] < 2)([1])
    assert (X <= 2)(2)
    assert (X[0] <= 2)([2])
    assert (X > 2)(3)
    assert (X[0] > 2)([3])
    assert (X >= 2)(2)
    assert (X[0] >= 2)([2])
    assert (X == 2)(2)
    assert (X[0] == 2)([2])
    assert (X != 2)(3)
    assert (X[0] != 2)([3])
    # Unary operators.
    assert (-X == -2)(2)
    assert (-X[0] == -2)([2])
    assert (+X == -2)(-2)
    assert (+X[0] == -2)([-2])
    assert (~X == -2)(1)
    assert (~X[0] == -2)([1])
    # Binary operators with X on the right (reflected forms).
    assert (2 + X + 3 == 6)(1)
    assert (2 + X[0] + 3 == 6)([1])
    assert (2 * X * 3 == 6)(1)
    assert (2 * X[0] * 3 == 6)([1])
    assert (2 - X - 3 == -2)(1)
    assert (2 - X[0] - 3 == -2)([1])
    assert (7 % X == 2)(5)
    assert (7 % X[0] == 2)([5])
    assert (2 ** X == 32)(5)
    assert (2 ** X[0] == 32)([5])
    assert (2 ^ X == 3)(1)
    assert (2 ^ X[0] == 3)([1])
    assert (1 // X == 0)(2)
    assert (1 // X[0] == 0)([2])
    assert (1 / X == 0.5)(2)
    assert (1 / X[0] == 0.5)([2])
    assert (1 << X == 4)(2)
    assert (1 << X[0] == 4)([2])
    assert (1 >> X == 0)(2)
    assert (1 >> X[0] == 0)([2])
    # Comparisons with X on the right.
    assert (1 < X)(2)
    assert (1 < X[0])([2])
    assert (2 <= X)(2)
    assert (2 <= X[0])([2])
    assert (3 > X)(2)
    assert (3 > X[0])([2])
    assert (2 >= X)(2)
    assert (2 >= X[0])([2])
    assert (2 == X)(2)
    assert (2 == X[0])([2])
    assert (3 != X)(2)
    assert (3 != X[0])([2])
    # Unary operators checked via reflected equality.
    assert (-2 == -X)(2)
    assert (-2 == -X[0])([2])
    assert (-2 == +X)(-2)
    assert (-2 == +X[0])([-2])
    assert (-2 == ~X)(1)
    assert (-2 == ~X[0])([1])
| {
"repo_name": "aymazon/functools-ex",
"path": "functoolsex/tests.py",
"copies": "1",
"size": "2487",
"license": "mit",
"hash": -8848559449395613000,
"line_mean": 26.3296703297,
"line_max": 50,
"alpha_frac": 0.3634901488,
"autogenerated": false,
"ratio": 2.386756238003839,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3250246386803839,
"avg_score": null,
"num_lines": null
} |
from funcparserlib import parser as fp
from globibot.lib.helpers import parsing as p
class Value:
    """A magnitude tagged with a measurement Unit."""

    def __init__(self, unit, value):
        self.unit = unit
        self.value = value

    def __add__(self, other):
        """Add two values of the same dimension (Length, Mass, ...).

        Both operands are first pushed down to the finest-grained unit of
        their system; if they still disagree (imperial vs metric), the
        right operand is converted across systems first.  The result is
        normalized back to a human-friendly unit.
        """
        assert(type(self.unit) is type(other.unit))
        lhs = increase_max(self)
        rhs = increase_max(other)
        if lhs.unit != rhs.unit:
            rhs = increase_max(system_convert(rhs))
        assert(lhs.unit == rhs.unit)
        total = Value(lhs.unit, lhs.value + rhs.value)
        return normalize(total)

    def __str__(self):
        """Render as '<value> <unit>', appending the normalized form when it differs."""
        rendered = '{:.2f} {}'.format(self.value, self.unit.name)
        reduced = normalize(self)
        if reduced.unit != self.unit:
            rendered += ' ({})'.format(reduced)
        return rendered
class Unit:
    """A named measurement unit with case-insensitive aliases."""

    def __init__(self, name, *abrevs):
        self.name = name
        # Lower-cased display name plus every alias, for parser lookups.
        aliases = (name,) + abrevs
        self.names = tuple(alias.lower() for alias in aliases)

    def __call__(self, value):
        """Shorthand constructor: unit(3) -> Value(unit, 3)."""
        return Value(self, value)
# Each dimension gets its own Unit subclass so Conversion can assert it
# never crosses dimensions (Length <-> Length only, etc.).
class Length(Unit): pass
inch = Length('in', 'inch', 'inches', "''", '"')
foot = Length('ft', 'foot', 'feet', "'")
yard = Length('yd', 'yard', 'yards')
mile = Length('mi', 'mile', 'miles')
mm = Length('mm', 'millimeter', 'millimeters')
cm = Length('cm', 'centimeter', 'centimeters')
m = Length('m', 'meter', 'meters')
km = Length('km', 'kilometer', 'kilometers')
class Mass(Unit): pass
oz = Mass('oz', 'ounce', 'ounces')
lb = Mass('lb', 'pound', 'pounds')
mg = Mass('mg', 'milligram', 'milligrams')
g = Mass('g', 'gram', 'grams')
kg = Mass('kg', 'kilogram', 'kilograms')
class Volume(Unit): pass
pt = Volume('pt', 'pint', 'pints')
gallon = Volume('gallon', 'gallons')
ml = Volume('mL', 'milliliter', 'milliliters')
l = Volume('L', 'liter', 'liters')
class Temperature(Unit): pass
fahrenheit = Temperature('°F', 'fahrenheit', 'fahrenheits', 'f')
centigrad = Temperature('°C', 'centigrad', 'centigrads', 'c')
# Flat registry of every supported unit.
UNITS = [
    # Lengths
    inch, foot, yard, mile,
    mm, cm, m, km,
    # Masses
    oz, lb,
    mg, g, kg,
    # Volumes,
    pt, gallon,
    ml, l,
    # Temperatures
    fahrenheit,
    centigrad
]
# Lookup table from every lower-cased alias to its Unit.
UNITS_BY_NAME = dict(
    (name, unit) for unit in UNITS
    for name in unit.names
)
class Conversion:
    """A one-way conversion between two units of the same dimension."""

    def __init__(self, u_from, u_to, converter):
        # Only convert within a dimension (Length -> Length, etc.).
        assert(type(u_from) is type(u_to))
        self.u_from = u_from
        self.u_to = u_to
        self.converter = converter

    def __call__(self, val):
        """Apply the conversion to a Value expressed in u_from."""
        assert(val.unit == self.u_from)
        converted = self.converter(val.value)
        return Value(self.u_to, converted)
def simple_ratio(v1, v2):
    """Build a Conversion from the linear ratio between two sample Values."""
    return Conversion(
        v1.unit,
        v2.unit,
        lambda amount: amount * (v2.value / v1.value)
    )
def two_way_ratio(v1, v2):
    """Return the forward and reverse ratio conversions as a pair."""
    forward = simple_ratio(v1, v2)
    backward = simple_ratio(v2, v1)
    return (forward, backward)
# Cross-system conversions (imperial <-> metric).  All are plain ratios in
# both directions except temperature, which needs an affine formula.
SYSTEM_CONVERSIONS = [
    *two_way_ratio(inch(1), mm(25.4)),
    *two_way_ratio(foot(1), cm(30.48)),
    *two_way_ratio(yard(1), m(0.9144)),
    *two_way_ratio(mile(1), km(1.60934)),
    *two_way_ratio(oz(1), mg(28349.5)),
    *two_way_ratio(oz(1), g(28.3495)),
    *two_way_ratio(lb(1), kg(0.453592)),
    *two_way_ratio(pt(1), ml(473.176)),
    *two_way_ratio(gallon(1), l(3.78541)),
    Conversion(fahrenheit, centigrad, lambda f: (f - 32) / 1.8),
    Conversion(centigrad, fahrenheit, lambda c: c * 1.8 + 32),
]
# NOTE(review): building these dicts keyed by u_from keeps only the last
# conversion per source unit (oz appears twice above) — confirm intended.
SYSTEM_CONVERSIONS_BY_UNIT = dict(
    (conversion.u_from, conversion)
    for conversion in SYSTEM_CONVERSIONS
)
# Steps toward larger units (e.g. 10 mm -> 1 cm), used by reduce_convert.
REDUCE_CONVERSIONS = [
    simple_ratio(mm(10), cm(1)),
    simple_ratio(cm(100), m(1)),
    simple_ratio(m(1000), km(1)),
    simple_ratio(inch(12), foot(1)),
    simple_ratio(foot(3), yard(1)),
    simple_ratio(yard(1760), mile(1)),
    simple_ratio(oz(16), lb(1)),
    simple_ratio(mg(1000), g(1)),
    simple_ratio(g(1000), kg(1)),
    simple_ratio(pt(8), gallon(1)),
    simple_ratio(ml(1000), l(1)),
]
REDUCE_CONVERSIONS_BY_UNIT = dict(
    (conversion.u_from, conversion)
    for conversion in REDUCE_CONVERSIONS
)
# Steps toward smaller units (e.g. 1 cm -> 10 mm), used by increase_convert.
INCREASE_CONVERSIONS = [
    simple_ratio(cm(1), mm(10)),
    simple_ratio(m(1), cm(100)),
    simple_ratio(km(1), m(1000)),
    simple_ratio(foot(1), inch(12)),
    simple_ratio(yard(1), foot(3)),
    simple_ratio(mile(1), yard(1760)),
    simple_ratio(lb(1), oz(16)),
    simple_ratio(g(1), mg(1000)),
    simple_ratio(kg(1), g(1000)),
    simple_ratio(gallon(1), pt(8)),
    simple_ratio(l(1), ml(1000)),
]
INCREASE_CONVERSIONS_BY_UNIT = dict(
    (conversion.u_from, conversion)
    for conversion in INCREASE_CONVERSIONS
)
def system_convert(value):
    """Convert across measurement systems when a mapping exists; else pass through."""
    conversion = SYSTEM_CONVERSIONS_BY_UNIT.get(value.unit)
    if conversion is None:
        return value
    return conversion(value)
def reduce_convert(value):
    """Climb toward larger units while the magnitude stays >= 1.

    e.g. 1500 m becomes 1.5 km, but the input is returned unchanged when
    the next step would drop the magnitude below 1.
    """
    conversion = REDUCE_CONVERSIONS_BY_UNIT.get(value.unit)
    if conversion is None:
        return value
    stepped = conversion(value)
    if stepped.value < 1:
        return value
    return reduce_convert(stepped)
def increase_convert(value):
    """Descend toward smaller units while the magnitude stays below 1.

    e.g. 0.5 km becomes 500 m; returns the input unchanged once the next
    step would reach a magnitude of 1 or more.
    """
    conversion = INCREASE_CONVERSIONS_BY_UNIT.get(value.unit)
    if conversion is None:
        return value
    stepped = conversion(value)
    if stepped.value >= 1:
        return value
    return increase_convert(stepped)
def increase_max(value):
    """Convert all the way down to the finest-grained unit of the chain."""
    while value.unit in INCREASE_CONVERSIONS_BY_UNIT:
        value = INCREASE_CONVERSIONS_BY_UNIT[value.unit](value)
    return value
def normalize(value):
    """Pick a human-friendly unit: reduce when >= 1, otherwise add precision."""
    return reduce_convert(value) if value.value >= 1 else increase_convert(value)
def sum_units(unit, *units):
    """Fold any number of values together with +, starting from the first."""
    total = unit
    for extra in units:
        total = total + extra
    return total
# Lower-cased token text, used both as the predicate input and the parsed value.
to_s_lowered = lambda tok: tok.value.lower()
# Parser accepting any token whose lower-cased text names a known unit.
unit_parser = p.some(
    lambda tok: to_s_lowered(tok) in UNITS_BY_NAME
) >> to_s_lowered
@fp.Parser
def unit_value_parser(tokens, s):
    """Parse '<number> <unit>' from the token stream into a Value.

    Two tokens are consumed, hence the len(tokens) - 1 bound on the position.
    """
    if s.pos >= len(tokens) - 1:
        raise fp.NoParseError(u'no tokens left in the stream', s)
    else:
        value, s1 = p.number.run(tokens, s)
        unit_name, s2 = unit_parser.run(tokens, s1)
        return Value(UNITS_BY_NAME[unit_name], value), s2
unit_value_parser.name = 'U'
| {
"repo_name": "best-coloc-ever/globibot",
"path": "bot/plugins/utils/units.py",
"copies": "1",
"size": "6097",
"license": "mit",
"hash": -6573832254576999000,
"line_mean": 22.6240310078,
"line_max": 65,
"alpha_frac": 0.5937653815,
"autogenerated": false,
"ratio": 2.9190613026819925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8985650237870925,
"avg_score": 0.005435289262213453,
"num_lines": 258
} |
from funcparserlib.lexer import make_tokenizer
from funcparserlib import parser as p
from collections import namedtuple
from re import DOTALL
class TokenType:
    """Tag constants assigned by the tokenizer to classify each lexeme."""
    Space = 'SPACE'      # whitespace runs (usually filtered out)
    Integer = 'INTEGER'  # optionally-signed integer literal
    Float = 'FLOAT'      # optionally-signed decimal literal
    Mention = 'MENTION'  # user mention token like <@123> or <@!123>
    Channel = 'CHANNEL'  # channel reference like <#123>
    Emoji = 'EMOJI'      # custom emoji like <:name:123>
    Snippet = 'SNIPPET'  # fenced code block ```lang ... ```
    Word = 'WORD'        # any other non-space run (catch-all)
# Tokenizer spec: each entry is (type, (regex[, flags])), listed roughly
# from most to least specific; Word is the catch-all and must stay last.
TOKEN_SPEC = [
    (TokenType.Space, (r'\s+',)),
    (TokenType.Float, (r'[+-]?[0-9]*\.[0-9]+',)),
    (TokenType.Integer, (r'[+-]?[0-9]+',)),
    (TokenType.Mention, (r'<@!?[0-9]+>',)),
    (TokenType.Channel, (r'<#[0-9]+>',)),
    (TokenType.Emoji, (r'<:\S+:[0-9]+>',)),
    (TokenType.Snippet, (r'```\S+\n(.*?)```', DOTALL)),  # DOTALL: code may span lines
    (TokenType.Word, (r'\S+',)), # Word is currently a catch-all
]
default_tokenizer = make_tokenizer(TOKEN_SPEC)
def tokenize(string, tokenizer=default_tokenizer, ignores=(TokenType.Space,)):
    """Tokenize *string*, dropping every token whose type is in *ignores*."""
    kept = []
    for token in tokenizer(string):
        if token.type not in ignores:
            kept.append(token)
    return kept
# Transformers
# Small token -> value adapters, applied with funcparserlib's >> operator.
to_i = lambda tok: int(tok.value)                 # token -> int
to_f = lambda tok: float(tok.value)               # token -> float
to_s = lambda tok: str(tok.value)                 # token -> str
to_a = lambda toks: [tok.value for tok in toks]   # token list -> value list
const = lambda value: lambda _: value             # ignore input, return fixed value
def extract_mention_id(tok):
    """Pull the numeric id out of a mention token (<@123> or <@!123>)."""
    prefix_len = 3 if '!' in tok.value else 2
    return int(tok.value[prefix_len:-1])
def extract_channel_id(tok):
    """Pull the numeric id out of a channel token (<#123>)."""
    digits = tok.value[2:-1]
    return int(digits)
def extract_emoji_id(tok):
    """Pull the numeric id out of an emoji token (<:name:123>)."""
    raw = tok.value
    begin = raw.rfind(':') + 1
    end = raw.rfind('>')
    return int(raw[begin:end])
Snippet = namedtuple('Snippet', ['language', 'code'])

def extract_snippet(tok):
    """Split a fenced code-block token into a Snippet(language, code).

    The token text looks like ```lang\\n<code>``` ; the language tag is
    lower-cased and the closing fence is stripped from the code.
    """
    raw = tok.value
    newline_at = raw.index('\n')
    lang = raw[3:newline_at].lower()
    body = raw[newline_at + 1:-3]
    return Snippet(language=lang, code=body)
# Parsers
def not_parser(parser):
    """Build a parser succeeding (and consuming one token) exactly when
    *parser* fails at the current position, and failing when it succeeds.

    Used by sparsed() below to skip tokens until the wrapped parser matches.
    """
    @p.Parser
    def _not_parser(tokens, s):
        if s.pos >= len(tokens):
            raise p.NoParseError('no tokens left in the stream', s)
        try:
            parser.run(tokens, s)
        except p.NoParseError:
            # Inner parser failed here: advance one token and succeed.
            pos = s.pos + 1
            s2 = p.State(pos, max(pos, s.max))
            return None, s2
        else:
            raise p.NoParseError('parsing failed', s)
    _not_parser.name = '!{}'.format(parser.name)
    return _not_parser
# Token-level primitives and compactly renamed combinator aliases.
a = lambda value: p.some(lambda tok: tok.value == value)  # exact token text
string = lambda s: p.some(lambda tok: tok.value.lower() == s.lower()) .named(s)  # case-insensitive word
some_type = lambda t: p.some(lambda tok: tok.type == t) .named(t)
not_type = lambda t: p.some(lambda tok: tok.type != t) .named('!{}'.format(t))
any_type = p.some(lambda _: True) .named('Any')
eof = p.finished .named('')
some = p.some
# Re-exported funcparserlib combinators with compact display names.
maybe = lambda parser: p.maybe(parser) .named('[{}]'.format(parser.name))
many = lambda parser: p.many(parser) .named('[{}]...'.format(parser.name))
skip = lambda parser: p.skip(parser) .named('')
oneplus = lambda parser: p.oneplus(parser) .named('{},[{}]...'.format(parser.name, parser.name))
# Skip tokens until *parser* matches, then apply it.
sparsed = lambda parser: (skip(many(not_parser(parser))) + parser)\
    .named('_{}'.format(parser.name))
# Typed leaf parsers producing plain Python values.
integer = (some_type(TokenType.Integer) >> to_i) .named('I')
number = (
    ( some_type(TokenType.Integer)
    | some_type(TokenType.Float)
    )
    >> to_f) .named('N')
word = (some_type(TokenType.Word) >> to_s) .named('W')
mention = (some_type(TokenType.Mention) >> extract_mention_id) .named('M')
channel = (some_type(TokenType.Channel) >> extract_channel_id) .named('C')
emoji = (some_type(TokenType.Emoji) >> extract_emoji_id) .named('E')
snippet = (some_type(TokenType.Snippet) >> extract_snippet) .named('S')
# High level helpers
on_off_switch = (
    (string('on') >> const(True) ) |
    (string('off') >> const(False))
)
# Any word token that looks like a URL (starts with 'http').
url = p.some(
    lambda tok: tok.type == TokenType.Word and tok.value.startswith('http')
)
def int_range(low, high):
    """Parser for an integer token whose value lies in [low, high] inclusive."""
    def in_bounds(token):
        # Reject non-integer tokens outright, then bounds-check the value.
        if token.type != TokenType.Integer:
            return False
        return low <= to_i(token) <= high
    return p.some(in_bounds) >> to_i
def one_of(parser, first, *rest):
    """Fold parser(first) | parser(x) over every remaining alternative."""
    combined = parser(first)
    for alternative in rest:
        combined = combined | parser(alternative)
    return combined
# Context
class BoundPair(tuple): pass  # marker type for a (name, value) pair made by bind()
def bind(transformed, name):
    """Tag a parser's result as a named BoundPair (None results stay None)."""
    bind_expr = lambda value: BoundPair((name, value)) if value is not None else None
    return (transformed >> bind_expr).named('<{}#{}>'.format(name, transformed.name))
| {
"repo_name": "best-coloc-ever/globibot",
"path": "bot/src/globibot/lib/helpers/parsing.py",
"copies": "1",
"size": "4706",
"license": "mit",
"hash": -1776550524899081200,
"line_mean": 31.2328767123,
"line_max": 97,
"alpha_frac": 0.5796855079,
"autogenerated": false,
"ratio": 3.210095497953615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4289781005853615,
"avg_score": null,
"num_lines": null
} |
from func_prototypes.util import dictjoin
from functools import partial, wraps
try:
from inspect import getfullargspec as inspector
except ImportError:
from inspect import getargspec as inspector # Python 2 fallback.
"""
Adapts args, kwargs into **kwargs only with help of function prototype.
Prototype should be a list of arg names.
"""
def to_kwargs(func, args, kwargs):
    """Merge positional *args* into *kwargs* using *func*'s argument names.

    Raises TypeError when func itself declares *args/**kwargs, when too many
    positionals are supplied, or when a positional collides with a keyword.
    """
    fname = func.__name__
    spec = inspector(func)
    # spec[2] is 'keywords' for getargspec and 'varkw' for getfullargspec.
    if spec.varargs is not None or spec[2] is not None:
        raise TypeError( "Cannot convert arguments for function %s because it uses args or kwargs." % fname)
    prototype = spec.args
    if len(args) > len(prototype):
        raise TypeError("%s takes at most %d arguments (%s given)" % (fname, len(prototype), len(args)))
    merged = kwargs.copy()
    for name, value in zip(prototype, args):
        if name in merged:
            raise TypeError("%s got multiple values for keyword argument '%s'" % (fname, name))
        merged[name] = value
    return merged
"""Function which adapts arguments for function foo using convert"""
def adapt(foo, convert, context=None):
    """Wrap *foo* so each call is normalized to kwargs and rewritten by *convert*."""
    @wraps(foo)
    def wrapped(*args, **kwargs):
        # Canonicalize the call to keyword form, then let convert rewrite it.
        as_kwargs = to_kwargs(foo, args, kwargs)
        rewritten = convert(foo.__name__, as_kwargs, context)
        return foo(**rewritten)
    return wrapped
"""Decorator which allows for easy use of adapt"""
def adapter(convert, context=None):
    """Decorator form of adapt(): @adapter(convert) above a def."""
    def wrap(foo):
        return adapt(foo, convert, context)
    return wrap
"""
Decorator which applies constructors to args before calling the wrapped function with new_args
"""
def constructors(*constructors):
    """Decorator that runs each argument through a per-parameter constructor."""
    def map_constructors(func_name, kwargs, kwconstructors):
        # dictjoin pairs each arg with its constructor: {name: (arg, ctor)}.
        try:
            paired = dictjoin(kwargs, kwconstructors).items()
        except KeyError as e:
            raise TypeError("%s got an unexpected keyword argument '%s'" % (func_name, e.args[0]))
        return {name: ctor(arg) for (name, (arg, ctor)) in paired}
    def wrap(foo):
        # Positional constructors are matched to parameter names up front.
        kwconstructors = to_kwargs(foo, constructors, {})
        return adapt(foo, map_constructors, kwconstructors)
    return wrap
"""
Decorator which applies type checking to args before calling the wrapped function.
"""
def typed(*types):
    """Decorator that isinstance-checks every argument against its declared type."""
    def check_types(func_name, kwargs, kwtypes):
        for name, value in kwargs.items():
            try:
                expected = kwtypes[name]
            except KeyError as e:
                raise TypeError("%s got an unexpected keyword argument '%s'" % (func_name, e.args[0]))
            if not isinstance(value, expected):
                raise TypeError("Argument %s to %s must be of %s" % (name, func_name, expected))
        return kwargs
    def wrap(foo):
        # Positional types are matched to parameter names up front.
        kwtypes = to_kwargs(foo, types, {})
        return adapt(foo, check_types, kwtypes)
    return wrap
"""
Decorator which converts the output of the wrapped function to out_type.
"""
def returns(out_type):
    """Decorator that coerces the wrapped function's return value to *out_type*."""
    def wrap(foo):
        @wraps(foo)
        def wrapped(*args, **kwargs):
            return out_type(foo(*args, **kwargs))
        return wrapped
    return wrap
"""
Decorator which checks that the retuned value of the wrapped function is an instance of out_type.
"""
def returned(out_type):
    """Decorator that type-checks the wrapped function's return value."""
    def wrap(foo):
        @wraps(foo)
        def wrapped(*args, **kwargs):
            result = foo(*args, **kwargs)
            if isinstance(result, out_type):
                return result
            raise TypeError("Return value to %s must be of %s, not %s" % (foo, out_type, type(result)))
        return wrapped
    return wrap
| {
"repo_name": "andrewguy9/func_prototypes",
"path": "func_prototypes/__init__.py",
"copies": "1",
"size": "3510",
"license": "mit",
"hash": 2623010867424563000,
"line_mean": 31.8037383178,
"line_max": 104,
"alpha_frac": 0.6743589744,
"autogenerated": false,
"ratio": 3.7580299785867237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4932388952986724,
"avg_score": null,
"num_lines": null
} |
import _thread
import logging
import threading

from .context import Context
from .func_resolver import Library
class CodeGenerator:
    '''
    The code generator for PhenoWL DSL.

    Walks a parsed AST (nested lists tagged by their first element, e.g.
    ['ASSIGN', ...]) and emits equivalent Python source.  run() places the
    final program text in self.context.out.
    '''
    def __init__(self):
        self.context = Context()
        self.code = ''
        self.imports = set()  # import lines collected from library calls
        self.indent = 0       # current indentation depth
        self.line = 0         # last DSL line number seen; used in error reports
                              # (previously unset until the first STMT, so
                              # run()'s error handler could itself fail)

    def get_params(self, expr):
        '''Evaluate each sub-expression and return the results as a list.'''
        return [self.eval(e) for e in expr]

    def indent_stmt(self, text):
        '''Prefix a statement with the current indentation.'''
        return " " * self.indent + text

    def dofunc(self, expr):
        '''
        Generate code for a function-call node.

        expr is either [name, params] or [package_prefix, name, params].
        A bare name matching a user-defined task dispatches as a task.
        '''
        function = expr[0] if len(expr) < 3 else expr[1]
        package = expr[0][:-1] if len(expr) > 2 else None
        params = expr[1] if len(expr) < 3 else expr[2]
        args = self.get_params(params)
        # call task if exists
        if package is None and function in self.context.library.tasks:
            return self.context.library.code_run_task(function, args, self.dotaskstmt)
        if not self.context.library.check_function(function, package):
            raise Exception(r"'{0}' doesn't exist.".format(function))
        return self.context.library.code_func(self.context, package, function, args)

    def dorelexpr(self, expr):
        '''
        Generate a relational expression "<left> <op> <right>".
        Any operator other than <, >, <=, >= is emitted as equality.
        '''
        left = self.eval(expr[0])
        right = self.eval(expr[2])
        operator = expr[1]
        if operator not in ('<', '>', '<=', '>='):
            operator = '=='
        return "{0} {1} {2}".format(str(left), operator, str(right))

    def doand(self, expr):
        '''Generate an "and" chain; an empty chain is vacuously True.'''
        # BUG FIX: the original tested "expr is empty" where `empty` was
        # never defined (NameError); an empty chain now short-circuits.
        if not expr:
            return True
        right = self.eval(expr[-1])
        if len(expr) == 1:
            return right
        left = expr[:-2]
        if len(left) > 1:
            left = ['ANDEXPR'] + left
        left = self.eval(left)
        return "{0} and {1}".format(str(left), str(right))

    def dopar(self, expr):
        '''Generate the scaffold for a parallel block (body emission disabled).'''
        code = 'taskManager = TaskManager()\n'
        # for stmt in expr:
        #     code += 'taskManager.submit_func(lambda: ' + self.eval(stmt) + ')\n'
        return code

    def dopar_stmt(self, expr):
        '''Evaluate a statement intended for parallel execution.'''
        self.run_multstmt(lambda: self.eval(expr))

    def run_multstmt(self, f):
        '''Hook for multi-statement evaluation; currently just calls f.'''
        return f()

    def dolog(self, expr):
        '''Generate an "or" chain (left-associative).'''
        right = self.eval(expr[-1])
        if len(expr) == 1:
            return right
        left = expr[:-2]
        if len(left) > 1:
            left = ['LOGEXPR'] + left
        left = self.eval(left)
        return "{0} or {1}".format(str(left), str(right))

    def domult(self, expr):
        '''Generate a multiplication/division chain (left-associative).'''
        right = self.eval(expr[-1])
        if len(expr) == 1:
            return right
        left = expr[:-2]
        if len(left) > 1:
            left = ['MULTEXPR'] + left
        left = self.eval(left)
        op = '/' if expr[-2] == '/' else '*'
        return "{0} {1} {2}".format(str(left), op, str(right))

    def doarithmetic(self, expr):
        '''Generate an addition/subtraction chain (left-associative).'''
        right = self.eval(expr[-1])
        if len(expr) == 1:
            return right
        left = expr[:-2]
        if len(left) > 1:
            left = ['NUMEXPR'] + left
        left = self.eval(left)
        op = '+' if expr[-2] == '+' else '-'
        return "{0} {1} {2}".format(str(left), op, str(right))

    def doif(self, expr):
        '''Generate an if (optionally if/else) statement.'''
        code = "if " + self.eval(expr[0]) + ":\n"
        code += self.run_multstmt(lambda: self.eval(expr[1]))
        if len(expr) > 3:
            code += "else:\n"
            code += self.run_multstmt(lambda: self.eval(expr[3]))
        return code

    def dolock(self, expr):
        '''
        Evaluate a statement while holding a named re-entrant lock stored in
        the symbol table, creating the lock on first use.

        NOTE(review): unlike the other handlers this evaluates immediately
        and returns None rather than code — confirm that is intended.
        '''
        if not self.context.symtab.var_exists(expr[0]) or not isinstance(self.context.symtab.get_var(expr[0]), _thread.RLock):
            self.context.symtab.add_var(expr[0], threading.RLock())
        with self.context.symtab.get_var(expr[0]):
            self.eval(expr[1])

    def doassign(self, expr):
        '''Generate an assignment "name = value".'''
        return "{0} = {1}".format(expr[0], self.eval(expr[1]))

    def dofor(self, expr):
        '''Generate a for loop over an iterable expression.'''
        code = "for {0} in {1}:\n".format(self.eval(expr[0]), self.eval(expr[1]))
        code += self.run_multstmt(lambda: self.eval(expr[2]))
        return code

    def eval_value(self, str_value):
        '''Return a literal/identifier unchanged (leaf of the AST).'''
        return str_value

    def dolist(self, expr):
        '''Evaluate every element of a list node.'''
        return [self.eval(e) for e in expr]

    def dolistidx(self, expr):
        '''Resolve indexing into a previously bound list variable.'''
        val = self.context.get_var(expr[0])
        return val[self.eval(expr[1])]

    def dostmt(self, expr):
        '''Generate one indented statement; expr[0] carries the DSL line number.'''
        if len(expr) > 1:
            logging.debug("Processing line: {0}".format(expr[0]))
            self.line = int(expr[0])
            return self.indent_stmt(self.eval(expr[1:])) + '\n'

    def dotaskdefstmt(self, expr):
        '''Run an anonymous task immediately; register a named one for later.'''
        if not expr[0]:
            args = self.get_params(expr[1])
            return self.dotaskstmt(expr, args)
        else:
            self.context.library.add_task(expr[0], expr)
            return ''

    def dotaskstmt(self, expr, args):
        '''
        Generate a task body, pushing its data-context (server/user/password)
        for the duration of the evaluation.  Credentials come from the call
        arguments first, falling back to the task definition's defaults.
        '''
        server = args[0] if len(args) > 0 else None
        user = args[1] if len(args) > 1 else None
        password = args[2] if len(args) > 2 else None
        if not server:
            server = self.eval(expr[1][0]) if len(expr[1]) > 0 else None
        if not user:
            user = self.eval(expr[1][1]) if len(expr[1]) > 1 else None
        if not password:
            password = self.eval(expr[1][2]) if len(expr[1]) > 2 else None
        try:
            self.context.append_dci(server, user, password)
            return 'if True:\n' + self.eval(expr[2])
        finally:
            self.context.pop_dci()

    def eval(self, expr):
        '''
        Evaluate an AST node.
        :param expr: a leaf value or a tagged list like ['IF', ...].
        '''
        if not isinstance(expr, list):
            return self.eval_value(expr)
        if not expr:
            return
        if len(expr) == 1:
            return self.eval(expr[0])
        if expr[0] == "FOR":
            return self.dofor(expr[1])
        elif expr[0] == "ASSIGN":
            return self.doassign(expr[1:])
        elif expr[0] == "CONST":
            return self.eval_value(expr[1])
        elif expr[0] == "NUMEXPR":
            return self.doarithmetic(expr[1:])
        elif expr[0] == "MULTEXPR":
            return self.domult(expr[1:])
        elif expr[0] == "CONCAT":
            return self.doarithmetic(expr[1:])
        elif expr[0] == "LOGEXPR":
            return self.dolog(expr[1:])
        elif expr[0] == "ANDEXPR":
            return self.doand(expr[1:])
        elif expr[0] == "RELEXPR":
            return self.dorelexpr(expr[1:])
        elif expr[0] == "IF":
            return self.doif(expr[1])
        elif expr[0] == "LIST":
            return self.dolist(expr[1])
        elif expr[0] == "FUNCCALL":
            code, imports = self.dofunc(expr[1])
            self.imports.update(imports)
            return code
        elif expr[0] == "LISTIDX":
            return self.dolistidx(expr[1])
        elif expr[0] == "PAR":
            return self.dopar(expr[1])
        elif expr[0] == "LOCK":
            return self.dolock(expr[1:])
        elif expr[0] == "STMT":
            return self.dostmt(expr[1:])
        elif expr[0] == "TASK":
            return self.dotaskdefstmt(expr[1:])
        elif expr[0] == "MULTISTMT":
            # NOTE(review): expr[1] is popped once on entry and once on exit,
            # consuming two indent markers — confirm against the grammar.
            try:
                self.indent = int(expr[1].pop()) - 1
                return self.eval(expr[2:])
            finally:
                self.indent = int(expr[1].pop()) - 1
        else:
            # Untagged sequence: concatenate the code of every child node.
            code = ''
            for subexpr in expr:
                code += self.eval(subexpr)
            return code

    # Run it
    def run(self, prog):
        '''
        Generate code for a whole parsed program.
        :param prog: pyparsing ParseResults for the program.
        '''
        try:
            self.context.reload()
            stmt = prog.asList()
            code = self.eval(stmt)
            # BUG FIX: the original overwrote `imports` on every iteration
            # ("imports = i + '\n'"), emitting only one of the collected
            # import lines; all of them are now joined.
            imports = ''.join(i + '\n' for i in self.imports)
            self.context.out = imports + '\n' + code
        except Exception as err:
            self.context.err.append("Error at line {0}: {1}".format(self.line, err))
| {
"repo_name": "mainulhossain/phenoproc",
"path": "app/biowl/dsl/pygen.py",
"copies": "1",
"size": "9425",
"license": "mit",
"hash": 9199486036215626000,
"line_mean": 30.6275167785,
"line_max": 126,
"alpha_frac": 0.4913527851,
"autogenerated": false,
"ratio": 3.6989795918367347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46903323769367344,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.