text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'aje'
__version__ = '0.1.0'
#
# Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sys
import re
import datetime
import pymongo
# The Blog Post Data Access Object handles interactions with the Posts collection
class BlogPostDAO:
    """Data Access Object for the blog's `posts` collection."""

    # constructor: remember the database handle and the posts collection
    def __init__(self, database):
        self.db = database
        self.posts = database.posts

    # inserts the blog entry and returns a permalink for the entry
    def insert_entry(self, title, post, tags_array, author):
        """Insert a new post document and return its permalink.

        The permalink derives from the title: whitespace becomes
        underscores, then any remaining non-word character is dropped.
        """
        print("inserting blog entry", title, post)

        # raw strings so the regex escapes survive (bare '\W'/'\s' string
        # escapes are deprecated in Python 3.6+ and an error in 3.12)
        non_word = re.compile(r'\W')    # match anything not alphanumeric/underscore
        whitespace = re.compile(r'\s')
        permalink = non_word.sub('', whitespace.sub('_', title))

        # Build a new post document
        post = {"title": title,
                "author": author,
                "body": post,
                "permalink": permalink,
                "tags": tags_array,
                "comments": [],
                "date": datetime.datetime.utcnow()}

        # insert the post; narrow handler so ^C / SystemExit still propagate
        try:
            print("Inserting the post")
            self.posts.insert_one(post)
        except Exception:
            print("Error inserting post")
            print("Unexpected error:", sys.exc_info()[0])

        return permalink

    # returns an array of num_posts posts, reverse ordered by datetime
    def get_posts(self, num_posts):
        """Return up to num_posts most-recent posts as plain dicts."""
        cursor = self.posts.find().limit(num_posts).sort([("date", pymongo.DESCENDING)])
        l = []
        for post in cursor:
            post['date'] = post['date'].strftime("%A, %B %d %Y at %I:%M%p")  # fix up date
            post.setdefault('tags', [])      # older documents may lack these fields
            post.setdefault('comments', [])
            l.append({'title': post['title'], 'body': post['body'], 'post_date': post['date'],
                      'permalink': post['permalink'],
                      'tags': post['tags'],
                      'author': post['author'],
                      'comments': post['comments']})
        return l

    # find a post corresponding to a particular permalink
    def get_post_by_permalink(self, permalink):
        """Return the post document for permalink, or None if not found."""
        post = self.posts.find_one({"permalink": permalink})
        if post is not None:
            # fix up date for display
            post['date'] = post['date'].strftime("%A, %B %d %Y at %I:%M%p")
        return post

    # add a comment to a particular blog post
    def add_comment(self, permalink, name, email, body):
        """Append a comment to the post identified by permalink.

        Returns 1 if a post was updated, -1 if no post matched the
        permalink, and 0 if the update raised an error.
        """
        comment = {'author': name, 'body': body}
        if email != "":
            comment['email'] = email
        try:
            # $push appends the comment to the post's embedded array
            # http://docs.mongodb.org/manual/reference/operator/update-array/
            result = self.posts.find_one_and_update(
                {"permalink": permalink},
                {"$push": {"comments": comment}})
            if result:
                return 1
            return -1  # no document matched the permalink
        except Exception as e:
            print("Could not update the collection, error")
            print(e)
            print("Unexpected error:", sys.exc_info()[0])
            return 0
| {
"repo_name": "thedemz/M101P",
"path": "chapter3/blogPostDAO.py",
"copies": "1",
"size": "4260",
"license": "apache-2.0",
"hash": 1699624974823343900,
"line_mean": 32.8095238095,
"line_max": 110,
"alpha_frac": 0.576056338,
"autogenerated": false,
"ratio": 4.111969111969112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004502173289611113,
"num_lines": 126
} |
__author__ = 'ajitkumar'
import requests
from bs4 import BeautifulSoup
import unicodedata
def search_page(search_string):
    """Scrape coupondunia.in for offers on the given merchant page.

    Returns a list of dicts with 'offer_title', 'coupon_code' and
    'offer_description' keys. On error, returns whatever was collected
    so far (possibly an empty list).
    """
    offer_list = list()  # defined before the try so the except path can return it
    try:
        response = requests.get('http://www.coupondunia.in/' + search_string)
        soup = BeautifulSoup(response.content)
        offers = soup.find_all('div', {'class': 'coupon-big coupon single-merchant'})
        for offer in offers:
            new_offer = dict()
            offers_coupon = offer.find_all('div', {'class': 'online_offer offer-title get-title-code'})
            offer_title = offers_coupon[0].span.text
            # normalize accented characters down to plain ascii
            offer_title = unicodedata.normalize('NFKD', offer_title).encode('ascii', 'ignore')
            new_offer['offer_title'] = offer_title
            url = offers_coupon[0]['data-coupon-url']
            new_offer['coupon_code'] = get_offer_code(url)
            offers_description = offer.find_all('div', {'class': 'meta offer-desc'})
            offer_desc = offers_description[0].text
            offer_desc = offer_desc.strip()
            offer_desc = unicodedata.normalize('NFKD', offer_desc).encode('ascii', 'ignore')
            new_offer['offer_description'] = offer_desc
            offer_list.append(new_offer)
        return offer_list
    except Exception as excpt:  # was Python-2-only "except Exception, excpt"
        print(excpt)
        return offer_list
def get_offer_code(url):
    """Fetch the coupon-code page at *url* and return the code text.

    Returns '' when the page cannot be fetched or parsed.
    """
    try:
        response = requests.get(url)
        soup = BeautifulSoup(response.content)
        val = soup.find_all('div', {'class': 'coupon-code_new coupon-code inline-block'})
        return val[0].text.strip()
    except Exception as excpt:  # was Python-2-only "except Exception, excpt"
        print(excpt)
        return ''
#print search_page('paytm')
| {
"repo_name": "Akcps/coupondunia_api",
"path": "coupondunia.py",
"copies": "1",
"size": "1752",
"license": "mit",
"hash": -658436608865840300,
"line_mean": 34.7551020408,
"line_max": 103,
"alpha_frac": 0.6050228311,
"autogenerated": false,
"ratio": 3.859030837004405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4964053668104405,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajitkumar'
import requests
from bs4 import BeautifulSoup
class Torrent:
    """Plain data holder for a single torrent search result."""

    # string-valued fields that all start out empty
    _TEXT_FIELDS = ('name', 'link', 'verified_by', 'uploaded_date',
                    'size', 'seeds', 'peers')

    def __init__(self):
        for field in self._TEXT_FIELDS:
            setattr(self, field, '')
        self.trackers = []       # tracker page URLs filled in by search_trackers()
        self.magnetic_link = {}  # site -> magnet URI
def search(search_string):
    """Query torrentz for *search_string*; return up to 3 Torrent objects.

    On error, returns whatever was collected so far (possibly empty).
    """
    torrentz_list = []  # defined before the try so the except path can return it
    try:
        payload = {'q': search_string}
        res = requests.get('https://torrentz.com/search', params=payload)
        soup = BeautifulSoup(res.content)
        links = soup.find_all('dl')
        for torrent in links[:3]:
            torrentz = Torrent()
            torrent_link = 'https://torrentz.in'
            torrent_link += torrent.a['href']
            torrentz.link = torrent_link
            # the title is split across <b> tags plus a trailing text node
            title_description = torrent.a.contents[-1]
            title_list = torrent.find_all('b')
            title = []
            for title_desc in title_list:
                title.append(title_desc.contents[0])
            title = (' ').join(title)
            title += title_description
            torrentz.name = title
            descriptions = torrent.find_all('span')
            torrentz.verified_by = descriptions[0].contents[0]
            torrentz.uploaded_date = descriptions[1].contents[0]['title']
            torrentz.size = descriptions[3].contents[0]
            torrentz.seeds = descriptions[4].contents[0]
            torrentz.peers = descriptions[5].contents[0]
            torrentz_list.append(torrentz)
        return torrentz_list
    except Exception as excpt:  # was Python-2-only "except Exception, excpt"
        return torrentz_list
def search_trackers(torrent_list):
    """Populate .trackers and .magnetic_link on each Torrent in place.

    Scrapes each torrent's detail page for up to 3 tracker links and
    resolves a magnet URI per tracker site when available.
    """
    for torrent in torrent_list:
        # defined before the try so the except path can still assign them
        download_link_list, maglink = list(), dict()
        try:
            # BUG FIX: was requests.get(torrent_list[0].link), which fetched
            # the FIRST torrent's page for every element of the list
            res = requests.get(torrent.link)
            soup = BeautifulSoup(res.content)
            download_link = soup.find_all('dt')
            for link in download_link[:3]:
                if str(link.a['href']).startswith('http'):
                    download_link_list.append(link.a['href'])
                    torrent_site = link.a['href'].split('/')
                    magnet = get_magnetic_link(link.a['href'])
                    if magnet:
                        maglink[torrent_site[2]] = magnet
            torrent.trackers = download_link_list
            torrent.magnetic_link = maglink
        except Exception as excpt:  # was Python-2-only "except Exception, excpt"
            # keep whatever was collected before the failure
            torrent.trackers = download_link_list
            torrent.magnetic_link = maglink
def get_magnetic_link(link):
    """Return the last magnet URI found on the tracker page at *link*.

    Returns '' (or the best match so far) if parsing fails part-way.
    """
    res = requests.get(link)
    soup = BeautifulSoup(res.content)
    magnetic_links = soup.find_all('a')
    maglink = ''
    try:
        for magnetic_link in magnetic_links:
            if str(magnetic_link['href']).startswith('magnet'):
                maglink = magnetic_link['href']
        return maglink
    except Exception as excpt:  # was Python-2-only syntax; anchors without href raise KeyError
        return maglink
def convert_to_json(object_list):
    """Map each object onto its attribute dict so it can be JSON-serialized."""
    return [item.__dict__ for item in object_list]
def search_torrent(params):
    """Search, enrich results with trackers/magnets, and return JSON-ready dicts."""
    results = search(params)
    search_trackers(results)
    return convert_to_json(results)
| {
"repo_name": "Akcps/torrents",
"path": "torrentz.py",
"copies": "1",
"size": "3233",
"license": "mit",
"hash": -6092345530614668000,
"line_mean": 30.3883495146,
"line_max": 73,
"alpha_frac": 0.5589236004,
"autogenerated": false,
"ratio": 3.9235436893203883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4982467289720388,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajrenold'
__author__ = 'ajrenold'
# Lib Imports
from flask import (
redirect, url_for, abort,
render_template, request,
Blueprint
)
from flask.ext.stormpath import (
login_required,
user,
)
from stormpath.error import Error as StormpathError
# Our Imports
from core import db
from models import ( Author, AppUser, Book, BookUploader, Genre, stormpathUserHash )
from settings import CLOUDFRONTURL, S3URL
author_routes = Blueprint('author_routes', __name__,
template_folder='templates')
@author_routes.route('/author/<author_slug>')
def authorpage(author_slug):
    """Render an author's public page by slug; fall back to the index."""
    if not author_slug:
        return redirect(url_for('index'))
    author = Author.query.filter_by(slug=author_slug).first()
    if author is None:
        return redirect(url_for('index'))
    return render_template('authorpage.html', author=author)
@author_routes.route('/author_settings/', methods=['GET', 'POST'])
@login_required
def settings():
    """GET: show the author-settings form; POST: promote the current user
    to an author built from the submitted form fields."""
    # map the Stormpath account id onto our local AppUser primary key
    user_id = user.get_id()
    app_user = AppUser.query.get(stormpathUserHash(user_id))
    if request.method == 'GET':
        return render_template('author_settings.html', author=app_user.author)
    # POST: only non-authors can be promoted; existing authors fall through
    if not app_user.is_author:
        author = Author.author_from_dict(**request.form.to_dict())
        db.session.add(author)
        db.session.commit()
        app_user.become_author(author)
    ## TODO Handle edit author attributes
    """
    if app_user.is_author:
        author_name = request.form.get('author_name')
        if author_name != app_user.author.name:
            app_user.author.update_name(author_name)
    """
    return render_template('author_settings.html', author=app_user.author)
@author_routes.route('/dashboard/')
@login_required
def author_dashboard():
    """Show the dashboard for authors; send non-authors back to the index.

    BUG FIX: a non-author previously fell off the end of the function so
    Flask received None (a 500). Now redirect, matching the sibling views.
    """
    user_id = user.get_id()
    app_user = AppUser.query.get(stormpathUserHash(user_id))
    if app_user.is_author:
        return render_template('author_dashboard.html', author=app_user.author)
    return redirect(url_for('index'))
def _genres_from_form(form):
    """Parse the comma-separated 'genres' form field into Genre rows.

    Creates (and session-adds) any genre that does not exist yet.
    Shared by add_book and edit_book, which previously duplicated this.
    """
    genres = []
    for raw in form.get('genres').split(','):
        genre_name = raw.strip().title()
        # BUG FIX: ''.isspace() is False, so the old "not isspace()" check
        # let empty segments (e.g. a trailing comma) create nameless genres
        if genre_name:
            genre = Genre.query.filter_by(name=genre_name).first()
            if not genre:
                genre = Genre(genre_name)
                db.session.add(genre)
            genres.append(genre)
    return genres


@author_routes.route('/dashboard/add', methods=['GET', 'POST'])
@login_required
def add_book():
    """GET: show the upload form; POST: upload an epub + cover and create the Book."""
    user_id = user.get_id()
    app_user = AppUser.query.get(stormpathUserHash(user_id))
    if app_user.is_author:
        if request.method == 'GET':
            return render_template('add_book.html', author=app_user.author)
        book_file = request.files.get('epub_file', None)
        cover_file = request.files.get('cover_file', None)
        # POST is a epub file upload (some browsers report octet-stream)
        if book_file.content_type == 'application/epub+zip' or book_file.content_type == 'application/octet-stream':
            book_upload = BookUploader(book_file.filename, book_file, cover_file)
            # NOTE(review): epub_key already starts with 'epubs/' (see
            # BookUploader.uploadEpubS3), so this prepends it twice -- confirm
            # the intended URL layout before changing
            epub_url = S3URL + 'epubs/' + book_upload.epub_key
            cover_url = CLOUDFRONTURL + book_upload.cover_key
            genres = _genres_from_form(request.form)
            book_data = {
                'author': app_user.author,
                'isbn': request.form.get('isbn'),
                'title': request.form.get('title'),
                'publisher': request.form.get('publisher'),
                'description': request.form.get('description'),
                'genres': genres,
                'epub_url': epub_url,
                'cover_large': cover_url
            }
            book = Book.book_from_dict(**book_data)
            db.session.add(book)
            db.session.commit()
            return redirect(url_for('author_routes.author_dashboard'))
    return redirect(url_for('index'))


@author_routes.route('/dashboard/edit/<int:book_id>', methods=['GET', 'POST'])
@login_required
def edit_book(book_id):
    """GET: show the edit form; POST: update a book the current author owns."""
    user_id = user.get_id()
    app_user = AppUser.query.get(stormpathUserHash(user_id))
    book = Book.query.get(book_id)
    # only the owning author may edit
    if book in app_user.author.books:
        if request.method == 'GET':
            return render_template('edit_book.html', book=book)
        book.genres = _genres_from_form(request.form)
        book.title = request.form.get('title')
        book.isbn = request.form.get('isbn')
        book.publisher = request.form.get('publisher')
        book.description = request.form.get('description')
        db.session.add(book)
        db.session.commit()
        return redirect(url_for('author_routes.author_dashboard'))
    return redirect(url_for('index'))
| {
"repo_name": "futurepress/futurepress",
"path": "futurepress/author_routes.py",
"copies": "1",
"size": "5175",
"license": "bsd-2-clause",
"hash": -3833986396660922400,
"line_mean": 33.0460526316,
"line_max": 116,
"alpha_frac": 0.5806763285,
"autogenerated": false,
"ratio": 3.788433382137628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9862572411569588,
"avg_score": 0.0013074598136081628,
"num_lines": 152
} |
__author__ = 'ajrenold'
from copy import deepcopy
from test.data import books, authors
from models import Book, Author, AppUser, Genre, stormpathUserHash
def bootstrapTestDB(db):
    """
    Takes a created SQLAlchemy db and bootstraps the tables with dummy data.

    Load order matters: authors first, then genres, then books (which
    reference both), then app users (which reference authors).
    """
    # books are mutated below (genre/author names replaced by rows),
    # so work on copies
    books_copy, authors_copy = deepcopy(books), deepcopy(authors)
    # load authors
    for author_data in authors:
        db.session.add(Author.author_from_dict(**author_data))
    db.session.commit()
    # load genres (flush so duplicate names within the loop are visible)
    for book_data in books_copy:
        for genre in book_data['genres']:
            g = Genre.query.filter_by(name=genre).first()
            if not g:
                db.session.add(Genre(genre))
                db.session.flush()
    db.session.commit()
    # load books: swap genre/author name strings for their DB rows
    for book_data in books_copy:
        book_data['genres'] = [ Genre.query.filter_by(name=genre_item).first()
                              for genre_item in book_data['genres'] ]
        book_data['author'] = Author.query.filter_by(name=book_data['author']).first()
        db.session.add(Book.book_from_dict(**book_data))
    # commit the changes
    db.session.commit()
    # load users
    # NOTE(review): AppUser is constructed here with (user_href, author);
    # the AppUser model shown elsewhere takes only user_href -- confirm
    # which model version this bootstrap targets
    for author_data in authors_copy:
        author = Author.query.filter_by(name=author_data['name']).first()
        db.session.add(AppUser(author_data['user_href'], author))
    db.session.commit()
"repo_name": "futurepress/futurepress",
"path": "test/db_bootstrap.py",
"copies": "1",
"size": "1382",
"license": "bsd-2-clause",
"hash": 4324591252087356400,
"line_mean": 31.9285714286,
"line_max": 86,
"alpha_frac": 0.6215629522,
"autogenerated": false,
"ratio": 3.6657824933687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47873454455687,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajrenold'
from core import db
from model_utils import stormpathUserHash
# Association table for the AppUser <-> Book many-to-many relationship
# (which books a user has purchased).
user_books = db.Table('user_books',
    db.Column('book_id', db.Integer, db.ForeignKey('books.book_id')),
    db.Column('user_id', db.String(128), db.ForeignKey('app_users.user_id'))
)
class AppUser(db.Model):
    """Application user keyed by a hash of the Stormpath account href."""

    __tablename__ = 'app_users'

    # primary key: stormpathUserHash(user_href)
    user_id = db.Column(db.String(128), primary_key=True)

    # relationships
    author = db.relationship('Author', uselist=False, backref='app_user')
    # purchased books via the user_books association table
    books = db.relationship('Book', secondary=user_books,
                            backref=db.backref('app_users', lazy='joined'), lazy='dynamic')

    # other columns
    user_href = db.Column(db.String(1024), nullable=False)
    is_author = db.Column(db.Boolean, nullable=False)
    ios_token = db.Column(db.String(1024))

    def __init__(self, storm_path_user_href):
        self.user_id = stormpathUserHash(storm_path_user_href)
        self.user_href = storm_path_user_href
        self.is_author = False

    def _safe_commit(self):
        """Commit the session, rolling back on failure.

        Replaces the duplicated bare ``except:`` blocks, which silently
        caught everything including KeyboardInterrupt/SystemExit.
        """
        try:
            db.session.commit()
        except Exception:
            # TODO flash error message
            db.session.rollback()

    def become_author(self, Author):
        # parameter name kept as-is for backward compatibility with
        # any keyword callers
        self.author = Author
        self.is_author = True
        self._safe_commit()

    def purchase_book(self, book):
        self.books.append(book)
        self._safe_commit()

    def set_ios_token(self, ios_token):
        self.ios_token = ios_token
        self._safe_commit()

    def __repr__(self):
        return '<user {}>'.format(self.user_id)

    def as_dict(self):
        """Serializable view; embeds the author dict when the user is an author."""
        return {
            'user_id': self.user_id,
            'user_href': self.user_href,
            'is_author': self.is_author,
            'author': "" if not self.is_author else self.author.as_dict()
        }
"repo_name": "futurepress/futurepress",
"path": "models/appuser_model.py",
"copies": "1",
"size": "2002",
"license": "bsd-2-clause",
"hash": 594132150376745900,
"line_mean": 26.8194444444,
"line_max": 90,
"alpha_frac": 0.5804195804,
"autogenerated": false,
"ratio": 3.6268115942028984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47072311746028983,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajrenold'
# Lib Imports
from flask import ( request, session, g,
redirect, url_for, abort,
render_template, flash, jsonify,
make_response, Blueprint
)
from flask.ext.login import make_secure_token
from flask.ext.stormpath import (StormpathManager,
User,
login_required,
login_user,
logout_user,
user
)
from stormpath.error import Error as StormpathError
# Our Imports
from models import ( AppUser, stormpathUserHash )
from core import db
# Blueprint grouping all auth-related routes; templates resolve locally.
auth_routes = Blueprint('auth_routes', __name__,
                        template_folder='templates')
# Bound to the real app lazily in on_load() below.
stormpath_manager = StormpathManager()
@auth_routes.record_once
def on_load(state):
    # Runs once when the blueprint is registered: bind the Stormpath
    # manager to the actual app and point its login view at our /login.
    stormpath_manager.init_app(state.app)
    stormpath_manager.login_view = 'auth_routes.login'
@auth_routes.route('/register', methods=['GET', 'POST'])
def register():
    """
    This view allows a user to register for the site.

    This will create a new User in Stormpath, and then log the user into their
    new account immediately (no email verification required).
    """
    if request.method == 'GET':
        if user.is_authenticated():
            return redirect(url_for('index'))
        return render_template('register.html')
    try:
        _user = stormpath_manager.application.accounts.create({
            'username': request.form.get('username'),
            'email': request.form.get('email'),
            'password': request.form.get('password'),
            'given_name': request.form.get('first_name'),
            'surname': request.form.get('last_name')
        })
        _user.__class__ = User
        # mirror the Stormpath account into our local AppUser table
        app_user = AppUser(_user.get_id())
        db.session.add(app_user)
        db.session.commit()
    except StormpathError as err:  # was Python-2-only "except StormpathError, err"
        return render_template('register.html', error=err.message)
    login_user(_user, remember=True)
    return redirect(url_for('index'))
@auth_routes.route('/login', methods=['GET', 'POST'])
def login():
    """ User login/auth/session management """
    if request.method == 'GET':
        if user.is_authenticated():
            return redirect(url_for('index'))
        return render_template('login.html')
    try:
        _user = User.from_login(
            request.form.get('email'),
            request.form.get('password'),
        )
    except StormpathError as err:  # was Python-2-only "except StormpathError, err"
        return render_template('login.html', error=err.message)
    login_user(_user, remember=True)
    return redirect(url_for('index'))
@auth_routes.route('/authorize_ios', methods=['POST'])
def authorize_iOS():
    """Token-based auth endpoint for the iOS client.

    Verifies credentials against Stormpath and returns a JSON payload
    containing a derived ios_token on success.
    """
    username = request.form.get('username', '')
    password = request.form.get('password', '')
    user = None
    try:
        user = User.from_login(username, password)
    except StormpathError:  # was Python-2-only "except X, err"; err was unused
        pass
    if user:
        app_user = AppUser.query.get(stormpathUserHash(user.get_id()))
        # deterministic token derived from the credentials
        t = make_secure_token(username + password)
        if app_user.ios_token != t:
            app_user.set_ios_token(t)
        return jsonify({ 'username': user.username,
                         'user_id': app_user.user_id,
                         'authenticated': True,
                         'ios_token': t
                       })
    else:
        return jsonify({ 'username': username,
                         'authenticated': False,
                         'ios_token': None
                       })
@auth_routes.route('/logout')
@login_required
def logout():
    """End the current user's session and send them back to the index."""
    logout_user()
    home = url_for('index')
    return redirect(home)
"repo_name": "futurepress/futurepress",
"path": "futurepress/auth_routes.py",
"copies": "1",
"size": "3750",
"license": "bsd-2-clause",
"hash": -1569051312197705200,
"line_mean": 29.7459016393,
"line_max": 78,
"alpha_frac": 0.5725333333,
"autogenerated": false,
"ratio": 4.084967320261438,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5157500653561438,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajrenold'
# Libs
from datetime import date
from urllib2 import urlopen, HTTPError, Request
from urlparse import urlparse
import re
from zipfile import ZipFile
import StringIO
from mimetypes import guess_type
import boto
from boto.s3.key import Key
from boto.s3.connection import S3Connection
# Our Imports
from core import db
from model_utils import slugify
from key import s3access, s3secret
from settings import S3BUCKET
# Association table for the Book <-> Genre many-to-many relationship.
genre_relations = db.Table('genre_relations',
    db.Column('genre_id', db.Integer, db.ForeignKey('genres.genre_id')),
    db.Column('book_id', db.Integer, db.ForeignKey('books.book_id'))
)
class Book(db.Model):
    """A published book; belongs to one Author, many-to-many with Genre
    (via genre_relations) and with AppUser (via the user_books table)."""

    __tablename__ = 'books'
    # primary key
    book_id = db.Column(db.Integer, primary_key=True)
    # foreign key to the owning author
    author_id = db.Column(db.Integer, db.ForeignKey('author.author_id'))
    # relationships
    genres = db.relationship('Genre', secondary=genre_relations,
                             backref=db.backref('books', lazy='joined'), lazy='dynamic')
    # user_books table
    # other columns
    title = db.Column(db.String(256), nullable=False)
    isbn = db.Column(db.String(256), nullable=False)
    publisher = db.Column(db.String(256), nullable=False)
    description = db.Column(db.String(1024))
    cover_large = db.Column(db.String(256), nullable=False)
    cover_thumb = db.Column(db.String(256), nullable=False)
    slug = db.Column(db.String(256), nullable=False)
    last_updated = db.Column(db.Date)
    published = db.Column(db.Date)
    epub_url = db.Column(db.String(256), nullable=False)
    stream_url = db.Column(db.String(256), nullable=False)
    atom_entry_url = db.Column(db.String(256), nullable=False)

    def __init__(self, title, isbn, author, publisher, description,
                 genres, cover_large, cover_thumb, epub_url, stream_url, atom_entry_url):
        """Populate all columns; slug derives from the title."""
        self.title = title
        self.isbn = isbn
        self.author = author
        self.publisher = publisher
        self.description = description
        self.genres = genres
        self.cover_large = cover_large
        self.cover_thumb = cover_thumb
        self.slug = slugify(title)
        self.last_updated = date.today()
        # hard-coded placeholder publication date -- TODO confirm intended
        self.published = date(2010, 10, 31)
        self.epub_url = epub_url
        self.stream_url = stream_url
        self.atom_entry_url = atom_entry_url

    @staticmethod
    def book_from_dict(**kwargs):
        # Alternate constructor: any missing field defaults to "".
        return Book(kwargs.get('title', ""),
                    kwargs.get('isbn', ""),
                    kwargs.get('author', ""),
                    kwargs.get('publisher', ""),
                    kwargs.get('description', ""),
                    kwargs.get('genres', ""),
                    kwargs.get('cover_large', ""),
                    kwargs.get('cover_thumb', ""),
                    kwargs.get('epub_url', ""),
                    kwargs.get('stream_url', ""),
                    kwargs.get('atom_entry_url', ""))

    def __repr__(self):
        return '<title {}>'.format(self.title)

    def as_dict(self):
        # JSON-ready summary; nests the author and genre dicts.
        return { 'title': self.title,
                 'isbn': self.isbn,
                 'author': self.author.as_dict(),
                 'publisher': self.publisher,
                 'description': self.description,
                 'genres': [ g.as_dict() for g in self.genres ],
                 'slug': self.slug }
## also see gutenberg book mirror, where 135 =
## http://snowy.arsc.alaska.edu/gutenberg/cache/generated/135/pg135-images.epub
class BookUploader():
    """Uploads an epub and its cover image to S3 at construction time.

    Init with *book_file* as an uploaded file object; the resulting S3
    keys are exposed as .epub_key and .cover_key.
    """

    def __init__(self, filename, book_file, cover_file):
        if book_file:
            self.file_dir = self.getFileDir(filename)
            self.epub_key = self.uploadEpubS3(book_file, self.file_dir)
            self.cover_key = self.uploadCoverS3(cover_file, self.file_dir)
            #self.zip_file = self.getZipFile(book_file)
            #self.uploadUnzippedS3(self.zip_file, self.file_dir) ## Only uploads unzipped epub

    def _bucket(self):
        """Open the configured S3 bucket (connection setup shared by all uploads)."""
        conn = S3Connection(s3access, s3secret)
        return conn.get_bucket(S3BUCKET)

    def getZipFile(self, book_file):
        # treat the uploaded epub as a zip archive, in memory
        return ZipFile(StringIO.StringIO(book_file.read()))

    def getFileDir(self, filename):
        # "book.epub" -> "book/"
        return filename[:filename.find('.epub')] + '/'

    def uploadEpubS3(self, book_file, file_dir):
        """Upload the raw epub under epubs/<filename>; return the S3 key."""
        k = Key(self._bucket())
        key_name = 'epubs/' + book_file.filename
        k.key = key_name
        k.set_metadata('Content-Type', 'application/epub+zip')
        k.set_contents_from_string(book_file.read())
        return key_name

    def uploadCoverS3(self, cover_file, file_dir):
        """Upload the cover image under covers/<filename>; return the S3 key."""
        k = Key(self._bucket())
        key_name = 'covers/' + cover_file.filename
        k.key = key_name
        file_mime = guess_type(cover_file.filename)[0]
        k.set_metadata('Content-Type', file_mime)
        k.set_contents_from_string(cover_file.read())
        return key_name

    def uploadUnzippedS3(self, zip_file, file_dir):
        """Upload every member of the epub zip under books/<file_dir>.

        PERF FIX: the S3 connection/bucket is now opened once, instead of
        being re-created for every file inside the loop.
        """
        bucket = self._bucket()
        for f in zip_file.filelist:
            file_mime = guess_type(f.filename)[0]
            k = Key(bucket)
            k.key = 'books/' + file_dir + f.filename
            if file_mime:
                k.set_metadata('Content-Type', file_mime)
            k.set_contents_from_string(zip_file.read(f))
class ValidationError(Exception):
    """Module-specific error type for upload/validation failures
    (not raised anywhere in this module's visible code)."""
    pass
"repo_name": "futurepress/futurepress",
"path": "models/book_model.py",
"copies": "1",
"size": "5530",
"license": "bsd-2-clause",
"hash": -2443821840085558300,
"line_mean": 31.9226190476,
"line_max": 94,
"alpha_frac": 0.6028933092,
"autogenerated": false,
"ratio": 3.6309914642153642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9720494371032599,
"avg_score": 0.002678080476553037,
"num_lines": 168
} |
__author__ = 'ajrenold'
# Libs
from flask import url_for
from flask.ext.sqlalchemy import ( SQLAlchemy )
# Our Imports
from core import db
from model_utils import slugify
class Author(db.Model):
    """An author profile; one-to-many with Book, optionally linked to an AppUser."""

    __tablename__ = 'author'

    # primary key
    author_id = db.Column(db.Integer, primary_key=True)

    # relations
    books = db.relationship('Book', backref='author',
                            lazy='dynamic')

    # foreign keys
    user_id = db.Column(db.String(128), db.ForeignKey('app_users.user_id'))

    # other columns
    name = db.Column(db.String(256), nullable=False, unique=True)
    bio = db.Column(db.String(10000), nullable=False)
    picture = db.Column(db.String(256), nullable=False)
    website = db.Column(db.String(256), nullable=False)
    blog = db.Column(db.String(256), nullable=False)
    twitter_id = db.Column(db.String(256), nullable=False)
    slug = db.Column(db.String(256), nullable=False, unique=True)

    def __init__(self, name, bio, picture, website, blog, twitter_id):
        self.name = name
        self.bio = bio
        self.picture = picture
        self.website = website
        self.blog = blog
        self.twitter_id = twitter_id
        self.slug = slugify(name)  # URL-safe identifier derived from the name

    @staticmethod
    def author_from_dict(**kwargs):
        """Alternate constructor from a (form) dict; missing fields default to ""."""
        return Author(kwargs.get('name', ""),
                      kwargs.get('bio', ""),
                      kwargs.get('picture', ""),
                      kwargs.get('website', ""),
                      kwargs.get('blog', ""),
                      kwargs.get('twitter_id', ""))

    def update_name(self, name):
        """Rename the author (the slug follows the name) and commit."""
        self.name = name
        self.slug = slugify(name)
        try:
            db.session.commit()
        except Exception:  # was a bare except: don't swallow SystemExit etc.
            # TODO flash error message
            db.session.rollback()

    def __repr__(self):
        return '<author {}>'.format(self.name)

    def as_dict(self):
        """JSON-ready summary, including an absolute URI to the author page."""
        return { 'author_id': self.author_id,
                 'uri': url_for('author_routes.authorpage', author_slug=self.slug, _external=True),
                 'name': self.name,
                 'slug': self.slug }
"repo_name": "futurepress/futurepress",
"path": "models/author_model.py",
"copies": "1",
"size": "2105",
"license": "bsd-2-clause",
"hash": -4771559474105328000,
"line_mean": 30.4328358209,
"line_max": 99,
"alpha_frac": 0.5662707838,
"autogenerated": false,
"ratio": 3.8553113553113554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9904184525728992,
"avg_score": 0.003479522676472709,
"num_lines": 67
} |
__author__ = "ajshajib", "sibirrer"
"""
Multi-Gaussian expansion fitting, based on Capellari 2002, http://adsabs.harvard.edu/abs/2002MNRAS.333..400C
"""
import numpy as np
from scipy.optimize import nnls
from LightProfiles.gaussian import Gaussian
gaussian_func = Gaussian()
def gaussian(R, sigma, amp):
    """
    Evaluate a normalized 2D Gaussian profile at radius R.

    :param R: radius (scalar or numpy array)
    :param sigma: gaussian sigma
    :param amp: normalization (total flux)
    :return: profile value(s) at R
    """
    c = amp / (2 * np.pi * sigma**2)
    return c * np.exp(-(R/float(sigma))**2/2.)


def mge_1d(r_array, flux_r, N=20):
    """
    Fit a 1D flux profile with a Multi-Gaussian Expansion.

    Retries with one fewer Gaussian whenever the fit fails, down to N == 1.

    :param r_array: list of radii (numpy array)
    :param flux_r: list of flux values (numpy array)
    :param N: number of Gaussians
    :return: (amplitudes, sigmas, nnls residual norm) for the best 1d fit
    :raises ValueError: if fitting keeps failing all the way down to N == 0
    """
    try:
        amplitudes, sigmas, norm = _mge_1d(r_array, flux_r, N)
    except Exception:  # was a bare except:; keep the retry-with-fewer-Gaussians behavior
        N_new = N - 1
        if N_new == 0:
            raise ValueError("Number of MGE went down to zero! This should not happen!")
        amplitudes, sigmas, norm = mge_1d(r_array, flux_r, N=N_new)
    return amplitudes, sigmas, norm


def _mge_1d(r_array, flux_r, N=20):
    """
    Single non-negative least-squares MGE fit with log-spaced sigmas.

    :param r_array: radii (numpy array, ascending)
    :param flux_r: flux values at r_array
    :param N: number of Gaussian components
    :return: (amplitudes, sigmas, nnls residual norm)
    """
    # log-spaced sigmas strictly inside (r_min, r_max / 2)
    sigmas = np.logspace(np.log10(r_array[0]), np.log10(r_array[-1] / 2.), N + 2)[1:-1]
    # design matrix: one unit-amplitude Gaussian per column
    A = np.zeros((len(flux_r), N))
    for j in np.arange(A.shape[1]):
        A[:, j] = gaussian(r_array, sigmas[j], 1.)
    amplitudes, norm = nnls(A, flux_r)
    return amplitudes, sigmas, norm
def de_projection_3d(amplitudes, sigmas):
    """
    De-project Gaussian amplitudes from a 2d projected profile to 3d.

    Works element-wise, so a whole list of MGE components converts at once.

    :param amplitudes: projected (2d) amplitudes
    :param sigmas: Gaussian sigmas (unchanged by the de-projection)
    :return: (3d amplitudes, sigmas)
    """
    amplitudes_3d = amplitudes / sigmas / np.sqrt(2 * np.pi)
    return amplitudes_3d, sigmas
"repo_name": "sibirrer/astrofunc",
"path": "astrofunc/multi_gauss_expansion.py",
"copies": "1",
"size": "1881",
"license": "mit",
"hash": 7297643700440295000,
"line_mean": 26.6764705882,
"line_max": 108,
"alpha_frac": 0.6135034556,
"autogenerated": false,
"ratio": 2.911764705882353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40252681614823527,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajtag'
from math import pi, sin
# import math
# import os.path
# from TrinRoofPlayer.Renderer import ceiling, new_random
# import pygame
from TrinRoofPlayer.utils import *
from TrinRoofPlayer.Objects import *
from TrinRoofPlayer.Constants import *
from pygame.math import Vector2
import numpy as np
# To change the random seed, add self.rand.seed(1) to the __init__ function
# of the group and find a number that works!
# Common RGB(A) color constants used by the effects below.
white = 255, 255, 255
transparent = 255, 255, 255, 0  # fully transparent white (alpha 0)
black = 0, 0, 0
class Star(Sprite):
    """One twinkling lamp: brightness ramps up then down over its lifetime."""

    def __init__(self, lamp, duration, color):
        super().__init__()
        self.lamp = lamp
        self.color = color
        # time runs from -1.0 to +1.0; brightness peaks at time == 0
        self.rate = 1.0 / (get_fps() * duration)
        self.time = -1.0

    def update(self):
        self.time += self.rate
        if self.time >= 1.0:
            self.kill()

    def draw(self, surface, fade):
        # scale the base color by distance from mid-life and the global fade
        level = (1.0 - abs(self.time)) * fade
        shaded = tuple(int(channel * level) for channel in self.color)
        surface.set_at((self.lamp.x, self.lamp.y), shaded)
class StarrySky(Group):
    """Field of Star sprites that ramps up to max_stars and can fade out."""

    def __init__(self, max_stars, ramp_time, min_time, max_time):
        super().__init__()
        self.lamps = ceiling.lamps
        # stars added per frame while ramping up
        self.ramp_rate = max_stars / (get_fps() * ramp_time)
        self.num_stars = 1.0
        self.max_stars = max_stars
        self.fade = 1.0       # global brightness multiplier
        self.fade_rate = 0.0  # becomes negative once end() is called
        self.min_time = min_time   # star lifetime range (seconds)
        self.max_time = max_time

    def add_star(self):
        # self.rand presumably provided by the Group base class -- TODO confirm
        lamp = self.rand.choice(self.lamps)
        color = hls_to_rgb(self.rand.randint(40, 60), self.rand.randint(20, 100), self.rand.randint(80, 100))
        self.add(Star(lamp, self.rand.uniform(self.min_time, self.max_time), color))

    def update(self):
        self.fade += self.fade_rate
        if self.fade <= 0.0:
            # fully faded out: remove everything and signal the effect ended
            for obj in self:
                obj.kill()
            raise StopIteration
        if self.num_stars < self.max_stars:
            self.num_stars += self.ramp_rate
        elif self.num_stars > self.max_stars:
            self.num_stars = self.max_stars
        # top up the population to the current target
        while len(self) < self.num_stars:
            self.add_star()
        super().update()

    def draw(self, surface):
        for star in self:
            star.draw(surface, self.fade)

    def end(self, fade_time):
        # start fading out over fade_time seconds
        self.fade_rate = -1.0 / (get_fps() * fade_time)
class Sun(MoveableThing):
    """Rippling sun disc rendered onto its own SRCALPHA surface each frame."""

    def __init__(self, pos, size, extra_bright, ripple_height, ripple_count, ripple_speed, duration = None):
        super().__init__(pos, size, duration)
        self.s = pygame.Surface(MADRIX_SIZE, flags = pygame.SRCALPHA)
        # radians the ripple phase advances per frame
        self.ripple_speed = ripple_speed * math.pi * 2.0 / get_fps()
        # total phase span from rim to center
        self.ripple_distance = ripple_count * math.pi * 2.0
        self.ripple_height = ripple_height / 2.0
        self.ripple = 0.0
        self.extra_bright = extra_bright

    def update(self):
        super().update()
        self.ripple += self.ripple_speed
        if self.ripple > math.pi * 2.0:
            self.ripple -= math.pi * 2.0

    def draw(self, surface):
        self.s.fill(transparent)
        a = pygame.PixelArray(self.s)
        # bounding box of the disc, clamped to the surface
        left = max(int(self.x - self.size) - 1, 0)
        right = min(int(self.x + self.size) + 2, a.shape[0])
        top = max(int(self.y - self.size) - 1, 0)
        bottom = min(int(self.y + self.size) + 2, a.shape[1])
        for x in range(left, right):
            for y in range(top, bottom):
                dx = self.x - x
                dy = self.y - y
                # (removed a dead "dist = dx * dx + dy * dy" that was
                # immediately overwritten by the pythagoras() call)
                dist = pythagoras((dx, dy))
                if dist <= self.size + 1:
                    height = max(1.0 - dist / self.size, 0.0)
                    ripple_pos = self.ripple + height * self.ripple_distance
                    height -= height * self.ripple_height * (sin(ripple_pos) + 1)
                    if dist <= self.size:
                        alpha = 255
                    else:
                        # anti-aliased rim
                        alpha = 255 * (self.size + 1 - dist)
                    alpha = int(alpha * self.fade)
                    # NOTE(review): alpha is computed but never written to the
                    # pixel, so the rim anti-aliasing and fade have no visible
                    # effect -- confirm whether a[x, y] should be (r, g, b, alpha)
                    height = int((190 * height + 255 * self.extra_bright))
                    r = 255
                    g = min(height, 255)
                    b = max(height - 255, 0)
                    a[x, y] = (r, g, b)
        del a
        surface.blit(self.s, (0, 0))
class Fog(Sprite):
    """Full-screen coloured overlay that fades in, holds, then fades out.

    With no duration the fog appears at full strength immediately;
    otherwise it ramps from transparent to opaque over `duration` seconds.
    Call end() to fade back out; once fully faded, update() raises
    StopIteration to signal the effect has finished.
    """
    def __init__(self, color, duration = None):
        # Bugfix: the Sprite base constructor was never called, so the
        # pygame group bookkeeping (add()/kill()) was never initialised.
        super().__init__()
        if duration is None:
            self.level = 1.0
            self.rate = None
        else:
            self.rate = 1.0 / (get_fps() * duration)
            self.level = 0.0
        self.s = pygame.Surface(MADRIX_SIZE)
        self.s.fill(color)
    def update(self):
        if self.rate is not None:
            self.level += self.rate
            if self.level > 1.0:
                # Fade-in complete: hold at full opacity.
                self.level = 1.0
                self.rate = None
        if self.level <= 0.0:
            raise StopIteration
    def end(self, duration = None):
        """Fade out over `duration` seconds, or stop immediately if None."""
        if duration is None:
            raise StopIteration
        self.rate = -1.0 / (get_fps() * duration)
    def draw(self, surface):
        self.s.set_alpha(int(255 * self.level))
        surface.blit(self.s, (0,0))
class Cloud(Sprite):
    """A noise-textured cloud drifting left-to-right across the roof."""
    def __init__(self, max_x, y, size, rand):
        super().__init__()
        # Start fully off-screen to the left.
        self.x = float(-size)
        self.y = y
        self.size = size
        # Random alpha noise.  The extra row (index `size`) is never
        # written, so it stays zero; bitmap[-1] below therefore yields a
        # fully transparent leading edge.
        self.bitmap = np.zeros((size + 1, size))
        self.max_x = max_x
        for x in range(size):
            for y in range(size):
                self.bitmap[x, y] = rand.random()
    def update(self):
        # Constant drift of 0.2 pixels per frame.
        self.x += 0.2
        if self.x > self.max_x:
            self.kill()
    def draw(self, pixels, fade):
        """Anti-aliased x transparency mask"""
        x_start = int(self.x)
        if self.x < 0:
            x_start -= 1
        # Sub-pixel offset used to blend between adjacent noise columns.
        x_offset = self.x - x_start
        bb = pygame.Rect((x_start, self.y), (self.size + 1, self.size))
        # Skip drawing entirely when the cloud lies in a lamp-free region.
        for r in offscreen:
            if r.contains(bb):
                return
        for y in range(self.size):
            py = y + self.y
            if py < 0 or py >= pixels.shape[1]:
                continue
            for x in range(self.size + 1):
                px = x_start + x
                if px < 0 or px >= pixels.shape[0]:
                    continue
                # Linear blend of neighbouring columns for smooth motion.
                v1 = self.bitmap[x - 1, y]
                v2 = self.bitmap[x, y]
                val = v2 + (v1 - v2) * x_offset
                new_alpha = int(255 * val * fade)
                # Max-blend with whatever alpha is already there.
                alpha = pixels[px, py]
                alpha = max(alpha, new_alpha)
                pixels[px, py] = alpha
class Clouds(Group):
    """Manages drifting clouds: spawn-rate ramp, greying, and blackout."""
    # Phases: normal white clouds, greying (storm brewing), fading to black.
    CLOUD_NORMAL = 1
    CLOUD_GREY = 2
    CLOUD_BLACK = 3
    def __init__(self, size, cloud_size, initial_prob, final_prob, ramp_duration):
        super().__init__()
        self.cloud_size = cloud_size
        self.s = pygame.Surface(size, flags = pygame.SRCALPHA)
        self.color = (255, 255, 255, 0)
        self.max_x = size[0]
        self.max_y = size[1] - cloud_size
        # Expected clouds spawned per frame, interpolated over the ramp.
        self.initial_prob = initial_prob
        self.final_prob = final_prob
        self.ramp_speed = None
        self.time = None
        self.set_ramp(ramp_duration)
        self.phase = self.CLOUD_NORMAL
        # 0.0 = pure white clouds, 1.0 = fully grey.
        self.dirtyness = 0.0
    def set_ramp(self, duration):
        """Restart the 0..1 ramp clock over `duration` seconds."""
        self.ramp_speed = 1.0 / (get_fps() * duration)
        self.time = 0.0
    def grey(self, whiteness, duration):
        """Darken the clouds towards `whiteness` over `duration` seconds."""
        self.set_ramp(duration)
        self.dirtyness = 1.0 - whiteness
        self.phase = self.CLOUD_GREY
    def update(self):
        # Interpolate the spawn probability along the ramp.
        if self.time >= 1.0:
            p = self.final_prob
        else:
            a = self.initial_prob
            b = self.final_prob
            p = a + (b - a) * self.time
        # Keep spawning clouds while the probability budget allows.
        while True:
            p -= self.rand.random()
            if p < 0.0:
                break
            self.add(Cloud(self.max_x, self.rand.randrange(self.max_y), self.cloud_size, self.rand))
        self.time += self.ramp_speed
        super().update()
    def draw(self, surface):
        fade = 1.0
        shade = int(255 - 255 * self.dirtyness)
        if self.phase == self.CLOUD_BLACK:
            if self.time > 1.0:
                # Blackout complete: remove everything and end the effect.
                for s in self:
                    s.kill()
                raise StopIteration
            fade = 1.0 - self.time
        if self.phase == self.CLOUD_GREY:
            if self.time < 1.0:
                # Interpolate the grey level while the ramp is running.
                shade = int(255 - 255 * self.dirtyness * self.time)
        self.s.fill((shade, shade, shade, 0))
        # Clouds draw directly into the surface's alpha channel.
        a = pygame.surfarray.pixels_alpha(self.s)
        for cloud in self:
            cloud.draw(a, fade)
        del a
        surface.blit(self.s, (0, 0))
    def end(self, duration):
        """Fade all clouds to black over `duration` seconds, then stop."""
        self.set_ramp(duration)
        self.phase = self.CLOUD_BLACK
class Raindrops(Group):
    """Spawns RainSplash ripples on random lamps, ramping up then down."""
    def __init__(self, drop_size, drop_duration, max_drops, ramp_time):
        super().__init__()
        self.num_drops = 1.0
        self.max_drops = max_drops
        # Drops added per frame while ramping up (negative after end()).
        self.ramp_rate = max_drops / (get_fps() * ramp_time)
        self.drop_size = drop_size
        self.drop_speed = drop_size / (get_fps() * drop_duration)
    def update(self):
        if self.ramp_rate > 0.0:
            if self.num_drops < self.max_drops:
                self.num_drops += self.ramp_rate
            else:
                self.num_drops = self.max_drops
                self.ramp_rate = 0
        else:
            # Ramping down after end(): shrink the target count.
            self.num_drops += self.ramp_rate
        # Cap how many new splashes may start in a single frame.
        max_drops = int(self.num_drops / get_fps()) + 1
        missing = int(self.num_drops) - len(self)
        for _ in range(min(missing, max_drops)):
            lamp = self.rand.choice(ceiling.lamps)
            self.add(RainSplash(self.drop_size, self.drop_speed, lamp))
        super().update()
        # All splashes finished and no more due: the effect is over.
        if len(self) == 0 and self.num_drops <= 0:
            raise StopIteration
    def draw(self, surface):
        for drop in self:
            drop.draw(surface)
    def end(self, ramp_time = None):
        """Stop raining: taper off over ramp_time seconds, or immediately."""
        if ramp_time is not None:
            self.ramp_rate = -self.num_drops / (get_fps() * ramp_time)
        else:
            self.num_drops = 0
class RainSplash(Sprite):
    """An expanding blue circle centred on one lamp — a raindrop splash."""

    def __init__(self, max_r, speed, lamp):
        super().__init__()
        self.pos = (lamp.x, lamp.y)
        self.radius = 0
        self.max_radius = max_r
        self.speed = speed

    def update(self):
        # Grow each frame; vanish once fully expanded.
        self.radius += self.speed
        if self.radius >= self.max_radius:
            self.kill()

    def draw(self, surface):
        pygame.draw.circle(surface, (0, 0, 255, 255), self.pos, int(self.radius))
class Thunderstorm(Group):
    """Coordinates lightning sprites, organised into named sub-groups."""
    def __init__(self):
        self.log = logging.getLogger(self.__class__.__name__)
        Group.__init__(self)
        self.ticks = 0
        # When True, one sprite's flash triggers all the others
        # (see trigger_flash).
        self.group_trigger = False
        self.log.info('started storm')
        self.named_groups = {}
        self.program = None
        self.next_program = None
    def add_group(self, groupname, sprite):
        """Add a sprite both to the storm and to the named sub-group."""
        if groupname not in self.named_groups:
            self.named_groups[groupname] = Group()
        self.named_groups[groupname].add(sprite)
        self.add(sprite)
    def del_group(self, groupname):
        """Kill every sprite belonging to the named sub-group."""
        if groupname in self.named_groups:
            for s in self.named_groups[groupname].sprites():
                s.kill()
    def big_hit(self):
        """One big strike: a full-screen sheet flash plus three forks."""
        self.log.info('big_hit')
        self.add_group('big_hit_sheet', SheetLighting(pygame.Rect((0, 0), MADRIX_SIZE)))
        self.add_group('big_hit', ForkLighting(MADRIX_SIZE, (67, 51), (67, 0), '0'))
        self.add_group('big_hit', ForkLighting(MADRIX_SIZE, (67, 51), (3, 44), '1'))
        self.add_group('big_hit', ForkLighting(MADRIX_SIZE, (67, 51), (128, 45), '2'))
        self.trigger_flash(None, pulse=2*get_fps())
    def incoming(self, duration):
        """Storm approaching: sheet lightning drifts in from the edges."""
        self.log.info('incoming')
        self.empty()
        self.add_group('outer', SheetLighting(pygame.Rect( 0, -70, 132, 70), Vector2( 0, 36), duration, '1'))
        self.add_group('outer', SheetLighting(pygame.Rect(-132, 0, 132, 70), Vector2( 51, 0), duration, '2'))
        self.add_group('outer', SheetLighting(pygame.Rect( 132, 0, 132, 70), Vector2(-52, 0), duration, '3'))
    def outgoing(self, duration):
        """Storm receding: sheet lightning drifts back out to the edges."""
        self.log.info('outgoing')
        self.empty()
        self.add_group('outer', SheetLighting(pygame.Rect( 0, -34, 132, 70), Vector2( 0, -36), duration, '4'))
        self.add_group('outer', SheetLighting(pygame.Rect(-81, 0, 132, 70), Vector2( -51, 0), duration, '5'))
        self.add_group('outer', SheetLighting(pygame.Rect( 80, 0, 132, 70), Vector2(52, 0), duration, '6'))
    def set_group_trigger(self, state):
        self.group_trigger = state
    def trigger_flash(self, groupname=None, ignore=0, pulse=0):
        """Flash every sprite in `groupname` (or the whole storm) at once.

        `ignore` is the sprite that initiated the flash, so it is not
        re-triggered.
        """
        if self.group_trigger:
            if groupname in self.named_groups:
                g = self.named_groups[groupname]
            else:
                g = self
            for s in g.sprites():
                if s != ignore:
                    s.charge()
                    s.flash(1, group_trigger=True, pulse=pulse)
        return
    def add_sheet(self, r, dx, duration):
        self.add(SheetLighting(r, dx, duration))
    def add_fork(self, size, start, end):
        self.add(ForkLighting(size, start, end))
    def end(self):
        """Tear down: kill everything in every named group."""
        for n in self.named_groups:
            self.del_group(n)
class Lightning(Sprite):
    """Base sprite for lightning effects.

    Electrical `potential` accumulates each frame; once it exceeds
    breakdown_potential there is a random chance of a discharge via
    flash().  Subclasses (SheetLighting, ForkLighting) override flash()
    to render the actual discharge.
    """
    def __init__(self, rect, random_seed='taaaash'):
        # Call the parent class (Sprite) constructor
        self.rect = rect
        Sprite.__init__(self, rect.width, rect.height, surface_flags=pygame.SRCALPHA)
        self.potential = 800
        self.breakdown_potential = 800
        self.flashing = False
        self.power = 0
        self.rand = new_random(random_seed)
        self.ticks = 0
        self.pulse = 0
        self.pulse_duration = 0
    def update(self):
        if self.flashing:
            # A discharge is in progress: keep driving it.
            self.flash(self.power)
            return
        self.potential += self.rand.randint(0, 30)
        if self.potential > self.breakdown_potential:
            chance = self.rand.randrange(100)
            power = self.rand.randint(self.potential, 3 * self.potential)
            if chance < 50:
                # Discharge; flash strength is normalised to 0..1.
                self.flash(power / (3 * self.potential))
                self.potential = max(0, self.potential - power)
        else:
            self.image.fill(transparent)
        self.ticks += 1
    def flash(self, power):
        """Begin a discharge; subclasses override this to draw it."""
        self.flashing = False
        pass  # start lightning incoming
    def charge(self):
        """Double the stored potential (used for group-triggered strikes)."""
        # Bugfix: was misspelt `self.potentential`, which silently created
        # a brand-new attribute instead of boosting the stored potential.
        self.potential += self.potential
class SheetLighting(Lightning):
    """Sheet lightning: a soft rectangular flash that can drift into place."""
    def __init__(self, r, move=pygame.math.Vector2(0, 0), duration=0, random_seed='0'):
        super().__init__(r, random_seed)
        self.color = (255, 200, 255)
        # NOTE(review): hard-coded to '0', ignoring the random_seed argument
        # (the RNG itself was already seeded in the Lightning constructor).
        self.random_seed = '0'
        # Drift duration in frames; 0 or None disables movement.
        self.duration = duration * get_fps()
        self.move = move
        self.origin = self.rect.topleft
    def move_to(self, move, duration):
        """Start drifting by `move` over `duration` seconds."""
        self.move = move
        self.duration = duration * get_fps()
        self.ticks = 0
    def update(self):
        # move position if required
        if self.duration is not None and self.duration != 0:
            # Interpolate from origin towards origin+move, clamped at 1.
            newpos = self.origin + (self.move * min(1, (self.ticks/self.duration)))
            self.rect.topleft = (round(newpos.x), round(newpos.y))
        super().update()
    def flash(self, power, group_trigger=False, pulse=False):
        self.log.debug((power, group_trigger, pulse))
        if group_trigger:
            # Propagate the flash to siblings in any containing storm group.
            for g in self.groups():
                try:
                    g.trigger_flash(ignore=self, pulse=pulse)
                except AttributeError:
                    pass
        self.log.debug('flash power {}'.format(power * 255))
        self.image.set_alpha(power * 255)
        self.image.fill(self.color)
        self.flashing = False
class ForkLighting(Lightning):
    """Forked lightning: a jagged ionised path grown from start towards end."""
    def __init__(self, size, start, end, seed = '0'):
        self.color = [246, 255, 71, 255]
        self.start = pygame.math.Vector2(start)
        self.end = pygame.math.Vector2(end)
        # Points ionised so far; extended segment by segment while flashing.
        self.ionised = [self.start]
        self.pulse = 0
        self.pulse_duration = 500
        super().__init__(pygame.Rect((0, 0), size), random_seed = seed)
    def update(self):
        super().update()
        # render to image
        self.image.fill(transparent)
        start_segment = self.ionised[0]
        # NOTE: a_color aliases self.color (a list), so the alpha written
        # below persists on the sprite between frames.
        a_color = self.color
        if self.flashing or self.pulse < self.pulse_duration:
            for point in self.ionised[1:]:
                # Thin core line of the bolt.
                pygame.draw.line(self.image, self.color, start_segment, point, 1)
                if not self.flashing:
                    if self.pulse <= self.pulse_duration:
                        # Pulse the thick glow line's alpha with a slow sine.
                        a_color[3] = 128 * sin(self.pulse/24) + 128
                        self.pulse += 1
                    pygame.draw.line(self.image, a_color, start_segment, point, 3)
                start_segment = point
        else:
            # Pulse finished: reset the path to just the start point.
            self.ionised = [self.ionised[0]]
            self.image.fill(transparent)
    def flash(self, power, group_trigger=False, pulse=None):
        """Grow the fork by a few random segments towards the end point."""
        self.flashing = True
        if pulse is not None:
            self.pulse_duration = pulse
        # Occasionally hold the afterglow for a couple of seconds.
        if self.rand.randint(0, 100) < 20:
            self.pulse_duration = 2 * get_fps()
        self.pulse = 0
        if group_trigger:
            # Fan the flash out to siblings in any containing storm group.
            for g in self.groups():
                try:
                    g.trigger_flash(ignore=self, pulse=self.pulse_duration)
                except AttributeError:
                    pass
        for i in range(self.rand.randrange(3, 8)):
            last = self.ionised[-1]
            togo = self.end - last
            lp = togo.as_polar()
            if lp[0] > 2:
                # Step ~1.5 pixels in a direction biased towards the target.
                togo.from_polar((1.5, self.rand.triangular(-180, 180) + lp[1]))
                n = last + togo
                self.ionised.append(n)
            else:
                # Reached the end point: the discharge is complete.
                self.flashing = False
                return
class Bird(Sprite):
    """A sprite animated from numbered frame images in Resources/bird/."""
    def __init__(self, rect):
        # Call the parent class (Sprite) constructor
        self.ticks = 0
        self.active_frame = 0
        self.rect = rect
        self.frames = []
        # Current animation and the one to switch to when it completes.
        self.action = 'bob'
        self.next_action = 'bob'
        self.frame_loader()
        # Frame index sequences for each named animation.
        self.actions = {'bob':(0, ),
                        'takeoff': (1, 2, 3, 4, 5, 6, 32),
                        'flap': ( 33, 34, 35, 36, 37, 38, 39, 40, 41, 42),
                        'rotate_camera': (12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30),
                        'soar': (31,)
                        }
        Sprite.__init__(self, self.rect.width, self.rect.height, surface_flags=pygame.SRCALPHA)
    def set_action(self, action):
        """Queue the next animation; it starts when the current one wraps."""
        self.next_action = action
    def frame_loader(self, frameid=0):
        """Load consecutive bird_<n>.png frames until one is missing.

        Raises StopIteration if no frames at all could be loaded (the
        codebase uses StopIteration as its end-of-effect signal).
        """
        max_x = 0
        max_y = 0
        try:
            while True:
                fn = os.path.join('Resources', 'bird', 'bird_{}.png'.format(frameid))
                if not os.path.isfile(fn):
                    raise LookupError
                frame = pygame.image.load(fn)
                frame.convert_alpha()
                max_x = max(max_x, frame.get_rect().width)
                max_y = max(max_y, frame.get_rect().height)
                self.frames.append(frame)
                frameid += 1
        except LookupError:
            pass
        if len(self.frames) == 0:
            raise StopIteration
    def update(self):
        # Advance the animation every 5th tick.
        if self.ticks % 5 == 0:
            self.active_frame += 1
            self.active_frame = self.active_frame % len(self.actions[self.action])
            if self.active_frame == 0:
                # Animation wrapped: switch to the queued action.
                self.action = self.next_action
            #self.image.fill((255, 0, 0, 255,))
            pygame.transform.scale(self.frames[self.actions[self.action][self.active_frame]], self.rect.size, self.image)
            # One-shot transitions: takeoff leads to flap, rotate to soar.
            if self.action == 'takeoff':
                self.set_action('flap')
            if self.action == 'rotate_camera':
                self.set_action('soar')
        self.ticks += 1
    def end(self):
        raise StopIteration
class Constellation(Sprite):
    """The Ursa Major star pattern slowly rotating about a pole star."""
    def __init__(self, x, y):
        # Centre of rotation within the 32x32 image.
        self.pole = Vector2(16, 16)
        # Star offsets relative to the pole, keyed by constellation name.
        self.patterns = {'ursamajor': (
            Vector2(8, 5),
            Vector2(10, 6),
            Vector2(8, 10),
            Vector2(10, 10),
            Vector2(6, 12),
            Vector2(5, 14),
            Vector2(5, 17)
        )
        }
        self.rect = pygame.Rect(x, y, 31, 31)
        # Call the parent class (Sprite) constructor
        Sprite.__init__(self, 32, 32)
        self.angle = 180
        # Degrees of rotation per frame.
        self.dangle = 2
    def update(self):
        # White is the sprite colorkey, so this clears to transparent.
        self.image.fill(white)
        self.image.set_at((int(self.pole.x), int(self.pole.y)), (255,229,0) )
        for star in self.patterns['ursamajor']:
            star = star.rotate(self.angle) + self.pole
            self.image.set_at((int(star.x), int(star.y)), (255,229,0) )
        self.angle = (self.angle + self.dangle) % 360
    def end(self):
        raise StopIteration
class HSMoon(MoveableThing):
    """The moon: a scaled base image plus an optional pulsing overlay."""
    def __init__(self, pos, size, fade_duration = None):
        super().__init__(pos, size, fade_duration)
        self.raw_base = pygame.image.load(os.path.join('Resources', 'moon_base.png'))
        self.raw_base = self.raw_base.convert()
        self.raw_overlay = pygame.image.load(os.path.join('Resources', 'moon_overlay.png'))
        self.raw_overlay = self.raw_overlay.convert()
        # Cached scaled copies, rebuilt whenever the size changes.
        self.scaled_base = None
        self.scaled_overlay = None
        # Overlay pulse phase (radians) and per-frame advance.
        self.overlay_pos = 0.0
        self.overlay_rate = 0.0
        self.overlay_enabled = False
    def overlay(self, fade_duration = None):
        """Enable the pulsing overlay, or disable it when no duration given."""
        if fade_duration is None:
            self.overlay_enabled = False
        else:
            self.overlay_enabled = True
            self.overlay_rate = math.pi * 2 / (get_fps() * fade_duration)
    def update(self):
        super().update()
        size = int(round(self.size * 2))
        image_size = (size, size)
        if self.overlay_rate != 0.0:
            self.overlay_pos += self.overlay_rate
            if self.overlay_pos > math.pi * 2.0:
                # Completed a full pulse; stop if the overlay was turned off.
                self.overlay_pos -= math.pi * 2.0
                if not self.overlay_enabled:
                    self.overlay_rate = 0.0
                    self.overlay_pos = 0.0
        if self.scaled_base is None or self.scaled_base.get_size() != image_size:
            # Size changed (or first frame): rescale both images.
            self.scaled_base = pygame.transform.scale(self.raw_base, image_size)
            self.scaled_base.set_colorkey(black)
            self.scaled_overlay = pygame.transform.scale(self.raw_overlay, image_size)
            self.scaled_overlay.set_colorkey(black)
    def draw(self, surface):
        pos = (int(self.x - self.size), int(self.y - self.size))
        alpha = int(255 * self.fade)
        self.scaled_base.set_alpha(alpha)
        surface.blit(self.scaled_base, pos)
        # Overlay alpha follows (1 - cos)/2: a smooth 0 -> 1 -> 0 pulse.
        alpha = int(255 * (1.0 - math.cos(self.overlay_pos)) / 2)
        if alpha > 0:
            self.scaled_overlay.set_alpha(alpha)
            surface.blit(self.scaled_overlay, pos)
class Wave(Sprite):
    """A straight wave-front travelling across the roof along `direction`."""
    def __init__(self, direction, width):
        super().__init__()
        self.width = width
        self.direction = direction
        speed = pythagoras(direction)
        # Unit normal to the direction of travel.
        self.norm = (direction[1] / speed, -direction[0] / speed)
        self.max_x = MADRIX_X
        self.max_y = MADRIX_Y
        # Start from the corner the wave will travel away from.
        if direction[0] > 0.0:
            start_x = 0.0
        else:
            start_x = self.max_x
        if direction[1] > 0.0:
            start_y = 0.0
        else:
            start_y = self.max_y
        self.pos = (start_x, start_y)
    def update(self):
        x = self.pos[0] + self.direction[0]
        y = self.pos[1] + self.direction[1]
        self.pos = (x, y)
        # Die once all four screen corners are more than two wave-widths
        # to the positive side of the front.
        d = self.distance((0, 0))
        d = min(d, self.distance((0, self.max_y)))
        d = min(d, self.distance((self.max_x, self.max_y)))
        d = min(d, self.distance((self.max_x, 0)))
        if d > self.width * 2.0 + 1.0:
            self.kill()
    def distance(self, point):
        """Signed perpendicular distance from `point` to the front line.

        The sign encodes which side of the front the point lies on; see
        Sea.wave_collision and Sea.draw for how each side is interpreted.
        """
        # for line X = A + tN, the distance to point P is
        # (A - P) - ((A - P).N)N
        ap_x = self.pos[0] - point[0]
        ap_y = self.pos[1] - point[1]
        dot = ap_x * self.norm[0] + ap_y * self.norm[1]
        perp = (ap_x - dot * self.norm[0], ap_y - dot * self.norm[1])
        dist = pythagoras(perp)
        # Set sign based on which side of the line we are on
        dot2 = perp[0] * self.norm[1] - perp[1] * self.norm[0]
        if dot2 < 0:
            return dist
        else:
            return -dist
def colormixer(color):
    """Build a mixer for the given channel bitmask (1=red, 2=green, 4=blue).

    The returned callable takes an (r, g, b) tuple and a value, and raises
    each selected channel to at least that value.
    """
    def do_mix(orig, value):
        mixed = []
        for bit, channel in zip((1, 2, 4), orig):
            if color & bit:
                channel = max(channel, value)
            mixed.append(channel)
        return tuple(mixed)
    return do_mix
class Beacon(Sprite):
    """A growing ring of colour, armed until a wave front triggers it."""
    # Channel bit masks and their combinations.
    red = 1
    green = 2
    blue = 4
    purple = red + blue
    yellow = red + green
    cyan = green + blue
    # NOTE(review): only red and green are in the selectable palette even
    # though the combined colours above are defined — confirm intended.
    colors = [
        red,
        green,
    ]
    def __init__(self, pos, color, max_r, speed):
        super().__init__()
        self.pos = pos
        self.radius = 0.5
        # Stays False until a wave passes over this beacon (see Sea.update).
        self.triggered = False
        self.max_r = max_r
        self.speed = speed
        self.color = color
    def update(self):
        # Grow each frame; remove once fully expanded.
        self.radius += self.speed
        if self.radius > self.max_r:
            self.kill()
    def distance(self, point):
        """Euclidean distance from `point` to the beacon centre."""
        x = point[0] - self.pos[0]
        y = point[1] - self.pos[1]
        return pythagoras((x, y))
    def mix(self, color, value):
        """Raise each channel selected by this beacon's bitmask to `value`."""
        (r, g, b) = color
        if self.color & 1:
            r = max(r, value)
        if self.color & 2:
            g = max(g, value)
        if self.color & 4:
            b = max(b, value)
        return (r, g, b)
class ProtoWave(object):
    """A scheduled wave: waits `delay` frames before it is due to spawn."""

    def __init__(self, delay, width, angle):
        # Angle arrives in degrees; keep it in radians for trig later.
        self.angle = angle * math.pi * 2 / 360
        self.width = width
        self.delay = int(delay)

    def update(self):
        """Count down one frame; returns True once the wave is due."""
        self.delay -= 1
        return self.delay < 0
class Sea(Group):
    """Waves sweeping across the roof, plus colour beacons they trigger.

    Waves are scheduled via spawn() as ProtoWave countdowns, travel as
    straight fronts, and arm Beacons when they pass over them.
    """
    def __init__(self, wave_speed, beacon_speed, beacon_size):
        super().__init__()
        size = (MADRIX_X, MADRIX_Y)
        self.s = pygame.Surface(size, flags = pygame.SRCALPHA)
        self.time = 0
        self.beacons = Group()
        self.num_beacons = 0
        # ProtoWaves waiting for their start frame.
        self.future = []
        self.wave_speed = wave_speed
        self.beacon_speed = beacon_speed
        self.beacon_size = beacon_size
    def spawn(self, width, angle, num_waves, interval):
        """Trigger a set of waves"""
        dt = interval * get_fps()
        for i in range(num_waves):
            pw = ProtoWave(dt * i, width, angle)
            self.future.append(pw)
    def add_wave(self, pw):
        """Convert a due ProtoWave into a live Wave sprite."""
        direction = (math.cos(pw.angle) * self.wave_speed, math.sin(pw.angle) * self.wave_speed)
        self.add(Wave(direction, pw.width))
    def beacon(self, n):
        """Set the number of beacons to keep alive."""
        self.num_beacons = n
    def wave_collision(self, pos):
        """True if some wave front is currently passing over `pos`."""
        for wave in self:
            dist = wave.distance(pos)
            if dist >= 0 and dist < self.wave_speed * 2.0:
                return True
        return False
    def add_beacon(self):
        # Pick a bubbleroof lamp that is not currently under a wave front.
        while True:
            lamp = self.rand.choice(ceiling.bubbleroof_lamps)
            pos = (lamp.x, lamp.y)
            if not self.wave_collision(pos):
                break;
        color = self.rand.choice(Beacon.colors)
        b = Beacon(pos, color, self.beacon_size, self.beacon_speed)
        self.beacons.add(b)
    def end(self):
        # NOTE(review): sets num_waves, which nothing in this class reads;
        # possibly num_beacons (or clearing self.future) was intended.
        self.num_waves = 0
    def update(self):
        if len(self.beacons) < self.num_beacons:
            self.add_beacon()
        # No live waves and none scheduled: the effect is over.
        if len(self.future) == 0 and len(self) == 0:
            raise StopIteration
        # Tick the schedule; launch waves that have become due.
        tomorrow = []
        for pw in self.future:
            if pw.update():
                self.add_wave(pw)
            else:
                tomorrow.append(pw)
        self.future = tomorrow
        # Waves and beacons are updated manually (not via super().update()).
        for wave in self:
            wave.update()
        for b in self.beacons:
            if b.triggered:
                b.update()
            elif self.wave_collision(b.pos):
                # A wave front reached this beacon: start it growing.
                b.triggered = True
    def draw(self, surface):
        if len(self) == 0:
            return
        self.s.fill(transparent)
        a = pygame.PixelArray(self.s)
        for lamp in ceiling.lamps:
            x = lamp.x
            y = lamp.y
            # Normalised distance to the nearest wave front behind this lamp.
            close = None
            for wave in self:
                dist = wave.distance((x, y))
                if dist > 0.0:
                    if dist < 1.0:
                        dist -= 1.0
                    else:
                        dist = (dist - 1.0) / wave.width
                    if close is None or dist < close:
                        close = dist
            color = (0, 0, 0)
            if close is not None:
                if close <= 0.0:
                    # Leading edge: white crest.
                    p = int(255 * (close + 1.0))
                    color = (p, p, p)
                elif close < 1.0:
                    # Body of the wave: blend from white towards blue.
                    p = int(255 * (1.0 - close))
                    color = (p, p, 255)
                elif close < 2.0:
                    # Trailing tail: fading blue.
                    p = int(255 * (2.0 - close))
                    color = (0, 0, p)
            # Overlay the brightest beacon ring covering this lamp.
            height = 0.0
            mix = None
            for b in self.beacons:
                dist = b.distance((x, y))
                if dist < b.radius:
                    if dist > b.radius - 1.0:
                        new_height = 1.0
                    else:
                        new_height = dist / b.radius
                elif dist < b.radius + 1.0:
                    dr = dist - b.radius
                    new_height = math.cos(dr * math.pi / 2.0)
                else:
                    new_height = 0.0
                # Fade the ring out as the beacon approaches full size.
                new_height *= min(2.0*math.cos((b.radius / b.max_r) * math.pi / 2.0), 1.0)
                if new_height > height:
                    height = new_height
                    mix = b.mix
            if mix is not None:
                intens = int(255 * height)
                color = mix(color, intens)
            r = color[0]
            g = color[1]
            b = color[2]
            alpha = max(r, g, b)
            if alpha < 255:
                # NOTE(review): scales each channel by 255 then clamps —
                # possibly meant to normalise by alpha instead; verify.
                r = min(r * 255, 255)
                g = min(g * 255, 255)
                b = min(b * 255, 255)
            color = (r, g, b, alpha)
            a[x, y] = color
        del a
        surface.blit(self.s, (0, 0))
class Ripples(Sprite):
    """Full-screen rippling colour wash driven in HLSA space.

    Current hue/lightness/saturation/alpha each ease linearly towards a
    target set by fade_to(); draw() modulates the alpha per-lamp with a
    spatio-temporal sine ripple.
    """
    def __init__(self):
        super().__init__( MADRIX_X, MADRIX_Y, pygame.SRCALPHA)
        self.rect = pygame.Rect((0, 0), MADRIX_SIZE)
        self.color = (210, 60, 70, 0)
        # Hue: current value, target, and per-frame delta.
        self.h = 210
        self.tgt_h = 210
        self.dh = 0
        # Lightness.
        self.l = 60
        self.tgt_l = 60
        self.dl = 0
        # Saturation.
        self.s = 70
        self.tgt_s = 70
        self.ds = 0
        # Alpha.
        self.a = 0
        self.tgt_a = 128
        self.da = 0
    def takeoff(self):
        # NOTE(review): dspeed/speed are not read anywhere in this class —
        # confirm they are consumed elsewhere.
        self.dspeed = 1
        self.speed = 0
        self.da = -5
    def fade_to(self, h=None, s=None, l=None, a=None, duration=3):
        """Ease any of h/s/l/a towards new targets over `duration` seconds."""
        if h is not None:
            self.tgt_h = h
            self.dh = (self.tgt_h - self.h) / (duration * get_fps()) #TODO: change so path is always < 180
        if s is not None:
            self.tgt_s = s
            self.ds = (self.tgt_s - self.s) / (duration * get_fps())
        if l is not None:
            self.tgt_l = l
            self.dl = (self.tgt_l - self.l) / (duration * get_fps())
        if a is not None:
            self.tgt_a = a
            self.da = (self.tgt_a - self.a) / (duration * get_fps())
    def update(self):
        # NOTE(review): for negative deltas `abs(diff) < delta` can never
        # be true, so a value may overshoot its target and keep drifting —
        # verify whether abs(delta) was intended.
        if self.tgt_h != self.h:
            if abs(self.h - self.tgt_h) < self.dh:
                self.h = self.tgt_h
            else:
                self.h += self.dh
        if self.tgt_s != self.s:
            if abs(self.s - self.tgt_s) < self.ds:
                self.s = self.tgt_s
            else:
                self.s += self.ds
        if self.tgt_l != self.l:
            if abs(self.l - self.tgt_l) < self.dl:
                self.l = self.tgt_l
            else:
                self.l += self.dl
        if self.tgt_a != self.a:
            if abs(self.a - self.tgt_a) < self.da:
                self.a = self.tgt_a
            else:
                self.a += self.da
        self.color = hlsa_to_rgba(self.h, self.l, self.s, self.a)
        self.ticks += 1
    def draw(self, surface):
        px = pygame.PixelArray(self.image)
        for lamp in ceiling.lamps:
            i = lamp.x
            j = lamp.y
            # Per-lamp alpha: base level plus a position/time sine ripple,
            # ramped up over the first ~640 ticks and clamped to 0..255.
            self.color[3] = min(255, max(0, int(self.a + ((math.sin(i + self.ticks/50) - math.sin(j))) * ((0.5 * math.sin(self.ticks/15))) * min(self.ticks*0.1, 64))))
            self.color = [min(255, max(0, g)) for g in self.color]
            px[i, j] = tuple(self.color)
        del px
        surface.blit(self.image, self.rect)
class PlasmaBlob(Sprite):
    """An elliptical plasma pulse sweeping outward from its centre.

    The blob is an ellipse (width >= height) rotated by `angle` degrees.
    update() advances a normalised clock from 0 to 2; draw() paints a ring
    of light travelling from the centre to the rim, max-blended with
    whatever is already in `pixels`.
    """
    def __init__(self, pos, size, duration, angle, color):
        super().__init__()
        self.x = pos[0]
        self.y = pos[1]
        self.width = size[0]
        self.height = size[1]
        rad = angle * math.pi * 2.0 / 360
        self.cos = math.cos(rad)
        self.sin = math.sin(rad)
        if self.width < self.height:
            # Bugfix: the original constructed this exception but never
            # raised it, so invalid geometry passed through silently.
            raise ValueError("PlasmaBlob width must be >= height")
        self.ell = self.width + self.height
        self.speed = 1.0 / (get_fps() * duration)
        self.time = 0.0
        self.color = color
    def update(self):
        self.time += self.speed
        if self.time > 2.0:
            self.kill()
    def draw(self, pixels):
        for lamp in ceiling.lamps:
            x = lamp.x
            y = lamp.y
            dx = x - self.x
            dy = y - self.y
            # Rotate into the ellipse's frame and normalise by the radii.
            du = (dx * self.cos + dy * self.sin) / self.width
            dv = (dy * self.cos - dx * self.sin) / self.height
            dist = pythagoras((du, dv))
            if dist > 1.0:
                continue
            # Radial intensity envelope across the ellipse.
            height = (1.0 - math.cos(dist * math.pi * 2.0)) / 2.0
            height = math.sqrt(height)
            # Only light pixels within the travelling pulse window.
            sd = self.time - dist
            if sd < 0 or sd > 1.0:
                continue
            shade = height * (1.0 - math.cos(sd * math.pi * 2)) / 2.0
            r = int(self.color[0] * shade)
            g = int(self.color[1] * shade)
            b = int(self.color[2] * shade)
            prev = pixels[x, y]
            if prev != 0:
                # Max-blend with the pixel already drawn by another blob.
                r = max(r, (prev >> 16) & 0xff)
                g = max(g, (prev >> 8) & 0xff)
                b = max(b, prev & 0xff)
            pixels[x, y] = (r, g, b)
class Aurora(Group):
    """Aurora effect: overlapping PlasmaBlob pulses in a fixed palette."""
    # Palette the blobs are drawn from.
    blob_colors = [
        (128, 0, 0),
        (255, 0, 255),
        (0, 0, 128),
        (0, 255, 0),
    ]
    def __init__(self, pos, blob_duration, num_blobs):
        super().__init__()
        self.x = pos[0]
        self.y = pos[1]
        self.time = 0.0
        self.speed = 0.0
        self.blob_duration = blob_duration
        self.num_blobs = 0
        self.rate = 0.0
        self.s = pygame.Surface(MADRIX_SIZE)
        # Black renders as transparent when the surface is blitted.
        self.s.set_colorkey(black)
        self.spawn(num_blobs)
    def spawn(self, n):
        """Set the target number of live blobs (0 lets the effect end)."""
        self.num_blobs = n
        self.rate = n / (get_fps() * self.blob_duration)
    def add_blob(self):
        # Blobs are wide and flat, placed randomly about the centre.
        width = 25
        height = 4
        x = self.x + self.rand.randrange(-width, width)
        y = self.y + self.rand.randrange(-height, height)
        angle = self.rand.randrange(360)
        color = self.rand.choice(self.blob_colors)
        self.add(PlasmaBlob((x, y), (width, height), self.blob_duration, angle, color))
    def update(self):
        super().update()
        # Randomised pacing: spawn a blob roughly every 1/rate frames.
        self.time -= self.rate * self.rand.random()
        if self.time < 0.0 and len(self) < self.num_blobs:
            self.add_blob()
            self.time += 1.0
        if self.num_blobs == 0 and len(self) == 0:
            raise StopIteration
    def draw(self, surface):
        self.s.fill(0)
        pixels = pygame.PixelArray(self.s)
        for blob in self:
            blob.draw(pixels)
        del pixels
        surface.blit(self.s, (0,0))
    def end(self):
        self.num_blobs = 0
| {
"repo_name": "ajtag/ln2015",
"path": "ln_objects.py",
"copies": "1",
"size": "37025",
"license": "mit",
"hash": -7258909217383839000,
"line_mean": 31.8818827709,
"line_max": 168,
"alpha_frac": 0.4981769075,
"autogenerated": false,
"ratio": 3.483066792097836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9469237761590414,
"avg_score": 0.00240118760148434,
"num_lines": 1126
} |
__author__ = 'ajtag'
from pygame import Rect
import collections
import csv
import os.path
# Madrix canvas dimensions in pixels.
MADRIX_X = 132
MADRIX_Y = 70
MADRIX_SIZE = (MADRIX_X, MADRIX_Y)
# Basic RGBA colours.
white = 255, 255, 255, 0xff
black = 0, 0, 0, 0xff
red = 255, 0, 0, 0xff
green = 0, 255, 0, 0xff
blue = 0, 0, 255, 0xff
dark_grey = 0x30, 0x30, 0x30, 0xff
# NOTE(review): identical to `white`; it only acts as "transparent" on
# surfaces that use this colour as their colorkey.
transparent = 0xff, 0xff, 0xff, 0xff
# Named regions of the roof layout (pixel rectangles).
bubbleroof = Rect((50, 34), (28, 33))
island = Rect((0, 41), (12, 7))
left_arm = Rect(16, 37, 37, 14)
left_outer_arm = Rect((16, 42), (18, 8))
left_inner_arm = Rect((33, 37), (21, 14))
top_arm = Rect(60, 0, 12, 36)
top_outer_arm = Rect((57, 0), (12, 18))
top_inner_arm = Rect((60, 18), (9, 18))
right_arm = Rect(77, 39, 51, 12)
right_inner_arm = Rect((78, 39), (20, 13))
right_outer_arm = Rect((98, 40), (33, 11))
# Rectangles containing no lamps; sprites fully inside them can skip drawing.
offscreen = [
    Rect((0, 0), (62, 39)),
    Rect((71, 0), (57, 40)),
    Rect((0, 50), (53, 20)),
    Rect((80, 52), (52, 18)),
]
# A single light fixture's integer pixel coordinates.
Lamp = collections.namedtuple("Lamp", ["x", "y"])
def readlamps(filename):
    """Read lamp fixture locations from a CSV file with 'X' and 'Y' columns.

    Returns a list of Lamp namedtuples.  Coordinates are shifted by -1
    because Madrix counts from 1 while surfaces count from 0.
    """
    lamps = []
    # Bugfix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(filename) as f:
        csv_f = csv.DictReader(f)
        for row in csv_f:
            # Adjusted XY coordinates -1 as Madrix counts from 1
            lamps.append(Lamp(int(row['X']) - 1, int(row['Y']) - 1))
    return lamps
class Ceiling:
    """Holds the lamp layout for the installation.

    lamps: every renderable fixture (from the render CSV).
    bubbleroof_lamps: the subset of spawn-file lamps lying inside the
    bubbleroof region.
    """
    def __init__(self, spawn_filename, render_filename):
        self.lamps = readlamps(render_filename)
        spawn_lamps = readlamps(spawn_filename)
        self.bubbleroof_lamps = list(filter(lambda lamp: bubbleroof.collidepoint(lamp.x, lamp.y), spawn_lamps))
# Module-level singleton: loads both CSV layouts at import time.
ceiling = Ceiling(spawn_filename=os.path.join('Resources', 'pixels_rework.csv'), render_filename=os.path.join('Resources', 'pixels_mapped.csv'))
| {
"repo_name": "ajtag/TrinRoofPlayer",
"path": "Constants.py",
"copies": "1",
"size": "1703",
"license": "mit",
"hash": 903801524269797900,
"line_mean": 26.9180327869,
"line_max": 144,
"alpha_frac": 0.6124486201,
"autogenerated": false,
"ratio": 2.6,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37124486201,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajtag'
from Renderer import *
class Sprite(pygame.sprite.Sprite):
    """Base sprite with a per-class logger and an optional pre-sized image.

    When x is given, y must be given too (abs(y) would fail on None); an
    image of (|x|, |y|) is created with `white` as both fill and colorkey,
    so the surface starts fully transparent.
    """
    def __init__(self, x=None, y=None, surface_flags=0):
        self.log = logging.getLogger(self.__class__.__name__)
        super().__init__()
        if x is not None:
            self.image = pygame.Surface((abs(x), abs(y)), surface_flags)
            self.image.set_colorkey(white)
            self.image.fill(white)
        self.log.debug('##init##')
        # Frame counter for subclasses that animate over time.
        self.ticks = 0
class Group(pygame.sprite.Group):
    """Sprite group with a per-class logger and a deterministic RNG.

    The RNG is seeded from the subclass name, so each effect replays
    identically from run to run.
    """

    def __init__(self):
        name = self.__class__.__name__
        self.log = logging.getLogger(name)
        super().__init__()
        self.rand = new_random(name)

    def end(self):
        """Default end-of-effect signal: stop immediately."""
        raise StopIteration
class MoveableThing(Group):
    """Base for effects with a position, a size and a fade level.

    move() animates towards a new position/size over a duration; end()
    starts a fade-out, and a completed fade-out raises StopIteration.
    """
    def __init__(self, pos, size, fade_duration):
        super().__init__()
        self.x = float(pos[0])
        self.y = float(pos[1])
        # Per-frame deltas while a move() is in progress.
        self.dx = 0.0
        self.dy = 0.0
        self.steps = 0
        self.size = size
        self.size_speed = 0
        if fade_duration is not None:
            # Fade in from invisible over fade_duration seconds.
            self.fade = 0.0
            self.fade_speed = 1.0 / (get_fps() * fade_duration)
        else:
            self.fade = 1.0
            self.fade_speed = None
    def update(self):
        if self.steps > 0:
            self.steps -= 1
            self.x += self.dx
            self.y += self.dy
            self.size += self.size_speed
        if self.fade_speed is not None:
            self.fade += self.fade_speed
            if self.fade_speed > 0.0 and self.fade >= 1.0:
                # Fade-in complete: hold at full brightness.
                self.fade = 1.0;
                self.fade_speed = None
            elif self.fade_speed < 0.0 and self.fade <= 0.0:
                # Fade-out complete: signal the effect has finished.
                raise StopIteration
    def move(self, newpos, newsize, duration=None):
        """Animate to newpos/newsize over `duration` seconds (None = jump)."""
        if duration is None:
            self.steps = 1
        else:
            self.steps = max(int(duration * get_fps()), 1)
        if newpos is None:
            self.dx = 0.0
            self.dy = 0.0
        else:
            self.dx = (newpos[0] - self.x) / self.steps
            self.dy = (newpos[1] - self.y) / self.steps
        if newsize is None:
            self.size_speed = 0
        else:
            self.size_speed = (newsize - self.size) / self.steps
    def end(self, fade_duration=None):
        """Fade out over fade_duration seconds, or stop immediately."""
        if fade_duration is None:
            raise StopIteration
        else:
            self.fade_speed = -1.0 / (get_fps() * fade_duration)
| {
"repo_name": "ajtag/TrinRoofPlayer",
"path": "Objects.py",
"copies": "1",
"size": "2413",
"license": "mit",
"hash": -1870416546428524800,
"line_mean": 29.1625,
"line_max": 72,
"alpha_frac": 0.5076668048,
"autogenerated": false,
"ratio": 3.512372634643377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9518145500049437,
"avg_score": 0.0003787878787878788,
"num_lines": 80
} |
__author__ = 'ajtag'
import colorsys
import math
def hls_to_rgb(hue, lightness, saturation):
    """Convert HLS (hue 0-360, lightness 0-100, saturation 0-100) into a
    list of three RGB ints in 0-255."""
    fractions = colorsys.hls_to_rgb(hue / 360.0, lightness / 100.0, saturation / 100.0)
    return [int(channel * 255) for channel in fractions]
def pythagoras(vector):
    """Euclidean length of a 2-D vector given as an (x, y) pair."""
    x, y = vector[0], vector[1]
    return math.sqrt(x * x + y * y)
def hlsa_to_rgba(hue, lightness, saturation, alpha):
    """Convert HLSA (hue 0-360, lightness 0-100, saturation 0-100) into a
    list [r, g, b, alpha]; `alpha` is passed through unchanged."""
    fractions = colorsys.hls_to_rgb(hue / 360.0, lightness / 100.0, saturation / 100.0)
    return [int(channel * 255) for channel in fractions] + [alpha]
# dist_Point_to_Segment(): get the distance of a point to a segment
#     Input:  a Point P and a Segment S (in any dimension)
#     Return: the shortest distance from P to S
def dist_Point_to_Segment( point, line):
    """Shortest distance from `point` to the segment (line[0], line[1]).

    Standard point-to-segment projection: if the projection parameter
    falls outside [0, 1] the answer is the distance to the nearer
    endpoint; otherwise it is the distance to the projected point.
    """
    # Bugfix: v was reversed (line[0] - line[1]), which flipped the
    # projection direction; the standard algorithm uses S.P1 - S.P0.
    v = line[1] - line[0]
    w = point - line[0]  # P - S.P0;
    c1 = w.dot(v)
    if c1 <= 0:
        # Bugfix: this early-exit distance (and the one below) was
        # computed but never returned, so execution fell through to the
        # projection maths.
        return point.distance_to(line[0])
    c2 = v.dot(v)
    if c2 <= c1:
        return point.distance_to(line[1])
    b = c1 / c2
    Pb = line[0] + b * v
    return point.distance_to(Pb)
| {
"repo_name": "ajtag/TrinRoofPlayer",
"path": "utils.py",
"copies": "1",
"size": "1305",
"license": "mit",
"hash": 6785830712328659000,
"line_mean": 22.3035714286,
"line_max": 106,
"alpha_frac": 0.5762452107,
"autogenerated": false,
"ratio": 2.7473684210526317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8652902398885316,
"avg_score": 0.03414224657346319,
"num_lines": 56
} |
__author__ = 'ajtag'
import random
from collections import namedtuple
import logging
import pygame
from pygame.math import Vector2
from Constants import *
from utils import *
import sys
import platform
import Renderer
import math
from Renderer import get_fps
import random
# Module-level logging setup and shared font.
log = logging.getLogger()
logging.basicConfig()
log.setLevel(logging.INFO)
FONT = pygame.font.Font(None, 24)
# The eight (dx, dy) offsets of a cell's Moore neighbourhood.
neighbours = []
for i in (-1, 0, 1):
    for j in (-1, 0, 1):
        if i==0 and j ==0:
            pass
        else:
            neighbours.append ((i, j))
class GOL():
    """Conway-style Game of Life played over the ceiling lamp positions."""
    def __init__(self):
        self.rect = pygame.Rect((0, 0), MADRIX_SIZE)
        # Double buffer: current generation and the one being built.
        self.image = pygame.Surface(MADRIX_SIZE)
        self.tomorrow = pygame.Surface(MADRIX_SIZE)
        self.ticks = 0
        self.alive = 0
        # Seed: each lamp starts alive with probability 300/8 = 37.5%.
        for lamp in ceiling.lamps:
            if random.randint(0, 100) < (300 / 8):
                self.image.set_at((lamp.x, lamp.y), white)
                self.alive += 1
        log.info(self.alive)
    def draw(self, s):
        s.blit(self.image, (0, 0))
    def update(self):
        self.tomorrow.fill(black)
        im = pygame.PixelArray(self.image)
        t = pygame.PixelArray(self.tomorrow)
        # Randomly spawn a few new live cells each generation (~3.75%).
        # NOTE(review): set_at on a surface while its PixelArray is still
        # open may conflict with the surface lock — verify.
        for lamp in ceiling.lamps:
            if random.randint(0, 100) < 0.1 * (300 / 8):
                self.image.set_at((lamp.x, lamp.y), white)
                self.alive += 1
        self.alive = 0
        for lamp in ceiling.lamps:
            live = False
            next_to = 0
            for neighbour in neighbours:
                try:
                    # NOTE(review): mixed thresholds (0x80 here, 0x808080
                    # below); both pass for white pixels — confirm intended.
                    if im[lamp.x, lamp.y] > 0x80:
                        live = True
                    if im[lamp.x + neighbour[0], lamp.y + neighbour[1]] > 0x808080:
                        next_to += 1
                except IndexError:
                    log.error(('error', (lamp.x, lamp.y)))
                    # Only tolerate out-of-range lookups for edge lamps.
                    if lamp.x * lamp.y >= 0:
                        raise
            if next_to > 3:
                # kill
                t[lamp.x, lamp.y] = black
            elif next_to == 3:
                # stay or birth
                t[lamp.x, lamp.y] = white
                self.alive += 1
            elif next_to == 2 and live:
                # stay
                t[lamp.x, lamp.y] = white
                self.alive += 1
            else:
                t[lamp.x, lamp.y] = black
        del im
        del t
        # Swap buffers: tomorrow becomes today.
        t = self.image
        self.image = self.tomorrow
        self.tomorrow = t
        self.ticks += 1
if __name__ == "__main__":
    # Configure logging before the player starts emitting messages.
    logging.getLogger().setLevel(logging.INFO)
    logging.basicConfig()

    cli_args = Renderer.cmd_line_args()
    player = Renderer.Player('gameoflife', MADRIX_X, MADRIX_Y, fps=24, args=cli_args)
    player.load_sprite("gameoflife", 50, GOL())

    # Step frames until the player reports it is finished (window closed / esc).
    while player.run():
        pass

    # Pick the ffmpeg binary appropriate for the host platform.
    if 'windows' in platform.platform().lower():
        ffmpeg_exe = 'C:\\Users\\admin\\Desktop\\ffmpeg-20150921-git-74e4948-win64-static\\bin\\ffmpeg.exe'
    else:
        ffmpeg_exe = 'ffmpeg'
    player.export_video(ffmpeg_exe)
    player.end()
    sys.exit()
| {
"repo_name": "ajtag/TrinRoofPlayer",
"path": "patterns/gameoflife.py",
"copies": "1",
"size": "3161",
"license": "mit",
"hash": 8181613793315273000,
"line_mean": 23.8897637795,
"line_max": 111,
"alpha_frac": 0.5099652009,
"autogenerated": false,
"ratio": 3.4660087719298245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4475973972829824,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajtag'
import subprocess as sp
import glob
import pygame
import logging
from Constants import *
import os
import math
import random
import argparse
# Font subsystem must be initialised before creating the HUD font below.
pygame.font.init()
FONT = pygame.font.Font(None, 24)
# Frame rate of the most recently constructed Player; read via get_fps().
_fps = None
def cmd_line_args():
    """Build and parse the renderer's command-line options.

    Returns the argparse.Namespace consumed by Player.__init__.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--warp", type=float, default=-1.0)
    p.add_argument("--no-mask", action="store_false", dest="mask")
    p.add_argument("--image-format", default="png")
    p.add_argument("--no-images", action="store_const", dest="image_format", const=None)
    p.add_argument("--save-video", action="store_true")
    p.add_argument("--quick", action="store_true")
    p.add_argument("--avconv", action="store_true")
    p.add_argument("--random-seed", type=str, default="LN2015")
    p.add_argument("--sparse", type=int, default=2)
    # --solid is shorthand for --sparse 0 (fill whole display pixels).
    p.add_argument("--solid", dest='sparse', action="store_const", const=0)
    p.add_argument("--pause", action="store_true")
    p.add_argument("--scale", type=int, default=8)
    p.add_argument("--export-display", action="store_true")
    return p.parse_args()
def get_fps():
    """Return the frame rate recorded by the most recent Player initialisation."""
    return _fps
# Base seed for HashedRandom instances; overwritten by Player.__init__ when a
# --random-seed argument is supplied.
random_seed = str(random.random())
# Accumulates one draw per destroyed HashedRandom; reported by Player.end().
random_sum = 0.0
# Count of currently-live HashedRandom instances (leak detection).
num_random = 0
# Set True by Player.end(); later destructions are reported as leaks.
random_dead = False
class HashedRandom(random.Random):
    """A seeded random.Random whose live-instance count is tracked globally.

    Construction increments ``num_random``; destruction decrements it and
    folds one draw into ``random_sum`` so leaked generators can be detected
    and the stream checksummed at shutdown.
    """
    def __init__(self, *args, **kwargs):
        global num_random
        num_random += 1
        # Remember the seed string so a leak can be attributed after shutdown.
        self.myseed = args[0]
        super().__init__(*args, **kwargs)

    def __del__(self):
        global num_random
        global random_sum
        # After Player.end() has flagged random_dead, any destruction means
        # the instance outlived the run.
        if random_dead:
            print("UnRandom %s" % self.myseed)
        num_random -= 1
        random_sum += self.random()
        try:
            super().__del__()
        except:
            # Base classes may not define __del__; ignore either way.
            pass
def new_random(name):
    """Return a HashedRandom seeded with the global run seed plus *name*."""
    return HashedRandom(random_seed + name)
class Trigger(object):
    """Describe a scene action for Player.run_trigger.

    With ``method=None`` the trigger creates the named scene; otherwise it
    invokes ``method(*args)`` on the already-running scene object.
    """
    def __init__(self, scene, method=None, *args):
        self.args = args
        self.scene = scene
        self.method = method

    def __repr__(self):
        return "Trigger(%s,%s,%s)" % (self.scene, self.method, self.args)
def clean_images():
    """Delete image frames left in images/ by previous runs (bmp and png only)."""
    for ext in ["bmp", "png"]:
        pattern = os.path.join('images', '*.{}'.format(ext))
        for stale in glob.glob(pattern):
            os.unlink(stale)
class Player:
    """Main render loop: owns the pygame window, the scene objects, timed
    triggers, frame export and the interactive debug keys.

    Frames are drawn onto the small ``screen`` surface at lamp resolution
    and scaled up onto ``display``; optionally each frame is saved to
    images/ and later stitched into a video by export_video().
    """
    log = logging.getLogger('Player')
    def __init__(self, title, width, height, fps=24, args=()):
        """Create the window and apply the parsed command-line options.

        :param title: used for window frames and output file names
        :param width, height: lamp-grid resolution of the render surface
        :param fps: target frame rate
        :param args: Namespace from cmd_line_args() (an empty tuple works
                     only until an attribute like args.scale is read)
        """
        pygame.init()
        # Publish the frame rate for module-level get_fps().
        global _fps
        _fps = fps
        try:
            global random_seed
            random_seed = args.random_seed
        except AttributeError:
            pass
        self.title = title
        self.width = width
        self.height = height
        self.size = (width, height)
        self.display_scale = args.scale
        self.lightmask = args.mask
        # Mask surface: dark grey everywhere except the lamp positions,
        # which are keyed out (white == colorkey) so lamps shine through.
        self.mask = pygame.Surface(self.size)
        self.mask.fill(dark_grey)
        self.mask.set_colorkey(white)
        for x, y in ceiling.lamps:
            self.mask.set_at((x, y), white)
        self.screen = pygame.Surface(self.size)
        self.display = pygame.display.set_mode((self.display_scale * width, self.display_scale * height))
        self.clock = pygame.time.Clock()
        self.fps = fps
        self.pause = args.pause
        self.step = False
        # Running scene objects, keyed by scene name.
        self.objects = {}
        self.ticks = 0
        self.background = black
        self.image_format = args.image_format
        self.save_video = args.save_video
        self.export_display = args.export_display
        # Mouse-drag rectangle endpoints (debug helper for picking Rects).
        self.cursor_loc_start = None
        self.cursor_loc_end = None
        # Warp: fast-forward this many ticks with reduced drawing; negative
        # --warp disables it (normalised to None below).
        self.warp = int(self.fps * args.warp)
        if self.save_video and (self.image_format is None):
            raise Exception("Video requires images")
        if self.warp >= 0 and (self.image_format is not None):
            raise Exception("Can not save when warping")
        if self.warp < 0:
            self.warp = None
        self.quick = args.quick
        self.sparse = args.sparse
        if self.sparse > self.display_scale:
            raise Exception("Pixels bigger than screen")
        # Scene definitions, layer ordering, key bindings and timed triggers.
        self.scene_data = {}
        self.scene_layer = {}
        self.key_triggers = {}
        self.timed_events = {}
        self.log.info('done init')
    def export_surface(self):
        """Return the surface saved to disk: scaled display or raw screen."""
        if self.export_display:
            return self.display
        else:
            return self.screen
    def set_key_triggers(self, key, trig):
        """Bind a Trigger to a pygame key code."""
        self.key_triggers[key] = trig
    def load_sprite(self, name, layer, sprite):
        """Register an already-constructed scene object at the given layer."""
        self.scene_layer[name] = layer
        self.objects[name] = sprite
    def load_scene(self, scene_name, layer, *scene_data):
        """Register a scene factory (class + ctor args) created on trigger."""
        self.scene_data[scene_name] = scene_data
        self.scene_layer[scene_name] = layer
    def load_timed_event(self, time, events):
        """Schedule one Trigger or an iterable of Triggers at `time` seconds."""
        ticks = int(time * self.fps)
        current_events = self.timed_events.get(ticks, [])
        if isinstance(events, Trigger):
            current_events.append(events)
        else:
            for e in events:
                current_events.append(e)
        self.timed_events[ticks] = current_events
    def export_video(self, ffmpeg_exe='ffmpeg', video_length=None):
        """Stitch the saved image frames into <title>.mov via ffmpeg.

        No-op unless --save-video was given.  `video_length` (seconds)
        truncates the output with ffmpeg's -t option.
        """
        if not self.save_video:
            return
        size = self.export_surface().get_size()
        size_str = '{}x{}'.format(size[0], size[1])
        command = [ffmpeg_exe,]
        if video_length is not None:
            command.extend(['-t', '{}'.format(video_length)])
        # NOTE(review): '-s' is passed twice; ffmpeg uses the last occurrence.
        command.extend([
            '-y', # (optional) overwrite output file if it exists
            '-r', '{}'.format(self.fps), # frames per second
            '-i', os.path.join('images', '{}_%d.{}'.format(self.title, self.image_format)),
            '-s', size_str,
            '-an', # Tells FFMPEG not to expect any audio
            '-c:v', 'qtrle',
            '-tune', 'animation',
            '-q', '0',
            '-s', size_str, # size of one frame
            '{}.mov'.format(self.title)
        ]
        )
        self.log.info(' '.join(command))
        sp.call(command)
    def run_trigger(self, trigger):
        """Execute a Trigger: create its scene, or call a method on it.

        StopIteration from the method removes the scene; unknown scenes are
        logged and skipped.
        """
        if trigger.method is None:
            try:
                d = self.scene_data[trigger.scene]
            except:
                self.log.error("No such scene '%s'" % trigger.scene)
                return
            try:
                # d[0] is the scene class, the rest are constructor args.
                self.objects[trigger.scene] = d[0](*d[1:])
            except:
                self.log.error("Failed to create '%s' %s" % (trigger.scene, d))
                raise
        else:
            try:
                try:
                    o = self.objects[trigger.scene]
                except KeyError:
                    self.log.error("Scene '%s' not running" % trigger.scene)
                    return
                getattr(o, trigger.method)(*trigger.args)
            except StopIteration:
                del self.objects[trigger.scene]
            except:
                self.log.error("%s" % (trigger))
                raise
    def sparse_blit(self):
        """Scale screen onto display drawing each lamp as a small square.

        Each lamp becomes a sparse x sparse block centred in its
        display_scale x display_scale cell; with the mask disabled every
        grid position is treated as a lamp.
        """
        self.display.fill(0)
        scale = int(self.display_scale)
        width = self.sparse
        offset = (scale - width) // 2
        dest = pygame.PixelArray(self.display)
        src = pygame.PixelArray(self.screen)
        if not(self.lightmask):
            lamps = []
            for i in range(self.size[0]):
                for j in range(self.size[1]):
                    lamps.append(Lamp(i,j))
        else:
            lamps = ceiling.lamps
        for lamp in lamps:
            base_x = lamp.x * scale + offset
            base_y = lamp.y * scale + offset
            for x in range(self.sparse):
                for y in range(self.sparse):
                    dest[base_x + x, base_y + y] = src[lamp.x, lamp.y]
    def run(self):
        """Process one frame: events, timed triggers, scene updates, drawing.

        Returns False when the user quits (window close or esc), True
        otherwise; callers loop on the return value.
        """
        for event in pygame.event.get():
            # Check for quit
            if event.type == pygame.QUIT:
                return False
            # Mouse events
            if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1: # left start click
                self.cursor_loc_start = event.pos
                self.cursor_loc_end = None
            if event.type == pygame.MOUSEBUTTONUP and event.button == 1: # left finish click
                self.cursor_loc_end = event.pos
                # Print the dragged rectangle in lamp coordinates so it can be
                # pasted into scene code.
                print('pygame.Rect({}, {}, {}, {})'.format(
                    math.floor(min(self.cursor_loc_start[0], self.cursor_loc_end[0])/self.display_scale),
                    math.floor(min(self.cursor_loc_start[1], self.cursor_loc_end[1])/self.display_scale),
                    math.floor((max(self.cursor_loc_end[0], self.cursor_loc_start[0]) - min(self.cursor_loc_end[0], self.cursor_loc_start[0]))/self.display_scale),
                    math.floor((max(self.cursor_loc_start[1], self.cursor_loc_end[1]) - min(self.cursor_loc_start[1], self.cursor_loc_end[1]))/self.display_scale)
                )
                )
            if event.type == pygame.MOUSEBUTTONUP and event.button == 3: # right click
                self.cursor_loc_start = None
                self.cursor_loc_end = None
            # Check Keys
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    return False
                elif event.key == pygame.K_HASH:
                    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                        self.log.info('Verbose Output Off')
                        logging.getLogger().setLevel(logging.INFO)
                    else:
                        self.log.info('Verbose Output On')
                        logging.getLogger().setLevel(logging.DEBUG)
                elif event.key == pygame.K_SLASH:
                    self.log.info('''
            / - help
            # - print key triggers
            F1 - save video on exit
            F2 - view mask
            F3 - toggle fps limiter
            F4 - play/pause
            F5 - step frame
            esc - quit
            ========================================================
            ''')
                    for k,t in self.key_triggers.items():
                        self.log.info('{} - {}'.format(chr(k), t))
                elif event.key == pygame.K_F4:
                    self.pause = not(self.pause)
                    self.log.info('pause video: {}'.format(self.pause))
                elif event.key == pygame.K_F5:
                    self.step = True
                elif event.key == pygame.K_F1:
                    self.save_video = not self.save_video
                    self.log.info('save video: {}'.format(self.save_video))
                elif event.key == pygame.K_F2:
                    self.lightmask = not self.lightmask
                    self.log.info('Mask: {}'.format(self.lightmask))
                elif event.key == pygame.K_F3:
                    self.quick = not self.quick
                    self.log.info('FPS de-limiter: {}'.format(self.quick))
                if event.key in self.key_triggers:
                    self.log.debug('pressed {}'.format(event.key))
                    self.run_trigger(self.key_triggers[event.key])
        # A single F5 step or an active warp forces a frame even while paused.
        running = not self.pause
        if self.step or self.warp is not None:
            running = True
        if running:
            self.background = black
            self.screen.fill(self.background)
            for e in self.timed_events.get(self.ticks, []):
                self.run_trigger(e)
                self.log.info(('{}.{}'.format(self.ticks/self.fps, self.ticks%self.fps), e))
            if self.warp is not None and self.ticks == self.warp:
                self.log.info("Warp finished")
                self.warp = None
            # While warping, only draw every 2*fps ticks to speed things up.
            draw = (self.warp is None) or (self.ticks % (2 * self.fps) == 0)
            remove = []
            # Update/draw scenes in ascending layer order.
            items = [(k,v) for k,v in self.scene_layer.items() if k in self.objects.keys()]
            items.sort(key=lambda ele: ele[1])
            for name, layer in items:
                element = self.objects[name]
                try:
                    element.update()
                    if draw:
                        try:
                            drawfn = element.draw
                        except AttributeError:
                            # No draw(); fall back to blitting image at rect.
                            self.screen.blit(element.image, element.rect.topleft)
                        else:
                            drawfn(self.screen)
                except StopIteration:
                    # Scene signalled completion; drop it after the loop.
                    remove.append(name)
                except:
                    self.log.error('Error while drawing {}'.format(name))
                    raise
            for name in remove:
                del self.objects[name]
            self.step = False
            self.ticks += 1
        else:
            draw = True
        if draw:
            if self.sparse == 0:
                if self.lightmask:
                    pygame.Surface.blit(self.screen, self.mask, (0, 0))
                pygame.transform.scale(self.screen, self.display.get_size(), self.display)
            else:
                self.sparse_blit()
            # draw a red rect overlay to the display surface by dragging the mouse
            if self.cursor_loc_start is not None:
                i, j = self.cursor_loc_start
                if self.cursor_loc_end is None:
                    x, y = pygame.mouse.get_pos()
                else:
                    x, y = self.cursor_loc_end
                r = pygame.Rect((min(i, x), min(j, y)), (max(i, x) - min(i, x), max(j, y) - min(j, y)))
                pygame.draw.rect(self.display, (255, 0, 0), r, 2)
            # HUD: measured/target fps and elapsed seconds.
            self.display.blit(FONT.render('{:.2f}/{:0} fps'.format(self.clock.get_fps(), self.fps), False, (255, 0, 0), ), (10,10))
            self.display.blit(FONT.render('{:.2f}'.format(
                self.ticks/self.fps
            ), False, (255, 0, 0),), (10,45))
        if running and (self.image_format is not None):
            savepath = os.path.join('images')
            if not (os.path.isdir(savepath)):
                os.mkdir(savepath)
            savefile = os.path.join('images', '{}_{}.{}'.format(self.title, self.ticks, self.image_format))
            pygame.image.save(self.export_surface(), savefile)
        pygame.display.flip()
        # --quick or warping disables the fps limiter.
        if self.quick or self.warp is not None:
            self.clock.tick()
        else:
            self.clock.tick(self.fps)
        return True
    def end(self):
        """Shut pygame down and report any leaked HashedRandom instances."""
        global random_dead
        pygame.quit()
        del self.objects
        random_dead = True
        if num_random != 0:
            print("WARNING: %d objects still live" % num_random)
        print("RNG: %g" % (random_sum))
| {
"repo_name": "ajtag/TrinRoofPlayer",
"path": "Renderer.py",
"copies": "1",
"size": "14696",
"license": "mit",
"hash": -8518442549218390000,
"line_mean": 34.0739856802,
"line_max": 167,
"alpha_frac": 0.5194610778,
"autogenerated": false,
"ratio": 3.946294307196563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4965755384996563,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajtag'
import csv
import xml.etree.ElementTree as ET
import os.path
import math
from collections import namedtuple
# RGB white, used when plotting matched lamps.
white = 255,255,255
# A single ceiling lamp: grid position plus its DMX addressing.
Lamp = namedtuple("Lamp", ["x", "y", 'name', 'dmx', 'channel'])
def parse_imagemask_svg(x, y, scale, x_offset = 19, y_offset = 0,
                        svg_path='../Resources/LS-TRIN-0023 East Mall.svg'):
    """Extract lamp centres from an Inkscape SVG plan and normalise them.

    Every <path> element's sodipodi cx/cy centre is collected, then the
    whole point cloud is rescaled so it spans x*scale by y*scale and is
    shifted by the offsets.

    :param x: horizontal extent of the target lamp grid
    :param y: vertical extent of the target lamp grid
    :param scale: multiplier applied to both axes
    :param x_offset: translation added to every x after scaling
    :param y_offset: translation added to every y after scaling
    :param svg_path: plan file to parse (default keeps the historical path,
        so existing callers are unaffected)
    :return: list of (x, y) float tuples, one per lamp

    Improvements over the original: the file path is a parameter, and the
    min/max bookkeeping no longer seeds itself from ``paths[0]`` of the
    first group (which raised IndexError on a path-less group).
    """
    SODIPODI = '{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}'
    SVG = '{http://www.w3.org/2000/svg}'
    root = ET.parse(svg_path).getroot()
    # Collect all centres first so min/max can be taken in one clean pass.
    centres = []
    for group in root.findall(SVG + 'g'):
        for path in group.findall(SVG + 'path'):
            centres.append((float(path.attrib[SODIPODI + 'cx']),
                            float(path.attrib[SODIPODI + 'cy'])))
    if not centres:
        return []
    xs = [c[0] for c in centres]
    ys = [c[1] for c in centres]
    mnlx, mxlx = min(xs), max(xs)
    mnly, mxly = min(ys), max(ys)
    # Normalise each centre to [0, 1] per axis, then scale and offset.
    return [(x_offset + ((cx - mnlx) / (mxlx - mnlx) * x * scale),
             y_offset + ((cy - mnly) / (mxly - mnly) * y * scale))
            for cx, cy in centres]
# Load the madrix lamp list from the pixel CSV.  Using a context manager
# guarantees the file is closed even if a row fails to parse (the original
# opened and closed it manually).
with open(os.path.join('..', 'Resources', 'pixels.csv')) as f:
    ch = csv.DictReader(f)
    madrixlamps = [Lamp(i['X'], i['Y'], i['Name'], i['Universe'], i['Channel']) for i in ch]
scale = 1
# missing light to the left of 19px in madrix lamps
planlamps = [Lamp(i[0], i[1], None, None, None) for i in parse_imagemask_svg(132, 70, scale)]
# Dump the (madrix, plan) pairing for manual inspection.
for i in enumerate(zip(madrixlamps, planlamps)):
    print(i)
def distance(l1, l2):
    """Return the Euclidean distance between two lamps (any objects with .x/.y).

    Fixes the original's ``pow(l1.y - l2.y, 2, 2)``: three-argument pow is
    *modular* exponentiation, which computed the wrong value and raises
    TypeError for float coordinates.
    """
    return math.sqrt(pow(l1.x - l2.x, 2) + pow(l1.y - l2.y, 2))
| {
"repo_name": "ajtag/ln2015",
"path": "utils/match_pixels.py",
"copies": "1",
"size": "2159",
"license": "mit",
"hash": 1895519296945222400,
"line_mean": 27.7866666667,
"line_max": 152,
"alpha_frac": 0.5794349236,
"autogenerated": false,
"ratio": 2.7822164948453607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3861651418445361,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akarapetyan'
from numpy import loadtxt
# A simple function for reading the file and checking whether it is compatible with the input rules
def checkMe(function):
    """Read test.in, validate it, and run *function* on the data (Python 2).

    Input format: first token is a count N, followed by exactly N values.
    """
    #Loading the input file
    temp = loadtxt('test.in', dtype='str')
    #Checking if the input parameters are right
    if temp[0].isdigit() and int(temp[0]) > 0 and len(temp) == int(temp[0])+1:
        # Swap the validated function's code into a fresh callable before
        # invoking it.  NOTE(review): functionally equivalent to calling
        # function(temp) directly — presumably a homework constraint.
        def Caller():pass
        Caller.__code__ = function.__code__
        Caller(temp)
    else:
        print "Something is wrong with the Input parameters, please check"
def Largest_in_n(temp):
    """Print the largest absolute price difference (max - min) in O(n).

    *temp* is the raw loadtxt array; element 0 is the count header and is
    sliced off first.  Note: the locals shadow the builtins min/max.
    """
    temp = temp[1:len(temp)].tolist()
    # Initial smallest element of the array
    min = float(temp[0])
    # Initial largest element of the array
    max = float(temp[0])
    # Single pass tracking the running extrema.
    for i in range(len(temp)):
        if float(temp[i]) < min:
            min = float(temp[i])
        if float(temp[i]) > max:
            max = float(temp[i])
    largest = max - min
    print "\nIt was critically difficult but i found what you have requested !"
    print ("The largest absolute price difference is %1.3f\n" % largest)
checkMe(Largest_in_n) | {
"repo_name": "Arnukk/DAA",
"path": "Hmw1/Q3_3.py",
"copies": "1",
"size": "1170",
"license": "mit",
"hash": 3525120491220578000,
"line_mean": 32.4571428571,
"line_max": 99,
"alpha_frac": 0.6051282051,
"autogenerated": false,
"ratio": 3.75,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48551282051,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akarapetyan'
from random import choice
from numpy import loadtxt
# A simple function for reading the file and checking whether it is compatible with the input rules
def checkMe(function):
    """Read test.in, validate it, and run *function* on the data (Python 2).

    Input format: first token is a count N, followed by exactly N values.
    """
    #Loading the input file
    temp = loadtxt('test.in', dtype='str')
    #Checking if the input parameters are right
    if temp[0].isdigit() and int(temp[0]) > 0 and len(temp) == int(temp[0])+1:
        # Run the target by swapping its code object into a local shim.
        def Caller():pass
        Caller.__code__ = function.__code__
        Caller(temp)
    else:
        print "Something is wrong with the Input parameters, please check"
def Partition(A, p, r):
    """Lomuto partition of A[p..r] around a uniformly random pivot.

    Moves elements <= pivot left of it and returns the pivot's final index.
    Only A[p..r] is touched.

    Fixes the original's pivot swap: ``A[A.index(m)]`` located the first
    occurrence of the pivot *value* anywhere in A, which could swap an
    element outside [p, r] when duplicates exist.
    """
    # Choose a random *index* within [p, r] and move that element to the end.
    pivot_index = choice(range(p, r + 1))
    A[pivot_index], A[r] = A[r], A[pivot_index]
    x = A[r]
    i = p - 1
    for j in range(p, r):
        if A[j] <= x:
            i += 1
            A[i], A[j] = A[j], A[i]
    # Place the pivot between the two partitions.
    A[i + 1], A[r] = A[r], A[i + 1]
    return i + 1
def FindTheMedian(A, p, q, i):
    """Quickselect: return the i-th smallest (1-based) element of A[p..q].

    Recurses into whichever side of a random Partition holds rank i;
    expected linear time.
    """
    if p == q:
        return A[p]
    pivot = Partition(A, p, q)
    left_size = pivot - p + 1  # rank of the pivot within A[p..q]
    if i == left_size:
        return A[pivot]
    if i < left_size:
        return FindTheMedian(A, p, pivot - 1, i)
    return FindTheMedian(A, pivot + 1, q, i - left_size)
def Randomized_in_nsquare(temp):
    """Print the lower median of all pairwise absolute price differences.

    Builds the O(n^2) list of |temp[i] - temp[j]| differences and selects
    its lower median with randomized quickselect.  NOTE(review): the inner
    loop includes j == i (zero differences) and the outer loop skips the
    last element — confirm against the assignment's definition of "pairs".
    """
    temp = temp[1:len(temp)].tolist()
    median = []
    for i in range(len(temp)-1):
        for j in range(len(temp)):
            median.append(abs(float(temp[i]) - float(temp[j])))
    # (len+1)/2 is the 1-based lower-median rank (Python 2 integer division).
    rc = FindTheMedian(median, 0, len(median)-1, (len(median)+1)/2)
    print "\nDearest Master I got what you have requested !"
    print ("The lower median absolute price difference is %1.3f\n" % rc)
checkMe(Randomized_in_nsquare) | {
"repo_name": "Arnukk/DAA",
"path": "Hmw1/Q3_5.py",
"copies": "1",
"size": "1738",
"license": "mit",
"hash": 3068227730424452000,
"line_mean": 26.6031746032,
"line_max": 99,
"alpha_frac": 0.5575373993,
"autogenerated": false,
"ratio": 3.10912343470483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9151827204360277,
"avg_score": 0.002966725928910803,
"num_lines": 63
} |
__author__ = 'akarapetyan'
from random import choice, randrange, shuffle
from numpy import loadtxt
import math
import numpy
#Output Buffer
# Result file shared by Table_Generation and Perfect_Hash below.
f = open('test.out', 'r+')
# NOTE(review): these two lists are never used in this file — dead state?
FirstLine = []
OtherLines = []
# A simple function for reading the file and checking whether it is compatible with the input rules
def checkMe(function):
    """Read test.in, validate it, and run *function* on the data (Python 2).

    Input format: first token is a count N, followed by exactly N values.
    """
    #Loading the input file
    temp = loadtxt('test.in', dtype='str')
    #Checking if the input parameters are right
    if temp[0].isdigit() and int(temp[0]) > 0 and len(temp) == int(temp[0])+1:
        # Run the target by swapping its code object into a local shim.
        def Caller():pass
        Caller.__code__ = function.__code__
        Caller(temp)
    else:
        print "Something is wrong with the Input parameters, please check"
def Table_Generation(temp):
    """Build one level of a universal-hash table for the keys in *temp*.

    Picks the largest prime m in [n, 2n], draws 10 random multipliers, and
    hashes each key (its first 10 characters, digit by digit) into a table
    of size m.  Collisions are stored as lists for the caller to resolve.
    Writes "m a0 .. a9" to the shared output file f and returns the table.

    Python 2 only: ``filter`` must return a subscriptable list.
    NOTE(review): the bare except retries forever on any error — a key
    shorter than 10 characters would loop indefinitely.
    """
    str_error = "Not executed yet."
    while str_error:
        try:
            #Finding the height of a table based on the input size
            n = len(temp)
            # Primes in [n, 2n]: keep numbers with no divisor up to sqrt(num).
            m = filter(lambda num: (num % numpy.arange(2, 1 + int(math.sqrt(num)))).all(), range(n, 2 * n+1))
            m = m[len(m)-1]
            #Generating the random sequence
            a = []
            table = [None] * m
            for i in range(10):
                a.append(randrange(0, m))
            #First level hashing
            for i in range(len(temp)):
                sum = 0
                for j in range(10):
                    sum += a[j] * float(temp[i][j])
                index = int(sum % m)
                if table[index] is None:
                    table[index] = float(temp[i])
                else:
                    # Collision: promote the slot to a list of keys.
                    v = table[index]
                    table[index] = []
                    table[index].append(v)
                    table[index].append(float(temp[i]))
            str_error = None
        except:
            pass
    # Record the table size and multipliers chosen for this level.
    f.write(str(m) + " ")
    for i in range(len(a)):
        f.write(str(a[i]) + " ")
    f.write("\n")
    return table
def Perfect_Hash(temp):
    """Two-level perfect hashing over the input keys (Python 2).

    Level one comes from Table_Generation; each colliding slot is re-hashed
    with fresh random multipliers until its second-level table is
    collision-free.  Results are appended to the shared output file f.
    """
    temp = temp[1:len(temp)].tolist()
    table = Table_Generation(temp)
    #First level hashing is done
    #Now looking for collisions in order to resolve them
    for i in range(len(table)):
        if table[i] is not None and type(table[i]) is not float:
            #There is a collision here
            trigger = True
            # Keep drawing second-level tables until one has no collisions.
            while trigger:
                for j in range(len(table[i])):
                    table[i][j] = str(table[i][j])
                second_table = Table_Generation(table[i])
                flag = False
                for z in range(len(second_table)):
                    if type(second_table[z]) is not float and second_table[z] is not None:
                        flag = True
                        break
                if flag:
                    trigger = True
                else:
                    trigger = False
                    table[i] = second_table
        else:
            if table[i] is not None:
                f.write(str(table[i]) + "\n")
            else:
                # Empty slot marker.
                f.write("0 0" + "\n")
    print table
checkMe(Perfect_Hash) | {
"repo_name": "Arnukk/DAA",
"path": "Hmw1/Q4.py",
"copies": "1",
"size": "3461",
"license": "mit",
"hash": 1508885607483522000,
"line_mean": 36.6304347826,
"line_max": 121,
"alpha_frac": 0.4348454204,
"autogenerated": false,
"ratio": 4.584105960264901,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.55189513806649,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akarapetyan'
#Global Variables
# Node objects keyed by ID, filled in by checkMe().
Vertexes = {}
# IDs of nodes that are nobody's child.
RootVertexes = []
# Post-order listing of nodes produced by MagicOrdering (knapsack input).
ordNodes = []
# IDs the knapsack decides to keep alive.
VertexesAlive = []
# NOTE(review): never written or read in this file — dead state?
DeadVertexes = []
# Knapsack capacity and node count, both read from test.in by checkMe().
Memory = 0
n = 0
#Output Buffer
f = open('test.out', 'r+')
class Node:
    """A process-tree node parsed from test.in.

    ID/Memory/Priority come straight from the input line; ``children``
    holds child node IDs.  ``indSub`` (total descendant count) is computed
    later by MagicOrdering; it is initialised here so code reading it can
    never hit an AttributeError on a node the ordering has not visited.
    """
    def __init__(self, ID, Memory, Priority):
        self.ID = ID
        self.Priority = Priority
        self.Memory = Memory
        self.Sub = 0       # legacy field kept for compatibility (unused here)
        self.indSub = 0    # descendant count, filled in by MagicOrdering
        self.children = []
# A simple function for reading the file and checking whether the data is compatible with the input rules
def checkMe(function):
    """Parse test.in into the module-level node graph, then run *function*.

    File format: line 1 is "<node count> <memory capacity>"; each following
    line is "<id> <memory> <priority> [child ids...]".  Roots are the nodes
    that never appear as anyone's child.
    """
    #Loading the input file
    global n
    global Memory
    f = open("test.in", "r")
    temp = f.readline().split()
    n = int(temp[0])
    Memory = int(temp[1])
    childs = []
    for i in range(n):
        temp = f.readline().split()
        node = Node(int(temp[0]), int(temp[1]), int(temp[2]))
        # Tokens beyond the third are this node's child IDs.
        for j in range(3, len(temp)):
            childs.append(int(temp[j]))
            node.children.append(int(temp[j]))
        Vertexes[int(temp[0])] = node
    # Roots = all node IDs minus every ID that appeared as a child.
    RootVertexes.extend(list(set(Vertexes.keys())-set(childs)))
    #Everything is right with the data, let's manage it carefully
    def Caller(): pass
    Caller.__code__ = function.__code__
    Caller()
def MagicOrdering(root):
    """Post-order walk: set root.indSub to the number of descendants of
    *root* and append root to the global ordNodes list.

    Returns the descendant count so parents can accumulate it.
    """
    descendants = 0
    for child_id in root.children:
        descendants += MagicOrdering(Vertexes[child_id])
    # Descendants of the children, plus the children themselves.
    root.indSub = descendants + len(root.children)
    ordNodes.append(root)
    return root.indSub
def CoolNapsack():
    """Tree-knapsack over the post-ordered nodes, filling VertexesAlive.

    L[i][j] = best total priority using the first i nodes of ordNodes with
    memory budget j; choosing node i-1 implies its whole subtree can be
    considered taken, so "skip" jumps back over indSub entries (the
    i-1-ordNodes[i-1].indSub index).  track[][] records skip(1)/take(2)
    decisions for the backtracking pass.
    """
    L = []
    for i in range(n+1):
        L.append([])
        for j in range(Memory+1):
            L[i].append(0)
    track = []
    for i in range(n+1):
        track.append([])
        for j in range(Memory+1):
            track[i].append(0)
    for i in range(1, n+1):
        for j in range(1, Memory+1):
            if ordNodes[i-1].Memory > j:
                # Node doesn't fit: forced to skip it (and its subtree).
                L[i][j] = L[i-1-ordNodes[i-1].indSub][j]
                track[i][j] = 1
            elif L[i-1-ordNodes[i-1].indSub][j] > ordNodes[i-1].Priority + L[i-1][j-ordNodes[i-1].Memory]:
                # Skipping the subtree beats taking this node.
                L[i][j] = L[i-1-ordNodes[i-1].indSub][j]
                track[i][j] = 1
            else:
                # Take the node: pay its memory, gain its priority.
                L[i][j] = ordNodes[i-1].Priority + L[i-1][j-ordNodes[i-1].Memory]
                track[i][j] = 2
    # Backtrack from the full problem to recover which nodes were taken.
    col = Memory
    row = n
    while row >= 1 and col >= 1:
        if track[row][col] == 1:
            row = row-1-ordNodes[row-1].indSub
        else:
            VertexesAlive.append(ordNodes[row-1].ID)
            col -= ordNodes[row-1].Memory
            row -= 1
def main():
    """Order the forest, run the knapsack, and write the terminated set.

    Output line: total memory freed, then the IDs of terminated nodes.
    """
    for root_id in RootVertexes:
        MagicOrdering(Vertexes[root_id])
    CoolNapsack()
    # Everything the knapsack did not keep gets terminated.
    to_terminate = list(set(Vertexes.keys()) - set(VertexesAlive))
    freed = 0
    for node_id in to_terminate:
        freed += Vertexes[node_id].Memory
    f.write(str(freed) + " ")
    for node_id in to_terminate:
        f.write(str(node_id) + " ")
    f.close()
checkMe(main) | {
"repo_name": "Arnukk/DAA",
"path": "Hmw2/Q3.py",
"copies": "1",
"size": "2862",
"license": "mit",
"hash": -3995878129750631000,
"line_mean": 26.5288461538,
"line_max": 106,
"alpha_frac": 0.5457721873,
"autogenerated": false,
"ratio": 3.134720700985761,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9166983947006002,
"avg_score": 0.0027017882559518635,
"num_lines": 104
} |
__author__ = 'akarapetyan'
import random
from numpy import loadtxt
from numpy import inf
#Output Buffer
f = open('test.out', 'r+')
# A simple function for reading the file and checking whether the data is compatible with the input rules
def checkMe(function):
    """Read test1.in, validate it as a list of integer pairs, run *function*.

    Each row must be exactly two digit strings; both are converted to float
    and each pair is sorted ascending before being handed on (Python 2).
    """
    #Loading the input file
    try:
        temp = loadtxt('test1.in', dtype='str').tolist()
    except:
        print "Something is wrong with the Input parameters, please check"
        exit()
    #Checking if the input parameters are right
    if isinstance(temp, list) and len(temp) > 0:
        for i in temp:
            if not isinstance(i, list) or len(i) != 2:
                print "Something is wrong with the Input parameters, please check"
                exit()
            else:
                for j in range(len(i)):
                    if not i[j].isdigit():
                        print "Something is wrong with the Input parameters, please check"
                        exit()
                    else:
                        i[j] = float(i[j])
    else:
        print "Something is wrong with the Input parameters, please check"
        exit()
    #This is in the case if you guys try to cheat
    for i in range(len(temp)):
        temp[i] = sorted(temp[i])
    #Everything is right with the data, let's manage it carefully
    def Caller(): pass
    Caller.__code__ = function.__code__
    Caller(temp)
class BST():
    """Augmented BST node keyed on interval endpoints (Python 2).

    Per node: u is the endpoint weight (+1 open / -1 close), sum is the
    total u along the subtree insert paths, max is the running prefix
    maximum used by findthePMO.
    """
    left = None
    right = None
    key = None
    u = None
    sum = None
    max = None
    # The class "constructor" - It's actually an initializer
    def __init__(self, left, right, key, u, sum, max):
        self.left = left
        self.right = right
        self.key = key
        self.u = u
        self.sum = sum
        self.max = max
    def printme(self):
        """Dump this node's fields to stdout (debugging aid)."""
        output = 'my Key is %d ' % self.key + '\n'
        output += 'my Left is %d' % self.left.key + '\n' if self.left else 'My Left is empty\n'
        output += 'my Right is %d' % self.right.key + '\n' if self.right else 'My Right is empty\n'
        output += 'my U is %d' % self.u + '\n'
        output += 'my SUM is %d' % self.sum + '\n'
        output += 'my MAX is %d' % self.max + '\n'
        print output
# Module-level placeholder; main() builds its own tree in a local of the
# same name, so this stays None.
mytree = None
def main(temp):
    """Insert interval endpoints one pair at a time, reporting the PMO
    (point of maximum overlap) and the comparison count after each pair.

    Each pair contributes +1 at its left endpoint and -1 at its right, so
    the prefix-sum maximum over keys is the maximum overlap (Python 2).
    """
    Right = []
    Left = []
    #putting our endpoints into 2 one dimensional arrays
    random.shuffle(temp)
    for i in temp:
        Right.append(i[1])
        Left.append(i[0])
    #initializing the Binary tree
    mytree = BST(None, None, Left[0], 1, 1, 1)
    InsertintoBST(mytree, Right[0], -1)
    UpdateMax(mytree)
    ThePmo, counter = findthePMO(mytree, 0)
    f.write(str(ThePmo) + " " + str(counter))
    f.write("\n")
    # Remaining pairs: insert both endpoints, refresh max, report the PMO.
    for i in range(1, len(Left), 1):
        InsertintoBST(mytree, Left[i], 1)
        InsertintoBST(mytree, Right[i], -1)
        UpdateMax(mytree)
        ThePmo, counter = findthePMO(mytree, 0)
        f.write(str(ThePmo) + " " + str(counter))
        f.write("\n")
    f.close()
    PrintTheTree(mytree)
    ThePmo, counter = findthePMO(mytree, 0)
    print "The PMO is %d" % ThePmo
def PrintTheTree(object):
    """Pre-order dump of every node in the tree via printme()."""
    object.printme()
    for child in (object.left, object.right):
        if child:
            PrintTheTree(child)
def findthePMO(object, counter):
    """Descend toward the key realising the subtree's prefix maximum.

    Returns (key, nodes_visited).  NOTE(review): if none of the three
    equality branches matches (or object is falsy) the function returns
    None, which the caller unpacks into two values — confirm this cannot
    happen for a consistently-updated tree.
    """
    counter += 1
    if not object:
        return
    else:
        MaxLeft, MaxRight, Sumleft = CatchtheExceptions(object)
        if object.max == Sumleft + object.u:
            # Maximum is realised exactly at this node's key.
            PMO = object.key
            return PMO, counter
        elif object.max == MaxLeft:
            return findthePMO(object.left, counter)
        elif object.max == Sumleft + object.u + MaxRight:
            return findthePMO(object.right, counter)
def CatchtheExceptions(object):
    """Refresh both children's max fields and return their aggregates.

    Missing children get neutral defaults: sum 0, left max -inf (so an
    absent left subtree never wins), right max 0.
    Returns (MaxLeft, MaxRight, Sumleft).
    """
    UpdateMax(object.left)
    UpdateMax(object.right)
    Sumleft = object.left.sum if object.left else 0
    MaxLeft = object.left.max if object.left else -inf
    MaxRight = object.right.max if object.right else 0
    return MaxLeft, MaxRight, Sumleft
def UpdateMax(object):
    """Recompute object.max from its children's aggregates (no-op on None).

    The candidates are: best prefix ending in the left subtree, the prefix
    through this node, and the best prefix extending into the right subtree.
    """
    if not object:
        return
    MaxLeft, MaxRight, Sumleft = CatchtheExceptions(object)
    object.max = max(MaxLeft, Sumleft + object.u, Sumleft + object.u + MaxRight)
def InsertintoBST(theobject, thevalue, u):
    """Insert *thevalue* with weight *u*, adding u to every node's sum
    along the search path; duplicates go right.

    Note: a falsy key (not just None) stops the insert, matching the
    original's `if not theobject.key` guard.
    """
    if not theobject.key:
        return
    # Every node on the path absorbs the endpoint's weight.
    theobject.sum += u
    side = 'left' if thevalue < theobject.key else 'right'
    child = getattr(theobject, side)
    if child:
        InsertintoBST(child, thevalue, u)
    else:
        setattr(theobject, side, BST(None, None, thevalue, u, u, 0))
checkMe(main) | {
"repo_name": "Arnukk/DAA",
"path": "Hmw2/Q1.py",
"copies": "1",
"size": "4874",
"license": "mit",
"hash": -6462788520264640000,
"line_mean": 28.0178571429,
"line_max": 105,
"alpha_frac": 0.5750923266,
"autogenerated": false,
"ratio": 3.5733137829912023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4648406109591202,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akarapetyan'
import random
from numpy import loadtxt
import math
# Global comparison counter incremented by merge(); reset per insertion in main().
mycounter = 0
#Output Buffer
f = open('test.out', 'r+')
# A simple function for reading the file and checking whether the data is compatible with the input rules
def checkMe(function):
    """Read test2.in, validate it as a list of integers, run *function*.

    Every token must be a digit string; each is converted to int before the
    target function receives the list (Python 2).
    """
    #Loading the input file
    try:
        temp = loadtxt('test2.in', dtype='str').tolist()
    except:
        print "Something is wrong with the Input parameters, please check"
        exit()
    #Checking if the input parameters are right
    try:
        if isinstance(temp, list) and len(temp) > 0:
            for i in range(len(temp)):
                if not temp[i].isdigit():
                    print "Something is wrong with the Input parameters, please check"
                    exit()
                else:
                    temp[i] = int(temp[i])
        else:
            print "Something is wrong with the Input parameters, please check"
            exit()
    except:
        print "Something is wrong with the Input parameters, please check"
        exit()
    #Everything is right with the data, let's manage it carefully
    def Caller(): pass
    Caller.__code__ = function.__code__
    Caller(temp)
def main(temp):
#Counters
SuccessfulSearches = 0
SuceessfullComparisons = 0
UnsucessfullComparisons = 0
UnsucessfullSearches = 0
InsertionComparisons = 0
Insertions = 0
global mycounter
#Lets create the Data Structure
TheMagicData = []
Temporary = random.sample(range(100), 18)
n = "{0:b}".format(len(Temporary))
k = int(math.ceil(math.log(len(temp)+1, 2)))
flag = 0
for i in range(k):
A = []
#if int(n[-i-1]) == 1:
#for j in range(int(math.pow(2, i))):
# A.append(Temporary[flag])
# flag += 1
TheMagicData.append(A)
#Ok we have the data already, let's start playing with it
#Let's Search for what you are looking for
print temp
print TheMagicData
for i in temp:
OUTER, INNER, Comparisons = SEARCH(TheMagicData, i)
if INNER is not None:
print "You were looking for %d ? it is in ! %d, %d" % (i, OUTER, INNER)
SuccessfulSearches += 1
SuceessfullComparisons += Comparisons
else:
UnsucessfullSearches += 1
UnsucessfullComparisons += Comparisons
#Let's insert it
INSERT(TheMagicData, i)
InsertionComparisons += mycounter
Insertions += 1
mycounter = 0
print "Successfull Searches %d" % SuccessfulSearches
f.write(str(SuccessfulSearches) + " ")
bin = SuceessfullComparisons/SuccessfulSearches if SuccessfulSearches != 0 else 0
print "Amortized number of comparisons per Successfull Search is %d" % bin
f.write(str(bin) + " ")
bin = UnsucessfullComparisons/UnsucessfullSearches if UnsucessfullSearches !=0 else 0
print "Amortized number of comparisons per Unsuccessfull Search is %d" % bin
f.write(str(bin) + " ")
bin = InsertionComparisons/Insertions if Insertions !=0 else 0
print "Amortized number of comparisons per Insertion is %d" % bin
f.write(str(bin) + " ")
print TheMagicData
f.close
def SEARCH(theArray, theElement):
    """Binary-search every level for theElement.

    Returns (level, index_in_level, total_comparisons) on a hit, or
    (None, None, total_comparisons) when no level contains it.
    """
    total_cmp = 0
    found_at = None
    for level, sublist in enumerate(theArray):
        found_at, ncmp = binary_search(sublist, theElement)
        total_cmp += ncmp
        if found_at is not None:
            return level, found_at, total_cmp
    return None, found_at, total_cmp
def INSERT(theArray, theElement):
    """Binomial-list insert: carry merged keys down until an empty level.

    The new element starts as a singleton Z; each occupied level is merged
    into Z (and emptied) until an empty level absorbs Z, or a new level is
    appended at the end.  NOTE(review): the last-level branch appends even
    when that level is empty, growing the structure — confirm intended.
    """
    Z = []
    Z.append(theElement)
    for i in range(len(theArray)):
        if i == len(theArray)-1:
            theArray.append(Z)
            return
        if not len(theArray[i]):
            # Empty level: the carry stops here.
            theArray[i] = Z
            return
        else:
            # Occupied level: merge it into the carry and empty it.
            Z.extend(theArray[i])
            del theArray[i][:]
            Z = merge_sort(Z)
def merge_sort(m):
    """Return a sorted copy of *m* via top-down merge sort.

    Fixes the original's ``len(m) / 2``: under Python 3 that is float
    division and slicing with a float raises TypeError.  ``//`` behaves
    identically on Python 2 integers, so this is a pure portability fix.
    """
    if len(m) <= 1:
        return m
    middle = len(m) // 2
    left = merge_sort(m[:middle])
    right = merge_sort(m[middle:])
    return list(merge(left, right))
def merge(left, right):
    """Merge two sorted lists, counting element comparisons in the global
    mycounter (read by main() to report amortized insertion cost)."""
    global mycounter
    result = []
    left_idx, right_idx = 0, 0
    while left_idx < len(left) and right_idx < len(right):
        mycounter += 1
        if left[left_idx] <= right[right_idx]:
            result.append(left[left_idx])
            left_idx += 1
        else:
            result.append(right[right_idx])
            right_idx += 1
    # Append whichever side still has elements (at most one does).
    if left:
        result.extend(left[left_idx:])
    if right:
        result.extend(right[right_idx:])
    return result
def binary_search(l, value):
    """Binary search on the sorted list l.

    Returns (index, comparisons) when value is found, otherwise
    (None, comparisons); one comparison is counted per loop iteration.
    """
    probes = 0
    lo, hi = 0, len(l) - 1
    while lo <= hi:
        probes += 1
        mid = (lo + hi) // 2
        pivot = l[mid]
        if pivot > value:
            hi = mid - 1
        elif pivot < value:
            lo = mid + 1
        else:
            return mid, probes
    return None, probes
checkMe(main) | {
"repo_name": "Arnukk/DAA",
"path": "Hmw2/Q2.py",
"copies": "1",
"size": "4925",
"license": "mit",
"hash": 9152319468933918000,
"line_mean": 28.3214285714,
"line_max": 105,
"alpha_frac": 0.5837563452,
"autogenerated": false,
"ratio": 3.7169811320754715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48007374772754713,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akarapetyan'
import matplotlib.pyplot as plt
from wnaffect import WNAffect
from emotion import Emotion
from nltk.corpus import wordnet as wn
from cursor_spinning import SpinCursor
import time
import sys
import numpy as np
import PorterStemmer as ps
from scipy.interpolate import interp1d
#CONSTANTS
#array of inapproriate words to be excluded
toexclude = ["world", "blue", "weight", "self", "hero", "identification", "will", "sympathy", "empathy", "preference", "compatibility", "softheartedness", "puppy"]
def fancy_output(msg, command, starting_time, *args):
    """
    Run ``command(*args)`` while showing a spinning-cursor progress message,
    then print the elapsed time since ``starting_time``.

    @param msg: message shown next to the spinner
    @param command: callable to execute
    @param starting_time: time.time() reference for the elapsed-time report
    @param args: positional arguments forwarded to ``command``
    @return output: the result of the command
    """
    spin = SpinCursor(msg=msg, minspin=5, speed=5)
    spin.start()
    output = command(*args)
    # NOTE(review): the spinner is stopped only when the result is truthy;
    # a falsy result leaves the spinner thread running -- confirm intended
    if output: spin.stop()
    sys.stdout.write("Elapsed time - %3.6f seconds" % (time.time()-starting_time))
    print '\n'
    return output
def preprocess_database(year_range):
    """
    Filter the Google Books 1-gram shards down to the chosen year range.

    Reads the ten CSV shards line by line (tab-separated: word, year,
    occurrences, pages, books) and groups the raw lines by year.
    @param year_range: iterable of years to keep
    @return filtered_db: dict mapping year -> list of raw lines
    """
    path_pattern = "data\googlebooks-eng-1M-1gram-20090715-"
    filtered_db = {}
    shard_paths = [path_pattern + '%d.csv' % i for i in range(10)]
    for source in shard_paths:
        with open(source) as f:
            for line in f:
                year = int(line.split('\t')[1])
                if year in year_range:
                    filtered_db.setdefault(year, []).append(line)
    return filtered_db
def get_mood_score(mood, year, filtered_db):
    """
    Calculate the mood score of the given mood terms for one year.

    The score is (occurrences of any mood term / occurrences of "the"),
    scaled by the number of mood terms.
    :param mood: list of mood-related words
    :param year: year key into filtered_db
    :param filtered_db: dict of year -> raw tab-separated 1-gram lines
    :return: the mood score (float)
    """
    mood_hits = 0
    the_hits = 0
    for record in filtered_db[year]:
        fields = record.split('\t')
        token = fields[0]
        if token in mood or token.lower() in mood:
            mood_hits += int(fields[2])
        if token == "the" or token.lower() == "the":
            the_hits += int(fields[2])
    # normalise by the frequency of "the" as a proxy for corpus size
    return (1.0 * mood_hits / the_hits) / 1.0 * len(mood)
def get_emotion_terms(emotion):
    """
    Given the emotion, the function returns all the terms related to that emotion.

    Walks the WordNet-Affect hierarchy below ``emotion``; hyphenated terms are
    split into parts, blacklisted words (``toexclude``) are dropped, every kept
    term is expanded with its single-word WordNet synonyms, and duplicates
    (case-insensitively) are avoided.
    @param emotion: name of the emotion - string
    @return terms_array: list of unique lower/original-case terms
    """
    terms_array = [emotion]
    for term in Emotion.emotions[emotion].get_children([]):
        # first pass: collect the term itself (or its hyphen parts)
        if "-" in term:
            for t in term.split("-"):
                if t not in toexclude:
                    terms_array.append(t.lower()) if t not in terms_array and t.lower() not in terms_array else None
        else:
            terms_array.append(term) if term not in terms_array and term.lower() not in terms_array else None
        # second pass: expand with WordNet synonyms, skipping multi-word lemmas
        if "-" in term:
            for t in term.split("-"):
                if t not in toexclude:
                    for synset in wn.synsets(t):
                        for lemma in synset.lemmas():
                            if "_" not in str(lemma.name()):
                                terms_array.append(str(lemma.name()).lower()) if str(lemma.name()) not in terms_array and str(lemma.name()).lower() not in terms_array else None
        else:
            for synset in wn.synsets(term):
                for lemma in synset.lemmas():
                    if "_" not in str(lemma.name()):
                        terms_array.append(str(lemma.name()).lower()) if str(lemma.name()) not in terms_array and str(lemma.name()).lower() not in terms_array else None
    return terms_array
def get_stems():
    """
    Returns the array of the filtered stems according to the conditions mentioned in the paper.

    Reads ./part-of-speech.txt line by line (keeping only the text before the
    first tab), Porter-stems each alphabetic run, and keeps unique stemmed
    outputs longer than 2 characters.
    @return: stemarray
    """
    stemarray = []
    p = ps.PorterStemmer()
    infile = open("./part-of-speech.txt", 'r')
    while 1:
        output = ''
        # bug fix: ``word`` was never initialised before use, so the first
        # alphabetic character raised UnboundLocalError
        word = ''
        line = infile.readline()
        line = line.split('\t')[0]
        if line == '':
            break
        for c in line:
            if c.isalpha():
                word += c.lower()
            else:
                if word:
                    output += p.stem(word, 0, len(word)-1)
                    word = ''
                output += c.lower()
        # bug fix: flush a trailing word (lines cut at a tab have no
        # terminating non-alpha character to trigger the stemmer above)
        if word:
            output += p.stem(word, 0, len(word)-1)
        stemarray.append(output) if (len(output) > 2 and output not in stemarray) else None
    infile.close()
    return stemarray
if __name__ == "__main__":
starting_time = time.time()
print "\n+++++++++++++++++++++++++++++++++++"
print "TDS - Assignment 1"
print "+++++++++++++++++++++++++++++++++++\n"
"""
Inittializing Wordnet-Affect
@DEPENDENCIES: NLTK 3.1 or higher, WordNet 1.6 (unix-like version is utilised), WordNet-Domains 3.2
"""
YEAR_RANGE = range(1907, 2001, 4)
wna = fancy_output("Initializing Wordnet", WNAffect, starting_time, './wordnet-1.6/', './wn-domains-3.2/')
joy_terms = fancy_output("Getting the terms for the mood category JOY", get_emotion_terms, starting_time, 'joy')
joy_terms.extend([term for term in fancy_output("", get_emotion_terms, starting_time, 'liking') if term not in joy_terms])
joy_terms.extend([term for term in fancy_output("", get_emotion_terms, starting_time, 'love') if term not in joy_terms])
#joy_terms.extend([term for term in fancy_output("", get_emotion_terms, starting_time, 'levity') if term not in joy_terms])
#joy_terms.extend([term for term in fancy_output("", get_emotion_terms, starting_time, 'gratitude') if term not in joy_terms])
sadness_terms = fancy_output("Getting the terms for the mood category SADNESS", get_emotion_terms, starting_time, 'sadness')
filtered_dataset = fancy_output("Preprocessing the dataset", preprocess_database, starting_time, YEAR_RANGE)
spin = SpinCursor(msg="Computing the mood scores", minspin=5, speed=5)
spin.start()
joy_mood_scores = {}
sadness_mood_scores = {}
for year in YEAR_RANGE:
joy_mood_scores[year] = get_mood_score(joy_terms, year, filtered_dataset)
sadness_mood_scores[year] = get_mood_score(sadness_terms, year, filtered_dataset)
if len(joy_mood_scores) == len(YEAR_RANGE): spin.stop()
sys.stdout.write("Elapsed time - %3.6f seconds" % (time.time()-starting_time))
print '\n'
joy_mood_scores_mean = np.mean(joy_mood_scores.values())
joy_mood_scores_std = np.std(joy_mood_scores.values())
sadness_mood_scores_mean = np.mean(sadness_mood_scores.values())
sadness_mood_scores_std = np.std(sadness_mood_scores.values())
normalize = lambda mood_val: (mood_val - joy_mood_scores_mean)/(1.0 * joy_mood_scores_std)
joy_normalized = {}
for key in joy_mood_scores.keys():
joy_normalized[key] = normalize(joy_mood_scores[key])
normalize = lambda mood_val: (mood_val - sadness_mood_scores_mean)/(1.0 * sadness_mood_scores_std)
sadness_normalized = {}
for key in sadness_mood_scores.keys():
sadness_normalized[key] = normalize(sadness_mood_scores[key])
x = [year for year in YEAR_RANGE]
y = [joy_normalized[key] - sadness_normalized[key] for key in YEAR_RANGE]
f2 = interp1d(x, y, kind='cubic')
xnew = range(1907, 2001, 2)
plt.plot(xnew, f2(xnew))
markerline, stemlines, baseline = plt.stem(x, y, '-.')
plt.grid()
axes = plt.gca()
axes.set_xlim([1897, 2003])
plt.title('Historical periods of positive and negative moods')
plt.xlabel('Year')
plt.ylabel('Joy - Sadness (Z scores)')
plt.setp(markerline, 'markerfacecolor', 'b')
plt.setp(baseline, 'color', 'r', 'linewidth', 2)
plt.setp(stemlines, linewidth=1, color=[0.08,0.4,1])
plt.grid()
print "====== Simulation finished in ", time.time() - starting_time, " seconds =========\n"
plt.show()
| {
"repo_name": "Arnukk/TDS",
"path": "main_assignment1.py",
"copies": "1",
"size": "8251",
"license": "mit",
"hash": 2686718328171719000,
"line_mean": 39.8465346535,
"line_max": 176,
"alpha_frac": 0.6013816507,
"autogenerated": false,
"ratio": 3.339133953864832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4440515604564832,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akarapetyan'
# A simple function for reading the file and checking whether the data is compatible with the input rules
def checkMe(function):
    """Read test.in, build the weight matrix, and invoke ``function`` with it.

    Input format: first line holds n; each of the next n lines lists
    "node,weight" pairs whose weights become negated edge weights into
    column i+1. Node 0 is a virtual source with zero-weight edges to all.
    """
    #Loading the input file
    try:
        f = open("test.in", 'r')
        firstLine = f.readline().split()
        n = int(firstLine[0])
        newweights = {}
        for i in range(n+1):
            newweights[i] = {}
        for i in range(1,n+1):
            newweights[0][i] = 0
        for i in range(n):
            line = f.readline().split(" ")
            if "," in line[0]:
                for j in line:
                    # edge weights are negated so longest paths become shortest paths
                    newweights[int(j.split(",")[0])][i+1] = -int(j.split(",")[1])
                    print j.split(",")[0], j.split(",")[1]
        f.close()
    except:
        print "Something is wrong with the Input parameters, please check"
        exit()
    #Everything is right with the data, let's manage it carefully
    # swap main's code object into a local shell so it runs only after
    # validation succeeded; the code object must accept one argument
    def Caller(): pass
    Caller.__code__ = function.__code__
    Caller(newweights)
# Step 1: For each node prepare the destination and predecessor
def initialize(graph, source):
    """Bellman-Ford bookkeeping: per-node distance and predecessor maps.

    Every node starts infinitely far away with no predecessor; only the
    source is known to be reachable at distance 0.
    """
    dist = {node: float('Inf') for node in graph}
    pred = {node: None for node in graph}
    dist[source] = 0
    return dist, pred
def relax(node, neighbour, graph, d, p):
    """Edge relaxation: adopt the path through ``node`` if it is shorter."""
    candidate = d[node] + graph[node][neighbour]
    if candidate < d[neighbour]:
        # a shorter route to neighbour was found -- record it
        d[neighbour] = candidate
        p[neighbour] = node
def bellman_ford(graph, source):
    """Single-source shortest paths allowing negative edge weights.

    Returns (distances, predecessors); returns (False, False) when a
    negative-weight cycle is detected.
    """
    d, p = initialize(graph, source)
    # |V| - 1 rounds of relaxing every edge guarantee convergence
    for _ in range(len(graph) - 1):
        for u in graph:
            for v in graph[u]:
                relax(u, v, graph, d, p)
    # one extra pass: any further improvement implies a negative cycle
    for u in graph:
        for v in graph[u]:
            if d[u] + graph[u][v] < d[v]:
                return False, False
    return d, p
def main(newweights):
    """Solve the scheduling LP via Bellman-Ford from virtual source node 0.

    Writes the absolute values of all non-source distances to test.out,
    or INFEASIBLE when a negative cycle makes the constraints unsatisfiable.
    """
    f = open('test.out', 'w+')
    print newweights
    d, r = bellman_ford(newweights, 0)
    print d
    if d:
        # Python 2: dict.values() is a list; skip the source's own distance
        result = d.values()[1:]
        for i in range(len(result)):
            result[i] = abs(result[i])
            f.write(str(result[i]) + " ")
    else:
        f.write("INFEASIBLE")
    f.close()
# checkMe validates the input file and then runs main with the parsed weights
checkMe(main)
| {
"repo_name": "Arnukk/DAA",
"path": "Hmw3/Q2_2.py",
"copies": "1",
"size": "2499",
"license": "mit",
"hash": 4894109619976993000,
"line_mean": 30.2375,
"line_max": 105,
"alpha_frac": 0.5602240896,
"autogenerated": false,
"ratio": 3.4564315352697097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45166556248697093,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akarapetyan'
vertices = []
class Vertex:
    """A job node in the precedence graph.

    Id          -- zero-based index of the vertex
    p           -- processing time of the job
    alreadyDone -- DFS colour: 0 = unvisited, 1 = on current path, 2 = finished
    childNodes  -- indices of the prerequisite vertices
    """
    def __init__(self, Id, p, childNodes):
        self.Id = Id
        self.p = p
        self.childNodes = childNodes
        self.alreadyDone = 0
# A simple function for reading the file and checking whether the data is compatible with the input rules
def checkMe(function):
    """Read test.in into the module-level ``vertices`` list and run ``function``.

    Input format: first line holds n; each of the next n lines gives a
    vertex's processing time followed by its prerequisite vertex indices.
    """
    #Loading the input file
    try:
        f = open("test.in", 'r')
        firstLine = f.readline().split()
        n = int(firstLine[0])
        for i in range(n):
            line = f.readline().split()
            vertices.append(Vertex(i, int(line[0]), list(map(int, line[1:]))))
        f.close()
    except:
        print "Something is wrong with the Input parameters, please check"
        exit()
    #Everything is right with the data, let's manage it carefully
    # run main only after validation, by borrowing its code object
    def Caller(): pass
    Caller.__code__ = function.__code__
    Caller()
def CycleFinder(vertices):
    """Return True when the precedence graph contains a cycle.

    Starts a depth-first search from every still-unvisited vertex.
    """
    for start in vertices:
        if start.alreadyDone == 0:
            start.alreadyDone = 1
            if DFS_2(vertices, start):
                return True
    return False
def DFS_2(vertices, vertex):
    """Depth-first visit of ``vertex``; True when a back edge (cycle) is found.

    Colour codes on alreadyDone: 0 = unvisited, 1 = on the current DFS path
    (grey), 2 = fully explored (black). Meeting a grey child means a cycle.
    """
    found_cycle = False
    for child_id in vertex.childNodes:
        child = vertices[child_id]
        if child.alreadyDone == 0:
            child.alreadyDone = 1
            found_cycle = DFS_2(vertices, child)
            if found_cycle:
                break
        elif child.alreadyDone == 1:
            found_cycle = True
            break
    vertex.alreadyDone = 2
    return found_cycle
def Sheduling_DP(vertex, Schedule):
    """Memoised DP: earliest start time of ``vertex`` given its prerequisites.

    ``Schedule`` maps vertex id -> earliest start time (None = not yet
    computed). The start time is the max over children of their start time
    plus their processing time.
    NOTE: child lookups go through the module-level ``vertices`` list,
    not through a parameter.
    """
    if Schedule[vertex.Id] is not None:
        return Schedule[vertex.Id]
    if len(vertex.childNodes) == 0:
        # no prerequisites: the job can start immediately
        Schedule[vertex.Id] = 0
        return Schedule[vertex.Id]
    earliest = 0
    for child_id in vertex.childNodes:
        child = vertices[child_id]
        finish = Sheduling_DP(child, Schedule) + child.p
        if finish > earliest:
            earliest = finish
    Schedule[vertex.Id] = earliest
    return Schedule[vertex.Id]
def main():
    """Compute earliest start times for all jobs and write them to test.out.

    Writes INFEASIBLE when the precedence graph contains a cycle.
    """
    f = open('test.out', 'w+')
    if CycleFinder(vertices):
        f.write("INFEASIBLE")
    else:
        # memo table: vertex id -> earliest start time (None until computed)
        Schedule = {}
        for vertex in vertices:
            Schedule[vertex.Id] = None
        for temp in vertices:
            Sheduling_DP(temp, Schedule)
        for temp in Schedule.keys():
            f.write(str(Schedule[temp]) + str(" "))
    f.close()
# checkMe parses test.in into ``vertices`` and then runs main
checkMe(main)
| {
"repo_name": "Arnukk/DAA",
"path": "Hmw3/Q2_1.py",
"copies": "1",
"size": "2520",
"license": "mit",
"hash": -7580752819547152000,
"line_mean": 27,
"line_max": 105,
"alpha_frac": 0.5757936508,
"autogenerated": false,
"ratio": 3.8181818181818183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4893975468981818,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akarpov'
def write_greyscale(filename, pixels):
    """Write an 8-bit greyscale BMP image file.

    Args:
        filename: destination path.
        pixels: sequence of rows (top to bottom); each row is a sequence
            of integer pixel values in the range 0..255.
    """
    height = len(pixels)
    width = len(pixels[0])

    with open(filename, 'wb') as bmp:
        # --- BMP file header (14 bytes) ---
        bmp.write(b'BM')

        size_bookmark = bmp.tell()  # the next four bytes hold the 32-bit integer filesize
        bmp.write(b'\x00\x00\x00\x00')  # little-endian placeholder, filled in at the end

        bmp.write(b'\x00\x00')  # unused 16-bit integer
        bmp.write(b'\x00\x00')  # unused 16-bit integer

        pixel_offset_bookmark = bmp.tell()  # 32-bit offset to the pixel data
        bmp.write(b'\x00\x00\x00\x00')  # placeholder, filled in at the end

        # --- BITMAPINFOHEADER (40 bytes) ---
        bmp.write(b'\x28\x00\x00\x00')  # header length = 40, little-endian
        bmp.write(_int32_to_bytes(width))
        bmp.write(_int32_to_bytes(height))
        # these fields are fixed for greyscale
        bmp.write(b'\x01\x00')          # number of image planes (always 1)
        bmp.write(b'\x08\x00')          # 8 bits per pixel for greyscale
        bmp.write(b'\x00\x00\x00\x00')  # no compression
        bmp.write(b'\x00\x00\x00\x00')  # image size (0 allowed when uncompressed)
        bmp.write(b'\x00\x00\x00\x00')  # horizontal resolution
        bmp.write(b'\x00\x00\x00\x00')  # vertical resolution
        bmp.write(b'\x00\x00\x00\x00')  # colours used
        bmp.write(b'\x00\x00\x00\x00')  # important colours
        # that was 40 bytes total, as promised!

        # --- colour palette: a linear greyscale ---
        # bug fix: each palette entry is an RGBQUAD of exactly 4 bytes
        # (blue, green, red, reserved); the original wrote 5 bytes per
        # entry, misaligning every entry after the first.
        for c in range(256):
            bmp.write(bytes((c, c, c, 0)))

        # --- pixel data ---
        # bug fix: every BMP pixel row must be padded to a multiple of
        # 4 bytes (a no-op when width is already a multiple of 4)
        padding = b'\x00' * ((4 - width % 4) % 4)
        pixel_data_bookmark = bmp.tell()
        for row in reversed(pixels):  # BMP files are stored bottom to top
            bmp.write(bytes(row))
            bmp.write(padding)

        # --- fulfil the bookmarked promises ---
        eof_bookmark = bmp.tell()
        bmp.seek(size_bookmark)
        bmp.write(_int32_to_bytes(eof_bookmark))
        bmp.seek(pixel_offset_bookmark)
        bmp.write(_int32_to_bytes(pixel_data_bookmark))
def _int32_to_bytes(i):
return bytes((i & 0xff,
i >> 8 & 0xff,
i >> 16 & 0xff,
i >> 24 & 0xff))
| {
"repo_name": "alexakarpov/interview_problems",
"path": "python/bmp.py",
"copies": "2",
"size": "2083",
"license": "mit",
"hash": -1251989388202386000,
"line_mean": 31.0461538462,
"line_max": 93,
"alpha_frac": 0.5679308689,
"autogenerated": false,
"ratio": 3.0364431486880465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4604374017588047,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akeenan'
from django.core.management.base import BaseCommand
from xml.dom import minidom as XML_Parser
import xmltodict
import json
from collections import OrderedDict
from backend.models import Project, Dictionary, Gloss, Survey, Variety, Transcription, PartOfSpeech
class Command(BaseCommand):
    """Management command that imports a WordSurv6 XML export into the database."""
    args = '<project_to_import_to> <[file_to_import].xml>'
    help = 'Imports xml from WordSurv6 into the database'

    def handle(self, *args, **options):
        """
        This will import data from an xml export from wordsurv6.
        Expects exactly two positional args: project name and .xml file path.
        """
        if not args or len(args) != 2:
            self.stdout.write('You must have all arguments')
        else:
            project_name = args[0]
            import_file_name = args[1]
            if import_file_name[import_file_name.rfind('.') + 1:] == 'xml':
                self.stdout.write('Running import_xml')
                root_node = XML_Parser.parse(import_file_name)
                xml_dict = xmltodict.parse(root_node.toxml())['survey']
                self.fix_dict(xml_dict)
                # dump the normalised structure so a failed import can be inspected
                with open("debug_xml_to_dict.json", 'w') as f:
                    f.write(json.dumps(xml_dict, indent=2))
                self.dict_to_db(project_name, xml_dict)
            else:
                # bug fix: self.stdout is an OutputWrapper and is not callable;
                # the original `self.stdout(...)` raised a TypeError here
                self.stdout.write("Must be an xml file!")

    def fix_dict(self, dic):
        """
        Names the entries the correct things to match the database. Removes unnecessary levels.
        :param dic: The dictionary from xmltodict
        :type dic: dict
        """
        dic['gloss'] = dic['glosses']['gloss']
        dic['variety'] = dic['word_lists']['word_list']
        del dic['glosses']
        del dic['word_lists']
        # xmltodict yields a single mapping (not a list) when there is one element
        if type(dic['gloss']) is not list:
            dic['gloss'] = [dic['gloss']]
        for gloss in dic['gloss']:
            gloss['transcription'] = gloss['transcriptions']['transcription']
            del gloss['transcriptions']

    def dict_to_db(self, project_name, dic):
        """
        Adds the dictionary from xmltodict to the database
        :param project_name: Name of the project you wish to add the info to. If it doesn't exist, it will be created.
        :type project_name: str
        :param dic: The dictionary from xmltodict
        :type dic: dict
        """
        project = Project.objects.filter(name=project_name)
        if len(project) > 0:
            p_id = project[0].id
        else:
            p_id = self.create_project(project_name)
        dict_id = self.create_dictionary('imported_dictionary', p_id)
        survey_id = self.create_survey(dic['name'], dic['description'], dict_id)
        variety_id = self.create_variety(dic['variety'], survey_id)
        for gloss in dic['gloss']:
            gloss_id = self.create_gloss(gloss['name'], gloss['definition'], gloss['part_of_speech'], dict_id)
            if type(gloss['transcription']) is not list:
                gloss['transcription'] = [gloss['transcription']]
            for transcription in gloss['transcription']:
                self.create_transcription(transcription['name'], gloss_id, variety_id)

    def create_project(self, name):
        """Create a Project row and return its id."""
        return self.add_obj_to_db(Project, {'name': name})

    def create_dictionary(self, name, project_id):
        """Create a Dictionary row and return its id (language_id 1 is assumed to exist)."""
        entry = {
            'name': name,
            'project_id': project_id,
            'language_id': 1
        }
        return self.add_obj_to_db(Dictionary, entry)

    def create_gloss(self, name, definition, part_of_speech, dict_id):
        """Create a Gloss row; the named PartOfSpeech must already exist."""
        entry = {
            'primary': name,
            'comment_tip': definition,
            'part_of_speech': PartOfSpeech.objects.filter(name=part_of_speech)[0],
            'dictionary_id': dict_id
        }
        return self.add_obj_to_db(Gloss, entry)

    def create_survey(self, name, description, dict_id):
        """Create a Survey row and return its id."""
        entry = {
            'name': name,
            'full_title': description,
            'dictionary_id': dict_id
        }
        return self.add_obj_to_db(Survey, entry)

    def convert_time(self, str):
        """Parse a date/time string and return it as a UTC-aware datetime.

        NOTE(review): the parameter name shadows the builtin ``str`` (kept
        for backward compatibility), and ``tzinfos=tzlocal`` passes the
        tzlocal class itself rather than a name->tzinfo mapping -- verify
        against the dateutil.parser.parse documentation.
        """
        from dateutil.parser import parse
        from dateutil.tz import tzlocal
        from pytz import utc
        return parse(str, tzinfos=tzlocal).astimezone(utc)

    def create_variety(self, variety, survey_id):
        """Create a Variety row from the XML variety mapping and return its id."""
        entry = {
            'name': variety['name'],
            'description': variety['description'],
            'start_date': self.convert_time(variety['start_date']),
            'end_date': self.convert_time(variety['end_date']),
            'surveyors': variety['surveyors'],
            'consultants': variety['consultants'],
            'language_helper': variety['language_helper'],
            'language_helper_age': variety['language_helper_age'],
            'reliability': variety['reliability'],
            'village': variety['village'],
            'province_state': variety['province_state'],
            'district': variety['district'],
            'subdistrict': variety['subdistrict'],
            'country': variety['country'],
            'coordinates': variety['coordinates'],
            'survey_id': survey_id,
        }
        return self.add_obj_to_db(Variety, entry)

    def create_transcription(self, ipa, gloss_id, variety_id):
        """Create a Transcription row linking a gloss and a variety."""
        entry = {
            'ipa': ipa,
            'gloss_id': gloss_id,
            'variety_id': variety_id
        }
        return self.add_obj_to_db(Transcription, entry)

    def add_obj_to_db(self, model, data):
        """Instantiate ``model`` with ``data``, save it, and return the new row's id."""
        self.stdout.write("Creating new {} with data: {}".format(model.__name__, data))
        obj = model(**data)
        obj.save()
        return obj.id
"repo_name": "tu-software-studio/websurv",
"path": "backend/management/commands/import_xml.py",
"copies": "1",
"size": "5628",
"license": "mit",
"hash": -1446255277047457500,
"line_mean": 37.2925170068,
"line_max": 118,
"alpha_frac": 0.5717839375,
"autogenerated": false,
"ratio": 3.8653846153846154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49371685528846154,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Akhil'
import cv2
import storage
import sqlite3
import cvutils
import itertools
import shutil
from numpy.linalg.linalg import inv
from numpy import loadtxt
homographyFilename = "laurier-homography.txt"
homography = inv(loadtxt(homographyFilename))
databaseFilename = "laurier.sqlite"
newFilename = "corrected.sqlite"
videoFilename = "laurier.avi"
cObjects = storage.loadTrajectoriesFromSqlite(newFilename, "object")
objects = storage.loadTrajectoriesFromSqlite(databaseFilename, "object")
features = storage.loadTrajectoriesFromSqlite(databaseFilename, "feature")
drawing = False # true if mouse is pressed
cArray = [] # stores new trajectory positions (temporary) in order to display the trace
fNo = 0 # stores the frame number
objTag = None # stores selected object's id
track = False # turns on track mode if true
merge = False # turns on merge mode if true
split = False # turns on split mode if true
delete = False # turns on delete mode if true
mergeList = [] # holds the id of objects to be merged
trace = [] # holds the trace coordinates
splitSelect = [] # holds trajectory ids selected for splitting
deleteList = [] # holds object ids to be deleted
pace = 0 # to adjust the video speed
def findObject(frameNum, x=None, y=None): # finds the object clicked on (utilizes original sqlite)
    """Locate objects visible at frameNum; select the one containing (x, y).

    With x and y omitted, returns the bounding boxes of all visible objects
    (used by drawBox). With coordinates given, sets the global objTag to the
    clicked object and, in merge mode, queues it for merging.
    NOTE: Python 2 only -- when x/y are None the chained comparison with
    numbers silently evaluates False instead of raising.
    """
    global objects, features, objTag, merge, mergeList
    box = []
    for obj in objects:
        if obj.existsAtInstant(frameNum):
            objFeatures = [features[i] for i in obj.featureNumbers]
            u = []
            v = []
            for f in objFeatures:
                if f.existsAtInstant(frameNum):
                    projectedPosition = f.getPositionAtInstant(frameNum).project(homography)
                    u.append(projectedPosition.x)
                    v.append(projectedPosition.y)
            # bounding box from the extremes of the projected feature points
            xmin = min(u)
            xmax = max(u)
            ymin = min(v)
            ymax = max(v)
            if x is None and y is None: # utilized when the function call is from drawBox()
                box.append([ymax, ymin, xmax, xmin, obj.getNum()])
            if xmax > x > xmin and ymax > y > ymin:
                print "object detected: " + format(obj.getNum())
                print "object position: " + format(obj.getPositionAtInstant(frameNum).project(homography))
                objTag = obj.getNum()
                if merge is True:
                    mergeList.append(obj.getNum())
    return box # returns pixel range for each object
def findTrajectory(frameNum): # finds the features selected by the user
    """Collect into splitSelect the ids of features whose drawn trajectory
    passes within 5 pixels of any point the user has traced (cArray)."""
    global cObjects, features, cArray, splitSelect
    for obj in cObjects:
        if obj.existsAtInstant(frameNum):
            cObjFeatures = [features[i] for i in obj.featureNumbers]
            for cObjFeature in cObjFeatures:
                if cObjFeature.existsAtInstant(frameNum):
                    # walk the feature's path up to the current frame
                    for f in range(cObjFeature.getFirstInstant(), frameNum):
                        position = cObjFeature.getPositionAtInstant(f).project(homography)
                        for coord in cArray:
                            # 5-pixel tolerance around the user's click/drag points
                            if coord[0]+5 > position[0] > coord[0]-5 and coord[1]+5 > position[1] > coord[1]-5:
                                if not cObjFeature.getNum() in splitSelect:
                                    splitSelect.append(cObjFeature.getNum())
def drawTrajectory(frame, frameNum): # draws trajectory for each object
    """Overlay trajectories on the frame.

    Normal mode: one red polyline per object. Split mode: one polyline per
    feature, yellow when the feature is selected for splitting, red otherwise.
    """
    global cObjects, features, splitSelect
    if split is False:
        for obj in cObjects:
            if obj.existsAtInstant(frameNum):
                prevPosition = obj.getPositionAtInstant(obj.getFirstInstant()).project(homography)
                for f in range(obj.getFirstInstant(), frameNum):
                    position = obj.getPositionAtInstant(f).project(homography)
                    cv2.line(frame, (position[0], position[1]), (prevPosition[0], prevPosition[1]), (0, 0, 255), 2)
                    prevPosition = position
    else:
        for obj in cObjects:
            if obj.existsAtInstant(frameNum):
                cObjFeatures = [features[i] for i in obj.featureNumbers]
                for cObjFeature in cObjFeatures:
                    if cObjFeature.existsAtInstant(frameNum):
                        if cObjFeature.getNum() in splitSelect:
                            # selected feature: highlight in yellow
                            prevPosition = cObjFeature.getPositionAtInstant(cObjFeature.getFirstInstant()).project(homography)
                            for f in range(cObjFeature.getFirstInstant(), frameNum):
                                position = cObjFeature.getPositionAtInstant(f).project(homography)
                                cv2.line(frame, (position[0], position[1]), (prevPosition[0], prevPosition[1]), (0, 255, 255), 1)
                                prevPosition = position
                        else:
                            prevPosition = cObjFeature.getPositionAtInstant(cObjFeature.getFirstInstant()).project(homography)
                            for f in range(cObjFeature.getFirstInstant(), frameNum):
                                position = cObjFeature.getPositionAtInstant(f).project(homography)
                                cv2.line(frame, (position[0], position[1]), (prevPosition[0], prevPosition[1]), (0, 0, 255), 1)
                                prevPosition = position
def drawBox(frame, frameNum): # annotates each object and highlights when clicked
    """Draw bounding boxes for all visible objects; the selected object
    (global objTag) is drawn yellow, the rest blue. Skipped in split mode."""
    global objTag
    if split is False:
        box = findObject(frameNum)
        for i in range(len(box)):
            # box entries are [ymax, ymin, xmax, xmin, object id]
            if box[i][4] == objTag:
                cv2.rectangle(frame, (box[i][3], box[i][0]), (box[i][2], box[i][1]), (0, 255, 255), 3)
            else:
                cv2.rectangle(frame, (box[i][3], box[i][0]), (box[i][2], box[i][1]), (255, 0, 0), 3)
def drawEditBox(frame): # for the static text
    """Draw the on-screen help text and a coloured border for the active mode
    (track = yellow, merge = green, split = blue, delete = red)."""
    global width, height
    if track is True:
        cv2.rectangle(frame, (0, 0), (width, height), (0, 255, 255), 3)
        cv2.putText(frame,"track mode", (width-100, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    else:
        cv2.putText(frame,"toggle track (t)", (width-130, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    if merge is True:
        cv2.rectangle(frame, (0, 0), (width, height), (0, 255, 0), 3)
        cv2.putText(frame,"merge mode", (width-125, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    else:
        cv2.putText(frame,"toggle merge (m)", (width-150, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    if split is True:
        cv2.rectangle(frame, (0, 0), (width, height), (255, 0, 0), 3)
        cv2.putText(frame,"split mode", (width-125, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    else:
        cv2.putText(frame,"toggle split (s)", (width-125, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    if delete is True:
        cv2.rectangle(frame, (0, 0), (width, height), (0, 0, 255), 3)
        cv2.putText(frame,"delete mode", (width-125, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    else:
        cv2.putText(frame,"delete obj (o)", (width-125, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    cv2.putText(frame,"reset edits (r)", (width-125, 125), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    cv2.putText(frame,"video speed (0-4)", (width-150, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
def sqlDelete():
    """Remove every object queued in deleteList from the corrected sqlite:
    its positions, its feature links, and the object row itself."""
    global deleteList
    try:
        connection = sqlite3.connect(newFilename)
        cursor = connection.cursor()
        id = None
        for objID in deleteList:
            # skip consecutive duplicates (the same object clicked repeatedly)
            if not id == objID:
                id = objID
                cursor.execute("delete from positions where trajectory_id in (select trajectory_id from objects_features where object_id = " + format(id) + ");")
                cursor.execute("delete from objects_features where object_id = " + format(id) + ";")
                cursor.execute("delete from objects where object_id = " + format(id) + ";")
        connection.commit()
        connection.close()
    except sqlite3.Error, e:
        print "Error %s:" % e.args[0]
def sqlSplit(newObjID): # splits an object into two
    """Move the features listed in splitSelect out of their current object
    and attach them to a brand-new object row with id newObjID."""
    global splitSelect, cObjects
    try:
        connection = sqlite3.connect(newFilename)
        cursor = connection.cursor()
        # find the object currently owning the first selected feature
        cursor.execute("SELECT object_id from objects_features where trajectory_id = " + format(splitSelect[0]) + ";")
        objID = cursor.fetchone()[0]
        cursor.execute("SELECT * from objects where object_id = " + format(objID) + ";")
        data = cursor.fetchone()
        # clone the owner's metadata into the new object row
        cursor.execute("insert into objects (object_id, road_user_type, n_objects) values (?, ?, ?);", (len(cObjects), data[1], data[2]))
        sql = "update objects_features set object_id = " + format(newObjID) + " where object_id = " + format(objID) + " and trajectory_id in (?"
        extension = ''.join(itertools.repeat(', ?', len(splitSelect)-1))
        sql = sql + extension + ");"
        cursor.execute(sql, splitSelect)
        del splitSelect[:]
        connection.commit()
        connection.close()
    except sqlite3.Error, e:
        print "Error %s:" % e.args[0]
def sqlMerge(): # merges two or more objects selected by the user
    """Merge all objects in mergeList into the first one.

    Reassigns their features to the first object, then fills any temporal
    gaps between consecutive merged objects by repeating the last known
    position frame by frame.
    """
    global mergeList, cObjects
    frameRange = [] # to store the first instant and last instant of the objects to be merged
    if len(mergeList)>1:
        try:
            connection = sqlite3.connect(newFilename)
            cursor = connection.cursor()
            for i in range(1, len(mergeList)):
                for obj in cObjects:
                    if obj.getNum() == mergeList[i]:
                        cursor.execute("delete from objects where object_id = " + format(mergeList[i]) + ";")
                        cursor.execute("update objects_features set object_id = " + format(mergeList[0]) + " where object_id = " + format(mergeList[i]) + ";")
            for i in range(len(mergeList)):
                for obj in cObjects:
                    if obj.getNum() == mergeList[i]:
                        frameRange.append([obj.getFirstInstant(), obj.getLastInstant(), obj.getNum()])
            frameRange = sorted(frameRange)
            for i in range(len(frameRange)-1):
                if frameRange[i][1] < frameRange[i+1][0]: # looks for discontinuity
                    for obj in cObjects:
                        if obj.getNum() == frameRange[i][2]:
                            position = obj.getPositionAtInstant(frameRange[i][1])
                    cursor.execute("SELECT max(trajectory_id) from positions where trajectory_id in (select trajectory_id from objects_features where object_id = "
                                   + format(mergeList[0]) + ") and frame_number = " + format(frameRange[i][1]) + ";")
                    tID = cursor.fetchone()[0]
                    # bridge the gap with the last known position of the earlier object
                    for f in range(frameRange[i][1]+1, frameRange[i+1][0]):
                        cursor.execute("insert into positions (trajectory_id, frame_number, x_coordinate, y_coordinate) values (?, ?, ?, ?);", (tID, f, position[0], position[1]))
            connection.commit()
            connection.close()
            del mergeList[:]
        except sqlite3.Error, e:
            print "Error %s:" % e.args[0]
def sqlTrack(objID, frames, coords): # performs delete and insert operations on the sqlite (new file)
    """Replace the stored positions of objID at the traced frames with the
    user-drawn coordinates, linearly bridging small frame gaps (< 5 frames)."""
    try:
        connection = sqlite3.connect(newFilename)
        cursor = connection.cursor()
        extension = ''.join(itertools.repeat(', ?', len(frames)-1))
        sql = "select min(trajectory_id) from positions where trajectory_id in (select trajectory_id from objects_features where object_id = " + format(objID) + ") and frame_number in (?"
        sql2 = "delete from positions where trajectory_id in (select trajectory_id from objects_features where object_id = " + format(objID) + ") and frame_number in (?"
        sql = sql + extension + ");"
        sql2 = sql2 + extension + ");"
        cursor.execute(sql, frames)
        tID = cursor.fetchone()[0] # tID will be the trajectory id of the new feature
        cursor.execute(sql2, frames)
        f = frames[0]
        for i in range(len(frames)):
            jump = frames[i] - f
            if 5 > jump > 1:
                # small gap: fill the skipped frames with the midpoint
                c = [(coords[i][0] + coords[i-1][0])/2, (coords[i][1] + coords[i-1][1])/2]
                for k in range(f+1, frames[i]):
                    cursor.execute("delete from positions where trajectory_id in (select trajectory_id from objects_features where object_id = " + format(objID) + ") and frame_number = " + format(k) + ";")
                    cursor.execute("insert into positions (trajectory_id, frame_number, x_coordinate, y_coordinate) values (?, ?, ?, ?);", (tID, k, c[0], c[1]))
            f = frames[i]
            cursor.execute("insert into positions (trajectory_id, frame_number, x_coordinate, y_coordinate) values (?, ?, ?, ?);", (tID, frames[i], coords[i][0], coords[i][1]))
        connection.commit()
        connection.close()
    except sqlite3.Error, e:
        print "Error %s:" % e.args[0]
def tracing(): # extract data from the trace array, removing redundant data for a single frame
    """Convert the raw mouse trace into per-object frame/coordinate lists and
    commit each run to the database via sqlTrack.

    trace records are [object id, frame number, x, y]; only the first point
    per frame is kept, and screen points are back-projected with the
    inverted homography before storage.
    """
    global trace
    frames = []
    coords = []
    tempF = None
    temp = None
    for record in trace:
        if not temp == record[0]:
            # object id changed: flush the accumulated run for the previous object
            if not len(frames) == 0:
                sqlTrack(temp, frames, coords)
                del frames[:]
                del coords[:]
            temp = record[0]
        if not tempF == record[1]:
            tempF = record[1]
            frames.append(tempF)
            point = [record[2], record[3]]
            # screen pixels -> world coordinates
            invH = cvutils.invertHomography(homography)
            coord = cvutils.project(invH, point)
            coords.append([coord[0][0], coord[1][0]])
    sqlTrack(temp, frames, coords)
def coordinates(event, x, y, flags, param):
    """OpenCV mouse callback for the 'Video' window.

    Left-button down selects the object under the cursor; while the button
    is held, movement records a hand-drawn trajectory (track mode), marks
    objects for deletion (delete mode), or picks trajectories (split mode).
    Button release deselects and clears the drawn polyline.
    """
    global drawing, cArray, fNo, objTag, trace, deleteList
    if event == cv2.EVENT_LBUTTONDOWN:
        print x, y
        drawing = True
        cArray.append([x, y])
        # Select the object (if any) at the clicked point in the current frame.
        findObject(fNo, x, y)
        if objTag is not None and track == True:
            trace.append([objTag, fNo, x, y])
            print "tracing object: " + format(objTag) + " (" + format(x) + " ," + format(y) + ")"
        if split is True:
            findTrajectory(fNo)
        if objTag is not None and delete == True:
            deleteList.append(objTag)
    elif event == cv2.EVENT_MOUSEMOVE:
        # Only record while the left button is held down.
        if drawing == True:
            cArray.append([x, y])
            if objTag is not None and track == True:
                trace.append([objTag, fNo, x, y])
                print "tracing object: " + format(objTag) + " (" + format(x) + " ," + format(y) + ")"
    elif event == cv2.EVENT_LBUTTONUP:
        objTag = None  # deselects the object
        drawing = False
        # Discard the on-screen polyline points.
        del cArray[:]
# Main playback/editing loop: shows the video with overlays and reacts to
# single-key mode toggles.  Keys: ESC=commit trace & quit, t=track, s=split,
# m=merge, o=delete, r=reset working DB copy, 0-4=playback speed.
cap = cv2.VideoCapture(videoFilename)
cv2.namedWindow('Video')
cv2.setMouseCallback('Video', coordinates)
width = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
height = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
# Next free object id = one past the existing objects.
newObjID = len(cObjects)
while(cap.isOpened()):
    ret, frame = cap.read()
    # Overlay stored annotations on the current frame.
    drawBox(frame, fNo)
    drawTrajectory(frame, fNo)
    drawEditBox(frame)
    if split is True:
        findTrajectory(fNo)
    for i in range(len(cArray)-1):  # displays the user drawn trajectory
        cv2.line(frame, (cArray[i][0], cArray[i][1]), (cArray[i+1][0], cArray[i+1][1]), (0, 255, 0), 2)
    cv2.imshow('Video', frame)
    fNo += 1
    k = cv2.waitKey(pace) & 0xFF  # set cv2.waitKey(0) for frame by frame editing
    if k == 27:  # ESC: commit any pending trace, then exit
        if trace:
            tracing()
        break
    elif k == 116:  # 't': toggle track mode (modes are mutually exclusive)
        track = track != True
        merge = False
        split = False
        delete = False
    elif k == 115:  # 's': toggle split mode
        split = split != True
        if split is False:  # calling sqlSplit() while coming out of split mode
            if splitSelect:
                sqlSplit(newObjID)
                newObjID += 1
        track = False
        merge = False
        delete = False
    elif k == 109:  # 'm': toggle merge mode
        merge = merge != True
        if merge is False:  # calling sqlMerge() while coming out of merge mode
            sqlMerge()
        split = False
        track = False
        delete = False
    elif k == 111:  # 'o': toggle delete mode
        delete = delete != True
        if delete is False:  # calling sqlDelete() while coming out of delete mode
            if deleteList:
                sqlDelete()
        split = False
        merge = False
        track = False
    elif k == 114:  # 'r': restore working copy from the original sqlite db
        shutil.copy2(databaseFilename, newFilename)
    elif k == 48:   # '0': pause-like (wait indefinitely per frame)
        pace = 0
    elif k == 49:   # '1'..'4': faster playback via shorter waitKey delays
        pace = 150
    elif k == 50:
        pace = 100
    elif k == 51:
        pace = 50
    elif k == 52:
        pace = 25
cap.release()
cv2.destroyAllWindows()
| {
"repo_name": "Transience/tracker",
"path": "main.py",
"copies": "1",
"size": "16902",
"license": "mit",
"hash": 5986292736164098000,
"line_mean": 47.2914285714,
"line_max": 205,
"alpha_frac": 0.5788072417,
"autogenerated": false,
"ratio": 3.709019091507571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4787826333207571,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akil.harris'
import geojson
import argparse
import pprint
import psycopg2
from admin.postgres_connector import *
data = None
def read_file(filenames):
    """Load each GeoJSON file in *filenames* into the module-level ``data``
    and pretty-print every feature's properties.

    Note: ``data`` ends up holding the contents of the LAST file read —
    each iteration overwrites it.
    """
    global data
    for name in filenames:
        with open(name, encoding='utf8', newline='') as handle:
            data = geojson.load(handle)
        for feature in data['features']:
            pprint.pprint(feature['properties'])
def save_zip_code_data():
    """Update each ``zip_codes`` row in the nyc_campaign_finance database
    with the properties and geometry of the GeoJSON loaded into the
    module-level ``data`` by read_file()."""
    # NOTE(review): database credentials are hard-coded in source — move them
    # to configuration/environment before publishing this file.
    db = PostgreSQLConnector('nyc_campaign_finance', 'akil', 'c4mpf1y@h', 'localhost', '5432')
    cursor = db.get_cursor()
    for feature in data['features']:
        properties = feature['properties']
        geometry = feature['geometry']
        try:
            # Parameterized UPDATE keyed on the GeoJSON postalCode.
            cursor.execute("UPDATE zip_codes SET po_name = %s, state = %s, borough = %s, state_fips = %s, city_fips = %s, "
                           "bldg_postal_code = %s, shape_length = %s, shape_area = %s, api_url = %s, geojson = %s WHERE "
                           "zip_code = %s", (properties['PO_NAME'], properties['STATE'], properties['borough'],
                                             properties['ST_FIPS'], properties['CTY_FIPS'],
                                             properties['BLDGpostalCode'], properties['Shape_Leng'],
                                             properties['Shape_Area'], properties['@id'], geojson.dumps(geometry),
                                             properties['postalCode']))
            print("Inserted data for:")
            pprint.pprint(properties)
            print("++++++++++++++++++++++++++++++")
        except psycopg2.IntegrityError:
            # Constraint violation for this zip code: report and continue.
            print("error inserting " + properties['postalCode'])
    # Single commit after all rows are processed.
    db.commit_changes()
# Command-line interface: positional arg_files are the GeoJSON files to load.
parser = argparse.ArgumentParser(description='Parse geojson files.')
parser.add_argument('arg_files', metavar='file_name.geojson', type=str, nargs="+", help='A list of geojson files to parse.')
args = parser.parse_args()
# Bug fix: the parsed arguments were previously discarded in favour of a
# hard-coded absolute Windows path, which made the declared CLI useless and
# the script non-portable.  Use the files the caller actually supplied.
read_file(args.arg_files)
save_zip_code_data()
"repo_name": "akilism/nyc-campaign-finance",
"path": "admin/zipcode_parser.py",
"copies": "1",
"size": "2193",
"license": "mit",
"hash": -5896953271314026000,
"line_mean": 40.3962264151,
"line_max": 129,
"alpha_frac": 0.5782033744,
"autogenerated": false,
"ratio": 4.03125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51094533744,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akilharris'
import httplib
from bs4 import BeautifulSoup
import os
#Load http://www.nyc.gov/html/nypd/html/traffic_reports/traffic_summons_reports.shtml
#grab all pdfs and save to a folder
# Destination directory (relative to the current working dir) for the PDFs.
path = "raw_data/pdf/"
def scrape(url):
    """Fetch *url* from www.nyc.gov over plain HTTP.

    Returns the raw response body, or None when the server does not answer
    200 OK.  The connection is always closed — the original version leaked
    the connection (and implicitly returned None) on any non-200 response.
    """
    conn = httplib.HTTPConnection("www.nyc.gov")
    try:
        conn.request("GET", url)
        response = conn.getresponse()
        if response.status == 200:
            return response.read()
        return None
    finally:
        conn.close()
def parse(extension, raw_data):
    """Return every href in the HTML *raw_data* whose URL contains *extension*.

    Anchors without an href attribute are skipped: ``link.get("href")``
    returns None for them, and the original ``None.find(...)`` call raised
    AttributeError on such pages.
    """
    links = []
    soup = BeautifulSoup(raw_data)
    for link in soup.find_all("a"):
        href = link.get("href")
        if href and href.find(extension) != -1:
            links.append(href)
    return links
def save_links(links):
    """Download every PDF in *links* into ``path``.

    Side effect: the process working directory is changed to ``path`` so
    each file is written there.
    """
    if not os.path.exists(path):
        # os.makedirs creates all missing intermediates in one call; the
        # original part-by-part os.mkdir loop raised OSError whenever a
        # prefix of the path already existed.
        os.makedirs(path)
    os.chdir(path)
    # print(os.getcwd())
    for link in links:
        # Site-relative links use "../.." — rewrite to the absolute section.
        link = str(link).replace("../..", "/html/nypd")
        file_name = link.split("/")[-1]
        # Bug fix: str.strip("sum.pdf") removes any of the characters
        # {s,u,m,.,p,d,f} from BOTH ends (e.g. "7psum.pdf" -> "7"), not the
        # literal suffix.  Remove the suffix explicitly instead.
        if file_name.endswith("sum.pdf"):
            precinct = file_name[:-len("sum.pdf")]
        else:
            precinct = file_name
        print("Saving: " + precinct + " - " + os.getcwd() + "/" + file_name)
        file_data = scrape(link)
        with open(file_name, "wb") as f:
            f.write(file_data)
def getPDFs():
    """Entry point: fetch the NYPD traffic-summons index page, collect the
    .pdf links it contains, and download each file into ``path``."""
    summons_page = scrape("/html/nypd/html/traffic_reports/traffic_summons_reports.shtml")
    pdf_links = parse(".pdf", summons_page)
    save_links(pdf_links)


getPDFs()
| {
"repo_name": "akilism/moving_violation_scraper",
"path": "scraper.py",
"copies": "1",
"size": "1500",
"license": "mit",
"hash": 3535982703556535300,
"line_mean": 24.4237288136,
"line_max": 86,
"alpha_frac": 0.5806666667,
"autogenerated": false,
"ratio": 3.2822757111597376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9357643407977972,
"avg_score": 0.0010597939763530401,
"num_lines": 59
} |
__author__ = 'akoziol'
import os, errno, re, shutil, subprocess, json, sys, time, gzip
from glob import glob
from argparse import ArgumentParser
from multiprocessing import Pool
from collections import defaultdict
# Parser for command-line arguments
parser = ArgumentParser(description='Prep Illumina fastq metagenome files to be processed by OneCodex')
parser.add_argument('-v', '--version', action='version', version='%(prog)s')
parser.add_argument('-p', '--path', required=True, help='Specify path')
parser.add_argument('-u', '--upload', required=False, default=False, help='Upload metagenome files following preparation steps?')
# Get the arguments into a dict keyed by option name
args = vars(parser.parse_args())
# Define variables from the arguments - there may be a more streamlined way to do this
path = args['path']
upload = args['upload']
# NOTE(review): hard-coded OneCodex API key committed to source — rotate it
# and load from environment/config instead.
apikey = '17d05d3bfde945fea1f3d1a1db5a0b1f'
# Start time, used for the elapsed-time report at the end of the run
start = time.time()
# Welcome message
print("Welcome to the CFIA Metagenome preparation pipeline.")
def make_path(inPath):
    """Create directory *inPath*, including any missing parents.

    Equivalent to ``mkdir -p``: an already-existing directory is treated as
    success, while any other OSError propagates to the caller.
    (After http://stackoverflow.com/questions/273192)
    """
    try:
        os.makedirs(inPath)
    except OSError as exception:
        if exception.errno == errno.EEXIST:
            pass  # directory already present: nothing to do
        else:
            raise
def make_dict():
    """Return an arbitrarily-nestable dictionary: missing keys autovivify
    into further dictionaries, Perl-style."""
    nested = defaultdict(make_dict)
    return nested
# Initialise the dictionary responsible for storing the report data;
# autovivifying, so arbitrarily deep keys can be assigned directly.
metadataFile = defaultdict(make_dict)
def commandR(sampleNames, path):
    """Opens the *name*_metadataCollection.json file and extracts any commands performed from previous iterations of the
    pipeline on data in the supplied path"""
    # Initialise the command dictionary
    performedCommands = defaultdict(make_dict)
    # Open the *name*_metadataCollection.json file for each sample
    for name in sampleNames:
        countSize = 0
        # Only parse the report when it exists and is non-empty —
        # json.load would raise on an empty file.
        if os.path.isfile("%s/%s/%s_metadataCollection.json" % (path, name, name)):
            countSize = os.stat("%s/%s/%s_metadataCollection.json" % (path, name, name)).st_size
        if countSize != 0:
            with open("%s/%s/%s_metadataCollection.json" % (path, name, name)) as jsonReport:
                # Load the data
                jsonData = json.load(jsonReport)
                if jsonData:
                    # Find the precise command used
                    for command in jsonData["commands"]:
                        # Skip "N/A" placeholders and accidentally-serialised
                        # defaultdict reprs; keep only real command strings.
                        if not "N/A" in jsonData["commands"][command] and not "defaultdict" in jsonData["commands"][command]:
                            # Populate the performedCommands dictionary as appropriate
                            performedCommands[name][str(command)] = str(jsonData["commands"][command])
    # Return the dictionary
    return performedCommands
def jsonR(sampleNames, path, metadata, fileType):
    """Write each sample's metadata as pretty-printed JSON to
    <path>/<name>/<name>_metadata<fileType>.json and copy every report into
    the common <path>/reports folder.
    """
    # Make the reports folder as required
    reportPath = "%s/reports" % path
    make_path(reportPath)
    for name in sampleNames:
        newPath = path + "/" + name
        reportName = "%s_metadata%s.json" % (name, fileType)
        # Context manager guarantees the handle is closed even when the dump
        # raises — the original open()/close() pair leaked on error.
        with open("%s/%s" % (newPath, reportName), "wb") as JSONreport:
            output = json.dumps(metadata[name], sort_keys=True, indent=4, separators=(',', ': '))
            JSONreport.write(output)
        # Move all the reports to a common directory
        shutil.copy("%s/%s" % (newPath, reportName), "%s/%s" % (reportPath, reportName))
def foldererPrepProcesses(sampleName, path):
    """A helper function to make a pool of processes to allow for a
    multi-processed approach to decompressing each sample's .gz files."""
    foldererPrepArgs = []
    # This used to check to see if the __name__ == '__main__', but since this is run as a module, the __name__
    # must equal the name of the script
    if __name__ == '__main__':
        createfoldererPool = Pool()
        # Prepare a tuple of the arguments (strainName and path)
        for name in sampleName:
            foldererPrepArgs.append((name, path))
        # This map function allows for multi-processing
        createfoldererPool.map(folderer, foldererPrepArgs)
def folderer(args):
    """Decompress every .gz fastq belonging to one strain with gzip.

    *args* is a (name, path) tuple, unpacked explicitly: the original tuple
    parameter ``def folderer((name, path))`` is invalid in Python 3
    (PEP 3113).  Pool.map still supplies a single tuple, so callers are
    unaffected.
    """
    name, path = args
    # Get any .gz file starting with the strain name into a list
    gzFiles = glob("%s/%s/*.gz" % (path, name))
    # If there are any .gz files...
    if gzFiles:
        for gzFile in gzFiles:
            # gzip -d removes the .gz after decompressing; --force overwrites
            # an existing decompressed copy.
            gzipCommand = "gzip -d --force %s" % gzFile
            subprocess.call(gzipCommand, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
    sys.stdout.write('.')
def fileR(list):
    """Return the set of strain names obtained by stripping known suffix
    conventions from the sequence filenames in *list*.

    Handles e.g. 2015-SEQ-001_S1_L001_R1_001.fastq(.gz),
    2015-SEQ-001_R1_001.fastq.gz, 2015-SEQ-001_R1.fastq.gz,
    2015-SEQ-001_1.fastq.gz and 2015-SEQ-001.fastq -> 2015-SEQ-001.

    NOTE: the parameter shadows the builtin ``list``; the name is kept for
    interface compatibility with existing callers.
    """
    fileSet = set()
    for seqFile in list:
        # Raw strings make the regex escapes explicit, and the dot before
        # "fastq" is now escaped — the original unescaped "." matched ANY
        # character, so names like "Xzfastq" could be mis-split.
        # _S\d+_L001_R\d_001.fastq(.gz) is a typical unprocessed Illumina file
        if re.search(r"_S\d+_L001", seqFile):
            fileSet.add(re.split(r"_S\d+_L001", seqFile)[0])
        # _R\d_001.fastq(.gz) files come from the SPAdes assembly pipeline
        elif re.search(r"_R\d_001", seqFile):
            fileSet.add(re.split(r"_R\d_001", seqFile)[0])
        # _R\d.fastq(.gz) is a simple paired-end naming scheme
        elif re.search(r"R\d\.fastq", seqFile):
            fileSet.add(re.split(r"_R\d\.fastq", seqFile)[0])
        # _\d.fastq(.gz) or -\d.fastq(.gz)
        elif re.search(r"[-_]\d\.fastq", seqFile):
            fileSet.add(re.split(r"[-_]\d\.fastq", seqFile)[0])
        # plain .fastq is the last option
        else:
            fileSet.add(re.split(r"\.fastq", seqFile)[0])
        sys.stdout.write('.')
    return fileSet
def nameExtractor():
    """Collect the unique strain names present in the current directory.

    Names are derived from .gz files, .fastq files and per-strain folders;
    the 'reports' and 'results' folders are excluded.  Returns a list.
    """
    discovered = set()
    # .gz and .fastq files share the same suffix-stripping logic (fileR).
    for pattern in ("*.gz", "*.fastq"):
        candidates = glob(pattern)
        if candidates:
            discovered.update(fileR(candidates))
    # Existing per-strain folders also contribute names.
    for seqFolder in glob("*/"):
        if "reports" not in seqFolder and "results" not in seqFolder:
            discovered.add(seqFolder.rsplit("/")[0])
    # Hand back an easily-parseable list rather than the set itself.
    return list(discovered)
def seqMovR(path, seqNames):
    """Create a folder per strain name and move that strain's files into it.

    Files are matched by name in the top level of *path*; folders are left
    in place.
    """
    for seqName in seqNames:
        # Make folders as required
        make_path("%s/%s" % (path, seqName))
        # Plain substring test instead of re.search on the raw name: a strain
        # name containing regex metacharacters (e.g. '+', '(') previously
        # crashed or mis-matched.
        filecheck = [f for f in os.listdir(path) if seqName in f]
        for seqFile in filecheck:
            # Move files, ignore folders
            if os.path.isfile(seqFile):
                sys.stdout.write('.')
                shutil.move(seqFile, "%s/%s/%s" % (path, seqName, seqFile))
    sys.stdout.write('.')
def trimmomaticPrepProcesses(sampleName, path, metadata, commands):
    """A helper function to make a pool of processes to allow for a
    multi-processed approach to trimmomatic quality trimming.

    Returns the list of per-sample metadata dicts produced by the workers
    (merged back into one dict later by filler())."""
    trimmomaticPrepArgs = []
    output = {}
    # This used to check to see if the __name__ == '__main__', but since this is run as a module, the __name__
    # must equal the name of the script
    if __name__ == '__main__':
        createtrimmomaticPool = Pool()
        # Prepare a tuple of the arguments (strainName and path)
        for name in sampleName:
            trimmomaticPrepArgs.append((name, path, metadata, commands))
        # This map function allows for multi-processing
        output = createtrimmomaticPool.map(trimmomatic, trimmomaticPrepArgs)
    return output
def trimmomatic(args):
    """Run trimmomatic quality trimming for one sample and record the exact
    system call in the sample's metadata.

    *args* is a (name, path, metadata, commands) tuple, unpacked explicitly:
    the original tuple parameter is invalid in Python 3 (PEP 3113).
    Pool.map still supplies a single tuple, so callers are unaffected.
    Returns the updated metadata mapping.
    """
    name, path, metadata, commands = args
    # Get the .fastq files in the sample folder
    seqFiles = sorted(glob("%s/%s/*.fastq" % (path, name)))
    # create a handy variable for use in the trimmomatic system call
    newPath = "%s/%s/%s" % (path, name, name)
    # Ensure that trimmomatic has not already been run on these files
    if not commands[name]["trimmomaticCall"]:
        # Treat paired-end samples slightly differently than single-end samples
        if len(seqFiles) == 2:
            # Prepare the trimmomatic call
            trimmomaticCall = "trimmomatic-0.30.jar PE -threads 24 -phred33 -trimlog %s.log " \
                              "%s %s %s.paired1.fq %s.unpaired1.fq %s.paired2.fq %s.unpaired2.fq " \
                              "ILLUMINACLIP:/home/blais/Bioinformatics/Trimmomatic-0.30/adapters/TruSeq3-PE.fa:2:30:10 " \
                              "LEADING:10 TRAILING:10 SLIDINGWINDOW:4:30 MINLEN:36" \
                              % (newPath, seqFiles[0], seqFiles[1], newPath, newPath, newPath, newPath)
        else:
            trimmomaticCall = "trimmomatic-0.30.jar SE -threads 24 -phred33 -trimlog %s.log " \
                              "%s %s.trimmed.fq " \
                              "ILLUMINACLIP:/home/blais/Bioinformatics/Trimmomatic-0.30/adapters/TruSeq3-PE.fa:2:30:10 " \
                              "LEADING:10 TRAILING:10 SLIDINGWINDOW:4:30 MINLEN:36" \
                              % (newPath, seqFiles[0], newPath)
        # Run trimmomatic
        subprocess.call(trimmomaticCall, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        # Record the system call so a re-run can skip this step
        metadata[name]["commands"]["trimmomaticCall"] = trimmomaticCall
        # Remove the original .fastq files
        for seqFile in seqFiles:
            os.remove(seqFile)
    else:
        # Already trimmed: carry the previously recorded call forward.
        metadata[name]["commands"]["trimmomaticCall"] = commands[name]["trimmomaticCall"]
    sys.stdout.write('.')
    return metadata
def pairedEndJoinerPrepProcesses(sampleName, path, metadata, commands):
    """A helper function to make a pool of processes to allow for a
    multi-processed approach to joining trimmed paired-end reads.

    Returns the list of per-sample metadata dicts produced by the workers."""
    pairedPrepArgs = []
    output = {}
    # This used to check to see if the __name__ == '__main__', but since this is run as a module, the __name__
    # must equal the name of the script
    if __name__ == '__main__':
        createpairedPool = Pool()
        # Prepare a tuple of the arguments (strainName and path)
        for name in sampleName:
            pairedPrepArgs.append((name, path, metadata, commands))
        # This map function allows for multi-processing
        output = createpairedPool.map(endPairer, pairedPrepArgs)
    return output
def endPairer(args):
    """Join quality-trimmed paired-end fastq files with Pear, recording the
    system call in the sample metadata.

    *args* is a (name, path, metadata, commands) tuple, unpacked explicitly:
    the original tuple parameter is invalid in Python 3 (PEP 3113).
    Returns the updated metadata mapping.
    """
    name, path, metadata, commands = args
    seqFiles = sorted(glob("%s/%s/*.paired*" % (path, name)))
    # create a handy variable for use in the endpairer system call
    newPath = "%s/%s/%s" % (path, name, name)
    # Ensure that the joiner has not already been run on these files
    if not commands[name]["endPairerCall"]:
        # Only true paired-end samples (two .paired files) can be joined
        if len(seqFiles) == 2:
            # endPairerCall = "join_paired_ends.py %s %s > %s.trimmed.fq" % (seqFiles[0], seqFiles[1], newPath)
            endPairerCall = "Pear -f %s -r %s -j 24 -q 10 -o %s" % (seqFiles[0], seqFiles[1], newPath)
            subprocess.call(endPairerCall, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
            metadata[name]["commands"]["endPairerCall"] = endPairerCall
    else:
        metadata[name]["commands"]["endPairerCall"] = commands[name]["endPairerCall"]
    # Remove the .paired\d.fq inputs once joined
    for pairedFile in seqFiles:
        os.remove(pairedFile)
    sys.stdout.write('.')
    return metadata
def mergePrepProcesses(sampleName, path, metadata, commands):
    """A helper function to make a pool of processes to allow for a
    multi-processed approach to merging joined and unpaired reads.

    Returns the list of per-sample metadata dicts produced by the workers."""
    mergePrepArgs = []
    output = {}
    # This used to check to see if the __name__ == '__main__', but since this is run as a module, the __name__
    # must equal the name of the script
    if __name__ == '__main__':
        createmergePool = Pool()
        # Prepare a tuple of the arguments (strainName and path)
        for name in sampleName:
            mergePrepArgs.append((name, path, metadata, commands))
        # This map function allows for multi-processing
        output = createmergePool.map(mergR, mergePrepArgs)
    return output
def mergR(args):
    """Merge the joined reads and the unpaired reads into one gzipped fastq
    under <path>/results, then clean the sample folder.

    *args* is a (name, path, metadata, commands) tuple, unpacked explicitly:
    the original tuple parameter is invalid in Python 3 (PEP 3113).
    Returns the updated metadata mapping.
    """
    name, path, metadata, commands = args
    make_path("%s/results" % path)
    # Use a piped cat and gzip command
    if not commands[name]["readMergeCall"]:
        readMergeCall = "cat %s/%s/*.fq %s/%s/*.fastq| gzip > %s/results/%s.fastq.gz" % (path, name, path, name, path, name)
        subprocess.call(readMergeCall, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        metadata[name]["commands"]["readMergeCall"] = readMergeCall
    else:
        metadata[name]["commands"]["readMergeCall"] = commands[name]["readMergeCall"]
    sys.stdout.write('.')
    # Remove .log, .fq and .fastq files - the folders will remain, as they will be used to get the names of the strains
    # if the pipeline needs to be run again.
    seqFiles = glob("%s/%s/*" % (path, name))
    if seqFiles:
        for seqFile in seqFiles:
            os.remove(seqFile)
    return metadata
def uploadR(sampleNames, path, metadata, commands):
    """Uploads samples to OneCodex for analysis via curl.

    Not fully tested, as it seems to be impossible to delete uploaded
    sequences right now, and I don't want to fill up Cathy's OneCodex
    account.  Records the curl command and the server response (sample id)
    in each sample's metadata; returns the updated mapping."""
    os.chdir("%s/reports" % path)
    for seqName in sampleNames:
        # NOTE(review): this checks commands["uploadCommand"] at the TOP
        # level rather than commands[seqName]["uploadCommand"] — presumably a
        # per-sample check was intended; confirm before relying on skips.
        if not commands["uploadCommand"]:
            curlCommand = "curl https://beta.onecodex.com/api/v0/upload -X POST -u %s: " \
                          "--form filename=@%s.fastq.gz" % (apikey, seqName)
            print curlCommand
            proc = subprocess.Popen(curlCommand, shell=True, stdout=subprocess.PIPE, stderr=open(os.devnull, 'wb'))
            stdout_value = proc.communicate()[0]
            print stdout_value
            metadata[seqName]["commands"]["uploadCommand"] = curlCommand
            metadata[seqName]["commands"]["sample_id"] = stdout_value
        else:
            metadata[seqName]["commands"]["uploadCommand"] = commands["uploadCommand"]
            metadata[seqName]["commands"]["sample_id"] = commands["sample_id"]
    os.chdir(path)
    return metadata
def filler(metadata, metadataList):
    """Merge a list of per-sample metadata dicts (as returned by the
    multi-processed worker pools) back into the master *metadata* mapping
    and return it.

    Entries shaped like item[name]["commands"]["trimmomaticCall"] = "..."
    are copied in, first writer wins.  Note: *metadata* is mutated in place
    and also returned — the local name below is an alias, not a copy.
    """
    merged = metadata
    for entry in metadataList:
        # Each entry is one worker's {name: {category: {key: value}}} dict.
        for sample in entry:
            for general in entry[sample]:
                for specific in entry[sample][general]:
                    # Keep the existing value if one was already recorded.
                    if specific not in merged[sample][general]:
                        merged[sample][general][specific] = str(entry[sample][general][specific])
    return merged
def runMetagenomR():
    """Top-level driver: discover samples, trim, join, merge and optionally
    upload to OneCodex, writing a JSON metadata report after each stage."""
    os.chdir(path)
    print "Finding sample names"
    seqNames = nameExtractor()
    print "\nMoving files to appropriate folders"
    seqMovR(path, seqNames)
    print "\nExtracting files from archive as necessary"
    foldererPrepProcesses(seqNames, path)
    # Commands recorded by previous runs allow each stage to be skipped.
    commands = commandR(seqNames, path)
    print "\nPerforming trimmomatic quality trimming on samples"
    trimmomaticMetadataList = trimmomaticPrepProcesses(seqNames, path, metadataFile, commands)
    trimmomaticMetadata = filler(metadataFile, trimmomaticMetadataList)
    jsonR(seqNames, path, trimmomaticMetadata, "Collection")
    print "\nMerging paired ends and appending singletons"
    pairedMetadataList = pairedEndJoinerPrepProcesses(seqNames, path, trimmomaticMetadata, commands)
    pairedMetadata = filler(trimmomaticMetadata, pairedMetadataList)
    jsonR(seqNames, path, pairedMetadata, "Collection")
    mergeMetadataList = mergePrepProcesses(seqNames, path, pairedMetadata, commands)
    mergeMetadata = filler(pairedMetadata, mergeMetadataList)
    jsonR(seqNames, path, mergeMetadata, "Collection")
    # Upload the files to OneCodex if desired
    if upload:
        print "\nUploading sequences to OneCodex"
        uploadMetadata = uploadR(seqNames, path, mergeMetadata, commands)
        jsonR(seqNames, path, uploadMetadata, "Collection")
    # print json.dumps(commands, sort_keys=True, indent=4, separators=(',', ': '))


runMetagenomR()
print "\nElapsed Time: %.2f seconds" % (time.time() - start)
| {
"repo_name": "adamkoziol/metagenomeAutomator",
"path": "metagenomR.py",
"copies": "1",
"size": "18393",
"license": "mit",
"hash": 7853484469652363000,
"line_mean": 46.0409207161,
"line_max": 130,
"alpha_frac": 0.648779427,
"autogenerated": false,
"ratio": 3.860831234256927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5009610661256927,
"avg_score": null,
"num_lines": null
} |
__author__ = 'akoziol'
# Import the necessary modules
# OS is used for file/folder manipulations
import os
# Subprocess->call is used for making system calls
from subprocess import call
# Errno is used in the file creation command - I think it's similar to the $! variable in Perl
import errno
# Glob finds all the path names matching a specified pattern according to the rules used by the Unix shell
import glob
# Shutil is useful for file moving/copying
import shutil
# prints variables in a form which can be used as input to the interpreter - similar to Data::Dumper?
import pprint
# Regex
import re
import time
import sys
import threading
import Queue
import shlex
import argparse
# Define the variables for the read length and fold coverage, respectively.
# The commented-out lists are the full parameter sweeps; the one-element
# lists below run a single combination.
#readLength = [30, 35, 40, 45, 50, 55, 60, 75, 80, 100, 150]
readLength = [54]
#foldCoverage = [1, 2, 5, 10, 15, 20, 25, 30, 35, 40, 50, 75, 100]
foldCoverage = [18]
# Initialize the required dictionaries:
# vcfData[megaName][pos][base][qual] -> depth of coverage at that position
vcfData = {}
# Define the range of k-mer sizes for indexing of targets
#kmer = [5, 7, 9, 11, 13, 15]
kmer = [9]
# NOTE: import-time side effect — the working directory is changed here.
os.chdir("/media/nas/akoziol/Pipeline_development/SipprModelling")
path = os.getcwd()
reference = "/media/nas/akoziol/Pipeline_development/SipprModelling/reference/Escherichia_coli_O157_H7_str_Sakai.fas"
def createSimulatedFiles(readLength, foldCoverage):
    """For every (read length, fold coverage) pair, create the output folder
    (if necessary) and run art_illumina to simulate paired-end reads there,
    skipping combinations whose output already exists."""
    os.chdir(path)
    for rlen in readLength:
        for cov in foldCoverage:
            # Folder and filename prefix for this parameter combination.
            out_dir = "%s/tmp/rL%s/rL%s_fC%s" % (path, rlen, rlen, cov)
            out_prefix = "%s/%s_%s_" % (out_dir, rlen, cov)
            make_path(out_dir)
            if os.path.isfile("%s1.fq" % out_prefix):
                continue  # reads already simulated for this combination
            # art_illumina -i <reference> -l <readLength> -f <foldCoverage>
            # -m 225 -s 60 -o <prefix>
            os.system("art_illumina -i %s -l %s -f %s -m 225 -s 60 -o %s"
                      % (reference, rlen, cov, out_prefix))
def make_path(inPath):
    """Create the directory *inPath* with any missing parents, treating an
    already-existing directory as success — ``mkdir -p`` semantics.
    (After http://stackoverflow.com/questions/273192)
    """
    try:
        os.makedirs(inPath)
    except OSError as err:
        already_there = (err.errno == errno.EEXIST)
        if not already_there:
            raise
def target_prep():
    """Prepares the target genes: collects the .fa files in <path>/targets
    into the global ``targets`` list, then faidx-indexes and smalt-indexes
    them via the helper functions.

    Side effect: changes the working directory to <path>/targets.
    """
    os.chdir("%s/targets" % path)
    global targets
    targets = glob.glob("*.fa")
    faidx_targets()
    index_targets()
def index_targets():
    """Run `smalt index` on every target for each k-mer size in ``kmer``,
    writing each index into targets/<name>/<name>_<k>/.  Skipped when the
    .smi index file already exists."""
    for target_file in targets:
        # Strip the extension: "purA.fa" -> "purA".
        base = target_file.split('.')[0]
        for k in kmer:
            # One folder per (target, k-mer) combination.
            index_dir = "%s/targets/%s/%s_%s" % (path, base, base, k)
            make_path(index_dir)
            os.chdir(index_dir)
            if os.path.isfile("%s.smi" % base):
                continue  # index already built
            os.system("smalt index -k %s -s 1 %s %s/targets/%s" % (k, base, path, target_file))
def faidx_targets():
    """Creates .fai index files of the targets, which are necessary for the conversion
    of sorted BAM files to fastq files.  Indexed targets and their .fai
    files are moved into <path>/targets/faidxFiles."""
    for file in targets:
        faidxFile = "%s.fai" % file
        faidxPath = "%s/targets/faidxFiles" % path
        make_path(faidxPath)
        # Only index targets that have not been processed yet.
        # NOTE(review): the moves are assumed to belong inside this guard —
        # the .fai only exists after samtools has run — but moving the .fa
        # out of targets/ looks incompatible with index_targets(), which
        # still references %s/targets/%s.  Confirm the original intent.
        if not os.path.isfile("%s/%s" % (faidxPath, faidxFile)):
            faidxCommand = "samtools faidx %s" % file
            os.system(faidxCommand)
            shutil.move(faidxFile, faidxPath)
            shutil.move(file, faidxPath)
def mapping(rLength, fCov, size, count, filename, megaName, filePath, total):
    """Performs the mapping of the simulated reads to the targets with
    smalt, writing <megaName>.bam; skipped when that file already exists."""
    targetPath = "%s/targets/%s/%s_%s" % (path, filename, filename, size)
    ###Include the commented out lines if necessary to have targets in the same directory as fastq files
    #os.chdir("%s/targets/%s/%s_%s" % (path, filename, filename, size))
    #for files in os.listdir("."):
    #    shutil.copy(files, newPath)
    # Skip the (slow) smalt call when the BAM output already exists.
    if not os.path.isfile("%s.bam" % (megaName)):
        smaltMap = "smalt map -o %s.bam -n 24 -f bam -x -l pe %s/%s %s/%s_%s_1.fq %s/%s_%s_2.fq" \
                   % (megaName, targetPath, filename, filePath, rLength, fCov, filePath, rLength, fCov)
        os.system(smaltMap)
        print "Mapping file %s of %s" % (count, total)
def sorting(count, megaName, sorted, sortedName, total):
    """Sorts the bam file in order for further manipulations to be possible.

    NOTE(review): the third parameter shadows the builtin ``sorted`` —
    rename when callers can be touched too."""
    # Sort the BAM file (skipped when the sorted output already exists)
    if not os.path.isfile(sortedName):
        bamSort = "samtools sort %s.bam %s" % (megaName, sorted)
        print "Sorting file %s of %s" % (count, total)
        os.system(bamSort)
def indexing(megaName, sortedName, count, total):
    """Indexes the sorted bam files in order to visualize the assemblies with tablet - note this is OPTIONAL"""
    indexedName = str(megaName) + "_sorted.bai"
    # Skipped when the .bai index already exists.
    if not os.path.isfile(indexedName):
        bamIndex = "samtools index %s %s" % (sortedName, indexedName)
        print "Indexing file %s of %s" % (count, total)
        os.system(bamIndex)
def createVCF(target, sortedName, vcfFile, count, total):
    """Generate *vcfFile* from the sorted BAM with a samtools mpileup |
    bcftools pipeline; skipped when the VCF already exists."""
    faidxTarget = "%s/targets/faidxFiles/%s" % (path, target)
    # Read this to understand why certain flags were used
    if not os.path.isfile(vcfFile):
        vcfPipe = "samtools mpileup -A -BQ0 -d 1000000 -uf %s %s | bcftools view -cg - > %s" % (faidxTarget, sortedName, vcfFile)
        print "Creating VCF file %s of %s" % (count, total)
        os.system(vcfPipe)
def populateVCFdata(megaName, vcfFile):
    """Parse *vcfFile* and record, per position, the called base, its quality
    and the depth of coverage under the global ``vcfData[megaName]``.

    Fixes over the original: the file handle is closed via ``with`` (it was
    never closed before), and header detection uses startswith('#') — the
    old ``"#" in line`` test skipped any data line merely containing '#'.
    """
    # Enter the first key in the dictionary - unlike Perl, Python doesn't
    # support autovivification, so intermediate levels are created explicitly.
    vcfData[megaName] = {}
    with open(vcfFile, 'r') as vcf:
        for line in vcf:
            # Header lines start with '#': skip them.
            if line.startswith("#"):
                continue
            line = line.rstrip()
            tabLine = line.split('\t')
            pos = int(tabLine[1])
            ref = tabLine[3]
            alt = tabLine[4]
            qual = float(tabLine[5])
            info = tabLine[7]
            # INFO begins with "DP=<depth>;..." — take the first field.
            messyDP = info.split(';')[0]
            DP = messyDP.replace('DP=', '')
            # '.' in ALT means no variant was called: keep the reference base.
            if alt == ".":
                base = ref
            else:
                base = alt
            # int()/float() keys keep numeric sort order when the dictionary
            # is traversed (string keys sorted as 1, 10, 100, 11, ... 2).
            vcfData[megaName][pos] = {}
            vcfData[megaName][pos][base] = {}
            vcfData[megaName][pos][base][qual] = DP
def pipeline():
    """Run the full mapping -> sorting -> indexing -> VCF -> parse workflow
    for every combination of read length, fold coverage, target and k-mer
    size, printing progress as <count> of <total>."""
    count = 0
    os.chdir(path)
    total = len(readLength) * len(foldCoverage) * len(targets) * len(kmer)
    for rLength in readLength:
        for fCov in foldCoverage:
            for target in targets:
                base_name = target.split('.')[0]
                for k in kmer:
                    count += 1
                    # Canonical name for this parameter combination.
                    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, base_name, k)
                    filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
                    work_dir = "%s/%s" % (filePath, megaName)
                    # Renamed from 'sorted' to avoid shadowing the builtin.
                    sort_prefix = str(megaName) + "_sorted"
                    sortedName = str(megaName) + "_sorted.bam"
                    os.chdir(work_dir)
                    mapping(rLength, fCov, k, count, base_name, megaName, filePath, total)
                    sorting(count, megaName, sort_prefix, sortedName, total)
                    indexing(megaName, sortedName, count, total)
                    vcfFile = str(megaName) + "_sorted.vcf"
                    createVCF(target, sortedName, vcfFile, count, total)
                    outputFile = str(megaName) + "_output.csv"  # NOTE: unused in this function
                    populateVCFdata(megaName, vcfFile)
# Script entry sequence: prepare targets, simulate reads, run the pipeline,
# then dump the accumulated per-position VCF data.
target_prep()
createSimulatedFiles(readLength, foldCoverage)
pipeline()
pprint.pprint(vcfData)
| {
"repo_name": "adamkoziol/SipprModeling",
"path": "modelling.py",
"copies": "1",
"size": "9371",
"license": "mit",
"hash": 8077281785456471000,
"line_mean": 41.0224215247,
"line_max": 129,
"alpha_frac": 0.6235193683,
"autogenerated": false,
"ratio": 3.5362264150943394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9626153093610347,
"avg_score": 0.006718537956798562,
"num_lines": 223
} |
__author__ = 'akoziol'
# Import the necessary modules
# OS is used for file/folder manipulations
import os
# Subprocess->call is used for making system calls
import subprocess
# Errno is used in the file creation command - I think it's similar to the $! variable in Perl
import errno
# Glob finds all the path names matching a specified pattern according to the rules used by the Unix shell
import glob
# Shutil is useful for file moving/copying
import shutil
# prints variables in a form which can be used as input to the interpreter - similar to Data::Dumper?
#import pprint
# Regex
import re
# System tools
import sys
# Can import date, and calculate length of run, etc.
import time
# Multiprocessing module
from multiprocessing import Pool
# Numerical python - used in the parsing of vcf files
import numpy
# Math module - used in the parsing of vcf files
import math
# Argument parser for user-inputted values, and a nifty help menu
from argparse import ArgumentParser
# Command-line interface: all four arguments are mandatory
parser = ArgumentParser(description='Perform modelling of parameters for GeneSipping')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v1.0')
parser.add_argument('-i', '--input', required=True, help='Specify input directory')
parser.add_argument('-l', '--readLength', required=True, help='Specify list of read lengths to be used e.g. 18, 19, 20, 21, 22')
parser.add_argument('-f', '--foldCoverage', required=True, help='Specify list of fold coverage values to be used e.g. 1, 1.5, 2, 2.5, 5')
parser.add_argument('-k', '--kmerLength', required=True, help='Specify list of kmer lengths to be used e.g. 5, 7, 11')
# Collect the parsed arguments into a plain dictionary
args = vars(parser.parse_args())
# Define variables from the arguments - there may be a more streamlined way to do this
path = args['input']
# The following three values are comma-separated lists; note that the list
# elements remain strings after the split
readLength = args['readLength'].split(",")
foldCoverage = args['foldCoverage'].split(",")
kmer = args['kmerLength'].split(",")
# maxRL will be used in the fold coverage calculations during simulated file
# generation - assumes the LAST read length supplied is the largest one
maxRL = readLength[-1]
# Initialize the required dictionaries
vcfData = {}
# Target files are in scriptPath/targets
scriptPath = os.getcwd()
os.chdir("%s/targets" % scriptPath)
# Must have a .fa extension
targetFiles = glob.glob("*.fa")
# Build an absolute path for each target fasta file
targets = ["%s/targets/" % scriptPath + fastaFile for fastaFile in targetFiles]
# Genome reference files need to be in "path/reference"
os.chdir("%s/reference" % path)
# File extension needs to be .fa .fas .fasta, but not .ffn .fsa, etc.
referenceFile = glob.glob("*.fa*")
# Build an absolute path for each reference fasta file
references = ["%s/reference/" % path + fastaFile for fastaFile in referenceFile]
# Define the path for the outputs
outPath = "%s/outputs" % path
def make_path(inPath):
    """Create *inPath* (including any missing parents) if it does not exist.

    Based on http://stackoverflow.com/questions/273192/check-if-a-directory-exists-and-create-it-if-necessary
    Any OSError other than "directory already exists" is re-raised.
    """
    try:
        os.makedirs(inPath)
    except OSError as err:
        # The directory already existing is fine; anything else is a real error.
        if err.errno == errno.EEXIST:
            pass
        else:
            raise
def createSimulatedFilesProcesses(reference):
"""Creates a pool of processes, and maps data in a parallel fashion to createSimulatedFiles"""
print "Creating simulated files"
# Initialise the args list
simulatedArgs = []
# Every Python module has its __name__ defined and if this is '__main__',
# it implies that the module is being run standalone by the user and we can do corresponding appropriate actions.
# http://ibiblio.org/g2swap/byteofpython/read/module-name.html
if __name__ == '__main__':
# Initialise the pool of processes - it defaults to the number of processors
simulatedFilepool = Pool()
# Create a tuple of the appropriate read lengths and fold coverages
# eg. (30, 1), (30, 2), ... (30, 100), (35, 1), (35, 2), ... (150, 100)
for rLength in readLength:
for fCov in foldCoverage:
simulatedArgs.append((rLength, fCov, reference))
# Use the map function and the tuple created above to process the data rapidly
simulatedFilepool.map(createSimulatedFiles, simulatedArgs)
def createSimulatedFiles((rLength, fCov, reference)):
"""Iterates over the readLength and foldCoverage lists to create folders (if necessary)\
and perform analyses"""
os.chdir(path)
# Create a new folder(if necessary) at the appropriate location
newPath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
newFile = "%s/%s_%s" % (newPath, rLength, fCov)
# The adjusted coverage keeps the number of reads constant for each readLength value supplied.
# a modelling experiment with a readLength of 20 will have a adjCov that is 40% the value of
# one with a readLength of 50
adjCov = float(fCov) * float(rLength)/float(maxRL)
# If using Sakai as the reference, then multiplying the foldCoverage by the constant below
# will allow for the use of precise read lengths - using a foldCoverage value of 5 will yield almost
# exactly 500 000 reads
# adjCov = float(fCov) * 0.90935049 * float(rLength)/float(maxRL)
# Call art_illumina to simulate the reads into the appropriate folders - general format of system call:
# art_illumina -i /path-to-file/Escherichia_coli_O157_H7_str_Sakai.fas -l "readLength" -f "foldCoverage" \
# -m 225 -s 60 -o /path-to-folder/Appropriate_name
artIlluminaCall = "art_illumina -i %s -l %s -f %s -o %s" % (reference, rLength, adjCov, newFile)
# If not using an adjusted coverage value, then uncomment the line below
# artIlluminaCall = "art_illumina -i %s -l %s -f %s -o %s" % (reference, rLength, float(fCov), newFile)
make_path(newPath)
if not os.path.isfile("%s.fq" % newFile):
sys.stdout.write('.')
# Subprocess.call requires that the command be finished before the loop can continue
# this ensures that processes will not be started, and continue running, while the
# script believes that it is "safe" to start more processes, eventually leading to problems
subprocess.call(artIlluminaCall, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
else:
print sys.stdout.write('.')
def faidxTargetsProcesses():
"""Faidx multiprocessing helper function"""
print '\nProcessing targets with faidx'
# Initialise the args list
if __name__ == '__main__':
# Initialise the pool of processes - it defaults to the number of processors
faidxPool = Pool()
faidxPool.map(faidxTargets, targets)
def faidxTargets(file):
    """Creates .fai index files of the targets, which are necessary for the conversion
    of sorted BAM files to fastq files.

    The indexed copy of each target (and the target itself) is placed under
    path/targets/faidxFiles. NOTE(review): the parameter name "file" shadows
    the Python 2 builtin of the same name.
    """
    # print json.dumps(file, sort_keys=True, indent=4, separators=(',', ': '))
    fileName = os.path.split(file)[1]
    # samtools faidx writes the index next to the input file
    faidxFile = "%s.fai" % file
    # print faidxFile
    faidxPath = "%s/targets/faidxFiles" % path
    make_path(faidxPath)
    # Skip targets that have already been indexed on a previous run
    if not os.path.isfile("%s/%s.fai" % (faidxPath, fileName)):
        faidxCommand = "samtools faidx %s" % file
        subprocess.call(faidxCommand, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        # Move the file and faidx-processed file to the appropriate folder for further processing
        shutil.move(faidxFile, faidxPath)
        shutil.copy(file, faidxPath)
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def indexTargets():
    """Performs smalt index on the targets using the range of k-mers stored in the variable kmer"""
    print '\nIndexing targets'
    for target in targets:
        for size in kmer:
            # Format the target names properly
            filename = os.path.split(target)[1]
            fileNoExt = filename.split(".")[0]
            # Create a new path to be created (if necessary) for the generation of the range of k-mers
            indexPath = "%s/targets/%s/%s_%s" % (path, fileNoExt, fileNoExt, size)
            # Call the make_path function to make folders as necessary
            make_path(indexPath)
            indexFileSMI = "%s.smi" % fileNoExt
            indexFileSMA = "%s.sma" % fileNoExt
            # Index the appropriate files - skipped if the index already exists
            if not os.path.isfile("%s/%s" % (indexPath, indexFileSMI)):
                # smalt index writes the .smi/.sma files into the current
                # working directory; they are moved into indexPath just below
                indexCommand = "smalt index -k %s -s 1 %s %s/targets/faidxFiles/%s" % (size, fileNoExt, path, filename)
                subprocess.call(indexCommand, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
                shutil.move(indexFileSMI, indexPath)
                shutil.move(indexFileSMA, indexPath)
                # Progress indicator - one dot per target/k-mer combination
                sys.stdout.write('.')
            else:
                sys.stdout.write('.')
def mappingProcesses():
"""Mapping threads!"""
os.chdir(path)
print '\nPerforming reference mapping'
mappingProcessesArgs = []
if __name__ == '__main__':
mappingProcessesPool = Pool()
# uses kmer, targets, readLength, foldCoverage
for rLength in readLength:
for fCov in foldCoverage:
for target in targets:
for size in kmer:
mappingProcessesArgs.append((rLength, fCov, target, size))
mappingProcessesPool.map(mapping, mappingProcessesArgs)
def mapping((rLength, fCov, target, size)):
    """Performs the mapping of the simulated reads to the targets

    The single tuple argument keeps the function compatible with Pool.map.
    """
    filename = os.path.split(target)[1]
    fileNoExt = filename.split(".")[0]
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
    filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
    newPath = "%s/%s" % (filePath, megaName)
    make_path(newPath)
    # Location of the smalt index built by indexTargets()
    targetPath = "%s/targets/%s/%s_%s" % (path, fileNoExt, fileNoExt, size)
    # Map the files to the reference - skipped if the bam file already exists
    if not os.path.isfile("%s/%s.bam" % (newPath, megaName)):
        smaltMap = "smalt map -o %s/%s.bam -f bam -x %s/%s %s/%s_%s.fq" \
                   % (newPath, megaName, targetPath, fileNoExt, filePath, rLength, fCov)
        subprocess.call(smaltMap, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def sortingProcesses():
"""Multiprocessing for sorting bam files"""
print "\nSorting bam files"
sortingProcessesArgs = []
if __name__ == '__main__':
sortingProcessesPool = Pool()
# uses kmer, targets, readLength, foldCoverage
for rLength in readLength:
for fCov in foldCoverage:
for target in targets:
for size in kmer:
sortingProcessesArgs.append((rLength, fCov, target, size))
sortingProcessesPool.map(sorting, sortingProcessesArgs)
def sorting((rLength, fCov, target, size)):
"""Performs samtools sort to return a sorted bam file"""
filename = os.path.split(target)[1]
fileNoExt = filename.split(".")[0]
megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
sorted = megaName + "_sorted"
sortedMegaName = megaName + "_sorted.bam"
filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
newPath = "%s/%s" % (filePath, megaName)
#Sort the BAM file
if not os.path.isfile("%s/%s" % (newPath, sortedMegaName)):
bamSort = "samtools sort %s/%s.bam %s/%s" % (newPath, megaName, newPath, sorted)
subprocess.call(bamSort, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
sys.stdout.write('.')
else:
sys.stdout.write('.')
def bamIndexingProcesses():
print '\nIndexing bam files'
bamIndexingArgs = []
if __name__ == '__main__':
bamIndexingPool = Pool()
# uses kmer, targets, readLength, foldCoverage
for rLength in readLength:
for fCov in foldCoverage:
for target in targets:
for size in kmer:
bamIndexingArgs.append((rLength, fCov, target, size))
bamIndexingPool.map(bamIndexing, bamIndexingArgs)
def bamIndexing((rLength, fCov, target, size)):
    """Indexes the sorted bam files in order to visualize the assemblies with tablet - note this is OPTIONAL

    The single tuple argument keeps the function compatible with Pool.map.
    """
    filename = os.path.split(target)[1]
    fileNoExt = filename.split(".")[0]
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
    sortedMegaName = megaName + "_sorted.bam"
    filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
    newPath = "%s/%s" % (filePath, megaName)
    indexedName = megaName + "_sorted.bai"
    # Index the sorted bam file - skipped if the .bai file already exists
    if not os.path.isfile("%s/%s" % (newPath, indexedName)):
        bamIndex = "samtools index %s/%s %s/%s" % (newPath, sortedMegaName, newPath, indexedName)
        subprocess.call(bamIndex, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def createVCFProcesses():
print '\nCreating vcf files'
createVCFArgs = []
if __name__ == '__main__':
createVCFPool = Pool()
# uses kmer, targets, readLength, foldCoverage
for rLength in readLength:
for fCov in foldCoverage:
for target in targets:
for size in kmer:
createVCFArgs.append((rLength, fCov, target, size))
createVCFPool.map(createVCF, createVCFArgs)
def createVCF((rLength, fCov, target, size)):
    """Creates the variant calling format files from which all relevant data can be pulled

    The single tuple argument keeps the function compatible with Pool.map.
    """
    filename = os.path.split(target)[1]
    fileNoExt = filename.split(".")[0]
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
    sortedMegaName = megaName + "_sorted.bam"
    filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
    vcfFile = megaName + "_sorted.vcf"
    newPath = "%s/%s" % (filePath, megaName)
    # The faidx-indexed copy of the target produced by faidxTargets()
    faidxTarget = "%s/targets/faidxFiles/%s" % (path, filename)
    # Read this to understand why certain flags were used
    # http://samtools.sourceforge.net/mpileup.shtml
    # Skipped if the vcf file already exists from a previous run
    if not os.path.isfile("%s/%s" % (newPath, vcfFile)):
        vcfPipe = "samtools mpileup -A -BQ0 -d 1000000 -uf %s %s/%s | bcftools view -cg - > %s/%s" \
                  % (faidxTarget, newPath, sortedMegaName, newPath, vcfFile)
        subprocess.call(vcfPipe, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def createOutputFiles():
    """Parses the vcf files created above to create a handy summary table of mapping stats

    For every (readLength, foldCoverage, target, kmer) combination the
    matching vcf file is parsed and one tab-separated summary row is written
    to outputs/SipprModelling_<start>.csv.
    """
    print "\nCreating outputs"
    make_path(outPath)
    os.chdir(outPath)
    # NOTE(review): "start" is a module-level timestamp assigned at the bottom
    # of the file before pipeline() runs, so it is defined by the time we get here
    outFile = open("SipprModelling_%s.csv" % start, "wb")
    # Column header - the fields are tab-separated despite the .csv extension
    outFile.write("readLength\tfoldCoverage\ttarget\tkmerLength\tMedianQualityScore\t"
                  "QualityScoreSD\tMedianFoldCoverage\tFoldCoverageSD\tMedianPercentID\tqualityMetric\n")
    for rLength in readLength:
        for fCov in foldCoverage:
            for target in targets:
                for size in kmer:
                    total1 = 0
                    # Progress indicator - one dot per parameter combination
                    sys.stdout.write('.')
                    filename = os.path.split(target)[1]
                    fileNoExt = filename.split(".")[0]
                    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
                    filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
                    vcfFile = megaName + "_sorted.vcf"
                    newPath = "%s/%s" % (filePath, megaName)
                    outputFile = "%s/%s" % (newPath, vcfFile)
                    # Initialise the counter, which will be used to track lines in the vcf file - if positions in the
                    # target are not mapped, then the position field will jump ahead of the counter
                    count = 1
                    # Initialise the arrays, which will keep track of the appropriate values for each dataset
                    arrQual = []
                    arrCov = []
                    arrSum = []
                    output = open(outputFile, "r")
                    for line in output:
                        # vcf files have 36 commented out lines at the top of each file - these are not necessary
                        # (any line containing "#" is treated as a header line)
                        if re.search('#', line):
                            pass
                        else:
                            total1 += 1
                            # Format of file
                            # CHROM   POS  ID  REF  ALT  QUAL  FILTER  INFO                                         FORMAT
                            # adk-12  8    .   G    .    32.7  .       DP=1;AF1=0;AC1=0;DP4=0,1,0,0;MQ=29;FQ=-30.3  PL  0
                            # data[0] [1]  [2] [3]  [4]  [5]   [6]     [7]
                            data = line.split("\t")
                            #target = data[0]
                            pos = data[1]
                            refSeq = data[3]
                            mapSeq = data[4]
                            qual = data[5]
                            # Depth of coverage is reported prior to the first ";"
                            dpLine = data[7].split(";")[0]
                            # For now, I'm skipping lines that indicated the presence of a possible indel
                            # - I may return to this later
                            if re.search("INDEL", dpLine):
                                pass
                            else:
                                # If the called base (mapSeq) is identical to the reference base (refSeq)
                                # - denoted by a ".", then set seq to equal refSeq, otherwise, pull the
                                # value of mapSeq for seq
                                # NOTE(review): this running average is recomputed every line and
                                # overwritten by the summary block below - it appears to be dead code
                                avgQual = sum(arrQual)/total1
                                if mapSeq == ".":
                                    seq = refSeq
                                    match = 1
                                # This section corrects for the fact that during the conversion of bam files to vcf
                                # files, SNP calls and ambiguous calls look identical, except for the fact that for
                                # SNPs, the qualityScore (qual) tends to be higher than the surrounding bases,
                                # while ambiguous calls have a lower qualityScore - this loop screens for quality
                                # scores that are at least 10 lower than the score of the previous base
                                else:
                                    # NOTE(review): arrQual[-1] raises IndexError if the very first
                                    # data line is already a mismatch (arrQual still empty here)
                                    if float(arrQual[-1] - 10) >= 0:
                                        prevValue = float(arrQual[-1] - 10)
                                    else:
                                        prevValue = 0
                                    if float(qual) <= prevValue:
                                        seq = refSeq
                                        match = 1
                                    else:
                                        # This attempts to catch if there are two ambiguous bases in a row;
                                        # they will hopefully have the same value
                                        if float(qual) == prevValue:
                                            seq = refSeq
                                            match = 1
                                        else:
                                            # "True" SNPs seem to have increased qualityScore compared to the
                                            # surrounding values, this will catch that
                                            if float(qual) > prevValue:
                                                seq = mapSeq
                                                match = 0
                                # Strip the "DP=" from dpLine
                                DP = dpLine.split("=")[1]
                                #vcfData[pos] = (fileName, target, refSeq, mapSeq, DP)
                                # If pos > count, then there is a gap in the mapping (or a deletion, but ignoring
                                # this possibility for now). For my purposes, I want to have data reported for
                                # every position, whether it is present in the vcf file or not, so I will use count
                                # as the position, "-" as the seq, and 0 as the quality and depth of coverage
                                if int(pos) > count:
                                    #print int(pos) - count, pos, count, range(count, int(pos))
                                    # the number of skipped positions is equal to the value for pos - count
                                    # For each skipped position (i), set appropriate variables to appropriate values
                                    for i in range(count, int(pos)):
                                        posAdj = count
                                        seqAdj = "-"
                                        matchAdj = 0
                                        qualAdj = 0
                                        DPAdj = 0
                                        #vcfData[fileName][rL][fC][target][size][int(posAdj)][seqAdj][matchAdj][qualAdj] = DP
                                        arrQual.append(float(qualAdj))
                                        arrCov.append(float(DPAdj))
                                        arrSum.append(float(matchAdj))
                                        count += 1
                                if int(pos) == count:
                                    #vcfData[fileName][rL][fC][target][size][int(pos)][seq][match][qual] = DP
                                    arrQual.append(float(qual))
                                    arrCov.append(float(DP))
                                    arrSum.append(float(match))
                                    count += 1
                                else:
                                    #vcfData[fileName][rL][fC][target][size][int(pos)][seq][match][qual] = DP
                                    arrQual.append(float(qual))
                                    arrCov.append(float(DP))
                                    arrSum.append(float(match))
                                    count += 1
                    # In the case of no data being present in a file,
                    # all the summary statistics default to zero
                    total = count - 1
                    if total == 0:
                        avgQual = 0
                        stdQual = 0
                        avgCov = 0
                        stdCov = 0
                        avgID = 0
                        qualMet = 0
                    else:
                        avgQual = sum(arrQual)/total
                        stdQual = numpy.std(arrQual)
                        avgCov = sum(arrCov)/total
                        stdCov = numpy.std(arrCov)
                        avgID = sum(arrSum)/total * 100
                        # Composite quality metric: mean quality x mean coverage
                        qualMet = avgQual * avgCov
                    outFile.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
                                  % (rLength, fCov, fileNoExt, size, avgQual, stdQual, avgCov, stdCov, avgID, qualMet))
                    output.close()
    outFile.close()
def pipeline():
    """Drive the whole workflow: read simulation, target preparation,
    mapping, sorting, bam indexing, vcf creation and report generation."""
    # Simulate reads for every reference genome discovered at start-up
    for genome in references:
        createSimulatedFilesProcesses(genome)
    # Prepare the targets (faidx index, then smalt index per k-mer)
    faidxTargetsProcesses()
    indexTargets()
    # The four per-combination stages, each parallelised internally
    for stage in (mappingProcesses, sortingProcesses,
                  bamIndexingProcesses, createVCFProcesses):
        stage()
    # Parse the resulting vcf files into the summary table
    createOutputFiles()
# Record the wall-clock start time (also used by createOutputFiles to name
# the summary csv), run the full pipeline, then report the elapsed time.
start = time.time()
pipeline()
print "\nElapsed Time: %s seconds" % (time.time() - start)
"repo_name": "adamkoziol/SipprModeling",
"path": "modellingMultiprocessing.py",
"copies": "1",
"size": "24473",
"license": "mit",
"hash": -9210337676631298000,
"line_mean": 49.0490797546,
"line_max": 137,
"alpha_frac": 0.5448453398,
"autogenerated": false,
"ratio": 4.079513252208701,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5124358592008701,
"avg_score": null,
"num_lines": null
} |
__author__ = 'a.kozlowski'
import pygame
from pygame.locals import *
from random import randint
class Snake(object):
    """A small snake game built on pygame.

    Instantiating the class initialises pygame, loads the assets and then
    blocks inside the main loop (see start()), so Snake() runs the game.
    """
    game_speed = 10 # fps limit for the game logic (updates per second)
    screen_w = 800 # window width in pixels
    screen_h = 640 # window height in pixels
    map_margin_y = 40 # space for displaying score during game
    map_w = 40 # tiles x
    map_h = 30 # tiles y
    surface_color = (55, 55, 55) # background fill colour (dark grey)
    last_update_time = 0 # ms accumulated since the last logic update
    window_caption = "GeekSnake"
    def __init__(self):
        """Initialise pygame, the game state and the window, then start the main loop."""
        pygame.init()
        # init game vars
        self.map = None # 2D list: 1 = wall tile, 0 = open tile
        self.snake = None # list of (x, y) tiles, tail first, head last
        self.dir = 'r' # current movement direction: r/l/t/d
        self.new_dir = 'r' # this var stores the button pressed between frames
        self.apple = None # (x, y) tile of the current apple
        self.running = False
        self.score = -1 # -1 means it's the first game (menu hides the score line)
        self.img_apple = pygame.image.load('data/apple.png')
        self.img_border = pygame.image.load('data/border.png')
        self.img_snake = pygame.image.load('data/snake.png')
        self.img_logo = pygame.image.load('data/logo.png')
        # Tile size in pixels (integer division under Python 2)
        self.tile_h = (self.screen_h - self.map_margin_y) / self.map_h
        self.tile_w = self.screen_w / self.map_w
        # init pygame window
        flag = DOUBLEBUF
        self.surface = pygame.display.set_mode((self.screen_w, self.screen_h), flag)
        pygame.display.set_caption(self.window_caption)
        self.font = pygame.font.SysFont('Arial', 20)
        self.clock = pygame.time.Clock()
        self.start()
    def _start_new_game(self):
        """Reset score/direction/snake, rebuild the walled map and place a new apple."""
        self.score = 0
        self.dir = 'r'
        self.running = True
        self.snake = [(10, 10), (11, 10), (12, 10)]
        self.map = []
        # generate new map: solid top/bottom rows plus walled left/right columns
        self.map.append([1] * self.map_w)
        for i in range(self.map_h - 2):
            row = [0] * (self.map_w - 2)
            row.insert(0, 1)
            row.append(1)
            self.map.append(row)
        self.map.append([1] * self.map_w)
        # create first apple
        self._create_apple()
    def _update_game(self):
        """Advance the snake one tile; handle wall/self collisions and apples."""
        snake_head = self.snake[-1]
        if self.dir == 'r':
            snake_new_point = (snake_head[0] + 1, snake_head[1]) # direction right
        elif self.dir == 'l':
            snake_new_point = (snake_head[0] - 1, snake_head[1]) # direction left
        elif self.dir == 't':
            snake_new_point = (snake_head[0], snake_head[1] - 1) # direction top
        else:
            snake_new_point = (snake_head[0], snake_head[1] + 1) # direction down
        # Game over when the new head hits the snake itself or a wall tile
        if (snake_new_point in self.snake) or (self.map[snake_new_point[1]][snake_new_point[0]] == 1):
            self.running = False
            return
        self.snake.append(snake_new_point)
        if snake_new_point == self.apple:
            # Eating an apple: keep the tail (snake grows) and respawn the apple
            self._create_apple()
            self.score += 10
        else:
            # Normal move: drop the tail so the snake keeps its length
            del self.snake[0]
    def _draw_game(self):
        """Render the map, snake, apple and score for one frame."""
        self._draw_map()
        self._draw_snake()
        self._draw_apple()
        label_score = self.font.render("SCORE: " + str(self.score), 1, (255, 255, 0))
        self.surface.blit(label_score, (5, 8))
    def _draw_map(self):
        """Blit a border image onto every wall tile."""
        for y in range(self.map_h):
            for x in range(self.map_w):
                tile = int(self.map[y][x])
                if tile == 1:
                    self._drawBlock(self.img_border, x, y)
    def _draw_snake(self):
        """Blit the snake image onto every body tile."""
        for b in self.snake:
            self._drawBlock(self.img_snake, b[0], b[1])
    def _draw_apple(self):
        """Blit the apple image onto the apple tile."""
        self._drawBlock(self.img_apple, self.apple[0], self.apple[1])
    def _create_apple(self):
        """Place a new apple on a random inner tile not occupied by the snake."""
        while True:
            appleY = randint(1, self.map_h - 2)
            appleX = randint(1, self.map_w - 2)
            collid = False
            for i in range(len(self.snake)):
                if appleX == self.snake[i][0] and appleY == self.snake[i][1]:
                    collid = True
            if not collid:
                break
        self.apple = (appleX, appleY)
    def _drawBlock(self, img, x, y):
        """Blit *img* at the pixel position of tile (x, y)."""
        self.surface.blit(img, self._cord2screen(x, y))
    def _cord2screen(self, x, y):
        """Convert tile coordinates into pixel coordinates (below the score bar)."""
        return self.tile_w * x, (y * self.tile_h) + self.map_margin_y
    def _drawMenu(self):
        """Render the title screen, including the last score after a game over."""
        self.surface.blit(self.img_logo, (self.screen_w / 2 - self.img_logo.get_rect().w / 2, 20))
        if self.score >= 0:
            label_score = self.font.render("Game Over! Score: " + str(self.score), 1, (255, 255, 0))
            lscw, lsch = label_score.get_size()
            self.surface.blit(label_score, ((self.screen_w / 2 - lscw / 2), 150))
        label_start = self.font.render("Press `S` to start new game", 1, (255, 255, 0))
        lsw, lsh = label_start.get_size()
        self.surface.blit(label_start, ((self.screen_w / 2 - lsw / 2), self.screen_h / 2 - lsh))
    def _handle_events(self):
        """Read the keyboard: WASD steering in-game, S starts a game from the menu."""
        keys = pygame.key.get_pressed()
        if self.running:
            # Reversing direction is disallowed (e.g. no up while moving down)
            if keys[pygame.K_w] and not self.dir == 'd':
                self.new_dir = 't'
            if keys[pygame.K_s] and not self.dir == 't':
                self.new_dir = 'd'
            if keys[pygame.K_a] and not self.dir == 'r':
                self.new_dir = 'l'
            if keys[pygame.K_d] and not self.dir == 'l':
                self.new_dir = 'r'
        else:
            if keys[pygame.K_s]:
                self._start_new_game()
    def start(self):
        """Main loop: poll events, update logic at game_speed Hz, redraw at up to 30 fps."""
        while True:
            for event in pygame.event.get():
                # Quit on window close or Escape
                if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                    return True
            # clear screen
            self.surface.fill(self.surface_color)
            self._handle_events()
            # tick(30) caps rendering at 30 fps; game logic advances at game_speed
            self.last_update_time += self.clock.tick(30)
            if self.running:
                if self.last_update_time > (1000 / self.game_speed):
                    self.last_update_time = 0
                    self.dir = self.new_dir
                    self._update_game()
                self._draw_game()
            else:
                self._drawMenu()
            # update screen
            pygame.display.flip()
# Script entry point: constructing Snake() initialises pygame and immediately
# enters the blocking game loop via self.start()
if __name__ == "__main__":
    game = Snake()
| {
"repo_name": "AlbertKozlowski/GeekSnake",
"path": "snake.py",
"copies": "1",
"size": "6035",
"license": "mit",
"hash": 2478090873065433000,
"line_mean": 32.5277777778,
"line_max": 102,
"alpha_frac": 0.5231151616,
"autogenerated": false,
"ratio": 3.341638981173865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9315059791024518,
"avg_score": 0.009938870349869467,
"num_lines": 180
} |
__author__ = 'Akshay'
"""
File contains code to Mine reviews and stars from a state reviews.
This is just an additional POC that we had done on YELP for visualising number of 5 star reviews per state on a map.
For each business per state, 5 reviews are taken and the count of the review is kept in the dictionary for each state.
Use the resulting json to plot it onto the map.
For the actual map visualisation, please refer State Review Nightlife POC.
Since only 5 business reviews were taken per state, this still needs work.
"""
##############################################
from __future__ import division
import sys
reload(sys)
import json
import datetime
sys.setdefaultencoding('utf8')
# Per-state counters for each star rating, populated by state_review_trends();
# only the 5-star counts are currently reported in the output JSON
state_5_star_dict = {}
state_4_star_dict = {}
state_3_star_dict = {}
state_2_star_dict = {}
state_1_star_dict = {}
# Number of reviews collected so far per state during mining (capped at 5)
state_business = {}
def create_set_for_business_with_cat(category):
    """Scans the Yelp business dump and, for every state business whose
    categories contain *category*, mines up to 5 reviews via create_yelp_set.

    Stops scanning once 50 distinct states have been encountered.
    """
    business_count = 0  # NOTE(review): never updated - leftover counter
    # NOTE(review): the backslash in the literal only works because "\y" is not
    # a recognised escape sequence; a raw string or os.path.join would be safer
    with open('Data\yelp_academic_dataset_business.json') as fp:
        for line in fp.readlines():
            temp = json.loads(line, encoding='utf-8')
            categories = str(temp["categories"])
            state = str(temp["state"])
            # Skip the non-US regions present in the dataset (Ontario plus
            # several Scottish council-area codes)
            if state == "ON" or state == "ELN" or state == "EDH" or state == "MLN" or state == "NTH" or state == "FIF":
                continue
            if state not in state_business:
                state_business[state] = 0
            # Stop once businesses from 50 states have been seen
            if len(state_business.keys()) == 50:
                break
            if category in categories:
                print state
                business_id = str(temp["business_id"])
                city = str(temp["city"])
                name = str(temp["name"])
                create_yelp_set(business_id, state, city, name)
    print "set prepared."
def create_yelp_set(business_id, state, city, name):
    """Appends reviews for *business_id* to the mined-data file as
    tab-separated records: business_id, state, stars, city, name, date.

    The per-state quota (5 reviews) is tracked in the module-level
    state_business dict.
    """
    # NOTE(review): opened before the quota check below, so the early return
    # leaves this handle to be reclaimed by the garbage collector
    file_write = open('Data\state_stars_date_business.txt', mode='a')
    if state_business[state] == 5:
        print state, " is already completed."
        return
    # NOTE(review): the full review dump is re-read for every single business
    # (O(businesses x reviews)); pre-indexing reviews would be far faster
    with open('Data\yelp_academic_dataset_review.json') as fp:
        for line in fp.readlines():
            temp = json.loads(line, encoding='utf-8')
            if str(temp["business_id"]) == business_id:
                state_business[state] += 1
                star = str(temp["stars"])
                date = str(temp["date"])
                date_tm = datetime.datetime.strptime(date, "%Y-%m-%d").date()
                file_write.write(business_id)
                file_write.write('\t')
                file_write.write(state)
                file_write.write('\t')
                file_write.write(star)
                file_write.write('\t')
                file_write.write(city)
                file_write.write('\t')
                file_write.write(name)
                file_write.write('\t')
                file_write.write(str(date_tm))
                file_write.write('\n')
                # Stop once 5 reviews have been stored for this state
                if state_business[state] == 5:
                    break
    # NOTE(review): state_5_star_dict is only filled by state_review_trends(),
    # so during mining this loop prints nothing
    for key, value in state_5_star_dict.iteritems():
        print key, value
    file_write.close()
    print "Done."
def state_review_trends():
    """Reads the mined tab-separated records and reports, as JSON, the number
    of 5-star reviews per state (the shape expected by the map widget)."""
    count = 0
    with open('Data\state_stars_date_business.txt') as fp:
        for line in fp.readlines():
            count += 1
            # Record layout: business_id, state, stars, city, name, date
            tup = (line.split("\t")[0], line.split("\t")[1], line.split("\t")[2], line.split("\t")[3],
                   line.split("\t")[4], line.split("\t")[5])
            state = tup[1]
            star_rating = int(tup[2])
            # NOTE(review): every non-5-star record is skipped here, which makes
            # the 4/3/2/1-star branches below unreachable (dead code)
            if int(star_rating) != 5:
                continue
            if state not in state_5_star_dict:
                state_5_star_dict[state] = 0
            if state not in state_4_star_dict:
                state_4_star_dict[state] = 0
            if state not in state_3_star_dict:
                state_3_star_dict[state] = 0
            if state not in state_2_star_dict:
                state_2_star_dict[state] = 0
            if state not in state_1_star_dict:
                state_1_star_dict[state] = 0
            if star_rating == 5:
                state_5_star_dict[state] += 1
            if star_rating == 4:
                state_4_star_dict[state] += 1
            if star_rating == 3:
                state_3_star_dict[state] += 1
            if star_rating == 2:
                state_2_star_dict[state] += 1
            if star_rating == 1:
                state_1_star_dict[state] += 1
    response = []
    print "Number of 5 star reviews per state."
    for key, value in state_5_star_dict.iteritems():
        # One {id, value} entry per state, as consumed by the map visualisation
        response.append({'id': key, 'value': value})
        print key, value
    json_data = json.dumps(response)
    print json_data
    print "Done."
    print count
def main():
    """Entry point: build the per-state 5-star summary from the mined data.

    Re-enable the create_set_for_business_with_cat call below to re-run the
    mining step against the raw Yelp dumps first.
    """
    # create_set_for_business_with_cat("Nightlife")
    state_review_trends()
# Script entry point
if __name__ == "__main__":
    print "Execute Script!!"
    main()
| {
"repo_name": "akshaykamath/StateReviewTrendAnalysisYelp",
"path": "StateReviewTrendsPOC.py",
"copies": "1",
"size": "4899",
"license": "mit",
"hash": 6676727531916012000,
"line_mean": 27.4825581395,
"line_max": 119,
"alpha_frac": 0.5362318841,
"autogenerated": false,
"ratio": 3.7597851112816576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4796016995381658,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alain Dechorgnat'
import subprocess
import re
def get_ceph_version():
    """Return the installed Ceph version as a "major.minor.revision" string.

    Runs "ceph --version" and extracts the first dotted version number from
    its output. Returns "not found" when the command exits non-zero or no
    version can be parsed, and a "0.0.0 ..." placeholder when running the
    command fails altogether (typically: ceph is not installed on this host).
    """
    try:
        args = ['ceph',
                '--version']
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, error = p.communicate()
        if p.returncode != 0:
            return "not found"
        # Raw string so the backslashes are genuine regex escapes
        ceph_version = re.search(r'[0-9]*\.[0-9]*\.[0-9]*', output)
        if ceph_version:
            return ceph_version.group(0)
        return "not found"
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any other failure falls back to the placeholder
        return '0.0.0 (could not be found on inkscope server - Please consider to install Ceph on it)'
def get_ceph_version_name(major, minor):
    """Translate a (major, minor) Ceph version pair into its release name.

    Both values arrive as strings (split from the dotted version string).
    Releases from Dumpling (0.67) up to Nautilus (14.x) are recognised;
    unknown majors and pre-Dumpling minors report 'Really too old'.
    """
    # Post-Hammer releases are identified by the major number alone
    named_majors = {
        '14': 'Nautilus',
        '13': 'Mimic',
        '12': 'Luminous',
        '11': 'Kraken',
        '10': 'Jewel',
        '9': 'Infernalis',
    }
    if major in named_majors:
        return named_majors[major]
    if major == '0':
        # Pre-9.x releases are distinguished by the minor number
        m = int(minor)
        if m == 94:
            return 'Hammer'
        if m > 87:
            return 'Hammer (pre-version)'
        if m == 87:
            return 'Giant'
        if m > 80:
            return 'Giant (pre-version)'
        if m == 80:
            return 'Firefly'
        if m > 72:
            return 'Firefly (pre-version)'
        if m == 72:
            return 'Emperor'
        if m > 67:
            return 'Emperor (pre-version)'
        if m == 67:
            return 'Dumpling'
        if m == 0:
            return 'Unavailable'
    return 'Really too old'
# Module-level: resolve the installed Ceph version once at import time.
# NOTE(review): get_ceph_version() can return "not found", which contains no
# dots - the three-way unpack below would then raise ValueError.
version = get_ceph_version()
major, minor, revision = version.split(".")
name = get_ceph_version_name(major, minor)
| {
"repo_name": "inkscope/inkscope",
"path": "inkscopeCtrl/ceph_version.py",
"copies": "1",
"size": "1709",
"license": "apache-2.0",
"hash": -841413974532078800,
"line_mean": 26.564516129,
"line_max": 102,
"alpha_frac": 0.521357519,
"autogenerated": false,
"ratio": 3.6517094017094016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46730669207094017,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alain.dechorgnat@orange.com'
# Copyright (c) 2014, Alain Dechorgnat
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from Log import Log
import json
class S3User:
    """Static helpers for radosgw user administration.

    Every method talks to the radosgw admin REST endpoint (the "user"
    resource) through a connection object exposing request()/send()
    (see S3Ctrl.getAdminConnection).  Responses are returned as the raw
    body text read from the connection.  Written for Python 2.
    """

    def __init__(self):
        # No per-instance state of its own; the static methods build
        # throwaway instances purely as a bag for parsed request fields.
        pass

    @staticmethod
    def create(jsonUserData , conn):
        """Create a user (PUT "user") from a JSON payload; return the raw response body.

        May also create a swift subuser in a second request when the
        payload carries create_swift_subuser == 'True'.
        """
        # Content of jsonUserData :
        # --------------------
        # uid / The S3User ID to be created./ String / Required
        # display-name / The display name of the user to be created. / String / Required
        # email / The email address associated with the user./ String / not required
        # key-type / Key type to be generated, options are: swift, s3 (default). / String / not required
        # access-key / Specify access key./ String / not required
        # secret-key / Specify secret key./ String / not required
        # user-caps / User capabilities / String : Example: usage=read, write; users=read / not required
        # generate-key / Generate a new key pair and add to the existing keyring./Boolean Example: True [True]/ not required
        # max-buckets / Specify the maximum number of buckets the user can own. / Integer / not required
        # suspended / Specify whether the user should be suspended / Boolean Example: False [False] / not required
        self = S3User()
        Log.debug("User creation input parameters : json="+jsonUserData)
        userData = json.loads(jsonUserData)
        # JSON payload uses underscore names; the REST API expects
        # dash-separated argument names (mapped below).
        self.uid = userData.get('uid', None)
        self.displayName = userData.get('display_name', None)
        self.email = userData.get('email',None)
        self.keyType = userData.get('key_type', None)
        self.access = userData.get('access_key', None)
        self.secret = userData.get('secret_key', None)
        self.caps = userData.get('user_caps', None)
        self.generate = userData.get('generate_key', None)
        self.maxBuckets = userData.get('max_buckets', None)
        self.suspended = userData.get('suspended', None)
        # Build the query arguments; optional fields are only sent when
        # they were present in the payload.
        myargs = []
        myargs.append(("uid",self.uid))
        myargs.append(("display-name",self.displayName))
        if self.email is not None :
            myargs.append(("email",self.email))
        if self.keyType is not None :
            myargs.append(("key-type",self.keyType))
        if self.access is not None :
            myargs.append(("access-key",self.access))
        if self.secret is not None :
            myargs.append(("secret-key",self.secret))
        if self.caps is not None :
            myargs.append(("user-caps",self.caps))
        if self.generate is not None :
            myargs.append(("generate-key",self.generate))
        if self.maxBuckets is not None :
            # max_buckets may arrive as an int; the query string needs text.
            myargs.append(("max-buckets",self.maxBuckets.__str__()))
        if self.suspended is not None :
            myargs.append(("suspended",self.suspended))
        Log.debug(myargs.__str__())
        request= conn.request(method="PUT", key="user", args= myargs)
        res = conn.send(request)
        user = res.read()
        Log.debug(user)
        Log.debug("Created User : "+user)
        # if needed, create swift subuser
        create_swift_subuser=userData.get('create_swift_subuser', 'False')
        Log.debug("create_swift_subuser = "+create_swift_subuser)
        if create_swift_subuser == 'True':
            subuser=userData.get('subuser', None)
            subuser_access=userData.get('subuser_access', "full")
            subuser_generate_key=userData.get('subuser_generate_key', 'True')
            subuser_secret_key = None;
            # Only pass an explicit secret when key generation is disabled.
            if subuser_generate_key=='False':
                subuser_secret_key=userData.get('subuser_secret_key', None)
            myargs = []
            myargs.append(("gen-subuser",""))
            myargs.append(("uid",self.uid))
            myargs.append(("access",subuser_access))
            if subuser is not None :
                myargs.append(("subuser",subuser))
            if subuser_secret_key is not None :
                myargs.append(("secret-key",subuser_secret_key))
            Log.debug(myargs.__str__())
            request= conn.request(method="PUT", key="user", args= myargs)
            res = conn.send(request)
            subusers = res.read()
            Log.debug(subusers.__str__())
        # Note: the returned body is the user-creation response only,
        # even when a subuser was also created.
        return user

    @staticmethod
    def modify(uid, jsonUserData , conn):
        """Modify an existing user (POST "user"); return the raw response body."""
        # Content of jsonUserData :
        # --------------------
        # display-name / The display name of the user to be created. / String / Required
        # email / The email address associated with the user./ String / not required
        # key-type / Key type to be generated, options are: swift, s3 (default). / String / not required
        # access-key / Specify access key./ String / not required
        # secret-key / Specify secret key./ String / not required
        # user-caps / User capabilities / String : Example: usage=read, write; users=read / not required
        # generate-key / Generate a new key pair and add to the existing keyring./Boolean Example: True [True]/ not required
        # max-buckets / Specify the maximum number of buckets the user can own. / Integer / not required
        # suspended / Specify whether the user should be suspended / Boolean Example: False [False] / not required
        self = S3User()
        userData = json.loads(jsonUserData)
        self.uid = uid
        self.displayName = userData.get('display_name', None)
        self.email = userData.get('email',None)
        self.maxBuckets = userData.get('max_buckets', None)
        self.suspended = userData.get('suspended', None)
        # self.keyType = userData.get('key_type', None)
        self.access = userData.get('access_key', None)
        self.secret = userData.get('secret_key', None)
        # self.caps = userData.get('user_caps', None)
        self.generate = userData.get('generate_key', None)
        myargs = []
        myargs.append(("uid",self.uid))
        if self.displayName is not None :
            myargs.append(("display-name",self.displayName))
        if self.email is not None :
            myargs.append(("email",self.email))
        # if self.keyType is not None :
        # myargs.append(("key-type",self.keyType))
        # if self.access is not None :
        # myargs.append(("access-key",self.access))
        # if self.secret is not None :
        # myargs.append(("secret-key",self.secret))
        # if self.caps is not None :
        # myargs.append(("user-caps",self.caps))
        if self.generate is not None :
            myargs.append(("generate-key",self.generate))
        else:
            # Explicit key pair is only honoured when no key generation
            # was requested, and only when both halves are present.
            if self.access is not None and self.secret is not None :
                myargs.append(("access-key",self.access))
                myargs.append(("secret-key",self.secret))
        if self.maxBuckets is not None :
            myargs.append(("max-buckets",self.maxBuckets.__str__()))
        if self.suspended is not None :
            myargs.append(("suspended",self.suspended))
        Log.debug("Create user : "+myargs.__str__())
        request= conn.request(method="POST", key="user", args= myargs)
        res = conn.send(request)
        user = res.read()
        return user

    @staticmethod
    def view(uid , conn):
        """Fetch a user's info (GET "user"); return the raw response body."""
        # uid / The user ID to view./ String / Required
        request= conn.request(method="GET", key="user", args=[("uid",uid)])
        res = conn.send(request)
        userInfo = res.read()
        return userInfo

    @staticmethod
    def remove(uid , conn):
        """Delete a user and purge its data (DELETE "user" with purge-data)."""
        # uid / The user ID to view./ String / Required
        request= conn.request(method="DELETE", key="user", args=[("uid",uid),("purge-data","True")])
        res = conn.send(request)
        userInfo = res.read()
        return userInfo

    @staticmethod
    def removeKey(key , conn):
        """Remove an S3 access key (DELETE "user" with the "key" sub-resource)."""
        request= conn.request(method="DELETE", key="user", args=[("key",""),("access-key",key)])
        res = conn.send(request)
        userInfo = res.read()
        print userInfo.__str__()
        return userInfo.__str__()

    @staticmethod
    def list( conn ):
        """List all user ids via the metadata endpoint; return a JSON array.

        Each entry is {"uid": id, "display_name": id} — the metadata
        listing only yields ids, so the display name is a copy of the uid.
        """
        request= conn.request(method="GET", key="metadata/user")
        res = conn.send(request)
        data = json.loads(res.read())
        userList = []
        for userId in data:
            userList.append({"uid": userId , "display_name": userId})
        print "User list : "+userList.__str__()
        return json.dumps(userList)

    @staticmethod
    def createSubuser(uid, jsonSubuserData , conn):
        """Create a subuser for `uid` (PUT "user" with gen-subuser)."""
        self = S3User()
        subuserData = json.loads(jsonSubuserData)
        self.uid = uid
        self.subuser = subuserData.get('subuser', None)
        self.secret_key = subuserData.get('secret_key', None)
        self.access = subuserData.get('access',None)
        myargs = []
        myargs.append(("gen-subuser",""))
        myargs.append(("uid",self.uid))
        myargs.append(("access",self.access))
        if self.subuser is not None :
            myargs.append(("subuser",self.subuser))
        if self.secret_key is not None :
            myargs.append(("secret-key",self.secret_key))
        else:
            # No explicit secret supplied: ask radosgw to generate one.
            myargs.append(("generate-secret","True"))
        Log.debug(myargs.__str__())
        request= conn.request(method="PUT", key="user", args= myargs)
        res = conn.send(request)
        subusers = res.read()
        Log.debug(subusers.__str__())
        return subusers.__str__()

    @staticmethod
    def saveCapability(uid, type, perm , conn):
        """Add a capability "type=perm" to a user (PUT "user" caps)."""
        myargs = []
        myargs.append(("caps",""))
        myargs.append(("uid",uid))
        myargs.append(("user-caps",type+"="+perm))
        Log.debug(myargs.__str__())
        request= conn.request(method="PUT", key="user", args= myargs)
        res = conn.send(request)
        caps = res.read()
        Log.debug(caps.__str__())
        return caps.__str__()

    @staticmethod
    def deleteCapability(uid, type, perm , conn):
        """Remove a capability "type=perm" from a user (DELETE "user" caps)."""
        myargs = []
        myargs.append(("caps",""))
        myargs.append(("uid",uid))
        myargs.append(("user-caps",type+"="+perm))
        Log.debug(myargs.__str__())
        request= conn.request(method="DELETE", key="user", args= myargs)
        res = conn.send(request)
        caps = res.read()
        Log.debug(caps.__str__())
        return caps.__str__()

    @staticmethod
    def deleteSubuser(uid, subuser , conn):
        """Delete a subuser; the response body is discarded ("" is returned)."""
        myargs = []
        myargs.append(("subuser",subuser))
        myargs.append(("uid", uid))
        Log.debug(myargs.__str__())
        request= conn.request(method="DELETE", key="user", args= myargs)
        res = conn.send(request)
        return "";

    @staticmethod
    def createSubuserKey(uid, subuser , generate_key, secret_key, conn):
        """Create a swift key for a subuser; generated or explicit secret.

        generate_key is the string 'True'/'False' (from the web form),
        not a boolean.
        """
        myargs = []
        myargs.append(("key",""))
        myargs.append(("uid", uid))
        myargs.append(("subuser",subuser))
        myargs.append(("key-type", "swift"))
        if (generate_key=='True'):
            myargs.append(("generate-key", 'True'))
        else:
            myargs.append(("secret-key", secret_key))
        Log.debug(myargs.__str__())
        request= conn.request(method="PUT", key="user", args= myargs)
        Log.debug(request.__str__())
        res = conn.send(request)
        return "";

    @staticmethod
    def deleteSubuserKey(uid, subuser , key, conn):
        """Delete a subuser's swift key identified by its secret."""
        myargs = []
        myargs.append(("subuser",subuser))
        myargs.append(("uid", uid))
        myargs.append(("key-type", "swift"))
        myargs.append(("secret-key", key))
        Log.debug(myargs.__str__())
        request= conn.request(method="DELETE", key="user", args= myargs)
        res = conn.send(request)
        return "";

    @staticmethod
    def getBuckets (uid , jsonData, conn):
        """List buckets owned by `uid` (GET "bucket"); return the raw body.

        When jsonData is None, stats are requested by default ("True").
        """
        # Content of jsonData :
        # --------------------
        # stats / Specify whether the stats should be returned / Boolean Example: False [False] / not required
        self = S3User()
        if jsonData is not None :
            data = json.loads(jsonData)
            self.stats = data.get('stats', None)
        else:
            self.stats = "True"
        myargs = []
        myargs.append(("uid",uid))
        if self.stats is not None :
            myargs.append(("stats",self.stats))
        Log.debug("myArgs: "+myargs.__str__())
        request= conn.request(method="GET", key="bucket", args= myargs)
        res = conn.send(request)
        info = res.read()
        return info
| {
"repo_name": "abrefort/inkscope-debian",
"path": "inkscopeCtrl/S3/user.py",
"copies": "1",
"size": "13635",
"license": "apache-2.0",
"hash": -382573789481204300,
"line_mean": 41.0833333333,
"line_max": 124,
"alpha_frac": 0.5983865053,
"autogenerated": false,
"ratio": 3.9237410071942445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9975586986078304,
"avg_score": 0.009308105283188064,
"num_lines": 324
} |
__author__ = 'alain.dechorgnat@orange.com'
from flask import Flask, request, Response
from S3.bucket import S3Bucket, S3Error
from S3.user import S3User
from Log import Log
import json
class S3Ctrl:
    """Flask-facing controller for radosgw (S3) user and bucket management.

    Reads admin credentials and the radosgw URL from the inkscope
    configuration and delegates user operations to S3User.  Handler
    methods pull their parameters from the current Flask `request`.
    Written for Python 2.
    """

    def __init__(self,conf):
        # conf: inkscope configuration dict.  Defaults mirror a local,
        # unauthenticated radosgw admin endpoint.
        self.admin = conf.get("radosgw_admin", "admin")
        self.key = conf.get("radosgw_key", "")
        self.secret = conf.get("radosgw_secret", "")
        self.radosgw_url = conf.get("radosgw_url", "127.0.0.1")
        # Normalise so url concatenations below always have a separator.
        if not self.radosgw_url.endswith('/'):
            self.radosgw_url += '/'
        self.url = self.radosgw_url + self.admin
        #print "config url: "+self.url
        #print "config admin: "+self.admin
        #print "config key: "+self.key
        #print "config secret: "+self.secret

    def getAdminConnection(self):
        """Return an S3Bucket connection bound to the admin endpoint."""
        return S3Bucket(self.admin, access_key=self.key, secret_key=self.secret , base_url= self.url)

    def getBucket(self,bucketName):
        """Return an S3Bucket connection bound to a specific bucket."""
        return S3Bucket(bucketName, access_key=self.key, secret_key=self.secret , base_url= self.radosgw_url +bucketName)

    def listUsers(self):
        """Return the JSON list of radosgw users."""
        Log.debug( "list users from rgw api")
        return S3User.list(self.getAdminConnection())

    def createUser(self):
        """Create a user from the 'json' form field of the current request."""
        Log.debug( "user creation")
        jsonform = request.form['json']
        return S3User.create(jsonform,self.getAdminConnection())

    def modifyUser(self, uid):
        """Modify user `uid` from the 'json' form field."""
        Log.debug( "modify user with uid "+ uid)
        jsonform = request.form['json']
        return S3User.modify(uid,jsonform,self.getAdminConnection())

    def getUser(self, uid):
        """Return the raw user-info body for `uid`."""
        Log.debug( "get user with uid "+ uid)
        return S3User.view(uid,self.getAdminConnection())

    def removeUser(self, uid):
        """Delete user `uid` (purges its data)."""
        Log.debug( "remove user with uid "+ uid)
        return S3User.remove(uid,self.getAdminConnection())

    def removeUserKey(self, uid, key):
        """Delete access key `key`; `uid` is only used for logging."""
        Log.debug( "remove key for user with uid "+ uid)
        return S3User.removeKey(key,self.getAdminConnection())

    def createSubuser(self, uid):
        """Create a subuser for `uid` from the 'json' form field."""
        Log.debug( "create subuser for user with uid "+ uid)
        jsonform = request.form['json']
        return S3User.createSubuser(uid,jsonform,self.getAdminConnection())

    def saveCapability(self, uid):
        """Add capability type=perm (from form fields) to user `uid`."""
        capType = request.form['type']
        capPerm = request.form['perm']
        Log.debug( "saveCapability "+capType+"="+capPerm+" for user with uid "+ uid)
        return S3User.saveCapability(uid, capType, capPerm, self.getAdminConnection())

    def deleteCapability(self, uid):
        """Remove capability type=perm (from form fields) from user `uid`."""
        capType = request.form['type']
        capPerm = request.form['perm']
        Log.debug( "deleteCapability "+capType+"="+capPerm+" for user with uid "+ uid)
        return S3User.deleteCapability(uid, capType, capPerm, self.getAdminConnection())

    def deleteSubuser(self, uid, subuser):
        """Delete `subuser` of user `uid`."""
        Log.debug( "delete subuser "+subuser+" for user with uid "+ uid)
        return S3User.deleteSubuser(uid, subuser, self.getAdminConnection())

    def createSubuserKey(self, uid, subuser):
        """Create a swift key for `subuser` from the request form fields."""
        Log.debug( "create key for subuser "+subuser+" for user with uid "+ uid)
        generate_key = request.form['generate_key']
        secret_key = request.form['secret_key']
        return S3User.createSubuserKey(uid, subuser, generate_key, secret_key, self.getAdminConnection())

    def deleteSubuserKey(self, uid, subuser, key):
        """Delete swift key `key` of `subuser`."""
        Log.debug( "delete key "+key+" for subuser "+subuser+" for user with uid "+ uid)
        return S3User.deleteSubuserKey(uid, subuser, key, self.getAdminConnection())

    def getUserBuckets(self, uid):
        """Return the bucket list (with stats) for user `uid`."""
        Log.debug( "getBuckets for uid " + uid)
        jsonform = None
        return S3User.getBuckets(uid,jsonform,self.getAdminConnection())

    # bucket management
    def createBucket(self):
        """Create a bucket as its future owner.

        Looks up the owner's first access/secret key pair and issues the
        PUT with the owner's own credentials so the bucket belongs to them.
        """
        bucket = request.form['bucket']
        owner = request.form['owner']
        Log.debug( "createBucket "+bucket+" for user "+owner)
        print "\n--- info user for owner ---"
        userInfo = self.getUser(owner)
        #print userInfo
        userInfo = json.loads(userInfo)
        keys = userInfo.get('keys')
        #print keys
        # Use the owner's first key pair for the bucket creation.
        access_key = keys[0].get('access_key')
        secret_key = keys[0].get('secret_key')
        #print access_key
        #print secret_key
        print "\n--- create bucket for owner ---"
        mybucket = S3Bucket(bucket, access_key=access_key, secret_key=secret_key , base_url= self.radosgw_url+bucket)
        res = mybucket.put_bucket()
        return 'OK'

    def getBucketInfo (self, bucket):
        """Return raw admin bucket info; 'stats' may come from the form."""
        myargs = []
        stats = request.form.get('stats', None)
        if stats is not None:
            myargs.append(("stats",stats))
        if bucket is not None:
            myargs.append(("bucket",bucket))
        conn = self.getAdminConnection()
        # `request` is the Flask request; use a different local name.
        request2= conn.request(method="GET", key="bucket", args= myargs)
        res = conn.send(request2)
        info = res.read()
        print info
        return info

    def linkBucket (self,uid, bucket):
        """Link `bucket` to user `uid` (admin PUT "bucket")."""
        conn = self.getAdminConnection()
        myargs = [("bucket",bucket),("uid",uid)]
        request= conn.request(method="PUT", key="bucket", args= myargs)
        res = conn.send(request)
        info = res.read()
        print info
        return info

    def listBucket (self, bucketName):
        """List the objects of a bucket, acting as the bucket's owner.

        Resolves the bucket owner through the admin API, borrows the
        owner's first key pair, and returns a JSON array of
        {"name", "size"} entries.
        """
        myargs = []
        if bucketName is not None:
            myargs.append(("bucket",bucketName))
        conn = self.getAdminConnection()
        request2= conn.request(method="GET", key="bucket", args= myargs)
        res = conn.send(request2)
        bucketInfo = json.loads(res.read())
        print bucketInfo
        owner = bucketInfo.get('owner')
        userInfo = self.getUser(owner)
        print userInfo
        userInfo = json.loads(userInfo)
        keys = userInfo.get('keys')
        print keys
        access_key = keys[0].get('access_key')
        secret_key = keys[0].get('secret_key')
        bucket = S3Bucket(bucketName, access_key=access_key, secret_key=secret_key , base_url= self.radosgw_url+bucketName)
        # NOTE: `list` shadows the builtin here; kept for compatibility.
        list = []
        for (key, modify, etag, size) in bucket.listdir():
            obj = {}
            obj['name'] = key
            obj['size'] = size
            list.append(obj)
            print "%r (%r) is size %r, modified %r" % (key, etag, size, modify)
        return json.dumps(list)

    def unlinkBucket (self,uid, bucket):
        """Unlink `bucket` from user `uid` (admin POST "bucket")."""
        conn = self.getAdminConnection()
        myargs = [("bucket",bucket),("uid",uid)]
        request= conn.request(method="POST", key="bucket", args= myargs)
        res = conn.send(request)
        info = res.read()
        print info
        return info

    def deleteBucket (self,bucket):
        """Delete `bucket` and purge its objects (admin DELETE "bucket")."""
        conn = self.getAdminConnection()
        myargs = [("bucket",bucket),("purge-objects","True")]
        request= conn.request(method="DELETE", key="bucket", args= myargs)
        res = conn.send(request)
        info = res.read()
        print info
        return info
| {
"repo_name": "abrefort/inkscope-debian",
"path": "inkscopeCtrl/S3Ctrl.py",
"copies": "1",
"size": "6920",
"license": "apache-2.0",
"hash": 4350361273932041000,
"line_mean": 36.6086956522,
"line_max": 123,
"alpha_frac": 0.6147398844,
"autogenerated": false,
"ratio": 3.673036093418259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9738082782734265,
"avg_score": 0.009938639016798695,
"num_lines": 184
} |
__author__ = 'alain.dechorgnat@orange.com'
from flask import Flask, request, Response
from S3.bucket import S3Bucket, S3Error
from S3.user import S3User
from Log import Log
import json
import boto
import boto.s3.connection
from boto.exception import S3PermissionsError
#import boto3
from InkscopeError import InkscopeError
class S3Ctrl:
    """Flask-facing controller for radosgw (S3) user/bucket/ACL management.

    Newer variant of S3Ctrl: adds a boto connection for ACL work, https
    support via `secure`, and group/user grant management.  Handler
    methods pull their parameters from the current Flask `request`.
    Written for Python 2.
    """

    def __init__(self,conf):
        # conf: inkscope configuration dict.
        self.admin = conf.get("radosgw_admin", "admin")
        self.key = conf.get("radosgw_key", "")
        self.secret = conf.get("radosgw_secret", "")
        self.radosgw_url = conf.get("radosgw_url", "127.0.0.1")
        # Plain host[:port] used by boto (no scheme).
        self.radosgw_endpoint = conf.get("radosgw_endpoint","")
        # https detection for the S3Bucket connections.
        self.secure = self.radosgw_url.startswith("https://")
        if not self.radosgw_url.endswith('/'):
            self.radosgw_url += '/'
        self.url = self.radosgw_url + self.admin
        #print "config url: "+self.url
        #print "config admin: "+self.admin
        #print "config key: "+self.key
        #print "config secret: "+self.secret

    def connectS3(self):
        """Return a boto S3 connection to the radosgw endpoint (used for ACLs)."""
        #boto
        conn = boto.connect_s3(
            aws_access_key_id = self.key,
            aws_secret_access_key = self.secret,
            host = self.radosgw_endpoint,
            # NOTE(review): hard-coded non-ssl even when self.secure is
            # True — confirm whether this should follow self.secure.
            is_secure=False, # comment if you are using ssl
            calling_format = boto.s3.connection.OrdinaryCallingFormat(),
            )
        #boto3 ## for the future migration
        # s3_resource = boto3.resource('s3')
        # iam = boto3.resource('iam',i
        # aws_access_key_id=self.key
        # aws_secret_access_key=self.secret,
        # )
        return conn

    def getAdminConnection(self):
        """Return an S3Bucket connection bound to the admin endpoint."""
        return S3Bucket(self.admin, access_key=self.key, secret_key=self.secret , base_url= self.url, secure= self.secure)

    def getBucket(self,bucketName):
        """Return an S3Bucket connection bound to a specific bucket."""
        return S3Bucket(bucketName, access_key=self.key, secret_key=self.secret , base_url= self.radosgw_url +bucketName, secure= self.secure)

    def listUsers(self):
        """Return the JSON list of radosgw users."""
        Log.debug( "list users from rgw api")
        return S3User.list(self.getAdminConnection())

    def createUser(self):
        """Create a user from the 'json' form field of the current request."""
        Log.debug( "user creation")
        jsonform = request.form['json']
        return S3User.create(jsonform,self.getAdminConnection())

    def modifyUser(self, uid):
        """Modify user `uid` from the 'json' form field."""
        Log.debug( "modify user with uid "+ uid)
        jsonform = request.form['json']
        return S3User.modify(uid,jsonform,self.getAdminConnection())

    def getUser(self, uid):
        """Return the raw user-info body for `uid`."""
        Log.debug( "get user with uid "+ uid)
        return S3User.view(uid,self.getAdminConnection())

    def removeUser(self, uid):
        """Delete user `uid` (purges its data)."""
        Log.debug( "remove user with uid "+ uid)
        return S3User.remove(uid,self.getAdminConnection())

    def removeUserKey(self, uid, key):
        """Delete access key `key`; `uid` is only used for logging."""
        Log.debug( "remove key for user with uid "+ uid)
        return S3User.removeKey(key,self.getAdminConnection())

    def createSubuser(self, uid):
        """Create a subuser for `uid` from the 'json' form field."""
        Log.debug( "create subuser for user with uid "+ uid)
        jsonform = request.form['json']
        return S3User.createSubuser(uid,jsonform,self.getAdminConnection())

    def saveCapability(self, uid):
        """Add capability type=perm (from form fields) to user `uid`."""
        capType = request.form['type']
        capPerm = request.form['perm']
        Log.debug( "saveCapability "+capType+"="+capPerm+" for user with uid "+ uid)
        return S3User.saveCapability(uid, capType, capPerm, self.getAdminConnection())

    def deleteCapability(self, uid):
        """Remove capability type=perm (from form fields) from user `uid`."""
        capType = request.form['type']
        capPerm = request.form['perm']
        Log.debug( "deleteCapability "+capType+"="+capPerm+" for user with uid "+ uid)
        return S3User.deleteCapability(uid, capType, capPerm, self.getAdminConnection())

    def deleteSubuser(self, uid, subuser):
        """Delete `subuser` of user `uid`."""
        Log.debug( "delete subuser "+subuser+" for user with uid "+ uid)
        return S3User.deleteSubuser(uid, subuser, self.getAdminConnection())

    def createSubuserKey(self, uid, subuser):
        """Create a swift key for `subuser` from the request form fields."""
        Log.debug( "create key for subuser "+subuser+" for user with uid "+ uid)
        generate_key = request.form['generate_key']
        secret_key = request.form['secret_key']
        return S3User.createSubuserKey(uid, subuser, generate_key, secret_key, self.getAdminConnection())

    def deleteSubuserKey(self, uid, subuser):
        """Delete the swift key of `subuser`.

        NOTE(review): the S3User.deleteSubuserKey visible elsewhere in
        this tree takes (uid, subuser, key, conn) — this 3-argument call
        only matches a user.py variant without the `key` parameter;
        verify against the paired S3/user.py.
        """
        Log.debug( "delete key for subuser "+subuser+" for user with uid "+ uid)
        return S3User.deleteSubuserKey(uid, subuser, self.getAdminConnection())

    def getUserBuckets(self, uid):
        """Return the bucket list (with stats) for user `uid`."""
        Log.debug( "getBuckets for uid " + uid)
        jsonform = None
        return S3User.getBuckets(uid,jsonform,self.getAdminConnection())

    # bucket management
    def createBucket(self):
        """Create a bucket (with an ACL) as its future owner.

        Looks up the owner's first access/secret key pair and issues the
        PUT with the owner's own credentials so the bucket belongs to them.
        """
        bucket = request.form['bucket']
        owner = request.form['owner']
        acl = request.form['acl']
        Log.debug( "createBucket "+bucket+" for user "+owner+" with acl "+acl)
        print "\n--- info user for owner ---"
        userInfo = self.getUser(owner)
        #print userInfo
        userInfo = json.loads(userInfo)
        keys = userInfo.get('keys')
        #print keys
        # Use the owner's first key pair for the bucket creation.
        access_key = keys[0].get('access_key')
        secret_key = keys[0].get('secret_key')
        #print access_key
        #print secret_key
        print "\n--- create bucket for owner ---"
        mybucket = S3Bucket(bucket, access_key=access_key, secret_key=secret_key , base_url= self.radosgw_url+bucket, secure= self.secure)
        res = mybucket.put_bucket(acl=acl)
        return 'OK'

    def getUserAccess(self, bucketName, user):
        """Return `user`'s permission(s) on `bucketName` as a comma-joined string.

        Returns "none" when the user appears in no grant.
        """
        conn = self.connectS3()
        mybucket = conn.get_bucket(bucketName, validate=False)
        bucket_acl = mybucket.get_acl()
        print "getting "+user+" access to "+bucketName
        perm = ""
        for grant in bucket_acl.acl.grants:
            if grant.id == user:
                if perm == "":
                    perm = grant.permission
                else:
                    perm = perm + ", " + grant.permission
        if perm == "":
            perm = "none"
        return perm

    def getBucketACL(self, bucketName):
        """Build a JSON access list for `bucketName` covering every known user.

        Each entry is {"uid", "type", "permission"}; the two ACL groups
        (AllUsers / AuthenticatedUsers) are appended explicitly since
        bucket ACL grants do not enumerate them per user.
        """
        conn = self.connectS3()
        mybucket = conn.get_bucket(bucketName, validate=False)
        #validate=False if you are sure the bucket exists
        #validate=true to make a request to check if it exists first
        ### getting everything needed
        usersdata = self.listUsers()
        userList = json.loads(usersdata)
        bucket_acl = mybucket.get_acl() # bucket_acl do not mention every user
        print "getting "+bucketName+" ACL"
        ### building the JSON file
        mylist = []
        grantGroup = ""
        for user in userList:
            obj = {}
            obj['uid'] = user['uid']
            obj['type'] = "user"
            obj['permission'] = ""
            for grant in bucket_acl.acl.grants:
                # checking if there is a group in the ACL and saving it
                if grant.type == 'Group':
                    if grant.uri.endswith("AllUsers"):
                        grantGroup = "all"
                        groupPerm = grant.permission
                    else: # if AuthenticatedUsers
                        grantGroup = "auth"
                        groupPerm = grant.permission
                # getting permission(s)
                if grant.id == user['uid']:
                    if obj['permission'] == "":
                        obj['permission'] = grant.permission
                    else:
                        obj['permission'] = obj['permission'] + ", " + grant.permission
            if obj['permission'] == "":
                obj['permission'] = "none"
            mylist.append(obj)
        # need to set groups manually
        allUsers = {}
        allUsers['uid'] = "AllUsers"
        allUsers['type'] = "group"
        authUsers = {}
        authUsers['uid'] = "AuthenticatedUsers"
        authUsers['type'] = "group"
        if grantGroup == "":
            allUsers['permission'] = "none"
            authUsers['permission'] = "none"
        elif grantGroup == "all":
            allUsers['permission'] = groupPerm
            authUsers['permission'] = "none"
        else: #if grantGroup == "auth"
            allUsers['permission'] = "none"
            authUsers['permission'] = groupPerm
        mylist.append(allUsers)
        mylist.append(authUsers)
        print 'Complete access list : [%s]' % ', '.join(map(str, mylist))
        return json.dumps(mylist)

    def grantGroupAccess (self, bucket, bucketName, bucketACL):
        """Re-apply per-user email grants from bucketACL to `bucket`.

        Used after a canned ACL reset (which wipes individual grants);
        the owner is skipped since canned ACLs keep owner rights.
        """
        bucketInfo = self.getBucketInfo(bucketName)
        bucketInfo = json.loads(bucketInfo)
        owner = bucketInfo.get('owner')
        #print "owner : "+owner
        for grant in bucketACL.acl.grants:
            if grant.id != owner: #no need to grant access to the owner
                userInfo = self.getUser(grant.id)
                userInfo = json.loads(userInfo)
                email = userInfo.get('email')
                #print "id : "+grant.id
                #print "mail : "+email
                bucket.add_email_grant(permission=grant.permission, email_address=email)

    def grantAccess (self, user, bucketName):
        """Grant `access` (form field) on `bucketName` to a user or group.

        The 'email' form field is either "all" / "auth" (group canned
        ACLs) or an actual email address for a single user.  Raises
        InkscopeError (HTTP 400) when the grant already exists or
        conflicts with an existing one.
        """
        msg = "no message"
        access = request.form['access']
        email = request.form['email']
        conn = self.connectS3()
        mybucket = conn.get_bucket(bucketName, validate=False)
        bucket_acl = mybucket.get_acl()
        group = ""
        granted = ""
        for grant in bucket_acl.acl.grants:
            if grant.id == user: #checking if the user already has access
                if (grant.permission == access) or (grant.permission == "FULL_CONTROL"):
                    granted = grant.permission
            if grant.type == 'Group': #checking if a group has access
                group = grant.uri
        ### granting access
        # using canned ACLs when granting access to a group of users
        if email == "all":
            if group != "": #if a group as access to the bucket
                if group.endswith("AllUsers"): #if this group is AllUsers
                    raise InkscopeError("error1", 400) #shouldn't happen
                else: #if this group is AuthenticatedUsers
                    raise InkscopeError("error2", 400)
            else: #if there's no group : grant access
                mybucket.set_canned_acl("public-read")
                # canned ACL wiped individual grants; restore them
                self.grantGroupAccess(mybucket, bucketName, bucket_acl)
                return "ok"
        elif email == "auth":
            if group != "":
                if group.endswith("AllUsers"):
                    raise InkscopeError("error2", 400)
                else:
                    raise InkscopeError("error1", 400)
            else:
                mybucket.set_canned_acl("authenticated-read")
                self.grantGroupAccess(mybucket, bucketName, bucket_acl)
                return "ok"
        else : #if it's a single user
            if granted == access:
                raise InkscopeError("error1", 400)
            else:
                if granted == "FULL_CONTROL":
                    raise InkscopeError("error3", 400)
                elif access == "FULL_CONTROL":
                    # full control supersedes any previous grant: drop it first
                    msg = self.revokeAccess(user, bucketName)
                    mybucket.add_email_grant(permission=access, email_address=email)
                else :
                    mybucket.add_email_grant(permission=access, email_address=email)
                    msg = "ok"
        return msg

    def revokeAccess(self, user, bucketName):
        """Remove every grant for `user` (or for both groups) from the bucket ACL."""
        conn = self.connectS3()
        bucket = conn.get_bucket(bucketName, validate=False)
        bucketACL = bucket.get_acl()
        new_grants = []
        for grantee in bucketACL.acl.grants:
            if (user == "AllUsers") or (user =="AuthenticatedUsers"): #if revoking a group's access
                if grantee.type != "Group": #no groups in the acl
                    new_grants.append(grantee)
            else : #if revoking a user's access
                if grantee.id != user:
                    new_grants.append(grantee)
        bucketACL.acl.grants = new_grants
        bucket.set_acl(bucketACL)
        return "ok"

    def getBucketInfo (self, bucket):
        """Return raw admin bucket info; 'stats' may come from the form."""
        myargs = []
        stats = request.form.get('stats', None)
        if stats is not None:
            myargs.append(("stats",stats))
        if bucket is not None:
            myargs.append(("bucket",bucket))
        conn = self.getAdminConnection()
        # `request` is the Flask request; use a different local name.
        request2= conn.request(method="GET", key="bucket", args= myargs)
        res = conn.send(request2)
        info = res.read()
        print "BucketInfo : "+info
        return info

    def linkBucket (self,uid, bucket):
        """Link `bucket` to user `uid` (admin PUT "bucket")."""
        conn = self.getAdminConnection()
        myargs = [("bucket",bucket),("uid",uid)]
        request= conn.request(method="PUT", key="bucket", args= myargs)
        res = conn.send(request)
        info = res.read()
        print info
        return info

    def listBucket (self, bucketName):
        """List the objects of a bucket, acting as the bucket's owner.

        Resolves the bucket owner through the admin API, borrows the
        owner's first key pair, and returns a JSON array of
        {"name", "size"} entries.
        """
        myargs = []
        if bucketName is not None:
            myargs.append(("bucket",bucketName))
        conn = self.getAdminConnection()
        request2= conn.request(method="GET", key="bucket", args= myargs)
        res = conn.send(request2)
        bucketInfo = json.loads(res.read())
        print bucketInfo
        owner = bucketInfo.get('owner')
        userInfo = self.getUser(owner)
        print userInfo
        userInfo = json.loads(userInfo)
        keys = userInfo.get('keys')
        print keys
        access_key = keys[0].get('access_key')
        secret_key = keys[0].get('secret_key')
        bucket = S3Bucket(bucketName, access_key=access_key, secret_key=secret_key , base_url= self.radosgw_url+bucketName, secure= self.secure)
        # NOTE: `list` shadows the builtin here; kept for compatibility.
        list = []
        for (key, modify, etag, size) in bucket.listdir():
            obj = {}
            obj['name'] = key
            obj['size'] = size
            list.append(obj)
            print "%r (%r) is size %r, modified %r" % (key, etag, size, modify)
        return json.dumps(list)

    def unlinkBucket (self,uid, bucket):
        """Unlink `bucket` from user `uid` (admin POST "bucket")."""
        conn = self.getAdminConnection()
        myargs = [("bucket",bucket),("uid",uid)]
        request= conn.request(method="POST", key="bucket", args= myargs)
        res = conn.send(request)
        info = res.read()
        print info
        return info

    def deleteBucket (self,bucket):
        """Delete `bucket` and purge its objects (admin DELETE "bucket")."""
        conn = self.getAdminConnection()
        myargs = [("bucket",bucket),("purge-objects","True")]
        request= conn.request(method="DELETE", key="bucket", args= myargs)
        res = conn.send(request)
        info = res.read()
        print info
        return info
| {
"repo_name": "inkscope/inkscope",
"path": "inkscopeCtrl/S3Ctrl.py",
"copies": "1",
"size": "13046",
"license": "apache-2.0",
"hash": 9061942243403723000,
"line_mean": 33.4221635884,
"line_max": 144,
"alpha_frac": 0.6426490878,
"autogenerated": false,
"ratio": 3.4385872430152875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9420700061511176,
"avg_score": 0.032107253860822506,
"num_lines": 379
} |
__author__ = 'alain.dechorgnat@orange.com'
from flask import request
import subprocess
from StringIO import StringIO
import json
class RbdCtrl:
def __init__(self,conf):
    # conf is the inkscope configuration dict; 'cluster' names the Ceph
    # cluster passed as --cluster to every ceph/rbd CLI call below.
    # Raises KeyError if 'cluster' is missing from the configuration.
    self.cluster_name = conf['cluster']
    pass
def list_images(self):
    """Return a JSON array of every RBD image in every pool.

    Shells out to `ceph osd lspools` to enumerate the pools, then to
    `rbd ls -l` per pool.  Each entry of the returned array is
    {"pool": <pool name>, "image": <rbd ls row>}.
    """
    output = subprocess.Popen(['ceph', 'osd', 'lspools', '--format=json', '--cluster='+self.cluster_name], stdout=subprocess.PIPE).communicate()[0]
    # json.loads parses the command output directly; wrapping it in a
    # StringIO for json.load was an unnecessary indirection.
    pools = json.loads(output)
    images = []
    for pool in pools:
        output = subprocess.Popen(['rbd', 'ls', '-l', '--pool', pool['poolname'], '--format=json', '--cluster='+self.cluster_name], stdout=subprocess.PIPE).communicate()[0]
        pool_images = json.loads(output)
        for pool_image in pool_images:
            image = {"pool": pool['poolname'], "image": pool_image}
            images.append(image)
    return json.dumps(images)
def image_info(self,pool_name, image_name):
    """Return JSON info for one RBD image, including its snapshots.

    Runs `rbd info` and `rbd snap ls` on pool_name/image_name; the pool
    name and the snapshot list are merged into the image dict under the
    'pool' and 'snaps' keys.  Raises subprocess.CalledProcessError when
    either command fails.
    """
    # get image info
    args = ['rbd',
            'info',
            pool_name+"/"+image_name,
            '--format=json',
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    # json.loads parses the command output directly; no StringIO needed.
    image = json.loads(output)
    # add pool name
    image['pool'] = pool_name
    # get snapshots list for this image
    args = ['rbd',
            'snap',
            'ls',
            pool_name+"/"+image_name,
            '--format=json',
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    image['snaps'] = json.loads(output)
    return json.dumps(image)
def create_image(self, pool_name, image_name):
    """Create an RBD image; size and image format come from the Flask form.

    Raises subprocess.CalledProcessError when `rbd create` fails.
    Returns the command's stdout wrapped in a StringIO.
    """
    size = request.form['size']
    # NOTE: `format` shadows the builtin; kept as-is.
    format = request.form['format']
    args = ['rbd',
    'create',
    pool_name+"/"+image_name,
    '--size',
    size,
    "--image-format",
    format,
    '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def modify_image(self, pool_name, image_name, action):
    """Dispatch an image modification by `action` name.

    NOTE(review): an unrecognised action falls through every branch and
    returns None — confirm callers treat that as a no-op.
    """
    if action == 'resize':
        return self.resize_image(pool_name, image_name)
    elif action == 'flatten':
        return self.flatten_image(pool_name, image_name)
    elif action == 'purge':
        return self.purge_image(pool_name, image_name)
    elif action == 'rename':
        return self.rename_image(pool_name, image_name)
    elif action == 'copy':
        return self.copy_image(pool_name, image_name)
def resize_image(self, pool_name, image_name):
    """Resize an RBD image; the new size comes from the JSON request body.

    Raises subprocess.CalledProcessError when `rbd resize` fails.
    """
    data = json.loads(request.data)
    size = data['size']
    args = ['rbd',
    'resize',
    pool_name+"/"+image_name,
    '--size',
    size,
    '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def delete_image(self, pool_name, image_name):
    """Delete an RBD image (`rbd rm`).

    Raises subprocess.CalledProcessError on failure.
    """
    args = ['rbd',
    'rm',
    pool_name+"/"+image_name,
    '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def purge_image(self, pool_name, image_name):
    """Delete all snapshots of an RBD image (`rbd snap purge`).

    Raises subprocess.CalledProcessError on failure.
    """
    args = ['rbd',
    'snap',
    'purge',
    pool_name+"/"+image_name,
    '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def flatten_image(self, pool_name, image_name):
    """
    Detach a cloned image from its parent snapshot (`rbd flatten`).
    Raises subprocess.CalledProcessError when the command fails.
    """
    args = ['rbd',
            'flatten',
            pool_name+"/"+image_name,
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def rename_image(self, pool_name, image_name):
    """
    Rename an RBD image.
    NOTE(review): not implemented — `args` is empty, so Popen raises on an
    empty argv before any rbd command runs; the TODO below still stands.
    """
    # TODO
    args = []
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def copy_image(self, pool_name, image_name):
    """
    Copy pool_name/image_name to the destination 'pool'/'image' given in
    the JSON request body (`rbd copy`).
    Raises subprocess.CalledProcessError when the command fails.
    """
    copy = json.loads(request.data)
    # print copy
    dest_pool_name = copy['pool']
    dest_image_name = copy['image']
    args = ['rbd',
            'copy',
            pool_name+"/"+image_name,
            dest_pool_name+"/"+dest_image_name,
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
#
# Snapshots
#
def create_image_snapshot(self, pool_name, image_name, snap_name):
    """
    Create snapshot pool/image@snap (`rbd snap create`).
    Raises subprocess.CalledProcessError when the command fails.
    """
    args = ['rbd',
            'snap',
            'create',
            pool_name+"/"+image_name+"@"+snap_name,
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def delete_image_snapshot(self, pool_name, image_name, snap_name):
    """
    Delete snapshot pool/image@snap (`rbd snap rm`).
    Raises subprocess.CalledProcessError when the command fails
    (e.g. when the snapshot is protected).
    """
    args = ['rbd',
            'snap',
            'rm',
            pool_name+"/"+image_name+"@"+snap_name,
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def info_image_snapshot(self, pool_name, image_name, snap_name):
    """
    Return, as a JSON string, the `rbd ls -l` entry describing the given
    snapshot, augmented with its pool name and the list of clone children.
    Raises subprocess.CalledProcessError when the snapshot is not found.
    """
    # BUG FIX: the `rbd children` call was missing the --cluster flag, so it
    # queried the default cluster instead of self.cluster_name (every other
    # rbd invocation in this class passes the flag).
    args = ['rbd', 'children', pool_name+"/"+image_name+"@"+snap_name, '--format=json', '--cluster='+self.cluster_name]
    output = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
    children = json.load(StringIO(output))
    args = ['rbd', 'ls', '-l', '--pool', pool_name, '--format=json', '--cluster='+self.cluster_name]
    output = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
    pool_images = json.load(StringIO(output))
    for pool_image in pool_images:
        if 'image' in pool_image and pool_image['image'] == image_name:
            if 'snapshot' in pool_image and pool_image['snapshot'] == snap_name:
                pool_image['pool'] = pool_name
                pool_image['children'] = children
                return json.dumps(pool_image)
    raise subprocess.CalledProcessError(1, '', 'snap not found')
def action_on_image_snapshot(self, pool_name, image_name, snap_name, action):
    """
    Dispatch a snapshot action: 'rollback', 'protect', 'unprotect' or 'clone'.
    Returns None for unknown actions; command failures propagate as
    subprocess.CalledProcessError. (Python 2 except syntax preserved.)
    """
    try:
        if action == 'rollback':
            return self.rollback_image_snapshot(pool_name, image_name, snap_name)
        elif action == 'protect':
            return self.protect_image_snapshot(pool_name, image_name, snap_name)
        elif action == 'unprotect':
            return self.unprotect_image_snapshot(pool_name, image_name, snap_name)
        elif action == 'clone':
            return self.clone_image_snapshot(pool_name, image_name, snap_name)
    except subprocess.CalledProcessError, e:
        # re-raise unchanged so the HTTP layer can report the failure
        raise
def rollback_image_snapshot(self, pool_name, image_name, snap_name):
    """
    Roll the image back to the state of pool/image@snap (`rbd snap rollback`).
    Raises subprocess.CalledProcessError when the command fails.
    """
    args = ['rbd',
            'snap',
            'rollback',
            pool_name+"/"+image_name+"@"+snap_name,
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def clone_image_snapshot(self, pool_name, image_name, snap_name):
    """
    Clone pool/image@snap to the destination 'pool'/'image' given in the
    JSON request body (`rbd clone`; the snapshot must be protected).
    Raises subprocess.CalledProcessError when the command fails.
    """
    clone = json.loads(request.data)
    # print clone
    dest_pool_name = clone['pool']
    dest_image_name = clone['image']
    args = ['rbd',
            'clone',
            pool_name+"/"+image_name+"@"+snap_name,
            dest_pool_name+"/"+dest_image_name,
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def protect_image_snapshot(self, pool_name, image_name, snap_name):
    """
    Protect pool/image@snap against deletion (`rbd snap protect`);
    required before the snapshot can be cloned.
    Raises subprocess.CalledProcessError when the command fails.
    """
    args = ['rbd',
            'snap',
            'protect',
            pool_name+"/"+image_name+"@"+snap_name,
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def unprotect_image_snapshot(self, pool_name, image_name, snap_name):
    """
    Remove the protection flag from pool/image@snap (`rbd snap unprotect`).
    Raises subprocess.CalledProcessError when the command fails
    (e.g. while clone children still exist).
    """
    args = ['rbd',
            'snap',
            'unprotect',
            pool_name+"/"+image_name+"@"+snap_name,
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
    return StringIO(output)
def children_image_snapshot(self, pool_name, image_name, snap_name):
    """
    List the clone children of pool/image@snap (`rbd children`).
    Raises subprocess.CalledProcessError when the command fails.
    """
    args = ['rbd',
            'children',
            pool_name+"/"+image_name+"@"+snap_name,
            '--cluster='+self.cluster_name]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, "", error)
return StringIO(output) | {
"repo_name": "inkscope/inkscope",
"path": "inkscopeCtrl/rbdCtrl.py",
"copies": "1",
"size": "11715",
"license": "apache-2.0",
"hash": -2587347901505758000,
"line_mean": 38.9863481229,
"line_max": 176,
"alpha_frac": 0.5609048229,
"autogenerated": false,
"ratio": 4.2109992810927395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003270873384080749,
"num_lines": 293
} |
__author__ = 'alain ivars'
# -*- coding: utf-8 -*-
import sys, os

# Detect a Read the Docs build: RTD supplies its own theme, so only import
# sphinx_rtd_theme for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd is False:
    import sphinx_rtd_theme

sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.join(os.path.abspath('.'), '_ext'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings.
extensions = [
    'sphinx.ext.autodoc',
    # 'sphinx.ext.doctest',
    # 'sphinx.ext.intersphinx',
    # 'sphinx.ext.todo',
    # 'sphinx.ext.coverage',
    # 'sphinx.ext.viewcode',
    'sphinx.ext.extlinks',
    'sphinx.ext.graphviz',
    # 'djangodjango_base_appdocs',
]

# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django contact_form_bootstrap'
copyright = u'2013-2021, Alain Ivars'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
version = "1.0rc1"
release = "beta"

# The language for content autogenerated by Sphinx. Refer to documentation for
# a list of supported languages.
language = "en"

# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['build']

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description unit
# titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------------
html_theme = 'sphinx_rtd_theme'
# BUG FIX: only reference sphinx_rtd_theme when it was actually imported;
# on Read the Docs the module is absent and the original unconditional
# `html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]` raised NameError.
if not on_rtd:
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['static']
| {
"repo_name": "alainivars/django-contact-form",
"path": "docs/conf.py",
"copies": "1",
"size": "2083",
"license": "bsd-3-clause",
"hash": 4110747569218348000,
"line_mean": 27.9305555556,
"line_max": 80,
"alpha_frac": 0.6706673068,
"autogenerated": false,
"ratio": 3.465890183028286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46365574898282863,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alandinneen'
from MySQLdb import connect, Error
from collections import OrderedDict
class DBConn(object):
    """
    A class to handle all MySQL connection reads/writes.
    (Python 2 / MySQLdb.)
    """
    def __init__(self, host=None, db=None, user=None, password=None):
        self._host = host
        self._db = db
        self._user = user
        self._pass = password
    def connect(self):
        """
        Return a connect object to MySQL database
        """
        db = connect(self._host, self._user, self._pass, self._db)
        return db
    def close(self, conobj):
        """
        Close connection to MySQL database
        """
        conobj.close()
        return None
    def select(self, sqlselect):
        """
        Simple method for performing MySQL select.
        Returns all rows as tuples, or None (implicitly) on error.
        """
        try:
            db = self.connect()
            cursor = db.cursor()
            cursor.execute(sqlselect)
            rawdata = cursor.fetchall()
            cursor.close()
            db.close()
            return rawdata
        except Error, e:
            # NOTE(review): concatenating the exception object to a str raises
            # TypeError at runtime; should be str(e).
            print "There has been an error in the select! " + e
    def single_insert(self, sqlinsert):
        """
        Performs a single insert and returns the last inserted id for the session.
        """
        try:
            insertid = None
            db = self.connect()
            cursor = db.cursor()
            cursor.execute(sqlinsert)
            cursor.execute("SELECT LAST_INSERT_ID();")
            rowid = cursor.fetchall()
            insertid = rowid[0][0]
            cursor.close()
            db.commit()
            return insertid
        except Exception as e:
            # NOTE(review): if connect() itself failed, `db` is unbound here and
            # rollback()/close() raise NameError; "+ e" also TypeErrors.
            db.rollback()
            print "There has been an error in the single insert. The transaction has been rolled back. Error: " + e
        finally:
            db.close()
    def mass_insert(self, sqlstatment, dbobj):
        """
        Provides a mass insert mechanism. This method needs to be provided a self.connect() object before calling
        this method. It should always be followed with a call to self.close()
        NOTE(review): nothing is committed here — the caller must commit().
        """
        try:
            cursor = dbobj.cursor()
            cursor.execute(sqlstatment)
        except Exception as e:
            dbobj.rollback()
            print "There has been an error in the single insert. The transaction has been rolled back. Error: " + e
    def update(self, sqlupdate):
        """
        Opens a MySQL connection, performs an update, then closes the connection. Rolls back any changes if an
        error occurs.
        """
        try:
            insertid = None  # unused leftover copied from single_insert
            db = self.connect()
            cursor = db.cursor()
            cursor.execute(sqlupdate)
            cursor.close()
            db.commit()
        except Exception as e:
            db.rollback()
            print "There has been an error in the single insert. The transaction has been rolled back. Error: " + e
        finally:
            db.close()
    def cursor_results_to_dict(self, results):
        """
        Returns a list of OrderedDicts from the cursor results
        NOTE(review): expects an object exposing rowcount/keys() (e.g. a result
        proxy), not the plain tuples returned by select() — verify callers.
        """
        data = []
        if results.rowcount:
            keys = results.keys()
            for row in results:
                obj = OrderedDict()
                for key in keys:
                    obj[key] = str(row[key]).decode('UTF-8', 'ignore')
                data.append(obj)
        return data
return data | {
"repo_name": "rad08d/mysqlconn",
"path": "dbconn/dbconn.py",
"copies": "1",
"size": "3405",
"license": "mit",
"hash": 6393837889572282000,
"line_mean": 29.9636363636,
"line_max": 115,
"alpha_frac": 0.5380323054,
"autogenerated": false,
"ratio": 4.552139037433155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017932325985604645,
"num_lines": 110
} |
__author__ = 'alan'
from . import rssapp_blueprint
from flask import render_template
from os import path
from logging import getLogger
from flask_rss import customlogg
from settings import Configuration
import rss
logger = getLogger(__name__)
config = Configuration()
@rssapp_blueprint.route('/')
def index():
    """Render the home page: fetch and group articles for every RSS site listed in the config file."""
    try:
        dirname = path.dirname(__file__)
        sitespath = path.join(dirname, config.RSSWEBSITES)
        # NOTE(review): `file` shadows the builtin and the handle is never
        # closed; a `with open(...)` block would be safer.
        file = open(sitespath)
        sites = file.readlines()
        sitestories = []
        group = 0
        for site in sites:
            try:
                rsslink = rss.Rss(site)
                rsslink.get_rss_into_articles()
                articlegrouping = {group: rsslink}
                sitestories.append(articlegrouping)
                group += 1
            except Exception as e:
                # a single broken feed is logged and skipped
                logger.error("There has been an error in the index method. Error: " + str(e))
    except Exception as e:
        #logger.error("There has been an error in the index method. Error: " + str(e))
        # NOTE(review): if the outer try fails before `sitestories` is bound,
        # the return below raises NameError. (Python 2 print statement kept.)
        print e
return render_template('index.html', stories=sitestories) | {
"repo_name": "rad08d/rssreader_flask",
"path": "flask_rss/rssapp/views.py",
"copies": "1",
"size": "1110",
"license": "apache-2.0",
"hash": -8273817866912707000,
"line_mean": 31.6764705882,
"line_max": 93,
"alpha_frac": 0.6135135135,
"autogenerated": false,
"ratio": 4.157303370786517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024853824959737455,
"num_lines": 34
} |
__author__ = 'alan'
import heapq
class Solution(object):
    def getSkyline(self, buildings):
        """
        Compute the skyline key points for buildings given as
        [left, right, height] triples sorted by left edge.

        Sweep line: a max-heap of active buildings stored as
        [-height, -right_edge].
        :type buildings: List[List[int]]
        :rtype: List[List[int]]
        """
        idx, total = 0, len(buildings)
        active, skyline = [], []
        while idx < total or active:
            top_end = -active[0][1] if active else None
            if not active or (idx < total and buildings[idx][0] <= top_end):
                # absorb every building that starts at this x coordinate
                x = buildings[idx][0]
                while idx < total and buildings[idx][0] == x:
                    heapq.heappush(active, [-buildings[idx][2], -buildings[idx][1]])
                    idx += 1
            else:
                # retire buildings ending at or before the tallest one's end
                x = top_end
                while active and -active[0][1] <= x:
                    heapq.heappop(active)
            height = -active[0][0] if active else 0
            if not skyline or skyline[-1][1] != height:
                skyline.append([x, height])
        return skyline


if __name__ == "__main__":
    demo = [[2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8]]
    print(Solution().getSkyline(demo))
| {
"repo_name": "alyiwang/LeetPy",
"path": "Skyline.py",
"copies": "1",
"size": "1027",
"license": "apache-2.0",
"hash": 359047702324409000,
"line_mean": 28.3428571429,
"line_max": 80,
"alpha_frac": 0.4206426485,
"autogenerated": false,
"ratio": 3.102719033232628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4023361681732628,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
import random
class Solution(object):
    """Set supporting O(1) average insert, remove and getRandom.

    Backed by a list of values plus a dict mapping value -> list index.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # values in arbitrary order
        self.ls = []
        # value -> position of that value inside self.ls
        self.m = {}

    def insert(self, val):
        """
        Inserts a value to the set. Returns true if the set did not already contain the specified element.
        :type val: int
        :rtype: bool
        """
        if val in self.m:
            return False
        self.m[val] = len(self.ls)
        self.ls.append(val)
        return True

    def remove(self, val):
        """
        Removes a value from the set. Returns true if the set contained the specified element.
        :type val: int
        :rtype: bool
        """
        if val not in self.m:
            return False
        slot, tail = self.m[val], len(self.ls) - 1
        if slot != tail:
            # move the last element into the vacated slot
            moved = self.ls[tail]
            self.ls[slot] = moved
            self.m[moved] = slot
        self.ls.pop()
        del self.m[val]
        return True

    def getRandom(self):
        """
        Get a random element from the set (None when empty).
        :rtype: int
        """
        if not self.ls:
            return None
        return self.ls[random.randint(0, len(self.ls) - 1)]


if __name__ == "__main__":
    demo = Solution()
    for outcome in (demo.remove(0), demo.remove(1), demo.insert(3),
                    demo.getRandom(), demo.remove(3), demo.insert(1),
                    demo.getRandom()):
        print(outcome)
| {
"repo_name": "alyiwang/LeetPy",
"path": "InsertDeleteGetRandomO1.py",
"copies": "1",
"size": "1476",
"license": "apache-2.0",
"hash": -7501933958888869000,
"line_mean": 22.8064516129,
"line_max": 106,
"alpha_frac": 0.5108401084,
"autogenerated": false,
"ratio": 3.7367088607594936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47475489691594935,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
import random
# Definition for singly-linked list.
class ListNode(object):
    """Node of a singly linked list."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def __init__(self, head):
        """
        @param head The linked list's head.
        Note that the head is guaranteed to be not null, so it contains at least one node.
        :type head: ListNode
        """
        self.head = head

    def getRandom(self):
        """
        Returns a uniformly random node's value via reservoir sampling (k=1):
        the i-th node replaces the current pick with probability 1/i.
        :rtype: int
        """
        chosen, seen = 0, 0
        node = self.head
        while node:
            if random.randint(0, seen) == 0:
                chosen = node.val
            seen += 1
            node = node.next
        return chosen


if __name__ == "__main__":
    head = ListNode(100)
    head.next = ListNode(1)
    head.next.next = ListNode(10)
    head.next.next.next = ListNode(30)
    sampler = Solution(head)
    stat = {100: 0, 1: 0, 10: 0, 30: 0}
    for _ in range(1000000):
        stat[sampler.getRandom()] += 1
    print(stat)
| {
"repo_name": "alyiwang/LeetPy",
"path": "LinkedListRandomNode.py",
"copies": "1",
"size": "1069",
"license": "apache-2.0",
"hash": -5775895797609902000,
"line_mean": 19.5576923077,
"line_max": 90,
"alpha_frac": 0.5070159027,
"autogenerated": false,
"ratio": 3.5752508361204014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45822667388204014,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
import string
class Solution(object):
    def findLadders(self, start, end, dict):
        """
        Return all shortest transformation sequences from `start` to `end`,
        changing one letter at a time through words in `dict`
        (level-by-level BFS building a predecessor map, then DFS to emit paths).

        :type start: str
        :type end: str
        :type dict: Set[str]   # NOTE: parameter shadows the builtin `dict`
        :rtype: List[List[int]]
        """
        l = len(start)
        if start == end:
            return [[start]]
        # preMap[w] = predecessors of w on some shortest path
        preMap = {}
        for i in dict:
            preMap[i] = []
        pre, cur = set(), set()
        pre.add(start)
        while pre:
            cur.clear()
            # remove the current frontier from the dictionary so no word is
            # revisited on a longer path
            for t in pre:
                dict.remove(t)
            for t in pre:
                for i in range(l):
                    for c in string.ascii_lowercase:
                        if c == t[i]:
                            continue
                        st = t[:i] + c + t[i + 1:]
                        if st not in dict:
                            continue
                        preMap[st].append(t)
                        cur.add(st)
            # swap frontier sets (both are reused; cur is cleared next pass)
            pre, cur = cur, pre
            if end in pre:
                break
        output = []
        def dfs(s, path):
            # walk the predecessor map backwards from `end` to `start`
            if s == start:
                output.append([start] + path)
            elif s in preMap:
                for i in preMap[s]:
                    dfs(i, [s] + path)
        dfs(end, [])
        return output
if __name__ == "__main__":
    sol = Solution()
    start, end, dict = "hot", "dog", ["hot", "cog", "dog", "tot", "hog", "hop", "pot", "dot"]
    print(sol.findLadders(start, end, dict))
| {
"repo_name": "alyiwang/LeetPy",
"path": "WordLadder2.py",
"copies": "1",
"size": "1485",
"license": "apache-2.0",
"hash": 8070814087898088000,
"line_mean": 24.1694915254,
"line_max": 93,
"alpha_frac": 0.3818181818,
"autogenerated": false,
"ratio": 3.9812332439678286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48630514257678287,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
import string
class Solution(object):
    def ladderLength(self, beginWord, endWord, wordDict):
        """
        Length of the shortest transformation sequence from beginWord to
        endWord, changing one letter per step through words in wordDict.
        Returns 0 when no ladder exists. NOTE: consumes words from wordDict.

        :type beginWord: str
        :type endWord: str
        :type wordDict: Set[str]
        :rtype: int
        """
        gap = self.diff(beginWord, endWord)
        if gap <= 1:
            # identical words (length 1) or one letter apart (length 2)
            return gap + 1
        width, depth = len(beginWord), 3
        queue, level_end = [beginWord], beginWord
        while queue:
            word = queue.pop(0)
            for pos in range(width):
                for letter in string.ascii_lowercase:
                    if letter == word[pos]:
                        continue
                    candidate = word[:pos] + letter + word[pos + 1:]
                    if candidate not in wordDict:
                        continue
                    if self.diff(candidate, endWord) <= 1:
                        return depth
                    queue.append(candidate)
                    wordDict.remove(candidate)
            if word == level_end:
                # finished a BFS level
                if queue:
                    level_end = queue[-1]
                depth += 1
        return 0

    def diff(self, s, p):
        """Number of positions where s and p differ."""
        mismatches = 0
        for pos, ch in enumerate(s):
            if ch != p[pos]:
                mismatches += 1
        return mismatches


if __name__ == "__main__":
    sol = Solution()
    beginWord, endWord = "hit", "cog"
    wordDict = {"hot", "dot", "dog", "lot", "log"}
    print(sol.ladderLength(beginWord, endWord, wordDict))
| {
"repo_name": "alyiwang/LeetPy",
"path": "WordLadder.py",
"copies": "1",
"size": "1377",
"license": "apache-2.0",
"hash": 371709880457842800,
"line_mean": 26,
"line_max": 57,
"alpha_frac": 0.41902687,
"autogenerated": false,
"ratio": 3.889830508474576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4808857378474576,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
class Solution:
    # @param {character[][]} board
    # @return {void} Do not return anything, modify board in-place instead.
    def solveSudoku(self, board):
        """Solve the 9x9 sudoku in place by backtracking over empty cells."""
        self.solve(board, 0, 0)

    def solve(self, b, x, y):
        """Try to fill cell (x, y) and recurse; returns True once solved."""
        if y >= 9:
            # wrap to the next row
            x, y = x + 1, y - 9
        if x >= 9:
            return True
        if b[x][y] != '.':
            return self.solve(b, x, y + 1)
        for i in '123456789':
            b[x][y] = i
            if Solution.check(b, x, y) and self.solve(b, x, y + 1):
                return True
        b[x][y] = '.'
        return False

    @staticmethod
    def check(b, x, y):
        """True when the value at (x, y) clashes with no row/column/box peer."""
        for i in range(9):
            if i != y and b[x][i] == b[x][y]:
                return False
            if i != x and b[i][y] == b[x][y]:
                return False
        # BUG FIX: use floor division. On Python 3 (this file uses print()),
        # `x / 3 * 3` yields a float and float indices raise TypeError.
        ri, ci = x // 3 * 3, y // 3 * 3
        for i in range(ri, ri + 3):
            for j in range(ci, ci + 3):
                if (i != x or j != y) and b[i][j] == b[x][y]:
                    return False
        return True


if __name__ == "__main__":
    board = ["..9748...", "7........", ".2.1.9...", "..7...24.", ".64.1.59.", ".98...3..", "...8.3.2.", "........6",
             "...2759.."]
    board = [list(row) for row in board]
    sol = Solution()
    sol.solveSudoku(board)
    print(board)
| {
"repo_name": "alyiwang/LeetPy",
"path": "SudokuSolver.py",
"copies": "1",
"size": "1360",
"license": "apache-2.0",
"hash": 5647523553822450000,
"line_mean": 26.2,
"line_max": 116,
"alpha_frac": 0.4088235294,
"autogenerated": false,
"ratio": 3.1627906976744184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9070759526219717,
"avg_score": 0.00017094017094017097,
"num_lines": 50
} |
__author__ = 'alan'
class Solution(object):
    def addOperators(self, num, target):
        """
        Return every expression formed by inserting '+', '-' or '*' between
        the digits of `num` that evaluates to `target` (operands may not
        have leading zeros).

        :type num: str
        :type target: int
        :rtype: List[str]
        """
        if not num:
            return []

        def bad_zero(token):
            # a multi-digit operand may not start with '0'
            return len(token) > 1 and token[0] == '0'

        def split(digits, goal, tail_expr='', tail_factor=1):
            # tail_expr / tail_factor carry a pending '*...' multiplication
            found = []
            if not bad_zero(digits) and int(digits) * tail_factor == goal:
                found.append(digits + tail_expr)
            for cut in range(len(digits) - 1):
                head, tail = digits[:cut + 1], digits[cut + 1:]
                if bad_zero(tail):
                    continue
                expr, value = tail + tail_expr, int(tail) * tail_factor
                found.extend(left + '+' + expr for left in split(head, goal - value))
                found.extend(left + '-' + expr for left in split(head, goal + value))
                found.extend(split(head, goal, '*' + expr, value))
            return found

        return split(num, target)


if __name__ == "__main__":
    print(Solution().addOperators("232", 8))
| {
"repo_name": "alyiwang/LeetPy",
"path": "ExpressionAddOperators.py",
"copies": "1",
"size": "1241",
"license": "apache-2.0",
"hash": 2793746665715166000,
"line_mean": 29.2682926829,
"line_max": 68,
"alpha_frac": 0.4689766317,
"autogenerated": false,
"ratio": 3.8184615384615386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4787438170161538,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
class Solution(object):
    def calculate(self, s):
        """
        Evaluate an expression of non-negative integers, '+', '-', spaces and
        parentheses, using one mixed stack of tokens and partial results.

        :type s: str
        :rtype: int
        """
        stack = []
        # find the previous '(' or beginning of stack, collapse everything
        # after it into a single number
        def goback():
            t = len(stack) - 1
            while t >= 0:
                if stack[t] == '(':
                    break
                t -= 1
            re = calc(t + 1)
            if t < 0:
                t = 0
            # drop the consumed tokens (including the '(') and push the value
            del stack[t:]
            stack.append(re)
        # calculate value from start to stack end.
        def calc(start):
            num, p = stack[start], start + 1
            while p < len(stack):
                op = stack[p]
                if op == '+':
                    num += stack[p + 1]
                else:
                    num -= stack[p + 1]
                p += 2
            return num
        i = 0
        while i <= len(s):
            # i == len(s) acts as a final implicit ')'
            if i == len(s) or s[i] == ')':
                goback()
                i += 1
            elif s[i] == '+' or s[i] == '-' or s[i] == '(':
                stack += s[i]
                i += 1
            elif s[i] == ' ':
                i += 1
            else:
                # consume a whole multi-digit number
                j = i + 1
                while j < len(s) and s[j].isdigit():
                    j += 1
                stack.append(int(s[i: j]))
                i = j
        return stack[0]
if __name__ == "__main__":
    sol = Solution()
    s = "(1+ (4+5+2) -23 )- ( 6-18)"
    print(sol.calculate(s))
| {
"repo_name": "alyiwang/LeetPy",
"path": "BasicCalculator.py",
"copies": "1",
"size": "1496",
"license": "apache-2.0",
"hash": 6714300127272637000,
"line_mean": 24.3559322034,
"line_max": 59,
"alpha_frac": 0.3181818182,
"autogenerated": false,
"ratio": 3.9681697612732094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4786351579473209,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
class Solution(object):
    """Evaluate arithmetic expressions: shunting-yard to RPN, then evaluate."""

    operators = ['+', '-', '*', '/']

    def calculate(self, s):
        """
        Evaluate an expression containing +, -, *, / (floor division),
        parentheses and spaces.
        :type s: str
        :rtype: int
        """
        return self.evaluate(self.reverse_polish_notation(s))

    def priority(self, operator):
        """Binding strength of an operator; 0 for anything else (e.g. '(')."""
        if operator in ('+', '-'):
            return 1
        if operator in ('*', '/'):
            return 2
        return 0

    def calc(self, x, y, operator):
        """Apply `operator` to x and y; raises KeyError for unknown operators."""
        if operator == '+':
            return x + y
        if operator == '-':
            return x - y
        if operator == '*':
            return x * y
        if operator == '/':
            return x // y
        raise KeyError(operator)

    def reverse_polish_notation(self, s):
        """Convert infix `s` into a postfix token list (shunting-yard)."""
        pending, output = [], []
        digits = ''
        for ch in s:
            if ch.isdigit():
                digits += ch
                continue
            if digits:
                output.append(digits)
                digits = ''
            if ch in self.operators:
                while pending and self.priority(pending[-1]) >= self.priority(ch):
                    output.append(pending.pop())
                pending.append(ch)
            elif ch == '(':
                pending.append(ch)
            elif ch == ')':
                while pending and pending[-1] != '(':
                    output.append(pending.pop())
                pending.pop()
        if digits:
            output.append(digits)
        while pending:
            output.append(pending.pop())
        return output

    def evaluate(self, tokens):
        """Evaluate a postfix token list with an operand stack."""
        values = []
        for token in tokens:
            if token in self.operators:
                rhs, lhs = values.pop(), values.pop()
                values.append(self.calc(lhs, rhs, token))
            else:
                values.append(int(token))
        return values[0]


if __name__ == "__main__":
    print(Solution().calculate(" 3+5 / 2 "))
| {
"repo_name": "alyiwang/LeetPy",
"path": "BasicCalculator2.py",
"copies": "1",
"size": "1934",
"license": "apache-2.0",
"hash": 8981887192509885000,
"line_mean": 26.2394366197,
"line_max": 86,
"alpha_frac": 0.4022750776,
"autogenerated": false,
"ratio": 4.195227765726681,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016189088554314392,
"num_lines": 71
} |
__author__ = 'alan'
class Solution(object):
    def canFinish(self, numCourses, prerequisites):
        """
        Decide whether all courses can be finished given prerequisite pairs
        [course, prerequisite] (Kahn's topological-sort algorithm).

        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: bool
        """
        # adjacency: edge q -> p means course p depends on q
        adj = [[] for _ in range(numCourses)]
        indeg = [0] * numCourses
        for p, q in prerequisites:
            indeg[p] += 1
            adj[q].append(p)
        remaining = list(range(numCourses))
        progressed = True
        while progressed and remaining:
            progressed = False
            removable = []
            for i in remaining:
                if indeg[i] == 0:
                    progressed = True
                    for j in adj[i]:
                        indeg[j] -= 1
                    removable.append(i)
            for i in removable:
                remaining.remove(i)
        # BUG FIX: the original returned the topological order (a list),
        # contradicting its name and documented bool return type — and was
        # wrongly falsy for numCourses == 0. A cycle leaves courses remaining.
        return not remaining


if __name__ == "__main__":
    sol = Solution()
    numCourses, prerequisites = 4, [[1, 0], [2, 0], [3, 1], [3, 2]]
    print(sol.canFinish(numCourses, prerequisites))
| {
"repo_name": "alyiwang/LeetPy",
"path": "CourseSchedule.py",
"copies": "1",
"size": "1060",
"license": "apache-2.0",
"hash": -5572826823676477000,
"line_mean": 26.8947368421,
"line_max": 67,
"alpha_frac": 0.4481132075,
"autogenerated": false,
"ratio": 3.719298245614035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4667411453114035,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
class Solution(object):
    def colorConnected(self, board):
        """
        Label each 4-connected component of 1-cells in `board` with a unique
        integer starting at 2 (in place) and return the component count.

        :type board: List[str]
        :rtype: integer
        """
        m, n = len(board), len(board[0])
        steps = ((-1, 0), (1, 0), (0, -1), (0, 1))

        def bfs():
            # breadth-first variant (kept for reference; dfs() is used below)
            label, queue = 1, []
            for i in range(m):
                for j in range(n):
                    if board[i][j] == 1:
                        label += 1
                        queue.append((i, j))
                        while queue:
                            x, y = queue.pop(0)
                            board[x][y] = label
                            for dx, dy in steps:
                                nx, ny = x + dx, y + dy
                                if 0 <= nx < m and 0 <= ny < n and board[nx][ny] == 1:
                                    queue.append((nx, ny))
            return label - 1

        def dfs():
            # iterative depth-first flood fill with an explicit stack
            label, stack = 1, []
            for i in range(m):
                for j in range(n):
                    if board[i][j] == 1:
                        label += 1
                        board[i][j] = label
                        stack.append((i, j))
                        while stack:
                            x, y = stack[-1]
                            for dx, dy in steps:
                                nx, ny = x + dx, y + dy
                                if 0 <= nx < m and 0 <= ny < n and board[nx][ny] == 1:
                                    board[nx][ny] = label
                                    stack.append((nx, ny))
                                    break
                            else:
                                # no unvisited neighbour left: backtrack
                                stack.pop()
            return label - 1

        return dfs()


if __name__ == "__main__":
    demo = [[0, 1, 0, 1, 0, 1],
            [1, 1, 0, 1, 1, 1],
            [0, 0, 1, 0, 0, 0]]
    print(Solution().colorConnected(demo))
    print(demo)
| {
"repo_name": "alyiwang/LeetPy",
"path": "ColorConnected.py",
"copies": "1",
"size": "2506",
"license": "apache-2.0",
"hash": -8767288083784792000,
"line_mean": 35.8529411765,
"line_max": 68,
"alpha_frac": 0.2773343974,
"autogenerated": false,
"ratio": 4.247457627118644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5024792024518644,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
class Solution(object):
    def exist(self, board, word):
        """
        True if `word` can be traced on `board` by moving between
        4-adjacent cells without reusing a cell.

        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        rows, cols = len(board), len(board[0])
        for r in range(rows):
            for c in range(cols):
                if board[r][c] == word[0] and self.search(board, word[1:], r, c):
                    return True
        return False

    def search(self, b, w, x, y):
        """DFS for the remaining letters `w`, with (x, y) already matched."""
        if len(w) == 0:
            return True
        # mark the current cell as in-use while exploring neighbours
        saved, b[x][y] = b[x][y], '@'
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if 0 <= nx < len(b) and 0 <= ny < len(b[0]) and b[nx][ny] == w[0]:
                if self.search(b, w[1:], nx, ny):
                    # NOTE: markers are deliberately left in place on success,
                    # matching the original behaviour
                    return True
        b[x][y] = saved
        return False


if __name__ == "__main__":
    rows = ["ABCE", "SFES", "ADEE"]
    grid = [list(row) for row in rows]
    print(Solution().exist(grid, "ABCESEEEFS"))
| {
"repo_name": "alyiwang/LeetPy",
"path": "WordSearch.py",
"copies": "1",
"size": "1284",
"license": "apache-2.0",
"hash": -9041286312901326000,
"line_mean": 24.68,
"line_max": 58,
"alpha_frac": 0.4026479751,
"autogenerated": false,
"ratio": 3.1625615763546797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.406520955145468,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
class Solution(object):
    def findWords(self, board, words):
        """
        Return the words from `words` traceable on `board` by moving between
        4-adjacent cells without reusing a cell (trie-guided DFS).
        Found words are deleted from the trie so each is reported only once.

        :type board: List[List[str]]
        :type words: List[str]
        :rtype: List[str]
        """
        l1, l2 = len(board), len(board[0])
        tr = Trie()
        for w in words:
            tr.add(w)
        vis, result = [[False] * l2 for i in range(l1)], []
        def find(w, p, x, y):
            # w: prefix built so far (includes board[x][y]); p: parent trie node
            node = p.ch.get(board[x][y])
            if node is None:
                return []
            vis[x][y] = True
            for loc in [[0, -1], [0, 1], [-1, 0], [1, 0]]:
                x1, y1 = x + loc[0], y + loc[1]
                if 0 <= x1 < l1 and 0 <= y1 < l2 and not vis[x1][y1]:
                    find(w + board[x1][y1], node, x1, y1)
            if node.isWord:
                result.append(w)
                # delete so the word is not matched again from another cell
                tr.delete(w)
            vis[x][y] = False
        for x in range(l1):
            for y in range(l2):
                find(board[x][y], tr.root, x, y)
        return result
class Node(object):
    # trie node: isWord marks the end of a word, ch maps char -> child Node
    def __init__(self):
        self.isWord = False
        self.ch = {}
class Trie(object):
    def __init__(self):
        self.root = Node()
    def add(self, w):
        """Insert word `w` into the trie."""
        p = self.root
        for c in w:
            node = p.ch.get(c)
            if node is None:
                node = Node()
                p.ch[c] = node
            p = node
        p.isWord = True
    def delete(self, w):
        """
        Remove word `w`, pruning nodes that no longer lead to any word.
        Returns False when `w` is not stored.
        """
        p, q = self.root, []
        for c in w:
            q.append((c, p))
            node = p.ch.get(c)
            if node is None:
                return False
            p = node
        if not p.isWord:
            return False
        if len(p.ch) > 0:
            # word is a prefix of longer words: just unmark it
            p.isWord = False
        else:
            # walk back up deleting now-empty nodes
            for c, n in reversed(q):
                del n.ch[c]
                if len(n.ch) > 0 or n.isWord:
                    break
        return True
if __name__ == "__main__":
    sol = Solution()
    board, b, words = [], ["aa"], ["a"]
    for row in b:
        board.append(list(row))
    print(sol.findWords(board, words))
| {
"repo_name": "alyiwang/LeetPy",
"path": "WordSearch2.py",
"copies": "1",
"size": "2085",
"license": "apache-2.0",
"hash": -1574651295570158000,
"line_mean": 23.8214285714,
"line_max": 69,
"alpha_frac": 0.4081534772,
"autogenerated": false,
"ratio": 3.3902439024390243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4298397379639024,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
class Solution(object):
    def __init__(self):
        # memoised target -> combination count; only valid for the `nums`
        # of the current top-level call
        self.m = {}

    def combinationSum4(self, nums, target):
        """
        Count the ordered combinations of values from nums summing to target
        (memoised recursion).

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        # BUG FIX: reset the memo on every top-level call. The original kept
        # self.m across calls keyed only by target, returning stale counts
        # when the same instance was reused with different nums.
        self.m = {}
        return self.helper(nums, target)

    def helper(self, nums, target):
        """Recursive worker; caches the count for each remaining target."""
        if target in self.m:
            return self.m[target]
        count = 0
        for n in nums:
            if n == target:
                count += 1
            elif n < target:
                count += self.helper(nums, target - n)
        self.m[target] = count
        return count

    def combinationSum4DP(self, nums, target):
        """
        Iterative variant: dp[i] = number of ordered combinations summing
        to i; dp[0] = 1 is the empty combination.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        dp = [0] * (target + 1)
        dp[0] = 1
        for i in range(target):
            for n in nums:
                if i + n <= target:
                    dp[i + n] += dp[i]
        return dp[target]


if __name__ == "__main__":
    sol = Solution()
    total = sol.combinationSum4DP([1, 2, 3, 5], 20)
    print(total)
| {
"repo_name": "alyiwang/LeetPy",
"path": "CombinationSum4.py",
"copies": "1",
"size": "1089",
"license": "apache-2.0",
"hash": 4075672386866467300,
"line_mean": 21.2244897959,
"line_max": 54,
"alpha_frac": 0.4527089073,
"autogenerated": false,
"ratio": 3.7681660899653977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9720874997265398,
"avg_score": 0,
"num_lines": 49
} |
__author__ = 'alan'
class Solution(object):
    """Convert an integer to its English-words spelling."""
    def numberToWords(self, num):
        """
        Spell out *num* in English, e.g. 123 -> "One Hundred Twenty Three".
        Negative numbers are prefixed with "Minus".
        :type num: int
        :rtype: str
        """
        # Scale word for each power-of-1000 group; 1 maps to '' so the
        # lowest group carries no suffix.
        def get_base(x):
            return {
                1: '',
                100: 'Hundred',
                1000: 'Thousand',
                1000000: 'Million',
                1000000000: 'Billion',
            }.get(x, '')
        # Words for units, teens and multiples of ten; anything else -> ''.
        def get_num(x):
            return {
                1: 'One',
                2: 'Two',
                3: 'Three',
                4: 'Four',
                5: 'Five',
                6: 'Six',
                7: 'Seven',
                8: 'Eight',
                9: 'Nine',
                10: 'Ten',
                11: 'Eleven',
                12: 'Twelve',
                13: 'Thirteen',
                14: 'Fourteen',
                15: 'Fifteen',
                16: 'Sixteen',
                17: 'Seventeen',
                18: 'Eighteen',
                19: 'Nineteen',
                20: 'Twenty',
                30: 'Thirty',
                40: 'Forty',
                50: 'Fifty',
                60: 'Sixty',
                70: 'Seventy',
                80: 'Eighty',
                90: 'Ninety',
            }.get(x, '')
        # Spell one 3-digit group (0-999).  Every word is emitted with a
        # leading space; the final leading space is stripped by the caller.
        def get1000(x):
            hun = x // 100
            x %= 100
            ten = x // 10
            one = x % 10
            huns = '' if hun == 0 else ' ' + get_num(hun) + ' Hundred'
            tens = ''
            if ten == 1:
                # 10-19 have dedicated words: look up the whole 2-digit value.
                tens += ' ' + get_num(x)
            else:
                tens += '' if ten == 0 else ' ' + get_num(ten * 10)
                tens += '' if one == 0 else ' ' + get_num(one)
            return huns + tens
        if x if False else num == 0:  # noqa: placeholder removed
            return 'Zero'
        flag = ''
        if num < 0:
            num = -num
            flag = 'Minus '
        # Walk the number in 1000-chunks from least to most significant,
        # prepending each spelled group (with its scale word) to output.
        base, output = 1, ''
        while num > 0:
            t = num % 1000
            # Skip the scale word for an all-zero group and for the ones group.
            base_s = '' if t == 0 or base == 1 else ' ' + get_base(base)
            output = get1000(t) + base_s + output
            base *= 1000
            num //= 1000
        # output begins with the space emitted by get1000; drop it.
        return flag + output[1:]
if __name__ == "__main__":
    # Demo: spell out a large negative number.
    print(Solution().numberToWords(-1235488541))
| {
"repo_name": "alyiwang/LeetPy",
"path": "IntegerToWords.py",
"copies": "1",
"size": "2236",
"license": "apache-2.0",
"hash": -939011879711247900,
"line_mean": 25.619047619,
"line_max": 72,
"alpha_frac": 0.3255813953,
"autogenerated": false,
"ratio": 3.802721088435374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46283024837353737,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alan'
class WordDictionary(object):
    """Trie-backed word store supporting '.' single-character wildcards."""
    class Node(object):
        def __init__(self):
            # True when an added word terminates at this node.
            self.isWord = False
            # Child nodes keyed by character.
            self.ch = {}
    def __init__(self):
        """
        initialize your data structure here.
        """
        self.root = self.Node()
    def addWord(self, word):
        """
        Adds a word into the data structure.
        :type word: str
        :rtype: void
        """
        cur = self.root
        for letter in word:
            if letter not in cur.ch:
                cur.ch[letter] = self.Node()
            cur = cur.ch[letter]
        cur.isWord = True
    def search(self, word):
        """
        Returns if the word is in the data structure. A word could
        contain the dot character '.' to represent any one letter.
        :type word: str
        :rtype: bool
        """
        return self.find(word, 0, self.root)
    def find(self, word, start, root):
        # Recursive trie walk; '.' fans out over every child node.
        if start == len(word):
            return root.isWord
        letter = word[start]
        if letter == '.':
            return any(self.find(word, start + 1, child)
                       for child in root.ch.values())
        nxt = root.ch.get(letter)
        return nxt is not None and self.find(word, start + 1, nxt)
if __name__ == "__main__":
    # Exercise wildcard search against a small dictionary; output order
    # and values match the original demo exactly.
    wd = WordDictionary()
    for w in ("at", "and", "an", "add"):
        wd.addWord(w)
    print(wd.search("a"))
    print(wd.search(".at"))
    wd.addWord("bat")
    print(wd.search(".at"))
    print(wd.search("an."))
    print(wd.search("a.d."))
    print(wd.search("b."))
    print(wd.search("a.d"))
    print(wd.search("."))
| {
"repo_name": "alyiwang/LeetPy",
"path": "WordDict.py",
"copies": "1",
"size": "1706",
"license": "apache-2.0",
"hash": -6249998761865192000,
"line_mean": 23.7246376812,
"line_max": 66,
"alpha_frac": 0.4777256741,
"autogenerated": false,
"ratio": 3.708695652173913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9686421326273913,
"avg_score": 0,
"num_lines": 69
} |
__author__ = 'Alan Richmond'
'''
Mandelbrot.py Copyright (C) 2014 Alan Richmond (Tuxar.uk)
Full mandelbrot set with a couple of optimizations:
1 compute only top half, mirror it.
2 don't compute set inside the big circle and a couple of smaller ones.
See http://en.wikipedia.org/wiki/Mandelbrot_set#Cardioid_.2F_bulb_checking
'''
from pygame.locals import *
import pygame
def main():
    """Render the Mandelbrot set into a pygame window.

    Only the top half is computed; each pixel is mirrored to the bottom
    half.  Points provably inside the main cardioid or the period-2 bulb
    are skipped (see the check referenced in the module docstring).
    """
    width, height = 1000, 1000
    screen = pygame.display.set_mode((width, height), DOUBLEBUF)
    xaxis = width / 1.5 + 140   # pixel x of the complex-plane origin
    yaxis = height / 2          # pixel y of the real axis
    scale = 400                 # pixels per unit in the complex plane
    iterations = 50
    # Bug fix: use integer division.  On Python 3, `height/2` and
    # `765*i/iterations` are floats — range() raised TypeError and the
    # color channels became floats.  `//` preserves the Python 2 result.
    for iy in range(height // 2 + 1):
        for ix in range(width):
            z = 0 + 0j
            c = complex(float(ix - xaxis) / scale, float(iy - yaxis) / scale)
            x = c.real
            y = c.imag
            y2 = y * y
            q = (x - 0.25) ** 2 + y2
            # Cardioid / period-2 bulb membership test: skip interior points.
            if not (q * (q + (x - 0.25)) < y2 / 4.0 or (x + 1.0) ** 2 + y2 < 0.0625):
                for i in range(iterations):
                    z = z ** 2 + c
                    if abs(z) > 2:
                        # Escaped: map iteration count onto a black->red->yellow ramp.
                        v = 765 * i // iterations
                        if v > 510:
                            color = (255, 255, v % 255)
                        elif v > 255:
                            color = (255, v % 255, 0)
                        else:
                            color = (v % 255, 0, 0)
                        break
                else:
                    color = (0, 0, 0)  # never escaped: treat as inside the set
                screen.set_at((ix, iy), color)
                screen.set_at((ix, height - iy), color)  # mirror to bottom half
    pygame.display.update()
    # Block until the window is closed or ESC is pressed.
    while True:
        event = pygame.event.poll()
        if (event.type == QUIT or
                (event.type == KEYDOWN and event.key == K_ESCAPE)):
            break
if __name__ == "__main__":
    main()
| {
"repo_name": "tuxar-uk/Mandelbrot",
"path": "Mandelbrot.py",
"copies": "1",
"size": "1746",
"license": "mit",
"hash": 6832860415995486000,
"line_mean": 29.6315789474,
"line_max": 78,
"alpha_frac": 0.470790378,
"autogenerated": false,
"ratio": 3.556008146639511,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4526798524639511,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alanseciwa'
import sys
import json
import csv
from datetime import datetime, timedelta
from textblob import TextBlob
from elasticsearch import Elasticsearch
from scripts.spam_detection import SpamBotDetection
es = Elasticsearch()
def check_obj(c):
    """Return *c* unchanged, or '' when it is falsy (None, '', 0, ...)."""
    return c if c else ''
def parse_json_data(data):
    """Count non-spam users in a file of line-delimited tweet JSON.

    Each line of *data* is one tweet object.  A user is counted as real
    when any SpamBotDetection heuristic returns False; otherwise the
    tweet is tallied as spam.  Prints the number of real users.
    """
    bot = SpamBotDetection()
    spam_counter = 0
    true_users = 0
    # Open json file while reserving a buffer size of 1024
    with open(data, 'r', buffering=1024) as read_json:
        for tweet in read_json:
            # Load json and store key-value pair for text in tweet
            jd = json.loads(tweet)
            try:
                tweets = jd['text']  # extracted to validate presence; unused below
                screen_name = jd['user']['screen_name']
                # User information fed to the spam heuristics
                user_created_at = datetime.strptime(jd['user']['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
                user_statuses_count = jd['user']['statuses_count']
                user_ratio = float(jd['user']['followers_count']) / float(jd['user']['friends_count'])
                user_description = jd['user']['description']
            except (AttributeError, KeyError, ZeroDivisionError) as e:
                # Bug fix: skip malformed tweets entirely.  The original
                # fell through and read unbound variables (NameError) the
                # first time a line was missing a field; KeyError and
                # ZeroDivisionError were not caught at all.
                print(e)
                continue
            # Check if twitter user is a spambot: not spam when any
            # heuristic comes back False.  Bug fix: `is False` added to the
            # description-length check — the bare truthy test inverted that
            # heuristic relative to the other three (cf. retrieve.py).
            if (bot.check_user_date(user_created_at) is False or bot.check_status_count(user_statuses_count) is False
                    or bot.check_ratio(user_ratio) is False or bot.check_descript_len(user_description) is False):
                print("is not a spam bot")
                true_users += 1
            else:
                spam_counter += 1
    print(true_users)
def main():
    """Entry point: run the spam tally over the hard-coded sample file."""
    parse_json_data('/Users/alanseciwa/Desktop/sample.json')
if __name__ == '__main__':
    main()
sys.exit() | {
"repo_name": "aseciwa/independent-study",
"path": "scripts/parse_twitter_data_PartThree.py",
"copies": "1",
"size": "2411",
"license": "mit",
"hash": -7815309185996555000,
"line_mean": 29.15,
"line_max": 117,
"alpha_frac": 0.550808793,
"autogenerated": false,
"ratio": 3.737984496124031,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9766485875184467,
"avg_score": 0.004461482787912729,
"num_lines": 80
} |
__author__ = 'alanseciwa'
import sys
import json
import csv
from datetime import datetime, timedelta
from textblob import TextBlob
from textblob import sentiments
from elasticsearch import Elasticsearch
es = Elasticsearch()
class SpamBodDetection():
    """Check if twitter user is a spambot.
    Each heuristic returns True when the account looks suspicious:
    older than a day, very few tweets, lopsided follower ratio, or a
    near-empty profile description.
    """
    def check_user_date(self, date_str):
        # True when the account was created more than one day ago.
        return date_str < datetime.now() - timedelta(days=1)
    def check_status_count(self, status):
        # True when the account has posted fewer than 50 tweets.
        return int(status) < 50
    def check_ratio(self, ratio):
        # True when the follower/friend ratio is below 1%.
        return ratio < 0.01
    def check_descript_len(self, description):
        # True when the profile description is under 20 characters.
        return len(description) < 20
def check_obj(c):
    """Coerce falsy values (None, '', 0, ...) to the empty string."""
    return c or ''
def parse_json_data(data):
    """Sentiment-score each tweet in a line-delimited JSON file and index
    the results into the local Elasticsearch "sentiment" index.

    Bug fixes:
      * ``jd['text']`` was assigned as a plain str, so every access to
        ``.sentiment`` raised AttributeError, the bare ``except:``
        swallowed it, and nothing was ever indexed.  The TextBlob wrapper
        (present in a commented-out line of the original) is restored.
      * a stray ``return True`` stopped processing after the first tweet;
        removed so the whole file is handled.
      * the unused CSV output file (opened, never written, truncated an
        existing file as a side effect) is no longer created.
    """
    # Open json file while reserving a buffer size of 1024
    with open(data, 'r', buffering=1024) as read_json:
        for tweet in read_json:
            # Load json and store key-value pair for text in tweet
            jd = json.loads(tweet)
            try:
                tweets = TextBlob(jd['text'])
                screen_name = jd['user']['screen_name']
                # User information (inputs to the spam heuristics)
                user_created_at = datetime.strptime(jd['user']['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
                user_statuses_count = jd['user']['statuses_count']
                user_ratio = float(jd['user']['followers_count']) / float(jd['user']['friends_count'])
                user_description = jd['user']['description']
                # Output sentiment polarity
                print(tweets.sentiment.polarity)
                # Determine if sentiment is positive, negative, or neutral
                if tweets.sentiment.polarity < 0:
                    sentiment = 'negative'
                elif tweets.sentiment.polarity == 0:
                    sentiment = 'neutral'
                else:
                    sentiment = 'positive'
                # Print output
                print(sentiment)
                # add text and sentiment info to elasticsearch
                es.index(index="sentiment",
                         doc_type="text-type",
                         body={"message": jd["text"],
                               "polarity": tweets.sentiment.polarity,
                               "subjectivity": tweets.sentiment.subjectivity,
                               "sentiment": sentiment})
            except Exception as e:
                # Best-effort: skip malformed tweets.  Kept broad (was a
                # bare `except: pass`) but the error is now at least logged.
                print(e)
def main():
    """Entry point: run sentiment indexing over the hard-coded sample file."""
    parse_json_data('/Users/alanseciwa/Desktop/sample.json')
if __name__ == '__main__':
    main()
    sys.exit()
"repo_name": "aseciwa/independent-study",
"path": "scripts/parse_twitter_data_partTwo.py",
"copies": "1",
"size": "4110",
"license": "mit",
"hash": -3232682500037866500,
"line_mean": 28.3642857143,
"line_max": 107,
"alpha_frac": 0.5313868613,
"autogenerated": false,
"ratio": 3.9825581395348837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5013945000834883,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alanseciwa'
import sys, os
import json
import csv
def parse_json_data(data):
    """Extract the text of each tweet from a line-delimited JSON file and
    write it, one tweet per CSV row, to a fixed output path.

    Bug fix: the original wrote ``tweets.encode('utf-8')`` (bytes)
    through a text-mode csv.writer, so every row contained the bytes
    repr ``b'...'``.  The output file is now opened with utf-8 encoding
    and the string is written directly.
    """
    with open('/Users/alanseciwa/Desktop/clean_data-TWEETONLY-2.csv', 'w',
              encoding='utf-8') as csv_file:
        writer = csv.writer(csv_file, quoting=csv.QUOTE_NONE)
        # Open json file while reserving a buffer size of 1028
        with open(data, 'r', buffering=1028) as read_json:
            for tweet in read_json:
                # Load json and store key-value pair for text in tweet
                jd = json.loads(tweet)
                try:
                    tweets = jd['text']
                    screen_name = jd['user']['screen_name']  # validated, unused
                    print(tweets)
                    writer.writerow([tweets])
                except (KeyError, csv.Error):
                    # KeyError: lines without text/user (e.g. delete events).
                    # csv.Error: QUOTE_NONE cannot represent embedded commas.
                    # Narrowed from the original bare `except: pass`.
                    pass
def main():
    """Entry point: convert the hard-coded GOP-debate dump to CSV."""
    parse_json_data('/Users/alanseciwa/Desktop/Independent_Study/Raw_data/gop-debate-sep16-2.json')
if __name__ == '__main__':
    main()
    sys.exit()
"repo_name": "aseciwa/independent-study",
"path": "scripts/parse_twitter_data.py",
"copies": "1",
"size": "1630",
"license": "mit",
"hash": 2785875847155723300,
"line_mean": 28.6545454545,
"line_max": 97,
"alpha_frac": 0.5460122699,
"autogenerated": false,
"ratio": 3.55119825708061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45972105269806096,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alanseciwa'
import sys, os
import nltk
import json
import pandas as pd
def parse_json_data(data):
    """Print the coordinates and place of every tweet in a line-delimited
    JSON file.  The other extracted fields are kept for future use."""
    with open(data, 'r') as read_json:
        for raw_line in read_json:
            # One JSON tweet object per line.
            jd = json.loads(raw_line)
            user = jd['user']
            created_at = jd['created_at']
            tweet = jd['text']
            screen_name = user['screen_name']
            followers_count = user['followers_count']
            lang = user['lang']
            geo = jd['geo']
            coordinates = jd['coordinates']
            places = jd['place']
            print(coordinates, places)
def main():
    """Entry point: parse the debate dump if it exists, else report 'No'."""
    path = '/Users/alanseciwa/Desktop/gop-debate-bottom4-sep16.json'
    if os.path.isfile(path):
        parse_json_data(path)
    else:
        print('No')
if __name__ == '__main__':
    main()
    sys.exit()
| {
"repo_name": "aseciwa/independent-study",
"path": "scripts/geo_location.py",
"copies": "1",
"size": "1452",
"license": "mit",
"hash": -6694450299291596000,
"line_mean": 29.8936170213,
"line_max": 134,
"alpha_frac": 0.5633608815,
"autogenerated": false,
"ratio": 3.4653937947494033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45287546762494035,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alanseciwa'
import re
import csv
import os
import sys
def match_values(u_list, v_list, t_list):
    """Print each tweet in *t_list* that is still non-empty after emoji
    characters are stripped; print "no" for the others.

    NOTE(review): u_list and v_list (hex codes and names) are currently
    unused — presumably intended for hex->name substitution; confirm
    before removing them from the signature.
    """
    emoji_re = re.compile(u'['
                          u'\U0001F300-\U0001F5FF'
                          u'\U0001F600-\U0001F64F'
                          u'\U0001F680-\U0001F6FF'
                          u'\u2600-\u26FF\u2700-\u27BF]+',
                          re.UNICODE)
    for entry in t_list:
        # Truthy when stripping emoji leaves any text behind.
        print(entry if emoji_re.sub('', entry) else "no")
def hex_to_values(tweet, hex_values):
    """Load the emoji hex-value table and the tweet list, then hand both
    to match_values()."""
    # Columns 0 and 2 of each CSV row are the hex code and its name.
    utf_list = []
    val_list = []
    with open(hex_values, 'r') as h_values:
        for row in csv.reader(h_values):
            utf_list.append(row[0])
            val_list.append(row[2])
    with open(tweet, buffering=1024) as string_tweets:
        tweet_list = [line for line in string_tweets]
    # Send lists to match_values
    match_values(utf_list, val_list, tweet_list)
def main():
    """Entry point: wire up the hard-coded tweet and hex-table paths."""
    hex_to_values("/Users/alanseciwa/Desktop/cleaned.csv",
                  "/Users/alanseciwa/Desktop/hex_emoji_values.csv")
if __name__ == '__main__':
    main()
    sys.exit()
"repo_name": "aseciwa/independent-study",
"path": "examples/replace_hex.py",
"copies": "1",
"size": "1327",
"license": "mit",
"hash": -1285317319336530200,
"line_mean": 19.75,
"line_max": 65,
"alpha_frac": 0.5350414469,
"autogenerated": false,
"ratio": 3.3341708542713566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9353442229685824,
"avg_score": 0.003154014297106402,
"num_lines": 64
} |
__author__ = 'alanseciwa'
import re
import numpy as np
import pandas as pd
from textblob import TextBlob
from candidate_list import clist
def clean(df):
    """Keep only rows whose candidate is in the known candidate list and
    drop the leftover CSV index column."""
    known = df[df.candidate.isin(clist)]
    del known['Unnamed: 0']
    return known
def datetimeify(df):
    """Convert the created_at column from strings to pandas datetimes."""
    df['created_at'] = pd.DatetimeIndex(df['created_at'])
    return df
def sentiment(df):
    """Attach TextBlob polarity and subjectivity columns to *df*.

    Rows with missing text are excluded from scoring; TextBlob does the
    heavy lifting of the sentiment analysis.
    """
    texts = df.dropna(subset=['text']).text
    scores = texts.apply(lambda t: TextBlob(t).sentiment)
    df['polarity'] = scores.apply(lambda s: s.polarity)
    df['subjectivity'] = scores.apply(lambda s: s.subjectivity)
    return df
def influence(df):
    """Score each tweet's reach as sqrt(followers+1) * sqrt(retweets+1)."""
    follower_factor = np.sqrt(df.user_followers_count + 1)
    retweet_factor = np.sqrt(df.retweet_count + 1)
    df['influence'] = follower_factor * retweet_factor
    return df
def influenced_polarity(df):
    """Weight each tweet's sentiment polarity by its influence score."""
    df['influenced_polarity'] = df['polarity'] * df['influence']
    return df
def georeference(df):
    """Derive latitude/longitude columns from the free-form place field."""
    def coordinate_from(place_str, kind):
        # A missing place yields NaN for both coordinates.
        if pd.isnull(place_str):
            return float('nan')
        # Grab decimal numbers terminated by ',' or ']'; the first two in
        # the serialized place string are (longitude, latitude).
        found = re.findall(r'(-?\d+\.\d+)[,\]]', place_str)
        pair = tuple(float(n) for n in found[:2])
        if kind == 'longitude':
            return pair[0]
        elif kind == 'latitude':
            return pair[1]
    df['latitude'] = df.place.apply(coordinate_from, kind='latitude')
    df['longitude'] = df.place.apply(coordinate_from, kind='longitude')
    return df
def preprocess(df):
    """Run the full enrichment pipeline over *df* and return the result."""
    for step in (datetimeify, sentiment, influence,
                 influenced_polarity, georeference):
        df = df.pipe(step)
    return df
def preprocess_df(df):
    """Filter out unknown candidates, then enrich a copy of the result."""
    return preprocess(df.pipe(clean).copy())
def load_df(input_filename):
    """Read the raw CSV export and return the fully preprocessed frame."""
    return preprocess_df(pd.read_csv(input_filename, engine='python'))
| {
"repo_name": "aseciwa/independent-study",
"path": "scripts/tweet_preprocess.py",
"copies": "1",
"size": "2608",
"license": "mit",
"hash": 8885392936108335000,
"line_mean": 25.3434343434,
"line_max": 84,
"alpha_frac": 0.6468558282,
"autogenerated": false,
"ratio": 3.5290933694181326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9671379522601966,
"avg_score": 0.0009139350032333731,
"num_lines": 99
} |
__author__ = 'alanseciwa'
# Reference: Raj Kesavan @ http://www.rajk.me
import datetime
import json
import pandas as pd
import tweepy
# import sys
from candidate_list import clist
from private_keys import consumer_key, consumer_secret, access_token, access_token_secret
from spam_detection import SpamBotDetection
# Authenticate against the Twitter API with the keys and tokens held in
# private_keys.py (imported above).
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# wait_on_rate_limit makes tweepy sleep instead of failing when the
# Twitter rate limit is hit; the notify flag logs when that happens.
t_api = tweepy.API(auth, wait_on_rate_limit = True, wait_on_rate_limit_notify = True)
# Cursor factory: given a search term, returns a tweepy Cursor over the
# matching tweets.  rpp is the number of tweets to return per page.
cursor = lambda term: tweepy.Cursor(t_api.search, q = term, rpp = 100)
def tweet_to_dict_spam(tweet, candidate):
    """Flatten a tweepy Status into a plain dict and tag it with a
    spam-bot verdict.

    :param tweet: tweepy Status object for one tweet
    :param candidate: search term (candidate name) this tweet matched
    :return: dict of tweet/user fields plus 'user_follower_ratio' and
             'is_user_spam' ('True'/'False' strings, as downstream expects)
    """
    # Check for Twitter-SpamBot
    bot = SpamBotDetection()
    # Fix: local renamed from `dict`, which shadowed the builtin.
    record = {
        'candidate': candidate,
        'id': tweet.id,
        'coordinates': tweet.coordinates,
        'favorite_count': tweet.favorite_count,
        'created_at': tweet.created_at,
        'geo': tweet.geo,
        'lang': tweet.lang,
        'place': tweet.place,
        'retweet_count': tweet.retweet_count,
        'text': tweet.text,
        'user_created_at': tweet.user.created_at,
        'user_statuses_count': tweet.user.statuses_count,
        'user_location': tweet.user.location,
        'user_name': tweet.user.name,
        'user_screen_name': tweet.user.screen_name,
        'user_time_zone': tweet.user.time_zone,
        'user_followers_count': tweet.user.followers_count,
        'user_description': tweet.user.description
    }
    # Inputs to the spam heuristics
    u_c_a = tweet.user.created_at
    u_s_c = tweet.user.statuses_count
    u_d = tweet.user.description
    # catch error: division by zero
    # NOTE(review): ratio here is friends/followers, while
    # parse_twitter_data_PartThree.py computes followers/friends —
    # confirm which orientation check_ratio() expects.
    try:
        u_r = float(tweet.user.friends_count) / float(tweet.user.followers_count)
    except ZeroDivisionError:
        u_r = 0
    record['user_follower_ratio'] = u_r
    # Not a spam bot when any heuristic comes back False.
    if (bot.check_user_date(u_c_a) is False or bot.check_status_count(u_s_c) is False
            or bot.check_ratio(u_r) is False or bot.check_descript_len(u_d) is False):
        record['is_user_spam'] = 'False'
    else:
        record['is_user_spam'] = 'True'
    return record
# following functions order and arrange tweets as they come in
def tweets_json(tweets):
    """Extract the raw JSON payload carried by each tweepy Status."""
    payloads = []
    for status in tweets:
        payloads.append(status._json)
    return payloads
def tweets_df(tweets, term, f):
    """Build a DataFrame with one row per tweet, each row produced by
    f(tweet, term)."""
    rows = [f(tw, term) for tw in tweets]
    return pd.DataFrame(rows)
def search(cursor, term, number):
    """Collect up to *number* results for *term* from a cursor factory."""
    results = cursor(term).items(number)
    return list(results)
if __name__ == '__main__':
    # specify the amount of tweets to collect per candidate
    # e.g. ~$ python retrieve.py 100
    #number_per_candidate = int(sys.argv[1])
    per_candidate = 2000
    print(per_candidate)
    # path to results directory
    path = '/path/to/your/results/directory'
    frames = []       # one DataFrame per candidate
    raw_payloads = {} # candidate -> list of raw tweet JSON payloads
    # search for each specified candidate in tweets
    for candidate in clist:
        print('Searching for ' + candidate)
        found = search(cursor, candidate, per_candidate)
        frames.append(tweets_df(found, candidate, tweet_to_dict_spam))
        raw_payloads[candidate] = tweets_json(found)
    # combine per-candidate frames and write both output formats,
    # timestamped, into the results directory
    combined = pd.concat(frames)
    stamp = str(datetime.datetime.now())
    combined.to_csv(path + '/results-{}.csv'.format(stamp))
    with open(path + '/results-{}.json'.format(stamp), 'w') as json_file:
        json_file.write(json.dumps(raw_payloads))
| {
"repo_name": "aseciwa/independent-study",
"path": "scripts/retrieve.py",
"copies": "1",
"size": "3800",
"license": "mit",
"hash": -1591292753454534700,
"line_mean": 30.9327731092,
"line_max": 89,
"alpha_frac": 0.6663157895,
"autogenerated": false,
"ratio": 3.4111310592459607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4577446848745961,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alan Snow'
import netCDF4 as NET
import numpy as np
import os
from json import dumps
def generate_warning_points(ecmwf_prediction_folder, era_interim_file, out_directory):
    """
    Create warning points from era interim data and ECMWD prediction data

    Compares ECMWF ensemble forecast peaks (mean, and mean+stddev capped
    at the ensemble max) per reach against the 2/10/25-year return-period
    flows derived from the ERA Interim historical record, and writes three
    JSON point lists (return_25/10/2_points.txt) to out_directory.
    NOTE: Python 2 source (print statements, `except Exception, e`).
    """
    #Get list of prediciton files (one netCDF file per ensemble member)
    prediction_files = [os.path.join(ecmwf_prediction_folder,f) for f in os.listdir(ecmwf_prediction_folder) \
                        if not os.path.isdir(os.path.join(ecmwf_prediction_folder, f))]
    #get the comids in ECMWF files
    data_nc = NET.Dataset(prediction_files[0], mode="r")
    prediction_comids = data_nc.variables['COMID'][:]
    comid_list_length = len(prediction_comids)
    data_nc.close()
    #get the comids in ERA Interim file
    data_nc = NET.Dataset(era_interim_file, mode="r")
    era_interim_comids = data_nc.variables['COMID'][:]
    data_nc.close()
    print "Extracting Data ..."
    #get information from datasets
    # First 40 timesteps are common to all members; the last 20 only
    # exist for ensemble indices < 52 (see the guard below).
    reach_prediciton_array_first_half = np.zeros((comid_list_length,len(prediction_files),40))
    reach_prediciton_array_second_half = np.zeros((comid_list_length,len(prediction_files),20))
    for file_index, prediction_file in enumerate(prediction_files):
        data_values_2d_array = []
        try:
            # Ensemble member number parsed from "..._<idx>.nc"
            ensemble_index = int(os.path.basename(prediction_file)[:-3].split("_")[-1])
            #Get hydrograph data from ECMWF Ensemble; normalize to
            #(comid, time) orientation regardless of how Qout is stored.
            data_nc = NET.Dataset(prediction_file, mode="r")
            qout_dimensions = data_nc.variables['Qout'].dimensions
            if qout_dimensions[0].lower() == 'time' and qout_dimensions[1].lower() == 'comid':
                data_values_2d_array = data_nc.variables['Qout'][:].transpose()
            elif qout_dimensions[0].lower() == 'comid' and qout_dimensions[1].lower() == 'time':
                data_values_2d_array = data_nc.variables['Qout'][:]
            else:
                print "Invalid ECMWF forecast file", prediction_file
                data_nc.close()
                continue
            data_nc.close()
        except Exception, e:
            # NOTE(review): if the exception fires before ensemble_index
            # is assigned on the first file, the loop below would hit an
            # unbound name; confirm filenames always parse.
            print e
            #pass
        #add data to main arrays and order in order of interim comids
        if len(data_values_2d_array) > 0:
            for comid_index, comid in enumerate(prediction_comids):
                reach_prediciton_array_first_half[comid_index][file_index] = data_values_2d_array[comid_index][:40]
                if(ensemble_index < 52):
                    reach_prediciton_array_second_half[comid_index][file_index] = data_values_2d_array[comid_index][40:]
    print "Extracting and Sorting ERA Interim Data ..."
    #get ERA Interim Data Analyzed
    era_data_nc = NET.Dataset(era_interim_file, mode="r")
    era_flow_data = era_data_nc.variables['Qout'][:]
    num_years = int(len(era_flow_data[0])/365)
    # Sort each reach's daily flows ascending, then take the top values in
    # descending order.  NOTE(review): the stop bound `num_years` in this
    # reversed slice looks like it was meant to keep ~num_years extrema —
    # verify the intended slice semantics.
    era_interim_data_2d_array = np.sort(era_flow_data, axis=1)[:,:num_years:-1]
    era_interim_lat_data = era_data_nc.variables['lat'][:]
    era_interim_lon_data = era_data_nc.variables['lon'][:]
    era_data_nc.close()
    print "Analyzing Data with Return Periods ..."
    return_25_points = []
    return_10_points = []
    return_2_points = []
    for prediction_comid_index, prediction_comid in enumerate(prediction_comids):
        #get interim comid index
        era_interim_comid_index = np.where(era_interim_comids==prediction_comid)[0][0]
        #perform analysis on datasets
        all_data_first = reach_prediciton_array_first_half[prediction_comid_index]
        all_data_second = reach_prediciton_array_second_half[prediction_comid_index]
        # Return-period thresholds taken from the sorted historical flows.
        return_period_25 = era_interim_data_2d_array[era_interim_comid_index, num_years-25]
        return_period_10 = era_interim_data_2d_array[era_interim_comid_index, num_years-10]
        return_period_2 = era_interim_data_2d_array[era_interim_comid_index, num_years-2]
        #get mean of the ensemble, then its peak over the forecast window.
        #"size": 1 marks a mean-peak exceedance (stronger signal).
        mean_data_first = np.mean(all_data_first, axis=0)
        mean_data_second = np.mean(all_data_second, axis=0)
        mean_series = np.concatenate([mean_data_first,mean_data_second])
        mean_peak = np.amax(mean_series)
        if mean_peak > return_period_25:
            return_25_points.append({ "lat" : era_interim_lat_data[era_interim_comid_index],
                                      "lon" : era_interim_lon_data[era_interim_comid_index],
                                      "size": 1,
                                    })
        elif mean_peak > return_period_10:
            return_10_points.append({ "lat" : era_interim_lat_data[era_interim_comid_index],
                                      "lon" : era_interim_lon_data[era_interim_comid_index],
                                      "size": 1,
                                    })
        elif mean_peak > return_period_2:
            return_2_points.append({ "lat" : era_interim_lat_data[era_interim_comid_index],
                                     "lon" : era_interim_lon_data[era_interim_comid_index],
                                     "size": 1,
                                   })
        #get max across ensemble members (upper bound for mean+std below)
        max_data_first = np.amax(all_data_first, axis=0)
        max_data_second = np.amax(all_data_second, axis=0)
        max_series = np.concatenate([max_data_first,max_data_second])
        max_peak = np.amax(max_series)
        #get std dev across ensemble members
        std_dev_first = np.std(all_data_first, axis=0)
        std_dev_second = np.std(all_data_second, axis=0)
        std_dev = np.concatenate([std_dev_first,std_dev_second])
        #mean plus std, capped at the ensemble max; "size": 0 marks this
        #weaker (mean+sigma) exceedance signal.
        mean_plus_std_series = mean_series + std_dev
        mean_plus_std_peak = min(np.amax(mean_plus_std_series), max_peak)
        if mean_plus_std_peak > return_period_25:
            return_25_points.append({ "lat" : era_interim_lat_data[era_interim_comid_index],
                                      "lon" : era_interim_lon_data[era_interim_comid_index],
                                      "size": 0,
                                    })
        elif mean_plus_std_peak > return_period_10:
            return_10_points.append({ "lat" : era_interim_lat_data[era_interim_comid_index],
                                      "lon" : era_interim_lon_data[era_interim_comid_index],
                                      "size": 0,
                                    })
        elif mean_plus_std_peak > return_period_2:
            return_2_points.append({ "lat" : era_interim_lat_data[era_interim_comid_index],
                                     "lon" : era_interim_lon_data[era_interim_comid_index],
                                     "size": 0,
                                   })
    print "Writing Output ..."
    # One JSON list of {lat, lon, size} points per return period.
    with open(os.path.join(out_directory, "return_25_points.txt"), 'wb') as outfile:
        outfile.write(dumps(return_25_points))
    with open(os.path.join(out_directory, "return_10_points.txt"), 'wb') as outfile:
        outfile.write(dumps(return_10_points))
    with open(os.path.join(out_directory, "return_2_points.txt"), 'wb') as outfile:
        outfile.write(dumps(return_2_points))
if __name__ == "__main__":
    # Hard-coded local paths; warning points are written next to the forecasts.
    ecmwf_prediction_folder = '/home/alan/tethysdev/tethysapp-erfp_tool/ecmwf_rapid_predictions/nfie_texas_gulf_region/huc_2_12/20150625.0'
    era_interim_file = '/home/alan/tethysdev/tethysapp-erfp_tool/era_interim_historical_data/nfie_texas_gulf_region/huc_2_12/Qout_era_1980_2014_CF.nc'
    generate_warning_points(ecmwf_prediction_folder, era_interim_file, out_directory=ecmwf_prediction_folder)
| {
"repo_name": "CI-WATER/erfp_data_process_ubuntu_aws",
"path": "generate_warning_points_from_era_interim_data.py",
"copies": "1",
"size": "7511",
"license": "mit",
"hash": 9020849851004345000,
"line_mean": 50.4452054795,
"line_max": 150,
"alpha_frac": 0.5866063107,
"autogenerated": false,
"ratio": 3.3102688408990746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43968751515990745,
"avg_score": null,
"num_lines": null
} |
__author__ = 'albert cuesta'
import os.path
class database:
    """Flat-file data access layer for the app store.

    Apps are stored one per line in text files under database/data/.
    NOTE(review): the add/list methods use ',' as the field separator
    while sumarDescarga splits on ';' — confirm which format the data
    files actually use.
    """
    def listaraplicaiones(self):
        """Return every free app as a list of comma-split field lists."""
        with open("database/data/aplicaciones.txt", mode='r+', encoding='utf-8') as file:
            resultado = file.read()
        result = []
        for linea in resultado.split("\n"):
            result.append(linea.split(","))
        return result
    def listaraplicacionespago(self):
        """Return the first line of the paid-apps file (None if empty).

        Fix: the file handle is now closed — the old explicit close()
        was unreachable after the in-loop return.
        """
        with open("database/data/aplicacionesPago.txt", mode='r', encoding='utf-8') as fo:
            for line in fo:
                return line
    def añadiraplicacionpago(self, nombre, proveedor, fecha, precio, descargas, puntuacion, comentarios):
        """Append one paid app as a comma-separated line."""
        with open("database/data/aplicacionesPago.txt", mode='a', encoding='utf-8') as archivo1:
            archivo1.write(nombre+","+proveedor+","+fecha+","+precio+","+descargas+","+puntuacion+","+comentarios+"\n")
        print ("app insertada")
    def añadiraplicacionfree(self, nombre, proveedor, fecha, precio, descargas, puntuacion, comentarios):
        """Append one free app as a comma-separated line."""
        with open("database/data/aplicaciones.txt", mode='a', encoding='utf-8') as archivo:
            archivo.write(nombre+","+proveedor+","+fecha+","+precio+","+descargas+","+puntuacion+","+comentarios+"\n")
        print ("app insertada")
    def sumarDescarga(self, nombre, pathToDb="database/data/aplicaciones.txt"):
        """Increment the download counter (field index 4, ';'-separated)
        of the app *nombre* in *pathToDb*.

        Returns True when the app was found and updated, False otherwise.
        Fixes: the existence check now tests pathToDb (it previously
        tested the hard-coded default even when a custom path was given),
        and a missing file returns False instead of raising NameError on
        an unbound result flag.
        """
        if not os.path.isfile(pathToDb):
            print("Error! No se ha podido encontrar el fichero de aplicaciones!")
            return False
        with open(pathToDb, 'r') as file:
            llista = file.readlines()
        trobat = False
        with open(pathToDb, 'w') as file:
            for linia in llista:
                if linia.split(";")[0] != nombre:
                    file.write(linia)
                else:
                    campos = linia.split(";")
                    descargues = int(campos[4])
                    # Rebuild the line with the counter bumped by one.
                    file.write(";".join(campos[:4] + [str(descargues + 1)] + campos[5:]))
                    trobat = True
        return trobat
| {
"repo_name": "albertcuesta/PEACHESTORE",
"path": "database/Database.py",
"copies": "1",
"size": "2281",
"license": "mit",
"hash": 5498083207788691000,
"line_mean": 42,
"line_max": 150,
"alpha_frac": 0.5563843791,
"autogenerated": false,
"ratio": 3.2510699001426535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43074542792426534,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Albert cuesta'
import PEACHESTORE.database.Database as database
def _leer_datos_app():
    """Prompt the user for the seven fields of an application record."""
    nombre = input('\tIntroduzca nombre app: ')
    proveedor = input('\tIntroduzca proveedor: ')
    fecha = input('\tIntroduzca fecha: ')
    precio = input('\tIntroduzca su precio: ')
    descargas = input('\tIntroduzca su descargas: ')
    puntuacion = input('\tIntroduzca su puntuacion: ')
    comentarios = input('\tIntroduzca su comentarios: ')
    return nombre, proveedor, fecha, precio, descargas, puntuacion, comentarios


def menu(self=None):
    """Console menu for PEACHESTORE: list, register, modify, count downloads.

    Bugs fixed:
    * the listing submenu tested `opcio` (always 1 there) instead of the
      submenu answer `opcio1`, so paid apps could never be listed;
    * the free/paid branches called the DAO methods swapped, writing free
      apps into the paid file and vice versa;
    * a dangling bare `print` expression (a no-op in Python 3) was removed.
    """
    print('\t BENVINGUTS A PEACHESTORE')
    print('\t [1]: Mostrar aplicacions')
    print('\t [2]: Registrar aplicacions')
    print('\t [3]: Modificar aplicacions')
    print('\t [4]: sumar descarga')
    opcio = int(input('opcion?'))
    if opcio == 1:
        print("=== aplicacions ===")
        print('\t [1]: Mostrar aplicaciones gratis')
        print('\t [2]: Mostrar aplicaciones pago')
        opcio1 = int(input('opcion1?'))
        if opcio1 == 1:
            result = database.database.listaraplicaiones(self)
            print(result)
        if opcio1 == 2:
            result2 = database.database.listaraplicacionespago(self)
            print(result2)
    if opcio == 2:
        print("=== aplicacions ===")
        print('\t [1]: registrar aplicaciones gratis')
        print('\t [2]: registrar aplicaciones pago')
        opcio2 = int(input('opcion2?'))
        if opcio2 == 1:
            print("=== aplicacions free===")
            datos = _leer_datos_app()
            database.database.añadiraplicacionfree(self, *datos)
        if opcio2 == 2:
            print("=== aplicacions pago ===")
            datos = _leer_datos_app()
            database.database.añadiraplicacionpago(self, *datos)
    if opcio == 3:
        # NOTE(review): "modificar" re-uses the insert methods, so it appends
        # a new record instead of editing an existing one — confirm intended.
        print("=== Modificar aplicacions ===")
        print('\t [1]: modificar aplicaciones gratis')
        print('\t [2]: modificar aplicaciones pago')
        opcio2 = int(input('opcion2?'))
        if opcio2 == 1:
            print("=== aplicacions free===")
            datos = _leer_datos_app()
            database.database.añadiraplicacionfree(self, *datos)
        if opcio2 == 2:
            print("=== modificar aplicacions pago ===")
            datos = _leer_datos_app()
            database.database.añadiraplicacionpago(self, *datos)
    if opcio == 4:
        print("=== sumar descarga ===")
        database.database.sumarDescarga(self, 'facebook', "database/data/aplicaciones.txt")
menu()
| {
"repo_name": "albertcuesta/PEACHESTORE",
"path": "database/menu.py",
"copies": "1",
"size": "4057",
"license": "mit",
"hash": -1122451580719135700,
"line_mean": 47.25,
"line_max": 125,
"alpha_frac": 0.5534172218,
"autogenerated": false,
"ratio": 3.3887959866220734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9349805255198518,
"avg_score": 0.01848159064471121,
"num_lines": 84
} |
__author__ = 'albertlwohletz'
from API import models
from django.http import HttpResponse
import json
def add_char(request):
    """Create `count` copies of a character from the GET parameters,
    numbering each name with a 1-based suffix, and report success."""
    name = request.GET['name']
    image = request.GET['image']
    hp = request.GET['hp']
    ac = request.GET['ac']
    count = int(request.GET['count'])
    # One DB row per requested copy: "name 1", "name 2", ...
    for copy_index in range(count):
        char = models.Chars(name=name + ' ' + str(copy_index + 1), image=image, hp=hp, ac=ac)
        char.save()
    return HttpResponse("Success")
def remove_char(request):
    """Delete the character whose primary key is supplied as ?id=..."""
    char_id = request.GET['id']
    models.Chars.objects.filter(id=char_id).delete()
    return HttpResponse('Success')
# Returns json object of char for specified id.
def get_char(request):
    """Return one character as JSON: {"hp", "ac", "img", "name"}."""
    char_id = int(request.GET['id'])  # renamed: `id` shadowed the builtin
    # Perf fix: the original materialised the whole queryset via list(...)[0];
    # indexing the queryset issues a LIMIT 1 query instead (and still raises
    # IndexError when no row matches, like the original).
    char = models.Chars.objects.filter(id=char_id)[0]
    result_data = {"hp": char.hp, "ac": char.ac, "img": char.image, "name": char.name}
    # NOTE(review): "text/json" is non-standard ("application/json" is the
    # registered media type) — left unchanged to avoid surprising clients.
    return HttpResponse(json.dumps(result_data), content_type="text/json")
def edit_char(request):
    """Overwrite all editable fields of one character from GET parameters."""
    char_id = int(request.GET['id'])
    # Fetch the row first, then copy every field straight off the query string.
    char = models.Chars.objects.get(pk=char_id)
    char.ac = request.GET['ac']
    char.hp = request.GET['hp']
    char.name = request.GET['name']
    char.image = request.GET['image']
    char.save()
    return HttpResponse("Success", content_type="text/html")
| {
"repo_name": "albertwohletz/combatmanager",
"path": "API/views.py",
"copies": "1",
"size": "1445",
"license": "mit",
"hash": -3749873529307861500,
"line_mean": 26.7884615385,
"line_max": 88,
"alpha_frac": 0.6359861592,
"autogenerated": false,
"ratio": 3.3840749414519906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.950180966461381,
"avg_score": 0.003650287207636126,
"num_lines": 52
} |
__author__ = 'alberto, azu'
from escom.pepo.config import NUMBER_OF_GENERATIONS, EPSILON, logger, CURRENT_RESULT, CONSTANT_RESULT, CONSTANT_NON_ZERO
from escom.pepo.config import PRINTING_INTERVAL
from escom.pepo.genetic_algorithms.components.population import *
from escom.pepo.genetic_algorithms.components.mutation import random_mutation
from escom.pepo.utils import measure_time
import numpy as np
def strassen_fitness(current_population):
    """Score every chromosome of the population with `evaluate_product`."""
    return [evaluate_product(individual) for individual in current_population]
def evaluate_product(single):
    """Fitness of one chromosome: weighted absolute error against the target
    product plus a sparsity penalty on non-zero genes (lower is better)."""
    genes = np.asarray(single)
    produced = get_multiplication_result(genes)
    error_term = CONSTANT_RESULT * sum(abs(CURRENT_RESULT - produced))
    sparsity_term = CONSTANT_NON_ZERO * np.count_nonzero(genes)
    return error_term + sparsity_term
def get_multiplication_result(chromosome):
    """Evaluate the matrix product encoded by a Strassen-style chromosome.

    Genes 0-55 encode seven 8-gene products P1..P7; genes 56-83 hold four
    7-gene coefficient vectors (r, s, t, u) that linearly combine those
    products into the four entries of the result, concatenated in order.
    """
    # Seven candidate products, 8 genes each.
    products = [get_complete_product(chromosome[8 * k:8 * (k + 1)]) for k in range(7)]
    # Four coefficient vectors (r, s, t, u), 7 genes each, starting at gene 56.
    coefficient_vectors = [chromosome[56 + 7 * k:63 + 7 * k] for k in range(4)]
    generated_result = np.asarray([])
    for vec in coefficient_vectors:
        combined = vec[0] * products[0] + vec[1] * products[1] + vec[2] * products[2] \
                   + vec[3] * products[3] + vec[4] * products[4] + vec[5] * products[5] \
                   + vec[6] * products[6]
        generated_result = np.append(generated_result, combined)
    return generated_result
def get_complete_product(product):
    """Expand a product chromosome: multiply each gene in the first half of
    `product` by the entire second half and concatenate the results.

    Bug fixed: `product.size / 2` is a float under Python 3 true division,
    which makes the slice bounds invalid (TypeError); `//` keeps them ints
    and is equivalent under Python 2.
    """
    half = product.size // 2
    result = np.asarray([])
    for allele in product[:half]:
        result = np.append(result, allele * product[half:])
    return result
def strassen_mutation(allele):
    """Flip a ternary gene: 1 -> -1, -1 -> 1, 0 -> uniform value in {-1, 0, 1}.

    Any other input falls through and returns None, as in the original.
    """
    if allele == 1 or allele == -1:
        return -allele
    if allele == 0:
        return random.randint(-1, 1)
@measure_time
def start_evaluation():
    """Run the genetic search for Strassen-style coefficients.

    Evolves a ternary population for up to NUMBER_OF_GENERATIONS rounds,
    stopping early when the best fitness drops below EPSILON, and returns
    the final population (sorted best-first).
    """
    logger.info("*" * 80)
    logger.info("*" * 80)
    logger.info("Starting Strassen Finder")
    logger.info("*" * 80)
    logger.info("Number of Generations: %s", NUMBER_OF_GENERATIONS)
    logger.info("Chromosome Length: %s", CHROMOSOME_LENGTH)
    logger.info("Population Size: %s", POPULATION_SIZE)
    logger.info("*" * 80)
    logger.info("*" * 80)
    # Initial random population with genes in [-1, 1] and its fitness scores.
    population = generate_population(-1, 1)
    fitness = strassen_fitness(population)
    # Best fitness seen last round, used below to detect stagnation.
    ancestor = fitness[0]
    for i in range(0, NUMBER_OF_GENERATIONS):
        if fitness[0] < EPSILON:
            logger.info("*" * 80)
            logger.info("Best solution found at %s generation.", i)
            logger.info(population[0])
            logger.info(fitness[0])
            logger.info("*" * 80)
            break
        # There's no change, let's create an unfortunate event!
        # (stagnation shake-up: recombine the two best and randomly mutate
        # the worst individual every PRINTING_INTERVAL/2 generations)
        if (ancestor == fitness[0]) and (i % (PRINTING_INTERVAL / 2) == 0):
            logger.debug("Wake Up!!!!")
            new_sons = one_point_crosses(population[0], population[1])
            population[2] = new_sons[0]
            population[-1] = random_mutation(population[-1], -1, 1)
        else:
            ancestor = fitness[0]
        new_population = generate_new_population(population, fitness, mutator=strassen_mutation)
        # Survivor selection: keep the POPULATION_SIZE lowest-scoring singles.
        population = sorted(new_population, key=evaluate_product, reverse=False)[0:POPULATION_SIZE]
        fitness = strassen_fitness(population)
        if i % PRINTING_INTERVAL == 0:
            # Periodic progress report: best and worst of this generation.
            logger.debug("-" * 80)
            logger.debug("Generation %s ", i)
            logger.debug("BEST :")
            logger.debug("Single %s ", population[0])
            logger.debug("Fitness %s ", fitness[0])
            logger.debug("." * 80)
            logger.debug("WORST :")
            logger.debug("Single %s ", population[-1])
            logger.debug("Fitness %s ", fitness[-1])
            logger.debug("-" * 80)
    logger.info("*" * 80)
    logger.info("No better solution found. ")
    logger.info("Best solution at %s generation", NUMBER_OF_GENERATIONS)
    logger.info(population[0])
    logger.info(fitness[0])
    logger.info("*" * 80)
    return population
if __name__ == '__main__':
    # Kick off the timed GA run when executed as a script.
    result = start_evaluation()
| {
"repo_name": "jresendiz27/EvolutionaryComputing",
"path": "practices/second/strassen_algorithm/strassen_evaluation.py",
"copies": "2",
"size": "4796",
"license": "apache-2.0",
"hash": 4368299829279361000,
"line_mean": 37.0634920635,
"line_max": 120,
"alpha_frac": 0.6171809842,
"autogenerated": false,
"ratio": 3.208026755852843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9819438071445186,
"avg_score": 0.0011539337215315196,
"num_lines": 126
} |
__author__ = 'alberto'
from datetime import datetime
def create_and_add_mapping(connection, index_name, type_name):
    """Ensure `index_name` exists and register two mappings on it: the main
    `type_name` document and a routed child type `type_name + "2"`."""
    try:
        connection.indices.create(index_name)
    except:
        # we skip exception if index already exists
        pass
    # Wait until the new index is at least usable before mapping it.
    connection.cluster.health(wait_for_status="yellow")
    connection.indices.put_mapping(index=index_name, doc_type=type_name, body={type_name: {"properties": {
        "uuid": {"index": "not_analyzed", "type": "string", "store": "yes"},
        "title": {"index": "analyzed", "type": "string", "store": "yes", "term_vector": "with_positions_offsets"},
        "parsedtext": {"index": "analyzed", "type": "string", "store": "yes", "term_vector": "with_positions_offsets"},
        "nested": {"type": "nested", "properties": {"num": {"index": "not_analyzed", "type": "integer", "store": "yes"},
                                                    "name": {"index": "not_analyzed", "type": "string", "store": "yes"},
                                                    "value": {"index": "not_analyzed", "type": "string",
                                                              "store": "yes"}}},
        "date": {"index": "not_analyzed", "type": "date", "store": "yes"},
        "position": {"index": "not_analyzed", "type": "integer", "store": "yes"},
        "name": {"index": "analyzed", "type": "string", "store": "yes", "term_vector": "with_positions_offsets"}}}})
    type2 = type_name + "2"
    # Child mapping: documents of this type require routing and declare a
    # _parent of type "my_type".
    connection.indices.put_mapping(index=index_name, doc_type=type2, body={type2: {
        "_parent": {
            "type": "my_type"
        },
        "_routing": {
            "required": True
        },
        "_type": {
            "store": True
        },
        "properties": {
            "name": {
                "type": "string"
            },
            "value": {
                "type": "string"
            }
        }
    }})
def populate(connection, index_name, type_name):
    """Index three parent documents (ids 1-3) plus two child documents
    (parented to docs 1 and 2), then refresh the index."""
    connection.index(index=index_name, doc_type=type_name, id=1,
                     body={"name": "Joe Tester", "parsedtext": "Joe Testere nice guy", "uuid": "11111",
                           "position": 1,
                           "date": datetime(2013, 12, 8)})
    connection.index(index=index_name, doc_type=type_name + "2", id=1, body={"name": "data1", "value": "value1"},
                     parent=1)
    connection.index(index=index_name, doc_type=type_name, id=2,
                     body={"name": "Bill Baloney", "parsedtext": "Bill Testere nice guy", "uuid": "22222",
                           "position": 2,
                           "date": datetime(2013, 12, 8)})
    connection.index(index=index_name, doc_type=type_name + "2", id=2, body={"name": "data2", "value": "value2"},
                     parent=2)
    connection.index(index=index_name, doc_type=type_name, id=3, body={"name": "Bill Clinton", "parsedtext": """Bill is not
            nice guy""", "uuid": "33333", "position": 3, "date": datetime(2013, 12, 8)})
    # Make the just-indexed documents searchable immediately.
    connection.indices.refresh(index_name) | {
"repo_name": "abarry/elasticsearch-cookbook-scripts",
"path": "chapter_11/utils.py",
"copies": "2",
"size": "3032",
"license": "bsd-2-clause",
"hash": 1829086959101975600,
"line_mean": 49.55,
"line_max": 123,
"alpha_frac": 0.5039577836,
"autogenerated": false,
"ratio": 3.6181384248210025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5122096208421002,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alberto'
from escom.pepo.config import CHROMOSOME_LENGTH, OFFSPRING_POPULATION_SIZE, FITNESS_WEIGHT, logger, np
from escom.pepo.genetic_algorithms.components.selectors import *
from escom.pepo.genetic_algorithms.components.crosses import one_point_crosses
from escom.pepo.genetic_algorithms.components.mutation import whole_mutation
def generate_population(chromosome_min_value, chromosome_max_value, generator=None):
    """Build POPULATION_SIZE random chromosomes of CHROMOSOME_LENGTH genes.

    Genes are drawn with `generator(min, max)` when one is supplied,
    otherwise with random.randint over the inclusive range.
    """
    gene_source = generator if generator else random.randint
    return [
        [gene_source(chromosome_min_value, chromosome_max_value) for _ in range(CHROMOSOME_LENGTH)]
        for _ in range(POPULATION_SIZE)
    ]
def show_population(population, fitness):
    """Log every individual of the population alongside its fitness score."""
    logger.info("Population ")
    for index in range(0, POPULATION_SIZE):
        # Bug fixed: the original passed one tuple argument to a format
        # string with two %f specs (and %f cannot format a chromosome list),
        # which makes logging raise a formatting error.  Pass the arguments
        # positionally and format the chromosome with %s.
        logger.info("%s => %f", population[index], fitness[index])
def generate_new_population(population, fitness, **kwargs):
    """Extend `population` with OFFSPRING_POPULATION_SIZE crossover rounds.

    Each round roulette-selects two parents, crosses them at one point and
    mutates both offspring in place (with kwargs['mutator'] when provided)
    before appending them.  Returns the original population plus offspring.
    """
    # Bug fixed: kwargs['mutator'] raised KeyError when the caller did not
    # pass a mutator; .get() makes the argument genuinely optional.
    mutator = kwargs.get('mutator')
    offspring = []
    for _ in range(OFFSPRING_POPULATION_SIZE):
        father = population[roulette_selector(fitness)]
        mother = population[roulette_selector(fitness)]
        for son in one_point_crosses(father, mother):
            # NOTE(review): the original called this a "clone" but only
            # aliased the object; whole_mutation mutates the son in place.
            if mutator:
                whole_mutation(son, mutator)
            else:
                whole_mutation(son)
            offspring.append(son)
    return population + offspring
def binary_fitness(single):
    """Reward each zero gene of the chromosome with FITNESS_WEIGHT."""
    return sum((FITNESS_WEIGHT for gene in single if gene == 0), 0.0)
def population_fitness(population, fitness_function=None):
    """Evaluate every individual; defaults to `binary_fitness` when no
    custom fitness function is supplied.  Returns a numpy array."""
    evaluator = fitness_function if fitness_function else binary_fitness
    fitness = []
    for single in population:
        fitness = np.append(fitness, evaluator(single))
    return fitness
def choose_best(population, fitness_evaluator, number_of_singles=POPULATION_SIZE, reversed_order=True):
    # Rank the population by fitness_evaluator (descending by default) and
    # keep only the top `number_of_singles` individuals.
    return sorted(population, key=fitness_evaluator, reverse=reversed_order)[0:number_of_singles] | {
"repo_name": "pepo27/EvolutionaryComputing",
"path": "escom/pepo/genetic_algorithms/components/population.py",
"copies": "2",
"size": "2372",
"license": "apache-2.0",
"hash": -1317048423356229400,
"line_mean": 35.5076923077,
"line_max": 119,
"alpha_frac": 0.6804384486,
"autogenerated": false,
"ratio": 3.588502269288956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5268940717888957,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alberto'
from pyes import ES
import random
import os
import codecs
from lorem_ipsum import words
from datetime import datetime, timedelta
import sys
def get_names():
    """
    Return the list of names from data/names.txt, stripped of surrounding
    whitespace and with apostrophes replaced by dashes.

    Bug fixed: the original left the codecs file handle open; the `with`
    block guarantees it is closed.
    """
    with codecs.open(os.path.join("data", "names.txt"), "rb", 'utf8') as handle:
        return [n.strip().replace("'", "-") for n in handle.readlines()]
# 90
# long 180
class SampleMaker(object):
    """Build the "test-index" sample index over pyes and fill it with random
    documents, dumping every request as curl lines into a log file.
    Python 2 code (uses xrange)."""
    def __init__(self, name):
        # `name` is the path of the curl-dump log file; the target index is
        # deleted (if present) and recreated with an explicit mapping.
        log = open(name, "wb")
        self.log = log
        self.conn = ES(("http", "127.0.0.1", 9200), timeout=300.0, log_curl=True, dump_curl=log)
        self.index_name = "test-index"
        self.document_type = "test-type"
        self.conn.delete_index_if_exists(self.index_name)
        self.init_default_index()
    def init_default_index(self):
        """Create the index with a mapping covering strings, numbers, a date,
        a boolean, a geo point and a nested `metadata` object."""
        from pyes.helpers import SettingsBuilder
        settings = SettingsBuilder()
        from pyes.mappings import DocumentObjectField
        from pyes.mappings import IntegerField
        from pyes.mappings import NestedObject
        from pyes.mappings import StringField, DateField, BooleanField, GeoPointField, FloatField
        docmapping = DocumentObjectField(name=self.document_type)
        docmapping.add_property(
            StringField(name="description", store=True, term_vector="with_positions_offsets", index="analyzed"))
        docmapping.add_property(
            StringField(name="name", store=True, term_vector="with_positions_offsets", index="analyzed"))
        docmapping.add_property(StringField(name="tag", store=True, index="not_analyzed"))
        docmapping.add_property(IntegerField(name="age", store=True))
        docmapping.add_property(FloatField(name="price"))
        docmapping.add_property(DateField(name="date", store=True))
        docmapping.add_property(BooleanField(name="in_stock", store=True, index="not_analyzed"))
        docmapping.add_property(GeoPointField(name="position"))
        nested_object = NestedObject(name="metadata")
        nested_object.add_property(StringField(name="name", store=True))
        nested_object.add_property(StringField(name="value", store=True))
        nested_object.add_property(IntegerField(name="num", store=True))
        docmapping.add_property(nested_object)
        settings.add_mapping(docmapping)
        self.conn.ensure_index(self.index_name, settings)
    def generate_datafile(self, number_items=1000):
        """
        Generate a dataset with number_items elements.
        """
        names = get_names()
        totalnames = len(names)
        #init random seeder
        random.seed()
        #calculate items
        # names = random.sample(names, number_items)
        for i in xrange(number_items):
            # Random document: lat drawn in [-90, 90], lon in [-180, 180],
            # date within +/- 1000 days of now, ids are 1-based strings.
            data = {"name": names[random.randint(0, totalnames - 1)],
                    "age": random.randint(1, 100),
                    "price": random.random()*100.0,
                    "tag":[words(1, False) for r in xrange(random.randint(1, 5))],
                    "in_stock": random.choice([True, False]),
                    "date": datetime.now()+timedelta(days=random.choice([1, -1])*random.randint(0,1000)),
                    "position": {
                        "lat" : random.choice([1, -1])* random.random()*90.0,
                        "lon" : random.choice([1, -1])* random.random()*180.0
                    },
                    "description": words(random.randint(1, 100), False),
                    "metadata":[{"name":names[random.randint(0, totalnames - 1)],
                                 "value":str(random.randint(1, 5)), "num":random.randint(1, 50) } for r in xrange(random.randint(1, 5))]
                    }
            self.conn.index(data, self.index_name, self.document_type, id=str(i+1))
    def close(self):
        """Flush pending documents to the index and close the curl log."""
        self.conn.flush(self.index_name)
        self.log.close()
if __name__ == '__main__':
    # Build the sample index with the default 1000 documents.
    sm = SampleMaker(name="populate_facets.sh")
    sm.generate_datafile()
    sm.close()
| {
"repo_name": "aparo/elasticsearch-cookbook-second-edition",
"path": "data_scripts/facets_data_generation.py",
"copies": "2",
"size": "3930",
"license": "bsd-2-clause",
"hash": -4771752301036804000,
"line_mean": 39.9375,
"line_max": 135,
"alpha_frac": 0.6061068702,
"autogenerated": false,
"ratio": 3.82295719844358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5429064068643581,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alberto'
import os
import sys
from difflib import unified_diff
# (old flat-client method name, new namespaced method name) pairs used by the
# rewrite loop below to migrate test sources to the indices.* / cluster.* API.
MIGRATIONS = [
    ("aliases", "indices.aliases"),
    ("status", "indices.status"),
    ("create_index", "indices.create_index"),
    ("create_index_if_missing", "indices.create_index_if_missing"),
    ("delete_index", "indices.delete_index"),
    ("exists_index", "indices.exists_index"),
    ("delete_index_if_exists", "indices.delete_index_if_exists"),
    ("get_indices", "indices.get_indices"),
    ("get_closed_indices", "indices.get_closed_indices"),
    ("get_alias", "indices.get_alias"),
    ("change_aliases", "indices.change_aliases"),
    ("add_alias", "indices.add_alias"),
    ("delete_alias", "indices.delete_alias"),
    ("set_alias", "indices.set_alias"),
    ("close_index", "indices.close_index"),
    ("open_index", "indices.open_index"),
    ("flush", "indices.flush"),
    ("refresh", "indices.refresh"),
    ("optimize", "indices.optimize"),
    ("analyze", "indices.analyze"),
    ("gateway_snapshot", "indices.gateway_snapshot"),
    ("put_mapping", "indices.put_mapping"),
    ("get_mapping", "indices.get_mapping"),
    ("delete_mapping", "indices.delete_mapping"),
    ("get_settings", "indices.get_settings"),
    ("update_settings", "indices.update_settings"),
    # ("index_stats", "indices.index_stats"),
    # ("put_warmer", "indices.put_warmer"),
    # ("get_warmer", "indices.get_warmer"),
    # ("delete_warmer", "indices.delete_warmer"),
    # update_mapping_meta
    ("cluster_health", "cluster.health"),
    ("cluster_state", "cluster.state"),
    ("cluster_nodes", "cluster.nodes"),
    ("cluster_stats", "cluster.stats"),
]
# Rewrite every tests/*.py file: replace old flat method calls with their
# namespaced equivalents and print a unified diff per modified file (Python 2).
filenames = [filename for filename in os.listdir("tests") if filename.endswith(".py")]
for filename in filenames:
    print "processing", filename
    path = os.path.join("tests", filename)
    ndata = data = open(path).read()
    for old_name, new_name in MIGRATIONS:
        pos = ndata.find(old_name + "(")
        # NOTE(review): when the first hit is not dot-prefixed this skips the
        # whole migration entry; and if pos == -1, ndata[pos - 1] wraps to the
        # second-to-last character — confirm this is the intended behaviour.
        if ndata[pos - 1] != '.':
            pos = ndata.find(old_name, pos + 1)
            continue
        prefix = new_name.split(".")[0]
        while pos != -1:
            #check if already fixed
            ppos = pos - len(prefix) - 1
            if ppos > 0 and ndata[ppos:pos] == "." + prefix:
                pos = ndata.find(old_name, pos + 1)
                continue
            ndata = ndata[:pos] + new_name + ndata[pos + len(old_name):]
            pos = ndata.find(old_name, pos + len(new_name))
    if data != ndata:
        # Show what changed, then write the migrated source back in place.
        for line in unified_diff(data.splitlines(1), ndata.splitlines(1), fromfile=path, tofile=path):
            sys.stdout.write(line)
        with open(path, "wb") as fo:
            fo.write(ndata) | {
"repo_name": "HackLinux/pyes",
"path": "migrate_deprecation.py",
"copies": "5",
"size": "2697",
"license": "bsd-3-clause",
"hash": 4830755783668730000,
"line_mean": 38.6764705882,
"line_max": 102,
"alpha_frac": 0.5936225436,
"autogenerated": false,
"ratio": 3.448849104859335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012649677287125839,
"num_lines": 68
} |
__author__ = 'alberto'
import random
CHROMOSOME_LENGTH = 8
POPULATION_SIZE = 4
NUMBER_OF_GENERATIONS = 5000
FITNESS_WEIGHT = 0.1
EPSILON = 0.01
chromosome = [random.randint(0, 1) for i in range(0, CHROMOSOME_LENGTH)]
def init():
    """Create POPULATION_SIZE random bit-string chromosomes."""
    return [
        [random.randint(0, 1) for _ in range(CHROMOSOME_LENGTH)]
        for _ in range(POPULATION_SIZE)
    ]
def print_single(single, fitness):
    # Python 2 print statement: show one chromosome next to its fitness.
    print single, " => ", fitness
def print_population(population, fitness):
    """Print every chromosome of the population paired with its fitness."""
    for position, single in enumerate(population):
        print_single(single, fitness[position])
def fitness_calculation(single):
    """Score a chromosome: each zero gene contributes FITNESS_WEIGHT."""
    return sum((FITNESS_WEIGHT for gen in single if gen == 0), 0.0)
def population_fitness(population):
    """Evaluate every chromosome in the population."""
    return [fitness_calculation(single) for single in population]
def choose_single(fitness):
    """Roulette-wheel selection: return the index of the chosen individual.

    Each individual's expected value is its fitness over the population
    average; a random threshold in [0, POPULATION_SIZE) is walked until the
    running sum of expected values reaches it.  May return None when
    rounding keeps the sum below the threshold (preserved from original).
    """
    average_fitness = sum(fitness) / POPULATION_SIZE
    expected_values = [single_fitness / average_fitness for single_fitness in fitness]
    threshold = random.random() * POPULATION_SIZE  # same as random.random()*T
    running_total = 0.0
    for index, expected_value in enumerate(expected_values):
        running_total = running_total + expected_value
        if running_total >= threshold:
            return index
def one_point_crosses(father, mother):
    """Single-point crossover: swap the parents' tails at a random cut."""
    cut = random.randint(1, CHROMOSOME_LENGTH - 1)
    son_one = father[:cut] + mother[cut:CHROMOSOME_LENGTH]
    son_two = mother[:cut] + father[cut:CHROMOSOME_LENGTH]
    return son_one, son_two
def mutate(single):
    """Flip one random gene of the bit-string chromosome in place.

    Bug fixed: the original compared the gene with `is 1`, i.e. object
    identity rather than equality — it only worked thanks to CPython's
    small-int caching and raises a SyntaxWarning on modern Python.
    """
    mutation_point = random.randint(0, CHROMOSOME_LENGTH - 1)
    single[mutation_point] = 0 if single[mutation_point] == 1 else 1
def generate_new_population(population, fitness):
    """Double the population: breed POPULATION_SIZE/2 offspring pairs.

    Parents are roulette-selected, crossed at one point and mutated; the
    offspring are appended after the current population.
    """
    offspring = []
    for _ in range(0, int(POPULATION_SIZE / 2)):
        father = population[choose_single(fitness)]
        mother = population[choose_single(fitness)]
        son_one, son_two = one_point_crosses(father, mother)
        mutate(son_one)
        mutate(son_two)
        offspring.append(son_one)
        offspring.append(son_two)
    return population + offspring
def choose_best(population):
    """Return the population ranked from fittest to least fit."""
    ranking = sorted(population, key=fitness_calculation, reverse=True)
    return ranking
def find_best(n):
    """Evolve until a near-optimal chromosome appears and return the top n.

    Stops when the best fitness reaches FITNESS_WEIGHT * CHROMOSOME_LENGTH
    (the all-zero chromosome) minus EPSILON, or after NUMBER_OF_GENERATIONS.
    Python 2 print statements.
    """
    population = init()
    fitness = population_fitness(population)
    for i in range(0, NUMBER_OF_GENERATIONS):
        # Optimum fitness (every gene zero) with a small tolerance.
        value = FITNESS_WEIGHT * CHROMOSOME_LENGTH - EPSILON
        if fitness[0] >= value:
            print "**" * 60 + "\n"
            print "Found at %s generation!" % (str(i))
            print_single(population[0], fitness[0])
            print "**" * 60 + "\n"
            break
        new_population = generate_new_population(population, fitness)
        # Survivor selection: keep the POPULATION_SIZE fittest individuals.
        population = (choose_best(new_population))[0:POPULATION_SIZE]
        fitness = population_fitness(population)
        print "+-" * 60 + "\n"
        print "Generation: ", i
        print "Single : ", population[0]
        print "Fitness : ", fitness[0]
        print "+-" * 60 + "\n"
    # Return the n best chromosomes of the final population.
    return population[0:n]
if __name__ == '__main__':
    # Run the GA and report the single best chromosome found.
    find_best(1) | {
"repo_name": "jresendiz27/EvolutionaryComputing",
"path": "practices/first/GeneticAlgorithm/ga_example.py",
"copies": "2",
"size": "3733",
"license": "apache-2.0",
"hash": -5625134492002996000,
"line_mean": 27.0751879699,
"line_max": 107,
"alpha_frac": 0.6249665149,
"autogenerated": false,
"ratio": 3.4153705397987193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006301312182732601,
"num_lines": 133
} |
__author__ = 'alberto'
import sys
import numpy as np
from string import Template
print sys.argv
def getDicionary(elements):
    """Map positional variable names "x0", "x1", ... to the given elements."""
    return {"x" + str(position): value for position, value in enumerate(elements)}
def generatetruthtable(nvariables, template):
    """Build a truth table of 2**nvariables rows and evaluate `template`.

    Columns 0..nvariables-1 hold "True"/"False" strings, column nvariables
    the substituted expression and the last column its eval() result.

    NOTE(review): this relies on Python 2 semantics — under Python 3 the
    'S1000' dtype yields bytes, which breaks Template.substitute/eval.
    eval() of file-derived text is also unsafe on untrusted input.
    """
    truthtable = np.zeros((2 ** nvariables, nvariables + 2), dtype='S1000')
    numberposibilites = 2 ** nvariables
    # Fill the variable columns with the alternating True/False pattern.
    for column in range(nvariables, 0, -1):
        for row in range(0, numberposibilites):
            if row % (2 ** column) < 2 ** (column - 1):
                truthtable[row, column - 1] = "True"
            else:
                truthtable[row, column - 1] = "False"
    # evaluating the template
    for row in range(0, numberposibilites):
        # true/false
        dictionary = getDicionary(truthtable[row, 0: nvariables])
        expression = Template(template).substitute(dictionary)
        truthtable[row, nvariables] = expression
        truthtable[row, nvariables + 1] = eval(expression)
        # truthtable[row, nvariables] = expession
    return truthtable
def readfile(pathtofile):
    """Parse a SAT instance file into a boolean sum-of-products template.

    Each line holds space-separated 0/1 terms: term i becomes "$xi" when 1
    or " not($xi) " when 0; a line's terms are AND-ed and all lines are
    OR-ed.  Every line must contain the same number of terms.

    Returns (file_object, n_variables, ok, message, template); on a format
    error the tuple is (None, 0, False, "Wrong format!", None).

    Bugs fixed: the original opened the file twice (leaking the first
    handle), shadowed the `file` builtin, and compared ints with `is`,
    which relies on CPython small-int caching.
    """
    format_ok = True
    expected_length = 0
    maxiterms = []
    with open(pathtofile, 'r') as sat_file:
        for line_number, line in enumerate(sat_file):
            terms = line.split(" ")
            clauses = []
            for index, term in enumerate(terms):
                if int(term) == 1:
                    clauses.append("$x" + str(index))
                if int(term) == 0:
                    clauses.append(" not($x" + str(index) + ") ")
            maxiterms.append("(" + " and ".join(clauses) + ")")
            if line_number == 0:
                # The first line fixes how many terms every line must have.
                expected_length = len(terms)
                continue
            if len(terms) != expected_length:
                format_ok = False
    if not format_ok:
        return None, 0, False, "Wrong format!", None
    return sat_file, expected_length, True, "File is ok!", " or ".join(maxiterms)
def satsolution(pathtofile):
    """Print the expression and full truth table for the SAT instance in
    `pathtofile`; prints the parse error message instead on bad format.
    Python 2 print statements."""
    file, nvariables, status, message, template = readfile(pathtofile)
    print "Expression to be evaluated : \n"
    print template
    print "-" * 60 + "\n"
    if status:
        truthtable = generatetruthtable(nvariables, template)
        for row in range(0, len(truthtable)):
            print truthtable[row]
        print "-" * 60 + "\n"
    else:
        print message
satsolution("./content.txt") | {
"repo_name": "jresendiz27/EvolutionaryComputing",
"path": "practices/first/booleanSatisfiabilityProblem/solution.py",
"copies": "2",
"size": "2730",
"license": "apache-2.0",
"hash": 6265369876759264000,
"line_mean": 31.1294117647,
"line_max": 75,
"alpha_frac": 0.5758241758,
"autogenerated": false,
"ratio": 4.038461538461538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00042016806722689073,
"num_lines": 85
} |
__author__ = 'alberto'
def create_and_add_mapping(connection, index_name, type_name):
    """Recreate `index_name` with an explicit pyes mapping for `type_name`
    plus an empty child type `type_name + "2"` parented to it."""
    from pyes.mappings import DocumentObjectField
    from pyes.mappings import IntegerField
    from pyes.mappings import NestedObject
    from pyes.mappings import StringField, DateField
    from pyes.helpers import SettingsBuilder
    #we try to delete index if exists
    try:
        connection.indices.delete(index_name)
    except:
        pass
    settings = SettingsBuilder()
    docmapping = DocumentObjectField(name=type_name)
    docmapping.add_property(
        StringField(name="parsedtext", store=True, term_vector="with_positions_offsets", index="analyzed"))
    docmapping.add_property(
        StringField(name="name", store=True, term_vector="with_positions_offsets", index="analyzed"))
    docmapping.add_property(
        StringField(name="title", store=True, term_vector="with_positions_offsets", index="analyzed"))
    docmapping.add_property(IntegerField(name="position", store=True))
    docmapping.add_property(DateField(name="date", store=True))
    docmapping.add_property(StringField(name="uuid", store=True, index="not_analyzed"))
    nested_object = NestedObject(name="nested")
    nested_object.add_property(StringField(name="name", store=True))
    nested_object.add_property(StringField(name="value", store=True))
    nested_object.add_property(IntegerField(name="num", store=True))
    docmapping.add_property(nested_object)
    settings.add_mapping(docmapping)
    # Child mapping: documents of this type declare a _parent of `type_name`.
    docmapping = DocumentObjectField(name=type_name+"2", _parent={"type": type_name})
    settings.add_mapping(docmapping)
    connection.ensure_index(index_name, settings)
def populate(connection, index_name, type_name):
    """Index three parent docs and two child docs (some buffered with
    bulk=True), then refresh the index."""
    connection.index(doc={"name": "Joe Tester", "parsedtext": "Joe Testere nice guy", "uuid": "11111", "position": 1},
                     index=index_name, doc_type=type_name, id=1)
    connection.index(doc={"name": "data1", "value": "value1"}, index=index_name, doc_type=type_name + "2", id=1, parent=1)
    connection.index(doc={"name": "Bill Baloney", "parsedtext": "Bill Testere nice guy", "uuid": "22222", "position": 2},
                     index=index_name, doc_type=type_name, id=2, bulk=True)
    connection.index(doc={"name": "data2", "value": "value2"}, index=index_name, doc_type=type_name + "2", id=2, parent=2,
                     bulk=True)
    connection.index(doc={"name": "Bill Clinton", "parsedtext": """Bill is not
            nice guy""", "uuid": "33333", "position": 3}, index=index_name, doc_type=type_name, id=3, bulk=True)
    # Flush the bulk buffer and make all documents searchable.
    connection.indices.refresh(index_name)
| {
"repo_name": "aparo/elasticsearch-cookbook-second-edition",
"path": "chapter_11/utils_pyes.py",
"copies": "2",
"size": "2585",
"license": "bsd-2-clause",
"hash": 5617987258648719000,
"line_mean": 48.7115384615,
"line_max": 122,
"alpha_frac": 0.6912959381,
"autogenerated": false,
"ratio": 3.4932432432432434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5184539181343244,
"avg_score": null,
"num_lines": null
} |
__author__ = "alberto"
'''
Fitnesses
@author: azu
'''
#
import math
import random
import numpy as np
#
maxGenerations = 1000
sigma = [np.float(0) for i in range(0, maxGenerations + 1)]
sigma[0] = 10
epsilon = 0.00001
mu = 10
lamb = 10
a = [[-32, -16, 0, 16, 32, -32, -16, 0, 16, 32, -32, -16, 0, 16, 32, -32, -16, 0, 16, 32, -32, -16, 0, 16, 32],
[-32, -32, -32, -32, -32, -16, -16, -16, -16, -16, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 32, 32, 32, 32, 32]]
# --- One-dimensional benchmark objective functions.  `x` is a sequence of
# --- design variables; the expected arity of each function is recorded in
# --- the numberOfVariables table at the bottom of the module.
def fitness0(x):
    return ((1 - x[0]) ** 4 - (2 * x[0] + 10) ** 2)
def fitness1(x):
    # Raises ZeroDivisionError at x[0] == 0 (atan(1/x) form).
    return -0.75 / (1 + x[0] ** 2) - (0.65 * x[0] * math.atan(1 / x[0])) + 0.65
def fitness2(x):
    return (-4 * x[0] ** 2 - 20 * x[0] - 100) + (1 - x[0]) ** 4
def fitness3(x):
    # Undefined at x[0] == 0 (12 / x**3 term).
    return 3 * x[0] ** 2 + 12 / (x[0] ** 3) - 5
def fitness4(x):
    return 3 * x[0] ** 4 + x[0] ** 2 - 2 * x[0] + 1
def fitness5(x):
    # np.finfo(float).eps (~2.2e-16) is used here as an exponential base.
    return 10 + x[0] ** 3 - 2 * x[0] - 5 * (np.finfo(float).eps) ** x[0]
def fitness6(x):
    return x[0] ** 2 - 10 * (np.finfo(float).eps) ** (0.1 * x[0])
def fitness7(x):
    return (10 * x[0] ** 3 + 3 * x[0] ** 2 + 5) ** 2
def fitness8(x):
    return 0.5 / math.sqrt(1 + (x[0] ** 2)) - math.sqrt(1 + (x[0] ** 2)) * (1 - (0.5 / (1 + (x[0] ** 2)))) + x[0]
def fitness9(x):
    return (np.finfo(float).eps) ** x[0] - x[0] ** 3
def fitness10(x):
    return (x[0] ** 2 - 1) ** 3 - (2 * x[0] - 5) ** 4
def fitness11(x):
    # Identical formula to fitness2.
    return (-4 * x[0] ** 2 - 20 * x[0] - 100) + (1 - x[0]) ** 4
def fitness12(x):
return (x[0] ** 2 + (x[1] + 1) ** 2) * (x[0] ** 2 + (x[1] - 1) ** 2)
def fitness13(x):
return (x[0] ** 2 - x[1]) ** 2 + x[1] ** 2
def fitness14(x):
return 50 * (x[1] - x[0] ** 2) ** 2 + (2 - x[0]) ** 2
def fitness15(x):
return (x[0] + 2 * x[1] - 7) ** 2 + (2 * x[0] + x[1] - 5) ** 2
def fitness16(x):
return (1.5 - x[0] * (1 - x[1])) ** 2 + (2.25 - x[0] * (1 - x[1] ** 2)) ** 2 + (2.625 - x[0] * (1 - x[1] ** 3)) ** 2
def fitness17(x):
    # Four-variable Wood/Colville-style test function.  Reformatted from the
    # autoformatter-mangled original; the expression and its left-to-right
    # evaluation order are unchanged.
    return ((10 * (x[1] - x[0] ** 2)) ** 2 + (1 - x[0]) ** 2
            + 90 * (x[3] - x[2] ** 2) ** 2 + (1 - x[2]) ** 2
            + 10 * (x[1] + x[3] - 2) ** 2 + 0.1 * (x[1] - x[3]))
def fitness18(x):
    # six-hump camel-back function
    return (4 - 2.1 * x[0] ** 2 + (x[0] ** 4) / 3) * x[0] ** 2 + x[0] * x[1] + (-4 + 4 * x[1] ** 2) * x[1] ** 2
def fitness19(x):
    # Powell's quartic function (4 variables)
    return (x[0] + 10 * x[1]) ** 2 + 5 * (x[2] - x[3]) ** 2 + (x[1] - 2 * x[2]) ** 4 + 10 * (x[0] - x[3]) ** 4
def fitness20(x):
    # sphere model, 3 variables
    return x[0] ** 2 + x[1] ** 2 + x[2] ** 2
def fitness21(x):
    # classic Rosenbrock function
    return 100 * (x[0] ** 2 - x[1]) ** 2 + (1 - x[0]) ** 2
def fitness22(x):
    # step function (floors of 5 variables)
    return math.floor(x[0]) + math.floor(x[1]) + math.floor(x[2]) + math.floor(x[3]) + math.floor(x[4])
def fitness23(x):
    # quartic with Gaussian noise; stochastic by design.
    # NOTE(review): range(1, 30) sums 29 terms although numberOfVariables
    # declares 30 -- the last variable is never used; confirm intended.
    suma = 0
    for i in range(1, 30):
        suma += (i * x[i - 1] ** 4)
    return suma + random.gauss(0, 1)
def fitness24(x):
    # Shekel's-foxholes-style function built on matrix `a`.
    # NOTE(review): j runs 1..24, so column 0 of `a` is skipped and only 24 of
    # the 25 foxholes are summed; also, under Python 2 the literal 1 / 500 is
    # integer division (0), dropping the 0.002 offset -- confirm interpreter
    # and intent before relying on exact values.
    superSuma = 0
    for j in range(1, 25):
        superSuma += 1 / f2(j, x)
    return 1 / (1 / 500 + superSuma)
def f2(j, x):
    # helper for fitness24: j + sum_i (x[i] - a[i][j]) ** 6
    suma = 0
    i = 0
    suma += (x[0] - a[i][j]) ** 6
    i = 1
    suma += (x[1] - a[i][j]) ** 6
    return j + suma
def fitness25(x):
    # placeholder (always 0)
    return 0
def fitness26(x):
    # placeholder (always 0)
    return 0
# number of decision variables each fitness function expects
numberOfVariables = {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 2, 13: 2, 14: 2,
                     15: 2, 16: 2,
                     17: 4, 18: 2, 19: 4, 20: 3, 21: 2, 22: 5, 23: 30, 24: 2, 25: 1, 26: 1}
# dispatch table: function id -> callable
function = {0: fitness0, 1: fitness1, 2: fitness2, 3: fitness3, 4: fitness4, 5: fitness5, 6: fitness6, 7: fitness7,
            8: fitness8, 9: fitness9, 10: fitness10, 11: fitness11, 12: fitness12, 13: fitness13, 14: fitness14,
            15: fitness15, 16: fitness16, 17: fitness17, 18: fitness18, 19: fitness19, 20: fitness20, 21: fitness21,
            22: fitness22, 23: fitness23, 24: fitness24, 25: fitness25, 26: fitness26}
| {
"repo_name": "pepo27/EvolutionaryComputing",
"path": "practices/first/evolutionaryStrategies/fitness.py",
"copies": "2",
"size": "4158",
"license": "apache-2.0",
"hash": -8477773472258619000,
"line_mean": 25.6538461538,
"line_max": 143,
"alpha_frac": 0.4155844156,
"autogenerated": false,
"ratio": 2.3151447661469935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8724004200577942,
"avg_score": 0.0013449962338102704,
"num_lines": 156
} |
__author__ = 'alberto'
from escom.pepo.config import POPULATION_SIZE
from escom.pepo.config import random
# Returns the index of the most suitable
def roulette_selector(fitness):
    """Fitness-proportionate (roulette-wheel) selection.

    Scales each fitness by the population average so the expected values sum
    to POPULATION_SIZE, draws a uniform threshold in [0, POPULATION_SIZE),
    and returns the index whose cumulative expected value first reaches it.
    """
    fitness_sum = sum(fitness)
    fitness_average = fitness_sum / POPULATION_SIZE
    expected_values = [single_fitness / fitness_average
                       for single_fitness in fitness]
    random_number = random.random() * POPULATION_SIZE  # same as random.random()*T
    partial_sum = 0.0
    for index, expected_value in enumerate(expected_values):
        partial_sum = partial_sum + expected_value
        if partial_sum >= random_number:
            return index
    # Floating-point rounding can leave partial_sum fractionally below the
    # threshold after the last element; the original then fell off the loop
    # and implicitly returned None.  Fall back to the last index instead.
    return len(fitness) - 1
# Returns the index of the most suitable
def strict_selector(fitness, asc=False):
    """Elitist selection: return the index of the best fitness value.

    asc=True selects the minimum, otherwise the maximum.  If the best value
    occurs more than once, the LAST occurrence wins (behaviour preserved
    from the original loop, which never breaks early).
    """
    selected_value = min(fitness) if asc else max(fitness)
    index = 0
    for i in range(len(fitness)):
        # The original compared with 'is' (object identity), which is only
        # accidentally correct for interned small ints and breaks for floats
        # or larger ints; compare by value instead.
        if fitness[i] == selected_value:
            index = i
    return index
# Return a random index
def random_selector(fitness):
    """Uniformly random selection over valid indices.

    Fix: the original used random.randint(0, len(fitness)), whose upper
    bound is INCLUSIVE, so it could return len(fitness) -- an out-of-range
    index.  randrange excludes the upper bound.
    """
    return random.randrange(len(fitness))
"repo_name": "pepo27/EvolutionaryComputing",
"path": "escom/pepo/genetic_algorithms/components/selectors.py",
"copies": "2",
"size": "1180",
"license": "apache-2.0",
"hash": -859248969881603800,
"line_mean": 26.4651162791,
"line_max": 82,
"alpha_frac": 0.6601694915,
"autogenerated": false,
"ratio": 3.6196319018404908,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5279801393340491,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alberto'
# Para poder mostrar en pantalla el proceso
debug = False
#
from pylab import *
from sympy import solve
from sympy.abc import x
from practices.first.evolutionaryStrategies.PureFunctions import function as function_database
# Constant values
RAW_VALUES = True
IMAGE_PATH = './latex/images/'
X_MIN = -127
X_MAX = 127
X_STEP = 0.1
# Builds and saves a plot of an exercise's function plus its solution point.
# Expected kwargs: 'point' (solution coordinates as nested sequences),
# 'number_of_variables', 'function_id' (index into function_database),
# 'name' (exercise label used for the title/filename).
# Returns (True, "<name>.png") on success, (False, None) otherwise.
# NOTE(review): Python 2 code (`print e` below); kept byte-identical because
# the plotting logic has several latent issues flagged inline.
def imageMaker(**kwargs):
    # Solution coordinates; z only exists for 2-variable functions
    x_coordinate = kwargs['point'][0][0]
    y_coordinate = kwargs['point'][1][0]
    try:
        z_coordinate = kwargs['point'][2][0]
    except Exception as e:
        z_coordinate = None
    # Reject non-finite solutions (z may be None here, which compares unequal)
    if x_coordinate == float('inf') or x_coordinate == float('-inf'):
        return False, None
    if y_coordinate == float('inf') or y_coordinate == float('-inf'):
        return False, None
    if z_coordinate == float('inf') or z_coordinate == float('-inf'):
        return False, None
    # check which type of plot we will use (2D curve vs 3D surface)
    if kwargs['number_of_variables'] < 2:
        # Looking for discontinuities of the function via sympy's solve
        discontinuities = sort(solve(function_database[kwargs['function_id']], x))
        # plot piecewise from X_MIN up to each discontinuity
        last_b = X_MIN
        for b in discontinuities:
            # check that this discontinuity is inside our range, also make sure it's real.
            # NOTE(review): `str(b).isdigit` is missing the call parentheses --
            # the bound method is always truthy, so `not (...)` is always False
            # and the "is real" guard never fires.  Confirm and fix upstream.
            if b < last_b or b > X_MAX or not (str(b).isdigit):
                continue
            # generate a valid range up to the discontinuity
            xi = np.arange(last_b, b, X_STEP)
            # drop the last element: it is the discontinuity itself.
            # NOTE(review): the plotted expression 1/(x-2) is hard-coded and
            # does not use function_database -- presumably a leftover; verify.
            plot(xi[:-1], 1. / (xi[:-1] - 2), 'r-')
            last_b = float(str(b))
        # from the last discontinuity to X_MAX
        xi = np.arange(last_b, X_MAX, X_STEP)
        # drop the first element: it is the discontinuity itself
        plot(xi[1:], 1. / (xi[1:] - 2), 'r-')
        points = [kwargs['point'][0], kwargs['point'][1]]
        # plot the solution point on top of the curve
        plt.plot(*zip(points), marker="o")
    # NOTE(review): identity comparison with an int literal; works in CPython
    # because small ints are cached, but should be `== 2`.
    if kwargs['number_of_variables'] is 2:
        fig = figure()
        ax = fig.gca(projection='3d')
        X = np.arange(-2, 2, 0.25)
        Y = np.arange(-2, 2, 0.25)
        X, Y = np.meshgrid(X, Y)
        Z = function_database[kwargs['function_id']](X, Y)
        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
                               linewidth=0, antialiased=False)
        ax.zaxis.set_major_locator(LinearLocator(10))
    # Try to save the image.
    # NOTE(review): `fig` is only bound in the 2-variable branch above, so for
    # 1-variable runs fig.suptitle raises NameError, which is swallowed by the
    # except below and silently returns (False, None) -- confirm intended.
    try:
        # Title each image with the exercise label
        fig.suptitle('Excersice ' + str(kwargs['name']))
        # Save under IMAGE_PATH
        plt.savefig(IMAGE_PATH + str(kwargs['name']) + '.png')
        return True, str(kwargs['name']) + '.png'
    except Exception as e:
        print e
        return False, None
"repo_name": "jresendiz27/EvolutionaryComputing",
"path": "practices/first/evolutionaryStrategies/ImageMaker.py",
"copies": "2",
"size": "2938",
"license": "apache-2.0",
"hash": -8597609945121923000,
"line_mean": 38.1866666667,
"line_max": 98,
"alpha_frac": 0.5953029272,
"autogenerated": false,
"ratio": 3.518562874251497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5113865801451497,
"avg_score": null,
"num_lines": null
} |
__author__='alberto.rincon.borreguero@gmail.com'
"""
"""
from flask import Flask, render_template, request, redirect, url_for, session
from svd_recs import SVDRecommender
import utils
import os
app = Flask(__name__)
@app.route('/registration', methods=['POST', 'GET'])
def signup():
    """Show the registration form; on POST persist the new user and
    send them to the login page."""
    if request.method != 'POST':
        return render_template('registration.html')
    new_user = {'name': request.form['first_name'],
                'surname': request.form['last_name'],
                'email': request.form['email'],
                'password': request.form['pass']}
    utils.save_user(new_user)
    return redirect(url_for('login'))
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Authenticate a user and redirect to the recommendations page.

    Fix: the original called redirect(url_for(...)) without ``return`` in
    both the already-logged-in branch and the successful-POST branch, so the
    redirect responses were built, discarded, and every request fell through
    to the login template.
    """
    if 'userid' in session:
        print(session)
        # already authenticated -> straight to recommendations
        return redirect(url_for('get_recommendations'))
    elif request.method == 'POST':
        print("post")
        user_id = utils.authenticate(request.form['email'], request.form['password'])
        print(user_id)
        if user_id:
            session['email'] = request.form['email']
            session['userid'] = user_id
            return redirect(url_for('get_recommendations'))
    # GET request, or authentication failed
    return render_template('login.html')
@app.route('/main')
def get_recommendations():
    """Render the main page with SVD-based movie recommendations for the
    user stored in the session."""
    collaborative_recs = SVDRecommender().recommend(session['userid'])
    movies_id = [c[0] for c in collaborative_recs]
    # Materialise the maps: the results are indexed below (recs[index]),
    # which fails on a Python 3 map object.  list() is a no-op change on
    # Python 2, where map already returned a list.
    recs = list(map(utils.get_title, movies_id))
    images = list(map(utils.get_imdb_images, movies_id))
    movies = [(image, recs[index]) for index, image in enumerate(images)]
    return render_template('main.html', recs=recs, movies=movies, userid=session['userid'])
app.secret_key = os.environ.get('FLASK_SECRET')
| {
"repo_name": "albertorb/movies-for-recommender",
"path": "movies.py",
"copies": "1",
"size": "1664",
"license": "apache-2.0",
"hash": 6075577523566009000,
"line_mean": 32.9591836735,
"line_max": 91,
"alpha_frac": 0.6298076923,
"autogenerated": false,
"ratio": 3.570815450643777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4700623142943777,
"avg_score": null,
"num_lines": null
} |
__author__ = 'albmin'
import json
import os
"""
Initializer that will return a json schema object
"""
#TODO add add functionality separately (from a shell or program) with a persist option in the fn call
class Schema():
    """JSON key re-mapper.

    Loads an input JSON document and a schema-map JSON document, and builds
    a copy of the input whose keys are renamed through the map.
    """

    # The remapped dictionary; populated by __init__.
    dict = None

    def __init__(self, input, schema_map):
        """Read both JSON files and build the remapped dict.

        input      -- path to the JSON document to remap
        schema_map -- path to a JSON object mapping old key -> new key
        """
        inp = self.read_json(input)
        dmap = self.read_json(schema_map)
        self.dict = self.json_map(inp, dmap)

    def read_json(self, file):
        """Read a JSON file and return the parsed object.

        Aborts the process (exit()) if the file is missing or not valid
        JSON -- behaviour preserved from the original.
        """
        if not os.path.isfile(file):
            print('File does not exist in this context, aborting')
            exit()
        try:
            with open(file) as f:
                return json.load(f)
        except ValueError:
            print('Error processing file, aborting')
            exit()

    def json_map(self, inp, dmap):
        """Return a copy of *inp* with keys renamed through *dmap*.

        Keys absent from the map are carried over unchanged.
        """
        output = {}
        for key in inp:
            if key in dmap:
                output[dmap[key]] = inp[key]
            else:
                output[key] = inp[key]
        return output

    def to_file(self, file_name, new_lines=False):
        """Write the mapped dict to *file_name* (overwrites the file).

        With new_lines=False the dict is dumped as compact JSON.  With
        new_lines=True a one-entry-per-line pseudo-JSON format is written.
        Fix (was a FIXME): the original append-loop emitted a trailing
        comma after the last entry; joining the entries avoids it.
        """
        if not new_lines:
            with open(file_name, 'w') as f:
                f.write(json.dumps(self.dict))
        else:
            with open(file_name, 'w') as f:
                # NOTE: unicode() is Python 2; this module is py2 code.
                # Coercion of non-string values into text is intentional here.
                entries = [key + u' : ' + unicode(self.dict[key])
                           for key in self.dict]
                f.write(u'{' + u',\n'.join(entries) + u'\n}')
| {
"repo_name": "albmin/json_mapper",
"path": "schema.py",
"copies": "1",
"size": "2224",
"license": "mit",
"hash": -1809691296005813800,
"line_mean": 30.7714285714,
"line_max": 101,
"alpha_frac": 0.5683453237,
"autogenerated": false,
"ratio": 4.080733944954129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5149079268654129,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alby Chaj and Alex Frank'
import numpy as np
import datetime as dt
import time
def calc_cma(filename, size):
    """
    Compute the centered moving average (CMA) of the per-image norm values
    for the top and bottom amplifiers of the image data set.

    Parameters
        filename (str) -- dat file; each line is
                          <fits_name> <bottom_norm> <top_norm>
        size (int) -- smoothing window size (between 10-20)

    Returns
        list of dicts with the image name, its timestamp, and the two
        smoothed norm values
    """
    names, stamps, btm, top = [], [], [], []
    with open(filename) as dat_file:
        for record in dat_file:
            fields = record.split()
            names.append(fields[0])
            # characters 4..19 of the FITS name encode the timestamp
            stamps.append(fields[0][4:20])
            btm.append(float(fields[1]))
            top.append(float(fields[2]))
    # first pass: uniform window of the requested size
    window = np.ones(int(size)) / float(size)
    btm_smoothed = np.convolve(btm, window, 'same')
    top_smoothed = np.convolve(top, window, 'same')
    # an even window is off-centre, so smooth once more with a 2-wide window
    if size % 2 == 0:
        window = np.ones(2) / float(2)
        btm_smoothed = np.convolve(btm_smoothed, window, 'same')
        top_smoothed = np.convolve(top_smoothed, window, 'same')
    return [{"sci_filename": names[i],
             "timestamp": stamps[i],
             "smoothed_btm_norm": btm_smoothed[i],
             "smoothed_top_norm": top_smoothed[i]}
            for i in range(len(names))]
def shift_cma(norm_filename, smoothed_sciences):
    """
    This function adjusts the science smoothed norm values based on the norm values
    of the dark images.
    Parameters
        norm_filename (str) -- name of dat file containing list of FITS filenames for dark images and
                           the norm values for the image's top and bottom amplifiers
        smoothed_sciences (list) -- the returned result of calc_cma
    Returns
        dict of the form:
        { sci_filename: { "bottom_norm": smoothed and scaled norm value, "top_norm": smoothed and scaled norm value } }

    NOTE(review): this module lives in deprecated/ and this function has
    several latent bugs, flagged inline below.  Kept byte-identical.
    """
    dark_filenames = []
    dark_timestamps = []
    dark_bottom_norms = []
    dark_top_norms = []
    with open(norm_filename) as dark_norm_file:
        for line in dark_norm_file:
            dark_image_data = line.split()
            dark_filenames.append(dark_image_data[0])
            # characters 4..19 of the FITS name encode the timestamp
            dark_timestamps.append(dark_image_data[0][4:20])
            dark_bottom_norms.append(float(dark_image_data[1]))
            dark_top_norms.append(float(dark_image_data[2]))
    # convert all timestamps to seconds since epoch
    dark_time_in_seconds = []
    for t in dark_timestamps:
        year = int(t[0:4])
        month = int(t[4:6])
        day = int(t[6:8])
        hours = int(t[8:10])
        minutes = int(t[10:12])
        seconds = int(t[12:14])
        partial_seconds = int(t[14:20])
        dark_time = dt.datetime(year, month, day, hours, minutes, seconds)
        # mktime interprets the tuple in LOCAL time -- presumably acceptable
        # since only differences between timestamps are used; confirm.
        dark_time = time.mktime(dark_time.timetuple()) + partial_seconds*(10**(-6))
        # dark_time is now in seconds since epoch
        dark_time_in_seconds.append(dark_time)
    sci_time_in_seconds = []
    for j in range(len(smoothed_sciences)):
        # NOTE(review): calc_cma returns a list of DICTS, so attribute access
        # like smoothed_sciences[j].timestamp raises AttributeError at runtime;
        # these should presumably be ['timestamp'] lookups.  Same pattern
        # repeats throughout the rest of the function.
        year = int(smoothed_sciences[j].timestamp[0:4])
        month = int(smoothed_sciences[j].timestamp[4:6])
        day = int(smoothed_sciences[j].timestamp[6:8])
        hours = int(smoothed_sciences[j].timestamp[8:10])
        minutes = int(smoothed_sciences[j].timestamp[10:12])
        seconds = int(smoothed_sciences[j].timestamp[12:14])
        partial_seconds = int(smoothed_sciences[j].timestamp[14:20])
        sci_time = dt.datetime(year, month, day, hours, minutes, seconds)
        sci_time = time.mktime(sci_time.timetuple()) + partial_seconds*(10**(-6))
        # sci_time is now in seconds since epoch
        sci_time_in_seconds.append(sci_time)
    # calculate deltas
    deltas = []
    j = 0
    for i in range(len(dark_time_in_seconds)):
        found = 0
        # NOTE(review): `&` binds tighter than `==`/`<`, so this parses as the
        # chained comparison `found == (0 & j) < len(...)`, i.e. the j-bounds
        # guard is effectively lost and j can index past the end.  The intent
        # was presumably `found == 0 and j < len(sci_time_in_seconds)`.
        while found == 0 & j < len(sci_time_in_seconds):
            if abs(sci_time_in_seconds[j] - dark_time_in_seconds[i]) < abs(sci_time_in_seconds[j + 1] - dark_time_in_seconds[i]):
                found = 1
            j += 1
        # NOTE(review): attribute names here ('smoothed_bottom_norm') do not
        # even match calc_cma's keys ('smoothed_btm_norm') -- confirm.
        deltas.append({
            "bottom": smoothed_sciences[j].smoothed_bottom_norm - dark_bottom_norms[i],
            "top": smoothed_sciences[j].smoothed_top_norm - dark_top_norms[i]
        })
    # shift smoothed norms
    result = {}
    j = 0
    for i in range(len(dark_time_in_seconds)):
        found = 0
        # NOTE(review): same `&` precedence issue as above.
        while found == 0 & i < len(dark_time_in_seconds):
            if abs(dark_time_in_seconds[i] - sci_time_in_seconds[j]) < abs(dark_time_in_seconds[i+1] - sci_time_in_seconds[j]):
                result[smoothed_sciences[j].sci_filename] = {
                    "bottom_norm": smoothed_sciences[j].smoothed_btm_norm - deltas[i].bottom,
                    "top_norm": smoothed_sciences[j].smoothed_top_norm - deltas[i].top
                }
                j += 1
            else:
                found = 1
    # special case for the last dark and the last few sciences
    while j <= len(sci_time_in_seconds):
        # NOTE(review): `result[smoothed_sciences[j]].sci_filename = ...` uses
        # a dict as a key and then sets an attribute -- almost certainly a
        # typo for result[smoothed_sciences[j].sci_filename] = {...}.
        result[smoothed_sciences[j]].sci_filename = {
            "bottom_norm": smoothed_sciences[j].smoothed_btm_norm - deltas[i].bottom,
            "top_norm": smoothed_sciences[j].smoothed_top_norm - deltas[i].top
        }
        j += 1
    return result
def main(sci_filename, size, dark_filename):
    """Convenience wrapper around calc_cma and shift_cma.

    Parameters
        sci_filename  -- calc_cma's "filename" argument
        size          -- calc_cma's "size" argument
        dark_filename -- shift_cma's "norm_filename" argument
    Returns
        the dict produced by shift_cma
    """
    smoothed = calc_cma(sci_filename, size)
    return shift_cma(dark_filename, smoothed)
"repo_name": "acic2015/findr",
"path": "deprecated/calc_cma.py",
"copies": "1",
"size": "6071",
"license": "mit",
"hash": 6950851090556844000,
"line_mean": 35.1428571429,
"line_max": 129,
"alpha_frac": 0.5956185142,
"autogenerated": false,
"ratio": 3.479083094555874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45747016087558734,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aleaf'
import sys
sys.path.append('/Users/aleaf/Documents/GitHub/flopy3')
import os
import glob
import shutil
import numpy as np
try:
import matplotlib
if os.getenv('TRAVIS'): # are we running https://travis-ci.org/ automated tests ?
matplotlib.use('Agg') # Force matplotlib not to use any Xwindows backend
except:
matplotlib = None
import flopy
fm = flopy.modflow
from flopy.utils.sfroutputfile import SfrFile
if os.path.split(os.getcwd())[-1] == 'flopy3':
path = os.path.join('examples', 'data', 'mf2005_test')
path2 = os.path.join('examples', 'data', 'sfr_test')
outpath = os.path.join('py.test/temp')
else:
path = os.path.join('..', 'examples', 'data', 'mf2005_test')
path2 = os.path.join('..', 'examples', 'data', 'sfr_test')
outpath = os.path.join('temp', 't009')
# make the directory if it does not exist
if not os.path.isdir(outpath):
os.makedirs(outpath)
sfr_items = {0: {'mfnam': 'test1ss.nam',
'sfrfile': 'test1ss.sfr'},
1: {'mfnam': 'test1tr.nam',
'sfrfile': 'test1tr.sfr'},
2: {'mfnam': 'testsfr2_tab.nam',
'sfrfile': 'testsfr2_tab_ICALC1.sfr'},
3: {'mfnam': 'testsfr2_tab.nam',
'sfrfile': 'testsfr2_tab_ICALC2.sfr'},
4: {'mfnam': 'testsfr2.nam',
'sfrfile': 'testsfr2.sfr'},
5: {'mfnam': 'UZFtest2.nam',
'sfrfile': 'UZFtest2.sfr'},
6: {'mfnam': 'TL2009.nam',
'sfrfile': 'TL2009.sfr'}
}
def sfr_process(mfnam, sfrfile, model_ws, outfolder=outpath):
    """Round-trip check for an SFR package: load the model, rewrite its SFR
    file into *outfolder*, reload the rewritten file, and assert the
    reloaded package reproduces every data table exactly.
    Returns (model, original_sfr_package)."""
    m = flopy.modflow.Modflow.load(mfnam, model_ws=model_ws, verbose=False)
    sfr = m.get_package('SFR')
    if not os.path.exists(outfolder):
        os.makedirs(outfolder)
    # local name deliberately shadows the module-level outpath
    outpath = os.path.join(outfolder, sfrfile)
    sfr.write_file(outpath)
    m.remove_package('SFR')
    sfr2 = flopy.modflow.ModflowSfr2.load(outpath, m)
    # written-then-reloaded data must match the original field-for-field
    assert np.all(sfr2.reach_data == sfr.reach_data)
    assert np.all(sfr2.dataset_5 == sfr.dataset_5)
    for k, v in sfr2.segment_data.items():
        assert np.all(v == sfr.segment_data[k])
    for k, v in sfr2.channel_flow_data.items():
        assert np.all(v == sfr.channel_flow_data[k])
    for k, v in sfr2.channel_geometry_data.items():
        assert np.all(v == sfr.channel_geometry_data[k])
    return m, sfr
def load_sfr_only(sfrfile):
    """Load a single SFR file into a fresh, empty MODFLOW model."""
    model = flopy.modflow.Modflow()
    sfr_pkg = flopy.modflow.ModflowSfr2.load(sfrfile, model)
    return model, sfr_pkg
def load_all_sfr_only(path):
    """Smoke-load every SFR file registered in sfr_items."""
    for entry in sfr_items.values():
        load_sfr_only(os.path.join(path, entry['sfrfile']))
def interpolate_to_reaches(sfr):
reach_data = sfr.reach_data
segment_data = sfr.segment_data[0]
for reachvar, segvars in {'strtop': ('elevup', 'elevdn'),
'strthick': ('thickm1', 'thickm2'),
'strhc1': ('hcond1', 'hcond2')}.items():
reach_data[reachvar] = sfr._interpolate_to_reaches(*segvars)
for seg in segment_data.nseg:
reaches = reach_data[reach_data.iseg == seg]
dist = np.cumsum(reaches.rchlen) - 0.5 * reaches.rchlen
fp = [segment_data[segment_data['nseg'] == seg][segvars[0]][0],
segment_data[segment_data['nseg'] == seg][segvars[1]][0]]
xp = [dist[0], dist[-1]]
assert np.sum(np.abs(
reaches[reachvar] - np.interp(dist, xp, fp).tolist())) < 0.01
return reach_data
def test_sfr():
load_all_sfr_only(path2)
m, sfr = sfr_process('test1ss.nam', 'test1ss.sfr', path)
m, sfr = sfr_process('test1tr.nam', 'test1tr.sfr', path)
# assert list(sfr.dataset_5.keys()) == [0, 1]
m, sfr = sfr_process('testsfr2_tab.nam', 'testsfr2_tab_ICALC1.sfr', path)
assert list(sfr.dataset_5.keys()) == list(range(0, 50))
m, sfr = sfr_process('testsfr2_tab.nam', 'testsfr2_tab_ICALC2.sfr', path)
assert sfr.channel_geometry_data[0][1] == [
[0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0],
[6.0, 4.5, 3.5, 0.0, 0.3, 3.5, 4.5, 6.0]]
m, sfr = sfr_process('testsfr2.nam', 'testsfr2.sfr', path)
assert round(sum(sfr.segment_data[49][0]), 7) == 3.9700007
m, sfr = sfr_process('UZFtest2.nam', 'UZFtest2.sfr', path)
if matplotlib is not None:
assert isinstance(sfr.plot()[0],
matplotlib.axes.Axes) # test the plot() method
# trout lake example (only sfr file is included)
# can add tests for sfr connection with lak package
m, sfr = load_sfr_only(os.path.join(path2, 'TL2009.sfr'))
# convert sfr package to reach input
sfr.reachinput = True
sfr.isfropt = 1
sfr.reach_data = interpolate_to_reaches(sfr)
sfr.get_slopes()
assert sfr.reach_data.slope[29] == (sfr.reach_data.strtop[29] -
sfr.reach_data.strtop[107]) \
/ sfr.reach_data.rchlen[29]
chk = sfr.check()
assert sfr.reach_data.slope.min() < 0.0001 and 'minimum slope' in chk.warnings
sfr.reach_data.slope[0] = 1.1
chk.slope(maximum_slope=1.0)
assert 'maximum slope' in chk.warnings
def test_sfr_renumbering():
# test segment renumbering
r = np.zeros((27, 2), dtype=[('iseg', int), ('ireach', int)])
r = np.core.records.fromarrays(r.transpose(),
dtype=[('iseg', int), ('ireach', int)])
r['iseg'] = sorted(list(range(1, 10)) * 3)
r['ireach'] = [1, 2, 3] * 9
d = np.zeros((9, 2), dtype=[('nseg', int), ('outseg', int)])
d = np.core.records.fromarrays(d.transpose(),
dtype=[('nseg', int), ('outseg', int)])
d['nseg'] = range(1, 10)
d['outseg'] = [4, 0, 6, 8, 3, 8, 1, 2, 8]
m = flopy.modflow.Modflow()
sfr = flopy.modflow.ModflowSfr2(m, reach_data=r, segment_data={0: d})
chk = sfr.check()
assert 'segment numbering order' in chk.warnings
sfr.renumber_segments()
chk = sfr.check()
assert 'continuity in segment and reach numbering' in chk.passed
assert 'segment numbering order' in chk.passed
def test_example():
m = flopy.modflow.Modflow.load('test1ss.nam', version='mf2005',
exe_name='mf2005.exe',
model_ws=path,
load_only=['ghb', 'evt', 'rch', 'dis',
'bas6', 'oc', 'sip', 'lpf'])
reach_data = np.genfromtxt(
'../examples/data/sfr_examples/test1ss_reach_data.csv', delimiter=',',
names=True)
segment_data = np.genfromtxt(
'../examples/data/sfr_examples/test1ss_segment_data.csv',
delimiter=',', names=True)
# segment_data = {0: ss_segment_data}
channel_flow_data = {
0: {1: [[0.5, 1.0, 2.0, 4.0, 7.0, 10.0, 20.0, 30.0, 50.0, 75.0, 100.0],
[0.25, 0.4, 0.55, 0.7, 0.8, 0.9, 1.1, 1.25, 1.4, 1.7, 2.6],
[3.0, 3.5, 4.2, 5.3, 7.0, 8.5, 12.0, 14.0, 17.0, 20.0, 22.0]]}}
channel_geometry_data = {
0: {7: [[0.0, 10.0, 80.0, 100.0, 150.0, 170.0, 240.0, 250.0],
[20.0, 13.0, 10.0, 2.0, 0.0, 10.0, 13.0, 20.0]],
8: [[0.0, 10.0, 80.0, 100.0, 150.0, 170.0, 240.0, 250.0],
[25.0, 17.0, 13.0, 4.0, 0.0, 10.0, 16.0, 20.0]]}}
nstrm = len(reach_data) # number of reaches
nss = len(segment_data) # number of segments
nsfrpar = 0 # number of parameters (not supported)
nparseg = 0
const = 1.486 # constant for manning's equation, units of cfs
dleak = 0.0001 # closure tolerance for stream stage computation
ipakcb = 53 # flag for writing SFR output to cell-by-cell budget (on unit 53)
istcb2 = 81 # flag for writing SFR output to text file
dataset_5 = {0: [nss, 0, 0]} # dataset 5 (see online guide)
sfr = flopy.modflow.ModflowSfr2(m, nstrm=nstrm, nss=nss, const=const,
dleak=dleak, ipakcb=ipakcb, istcb2=istcb2,
reach_data=reach_data,
segment_data=segment_data,
channel_geometry_data=channel_geometry_data,
channel_flow_data=channel_flow_data,
dataset_5=dataset_5)
#assert istcb2 in m.package_units
assert istcb2 in m.output_units
assert True
# test handling of a 0-D array (produced by genfromtxt sometimes)
segment_data = np.array(segment_data[0])
sfr = flopy.modflow.ModflowSfr2(m, nstrm=nstrm, nss=nss, const=const,
dleak=dleak, ipakcb=ipakcb, istcb2=istcb2,
reach_data=reach_data,
segment_data=segment_data,
channel_geometry_data=channel_geometry_data,
channel_flow_data=channel_flow_data,
dataset_5=dataset_5)
def test_transient_example():
path = os.path.join('temp', 't009')
gpth = os.path.join('..', 'examples', 'data', 'mf2005_test', 'testsfr2.*')
for f in glob.glob(gpth):
shutil.copy(f, path)
mf = flopy.modflow
m = mf.Modflow.load('testsfr2.nam', model_ws=path)
# test handling of unformatted output file
m.sfr.istcb2 = -49
m.set_output_attribute(unit=abs(m.sfr.istcb2), attr={'binflag':True})
m.write_input()
m2 = mf.Modflow.load('testsfr2.nam', model_ws=path)
assert m2.sfr.istcb2 == -49
assert m2.get_output_attribute(unit=abs(m2.sfr.istcb2), attr='binflag')
def test_assign_layers():
m = fm.Modflow()
m.dis = fm.ModflowDis(nrow=1, ncol=6, nlay=7,
botm=np.array([[ 50., 49., 42., 27., 6., -33.],
[ -196., -246., -297., -351., -405., -462.],
[ -817., -881., -951., -1032., -1141., -1278.],
[-1305., -1387., -1466., -1546., -1629., -1720.],
[-2882., -2965., -3032., -3121., -3226., -3341.],
[-3273., -3368., -3451., -3528., -3598., -3670.],
[-3962., -4080., -4188., -4292., -4392., -4496.]]),
model=m)
reach_data = fm.ModflowSfr2.get_empty_reach_data(5)
seg_data = {0: fm.ModflowSfr2.get_empty_segment_data(1)}
seg_data[0]['outseg'] = 0
reach_data['k'] = 0
reach_data['i'] = 0
reach_data['j'] = np.arange(5)
reach_data['strtop'] = np.array([20, -250, 0., -3000., -4500.])
reach_data['strthick'] = 1.
sfr = fm.ModflowSfr2(reach_data=reach_data,
segment_data=seg_data,
model=m)
sfr.assign_layers()
assert np.array_equal(sfr.reach_data.k, np.array([1, 2, 1, 4, 6]))
l = m.dis.get_layer(0, 0, 0.)
assert l == 1
l = m.dis.get_layer(0, [0, 1], 0.)
assert np.array_equal(l, np.array([1, 1]))
def test_SfrFile():
sfrout = SfrFile('../examples/data/sfr_examples/sfroutput2.txt')
# will be None if pandas is not installed
if sfrout.pd is not None:
df = sfrout.get_dataframe()
assert df.layer.values[0] == 1
assert df.column.values[0] == 169
assert df.Cond.values[0] == 74510.0
assert df.col18.values[3] == 1.288E+03
sfrout = SfrFile('../examples/data/sfr_examples/test1tr.flw')
if sfrout.pd is not None:
df = sfrout.get_dataframe()
assert df.col16.values[-1] == 5.502E-02
assert df.shape == (1080, 20)
def test_sfr_plot():
    """Disabled plotting smoke test; intentionally a no-op for now.

    The original sketch (kept for reference): load test1ss, fetch its SFR
    package and call sfr.plot(key='strtop').
    """
    pass
if __name__ == '__main__':
#test_sfr()
#test_sfr_renumbering()
#test_example()
#test_transient_example()
#test_sfr_plot()
#test_assign_layers()
#test_SfrFile()
pass
| {
"repo_name": "bdestombe/flopy-1",
"path": "autotest/t009_test.py",
"copies": "1",
"size": "12148",
"license": "bsd-3-clause",
"hash": -535786901955293900,
"line_mean": 38.3139158576,
"line_max": 92,
"alpha_frac": 0.5428877181,
"autogenerated": false,
"ratio": 2.9314671814671813,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3974354899567181,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aleaf'
import sys
#sys.path.append('/Users/aleaf/Documents/GitHub/flopy3')
import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import flopy
if os.path.split(os.getcwd())[-1] == 'flopy3':
path = os.path.join('examples', 'data', 'mf2005_test')
path2 = os.path.join('examples', 'data', 'sfr_test')
outpath = os.path.join('py.test/temp')
else:
path = os.path.join('..', 'examples', 'data', 'mf2005_test')
path2 = os.path.join('..', 'examples', 'data', 'sfr_test')
outpath = 'temp'
sfr_items = {0: {'mfnam': 'test1ss.nam',
'sfrfile': 'test1ss.sfr'},
1: {'mfnam': 'test1tr.nam',
'sfrfile': 'test1tr.sfr'},
2: {'mfnam': 'testsfr2_tab.nam',
'sfrfile': 'testsfr2_tab_ICALC1.sfr'},
3: {'mfnam': 'testsfr2_tab.nam',
'sfrfile': 'testsfr2_tab_ICALC2.sfr'},
4: {'mfnam': 'testsfr2.nam',
'sfrfile': 'testsfr2.sfr'},
5: {'mfnam': 'UZFtest2.nam',
'sfrfile': 'UZFtest2.sfr'},
6: {'mfnam': 'TL2009.nam',
'sfrfile': 'TL2009.sfr'}
}
def sfr_process(mfnam, sfrfile, model_ws, outfolder=outpath):
m = flopy.modflow.Modflow.load(mfnam, model_ws=model_ws, verbose=False)
sfr = m.get_package('SFR')
if not os.path.exists(outfolder):
os.makedirs(outfolder)
outpath = os.path.join(outfolder, sfrfile)
sfr.write_file(outpath)
m.remove_package('SFR')
sfr2 = flopy.modflow.ModflowSfr2.load(outpath, m)
assert np.all(sfr2.reach_data == sfr.reach_data)
assert np.all(sfr2.dataset_5 == sfr.dataset_5)
for k, v in sfr2.segment_data.items():
assert np.all(v == sfr.segment_data[k])
for k, v in sfr2.channel_flow_data.items():
assert np.all(v == sfr.channel_flow_data[k])
for k, v in sfr2.channel_geometry_data.items():
assert np.all(v == sfr.channel_geometry_data[k])
return m, sfr
def load_sfr_only(sfrfile):
    """Load a single SFR file into a fresh, empty MODFLOW model."""
    model = flopy.modflow.Modflow()
    sfr_pkg = flopy.modflow.ModflowSfr2.load(sfrfile, model)
    return model, sfr_pkg
def load_all_sfr_only(path):
    """Smoke-load every SFR file registered in sfr_items."""
    for entry in sfr_items.values():
        load_sfr_only(os.path.join(path, entry['sfrfile']))
def interpolate_to_reaches(sfr):
reach_data = sfr.reach_data
segment_data = sfr.segment_data[0]
for reachvar, segvars in {'strtop': ('elevup', 'elevdn'),
'strthick': ('thickm1', 'thickm2'),
'strhc1': ('hcond1', 'hcond2')}.items():
reach_data[reachvar] = sfr._interpolate_to_reaches(*segvars)
for seg in segment_data.nseg:
reaches = reach_data[reach_data.iseg == seg]
dist = np.cumsum(reaches.rchlen) - 0.5 * reaches.rchlen
fp = [segment_data[segment_data['nseg'] == seg][segvars[0]][0],
segment_data[segment_data['nseg'] == seg][segvars[1]][0]]
xp = [dist[0], dist[-1]]
assert np.sum(np.abs(reaches[reachvar] - np.interp(dist, xp, fp).tolist())) < 0.01
return reach_data
def test_sfr():
load_all_sfr_only(path2)
m, sfr = sfr_process('test1ss.nam', 'test1ss.sfr', path)
m, sfr = sfr_process('test1tr.nam', 'test1tr.sfr', path)
#assert list(sfr.dataset_5.keys()) == [0, 1]
m, sfr = sfr_process('testsfr2_tab.nam', 'testsfr2_tab_ICALC1.sfr', path)
assert list(sfr.dataset_5.keys()) == list(range(0, 50))
m, sfr = sfr_process('testsfr2_tab.nam', 'testsfr2_tab_ICALC2.sfr', path)
assert sfr.channel_geometry_data[0][1] == [[0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0],
[6.0, 4.5, 3.5, 0.0, 0.3, 3.5, 4.5, 6.0]]
m, sfr = sfr_process('testsfr2.nam', 'testsfr2.sfr', path)
assert round(sum(sfr.segment_data[49][0]), 7) == 3.9700007
m, sfr = sfr_process('UZFtest2.nam', 'UZFtest2.sfr', path)
assert isinstance(sfr.plot()[0], matplotlib.axes.Axes) # test the plot() method
# trout lake example (only sfr file is included)
# can add tests for sfr connection with lak package
m, sfr = load_sfr_only(os.path.join(path2, 'TL2009.sfr'))
# convert sfr package to reach input
sfr.reachinput = True
sfr.isfropt = 1
sfr.reach_data = interpolate_to_reaches(sfr)
sfr.get_slopes()
assert sfr.reach_data.slope[29] == (sfr.reach_data.strtop[29] - sfr.reach_data.strtop[107])\
/sfr.reach_data.rchlen[29]
chk = sfr.check()
assert sfr.reach_data.slope.min() < 0.0001 and 'minimum slope' in chk.warnings
sfr.reach_data.slope[0] = 1.1
chk.slope(maximum_slope=1.0)
assert 'maximum slope' in chk.warnings
def test_sfr_renumbering():
# test segment renumbering
r = np.zeros((27, 2), dtype=[('iseg', int), ('ireach', int)])
r = np.core.records.fromarrays(r.transpose(), dtype=[('iseg', int), ('ireach', int)])
r['iseg'] = sorted(list(range(1, 10)) * 3)
r['ireach'] = [1, 2, 3] * 9
d = np.zeros((9, 2), dtype=[('nseg', int), ('outseg', int)])
d = np.core.records.fromarrays(d.transpose(), dtype=[('nseg', int), ('outseg', int)])
d['nseg'] = range(1, 10)
d['outseg'] = [4, 0, 6, 8, 3, 8, 1, 2, 8]
m = flopy.modflow.Modflow()
sfr = flopy.modflow.ModflowSfr2(m, reach_data=r, segment_data={0: d})
chk = sfr.check()
assert 'segment numbering order' in chk.warnings
sfr.renumber_segments()
chk = sfr.check()
assert 'continuity in segment and reach numbering' in chk.passed
assert 'segment numbering order' in chk.passed
def test_example():
    """Build the SFR2 package for the test1ss example problem.

    Loads the MODFLOW-2005 test1ss model (without an SFR file), then
    constructs the SFR2 package from csv tables of reach and segment data,
    exercising a flow table (icalc = 4), 8-point channel geometry
    (icalc = 2), and construction from a 0-D segment_data array.
    """
    # load everything except the SFR package itself
    m = flopy.modflow.Modflow.load('test1ss.nam', version='mf2005', exe_name='mf2005.exe',
                                   model_ws=path, load_only=['ghb', 'evt', 'rch', 'dis', 'bas6', 'oc', 'sip', 'lpf'])
    # item 2 (reach) and item 6 (segment) inputs come from csv tables
    reach_data = np.genfromtxt('../examples/data/sfr_examples/test1ss_reach_data.csv', delimiter=',', names=True)
    segment_data = np.genfromtxt('../examples/data/sfr_examples/test1ss_segment_data.csv', delimiter=',', names=True)
    #segment_data = {0: ss_segment_data}
    # item 6e: flow, depth, width table for segment 1 (rows: flow, depth, width)
    channel_flow_data = {0: {1: [[0.5, 1.0, 2.0, 4.0, 7.0, 10.0, 20.0, 30.0, 50.0, 75.0, 100.0],
                                 [0.25, 0.4, 0.55, 0.7, 0.8, 0.9, 1.1, 1.25, 1.4, 1.7, 2.6],
                                 [3.0, 3.5, 4.2, 5.3, 7.0, 8.5, 12.0, 14.0, 17.0, 20.0, 22.0]]}}
    # item 6d: 8-point cross sections for segments 7 and 8 (rows: station, elevation)
    channel_geometry_data = {0: {7: [[0.0, 10.0, 80.0, 100.0, 150.0, 170.0, 240.0, 250.0],
                                     [20.0, 13.0, 10.0, 2.0, 0.0, 10.0, 13.0, 20.0]],
                                 8: [[0.0, 10.0, 80.0, 100.0, 150.0, 170.0, 240.0, 250.0],
                                     [25.0, 17.0, 13.0, 4.0, 0.0, 10.0, 16.0, 20.0]]}}
    nstrm = len(reach_data)  # number of reaches
    nss = len(segment_data)  # number of segments
    nsfrpar = 0  # number of parameters (not supported)
    nparseg = 0
    const = 1.486  # constant for manning's equation, units of cfs
    dleak = 0.0001  # closure tolerance for stream stage computation
    istcb1 = 53  # flag for writing SFR output to cell-by-cell budget (on unit 53)
    istcb2 = 81  # flag for writing SFR output to text file
    dataset_5 = {0: [nss, 0, 0]}  # dataset 5 (see online guide)
    # NOTE(review): newer ModflowSfr2 signatures accept ipakcb rather than
    # istcb1 -- confirm the keyword matches the flopy version under test.
    sfr = flopy.modflow.ModflowSfr2(m, nstrm=nstrm, nss=nss, const=const, dleak=dleak, istcb1=istcb1, istcb2=istcb2,
                                    reach_data=reach_data,
                                    segment_data=segment_data,
                                    channel_geometry_data=channel_geometry_data,
                                    channel_flow_data=channel_flow_data,
                                    dataset_5=dataset_5)
    # test handling of a 0-D array (produced by genfromtxt sometimes)
    segment_data = np.array(segment_data[0])
    sfr = flopy.modflow.ModflowSfr2(m, nstrm=nstrm, nss=nss, const=const, dleak=dleak, istcb1=istcb1, istcb2=istcb2,
                                    reach_data=reach_data,
                                    segment_data=segment_data,
                                    channel_geometry_data=channel_geometry_data,
                                    channel_flow_data=channel_flow_data,
                                    dataset_5=dataset_5)
if __name__ == '__main__':
    # run the SFR autotests in order when executed as a script
    for test in (test_sfr, test_sfr_renumbering, test_example):
        test()
| {
"repo_name": "mrustl/flopy",
"path": "autotest/t009_test.py",
"copies": "1",
"size": "8374",
"license": "bsd-3-clause",
"hash": 6532958614616524000,
"line_mean": 42.1649484536,
"line_max": 117,
"alpha_frac": 0.5597086219,
"autogenerated": false,
"ratio": 2.8541240627130198,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.391383268461302,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aleaf'
import sys
import textwrap
import os
import numpy as np
from numpy.lib import recfunctions
from ..pakbase import Package
from ..utils import MfList
from ..utils.flopy_io import line_parse
class ModflowSfr2(Package):
"""
Streamflow-Routing (SFR2) Package Class
Parameters
----------
model : model object
The model object (of type :class:'flopy.modflow.mf.Modflow') to which
this package will be added.
nstrm : integer
An integer value that can be specified to be positive or negative. The absolute value of NSTRM is equal to
the number of stream reaches (finite-difference cells) that are active during the simulation and the number of
lines of data to be included in Item 2, described below. When NSTRM is specified to be a negative integer,
it is also used as a flag for changing the format of the data input, for simulating unsaturated flow beneath
streams, and (or) for simulating transient streamflow routing (for MODFLOW-2005 simulations only), depending
on the values specified for variables ISFROPT and IRTFLG, as described below. When NSTRM is negative, NSFRPAR
must be set to zero, which means that parameters cannot be specified.
By default, nstrm is set to negative.
nss : integer
An integer value equal to the number of stream segments (consisting of one or more reaches) that are used
to define the complete stream network. The value of NSS represents the number of segments that must be
defined through a combination of parameters and variables in Item 4 or variables in Item 6.
nparseg : integer
An integer value equal to (or exceeding) the number of stream-segment definitions associated with all
parameters. This number can be more than the total number of segments (NSS) in the stream network because
the same segment can be defined in multiple parameters, and because parameters can be time-varying. NPARSEG
must equal or exceed the sum of NLST x N for all parameters, where N is the greater of 1 and NUMINST;
that is, NPARSEG must equal or exceed the total number of repetitions of item 4b. This variable must be zero
when NSTRM is negative.
const : float
A real value (or conversion factor) used in calculating stream depth for stream reach. If stream depth is
not calculated using Manning's equation for any stream segment (that is, ICALC does not equal 1 or 2), then
a value of zero can be entered. If Manning's equation is used, a constant of 1.486 is used for flow units of
cubic feet per second, and a constant of 1.0 is used for units of cubic meters per second. The constant must
be multiplied by 86,400 when using time units of days in the simulation. An explanation of time units used
in MODFLOW is given by Harbaugh and others (2000, p. 10).
dleak : float
A real value equal to the tolerance level of stream depth used in computing leakage between each stream
reach and active model cell. Value is in units of length. Usually a value of 0.0001 is sufficient when units
of feet or meters are used in model.
ipakcb : integer
An integer value used as a flag for writing stream-aquifer leakage values. If ipakcb > 0, unformatted leakage
between each stream reach and corresponding model cell will be saved to the main cell-by-cell budget file
whenever a cell-by-cell budget has been specified in Output Control (see Harbaugh and others, 2000, pages 52-55).
If ipakcb = 0, leakage values will not be printed or saved. Printing to the listing file (ipakcb < 0) is not supported.
istcb2 : integer
An integer value used as a flag for writing to a separate formatted file all information on inflows and
outflows from each reach; on stream depth, width, and streambed conductance; and on head difference and
gradient across the streambed. If ISTCB2 > 0, then ISTCB2 also represents the unit number to which all
information for each stream reach will be saved to a separate file when a cell-by-cell budget has been
specified in Output Control. If ISTCB2 < 0, it is the unit number to which unformatted streamflow out of
each reach will be saved to a file whenever the cell-by-cell budget has been specified in Output Control.
Unformatted output will be saved to <model name>.sfq.
isfropt : integer
An integer value that defines the format of the input data and whether or not unsaturated flow is simulated
beneath streams. Values of ISFROPT are defined as follows
0 No vertical unsaturated flow beneath streams. Streambed elevations, stream slope, streambed thickness,
and streambed hydraulic conductivity are read for each stress period using variables defined in Items 6b
and 6c; the optional variables in Item 2 are not used.
1 No vertical unsaturated flow beneath streams. Streambed elevation, stream slope, streambed thickness,
and streambed hydraulic conductivity are read for each reach only once at the beginning of the simulation
using optional variables defined in Item 2; Items 6b and 6c are used to define stream width and depth for
ICALC = 0 and stream width for ICALC = 1.
2 Streambed and unsaturated-zone properties are read for each reach only once at the beginning of the
simulation using optional variables defined in Item 2; Items 6b and 6c are used to define stream width and
depth for ICALC = 0 and stream width for ICALC = 1. When using the LPF Package, saturated vertical
hydraulic conductivity for the unsaturated zone is the same as the vertical hydraulic conductivity of the
corresponding layer in LPF and input variable UHC is not read.
3 Same as 2 except saturated vertical hydraulic conductivity for the unsaturated zone (input variable UHC)
is read for each reach.
4 Streambed and unsaturated-zone properties are read for the beginning and end of each stream segment using
variables defined in Items 6b and 6c; the optional variables in Item 2 are not used. Streambed properties
can vary each stress period. When using the LPF Package, saturated vertical hydraulic conductivity for the
unsaturated zone is the same as the vertical hydraulic conductivity of the corresponding layer in LPF
and input variable UHC1 is not read.
5 Same as 4 except saturated vertical hydraulic conductivity for the unsaturated zone (input variable UHC1)
is read for each segment at the beginning of the first stress period only.
nstrail : integer
An integer value that is the number of trailing wave increments used to represent a trailing wave. Trailing
waves are used to represent a decrease in the surface infiltration rate. The value can be increased to improve
mass balance in the unsaturated zone. Values between 10 and 20 work well and result in unsaturated-zone mass
balance errors beneath streams ranging between 0.001 and 0.01 percent. Please see Smith (1983) for further
details. (default is 10; for MODFLOW-2005 simulations only when isfropt > 1)
isuzn : integer
An integer value that is the maximum number of vertical cells used to define the unsaturated zone beneath a
stream reach. If ICALC is 1 for all segments then ISUZN should be set to 1.
(default is 1; for MODFLOW-2005 simulations only when isfropt > 1)
nsfrsets : integer
An integer value that is the maximum number of different sets of trailing waves used to allocate arrays.
Arrays are allocated by multiplying NSTRAIL by NSFRSETS. A value of 30 is sufficient for problems where the
stream depth varies often. NSFRSETS does not affect model run time.
(default is 30; for MODFLOW-2005 simulations only when isfropt > 1)
irtflg : integer
An integer value that indicates whether transient streamflow routing is active. IRTFLG must be specified
if NSTRM < 0. If IRTFLG > 0, streamflow will be routed using the kinematic-wave equation (see USGS Techniques
and Methods 6-D1, p. 68-69); otherwise, IRTFLG should be specified as 0. Transient streamflow routing is only
available for MODFLOW-2005; IRTFLG can be left blank for MODFLOW-2000 simulations.
(default is 1)
numtim : integer
An integer value equal to the number of sub time steps used to route streamflow. The time step that will be
used to route streamflow will be equal to the MODFLOW time step divided by NUMTIM.
(default is 2; for MODFLOW-2005 simulations only when irtflg > 0)
weight : float
A real number equal to the time weighting factor used to calculate the change in channel storage. WEIGHT has
a value between 0.5 and 1. Please refer to equation 83 in USGS Techniques and Methods 6-D1 for further
details. (default is 0.75; for MODFLOW-2005 simulations only when irtflg > 0)
flwtol : float
A real number equal to the streamflow tolerance for convergence of the kinematic wave equation used for
transient streamflow routing. A value of 0.00003 cubic meters per second has been used successfully in test
simulations (and would need to be converted to whatever units are being used in the particular simulation).
(default is 0.0001; for MODFLOW-2005 simulations only when irtflg > 0)
reach_data : recarray
Numpy record array of length equal to nstrm, with columns for each variable entered in item 2
(see SFR package input instructions). In following flopy convention, layer, row, column and node number
(for unstructured grids) are zero-based; segment and reach are one-based.
segment_data : recarray
Numpy record array of length equal to nss, with columns for each variable entered in items 6a, 6b and 6c
(see SFR package input instructions). Segment numbers are one-based.
itmp : list of integers (len = NPER)
For each stress period, an integer value for reusing or reading stream segment data that can change each
stress period. If ITMP = 0 then all stream segment data are defined by Item 4 (NSFRPAR > 0; number of stream
parameters is greater than 0). If ITMP > 0, then stream segment data are not defined in Item 4 and must be
defined in Item 6 below for a number of segments equal to the value of ITMP. If ITMP < 0, then stream segment
data not defined in Item 4 will be reused from the last stress period (Item 6 is not read for the current
stress period). ITMP must be defined >= 0 for the first stress period of a simulation.
irdflag : list of integers (len = NPER)
For each stress period, an integer value for printing input data specified for this stress period.
If IRDFLG = 0, input data for this stress period will be printed. If IRDFLG > 0, then input data for this
stress period will not be printed.
iptflag : list of integers (len = NPER)
For each stress period, an integer value for printing streamflow-routing results during this stress period.
If IPTFLG = 0, or whenever the variable ICBCFL or "Save Budget" is specified in Output Control, the results
for specified time steps during this stress period will be printed. If IPTFLG > 0, then the results during
this stress period will not be printed.
extension : string
Filename extension (default is 'sfr')
unit_number : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output and sfr output name will be
created using the model name and .cbc the .sfr.bin/.sfr.out extensions
(for example, modflowtest.cbc, and modflowtest.sfr.bin), if ipakcbc and
istcb2 are numbers greater than zero. If a single string is passed the
package name will be set to the string and other uzf output files will
be set to the model name with the appropriate output file extensions.
To define the names for all package files (input and output) the
length of the list of strings should be 3. Default is None.
Attributes
----------
outlets : nested dictionary
Contains the outlet for each SFR segment; format is {per: {segment: outlet}}
This attribute is created by the get_outlets() method.
outsegs : dictionary of arrays
Each array is of shape nss rows x maximum of nss columns. The first column contains the SFR segments,
the second column contains the outsegs of those segments; the third column the outsegs of the outsegs,
and so on, until all outlets have been encountered, or nss is reached. The latter case indicates
circular routing. This attribute is created by the get_outlets() method.
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
MODFLOW-OWHM is not supported.
The Ground-Water Transport (GWT) process is not supported.
Limitations on which features are supported...
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow()
>>> sfr2 = flopy.modflow.ModflowSfr2(ml, ...)
"""
nsfrpar = 0
heading = '# Streamflow-Routing (SFR2) file for MODFLOW, generated by Flopy'
default_value = -1.0E+10
    def __init__(self, model, nstrm=-2, nss=1, nsfrpar=0, nparseg=0,
                 const=128390.4, dleak=0.0001, ipakcb=None, istcb2=None,
                 isfropt=0,
                 nstrail=10, isuzn=1, nsfrsets=30, irtflg=1, numtim=2,
                 weight=0.75, flwtol=0.0001,
                 reach_data=None,
                 segment_data=None,
                 channel_geometry_data=None,
                 channel_flow_data=None,
                 dataset_5=None,
                 reachinput=False, transroute=False,
                 tabfiles=False, tabfiles_dict=None,
                 extension='sfr', unit_number=None,
                 filenames=None):
        """
        Package constructor.

        See the class docstring for descriptions of the parameters.
        """
        # set default unit number if one is not specified
        if unit_number is None:
            unit_number = ModflowSfr2.defaultunit()

        # normalize filenames to a 3-element list:
        # [package file, cbc output, sfr output]
        if filenames is None:
            filenames = [None, None, None]
        elif isinstance(filenames, str):
            filenames = [filenames, None, None]
        elif isinstance(filenames, list):
            if len(filenames) < 3:
                for idx in range(len(filenames), 3):
                    filenames.append(None)

        # update external file information with cbc output, if necessary
        if ipakcb is not None:
            fname = filenames[1]
            model.add_output_file(ipakcb, fname=fname,
                                  package=ModflowSfr2.ftype())
        else:
            ipakcb = 0

        # add sfr flow output file: formatted text for istcb2 > 0,
        # unformatted binary for istcb2 < 0
        if istcb2 is not None:
            if abs(istcb2) > 0:
                binflag = False
                ext = 'out'
                if istcb2 < 0:
                    binflag = True
                    ext = 'bin'
                fname = filenames[2]
                if fname is None:
                    fname = model.name + '.sfr.{}'.format(ext)
                model.add_output_file(abs(istcb2), fname=fname,
                                      binflag=binflag,
                                      package=ModflowSfr2.ftype())
        else:
            istcb2 = 0

        # Fill namefile items
        name = [ModflowSfr2.ftype()]
        units = [unit_number]
        extra = ['']

        # set package name
        fname = [filenames[0]]

        # Call ancestor's init to set self.parent, extension, name and unit number
        Package.__init__(self, model, extension=extension, name=name,
                         unit_number=units, extra=extra, filenames=fname)

        self.url = 'sfr2.htm'
        self.nper = self.parent.nrow_ncol_nlay_nper[-1]
        self.nper = 1 if self.nper == 0 else self.nper  # otherwise iterations from 0, nper won't run

        # Dataset 0 -----------------------------------------------------------------------
        self.heading = '# {} package for '.format(self.name[0]) + \
                       ' {}, '.format(model.version_types[model.version]) + \
                       'generated by Flopy.'

        # Dataset 1a and 1b. -----------------------------------------------------------------------
        self.reachinput = reachinput
        self.transroute = transroute
        self.tabfiles = tabfiles
        self.tabfiles_dict = tabfiles_dict
        self.numtab = 0 if not tabfiles else len(tabfiles_dict)
        # largest number of values across all tabular inflow files
        self.maxval = np.max([tb['numval'] for tb in tabfiles_dict.values()]) if self.numtab > 0 else 0

        # Dataset 1c. ----------------------------------------------------------------------
        self._nstrm = np.sign(nstrm) * len(reach_data) if reach_data is not None else nstrm  # number of reaches, negative value is flag for unsat. flow beneath streams and/or transient routing
        if segment_data is not None:
            # segment_data may arrive as a bare recarray (possibly 0-D,
            # as genfromtxt can produce); coerce to {per: recarray}
            if not isinstance(segment_data, dict):
                if len(segment_data.shape) == 0:
                    segment_data = np.atleast_1d(segment_data)
                nss = len(segment_data)
                segment_data = {0: segment_data}
            nss = len(segment_data[0])
        else:
            pass
        # use atleast_1d for length since segment_data might be a 0D array
        # this seems to be OK, because self.segment_data is produced by the constructor (never 0D)
        self.nsfrpar = nsfrpar
        self.nparseg = nparseg
        self.const = const  # conversion factor used in calculating stream depth for stream reach (icalc = 1 or 2)
        self.dleak = dleak  # tolerance level of stream depth used in computing leakage
        self.ipakcb = ipakcb
        self.istcb2 = istcb2  # flag; unit number for writing table of SFR output to text file

        # if nstrm < 0
        self.isfropt = isfropt  # defines the format of the input data and whether or not unsaturated flow is simulated

        # if isfropt > 1
        self.nstrail = nstrail  # number of trailing wave increments
        self.isuzn = isuzn  # max number of vertical cells used to define unsat. zone
        self.nsfrsets = nsfrsets  # max number trailing waves sets

        # if nstrm < 0 (MF-2005 only)
        self.irtflag = irtflg  # switch for transient streamflow routing (> 0 = kinematic wave)

        # if irtflag > 0
        self.numtim = numtim  # number of subtimesteps used for routing
        self.weight = weight  # time weighting factor used to calculate the change in channel storage
        self.flwtol = flwtol  # streamflow tolerance for convergence of the kinematic wave equation

        # Dataset 2. -----------------------------------------------------------------------
        # start from an all-default recarray, then copy in supplied columns
        self.reach_data = self.get_empty_reach_data(np.abs(self._nstrm))
        if reach_data is not None:
            for n in reach_data.dtype.names:
                self.reach_data[n] = reach_data[n]

        # assign node numbers if there are none (structured grid)
        if np.diff(self.reach_data.node).max() == 0 and 'DIS' in self.parent.get_package_list():
            # first make kij list (one-based for dis.get_node)
            lrc = self.reach_data[['k', 'i', 'j']]
            lrc = (lrc.view((int, len(lrc.dtype.names))) + 1).tolist()
            self.reach_data['node'] = self.parent.dis.get_node(lrc)

        # assign unique ID and outreach columns to each reach
        self.reach_data.sort(order=['iseg', 'ireach'])
        new_cols = {'reachID': np.arange(1, len(self.reach_data) + 1),
                    'outreach': np.zeros(len(self.reach_data))}
        for k, v in new_cols.items():
            if k not in self.reach_data.dtype.names:
                # NOTE(review): recfunctions.append_fields returns a NEW
                # array rather than modifying its input in place, so this
                # result appears to be discarded -- confirm intended behavior
                recfunctions.append_fields(self.reach_data, names=k, data=v, asrecarray=True)

        # create a stress_period_data attribute to enable parent functions (e.g. plot)
        self.stress_period_data = MfList(self, self.reach_data, dtype=self.reach_data.dtype)

        # Datasets 4 and 6. -----------------------------------------------------------------------
        # list of values that indicate segments outside of the model
        # (depending on how SFR package was constructed)
        self.not_a_segment_values = [999999]

        self.segment_data = {0: self.get_empty_segment_data(nss)}
        if segment_data is not None:
            for i in segment_data.keys():
                self.segment_data[i] = self.get_empty_segment_data(nss)
                for n in segment_data[i].dtype.names:
                    self.segment_data[i][n] = segment_data[i][n]
        # compute outreaches if nseg and outseg columns have non-default values
        if len(self.segment_data[0]) == 1 or \
                np.diff(self.segment_data[0].nseg).max() != 0 and np.diff(
                    self.segment_data[0].outseg).max() != 0:
            # first convert any not_a_segment_values to 0
            for v in self.not_a_segment_values:
                self.segment_data[0].outseg[self.segment_data[0].outseg == v] = 0
            self.set_outreaches()
        self.channel_geometry_data = channel_geometry_data
        self.channel_flow_data = channel_flow_data

        # Dataset 5 -----------------------------------------------------------------------
        self._dataset_5 = dataset_5

        # Attributes not included in SFR package input
        self.outsegs = {}  # dictionary of arrays; see Attributes section of documentation
        self.outlets = {}  # nested dictionary of format {per: {segment: outlet}}

        # -input format checks:
        assert isfropt in [0, 1, 2, 3, 4, 5]

        # derived attributes
        self._paths = None  # lazily built by the paths property

        self.parent.add_package(self)
def __setattr__(self, key, value):
if key == "nstrm":
super(ModflowSfr2, self). \
__setattr__("_nstrm", value)
else: # return to default behavior of pakbase
super(ModflowSfr2, self).__setattr__(key, value)
@property
def nss(self):
# number of stream segments
return len(np.atleast_1d(self.segment_data[0]))
@property
def nstrm(self):
return np.sign(self._nstrm) * len(self.reach_data)
@property
def dataset_5(self):
"""auto-update itmp so it is consistent with reach_data."""
nss = self.nss
ds5 = {}
for k, v in self._dataset_5.items():
itmp = np.sign(v[0]) * nss
ds5[k] = [itmp] + v[1:]
self._dataset_5 = ds5
return ds5
@property
def graph(self):
graph = dict(zip(self.segment_data[0].nseg, self.segment_data[0].outseg))
outlets = set(graph.values()).difference(set(graph.keys())) #including lakes
graph.update({o:0 for o in outlets})
return graph
    @property
    def paths(self):
        """Cached downstream routing paths, keyed by segment number.

        Built lazily via _set_paths(); on later accesses the cache is
        rebuilt if the nseg/outseg routing in segment_data no longer
        matches what the cache was built from.
        """
        if self._paths is None:
            self._set_paths()
            return self._paths
        # check to see if routing in segment data was changed
        nseg = np.array(sorted(self._paths.keys()), dtype=int)
        nseg = nseg[nseg > 0].copy()
        # element 1 of each cached path is the segment's outseg
        outseg = np.array([self._paths[k][1] for k in nseg])
        sd = self.segment_data[0]
        if not np.array_equal(nseg, sd.nseg) or not np.array_equal(outseg, sd.outseg):
            self._set_paths()
        return self._paths
def _set_paths(self):
graph = self.graph
self._paths = {seg: find_path(graph, seg) for seg in graph.keys()}
    @staticmethod
    def get_empty_reach_data(nreaches=0, aux_names=None, structured=True, default_value=-1.0E+10):
        """Return a default-filled recarray for reach (item 2) data.

        Parameters
        ----------
        nreaches : int
            Number of rows (reaches) in the returned array.
        aux_names : list of str, optional
            Auxiliary column names appended to the dtype as float32.
        structured : bool
            Selects the structured (k, i, j) or unstructured (node-only)
            column set via get_default_reach_dtype.
        default_value : float
            Fill value for every column except reachID.

        Returns
        -------
        np.recarray
            Length-nreaches recarray with one-based reachID assigned.
        """
        # get an empty recarray that corresponds to dtype
        dtype = ModflowSfr2.get_default_reach_dtype(structured=structured)
        if aux_names is not None:
            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
        d = np.zeros((nreaches, len(dtype)), dtype=dtype)
        d[:, :] = default_value
        d = np.core.records.fromarrays(d.transpose(), dtype=dtype)
        # reach IDs are one-based
        d['reachID'] = np.arange(1, nreaches + 1)
        return d
    @staticmethod
    def get_empty_segment_data(nsegments=0, aux_names=None, default_value=-1.0E+10):
        """Return a default-filled recarray for segment (items 6a-c) data.

        Parameters
        ----------
        nsegments : int
            Number of rows (segments) in the returned array.
        aux_names : list of str, optional
            Auxiliary column names appended to the dtype as float32.
        default_value : float
            Fill value for every column.

        Returns
        -------
        np.recarray
        """
        # get an empty recarray that corresponds to dtype
        dtype = ModflowSfr2.get_default_segment_dtype()
        if aux_names is not None:
            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
        d = np.zeros((nsegments, len(dtype)), dtype=dtype)
        d[:, :] = default_value
        return np.core.records.fromarrays(d.transpose(), dtype=dtype)
@staticmethod
def get_default_reach_dtype(structured=True):
if structured:
# include node column for structured grids (useful for indexing)
return np.dtype([('node', np.int),
('k', np.int),
('i', np.int),
('j', np.int),
('iseg', np.int),
('ireach', np.int),
('rchlen', np.float32),
('strtop', np.float32),
('slope', np.float32),
('strthick', np.float32),
('strhc1', np.float32),
('thts', np.float32),
('thti', np.float32),
('eps', np.float32),
('uhc', np.float32),
('reachID', np.int),
('outreach', np.int)])
else:
return np.dtype([('node', np.int)
('iseg', np.int),
('ireach', np.int),
('rchlen', np.float32),
('strtop', np.float32),
('slope', np.float32),
('strthick', np.float32),
('strhc1', np.float32),
('thts', np.float32),
('thti', np.float32),
('eps', np.float32),
('uhc', np.float32),
('reachID', np.int),
('outreach', np.int)])
@staticmethod
def get_default_segment_dtype():
return np.dtype([('nseg', np.int),
('icalc', np.int),
('outseg', np.int),
('iupseg', np.int),
('iprior', np.int),
('nstrpts', np.int),
('flow', np.float32),
('runoff', np.float32),
('etsw', np.float32),
('pptsw', np.float32),
('roughch', np.float32),
('roughbk', np.float32),
('cdpth', np.float32),
('fdpth', np.float32),
('awdth', np.float32),
('bwdth', np.float32),
('hcond1', np.float32),
('thickm1', np.float32),
('elevup', np.float32),
('width1', np.float32),
('depth1', np.float32),
('thts1', np.float32),
('thti1', np.float32),
('eps1', np.float32),
('uhc1', np.float32),
('hcond2', np.float32),
('thickm2', np.float32),
('elevdn', np.float32),
('width2', np.float32),
('depth2', np.float32),
('thts2', np.float32),
('thti2', np.float32),
('eps2', np.float32),
('uhc2', np.float32)])
    @staticmethod
    def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
        """Load an SFR2 package from an existing package file.

        Parameters
        ----------
        f : str or file handle
            Path to, or open handle of, the SFR package file.
        model : flopy.modflow.Modflow
            Model to which the loaded package will be added.
        nper : int, optional
            Number of stress periods; taken from the model if not given.
        gwt : bool
            Present for interface compatibility; not used here.
        nsol : int
            Present for interface compatibility; not used here.
        ext_unit_dict : dict, optional
            Mapping of unit number -> external file information used to
            resolve the package, cbc-output and sfr-output filenames.

        Returns
        -------
        ModflowSfr2
        """
        if model.verbose:
            sys.stdout.write('loading sfr2 package file...\n')

        tabfiles = False
        tabfiles_dict = {}
        transroute = False
        reachinput = False
        structured = model.structured
        if nper is None:
            nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
            nper = 1 if nper == 0 else nper  # otherwise iterations from 0, nper won't run

        # open the file if a path was passed instead of a handle
        if not hasattr(f, 'read'):
            filename = f
            f = open(filename, 'r')

        # Item 0 -- header (skip leading comment lines)
        while True:
            line = next(f)
            if line[0] != '#':
                break

        # Item 1a/1b -- optional keywords on the first non-comment line
        if "reachinput" in line.lower():
            """
            When REACHINPUT is specified, variable ISFROPT is read in data set 1c.
            ISFROPT can be used to change the default format for entering reach and segment data
            or to specify that unsaturated flow beneath streams will be simulated.
            """
            reachinput = True
        if "transroute" in line.lower():
            """When TRANSROUTE is specified, optional variables IRTFLG, NUMTIM, WEIGHT, and FLWTOL
            also must be specified in Item 1c.
            """
            transroute = True
        if transroute or reachinput:
            # the keyword line was consumed; item 1c is on the next line
            line = next(f)
        if "tabfiles" in line.lower():
            """
            tabfiles
            An optional character variable that is a flag to indicate that inflows to one or more stream
            segments will be specified with tabular inflow files.
            numtab
            An integer value equal to the number of tabular inflow files that will be read if TABFILES
            is specified. A separate input file is required for each segment that receives specified inflow.
            Thus, the maximum value of NUMTAB that can be specified is equal to the total number of
            segments specified in Item 1c with variables NSS. The name (Fname) and unit number (Nunit)
            of each tabular file must be specified in the MODFLOW-2005 Name File using file type (Ftype) DATA.
            maxval
            """
            tabfiles, numtab, maxval = line.strip().split()
            numtab, maxval = int(numtab), int(maxval)
            line = next(f)

        # item 1c
        nstrm, nss, nsfrpar, nparseg, const, dleak, ipakcb, istcb2, \
        isfropt, nstrail, isuzn, nsfrsets, \
        irtflg, numtim, weight, flwtol, option = _parse_1c(line, reachinput=reachinput, transroute=transroute)

        # item 2
        # set column names, dtypes
        names = _get_item2_names(nstrm, reachinput, isfropt, structured)
        dtypes = [d for d in ModflowSfr2.get_default_reach_dtype().descr
                  if d[0] in names]

        lines = []
        for i in range(abs(nstrm)):
            line = next(f)
            line = line_parse(line)
            ireach = tuple(map(float, line[:len(dtypes)]))
            lines.append(ireach)

        tmp = np.array(lines, dtype=dtypes)
        # initialize full reach_data array with all possible columns
        reach_data = ModflowSfr2.get_empty_reach_data(len(lines))
        for n in names:
            reach_data[n] = tmp[n]  # not sure if there's a way to assign multiple columns

        # zero-based convention for cell indices
        inds = ['k', 'i', 'j'] if structured else ['node']
        _markitzero(reach_data, inds)

        # items 3 and 4 are skipped (parameters not supported)

        # item 5
        segment_data = {}
        channel_geometry_data = {}
        channel_flow_data = {}
        dataset_5 = {}
        aux_variables = {}  # not sure where the auxiliary variables are supposed to go
        for i in range(0, nper):
            # Dataset 5
            dataset_5[i] = _get_dataset(next(f), [1, 0, 0, 0])
            itmp = dataset_5[i][0]
            if itmp > 0:
                # Item 6: read itmp segments for this stress period
                current = ModflowSfr2.get_empty_segment_data(nsegments=itmp, aux_names=option)
                current_aux = {}  # container to hold any auxiliary variables
                current_6d = {}  # these could also be implemented as structured arrays with a column for segment number
                current_6e = {}
                for j in range(itmp):
                    dataset_6a = _parse_6a(next(f), option)
                    current_aux[j] = dataset_6a[-1]
                    dataset_6a = dataset_6a[:-1]  # drop xyz
                    icalc = dataset_6a[1]
                    dataset_6b = _parse_6bc(next(f), icalc, nstrm, isfropt, reachinput, per=i)
                    dataset_6c = _parse_6bc(next(f), icalc, nstrm, isfropt, reachinput, per=i)
                    current[j] = dataset_6a + dataset_6b + dataset_6c

                    if icalc == 2:
                        # ATL: not sure exactly how isfropt logic functions for this
                        # dataset 6d description suggests that this line isn't read for isfropt > 1
                        # but description of icalc suggest that icalc=2 (8-point channel) can be used with any isfropt
                        if i == 0 or nstrm > 0 and not reachinput:  # or isfropt <= 1:
                            dataset_6d = []
                            for k in range(2):
                                dataset_6d.append(_get_dataset(next(f), [0.0] * 8))
                                # dataset_6d.append(list(map(float, next(f).strip().split())))
                            current_6d[j + 1] = dataset_6d
                    if icalc == 4:
                        nstrpts = dataset_6a[5]
                        dataset_6e = []
                        for k in range(3):
                            dataset_6e.append(_get_dataset(next(f), [0.0] * nstrpts))
                        current_6e[j + 1] = dataset_6e

                segment_data[i] = current
                aux_variables[j + 1] = current_aux
                if len(current_6d) > 0:
                    channel_geometry_data[i] = current_6d
                if len(current_6e) > 0:
                    channel_flow_data[i] = current_6e

                if tabfiles and i == 0:
                    for j in range(numtab):
                        segnum, numval, iunit = map(int, next(f).strip().split())
                        # NOTE(review): the key 'inuit' looks like a typo for
                        # 'iunit'; left unchanged because other methods may
                        # read the dictionary by this key -- confirm.
                        tabfiles_dict[segnum] = {'numval': numval, 'inuit': iunit}

            else:
                continue

        # determine specified unit number and resolve output filenames
        unitnumber = None
        filenames = [None, None, None]
        if ext_unit_dict is not None:
            for key, value in ext_unit_dict.items():
                if value.filetype == ModflowSfr2.ftype():
                    unitnumber = key
                    filenames[0] = os.path.basename(value.filename)
                if ipakcb > 0:
                    if key == ipakcb:
                        filenames[1] = os.path.basename(value.filename)
                        model.add_pop_key_list(key)
                if abs(istcb2) > 0:
                    if key == abs(istcb2):
                        filenames[2] = os.path.basename(value.filename)
                        model.add_pop_key_list(key)

        return ModflowSfr2(model, nstrm=nstrm, nss=nss, nsfrpar=nsfrpar, nparseg=nparseg, const=const, dleak=dleak,
                           ipakcb=ipakcb, istcb2=istcb2,
                           isfropt=isfropt, nstrail=nstrail, isuzn=isuzn, nsfrsets=nsfrsets, irtflg=irtflg,
                           numtim=numtim, weight=weight, flwtol=flwtol,
                           reach_data=reach_data,
                           segment_data=segment_data,
                           dataset_5=dataset_5,
                           channel_geometry_data=channel_geometry_data,
                           channel_flow_data=channel_flow_data,
                           reachinput=reachinput, transroute=transroute,
                           tabfiles=tabfiles, tabfiles_dict=tabfiles_dict,
                           unit_number=unitnumber, filenames=filenames)
def check(self, f=None, verbose=True, level=1):
"""
Check sfr2 package data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.sfr2.check()
"""
chk = check(self, verbose=verbose, level=level)
chk.for_nans()
chk.numbering()
chk.routing()
chk.overlapping_conductance()
chk.elevations()
chk.slope()
if f is not None:
if isinstance(f, str):
pth = os.path.join(self.parent.model_ws, f)
f = open(pth, 'w')
f.write('{}\n'.format(chk.txt))
# f.close()
return chk
    def assign_layers(self, adjust_botms=False, pad=1.):
        """Assigns the appropriate layer for each SFR reach,
        based on cell bottoms at location of reach.

        Parameters
        ----------
        adjust_botms : bool
            Streambed bottom elevations below the model bottom
            will cause an error in MODFLOW. If True, adjust
            bottom elevations in lowest layer of the model
            so they are at least pad distance below any co-located
            streambed elevations.
        pad : scalar
            Minimum distance below streambed bottom to set
            any conflicting model bottom elevations.

        Notes
        -----
        Streambed bottom = strtop - strthick
        This routine updates the elevations in the botm array
        of the flopy.model.ModflowDis instance. To produce a
        new DIS package file, model.write() or flopy.model.ModflowDis.write()
        must be run.
        """
        # streambed bottom elevation for every reach
        streambotms = self.reach_data.strtop - self.reach_data.strthick
        i, j = self.reach_data.i, self.reach_data.j
        layers = self.parent.dis.get_layer(i, j, streambotms)
        # check against model bottom
        logfile = 'sfr_botm_conflicts.chk'
        logtxt = ''  # NOTE(review): assigned but never used
        # model bottom (lowest layer) at each reach's row, column
        mbotms = self.parent.dis.botm.array[-1, i, j]
        below = streambotms <= mbotms
        below_i = self.reach_data.i[below]
        below_j = self.reach_data.j[below]
        l = []  # columns written to the conflict log file
        header = ''
        if np.any(below):
            print('Warning: SFR streambed elevations below model bottom. '
                  'See sfr_botm_conflicts.chk')
            if not adjust_botms:
                # just log the conflicts; model bottoms are left unchanged
                l += [below_i,
                      below_j,
                      mbotms[below],
                      streambotms[below]]
                header += 'i,j,model_botm,streambed_botm'
            else:
                print('Fixing elevation conflicts...')
                botm = self.parent.dis.botm.array.copy()
                for ib, jb in zip(below_i, below_j):
                    inds = (self.reach_data.i == ib) & (self.reach_data.j == jb)
                    botm[-1, ib, jb] = streambotms[inds].min() - pad
                # NOTE(review): this vectorized assignment overwrites the
                # per-cell minima computed in the loop above with per-reach
                # values; for cells shared by several reaches the result can
                # differ -- confirm which behavior is intended
                botm[-1, below_i, below_j] = streambotms[below] - pad
                l.append(botm[-1, below_i, below_j])
                header += ',new_model_botm'
                self.parent.dis.botm = botm
                mbotms = self.parent.dis.botm.array[-1, i, j]
                assert not np.any(streambotms <= mbotms)
                print('New bottom array assigned to Flopy DIS package '
                      'instance.\nRun flopy.model.write() or '
                      'flopy.model.ModflowDis.write() to write new DIS file.')
            header += '\n'
            # write the conflict log
            with open(logfile, 'w') as log:
                log.write(header)
                a = np.array(l).transpose()
                for line in a:
                    log.write(','.join(map(str, line)) + '\n')
        # record the assigned layer for each reach
        self.reach_data['k'] = layers
    def get_outlets(self, level=0, verbose=True):
        """Traces all routing connections from each headwater to the outlet.

        Populates self.outsegs[per] (array of downstream segment sequences)
        and self.outlets[per] (segment -> outlet segment) from self.paths.
        """
        txt = ''
        for per in range(self.nper):
            if per > 0 > self.dataset_5[per][0]:  # skip stress periods where seg data not defined
                continue
            # (used only by the disabled implementation below)
            segments = self.segment_data[per].nseg
            outsegs = self.segment_data[per].outseg
            # the block below is an earlier iterative implementation, kept
            # disabled by wrapping it in an inert string literal
            '''
            all_outsegs = np.vstack([segments, outsegs])
            max_outseg = all_outsegs[-1].max()
            knt = 1
            while max_outseg > 0:
                nextlevel = np.array([outsegs[s - 1] if s > 0 and s < 999999 else 0
                                      for s in all_outsegs[-1]])
                all_outsegs = np.vstack([all_outsegs, nextlevel])
                max_outseg = nextlevel.max()
                if max_outseg == 0:
                    break
                knt += 1
                if knt > self.nss:
                    # subset outsegs map to only include rows with outseg number > 0 in last column
                    circular_segs = all_outsegs.T[all_outsegs[-1] > 0]
                    # only retain one instance of each outseg number at iteration=nss
                    vals = []  # append outseg values to vals after they've appeared once
                    mask = [(True, vals.append(v))[0]
                            if v not in vals
                            else False for v in circular_segs[-1]]
                    circular_segs = circular_segs[:, np.array(mask)]
                    # cull the circular segments array to remove duplicate instances of routing circles
                    circles = []
                    duplicates = []
                    for i in range(np.shape(circular_segs)[0]):
                        # find where values in the row equal the last value;
                        # record the index of the second to last instance of last value
                        repeat_start_ind = np.where(circular_segs[i] == circular_segs[i, -1])[0][-2:][0]
                        # use that index to slice out the repeated segment sequence
                        circular_seq = circular_segs[i, repeat_start_ind:].tolist()
                        # keep track of unique sequences of repeated segments
                        if set(circular_seq) not in circles:
                            circles.append(set(circular_seq))
                            duplicates.append(False)
                        else:
                            duplicates.append(True)
                    circular_segs = circular_segs[~np.array(duplicates), :]
                    txt += '{0} instances where an outlet was not found after {1} consecutive segments!\n' \
                        .format(len(circular_segs), self.nss)
                    if level == 1:
                        txt += '\n'.join([' '.join(map(str, row)) for row in circular_segs]) + '\n'
                    else:
                        f = 'circular_routing.csv'
                        np.savetxt(f, circular_segs, fmt='%d', delimiter=',', header=txt)
                        txt += 'See {} for details.'.format(f)
                    if verbose:
                        print(txt)
                    break
            # the array of segment sequence is useful for other other operations,
            # such as plotting elevation profiles
            self.outsegs[per] = all_outsegs
            '''
            # use graph instead of above loop
            nrow = len(self.segment_data[per].nseg)
            # widest routing path among all segments determines the column count
            ncol = np.max([len(v) if v is not None else 0 for v in self.paths.values()])
            all_outsegs = np.zeros((nrow, ncol), dtype=int)
            for i, (k, v) in enumerate(self.paths.items()):
                if k > 0:
                    all_outsegs[i, :len(v)] = v
            all_outsegs.sort(axis=0)
            self.outsegs[per] = all_outsegs
            # create a dictionary listing outlets associated with each segment
            # outlet is the last value in each row of outseg array that is != 0 or 999999
            #self.outlets[per] = {i + 1: r[(r != 0) & (r != 999999)][-1]
            #if len(r[(r != 0) & (r != 999999)]) > 0
            #else i + 1
            # for i, r in enumerate(all_outsegs.T)}
            self.outlets[per] = {k: self.paths[k][-1] if k in self.paths
                                 else k for k in self.segment_data[per].nseg}
        return txt
def reset_reaches(self):
self.reach_data.sort(order=['iseg', 'ireach'])
reach_data = self.reach_data
segment_data = self.segment_data[0]
ireach = []
for iseg in segment_data.nseg:
nreaches = np.sum(reach_data.iseg == iseg)
ireach += list(range(1, nreaches+1))
self.reach_data['ireach'] = ireach
    def set_outreaches(self):
        """Determine the outreach for each SFR reach (requires a reachID column in reach_data).
        Uses the segment routing specified for the first stress period to route reaches between segments.
        """
        self.reach_data.sort(order=['iseg', 'ireach'])
        self.reset_reaches()  # ensure that each segment starts with reach 1
        reach_data = self.reach_data
        segment_data = self.segment_data[0]
        # this vectorized approach is more than an order of magnitude faster than a list comprehension
        is_first_reach = reach_data.ireach == 1
        first_reaches = reach_data[is_first_reach]  # NOTE(review): unused
        # make a dictionary of reach 1 ID for each segment
        first_reach_IDs = dict(zip(reach_data[is_first_reach].iseg,
                                   reach_data[is_first_reach].reachID))
        # a reach is the last in its segment where the next row starts a new segment
        is_last_reach = np.append(is_first_reach[1:], True)
        last_reaches = reach_data[is_last_reach]
        # below doesn't work if there are gaps in numbering
        #last_reaches = np.append((np.diff(reach_data.iseg) == 1), True)
        # default: each reach routes to the next reach in the sorted table
        reach_data.outreach = np.append(reach_data.reachID[1:], 0)
        # for now, treat lakes (negative outseg number) the same as outlets
        # NOTE(review): first_reach_IDs is keyed by segment number, but the
        # lookup uses s-1 -- confirm whether this should be .get(s, 0)
        reach_data.outreach[is_last_reach] = [first_reach_IDs.get(s-1, 0)
                                              for s in segment_data.outseg]
        self.reach_data['outreach'] = reach_data.outreach
    def get_slopes(self):
        """Compute slopes by reach using values in strtop (streambed top) and rchlen (reach length)
        columns of reach_data. The slope for a reach n is computed as strtop(n+1) - strtop(n) / rchlen(n).
        Slopes for outlet reaches are assumed to be equal to slope of previous reach. """
        # forward difference of streambed tops; last reach gets 0 for now
        slopes = np.append(np.diff(self.reach_data.strtop), 0) / self.reach_data.rchlen
        # last reach in each segment, found where iseg increments by 1
        # NOTE(review): assumes consecutive segment numbering -- confirm
        last_reaches = np.append((np.diff(self.reach_data.iseg) == 1), True)
        last_reach_data = self.reach_data[last_reaches]
        # streambed top of the reach each last reach routes to (0 for outlets)
        last_reaches_outreach_elevs = [self.reach_data.strtop[o - 1] if o != 0 else 0
                                       for o in last_reach_data.outreach]
        second_to_last_reaches = np.append(last_reaches[1:], False)
        # compute slopes for last reaches: carry forward the previous reach's
        # slope at outlets, otherwise difference across the segment boundary
        slopes[last_reaches] = [slopes[second_to_last_reaches][i]
                                if last_reaches_outreach_elevs[i] == 0
                                else
                                (last_reaches_outreach_elevs[i] - last_reach_data.strtop[i])
                                / last_reach_data.rchlen[i]
                                for i in range(len(last_reach_data))]
        self.reach_data['slope'] = slopes * -1  # convert from numpy to sfr package convention
    def get_upsegs(self):
        """From segment_data, returns nested dict of all upstream segments by segment,
        by stress period.

        Returns
        -------
        all_upsegs : dict
            Nested dictionary of form {stress period: {segment: [list of upsegs]}}

        Notes
        -----
        This method will not work if there are instances of circular routing.
        """
        all_upsegs = {}
        for per in range(self.nper):
            if per > 0 > self.dataset_5[per][0]:  # skip stress periods where seg data not defined
                continue
            segment_data = self.segment_data[per]
            # make a list of adjacent upsegments keyed to outseg list in Mat2
            upsegs = {o: segment_data.nseg[segment_data.outseg == o].tolist()
                      for o in np.unique(segment_data.outseg)}
            outsegs = [k for k in list(upsegs.keys()) if k > 0]  # exclude 0, which is the outlet designator
            # for each outseg key, for each upseg, check for more upsegs, append until headwaters has been reached
            for outseg in outsegs:
                up = True
                upsegslist = upsegs[outseg]
                while up:
                    added_upsegs = []
                    for us in upsegslist:
                        if us in outsegs:
                            added_upsegs += upsegs[us]
                    if len(added_upsegs) == 0:
                        # no further upstream segments: headwaters reached
                        up = False
                        break
                    else:
                        upsegslist = added_upsegs
                        upsegs[outseg] += added_upsegs
            # the above algorithm is recursive, so lower order streams get duplicated many times
            # use a set to get unique upsegs
            all_upsegs[per] = {u: list(set(upsegs[u])) for u in outsegs}
        return all_upsegs
    def renumber_segments(self):
        """Renumber segments so that segment numbering is continuous and always increases
        in the downstream direction. This may speed convergence of the NWT solver
        in some situations.
        """
        self.segment_data[0].sort(order='nseg')
        # get renumbering info from per=0
        nseg = self.segment_data[0].nseg
        outseg = self.segment_data[0].outseg
        # explicitly fix any gaps in the numbering
        # (i.e. from removing segments)
        nseg2 = np.arange(1, len(nseg) + 1)
        # intermediate mapping that closes gaps in the original numbering
        r1 = dict(zip(nseg, nseg2))
        r1[0] = 0
        outseg2 = np.array([r1[s] for s in outseg])
        # function re-assigns upseg numbers consecutively at one level relative to outlet(s)
        # counts down from the number of segments
        def reassign_upsegs(r, nexts, upsegs):
            nextupsegs = []
            for u in upsegs:
                r[u] = nexts if u > 0 else u  # handle lakes
                nexts -= 1
                nextupsegs += list(nseg2[outseg2 == u])
            return r, nexts, nextupsegs
        ns = len(nseg)
        # start at outlets with nss;
        # renumber upsegs consecutively at each level
        # until all headwaters have been reached
        nexts = ns
        r2 = {0: 0}
        nextupsegs = nseg2[outseg2 == 0]
        for i in range(ns):
            r2, nexts, nextupsegs = reassign_upsegs(r2, nexts, nextupsegs)
            if len(nextupsegs) == 0:
                break
        # map original segment numbers to new numbers
        r = {k: r2.get(v, v) for k, v in r1.items()}
        # renumber segments in all stress period data
        for per in self.segment_data.keys():
            self.segment_data[per]['nseg'] = [r.get(s, s) for s in self.segment_data[per].nseg]
            self.segment_data[per]['outseg'] = [r.get(s, s) for s in self.segment_data[per].outseg]
            self.segment_data[per].sort(order='nseg')
            # NOTE(review): these assertions test the per-0 nseg/outseg arrays
            # captured above, not the just-renumbered per data -- confirm intent
            inds = (outseg > 0) & (nseg > outseg)
            assert not np.any(inds)
            assert len(self.segment_data[per]['nseg']) == self.segment_data[per]['nseg'].max()
        # renumber segments in reach_data
        self.reach_data['iseg'] = [r.get(s, s) for s in self.reach_data.iseg]
        self.reach_data.sort(order=['iseg', 'ireach'])
        self.reach_data['reachID'] = np.arange(1, len(self.reach_data) + 1)
        self.set_outreaches()  # reset the outreaches to ensure continuity
        # renumber segments in other datasets
        def renumber_channel_data(d):
            # translate segment-number keys of nested {per: {seg: data}} dicts
            if d is not None:
                d2 = {}
                for k, v in d.items():
                    d2[k] = {}
                    for s, vv in v.items():
                        d2[k][r[s]] = vv
            else:
                d2 = None
            return d2
        self.channel_geometry_data = renumber_channel_data(self.channel_geometry_data)
        self.channel_flow_data = renumber_channel_data(self.channel_flow_data)
def _get_headwaters(self, per=0):
"""List all segments that are not outsegs (that do not have any segments upstream).
Parameters
----------
per : int
Stress period for which to list headwater segments (default 0)
Returns
-------
headwaters : np.ndarray (1-D)
One dimmensional array listing all headwater segments.
"""
upsegs = [self.segment_data[per].nseg[self.segment_data[per].outseg == s].tolist()
for s in self.segment_data[0].nseg]
return self.segment_data[per].nseg[np.array([i for i, u in enumerate(upsegs) if len(u) == 0])]
    def _interpolate_to_reaches(self, segvar1, segvar2, per=0):
        """Interpolate values in datasets 6b and 6c to each reach in stream segment

        Parameters
        ----------
        segvar1 : str
            Column/variable name in segment_data array for representing start of segment
            (e.g. hcond1 for hydraulic conductivity)
            For segments with icalc=2 (specified channel geometry); if width1 is given,
            the eighth distance point (XCPT8) from dataset 6d will be used as the stream width.
            For icalc=3, an arbitrary width of 5 is assigned.
            For icalc=4, the mean value for width given in item 6e is used.
        segvar2 : str
            Column/variable name in segment_data array for representing end of segment
            (e.g. hcond2 for hydraulic conductivity)
        per : int
            Stress period with segment data to interpolate

        Returns
        -------
        reach_values : 1D array
            One dimensional array of interpolated values of same length as reach_data array.
            For example, hcond1 and hcond2 could be entered as inputs to get values for the
            strhc1 (hydraulic conductivity) column in reach_data.
        """
        reach_data = self.reach_data
        segment_data = self.segment_data[per]
        segment_data.sort(order='nseg')
        reach_data.sort(order=['iseg', 'ireach'])
        reach_values = []
        for seg in segment_data.nseg:
            reaches = reach_data[reach_data.iseg == seg]
            # distance of each reach midpoint along the segment
            dist = np.cumsum(reaches.rchlen) - 0.5 * reaches.rchlen
            icalc = segment_data.icalc[segment_data.nseg == seg]
            if 'width' in segvar1 and icalc == 2:  # get width from channel cross section length
                channel_geometry_data = self.channel_geometry_data[per]
                reach_values += list(np.ones(len(reaches)) * channel_geometry_data[seg][0][-1])
            elif 'width' in segvar1 and icalc == 3:  # assign arbitrary width since width is based on flow
                reach_values += list(np.ones(len(reaches)) * 5)
            elif 'width' in segvar1 and icalc == 4:  # assume width to be mean from streamflow width/flow table
                channel_flow_data = self.channel_flow_data[per]
                reach_values += list(np.ones(len(reaches)) * np.mean(channel_flow_data[seg][2]))
            else:
                # linear interpolation between the two segment-end values
                fp = [segment_data[segment_data['nseg'] == seg][segvar1][0],
                      segment_data[segment_data['nseg'] == seg][segvar2][0]]
                xp = [dist[0], dist[-1]]
                reach_values += np.interp(dist, xp, fp).tolist()
        return np.array(reach_values)
    def _write_1c(self, f_sfr):
        """Write item 1c (SFR2 dimensioning/option record) to an open file handle."""
        # NSTRM NSS NSFRPAR NPARSEG CONST DLEAK ipakcb ISTCB2
        # [ISFROPT] [NSTRAIL] [ISUZN] [NSFRSETS] [IRTFLG] [NUMTIM] [WEIGHT] [FLWTOL]
        f_sfr.write('{:.0f} {:.0f} {:.0f} {:.0f} {:.8f} {:.8f} {:.0f} {:.0f} '
                    .format(self.nstrm, self.nss, self.nsfrpar, self.nparseg,
                            self.const, self.dleak, self.ipakcb, self.istcb2))
        if self.reachinput:
            self.nstrm = abs(self.nstrm)  # see explanation for dataset 1c in online guide
            f_sfr.write('{:.0f} '.format(self.isfropt))
            if self.isfropt > 1:
                f_sfr.write('{:.0f} {:.0f} {:.0f} '.format(self.nstrail,
                                                           self.isuzn,
                                                           self.nsfrsets))
        if self.nstrm < 0:
            f_sfr.write('{:.0f} {:.0f} {:.0f} {:.0f} '.format(self.isfropt,
                                                              self.nstrail,
                                                              self.isuzn,
                                                              self.nsfrsets))
        if self.nstrm < 0 or self.transroute:
            # NOTE(review): reads self.irtflag although the constructor argument
            # is named irtflg -- confirm the attribute name set in __init__
            f_sfr.write('{:.0f} '.format(self.irtflag))
            if self.irtflag < 0:
                f_sfr.write('{:.0f} {:.8f} {:.8f} '.format(self.numtim,
                                                           self.weight,
                                                           self.flwtol))
        f_sfr.write('\n')
    def _write_reach_data(self, f_sfr):
        """Write item 2 (one line per reach) to an open file handle."""
        # Write the recarray (data) to the file (or file handle) f
        assert isinstance(self.reach_data, np.recarray), "MfList.__tofile() data arg " + \
                                                         "not a recarray"
        # decide which columns to write
        # columns = self._get_item2_names()
        columns = _get_item2_names(self.nstrm, self.reachinput, self.isfropt,
                                   structured=self.parent.structured)
        # Add one to the kij indices
        # names = self.reach_data.dtype.names
        # lnames = []
        # [lnames.append(name.lower()) for name in names]
        # --make copy of data for multiple calls
        d = np.recarray.copy(self.reach_data[columns])
        for idx in ['k', 'i', 'j', 'node']:
            if (idx in columns):
                # convert zero-based indices to one-based for the MODFLOW file
                d[idx] += 1
        formats = _fmt_string(d)[:-1] + '\n'
        for i in range(len(d)):
            f_sfr.write(formats.format(*d[i]))
    def _write_segment_data(self, i, j, f_sfr):
        """Write item 6a for segment j of stress period i, then delegate 6b/6c."""
        cols = ['nseg', 'icalc', 'outseg', 'iupseg', 'iprior', 'nstrpts', 'flow', 'runoff',
                'etsw', 'pptsw', 'roughch', 'roughbk', 'cdpth', 'fdpth', 'awdth', 'bwdth']
        fmts = _fmt_string_list(self.segment_data[i][cols][j])
        # replace the package default placeholder with 0 before writing
        nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \
        pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth = \
            [0 if v == self.default_value else v for v in self.segment_data[i][cols][j]]
        f_sfr.write(' '.join(fmts[0:4]).format(nseg, icalc, outseg, iupseg) + ' ')
        # IPRIOR is only written for diversion segments (IUPSEG > 0)
        if iupseg > 0:
            f_sfr.write(fmts[4].format(iprior) + ' ')
        # NSTRPTS is only written when ICALC == 4
        if icalc == 4:
            f_sfr.write(fmts[5].format(nstrpts) + ' ')
        f_sfr.write(' '.join(fmts[6:10]).format(flow, runoff, etsw, pptsw) + ' ')
        if icalc in [1, 2]:
            f_sfr.write(fmts[10].format(roughch) + ' ')
        if icalc == 2:
            f_sfr.write(fmts[11].format(roughbk) + ' ')
        if icalc == 3:
            f_sfr.write(' '.join(fmts[12:16]).format(cdpth, fdpth, awdth, bwdth) + ' ')
        f_sfr.write('\n')
        # datasets 6b and 6c: upstream then downstream ends of the segment
        self._write_6bc(i, j, f_sfr, cols=['hcond1', 'thickm1', 'elevup', 'width1', 'depth1', 'thts1', 'thti1',
                                           'eps1', 'uhc1'])
        self._write_6bc(i, j, f_sfr, cols=['hcond2', 'thickm2', 'elevdn', 'width2', 'depth2', 'thts2', 'thti2',
                                           'eps2', 'uhc2'])
    def _write_6bc(self, i, j, f_sfr, cols=[]):
        """Write dataset 6b or 6c (one end of segment j, stress period i);
        which variables appear depends on isfropt, icalc and stress period."""
        icalc = self.segment_data[i][j][1]
        fmts = _fmt_string_list(self.segment_data[i][cols][j])
        # replace the package default placeholder with 0 before writing
        hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = \
            [0 if v == self.default_value else v for v in self.segment_data[i][cols][j]]
        if self.isfropt in [0, 4, 5] and icalc <= 0:
            f_sfr.write(' '.join(fmts[0:5]).format(hcond, thickm, elevupdn, width, depth) + ' ')
        elif self.isfropt in [0, 4, 5] and icalc == 1:
            f_sfr.write(fmts[0].format(hcond) + ' ')
            if i == 0:
                # remaining variables only written for the first stress period
                f_sfr.write(' '.join(fmts[1:4]).format(thickm, elevupdn, width) + ' ')
                if self.isfropt in [4, 5]:
                    f_sfr.write(' '.join(fmts[5:8]).format(thts, thti, eps) + ' ')
                    if self.isfropt == 5:
                        f_sfr.write(fmts[8].format(uhc) + ' ')
            elif i > 0 and self.isfropt == 0:
                f_sfr.write(' '.join(fmts[1:4]).format(thickm, elevupdn, width) + ' ')
        elif self.isfropt in [0, 4, 5] and icalc >= 2:
            f_sfr.write(fmts[0].format(hcond) + ' ')
            if self.isfropt in [4, 5] and i > 0 and icalc == 2:
                pass
            else:
                f_sfr.write(' '.join(fmts[1:3]).format(thickm, elevupdn) + ' ')
                if self.isfropt in [4, 5] and icalc == 2 and i == 0:
                    # NOTE(review): cols[3:6] are width/depth/thts formats, yet
                    # thts/thti/eps are written; the icalc == 1 branch uses
                    # fmts[5:8] for the same variables -- confirm intent
                    f_sfr.write(' '.join(fmts[3:6]).format(thts, thti, eps) + ' ')
                    if self.isfropt == 5:
                        f_sfr.write(fmts[8].format(uhc) + ' ')
                else:
                    pass
        elif self.isfropt == 1 and icalc <= 1:
            f_sfr.write(fmts[3].format(width) + ' ')
            if icalc <= 0:
                f_sfr.write(fmts[4].format(depth) + ' ')
        elif self.isfropt in [2, 3] and icalc <= 1:
            if i > 0:
                pass
            else:
                f_sfr.write(fmts[3].format(width) + ' ')
                if icalc <= 0:
                    f_sfr.write(fmts[4].format(depth) + ' ')
        else:
            pass
        f_sfr.write('\n')
# def plot(self, **kwargs):
# return super(ModflowSfr2, self).plot(**kwargs)
    def write_file(self, filename=None):
        """
        Write the package file.

        Parameters
        ----------
        filename : str, optional
            Path to write to. If None, self.fn_path is used.

        Returns
        -------
        None
        """
        # tabfiles = False
        # tabfiles_dict = {}
        # transroute = False
        # reachinput = False
        if filename is not None:
            self.fn_path = filename
        f_sfr = open(self.fn_path, 'w')
        # Item 0 -- header
        f_sfr.write('{0}\n'.format(self.heading))
        # Item 1
        if self.reachinput:
            """
            When REACHINPUT is specified, variable ISFROPT is read in data set 1c.
            ISFROPT can be used to change the default format for entering reach and segment data
            or to specify that unsaturated flow beneath streams will be simulated.
            """
            f_sfr.write('reachinput ')
        if self.transroute:
            """When TRANSROUTE is specified, optional variables IRTFLG, NUMTIM, WEIGHT, and FLWTOL
            also must be specified in Item 1c.
            """
            f_sfr.write('transroute')
        if self.transroute or self.reachinput:
            f_sfr.write('\n')
        if self.tabfiles:
            """
            tabfiles
            An optional character variable that is a flag to indicate that inflows to one or more stream
            segments will be specified with tabular inflow files.
            numtab
            An integer value equal to the number of tabular inflow files that will be read if TABFILES
            is specified. A separate input file is required for each segment that receives specified inflow.
            Thus, the maximum value of NUMTAB that can be specified is equal to the total number of
            segments specified in Item 1c with variables NSS. The name (Fname) and unit number (Nunit)
            of each tabular file must be specified in the MODFLOW-2005 Name File using tile type (Ftype) DATA.
            maxval
            """
            f_sfr.write('{} {} {}\n'.format(self.tabfiles, self.numtab, self.maxval))
        self._write_1c(f_sfr)
        # item 2
        self._write_reach_data(f_sfr)
        # items 3 and 4 are skipped (parameters not supported)
        for i in range(0, self.nper):
            # item 5
            itmp = self.dataset_5[i][0]
            f_sfr.write(' '.join(map(str, self.dataset_5[i])) + '\n')
            if itmp > 0:
                # Item 6
                for j in range(itmp):
                    # write datasets 6a, 6b and 6c
                    self._write_segment_data(i, j, f_sfr)
                    icalc = self.segment_data[i].icalc[j]
                    if icalc == 2:
                        # dataset 6d: 8-point channel cross section geometry
                        if i == 0 or self.nstrm > 0 and not self.reachinput:  # or isfropt <= 1:
                            for k in range(2):
                                for d in self.channel_geometry_data[i][j + 1][k]:
                                    f_sfr.write('{:.2f} '.format(d))
                                f_sfr.write('\n')
                    if icalc == 4:
                        # dataset 6e: flow/depth/width table
                        # nstrpts = self.segment_data[i][j][5]
                        for k in range(3):
                            for d in self.channel_flow_data[i][j + 1][k]:
                                f_sfr.write('{:.2f} '.format(d))
                            f_sfr.write('\n')
                if self.tabfiles and i == 0:
                    # item 7: segment number, number of values, unit number
                    # NOTE(review): 'inuit' looks like a typo of a unit-number
                    # key -- it must match the key used when the dict is built
                    for j in sorted(self.tabfiles_dict.keys()):
                        f_sfr.write('{:.0f} {:.0f} {:.0f}\n'.format(j,
                                                                    self.tabfiles_dict[j]['numval'],
                                                                    self.tabfiles_dict[j]['inuit']))
            else:
                continue
        f_sfr.close()
@staticmethod
def ftype():
return 'SFR'
@staticmethod
def defaultunit():
return 17
class check:
"""
Check SFR2 package for common errors
Parameters
----------
sfrpackage : object
Instance of Flopy ModflowSfr2 class.
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Notes
-----
Daniel Feinstein's top 10 SFR problems (7/16/2014):
1) cell gaps btw adjacent reaches in a single segment
2) cell gaps btw routed segments. possibly because of re-entry problems at domain edge
3) adjacent reaches with STOP sloping the wrong way
4) routed segments with end/start sloping the wrong way
5) STOP>TOP1 violations, i.e.,floaters
6) STOP<<TOP1 violations, i.e., exaggerated incisions
7) segments that end within one diagonal cell distance from another segment, inviting linkage
8) circular routing of segments
9) multiple reaches with non-zero conductance in a single cell
10) reaches in inactive cells
Also after running the model they will want to check for backwater effects.
"""
    def __init__(self, sfrpackage, verbose=True, level=1):
        """Set up the checker; see the class docstring for parameters."""
        # the ModflowSfr2 instance being checked
        self.sfr = sfrpackage
        # convenience references to the package's reach and segment tables
        self.reach_data = sfrpackage.reach_data
        self.segment_data = sfrpackage.segment_data
        self.verbose = verbose
        self.level = level
        # test names grouped by outcome; populated by _txt_footer()
        self.passed = []
        self.warnings = []
        self.errors = []
        # running log of all check output
        self.txt = '\n{} ERRORS:\n'.format(self.sfr.name[0])
        self.summary_array = None
    def _boolean_compare(self, array, col1, col2,
                         level0txt='{} violations encountered.',
                         level1txt='Violations:',
                         sort_ascending=True, print_delimiter=' '):
        """Compare two columns in a record array. For each row,
        tests if value in col1 is greater than col2. If any values
        in col1 are > col2, subsets array to only include rows where
        col1 is greater. Creates another column with differences
        (col1-col2), and prints the array sorted by the differences
        column (diff).

        Parameters
        ----------
        array : record array
            Array with columns to compare.
        col1 : string
            Column name in array.
        col2 : string
            Column name in array.
        level0txt : str
            Message formatted with the number of violations (always included).
        level1txt : str
            Heading placed above the violations table (level 1 only).
        sort_ascending : T/F; default True
            If True, printed array will be sorted by differences in
            ascending order.
        print_delimiter : str
            Delimiter for printed array.

        Returns
        -------
        txt : str
            Error messages and printed array (if .level attribute of
            checker is set to 1). Returns an empty string if no
            values in col1 are greater than col2.

        Notes
        -----
        info about appending to record arrays (views vs. copies and upcoming changes to numpy):
        http://stackoverflow.com/questions/22865877/how-do-i-write-to-multiple-fields-of-a-structured-array
        """
        txt = ''
        array = array.copy()
        # col1/col2 may also be passed as a bare array or a (name, array)
        # tuple; append such data to the record array under a column name
        if isinstance(col1, np.ndarray):
            array = recfunctions.append_fields(array, names='tmp1', data=col1,
                                               asrecarray=True)
            col1 = 'tmp1'
        if isinstance(col2, np.ndarray):
            array = recfunctions.append_fields(array, names='tmp2', data=col2,
                                               asrecarray=True)
            col2 = 'tmp2'
        if isinstance(col1, tuple):
            array = recfunctions.append_fields(array, names=col1[0], data=col1[1],
                                               asrecarray=True)
            col1 = col1[0]
        if isinstance(col2, tuple):
            array = recfunctions.append_fields(array, names=col2[0], data=col2[1],
                                               asrecarray=True)
            col2 = col2[0]
        failed = array[col1] > array[col2]
        if np.any(failed):
            failed_info = array[failed].copy()
            txt += level0txt.format(len(failed_info)) + '\n'
            if self.level == 1:
                # difference is col2 - col1 (negative where col1 > col2)
                diff = failed_info[col2] - failed_info[col1]
                # drop all-zero and temporary columns from the printed table
                cols = [c for c in failed_info.dtype.names if failed_info[c].sum() != 0
                        and c != 'diff'
                        and 'tmp' not in c]
                # currently failed_info[cols] results in a warning. Not sure
                # how to do this properly with a recarray.
                failed_info = recfunctions.append_fields(failed_info[cols].copy(),
                                                         names='diff',
                                                         data=diff,
                                                         asrecarray=True)
                failed_info.sort(order='diff', axis=0)
                if not sort_ascending:
                    failed_info = failed_info[::-1]
                txt += level1txt + '\n'
                txt += _print_rec_array(failed_info, delimiter=print_delimiter)
            txt += '\n'
        return txt
def _txt_footer(self, headertxt, txt, testname, passed=False, warning=True):
if len(txt) == 0 or passed:
txt += 'passed.'
self.passed.append(testname)
elif warning:
self.warnings.append(testname)
else:
self.errors.append(testname)
if self.verbose:
print(txt + '\n')
self.txt += headertxt + txt + '\n'
def for_nans(self):
"""Check for nans in reach or segment data"""
headertxt = 'Checking for nan values...\n'
txt = ''
passed = False
isnan = np.any(np.isnan(np.array(self.reach_data.tolist())), axis=1)
nanreaches = self.reach_data[isnan]
if np.any(isnan):
txt += 'Found {} reachs with nans:\n'.format(len(nanreaches))
if self.level == 1:
txt += _print_rec_array(nanreaches, delimiter=' ')
for per, sd in self.segment_data.items():
isnan = np.any(np.isnan(np.array(sd.tolist())), axis=1)
nansd = sd[isnan]
if np.any(isnan):
txt += 'Per {}: found {} segments with nans:\n'.format(per, len(nanreaches))
if self.level == 1:
txt += _print_rec_array(nansd, delimiter=' ')
if len(txt) == 0:
passed = True
self._txt_footer(headertxt, txt, 'nan values', passed)
def run_all(self):
return self.sfr.check()
def numbering(self):
"""checks for continuity in segment and reach numbering
"""
headertxt = 'Checking for continuity in segment and reach numbering...\n'
if self.verbose:
print(headertxt.strip())
txt = ''
passed = False
for per in range(self.sfr.nper):
if per > 0 > self.sfr.dataset_5[per][0]:
continue
# check segment numbering
txt += _check_numbers(self.sfr.nss,
self.segment_data[per]['nseg'],
level=self.level,
datatype='segment')
# check reach numbering
for segment in np.arange(1, self.sfr.nss + 1):
reaches = self.reach_data.ireach[self.reach_data.iseg == segment]
t = _check_numbers(len(reaches),
reaches,
level=self.level,
datatype='reach')
if len(t) > 0:
txt += 'Segment {} has {}'.format(segment, t)
if txt == '':
passed = True
self._txt_footer(headertxt, txt, 'continuity in segment and reach numbering', passed, warning=False)
headertxt = 'Checking for increasing segment numbers in downstream direction...\n'
txt = ''
passed = False
if self.verbose:
print(headertxt.strip())
for per, segment_data in self.segment_data.items():
inds = (segment_data.outseg < segment_data.nseg) & (segment_data.outseg != 0)
if len(txt) == 0 and np.any(inds):
decreases = segment_data[['nseg', 'outseg']][inds]
txt += 'Found segment numbers decreasing in the downstream direction.\n'.format(len(decreases))
txt += 'MODFLOW will run but convergence may be slowed:\n'
if self.level == 1:
txt += 'per nseg outseg\n'
t = ''
for ns, os in decreases:
t += '{} {} {}\n'.format(per, ns, os)
txt += t#'\n'.join(textwrap.wrap(t, width=10))
if len(t) == 0:
passed = True
self._txt_footer(headertxt, txt, 'segment numbering order', passed)
    def routing(self):
        """checks for breaks in routing and does comprehensive check for circular routing
        """
        headertxt = 'Checking for circular routing...\n'
        txt = ''
        if self.verbose:
            print(headertxt.strip())
        #txt += self.sfr.get_outlets(level=self.level, verbose=False) # will print twice if verbose=True
        # simpler check method using paths from routing graph
        # a path of None means no outlet was reached from that segment
        circular_segs = [k for k, v in self.sfr.paths.items() if v is None]
        if len(circular_segs) > 0:
            txt += '{0} instances where an outlet was not found after {1} consecutive segments!\n' \
                .format(len(circular_segs), self.sfr.nss)
            if self.level == 1:
                txt += ' '.join(map(str, circular_segs)) + '\n'
            else:
                # at summary level, dump the offending segments to a csv
                f = 'circular_routing.csv'
                np.savetxt(f, circular_segs, fmt='%d', delimiter=',', header=txt)
                txt += 'See {} for details.'.format(f)
            if self.verbose:
                print(txt)
        self._txt_footer(headertxt, txt, 'circular routing', warning=False)
    def overlapping_conductance(self, tol=1e-6):
        """checks for multiple SFR reaches in one cell; and whether more than one reach has Cond > 0
        """
        headertxt = 'Checking for model cells with multiple non-zero SFR conductances...\n'
        txt = ''
        if self.verbose:
            print(headertxt.strip())
        # make nreach vectors of each conductance parameter
        reach_data = self.reach_data.copy()
        # if no dis file was supplied, can't compute node numbers
        # make nodes based on unique row, col pairs
        if np.diff(reach_data.node).max() == 0:
            # all node numbers identical -> they were never computed
            uniquerc = {}
            for i, (r, c) in enumerate(reach_data[['i', 'j']]):
                if (r, c) not in uniquerc:
                    uniquerc[(r, c)] = i + 1
            reach_data['node'] = [uniquerc[(r, c)] for r, c in reach_data[['i', 'j']]]
        K = reach_data.strhc1
        if K.max() == 0:
            # strhc1 not populated; interpolate from segment-end values
            K = self.sfr._interpolate_to_reaches('hcond1', 'hcond2')
        b = reach_data.strthick
        if b.max() == 0:
            b = self.sfr._interpolate_to_reaches('thickm1', 'thickm2')
        L = reach_data.rchlen
        w = self.sfr._interpolate_to_reaches('width1', 'width2')
        # Calculate SFR conductance for each reach
        Cond = K * w * L / b
        shared_cells = _get_duplicates(reach_data.node)
        nodes_with_multiple_conductance = set()
        for node in shared_cells:
            # select the collocated reaches for this cell
            conductances = Cond[reach_data.node == node].copy()
            conductances.sort()
            # list nodes with multiple non-zero SFR reach conductances
            # (smallest/largest ratio after sorting)
            # NOTE(review): divides by zero if the largest collocated
            # conductance is 0 -- confirm inputs preclude that
            if conductances[0] / conductances[-1] > tol:
                nodes_with_multiple_conductance.update({node})
        if len(nodes_with_multiple_conductance) > 0:
            txt += '{} model cells with multiple non-zero SFR conductances found.\n' \
                   'This may lead to circular routing between collocated reaches.\n' \
                .format(len(nodes_with_multiple_conductance))
            if self.level == 1:
                txt += 'Nodes with overlapping conductances:\n'
                reach_data['strthick'] = b
                reach_data['strhc1'] = K
                cols = [c for c in reach_data.dtype.names if c in \
                        ['node', 'k', 'i', 'j', 'iseg', 'ireach', 'rchlen', 'strthick', 'strhc1']]
                reach_data = recfunctions.append_fields(reach_data,
                                                        names=['width', 'conductance'],
                                                        data=[w, Cond],
                                                        usemask=False,
                                                        asrecarray=True)
                has_multiple = np.array([True if n in nodes_with_multiple_conductance
                                         else False for n in reach_data.node])
                reach_data = reach_data[has_multiple].copy()
                reach_data = reach_data[cols].copy()
                txt += _print_rec_array(reach_data, delimiter='\t')
        self._txt_footer(headertxt, txt, 'overlapping conductance')
def elevations(self):
"""checks streambed elevations for downstream rises and inconsistencies with model grid
"""
headertxt = 'Checking segment_data for downstream rises in streambed elevation...\n'
txt = ''
if self.verbose:
print(headertxt.strip())
# decide whether to check elevup and elevdn from items 6b/c
# (see online guide to SFR input; Data Set 6b description)
passed = False
if self.sfr.isfropt in [0, 4, 5]:
pers = sorted(self.segment_data.keys())
for per in pers:
segment_data = self.segment_data[per][self.segment_data[per].elevup > -999999]
# enforce consecutive increasing segment numbers (for indexing)
segment_data.sort(order='nseg')
t = _check_numbers(len(segment_data), segment_data.nseg, level=1, datatype='Segment')
if len(t) > 0:
raise Exception('Elevation check requires consecutive segment numbering.')
# first check for segments where elevdn > elevup
d_elev = segment_data.elevdn - segment_data.elevup
segment_data = recfunctions.append_fields(segment_data, names='d_elev', data=d_elev,
asrecarray=True)
txt += self._boolean_compare(segment_data[['nseg', 'outseg', 'elevup', 'elevdn',
'd_elev']].copy(),
col1='d_elev', col2=np.zeros(len(segment_data)),
level0txt='Stress Period {}: '.format(per + 1) + \
'{} segments encountered with elevdn > elevup.',
level1txt='Backwards segments:',
)
# next check for rises between segments
non_outlets = segment_data.outseg > 0
non_outlets_seg_data = segment_data[non_outlets] # lake outsegs are < 0
outseg_elevup = np.array([segment_data.elevup[o - 1] for o in segment_data.outseg if o > 0])
d_elev2 = outseg_elevup - segment_data.elevdn[non_outlets]
non_outlets_seg_data = recfunctions.append_fields(non_outlets_seg_data,
names=['outseg_elevup', 'd_elev2'],
data=[outseg_elevup, d_elev2],
asrecarray=True)
txt += self._boolean_compare(non_outlets_seg_data[['nseg', 'outseg', 'elevdn',
'outseg_elevup', 'd_elev2']].copy(),
col1='d_elev2', col2=np.zeros(len(non_outlets_seg_data)),
level0txt='Stress Period {}: '.format(per + 1) + \
'{} segments encountered with segments encountered ' \
'with outseg elevup > elevdn.',
level1txt='Backwards segment connections:',
)
if len(txt) == 0:
passed = True
else:
txt += 'Segment elevup and elevdn not specified for nstrm={} and isfropt={}\n' \
.format(self.sfr.nstrm, self.sfr.isfropt)
passed = True
self._txt_footer(headertxt, txt, 'segment elevations', passed)
headertxt = 'Checking reach_data for downstream rises in streambed elevation...\n'
txt = ''
if self.verbose:
print(headertxt.strip())
passed = False
if self.sfr.nstrm < 0 or self.sfr.reachinput and self.sfr.isfropt in [1, 2, 3]: # see SFR input instructions
# first get an outreach for each reach
if np.diff(self.sfr.reach_data.outreach).max() == 0: # not sure if this is the best test
self.sfr.set_outreaches()
reach_data = self.sfr.reach_data # inconsistent with other checks that work with
# reach_data attribute of check class. Want to have get_outreaches as a method of sfr class
# (for other uses). Not sure if other check methods should also copy reach_data directly from
# SFR package instance for consistency.
# use outreach values to get downstream elevations
non_outlets = reach_data[reach_data.outreach != 0]
outreach_elevdn = np.array([reach_data.strtop[o - 1] for o in reach_data.outreach])
d_strtop = outreach_elevdn[reach_data.outreach != 0] - non_outlets.strtop
non_outlets = recfunctions.append_fields(non_outlets,
names=['strtopdn', 'd_strtop'],
data=[outreach_elevdn, d_strtop],
asrecarray=True)
txt += self._boolean_compare(non_outlets[['k', 'i', 'j', 'iseg', 'ireach',
'strtop', 'strtopdn', 'd_strtop', 'reachID']].copy(),
col1='d_strtop', col2=np.zeros(len(non_outlets)),
level0txt='{} reaches encountered with strtop < strtop of downstream reach.',
level1txt='Elevation rises:',
)
if len(txt) == 0:
passed = True
else:
txt += 'Reach strtop not specified for nstrm={}, reachinput={} and isfropt={}\n' \
.format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt)
passed = True
self._txt_footer(headertxt, txt, 'reach elevations', passed)
headertxt = 'Checking reach_data for inconsistencies between streambed elevations and the model grid...\n'
if self.verbose:
print(headertxt.strip())
txt = ''
if self.sfr.parent.dis is None:
txt += 'No DIS file supplied; cannot check SFR elevations agains model grid.'
self._txt_footer(headertxt, txt, '')
return
passed = False
warning = True
if self.sfr.nstrm < 0 or self.sfr.reachinput and self.sfr.isfropt in [1, 2, 3]: # see SFR input instructions
reach_data = self.reach_data
i, j, k = reach_data.i, reach_data.j, reach_data.k
# check streambed bottoms in relation to respective cell bottoms
bots = self.sfr.parent.dis.botm.array[k, i, j]
streambed_bots = reach_data.strtop - reach_data.strthick
reach_data = recfunctions.append_fields(reach_data,
names=['layerbot', 'strbot'],
data=[bots, streambed_bots],
asrecarray=True)
txt += self._boolean_compare(reach_data[['k', 'i', 'j', 'iseg', 'ireach',
'strtop', 'strthick', 'strbot', 'layerbot',
'reachID']].copy(),
col1='layerbot', col2='strbot',
level0txt='{} reaches encountered with streambed bottom below layer bottom.',
level1txt='Layer bottom violations:',
)
if len(txt) > 0:
warning = False # this constitutes an error (MODFLOW won't run)
# check streambed elevations in relation to model top
tops = self.sfr.parent.dis.top.array[i, j]
reach_data = recfunctions.append_fields(reach_data, names='modeltop', data=tops, asrecarray=True)
txt += self._boolean_compare(reach_data[['k', 'i', 'j', 'iseg', 'ireach',
'strtop', 'modeltop', 'strhc1', 'reachID']].copy(),
col1='strtop', col2='modeltop',
level0txt='{} reaches encountered with streambed above model top.',
level1txt='Model top violations:',
)
if len(txt) == 0:
passed = True
else:
txt += 'Reach strtop, strthick not specified for nstrm={}, reachinput={} and isfropt={}\n' \
.format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt)
passed = True
self._txt_footer(headertxt, txt, 'reach elevations vs. grid elevations', passed, warning=warning)
# In cases where segment end elevations/thicknesses are used,
# do these need to be checked for consistency with layer bottoms?
headertxt = 'Checking segment_data for inconsistencies between segment end elevations and the model grid...\n'
txt = ''
if self.verbose:
print(headertxt.strip())
passed = False
if self.sfr.isfropt in [0, 4, 5]:
reach_data = self.reach_data
pers = sorted(self.segment_data.keys())
for per in pers:
segment_data = self.segment_data[per][self.segment_data[per].elevup > -999999]
# enforce consecutive increasing segment numbers (for indexing)
segment_data.sort(order='nseg')
t = _check_numbers(len(segment_data), segment_data.nseg, level=1, datatype='Segment')
if len(t) > 0:
raise Exception('Elevation check requires consecutive segment numbering.')
first_reaches = reach_data[reach_data.ireach == 1].copy()
last_reaches = reach_data[np.append((np.diff(reach_data.iseg) == 1), True)].copy()
segment_ends = recfunctions.stack_arrays([first_reaches, last_reaches],
asrecarray=True, usemask=False)
segment_ends['strtop'] = np.append(segment_data.elevup, segment_data.elevdn)
i, j = segment_ends.i, segment_ends.j
tops = self.sfr.parent.dis.top.array[i, j]
diff = tops - segment_ends.strtop
segment_ends = recfunctions.append_fields(segment_ends,
names=['modeltop', 'diff'],
data=[tops, diff],
asrecarray=True)
txt += self._boolean_compare(segment_ends[['k', 'i', 'j', 'iseg',
'strtop', 'modeltop', 'diff', 'reachID']].copy(),
col1=np.zeros(len(segment_ends)), col2='diff',
level0txt='{} reaches encountered with streambed above model top.',
level1txt='Model top violations:',
)
if len(txt) == 0:
passed = True
else:
txt += 'Segment elevup and elevdn not specified for nstrm={} and isfropt={}\n' \
.format(self.sfr.nstrm, self.sfr.isfropt)
passed = True
self._txt_footer(headertxt, txt, 'segment elevations vs. model grid', passed)
def slope(self, minimum_slope=1e-4, maximum_slope=1.0):
"""Checks that streambed slopes are greater than or equal to a specified minimum value.
Low slope values can cause "backup" or unrealistic stream stages with icalc options
where stage is computed.
"""
headertxt = 'Checking for streambed slopes of less than {}...\n'.format(minimum_slope)
txt = ''
if self.verbose:
print(headertxt.strip())
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.slope).max() == 0:
txt += 'isfropt setting of 1,2 or 3 requries slope information!\n'
else:
is_less = self.reach_data.slope < minimum_slope
if np.any(is_less):
below_minimum = self.reach_data[is_less]
txt += '{} instances of streambed slopes below minimum found.\n'.format(len(below_minimum))
if self.level == 1:
txt += 'Reaches with low slopes:\n'
txt += _print_rec_array(below_minimum, delimiter='\t')
if len(txt) == 0:
passed = True
else:
txt += 'slope not specified for isfropt={}\n'.format(self.sfr.isfropt)
passed = True
self._txt_footer(headertxt, txt, 'minimum slope', passed)
headertxt = 'Checking for streambed slopes of greater than {}...\n'.format(maximum_slope)
txt = ''
if self.verbose:
print(headertxt.strip())
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.slope).max() == 0:
txt += 'isfropt setting of 1,2 or 3 requries slope information!\n'
else:
is_greater = self.reach_data.slope > maximum_slope
if np.any(is_greater):
above_max = self.reach_data[is_greater]
txt += '{} instances of streambed slopes above maximum found.\n'.format(len(above_max))
if self.level == 1:
txt += 'Reaches with high slopes:\n'
txt += _print_rec_array(above_max, delimiter='\t')
if len(txt) == 0:
passed = True
else:
txt += 'slope not specified for isfropt={}\n'.format(self.sfr.isfropt)
passed = True
self._txt_footer(headertxt, txt, 'maximum slope', passed)
def _check_numbers(n, numbers, level=1, datatype='reach'):
"""Check that a sequence of numbers is consecutive
(that the sequence is equal to the range from 1 to n+1, where n is the expected length of the sequence).
Parameters
----------
n : int
Expected length of the sequence (i.e. number of stream segments)
numbers : array
Sequence of numbers (i.e. 'nseg' column from the segment_data array)
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
datatype : str, optional
Only used for reporting.
"""
txt = ''
num_range = np.arange(1, n + 1)
if not np.array_equal(num_range, numbers):
txt += 'Invalid {} numbering\n'.format(datatype)
if level == 1:
non_consecutive = np.append(np.diff(numbers) != 1, False) # consistent dimmension for boolean array
gaps = num_range[non_consecutive] + 1
if len(gaps) > 0:
gapstr = ' '.join(map(str, gaps))
txt += 'Gaps in numbering at positions {}\n'.format(gapstr)
return txt
def _isnumeric(str):
try:
float(str)
return True
except:
return False
def _markitzero(recarray, inds):
"""subtracts 1 from columns specified in inds argument, to convert from 1 to 0-based indexing
"""
lnames = [n.lower() for n in recarray.dtype.names]
for idx in inds:
if (idx in lnames):
recarray[idx] -= 1
def _pop_item(line):
if len(line) > 0:
return line.pop(0)
return 0
def _get_dataset(line, dataset):
    """Parse whitespace-delimited numbers from *line* into *dataset*.

    Values containing a decimal point are stored as floats, the rest as
    ints; parsing stops at the first token that is not numeric. This could
    be a bad idea (vs. explicitly formatting values for each dataset).

    Parameters
    ----------
    line : str
        line read from SFR package input file
    dataset : list
        list of default values, overwritten positionally by parsed values

    Returns
    -------
    dataset : list
        The input list with parsed values assigned.
    """
    for i, s in enumerate(line_parse(line)):
        try:
            n = int(s)
        except ValueError:
            # not an integer; fall back to float, stop at first non-number
            try:
                n = float(s)
            except ValueError:
                break
        dataset[i] = n
    return dataset
def _get_duplicates(a):
"""Returns duplcate values in an array, similar to pandas .duplicated() method
http://stackoverflow.com/questions/11528078/determining-duplicate-values-in-an-array
"""
s = np.sort(a, axis=None)
equal_to_previous_item = np.append(s[1:] == s[:-1], False) # maintain same dimmension for boolean array
return np.unique(s[equal_to_previous_item])
def _get_item2_names(nstrm, reachinput, isfropt, structured=False):
"""Determine which variables should be in item 2, based on model grid type,
reachinput specification, and isfropt.
Returns
-------
names : list of str
List of names (same as variables in SFR Package input instructions) of columns
to assign (upon load) or retain (upon write) in reach_data array.
Notes
-----
Lowercase is used for all variable names.
"""
names = []
if structured:
names += ['k', 'i', 'j']
else:
names += ['node']
names += ['iseg', 'ireach', 'rchlen']
if nstrm < 0 or reachinput:
if isfropt in [1, 2, 3]:
names += ['strtop', 'slope', 'strthick', 'strhc1']
if isfropt in [2, 3]:
names += ['thts', 'thti', 'eps']
if isfropt == 3:
names += ['uhc']
return names
def _fmt_string(array, float_format='{}'):
fmt_string = ''
for field in array.dtype.descr:
vtype = field[1][1].lower()
if (vtype == 'i'):
fmt_string += '{:.0f} '
elif (vtype == 'f'):
fmt_string += '{} '.format(float_format)
elif (vtype == 'o'):
fmt_string += '{} '
elif (vtype == 's'):
raise Exception("MfList error: '\str\' type found it dtype." + \
" This gives unpredictable results when " + \
"recarray to file - change to \'object\' type")
else:
raise Exception("MfList.fmt_string error: unknown vtype " + \
"in dtype:" + vtype)
return fmt_string
def _fmt_string_list(array, float_format='{}'):
fmt_string = []
for field in array.dtype.descr:
vtype = field[1][1].lower()
if (vtype == 'i'):
fmt_string += ['{:.0f}']
elif (vtype == 'f'):
fmt_string += [float_format]
elif (vtype == 'o'):
fmt_string += ['{}']
elif (vtype == 's'):
raise Exception("MfList error: '\str\' type found it dtype." + \
" This gives unpredictable results when " + \
"recarray to file - change to \'object\' type")
else:
raise Exception("MfList.fmt_string error: unknown vtype " + \
"in dtype:" + vtype)
return fmt_string
def _print_rec_array(array, cols=None, delimiter=' ', float_format='{:.6f}'):
    """Print out a numpy record array to string, with column names.

    Parameters
    ----------
    array : np.recarray or structured ndarray
        Array to print.
    cols : list of strings, optional
        List of columns to print; column order follows array.dtype.names.
        If None, all columns are printed.
    delimiter : string
        Delimiter to use.
    float_format : string, optional
        Format spec passed through to _fmt_string_list for float fields.

    Returns
    -------
    txt : string
        Text string of array: a header row of column names, then one
        delimited row per record.
    """
    txt = ''
    if cols is not None:
        # keep only requested columns, preserving dtype order
        cols = [c for c in array.dtype.names if c in cols]
    else:
        cols = list(array.dtype.names)
    # drop columns with no data
    # (placeholder columns are filled with default_value <= -999999;
    # only applied for multi-row arrays)
    if np.shape(array)[0] > 1:
        cols = [c for c in cols if array[c].min() > -999999]
    # add _fmt_string call here
    fmts = _fmt_string_list(array[cols], float_format=float_format)
    txt += delimiter.join(cols) + '\n'
    txt += '\n'.join([delimiter.join(fmts).format(*r) for r in array[cols].copy().tolist()])
    return txt
def _parse_1c(line, reachinput, transroute):
    """Parse Data Set 1c for SFR2 package.

    See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info

    Parameters
    ----------
    line : str
        line read from SFR package input file
    reachinput : bool
        whether the REACHINPUT option is active (controls reading of ISFROPT
        and the unsaturated-zone variables)
    transroute : bool
        whether the TRANSROUTE option is active (controls reading of the
        transient routing variables)

    Returns
    -------
    a tuple of length 17 containing all variables for Data Set 1c
    """
    na = 0  # placeholder for variables that are not read for this configuration
    line = line_parse(line)
    # items are consumed left to right; order follows the Data Set 1c spec
    nstrm = int(line.pop(0))
    nss = int(line.pop(0))
    nsfrpar = int(line.pop(0))
    nparseg = int(line.pop(0))
    const = float(line.pop(0))
    dleak = float(line.pop(0))
    ipakcb = int(line.pop(0))
    istcb2 = int(line.pop(0))
    isfropt, nstrail, isuzn, nsfrsets = na, na, na, na
    if reachinput:
        nstrm = abs(nstrm)  # see explanation for dataset 1c in online guide
        isfropt = int(line.pop(0))
        if isfropt > 1:
            # unsaturated-zone variables only follow when isfropt > 1
            nstrail = int(line.pop(0))
            isuzn = int(line.pop(0))
            nsfrsets = int(line.pop(0))
    if nstrm < 0:
        # negative nstrm flags the alternate input format; same four variables follow
        isfropt = int(line.pop(0))
        nstrail = int(line.pop(0))
        isuzn = int(line.pop(0))
        nsfrsets = int(line.pop(0))
    irtflg, numtim, weight, flwtol = na, na, na, na
    if nstrm < 0 or transroute:
        # transient streamflow routing variables (MODFLOW-2005 only)
        irtflg = int(_pop_item(line))
        if irtflg > 0:
            numtim = int(line.pop(0))
            weight = int(line.pop(0))
            flwtol = int(line.pop(0))
    # auxillary variables (MODFLOW-LGR): each is the token following an 'aux' keyword
    option = [line[i] for i in np.arange(1, len(line)) if 'aux' in line[i - 1].lower()]
    return nstrm, nss, nsfrpar, nparseg, const, dleak, ipakcb, istcb2, \
           isfropt, nstrail, isuzn, nsfrsets, irtflg, numtim, weight, flwtol, option
def _parse_6a(line, option):
    """Parse Data Set 6a for SFR2 package.

    See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info

    Parameters
    ----------
    line : str
        line read from SFR package input file
    option : list of str
        auxiliary variable names (from Data Set 1c) to collect from the line

    Returns
    -------
    a tuple of length 17 containing all variables for Data Set 6a
    (the last element, xyz, is the list of matched auxiliary values)
    """
    line = line_parse(line)
    xyz = []
    # handle any aux variables at end of line
    for i, s in enumerate(line):
        if s.lower() in option:
            xyz.append(s.lower())
    na = 0  # placeholder for variables that are not read for this icalc/iupseg
    nvalues = sum([_isnumeric(s) for s in line])  # NOTE(review): currently unused
    # items are consumed left to right; order follows the Data Set 6a spec
    nseg = int(line.pop(0))
    icalc = int(line.pop(0))
    outseg = int(line.pop(0))
    iupseg = int(line.pop(0))
    iprior = na
    nstrpts = na
    if iupseg > 0:
        # iprior only read for diversions (positive iupseg)
        iprior = int(line.pop(0))
    if icalc == 4:
        nstrpts = int(line.pop(0))
    flow = float(line.pop(0))
    runoff = float(line.pop(0))
    etsw = float(line.pop(0))
    pptsw = float(line.pop(0))
    roughch = na
    roughbk = na
    if icalc in [1, 2]:
        # Manning's roughness; bank roughness only for 8-point cross sections
        roughch = float(line.pop(0))
        if icalc == 2:
            roughbk = float(line.pop(0))
    cdpth, fdpth, awdth, bwdth = na, na, na, na
    if icalc == 3:
        # power-function depth/width coefficients
        cdpth, fdpth, awdth, bwdth = map(float, line)
    return nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \
           pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth, xyz
def _parse_6bc(line, icalc, nstrm, isfropt, reachinput, per=0):
    """Parse Data Set 6b for SFR2 package.

    See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info

    Parameters
    ----------
    line : str
        line read from SFR package input file
    icalc : int
        stream depth calculation option for the segment
    nstrm : int
        number of reaches (sign is a format flag; see Data Set 1c)
    isfropt : int
        input format / unsaturated flow option (see Data Set 1c)
    reachinput : bool
        whether the REACHINPUT option is active
    per : int, optional
        zero-based stress period; some items are only read in the first
        stress period (per == 0)

    Returns
    -------
    a tuple of length 9 containing all variables for Data Set 6b
    """
    na = 0  # NOTE(review): unused here; defaults below come from the [0.0] * 9 fill
    # count the numeric tokens so the defaults list has the right length
    nvalues = sum([_isnumeric(s) for s in line_parse(line)])
    line = _get_dataset(line, [0] * nvalues)
    hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = [0.0] * 9
    # which items appear depends on isfropt, icalc and stress period --
    # see the Data Set 6b table in the online guide
    if isfropt in [0, 4, 5] and icalc <= 0:
        hcond = line.pop(0)
        thickm = line.pop(0)
        elevupdn = line.pop(0)
        width = line.pop(0)
        depth = line.pop(0)
    elif isfropt in [0, 4, 5] and icalc == 1:
        hcond = line.pop(0)
        if per == 0:
            thickm = line.pop(0)
            elevupdn = line.pop(0)
            width = line.pop(0)  # depth is not read if icalc == 1; see table in online guide
            # unsaturated-zone items may be absent, so use _pop_item (returns 0 when empty)
            thts = _pop_item(line)
            thti = _pop_item(line)
            eps = _pop_item(line)
            if isfropt == 5:
                uhc = line.pop(0)
    elif isfropt in [0, 4, 5] and icalc >= 2:
        hcond = line.pop(0)
        if isfropt in [4, 5] and per > 0 and icalc == 2:
            # segment geometry only read in the first stress period for this case
            pass
        else:
            thickm = line.pop(0)
            elevupdn = line.pop(0)
            if isfropt in [4, 5] and icalc == 2 and per == 0:
                # table in online guide suggests that the following items should be present in this case
                # but in the example
                thts = _pop_item(line)
                thti = _pop_item(line)
                eps = _pop_item(line)
                if isfropt == 5:
                    uhc = _pop_item(line)
            else:
                pass
    elif isfropt == 1 and icalc <= 1:
        width = line.pop(0)
        if icalc <= 0:
            depth = line.pop(0)
    elif isfropt in [2, 3] and icalc <= 1:
        if per > 0:
            # width/depth only read in the first stress period for isfropt 2, 3
            pass
        else:
            width = line.pop(0)
            if icalc <= 0:
                depth = line.pop(0)
    else:
        pass
    return hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc
def find_path(graph, start, end=0, path=None):
    """Recursively find a path through a routing *graph* from *start* to *end*.

    Parameters
    ----------
    graph : dict
        Mapping of node -> downstream node (scalar or list of nodes).
        Scalar entries are normalized to lists in place, as before.
    start : hashable
        Node to start from.
    end : hashable, optional
        Node to reach (default 0, the SFR outlet convention).
    path : list, optional
        Nodes visited so far; used internally by the recursion.

    Returns
    -------
    list or None
        The list of nodes from start to end, or None if no path exists.
    """
    # avoid the mutable-default-argument pitfall: build a fresh list per call
    if path is None:
        path = []
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    if not isinstance(graph[start], list):
        graph[start] = [graph[start]]
    for node in graph[start]:
        if node not in path:  # skip visited nodes to avoid cycles
            newpath = find_path(graph, node, end, path)
            if newpath:
                return newpath
    return None
| {
"repo_name": "brclark-usgs/flopy",
"path": "flopy/modflow/mfsfr2.py",
"copies": "1",
"size": "108856",
"license": "bsd-3-clause",
"hash": -620210491802452700,
"line_mean": 44.4204690832,
"line_max": 193,
"alpha_frac": 0.5301591093,
"autogenerated": false,
"ratio": 3.9932501834189287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023079430615220503,
"num_lines": 2345
} |
__author__ = 'aleaf'
import sys
sys.path.insert(0, '..')
import textwrap
import os
import numpy as np
from numpy.lib import recfunctions
from ..pakbase import Package
from flopy.utils.util_list import MfList
from ..utils.flopy_io import line_parse
class ModflowSfr2(Package):
"""
Streamflow-Routing (SFR2) Package Class
Parameters
----------
model : model object
The model object (of type :class:'flopy.modflow.mf.Modflow') to which
this package will be added.
nstrm : integer
An integer value that can be specified to be positive or negative. The absolute value of NSTRM is equal to
the number of stream reaches (finite-difference cells) that are active during the simulation and the number of
lines of data to be included in Item 2, described below. When NSTRM is specified to be a negative integer,
it is also used as a flag for changing the format of the data input, for simulating unsaturated flow beneath
streams, and (or) for simulating transient streamflow routing (for MODFLOW-2005 simulations only), depending
on the values specified for variables ISFROPT and IRTFLG, as described below. When NSTRM is negative, NSFRPAR
must be set to zero, which means that parameters cannot be specified.
nss : integer
An integer value equal to the number of stream segments (consisting of one or more reaches) that are used
to define the complete stream network. The value of NSS represents the number of segments that must be
defined through a combination of parameters and variables in Item 4 or variables in Item 6.
nparseg : integer
An integer value equal to (or exceeding) the number of stream-segment definitions associated with all
parameters. This number can be more than the total number of segments (NSS) in the stream network because
the same segment can be defined in multiple parameters, and because parameters can be time-varying. NPARSEG
must equal or exceed the sum of NLST x N for all parameters, where N is the greater of 1 and NUMINST;
that is, NPARSEG must equal or exceed the total number of repetitions of item 4b. This variable must be zero
when NSTRM is negative.
const : float
A real value (or conversion factor) used in calculating stream depth for stream reach. If stream depth is
not calculated using Manning's equation for any stream segment (that is, ICALC does not equal 1 or 2), then
a value of zero can be entered. If Manning's equation is used, a constant of 1.486 is used for flow units of
cubic feet per second, and a constant of 1.0 is used for units of cubic meters per second. The constant must
be multiplied by 86,400 when using time units of days in the simulation. An explanation of time units used
in MODFLOW is given by Harbaugh and others (2000, p. 10).
dleak : float
A real value equal to the tolerance level of stream depth used in computing leakage between each stream
reach and active model cell. Value is in units of length. Usually a value of 0.0001 is sufficient when units
of feet or meters are used in model.
istcsb1 : integer
An integer value used as a flag for writing stream-aquifer leakage values. If ISTCB1 > 0, unformatted leakage
between each stream reach and corresponding model cell will be saved to a file whenever the cell-by-cell budget
has been specified in Output Control (see Harbaugh and others, 2000, pages 52-55). If ISTCB1 = 0, leakage values
will not be printed or saved. If ISTCB1 < 0, all information on inflows and outflows from each reach; on stream
depth, width, and streambed conductance; and on head difference and gradient across the streambed will be
printed in the main listing file whenever a cell-by-cell budget has been specified in Output Control.
istcsb2 : integer
An integer value used as a flag for writing to a separate formatted file all information on inflows and
outflows from each reach; on stream depth, width, and streambed conductance; and on head difference and
gradient across the streambed. If ISTCB2 > 0, then ISTCB2 also represents the unit number to which all
information for each stream reach will be saved to a separate file when a cell-by-cell budget has been
specified in Output Control. If ISTCB2 < 0, it is the unit number to which unformatted streamflow out of
each reach will be saved to a file whenever the cell-by-cell budget has been specified in Output Control.
isfropt : integer
An integer value that defines the format of the input data and whether or not unsaturated flow is simulated
beneath streams. Values of ISFROPT are defined as follows
0 No vertical unsaturated flow beneath streams. Streambed elevations, stream slope, streambed thickness,
and streambed hydraulic conductivity are read for each stress period using variables defined in Items 6b
and 6c; the optional variables in Item 2 are not used.
1 No vertical unsaturated flow beneath streams. Streambed elevation, stream slope, streambed thickness,
and streambed hydraulic conductivity are read for each reach only once at the beginning of the simulation
using optional variables defined in Item 2; Items 6b and 6c are used to define stream width and depth for
ICALC = 0 and stream width for ICALC = 1.
2 Streambed and unsaturated-zone properties are read for each reach only once at the beginning of the
simulation using optional variables defined in Item 2; Items 6b and 6c are used to define stream width and
depth for ICALC = 0 and stream width for ICALC = 1. When using the LPF Package, saturated vertical
hydraulic conductivity for the unsaturated zone is the same as the vertical hydraulic conductivity of the
corresponding layer in LPF and input variable UHC is not read.
3 Same as 2 except saturated vertical hydraulic conductivity for the unsaturated zone (input variable UHC)
is read for each reach.
4 Streambed and unsaturated-zone properties are read for the beginning and end of each stream segment using
variables defined in Items 6b and 6c; the optional variables in Item 2 are not used. Streambed properties
can vary each stress period. When using the LPF Package, saturated vertical hydraulic conductivity for the
unsaturated zone is the same as the vertical hydraulic conductivity of the corresponding layer in LPF
and input variable UHC1 is not read.
5 Same as 4 except saturated vertical hydraulic conductivity for the unsaturated zone (input variable UHC1)
is read for each segment at the beginning of the first stress period only.
nstrail : integer
An integer value that is the number of trailing wave increments used to represent a trailing wave. Trailing
waves are used to represent a decrease in the surface infiltration rate. The value can be increased to improve
mass balance in the unsaturated zone. Values between 10 and 20 work well and result in unsaturated-zone mass
balance errors beneath streams ranging between 0.001 and 0.01 percent. Please see Smith (1983) for further
details. (default is 10; for MODFLOW-2005 simulations only when isfropt > 1)
isuzn : integer
An integer value that is the maximum number of vertical cells used to define the unsaturated zone beneath a
stream reach. If ICALC is 1 for all segments then ISUZN should be set to 1.
(default is 1; for MODFLOW-2005 simulations only when isfropt > 1)
nsfrsets : integer
An integer value that is the maximum number of different sets of trailing waves used to allocate arrays.
Arrays are allocated by multiplying NSTRAIL by NSFRSETS. A value of 30 is sufficient for problems where the
stream depth varies often. NSFRSETS does not affect model run time.
(default is 30; for MODFLOW-2005 simulations only when isfropt > 1)
irtflg : integer
An integer value that indicates whether transient streamflow routing is active. IRTFLG must be specified
if NSTRM < 0. If IRTFLG > 0, streamflow will be routed using the kinematic-wave equation (see USGS Techniques
and Methods 6-D1, p. 68-69); otherwise, IRTFLG should be specified as 0. Transient streamflow routing is only
available for MODFLOW-2005; IRTFLG can be left blank for MODFLOW-2000 simulations.
(default is 1)
numtim : integer
An integer value equal to the number of sub time steps used to route streamflow. The time step that will be
used to route streamflow will be equal to the MODFLOW time step divided by NUMTIM.
(default is 2; for MODFLOW-2005 simulations only when irtflg > 0)
weight : float
A real number equal to the time weighting factor used to calculate the change in channel storage. WEIGHT has
a value between 0.5 and 1. Please refer to equation 83 in USGS Techniques and Methods 6-D1 for further
details. (default is 0.75; for MODFLOW-2005 simulations only when irtflg > 0)
flwtol : float
A real number equal to the streamflow tolerance for convergence of the kinematic wave equation used for
transient streamflow routing. A value of 0.00003 cubic meters per second has been used successfully in test
simulations (and would need to be converted to whatever units are being used in the particular simulation).
(default is 0.0001; for MODFLOW-2005 simulations only when irtflg > 0)
reach_data : recarray
Numpy record array of length equal to nstrm, with columns for each variable entered in item 2
(see SFR package input instructions). In following flopy convention, layer, row, column and node number
(for unstructured grids) are zero-based; segment and reach are one-based.
segment_data : recarray
Numpy record array of length equal to nss, with columns for each variable entered in items 6a, 6b and 6c
(see SFR package input instructions). Segment numbers are one-based.
itmp : list of integers (len = NPER)
For each stress period, an integer value for reusing or reading stream segment data that can change each
stress period. If ITMP = 0 then all stream segment data are defined by Item 4 (NSFRPAR > 0; number of stream
parameters is greater than 0). If ITMP > 0, then stream segment data are not defined in Item 4 and must be
defined in Item 6 below for a number of segments equal to the value of ITMP. If ITMP < 0, then stream segment
data not defined in Item 4 will be reused from the last stress period (Item 6 is not read for the current
stress period). ITMP must be defined >= 0 for the first stress period of a simulation.
irdflag : list of integers (len = NPER)
For each stress period, an integer value for printing input data specified for this stress period.
If IRDFLG = 0, input data for this stress period will be printed. If IRDFLG > 0, then input data for this
stress period will not be printed.
iptflag : list of integers (len = NPER)
For each stress period, an integer value for printing streamflow-routing results during this stress period.
If IPTFLG = 0, or whenever the variable ICBCFL or "Save Budget" is specified in Output Control, the results
for specified time steps during this stress period will be printed. If IPTFLG > 0, then the results during
this stress period will not be printed.
    extension : string
        Filename extension (default is 'sfr')
unitnumber : int
File unit number (default is 34).
Attributes
----------
outlets : nested dictionary
Contains the outlet for each SFR segment; format is {per: {segment: outlet}}
This attribute is created by the get_outlets() method.
outsegs : dictionary of arrays
Each array is of shape nss rows x maximum of nss columns. The first column contains the SFR segments,
the second column contains the outsegs of those segments; the third column the outsegs of the outsegs,
and so on, until all outlets have been encountered, or nss is reached. The latter case indicates
circular routing. This attribute is created by the get_outlets() method.
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
MODFLOW-OWHM is not supported.
The Ground-Water Transport (GWT) process is not supported.
Limitations on which features are supported...
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow()
>>> sfr2 = flopy.modflow.ModflowSfr2(ml, ...)
"""
nsfrpar = 0
heading = '# Streamflow-Routing (SFR2) file for MODFLOW, generated by Flopy'
default_value = -1.0E+10
def __init__(self, model, nstrm=2, nss=1, nsfrpar=0, nparseg=0, const=128390.4, dleak=0.0001, istcb1=50, istcb2=66,
             isfropt=0,
             nstrail=10, isuzn=1, nsfrsets=30, irtflg=1, numtim=2, weight=0.75, flwtol=0.0001,
             reach_data=None,
             segment_data=None,
             channel_geometry_data=None,
             channel_flow_data=None,
             dataset_5=None,
             reachinput=False, transroute=False,
             tabfiles=False, tabfiles_dict=None,
             extension='sfr', unitnumber=17):
    """
    Package constructor.

    Parameters mirror the SFR2 input variables (see the class docstring);
    reach_data and segment_data may be recarrays (segment_data may also be a
    dict of recarrays keyed by stress period).
    """
    # Call ancestor's init to set self.parent, extension, name, and unit number
    Package.__init__(self, model, extension, 'SFR', unitnumber)
    self.url = 'sfr2.htm'
    self.nper = self.parent.nrow_ncol_nlay_nper[-1]
    self.nper = 1 if self.nper == 0 else self.nper  # otherwise iterations from 0, nper won't run

    # Dataset 0 --------------------------------------------------------------
    self.heading = '# SFR2 for MODFLOW, generated by Flopy.'

    # Dataset 1a and 1b ------------------------------------------------------
    self.reachinput = reachinput
    self.transroute = transroute
    self.tabfiles = tabfiles
    self.tabfiles_dict = tabfiles_dict
    self.numtab = 0 if not tabfiles else len(tabfiles_dict)
    self.maxval = np.max([tb['numval'] for tb in tabfiles_dict.values()]) if self.numtab > 0 else 0

    # Dataset 1c -------------------------------------------------------------
    # number of reaches; a negative value flags unsaturated flow beneath
    # streams and/or transient routing
    self.nstrm = np.sign(nstrm) * len(reach_data) if reach_data is not None else nstrm
    if segment_data is not None and not isinstance(segment_data, dict):
        segment_data = {0: segment_data}
    # number of stream segments; atleast_1d because segment_data might be a
    # 0-D array.  Fall back to the nss argument when no segment_data is given
    # (the original crashed on segment_data[0] in that case).
    self.nss = len(np.atleast_1d(segment_data[0])) if segment_data is not None else nss
    self.nsfrpar = nsfrpar
    self.nparseg = nparseg
    self.const = const  # conversion factor used in calculating stream depth for stream reach (icalc = 1 or 2)
    self.dleak = dleak  # tolerance level of stream depth used in computing leakage
    # flag; unit number for stream leakage output (0: no cell-by-cell terms
    # are written).  NOTE(review): any nonzero istcb1 is coerced to +/-53
    # rather than using the caller's unit number -- confirm intent.
    if istcb1 != 0:
        if istcb1 < 0:
            self.istcb1 = -53
        else:
            self.istcb1 = 53
    else:
        self.istcb1 = 0
    self.istcb2 = istcb2  # flag; unit number for writing table of SFR output to text file
    # if nstrm < 0
    self.isfropt = isfropt  # format of the input data and whether unsaturated flow is simulated
    # if isfropt > 1
    self.nstrail = nstrail  # number of trailing wave increments
    self.isuzn = isuzn  # max number of vertical cells used to define unsat. zone
    self.nsfrsets = nsfrsets  # max number of trailing wave sets
    # if nstrm < 0 (MF-2005 only)
    self.irtflag = irtflg  # switch for transient streamflow routing (> 0 = kinematic wave)
    # if irtflag > 0
    self.numtim = numtim  # number of subtimesteps used for routing
    self.weight = weight  # time weighting factor used to calculate the change in channel storage
    self.flwtol = flwtol  # streamflow tolerance for convergence of the kinematic wave equation

    # Dataset 2 --------------------------------------------------------------
    self.reach_data = self.get_empty_reach_data(np.abs(self.nstrm))
    if reach_data is not None:
        for n in reach_data.dtype.names:
            self.reach_data[n] = reach_data[n]
    # assign node numbers if there are none (structured grid); guard against
    # np.diff on fewer than two reaches, which would raise on .max()
    if len(self.reach_data) > 1 and np.diff(self.reach_data.node).max() == 0 \
            and 'DIS' in self.parent.get_package_list():
        # first make kij list (one-based for get_node)
        lrc = self.reach_data[['k', 'i', 'j']]
        lrc = (lrc.view((int, len(lrc.dtype.names))) + 1).tolist()
        self.reach_data['node'] = self.parent.dis.get_node(lrc)
    # assign unique ID and outreach columns to each reach
    self.reach_data.sort(order=['iseg', 'ireach'])
    new_cols = {'reachID': np.arange(1, len(self.reach_data) + 1),
                'outreach': np.zeros(len(self.reach_data))}
    for k, v in new_cols.items():
        if k not in self.reach_data.dtype.names:
            # BUG FIX: append_fields returns a new array; the original code
            # discarded the result, so the columns were never actually added.
            self.reach_data = recfunctions.append_fields(self.reach_data, names=k, data=v,
                                                         asrecarray=True)
    # create a stress_period_data attribute to enable parent functions (e.g. plot)
    self.stress_period_data = MfList(self, self.reach_data, dtype=self.reach_data.dtype)

    # Datasets 4 and 6 -------------------------------------------------------
    # list of values that indicate segments outside of the model
    # (depending on how SFR package was constructed)
    self.not_a_segment_values = [999999]
    self.segment_data = {0: self.get_empty_segment_data(self.nss)}
    if segment_data is not None:
        for i in segment_data.keys():
            self.segment_data[i] = self.get_empty_segment_data(self.nss)
            for n in segment_data[i].dtype.names:
                self.segment_data[i][n] = segment_data[i][n]
    # compute outreaches if nseg and outseg columns have non-default values
    if len(self.segment_data[0]) == 1 or \
            np.diff(self.segment_data[0].nseg).max() != 0 and np.diff(
            self.segment_data[0].outseg).max() != 0:
        # first convert any not_a_segment_values to 0
        for v in self.not_a_segment_values:
            self.segment_data[0].outseg[self.segment_data[0].outseg == v] = 0
        self.get_outreaches()
    self.channel_geometry_data = channel_geometry_data
    self.channel_flow_data = channel_flow_data

    # Dataset 5 --------------------------------------------------------------
    self.dataset_5 = dataset_5

    # Attributes not included in SFR package input
    self.outsegs = {}  # dictionary of arrays; see Attributes section of documentation
    self.outlets = {}  # nested dictionary of format {per: {segment: outlet}}
    # input format checks
    assert isfropt in [0, 1, 2, 3, 4, 5]
    self.parent.add_package(self)
@staticmethod
def get_empty_reach_data(nreaches=0, aux_names=None, structured=True, default_value=-1.0E+10):
    """Return an empty reach_data (item 2) recarray of length nreaches.

    All fields are filled with default_value except 'reachID', which is set
    to consecutive one-based IDs.  aux_names, if given, adds float32 columns
    via Package.add_to_dtype.
    """
    # get an empty recarray that corresponds to dtype
    dtype = ModflowSfr2.get_default_reach_dtype(structured=structured)
    if aux_names is not None:
        dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
    # NOTE(review): zeros() is given the compound dtype together with a 2-D
    # shape, and fromarrays() below is then fed the transposed structured
    # array -- numpy semantics here are subtle and version-sensitive; verify
    # the result is a 1-D recarray of nreaches records.
    d = np.zeros((nreaches, len(dtype)), dtype=dtype)
    d[:, :] = default_value
    d = np.core.records.fromarrays(d.transpose(), dtype=dtype)
    d['reachID'] = np.arange(1, nreaches + 1)
    return d
@staticmethod
def get_empty_segment_data(nsegments=0, aux_names=None, default_value=-1.0E+10):
    """Return an empty segment_data (item 6) recarray of length nsegments.

    All fields are filled with default_value.  aux_names, if given, adds
    float32 columns via Package.add_to_dtype.
    """
    # get an empty recarray that corresponds to dtype
    dtype = ModflowSfr2.get_default_segment_dtype()
    if aux_names is not None:
        dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
    # NOTE(review): same zeros()/fromarrays() construction as
    # get_empty_reach_data -- verify it yields a 1-D recarray of records.
    d = np.zeros((nsegments, len(dtype)), dtype=dtype)
    d[:, :] = default_value
    return np.core.records.fromarrays(d.transpose(), dtype=dtype)
@staticmethod
def get_default_reach_dtype(structured=True):
if structured:
# include node column for structured grids (useful for indexing)
return np.dtype([('node', np.int),
('k', np.int),
('i', np.int),
('j', np.int),
('iseg', np.int),
('ireach', np.int),
('rchlen', np.float32),
('strtop', np.float32),
('slope', np.float32),
('strthick', np.float32),
('strhc1', np.float32),
('thts', np.int),
('thti', np.float32),
('eps', np.float32),
('uhc', np.float32),
('reachID', np.int),
('outreach', np.int)])
else:
return np.dtype([('node', np.int)
('iseg', np.int),
('ireach', np.int),
('rchlen', np.float32),
('strtop', np.float32),
('slope', np.float32),
('strthick', np.float32),
('strhc1', np.float32),
('thts', np.int),
('thti', np.float32),
('eps', np.float32),
('uhc', np.float32),
('reachID', np.int),
('outreach', np.int)])
@staticmethod
def get_default_segment_dtype():
return np.dtype([('nseg', np.int),
('icalc', np.int),
('outseg', np.int),
('iupseg', np.int),
('iprior', np.int),
('nstrpts', np.int),
('flow', np.float32),
('runoff', np.float32),
('etsw', np.float32),
('pptsw', np.float32),
('roughch', np.float32),
('roughbk', np.float32),
('cdpth', np.float32),
('fdpth', np.float32),
('awdth', np.float32),
('bwdth', np.float32),
('hcond1', np.float32),
('thickm1', np.float32),
('elevup', np.float32),
('width1', np.float32),
('depth1', np.float32),
('thts1', np.float32),
('thti1', np.float32),
('eps1', np.float32),
('uhc1', np.float32),
('hcond2', np.float32),
('thickm2', np.float32),
('elevdn', np.float32),
('width2', np.float32),
('depth2', np.float32),
('thts2', np.float32),
('thti2', np.float32),
('eps2', np.float32),
('uhc2', np.float32)])
@staticmethod
def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
    """Load an SFR2 package from a file path or open handle and return a
    ModflowSfr2 instance attached to `model`.

    Parameters
    ----------
    f : str or file-like
        Path to the SFR2 input file, or an open handle supporting read().
    model : Modflow
        Model object the package will be added to.
    nper : int, optional
        Number of stress periods; taken from the model if None.
    gwt, nsol, ext_unit_dict :
        Accepted for interface consistency; not used by the visible code.

    Returns
    -------
    ModflowSfr2
    """
    if model.verbose:
        sys.stdout.write('loading sfr2 package file...\n')
    tabfiles = False
    tabfiles_dict = {}
    transroute = False
    reachinput = False
    structured = model.structured
    if nper is None:
        nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
        nper = 1 if nper == 0 else nper  # otherwise iterations from 0, nper won't run
    # open a handle if a path was passed
    if not hasattr(f, 'read'):
        filename = f
        f = open(filename, 'r')
    # Item 0 -- skip comment header lines; `line` then holds item 1 (or 1c)
    while True:
        line = next(f)
        if line[0] != '#':
            break
    # Item 1 -- optional keywords REACHINPUT / TRANSROUTE
    if "reachinput" in line.lower():
        """
        When REACHINPUT is specified, variable ISFROPT is read in data set 1c.
        ISFROPT can be used to change the default format for entering reach and segment data
        or to specify that unsaturated flow beneath streams will be simulated.
        """
        reachinput = True
    if "transroute" in line.lower():
        """When TRANSROUTE is specified, optional variables IRTFLG, NUMTIM, WEIGHT, and FLWTOL
        also must be specified in Item 1c.
        """
        transroute = True
    if transroute or reachinput:
        # the keyword line was consumed; advance to the next line
        line = next(f)
    if "tabfiles" in line.lower():
        """
        tabfiles
        An optional character variable that is a flag to indicate that inflows to one or more stream
        segments will be specified with tabular inflow files.
        numtab
        An integer value equal to the number of tabular inflow files that will be read if TABFILES
        is specified. A separate input file is required for each segment that receives specified inflow.
        Thus, the maximum value of NUMTAB that can be specified is equal to the total number of
        segments specified in Item 1c with variables NSS. The name (Fname) and unit number (Nunit)
        of each tabular file must be specified in the MODFLOW-2005 Name File using tile type (Ftype) DATA.
        maxval
        """
        tabfiles, numtab, maxval = line.strip().split()
        numtab, maxval = int(numtab), int(maxval)
        line = next(f)
    # item 1c
    nstrm, nss, nsfrpar, nparseg, const, dleak, istcb1, istcb2, \
        isfropt, nstrail, isuzn, nsfrsets, \
        irtflg, numtim, weight, flwtol, option = _parse_1c(line, reachinput=reachinput, transroute=transroute)
    # item 2
    # set column names, dtypes
    names = _get_item2_names(nstrm, reachinput, isfropt, structured)
    # NOTE(review): get_default_reach_dtype() is called with its default
    # structured=True even when the model is unstructured -- confirm.
    dtypes = [d for d in ModflowSfr2.get_default_reach_dtype().descr
              if d[0] in names]
    lines = []
    for i in range(abs(nstrm)):  # abs(): negative nstrm flags unsat. flow/routing
        line = next(f)
        line = line_parse(line)
        ireach = tuple(map(float, line[:len(dtypes)]))
        lines.append(ireach)
    tmp = np.array(lines, dtype=dtypes)
    # initialize full reach_data array with all possible columns
    reach_data = ModflowSfr2.get_empty_reach_data(len(lines))
    for n in names:
        reach_data[n] = tmp[n]  # not sure if there's a way to assign multiple columns
    # convert cell indices to zero-based convention
    inds = ['k', 'i', 'j'] if structured else ['node']
    _markitzero(reach_data, inds)
    # items 3 and 4 are skipped (parameters not supported)
    # item 5
    segment_data = {}
    channel_geometry_data = {}
    channel_flow_data = {}
    dataset_5 = {}
    aux_variables = {}  # not sure where the auxillary variables are supposed to go
    for i in range(0, nper):
        # Dataset 5
        dataset_5[i] = _get_dataset(next(f), [1, 0, 0, 0])
        itmp = dataset_5[i][0]
        if itmp > 0:
            # Item 6
            current = ModflowSfr2.get_empty_segment_data(nsegments=itmp, aux_names=option)
            current_aux = {}  # container to hold any auxillary variables
            current_6d = {}  # these could also be implemented as structured arrays with a column for segment number
            current_6e = {}
            for j in range(itmp):
                dataset_6a = _parse_6a(next(f), option)
                current_aux[j] = dataset_6a[-1]
                dataset_6a = dataset_6a[:-1]  # drop xyz
                icalc = dataset_6a[1]
                dataset_6b = _parse_6bc(next(f), icalc, nstrm, isfropt, reachinput, per=i)
                dataset_6c = _parse_6bc(next(f), icalc, nstrm, isfropt, reachinput, per=i)
                current[j] = dataset_6a + dataset_6b + dataset_6c
                if icalc == 2:
                    # ATL: not sure exactly how isfropt logic functions for this
                    # dataset 6d description suggests that this line isn't read for isfropt > 1
                    # but description of icalc suggest that icalc=2 (8-point channel) can be used with any isfropt
                    if i == 0 or nstrm > 0 and not reachinput:  # or isfropt <= 1:
                        dataset_6d = []
                        for k in range(2):
                            dataset_6d.append(_get_dataset(next(f), [0.0] * 8))
                            # dataset_6d.append(list(map(float, next(f).strip().split())))
                        current_6d[j + 1] = dataset_6d
                if icalc == 4:
                    nstrpts = dataset_6a[5]
                    dataset_6e = []
                    for k in range(3):
                        dataset_6e.append(_get_dataset(next(f), [0.0] * nstrpts))
                    current_6e[j + 1] = dataset_6e
            segment_data[i] = current
            # NOTE(review): keyed by j + 1 (last segment index of the loop
            # above) rather than by stress period `i` -- looks suspicious;
            # confirm intent.
            aux_variables[j + 1] = current_aux
            if len(current_6d) > 0:
                channel_geometry_data[i] = current_6d
            if len(current_6e) > 0:
                channel_flow_data[i] = current_6e
        if tabfiles and i == 0:
            for j in range(numtab):
                segnum, numval, iunit = map(int, next(f).strip().split())
                # NOTE(review): 'inuit' looks like a typo for 'iunit', but
                # write_file() reads the same key, so the two stay consistent.
                tabfiles_dict[segnum] = {'numval': numval, 'inuit': iunit}
        else:
            continue
    return ModflowSfr2(model, nstrm=nstrm, nss=nss, nsfrpar=nsfrpar, nparseg=nparseg, const=const, dleak=dleak,
                       istcb1=istcb1, istcb2=istcb2,
                       isfropt=isfropt, nstrail=nstrail, isuzn=isuzn, nsfrsets=nsfrsets, irtflg=irtflg,
                       numtim=numtim, weight=weight, flwtol=flwtol,
                       reach_data=reach_data,
                       segment_data=segment_data,
                       dataset_5=dataset_5,
                       channel_geometry_data=channel_geometry_data,
                       channel_flow_data=channel_flow_data,
                       reachinput=reachinput, transroute=transroute,
                       tabfiles=tabfiles, tabfiles_dict=tabfiles_dict
                       )
def check(self, f=None, verbose=True, level=1):
    """
    Check sfr2 package data for common errors.

    Parameters
    ----------
    f : str or file handle
        File name or handle for a summary file of the check output.
        A string is opened as a file (relative to the model workspace);
        None suppresses the summary file. (default is None)
    verbose : bool
        If True, check results are written to the screen.
    level : int
        Analysis level: 0 for summary checks, 1 for full checks.

    Returns
    -------
    chk : check
        The checker instance holding the accumulated results.

    Examples
    --------
    >>> import flopy
    >>> m = flopy.modflow.Modflow.load('model.nam')
    >>> m.sfr2.check()
    """
    # `check` resolves to the module-level checker class, not this method
    checker = check(self, verbose=verbose, level=level)
    for validate in (checker.numbering,
                     checker.routing,
                     checker.overlapping_conductance,
                     checker.elevations,
                     checker.slope):
        validate()
    if f is None:
        return checker
    out = f
    if isinstance(out, str):
        # interpret a string as a path relative to the model workspace
        out = open(os.path.join(self.parent.model_ws, out), 'w')
    out.write('{}\n'.format(checker.txt))
    return checker
def get_outlets(self, level=0, verbose=True):
    """Traces all routing connections from each headwater to the outlet.

    Populates self.outsegs[per] (stacked array of routing steps) and
    self.outlets[per] ({segment: outlet segment}) for each stress period,
    and returns a text summary of any circular routing detected.
    """
    txt = ''
    for per in range(self.nper):
        # skip stress periods where seg data not defined
        if per > 0 > self.dataset_5[per][0]:
            continue
        segments = self.segment_data[per].nseg
        outsegs = self.segment_data[per].outseg
        # each vstacked row is one more routing step downstream
        all_outsegs = np.vstack([segments, outsegs])
        max_outseg = all_outsegs[-1].max()
        knt = 1
        while max_outseg > 0:
            # next downstream segment for each entry of the last row
            # (0 once an outlet or the 999999 sentinel is reached)
            nextlevel = np.array([outsegs[s - 1] if s > 0 and s < 999999 else 0
                                  for s in all_outsegs[-1]])
            all_outsegs = np.vstack([all_outsegs, nextlevel])
            max_outseg = nextlevel.max()
            if max_outseg == 0:
                break
            knt += 1
            if knt > self.nss:
                # more iterations than segments: circular routing somewhere
                # subset outsegs map to only include rows with outseg number > 0 in last column
                circular_segs = all_outsegs.T[all_outsegs[-1] > 0]
                # only retain one instance of each outseg number at iteration=nss
                vals = []  # append outseg values to vals after they've appeared once
                # (True, vals.append(v))[0] evaluates to True while recording v as seen
                mask = [(True, vals.append(v))[0]
                        if v not in vals
                        else False for v in circular_segs[-1]]
                # NOTE(review): mask is built from the last ROW of the
                # transposed array but applied to columns -- confirm the
                # intended orientation.
                circular_segs = circular_segs[:, np.array(mask)]
                # cull the circular segments array to remove duplicate instances of routing circles
                circles = []
                duplicates = []
                for i in range(np.shape(circular_segs)[0]):
                    # find where values in the row equal the last value;
                    # record the index of the second to last instance of last value
                    repeat_start_ind = np.where(circular_segs[i] == circular_segs[i, -1])[0][-2:][0]
                    # use that index to slice out the repeated segment sequence
                    circular_seq = circular_segs[i, repeat_start_ind:].tolist()
                    # keep track of unique sequences of repeated segments
                    if set(circular_seq) not in circles:
                        circles.append(set(circular_seq))
                        duplicates.append(False)
                    else:
                        duplicates.append(True)
                circular_segs = circular_segs[~np.array(duplicates), :]
                txt += '{0} instances where an outlet was not found after {1} consecutive segments!\n' \
                    .format(len(circular_segs), self.nss)
                if level == 1:
                    txt += '\n'.join([' '.join(map(str, row)) for row in circular_segs]) + '\n'
                else:
                    f = 'circular_routing.csv'
                    np.savetxt(f, circular_segs, fmt='%d', delimiter=',', header=txt)
                    txt += 'See {} for details.'.format(f)
                if verbose:
                    print(txt)
                break
        # the array of segment sequences is useful for other operations,
        # such as plotting elevation profiles
        self.outsegs[per] = all_outsegs
        # create a dictionary listing outlets associated with each segment
        # outlet is the last value in each row of outseg array that is != 0 or 999999
        self.outlets[per] = {i + 1: r[(r != 0) & (r != 999999)][-1]
                             if len(r[(r != 0) & (r != 999999)]) > 0
                             else i + 1
                             for i, r in enumerate(all_outsegs.T)}
    return txt
def get_outreaches(self):
"""Determine the outreach for each SFR reach (requires a reachID column in reach_data).
Uses the segment routing specified for the first stress period to route reaches between segments.
"""
self.reach_data.sort(order=['iseg', 'ireach'])
reach_data = self.reach_data
segment_data = self.segment_data[0]
# this vectorized approach is more than an order of magnitude faster than a list comprehension
first_reaches = reach_data[reach_data.ireach == 1]
last_reaches = np.append((np.diff(reach_data.iseg) == 1), True)
reach_data.outreach = np.append(reach_data.reachID[1:], 0)
# for now, treat lakes (negative outseg number) the same as outlets
reach_data.outreach[last_reaches] = [first_reaches.reachID[s] if s > 0
else 0
for s in segment_data.outseg - 1]
self.reach_data['outreach'] = reach_data.outreach
def get_slopes(self):
    """Compute slopes by reach using values in strtop (streambed top) and rchlen (reach length)
    columns of reach_data. The slope for a reach n is computed as strtop(n+1) - strtop(n) / rchlen(n).
    Slopes for outlet reaches are assumed to be equal to slope of previous reach.

    Results are written to the 'slope' column of reach_data with the sign
    flipped to the SFR package convention.  Assumes the 'outreach' column has
    been populated (see get_outreaches) -- TODO confirm callers guarantee this.
    """
    # forward difference of streambed tops over reach length (last entry padded with 0)
    slopes = np.append(np.diff(self.reach_data.strtop), 0) / self.reach_data.rchlen
    # boolean index of the last reach in each segment
    last_reaches = np.append((np.diff(self.reach_data.iseg) == 1), True)
    last_reach_data = self.reach_data[last_reaches]
    # streambed top of each last reach's outreach; 0 flags an outlet
    last_reaches_outreach_elevs = [self.reach_data.strtop[o - 1] if o != 0 else 0
                                   for o in last_reach_data.outreach]
    second_to_last_reaches = np.append(last_reaches[1:], False)
    # compute slopes for last reaches: outlets reuse the previous reach's
    # slope; otherwise slope from this reach's top down to the outreach top
    slopes[last_reaches] = [slopes[second_to_last_reaches][i]
                            if last_reaches_outreach_elevs[i] == 0
                            else
                            (last_reaches_outreach_elevs[i] - last_reach_data.strtop[i])
                            / last_reach_data.rchlen[i]
                            for i in range(len(last_reach_data))]
    self.reach_data['slope'] = slopes * -1  # convert from numpy to sfr package convention
def get_upsegs(self):
"""From segment_data, returns nested dict of all upstream segments by segemnt,
by stress period.
Returns
-------
all_upsegs : dict
Nested dictionary of form {stress period: {segment: [list of upsegs]}}
Notes
-----
This method will not work if there are instances of circular routing.
"""
all_upsegs = {}
for per in range(self.nper):
if per > 0 > self.dataset_5[per][0]: # skip stress periods where seg data not defined
continue
segment_data = self.segment_data[per]
# make a list of adjacent upsegments keyed to outseg list in Mat2
upsegs = {o: segment_data.nseg[segment_data.outseg == o].tolist()
for o in np.unique(segment_data.outseg)}
outsegs = [k for k in list(upsegs.keys()) if k > 0] # exclude 0, which is the outlet designator
# for each outseg key, for each upseg, check for more upsegs, append until headwaters has been reached
for outseg in outsegs:
up = True
upsegslist = upsegs[outseg]
while up:
added_upsegs = []
for us in upsegslist:
if us in outsegs:
added_upsegs += upsegs[us]
if len(added_upsegs) == 0:
up = False
break
else:
upsegslist = added_upsegs
upsegs[outseg] += added_upsegs
# the above algorithm is recursive, so lower order streams get duplicated many times
# use a set to get unique upsegs
all_upsegs[per] = {u: list(set(upsegs[u])) for u in outsegs}
return all_upsegs
def renumber_segments(self):
    """Renumber segments so that segment numbering is continuous and always increases
    in the downstream direction. Experience suggests that this can substantially speed
    convergence for some models using the NWT solver.
    """
    # get renumbering info from per=0
    nseg = self.segment_data[0].nseg
    outseg = self.segment_data[0].outseg

    def reassign_upsegs(r, nexts, upsegs):
        # assign the next-highest available numbers to this "layer" of
        # upstream segments and collect the layer above it
        nextupsegs = []
        for u in upsegs:
            r[u] = nexts if u > 0 else u  # handle lakes
            nexts -= 1
            nextupsegs += list(nseg[outseg == u])
        return r, nexts, nextupsegs

    ns = len(nseg)
    # walk upstream from the outlets (outseg == 0), numbering downward from ns
    nexts = ns
    r = {0: 0}
    nextupsegs = nseg[outseg == 0]
    for i in range(ns):
        r, nexts, nextupsegs = reassign_upsegs(r, nexts, nextupsegs)
        if len(nextupsegs) == 0:
            break
    # renumber segments in all stress period data
    # NOTE(review): nseg/outseg are views into segment_data[0], which the
    # in-place field assignment and sort below mutate -- iterations for
    # additional stress periods may therefore renumber from already-renumbered
    # values; verify with multi-period data.
    for per in self.segment_data.keys():
        self.segment_data[per]['nseg'] = [r[s] for s in nseg]
        self.segment_data[per]['outseg'] = [r[s] for s in outseg]
        self.segment_data[per].sort(order='nseg')
        # NOTE(review): these asserts test the loop-invariant nseg/outseg
        # arrays rather than the freshly renumbered columns -- confirm intent.
        inds = (outseg > 0) & (nseg > outseg)
        assert not np.any(inds)
        assert len(self.segment_data[per]['nseg']) == self.segment_data[per]['nseg'].max()
    # renumber segments in reach_data
    self.reach_data['iseg'] = [r[s] for s in self.reach_data.iseg]
def _get_headwaters(self, per=0):
"""List all segments that are not outsegs (that do not have any segments upstream).
Parameters
----------
per : int
Stress period for which to list headwater segments (default 0)
Returns
-------
headwaters : np.ndarray (1-D)
One dimmensional array listing all headwater segments.
"""
upsegs = [self.segment_data[per].nseg[self.segment_data[per].outseg == s].tolist()
for s in self.segment_data[0].nseg]
return self.segment_data[per].nseg[np.array([i for i, u in enumerate(upsegs) if len(u) == 0])]
def _interpolate_to_reaches(self, segvar1, segvar2, per=0):
"""Interpolate values in datasets 6b and 6c to each reach in stream segment
Parameters
----------
segvar1 : str
Column/variable name in segment_data array for representing start of segment
(e.g. hcond1 for hydraulic conductivity)
For segments with icalc=2 (specified channel geometry); if width1 is given,
the eigth distance point (XCPT8) from dataset 6d will be used as the stream width.
For icalc=3, an abitrary width of 5 is assigned.
For icalc=4, the mean value for width given in item 6e is used.
segvar2 : str
Column/variable name in segment_data array for representing start of segment
(e.g. hcond2 for hydraulic conductivity)
per : int
Stress period with segment data to interpolate
Returns
-------
reach_values : 1D array
One dimmensional array of interpolated values of same length as reach_data array.
For example, hcond1 and hcond2 could be entered as inputs to get values for the
strhc1 (hydraulic conductivity) column in reach_data.
"""
reach_data = self.reach_data
segment_data = self.segment_data[per]
segment_data.sort(order='nseg')
reach_data.sort(order=['iseg', 'ireach'])
reach_values = []
for seg in segment_data.nseg:
reaches = reach_data[reach_data.iseg == seg]
dist = np.cumsum(reaches.rchlen) - 0.5 * reaches.rchlen
icalc = segment_data.icalc[segment_data.nseg == seg]
if 'width' in segvar1 and icalc == 2: # get width from channel cross section length
channel_geometry_data = self.channel_geometry_data[per]
reach_values += list(np.ones(len(reaches)) * channel_geometry_data[seg][0][-1])
elif 'width' in segvar1 and icalc == 3: # assign arbitrary width since width is based on flow
reach_values += list(np.ones(len(reaches)) * 5)
elif 'width' in segvar1 and icalc == 4: # assume width to be mean from streamflow width/flow table
channel_flow_data = self.channel_flow_data[per]
reach_values += list(np.ones(len(reaches)) * np.mean(channel_flow_data[seg][2]))
else:
fp = [segment_data[segment_data['nseg'] == seg][segvar1][0],
segment_data[segment_data['nseg'] == seg][segvar2][0]]
xp = [dist[0], dist[-1]]
reach_values += np.interp(dist, xp, fp).tolist()
return np.array(reach_values)
def _write_1c(self, f_sfr):
# NSTRM NSS NSFRPAR NPARSEG CONST DLEAK ISTCB1 ISTCB2
# [ISFROPT] [NSTRAIL] [ISUZN] [NSFRSETS] [IRTFLG] [NUMTIM] [WEIGHT] [FLWTOL]
f_sfr.write('{:.0f} {:.0f} {:.0f} {:.0f} {:.8f} {:.8f} {:.0f} {:.0f} '
.format(self.nstrm, self.nss, self.nsfrpar, self.nparseg,
self.const, self.dleak, self.istcb1, self.istcb2))
if self.reachinput:
self.nstrm = abs(self.nstrm) # see explanation for dataset 1c in online guide
f_sfr.write('{:.0f} '.format(self.isfropt))
if self.isfropt > 1:
f_sfr.write('{:.0f} {:.0f} {:.0f} '.format(self.nstrail,
self.isuzn,
self.nsfrsets))
if self.nstrm < 0:
f_sfr.write('{:.0f} {:.0f} {:.0f} {:.0f} '.format(self.isfropt,
self.nstrail,
self.isuzn,
self.nsfrsets))
if self.nstrm < 0 or self.transroute:
f_sfr.write('{:.0f} '.format(self.irtflag))
if self.irtflag < 0:
f_sfr.write('{:.0f} {:.8f} {:.8f} '.format(self.numtim,
self.weight,
self.flwtol))
f_sfr.write('\n')
def _write_reach_data(self, f_sfr):
    """Write item 2 (reach data), one line per reach.

    Cell indices (k/i/j or node) are written one-based, per MODFLOW convention.
    """
    assert isinstance(self.reach_data, np.recarray), "MfList.__tofile() data arg " + \
                                                     "not a recarray"
    # decide which columns to write for this input configuration
    out_cols = _get_item2_names(self.nstrm, self.reachinput, self.isfropt,
                                structured=self.parent.structured)
    # copy so the stored (zero-based) reach_data is not modified
    rec = np.recarray.copy(self.reach_data[out_cols])
    # convert cell indices from zero-based to one-based
    for name in ['k', 'i', 'j', 'node']:
        if name in out_cols:
            rec[name] += 1
    line_fmt = _fmt_string(rec)[:-1] + '\n'
    for row in rec:
        f_sfr.write(line_fmt.format(*row))
def _write_segment_data(self, i, j, f_sfr):
    """Write datasets 6a, 6b and 6c for segment j of stress period i."""
    cols = ['nseg', 'icalc', 'outseg', 'iupseg', 'iprior', 'nstrpts', 'flow', 'runoff',
            'etsw', 'pptsw', 'roughch', 'roughbk', 'cdpth', 'fdpth', 'awdth', 'bwdth']
    fmts = _fmt_string_list(self.segment_data[i][cols][j])
    # replace unset placeholder values with zero before writing
    nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \
        pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth = \
        [0 if v == self.default_value else v for v in self.segment_data[i][cols][j]]
    # dataset 6a: required fields, then the icalc/iupseg-dependent options
    f_sfr.write(' '.join(fmts[0:4]).format(nseg, icalc, outseg, iupseg) + ' ')
    if iupseg > 0:
        f_sfr.write(fmts[4].format(iprior) + ' ')
    if icalc == 4:
        f_sfr.write(fmts[5].format(nstrpts) + ' ')
    f_sfr.write(' '.join(fmts[6:10]).format(flow, runoff, etsw, pptsw) + ' ')
    if icalc in [1, 2]:
        f_sfr.write(fmts[10].format(roughch) + ' ')
    if icalc == 2:
        f_sfr.write(fmts[11].format(roughbk) + ' ')
    if icalc == 3:
        f_sfr.write(' '.join(fmts[12:16]).format(cdpth, fdpth, awdth, bwdth) + ' ')
    f_sfr.write('\n')
    # dataset 6b (upstream end of segment), then 6c (downstream end)
    self._write_6bc(i, j, f_sfr,
                    cols=['hcond1', 'thickm1', 'elevup', 'width1', 'depth1', 'thts1', 'thti1',
                          'eps1', 'uhc1'])
    self._write_6bc(i, j, f_sfr,
                    cols=['hcond2', 'thickm2', 'elevdn', 'width2', 'depth2', 'thts2', 'thti2',
                          'eps2', 'uhc2'])
def _write_6bc(self, i, j, f_sfr, cols=[]):
    """Write dataset 6b or 6c (depending on cols) for segment j of stress
    period i.  Which fields are written depends on isfropt and icalc.

    NOTE(review): cols=[] is a mutable default argument; it is only read
    here, but passing cols explicitly (as _write_segment_data does) is safer.
    """
    icalc = self.segment_data[i][j][1]
    fmts = _fmt_string_list(self.segment_data[i][cols][j])
    # replace unset placeholder values with zero before writing
    hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = \
        [0 if v == self.default_value else v for v in self.segment_data[i][cols][j]]
    if self.isfropt in [0, 4, 5] and icalc <= 0:
        # specified depth: HCOND THICKM ELEV WIDTH DEPTH
        f_sfr.write(' '.join(fmts[0:5]).format(hcond, thickm, elevupdn, width, depth) + ' ')
    elif self.isfropt in [0, 4, 5] and icalc == 1:
        f_sfr.write(fmts[0].format(hcond) + ' ')
        if i == 0:
            # unsaturated-zone properties are only written for the first stress period
            f_sfr.write(' '.join(fmts[1:4]).format(thickm, elevupdn, width) + ' ')
            f_sfr.write(' '.join(fmts[5:8]).format(thts, thti, eps) + ' ')
            if self.isfropt == 5:
                f_sfr.write(fmts[8].format(uhc) + ' ')
    elif self.isfropt in [0, 4, 5] and icalc >= 2:
        f_sfr.write(fmts[0].format(hcond) + ' ')
        if self.isfropt in [4, 5] and i > 0 and icalc == 2:
            pass
        else:
            f_sfr.write(' '.join(fmts[1:3]).format(thickm, elevupdn) + ' ')
            if self.isfropt in [4, 5] and icalc == 2 and i == 0:
                # NOTE(review): fmts[3:6] are the width/depth/thts formats but
                # the values written are thts/thti/eps (cf. fmts[5:8] in the
                # icalc == 1 branch) -- harmless if the formats are identical,
                # but confirm.
                f_sfr.write(' '.join(fmts[3:6]).format(thts, thti, eps) + ' ')
                if self.isfropt == 5:
                    f_sfr.write(fmts[8].format(uhc) + ' ')
            else:
                pass
    elif self.isfropt == 1 and icalc <= 1:
        f_sfr.write(fmts[3].format(width) + ' ')
        if icalc <= 0:
            f_sfr.write(fmts[4].format(depth) + ' ')
    elif self.isfropt in [2, 3] and icalc <= 1:
        if i > 0:
            pass
        else:
            f_sfr.write(fmts[3].format(width) + ' ')
            if icalc <= 0:
                f_sfr.write(fmts[4].format(depth) + ' ')
    else:
        pass
    f_sfr.write('\n')
# def plot(self, **kwargs):
# return super(ModflowSfr2, self).plot(**kwargs)
def write_file(self, filename=None):
    """
    Write the package file.

    Parameters
    ----------
    filename : str, optional
        Output path; if given, replaces self.fn_path.

    Returns
    -------
    None
    """
    if filename is not None:
        self.fn_path = filename
    f_sfr = open(self.fn_path, 'w')
    # Item 0 -- header
    f_sfr.write('{0}\n'.format(self.heading))
    # Item 1 -- optional keywords
    if self.reachinput:
        """
        When REACHINPUT is specified, variable ISFROPT is read in data set 1c.
        ISFROPT can be used to change the default format for entering reach and segment data
        or to specify that unsaturated flow beneath streams will be simulated.
        """
        f_sfr.write('reachinput ')
    if self.transroute:
        """When TRANSROUTE is specified, optional variables IRTFLG, NUMTIM, WEIGHT, and FLWTOL
        also must be specified in Item 1c.
        """
        f_sfr.write('transroute')
    if self.transroute or self.reachinput:
        f_sfr.write('\n')
    if self.tabfiles:
        """
        tabfiles
        An optional character variable that is a flag to indicate that inflows to one or more stream
        segments will be specified with tabular inflow files.
        numtab
        An integer value equal to the number of tabular inflow files that will be read if TABFILES
        is specified. A separate input file is required for each segment that receives specified inflow.
        Thus, the maximum value of NUMTAB that can be specified is equal to the total number of
        segments specified in Item 1c with variables NSS. The name (Fname) and unit number (Nunit)
        of each tabular file must be specified in the MODFLOW-2005 Name File using tile type (Ftype) DATA.
        maxval
        """
        # NOTE(review): self.tabfiles is written with '{}', so a boolean True
        # prints as 'True' rather than the 'tabfiles' keyword -- confirm
        # against how load() sets it (from the file token).
        f_sfr.write('{} {} {}\n'.format(self.tabfiles, self.numtab, self.maxval))
    self._write_1c(f_sfr)
    # item 2
    self._write_reach_data(f_sfr)
    # items 3 and 4 are skipped (parameters not supported)
    for i in range(0, self.nper):
        # item 5
        itmp = self.dataset_5[i][0]
        f_sfr.write(' '.join(map(str, self.dataset_5[i])) + '\n')
        if itmp > 0:
            # Item 6
            for j in range(itmp):
                # write datasets 6a, 6b and 6c
                self._write_segment_data(i, j, f_sfr)
                icalc = self.segment_data[i].icalc[j]
                if icalc == 2:
                    # dataset 6d (8-point channel geometry); mirror of load()
                    if i == 0 or self.nstrm > 0 and not self.reachinput:  # or isfropt <= 1:
                        for k in range(2):
                            for d in self.channel_geometry_data[i][j + 1][k]:
                                f_sfr.write('{:.2f} '.format(d))
                            f_sfr.write('\n')
                if icalc == 4:
                    # dataset 6e (flow/depth/width table)
                    # nstrpts = self.segment_data[i][j][5]
                    for k in range(3):
                        for d in self.channel_flow_data[i][j + 1][k]:
                            f_sfr.write('{:.2f} '.format(d))
                        f_sfr.write('\n')
        if self.tabfiles and i == 0:
            for j in sorted(self.tabfiles_dict.keys()):
                # NOTE(review): 'inuit' looks like a typo for 'iunit', but
                # load() writes the same key, so the round trip is consistent.
                f_sfr.write('{:.0f} {:.0f} {:.0f}\n'.format(j,
                                                            self.tabfiles_dict[j]['numval'],
                                                            self.tabfiles_dict[j]['inuit']))
        else:
            continue
    f_sfr.close()
class check:
    """
    Check SFR2 package for common errors

    Parameters
    ----------
    sfrpackage : object
        Instance of Flopy ModflowSfr2 class.
    verbose : bool
        Boolean flag used to determine if check method results are
        written to the screen
    level : int
        Check method analysis level. If level=0, summary checks are
        performed. If level=1, full checks are performed.

    Notes
    -----
    Daniel Feinstein's top 10 SFR problems (7/16/2014):
    1) cell gaps btw adjacent reaches in a single segment
    2) cell gaps btw routed segments. possibly because of re-entry problems at domain edge
    3) adjacent reaches with STOP sloping the wrong way
    4) routed segments with end/start sloping the wrong way
    5) STOP>TOP1 violations, i.e.,floaters
    6) STOP<<TOP1 violations, i.e., exaggerated incisions
    7) segments that end within one diagonal cell distance from another segment, inviting linkage
    8) circular routing of segments
    9) multiple reaches with non-zero conductance in a single cell
    10) reaches in inactive cells

    Also after running the model they will want to check for backwater effects.
    """

    def __init__(self, sfrpackage, verbose=True, level=1):
        # keep a reference to the package plus direct handles on its data
        self.sfr = sfrpackage
        self.reach_data = sfrpackage.reach_data
        self.segment_data = sfrpackage.segment_data
        self.verbose = verbose
        self.level = level
        # names of tests that passed / raised warnings / raised errors
        self.passed = []
        self.warnings = []
        self.errors = []
        # running text report, started with the package name header
        self.txt = '\n{} ERRORS:\n'.format(self.sfr.name[0])
        self.summary_array = None
    def _boolean_compare(self, array, col1, col2,
                         level0txt='{} violations encountered.',
                         level1txt='Violations:',
                         sort_ascending=True, print_delimiter=' '):
        """Compare two columns in a record array. For each row,
        tests if value in col1 is greater than col2. If any values
        in col1 are > col2, subsets array to only include rows where
        col1 is greater. Creates another column with differences
        (col1-col2), and prints the array sorted by the differences
        column (diff).

        Parameters
        ----------
        array : record array
            Array with columns to compare.
        col1 : string
            Column name in array.
        col2 : string
            Column name in array.
        sort_ascending : T/F; default True
            If True, printed array will be sorted by differences in
            ascending order.
        print_delimiter : str
            Delimiter for printed array.

        Returns
        -------
        txt : str
            Error messages and printed array (if .level attribute of
            checker is set to 1). Returns an empty string if no
            values in col1 are greater than col2.

        Notes
        -----
        info about appending to record arrays (views vs. copies and upcoming changes to numpy):
        http://stackoverflow.com/questions/22865877/how-do-i-write-to-multiple-fields-of-a-structured-array
        """
        txt = ''
        # work on a copy so appended temp columns never touch the caller's array
        array = array.copy()
        # col1/col2 may be passed as plain arrays or (name, data) tuples;
        # either way they are appended as temporary columns and referenced by name
        if isinstance(col1, np.ndarray):
            array = recfunctions.append_fields(array, names='tmp1', data=col1,
                                               asrecarray=True)
            col1 = 'tmp1'
        if isinstance(col2, np.ndarray):
            array = recfunctions.append_fields(array, names='tmp2', data=col2,
                                               asrecarray=True)
            col2 = 'tmp2'
        if isinstance(col1, tuple):
            array = recfunctions.append_fields(array, names=col1[0], data=col1[1],
                                               asrecarray=True)
            col1 = col1[0]
        if isinstance(col2, tuple):
            array = recfunctions.append_fields(array, names=col2[0], data=col2[1],
                                               asrecarray=True)
            col2 = col2[0]
        failed = array[col1] > array[col2]
        if np.any(failed):
            failed_info = array[failed].copy()
            txt += level0txt.format(len(failed_info)) + '\n'
            if self.level == 1:
                # NOTE(review): computed as col2 - col1, although the docstring
                # says (col1-col2); for failed rows this is <= 0 -- confirm
                # the intended sign before changing
                diff = failed_info[col2] - failed_info[col1]
                # keep only informative columns (non-zero, not temp/diff)
                cols = [c for c in failed_info.dtype.names if failed_info[c].sum() != 0
                        and c != 'diff'
                        and 'tmp' not in c]
                # currently failed_info[cols] results in a warning. Not sure
                # how to do this properly with a recarray.
                failed_info = recfunctions.append_fields(failed_info[cols].copy(),
                                                         names='diff',
                                                         data=diff,
                                                         asrecarray=True)
                failed_info.sort(order='diff', axis=0)
                if not sort_ascending:
                    failed_info = failed_info[::-1]
                txt += level1txt + '\n'
                txt += _print_rec_array(failed_info, delimiter=print_delimiter)
            txt += '\n'
        return txt
def _txt_footer(self, headertxt, txt, testname, passed=False, warning=True):
if len(txt) == 0 or passed:
txt += 'passed.'
self.passed.append(testname)
elif warning:
self.warnings.append(testname)
else:
self.errors.append(testname)
if self.verbose:
print(txt + '\n')
self.txt += headertxt + txt + '\n'
    def run_all(self):
        # Delegate to the parent SFR package's check() method, which runs
        # every individual check and returns the populated results.
        return self.sfr.check()
    def numbering(self):
        """checks for continuity in segment and reach numbering
        """
        headertxt = 'Checking for continuity in segment and reach numbering...\n'
        if self.verbose:
            print(headertxt.strip())
        txt = ''
        passed = False
        for per in range(self.sfr.nper):
            # chained comparison: per > 0 and 0 > itmp, i.e. skip periods
            # after the first whose itmp is negative (they reuse the
            # previous period's segment data)
            if per > 0 > self.sfr.dataset_5[per][0]:
                continue
            # check segment numbering
            txt += _check_numbers(self.sfr.nss,
                                  self.segment_data[per]['nseg'],
                                  level=self.level,
                                  datatype='segment')
            # check reach numbering
            for segment in np.arange(1, self.sfr.nss + 1):
                reaches = self.reach_data.ireach[self.reach_data.iseg == segment]
                t = _check_numbers(len(reaches),
                                   reaches,
                                   level=self.level,
                                   datatype='reach')
                if len(t) > 0:
                    txt += 'Segment {} has {}'.format(segment, t)
        if txt == '':
            passed = True
        self._txt_footer(headertxt, txt, 'continuity in segment and reach numbering', passed, warning=False)

        headertxt = 'Checking for increasing segment numbers in downstream direction...\n'
        txt = ''
        passed = False
        if self.verbose:
            print(headertxt.strip())
        for per, segment_data in self.segment_data.items():
            # outseg < nseg means the stream routes to a lower-numbered
            # segment; values <= 0 (outlets/lakes) are excluded
            decreases = segment_data.outseg[segment_data.outseg < segment_data.nseg]
            decreases = decreases[decreases > 0]
            if len(decreases) >= 1:
                txt += '{} instances of segment numbers decreasing in the downstream direction.\n'.format(len(decreases))
                txt += 'MODFLOW will run but convergence may be appreciably slowed.\n'
                if self.level == 1:
                    txt += 'at segments:'
                    t = ''
                    for s in decreases:
                        t += ' {}'.format(s)
                    txt += '\n'.join(textwrap.wrap(t, width=10))
        # NOTE(review): 't' may be left over from the reach-numbering loop
        # above when no decreases were found (or level != 1); looks like this
        # was meant to test 'txt' -- confirm before changing
        if len(t) == 0:
            passed = True
        self._txt_footer(headertxt, txt, 'segment numbering order', passed)
def routing(self):
"""checks for breaks in routing and does comprehensive check for circular routing
"""
headertxt = 'Checking for circular routing...\n'
txt = ''
if self.verbose:
print(headertxt.strip())
txt += self.sfr.get_outlets(level=self.level, verbose=False) # will print twice if verbose=True
self._txt_footer(headertxt, txt, 'circular routing', warning=False)
    def overlapping_conductance(self, tol=1e-6):
        """checks for multiple SFR reaches in one cell; and whether more than one reach has Cond > 0
        """
        headertxt = 'Checking for model cells with multiple non-zero SFR conductances...\n'
        txt = ''
        if self.verbose:
            print(headertxt.strip())
        # make nreach vectors of each conductance parameter
        reach_data = self.reach_data.copy()
        # if no dis file was supplied, can't compute node numbers
        # make nodes based on unique row, col pairs
        if np.diff(reach_data.node).max() == 0:
            # assign consecutive dummy node numbers per unique (row, col)
            uniquerc = {}
            for i, (r, c) in enumerate(reach_data[['i', 'j']]):
                if (r, c) not in uniquerc:
                    uniquerc[(r, c)] = i + 1
            reach_data['node'] = [uniquerc[(r, c)] for r, c in reach_data[['i', 'j']]]
        # fall back to segment-end values interpolated to reaches when the
        # per-reach arrays were not supplied (all zeros)
        K = reach_data.strhc1
        if K.max() == 0:
            K = self.sfr._interpolate_to_reaches('hcond1', 'hcond2')
        b = reach_data.strthick
        if b.max() == 0:
            b = self.sfr._interpolate_to_reaches('thickm1', 'thickm2')
        L = reach_data.rchlen
        w = self.sfr._interpolate_to_reaches('width1', 'width2')
        # Calculate SFR conductance for each reach
        # NOTE(review): divides by streambed thickness b; a zero thickness
        # would propagate here -- confirm inputs are validated upstream
        Cond = K * w * L / b
        shared_cells = _get_duplicates(reach_data.node)
        nodes_with_multiple_conductance = set()
        for node in shared_cells:
            # select the collocated reaches for this cell
            conductances = Cond[reach_data.node == node].copy()
            conductances.sort()
            # list nodes with multiple non-zero SFR reach conductances:
            # min/max ratio above tol means more than one reach in the
            # cell carries a meaningful conductance
            if conductances[0] / conductances[-1] > tol:
                nodes_with_multiple_conductance.update({node})
        if len(nodes_with_multiple_conductance) > 0:
            txt += '{} model cells with multiple non-zero SFR conductances found.\n' \
                   'This may lead to circular routing between collocated reaches.\n' \
                .format(len(nodes_with_multiple_conductance))
            if self.level == 1:
                txt += 'Nodes with overlapping conductances:\n'
                reach_data['strthick'] = b
                reach_data['strhc1'] = K
                # NOTE(review): cols is computed before width/conductance are
                # appended, so those two columns are dropped again by the
                # subset below -- confirm that is intended
                cols = [c for c in reach_data.dtype.names if c in \
                        ['node', 'k', 'i', 'j', 'iseg', 'ireach', 'rchlen', 'strthick', 'strhc1']]
                reach_data = recfunctions.append_fields(reach_data,
                                                        names=['width', 'conductance'],
                                                        data=[w, Cond],
                                                        usemask=False,
                                                        asrecarray=True)
                has_multiple = np.array([True if n in nodes_with_multiple_conductance
                                         else False for n in reach_data.node])
                reach_data = reach_data[has_multiple].copy()
                reach_data = reach_data[cols].copy()
                txt += _print_rec_array(reach_data, delimiter='\t')
        self._txt_footer(headertxt, txt, 'overlapping conductance')
    def elevations(self):
        """checks for multiple SFR reaches in one cell; and whether more than one reach has Cond > 0
        """
        # --- check 1: downstream rises within/between segments (item 6b/c data) ---
        headertxt = 'Checking segment_data for downstream rises in streambed elevation...\n'
        txt = ''
        if self.verbose:
            print(headertxt.strip())
        # decide whether to check elevup and elevdn from items 6b/c
        # (see online guide to SFR input; Data Set 6b description)
        passed = False
        if self.sfr.isfropt in [0, 4, 5]:
            pers = sorted(self.segment_data.keys())
            for per in pers:
                # drop segments flagged with the -999999 nodata value
                segment_data = self.segment_data[per][self.segment_data[per].elevup > -999999]
                # enforce consecutive increasing segment numbers (for indexing)
                segment_data.sort(order='nseg')
                t = _check_numbers(len(segment_data), segment_data.nseg, level=1, datatype='Segment')
                if len(t) > 0:
                    raise Exception('Elevation check requires consecutive segment numbering.')
                # first check for segments where elevdn > elevup
                d_elev = segment_data.elevdn - segment_data.elevup
                segment_data = recfunctions.append_fields(segment_data, names='d_elev', data=d_elev,
                                                          asrecarray=True)
                txt += self._boolean_compare(segment_data[['nseg', 'outseg', 'elevup', 'elevdn',
                                                           'd_elev']].copy(),
                                             col1='d_elev', col2=np.zeros(len(segment_data)),
                                             level0txt='Stress Period {}: '.format(per + 1) + \
                                                       '{} segments encountered with elevdn > elevup.',
                                             level1txt='Backwards segments:',
                                             )
                # next check for rises between segments
                non_outlets = segment_data.outseg > 0
                non_outlets_seg_data = segment_data[non_outlets]  # lake outsegs are < 0
                # elevup of each segment's outseg (1-based outseg -> 0-based index)
                outseg_elevup = np.array([segment_data.elevup[o - 1] for o in segment_data.outseg if o > 0])
                d_elev2 = outseg_elevup - segment_data.elevdn[non_outlets]
                non_outlets_seg_data = recfunctions.append_fields(non_outlets_seg_data,
                                                                  names=['outseg_elevup', 'd_elev2'],
                                                                  data=[outseg_elevup, d_elev2],
                                                                  asrecarray=True)
                txt += self._boolean_compare(non_outlets_seg_data[['nseg', 'outseg', 'elevdn',
                                                                   'outseg_elevup', 'd_elev2']].copy(),
                                             col1='d_elev2', col2=np.zeros(len(non_outlets_seg_data)),
                                             level0txt='Stress Period {}: '.format(per + 1) + \
                                                       '{} segments encountered with segments encountered ' \
                                                       'with outseg elevup > elevdn.',
                                             level1txt='Backwards segment connections:',
                                             )
            if len(txt) == 0:
                passed = True
        else:
            txt += 'Segment elevup and elevdn not specified for nstrm={} and isfropt={}\n' \
                .format(self.sfr.nstrm, self.sfr.isfropt)
            passed = True
        self._txt_footer(headertxt, txt, 'segment elevations', passed)

        # --- check 2: downstream rises between reaches (per-reach strtop) ---
        headertxt = 'Checking reach_data for downstream rises in streambed elevation...\n'
        txt = ''
        if self.verbose:
            print(headertxt.strip())
        passed = False
        if self.sfr.nstrm < 0 or self.sfr.reachinput and self.sfr.isfropt in [1, 2, 3]:  # see SFR input instructions
            # first get an outreach for each reach
            if np.diff(self.sfr.reach_data.outreach).max() == 0:  # not sure if this is the best test
                self.sfr.get_outreaches()
            reach_data = self.sfr.reach_data  # inconsistent with other checks that work with
            # reach_data attribute of check class. Want to have get_outreaches as a method of sfr class
            # (for other uses). Not sure if other check methods should also copy reach_data directly from
            # SFR package instance for consistency.
            # use outreach values to get downstream elevations
            non_outlets = reach_data[reach_data.outreach != 0]
            outreach_elevdn = np.array([reach_data.strtop[o - 1] for o in reach_data.outreach])
            d_strtop = outreach_elevdn[reach_data.outreach != 0] - non_outlets.strtop
            non_outlets = recfunctions.append_fields(non_outlets,
                                                     names=['strtopdn', 'd_strtop'],
                                                     data=[outreach_elevdn, d_strtop],
                                                     asrecarray=True)
            txt += self._boolean_compare(non_outlets[['k', 'i', 'j', 'iseg', 'ireach',
                                                      'strtop', 'strtopdn', 'd_strtop', 'reachID']].copy(),
                                         col1='d_strtop', col2=np.zeros(len(non_outlets)),
                                         level0txt='{} reaches encountered with strtop < strtop of downstream reach.',
                                         level1txt='Elevation rises:',
                                         )
            if len(txt) == 0:
                passed = True
        else:
            txt += 'Reach strtop not specified for nstrm={}, reachinput={} and isfropt={}\n' \
                .format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt)
            passed = True
        self._txt_footer(headertxt, txt, 'reach elevations', passed)

        # --- check 3: reach elevations vs. the model grid (needs DIS) ---
        headertxt = 'Checking reach_data for inconsistencies between streambed elevations and the model grid...\n'
        if self.verbose:
            print(headertxt.strip())
        txt = ''
        if self.sfr.parent.dis is None:
            txt += 'No DIS file supplied; cannot check SFR elevations agains model grid.'
            self._txt_footer(headertxt, txt, '')
            return
        passed = False
        warning = True
        if self.sfr.nstrm < 0 or self.sfr.reachinput and self.sfr.isfropt in [1, 2, 3]:  # see SFR input instructions
            reach_data = self.reach_data
            i, j, k = reach_data.i, reach_data.j, reach_data.k
            # check streambed bottoms in relation to respective cell bottoms
            bots = self.sfr.parent.dis.botm.array[k, i, j]
            streambed_bots = reach_data.strtop - reach_data.strthick
            reach_data = recfunctions.append_fields(reach_data,
                                                    names=['layerbot', 'strbot'],
                                                    data=[bots, streambed_bots],
                                                    asrecarray=True)
            txt += self._boolean_compare(reach_data[['k', 'i', 'j', 'iseg', 'ireach',
                                                     'strtop', 'strthick', 'strbot', 'layerbot',
                                                     'reachID']].copy(),
                                         col1='layerbot', col2='strbot',
                                         level0txt='{} reaches encountered with streambed bottom below layer bottom.',
                                         level1txt='Layer bottom violations:',
                                         )
            if len(txt) > 0:
                warning = False  # this constitutes an error (MODFLOW won't run)
            # check streambed elevations in relation to model top
            tops = self.sfr.parent.dis.top.array[i, j]
            reach_data = recfunctions.append_fields(reach_data, names='modeltop', data=tops, asrecarray=True)
            txt += self._boolean_compare(reach_data[['k', 'i', 'j', 'iseg', 'ireach',
                                                     'strtop', 'modeltop', 'reachID']].copy(),
                                         col1='strtop', col2='modeltop',
                                         level0txt='{} reaches encountered with streambed above model top.',
                                         level1txt='Model top violations:',
                                         )
            if len(txt) == 0:
                passed = True
        else:
            txt += 'Reach strtop, strthick not specified for nstrm={}, reachinput={} and isfropt={}\n' \
                .format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt)
            passed = True
        self._txt_footer(headertxt, txt, 'reach elevations vs. grid elevations', passed, warning=warning)

        # --- check 4: segment-end elevations vs. the model grid ---
        # In cases where segment end elevations/thicknesses are used,
        # do these need to be checked for consistency with layer bottoms?
        headertxt = 'Checking segment_data for inconsistencies between segment end elevations and the model grid...\n'
        txt = ''
        if self.verbose:
            print(headertxt.strip())
        passed = False
        if self.sfr.isfropt in [0, 4, 5]:
            reach_data = self.reach_data
            pers = sorted(self.segment_data.keys())
            for per in pers:
                segment_data = self.segment_data[per][self.segment_data[per].elevup > -999999]
                # enforce consecutive increasing segment numbers (for indexing)
                segment_data.sort(order='nseg')
                t = _check_numbers(len(segment_data), segment_data.nseg, level=1, datatype='Segment')
                if len(t) > 0:
                    raise Exception('Elevation check requires consecutive segment numbering.')
            # represent each segment by its first and last reach, then assign
            # the segment-end elevations (elevup/elevdn) to those reaches
            first_reaches = reach_data[reach_data.ireach == 1].copy()
            last_reaches = reach_data[np.append((np.diff(reach_data.iseg) == 1), True)].copy()
            segment_ends = recfunctions.stack_arrays([first_reaches, last_reaches],
                                                     asrecarray=True, usemask=False)
            segment_ends['strtop'] = np.append(segment_data.elevup, segment_data.elevdn)
            i, j = segment_ends.i, segment_ends.j
            tops = self.sfr.parent.dis.top.array[i, j]
            diff = tops - segment_ends.strtop
            segment_ends = recfunctions.append_fields(segment_ends,
                                                      names=['modeltop', 'diff'],
                                                      data=[tops, diff],
                                                      asrecarray=True)
            txt += self._boolean_compare(segment_ends[['k', 'i', 'j', 'iseg',
                                                       'strtop', 'modeltop', 'diff', 'reachID']].copy(),
                                         col1=np.zeros(len(segment_ends)), col2='diff',
                                         level0txt='{} reaches encountered with streambed above model top.',
                                         level1txt='Model top violations:',
                                         )
            if len(txt) == 0:
                passed = True
        else:
            txt += 'Segment elevup and elevdn not specified for nstrm={} and isfropt={}\n' \
                .format(self.sfr.nstrm, self.sfr.isfropt)
            passed = True
        self._txt_footer(headertxt, txt, 'segment elevations vs. model grid', passed)
def slope(self, minimum_slope=1e-4, maximum_slope=1.0):
"""Checks that streambed slopes are greater than or equal to a specified minimum value.
Low slope values can cause "backup" or unrealistic stream stages with icalc options
where stage is computed.
"""
headertxt = 'Checking for streambed slopes of less than {}...\n'.format(minimum_slope)
txt = ''
if self.verbose:
print(headertxt.strip())
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.slope).max() == 0:
txt += 'isfropt setting of 1,2 or 3 requries slope information!\n'
else:
is_less = self.reach_data.slope < minimum_slope
if np.any(is_less):
below_minimum = self.reach_data[is_less]
txt += '{} instances of streambed slopes below minimum found.\n'.format(len(below_minimum))
if self.level == 1:
txt += 'Reaches with low slopes:\n'
txt += _print_rec_array(below_minimum, delimiter='\t')
if len(txt) == 0:
passed = True
else:
txt += 'slope not specified for isfropt={}\n'.format(self.sfr.isfropt)
passed = True
self._txt_footer(headertxt, txt, 'minimum slope', passed)
headertxt = 'Checking for streambed slopes of greater than {}...\n'.format(maximum_slope)
txt = ''
if self.verbose:
print(headertxt.strip())
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.slope).max() == 0:
txt += 'isfropt setting of 1,2 or 3 requries slope information!\n'
else:
is_greater = self.reach_data.slope > maximum_slope
if np.any(is_greater):
above_max = self.reach_data[is_greater]
txt += '{} instances of streambed slopes above maximum found.\n'.format(len(above_max))
if self.level == 1:
txt += 'Reaches with high slopes:\n'
txt += _print_rec_array(above_max, delimiter='\t')
if len(txt) == 0:
passed = True
else:
txt += 'slope not specified for isfropt={}\n'.format(self.sfr.isfropt)
passed = True
self._txt_footer(headertxt, txt, 'maximum slope', passed)
def _check_numbers(n, numbers, level=1, datatype='reach'):
"""Check that a sequence of numbers is consecutive
(that the sequence is equal to the range from 1 to n+1, where n is the expected length of the sequence).
Parameters
----------
n : int
Expected length of the sequence (i.e. number of stream segments)
numbers : array
Sequence of numbers (i.e. 'nseg' column from the segment_data array)
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
datatype : str, optional
Only used for reporting.
"""
txt = ''
num_range = np.arange(1, n + 1)
if not np.array_equal(num_range, numbers):
txt += 'Invalid {} numbering\n'.format(datatype)
if level == 1:
non_consecutive = np.append(np.diff(numbers) != 1, False) # consistent dimmension for boolean array
gaps = num_range[non_consecutive] + 1
if len(gaps) > 0:
gapstr = ' '.join(map(str, gaps))
txt += 'Gaps in numbering at positions {}\n'.format(gapstr)
return txt
def _isnumeric(str):
try:
float(str)
return True
except:
return False
def _markitzero(recarray, inds):
"""subtracts 1 from columns specified in inds argument, to convert from 1 to 0-based indexing
"""
lnames = [n.lower() for n in recarray.dtype.names]
for idx in inds:
if (idx in lnames):
recarray[idx] -= 1
def _pop_item(line):
if len(line) > 0:
return line.pop(0)
return 0
def _get_dataset(line, dataset):
    """Parse whitespace-delimited numbers from *line* into *dataset*.

    Tokens containing a decimal point become floats, the rest ints;
    parsing stops at the first token that is not numeric, leaving the
    remaining default values in *dataset* untouched.

    Fixes: removed the unused local ``tmp``; narrowed the bare ``except``
    clauses to ``ValueError`` (the failure int()/float() raise on
    non-numeric strings).

    Parameters
    ----------
    line : str
        line read from SFR package input file
    dataset : list
        default values, overwritten in place by the parsed values

    Returns
    -------
    list
        the (partially) filled *dataset*
    """
    # interpret number supplied with decimal points as floats, rest as ints
    # this could be a bad idea (vs. explicitly formatting values for each dataset)
    for i, s in enumerate(line_parse(line)):
        try:
            n = int(s)
        except ValueError:
            try:
                n = float(s)
            except ValueError:
                break  # first non-numeric token ends the dataset
        dataset[i] = n
    return dataset
def _get_duplicates(a):
"""Returns duplcate values in an array, similar to pandas .duplicated() method
http://stackoverflow.com/questions/11528078/determining-duplicate-values-in-an-array
"""
s = np.sort(a, axis=None)
equal_to_previous_item = np.append(s[1:] == s[:-1], False) # maintain same dimmension for boolean array
return np.unique(s[equal_to_previous_item])
def _get_item2_names(nstrm, reachinput, isfropt, structured=False):
"""Determine which variables should be in item 2, based on model grid type,
reachinput specification, and isfropt.
Returns
-------
names : list of str
List of names (same as variables in SFR Package input instructions) of columns
to assign (upon load) or retain (upon write) in reach_data array.
Notes
-----
Lowercase is used for all variable names.
"""
names = []
if structured:
names += ['k', 'i', 'j']
else:
names += ['node']
names += ['iseg', 'ireach', 'rchlen']
if nstrm < 0 or reachinput:
if isfropt in [1, 2, 3]:
names += ['strtop', 'slope', 'strthick', 'strhc1']
if isfropt in [2, 3]:
names += ['thts', 'thti', 'eps']
if isfropt == 3:
names += ['uhc']
return names
def _fmt_string(array, float_format='{}'):
fmt_string = ''
for field in array.dtype.descr:
vtype = field[1][1].lower()
if (vtype == 'i'):
fmt_string += '{:.0f} '
elif (vtype == 'f'):
fmt_string += '{} '.format(float_format)
elif (vtype == 'o'):
fmt_string += '{} '
elif (vtype == 's'):
raise Exception("MfList error: '\str\' type found it dtype." + \
" This gives unpredictable results when " + \
"recarray to file - change to \'object\' type")
else:
raise Exception("MfList.fmt_string error: unknown vtype " + \
"in dtype:" + vtype)
return fmt_string
def _fmt_string_list(array, float_format='{}'):
fmt_string = []
for field in array.dtype.descr:
vtype = field[1][1].lower()
if (vtype == 'i'):
fmt_string += ['{:.0f}']
elif (vtype == 'f'):
fmt_string += [float_format]
elif (vtype == 'o'):
fmt_string += ['{}']
elif (vtype == 's'):
raise Exception("MfList error: '\str\' type found it dtype." + \
" This gives unpredictable results when " + \
"recarray to file - change to \'object\' type")
else:
raise Exception("MfList.fmt_string error: unknown vtype " + \
"in dtype:" + vtype)
return fmt_string
def _print_rec_array(array, cols=None, delimiter=' ', float_format='{:.6f}'):
    """Print out a numpy record array to string, with column names.

    Parameters
    ----------
    cols : list of strings
        List of columns to print.
    delimiter : string
        Delimited to use.

    Returns
    -------
    txt : string
        Text string of array.
    """
    if cols is None:
        use_cols = list(array.dtype.names)
    else:
        # preserve the array's own column order, keeping only requested names
        use_cols = [c for c in array.dtype.names if c in cols]
    # drop columns with no data (everything at/below the nodata flag)
    if np.shape(array)[0] > 1:
        use_cols = [c for c in use_cols if array[c].min() > -999999]
    fmts = _fmt_string_list(array[use_cols], float_format=float_format)
    header = delimiter.join(use_cols)
    rows = [delimiter.join(fmts).format(*rec) for rec in array[use_cols].copy().tolist()]
    return header + '\n' + '\n'.join(rows)
def _parse_1c(line, reachinput, transroute):
    """Parse Data Set 1c for SFR2 package.
    See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info

    Parameters
    ----------
    line : str
        line read from SFR package input file

    Returns
    -------
    a list of length 13 containing all variables for Data Set 6a
    """
    na = 0  # placeholder for variables not present on this line
    # line = _get_dataset(line, [0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 1, 30, 1, 2, 0.75, 0.0001, []])
    # line = line.strip().split()
    line = line_parse(line)
    # the pop(0) order below must mirror the input-guide variable order
    nstrm = int(line.pop(0))
    nss = int(line.pop(0))
    nsfrpar = int(line.pop(0))
    nparseg = int(line.pop(0))
    const = float(line.pop(0))
    dleak = float(line.pop(0))
    istcb1 = int(line.pop(0))
    istcb2 = int(line.pop(0))
    isfropt, nstrail, isuzn, nsfrsets = na, na, na, na
    if reachinput:
        nstrm = abs(nstrm)  # see explanation for dataset 1c in online guide
        isfropt = int(line.pop(0))
        # unsaturated-zone variables only follow for isfropt > 1
        if isfropt > 1:
            nstrail = int(line.pop(0))
            isuzn = int(line.pop(0))
            nsfrsets = int(line.pop(0))
    if nstrm < 0:
        isfropt = int(line.pop(0))
        nstrail = int(line.pop(0))
        isuzn = int(line.pop(0))
        nsfrsets = int(line.pop(0))
    irtflg, numtim, weight, flwtol = na, na, na, na
    if nstrm < 0 or transroute:
        irtflg = int(_pop_item(line))
        if irtflg > 0:
            numtim = int(line.pop(0))
            # NOTE(review): weight and flwtol are cast to int here although
            # the SFR input guide describes them as real numbers -- confirm
            weight = int(line.pop(0))
            flwtol = int(line.pop(0))
    # auxillary variables (MODFLOW-LGR): any token preceded by one
    # containing 'aux' is collected as an option name
    option = [line[i] for i in np.arange(1, len(line)) if 'aux' in line[i - 1].lower()]
    return nstrm, nss, nsfrpar, nparseg, const, dleak, istcb1, istcb2, \
           isfropt, nstrail, isuzn, nsfrsets, irtflg, numtim, weight, flwtol, option
def _parse_6a(line, option):
    """Parse Data Set 6a for SFR2 package.
    See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info

    Parameters
    ----------
    line : str
        line read from SFR package input file

    Returns
    -------
    a list of length 13 containing all variables for Data Set 6a
    """
    # line = line.strip().split()
    line = line_parse(line)
    xyz = []
    # handle any aux variables at end of line
    for i, s in enumerate(line):
        if s.lower() in option:
            xyz.append(s.lower())
    na = 0  # placeholder for variables absent under the current options
    # NOTE(review): nvalues is computed but not used below
    nvalues = sum([_isnumeric(s) for s in line])
    # line = _get_dataset(line, [0] * nvalues)
    # pop order follows the input-guide variable order for item 6a
    nseg = int(line.pop(0))
    icalc = int(line.pop(0))
    outseg = int(line.pop(0))
    iupseg = int(line.pop(0))
    iprior = na
    nstrpts = na
    # iprior only present for diversions (iupseg > 0)
    if iupseg > 0:
        iprior = int(line.pop(0))
    # nstrpts only present when the flow table option (icalc=4) is used
    if icalc == 4:
        nstrpts = int(line.pop(0))
    flow = float(line.pop(0))
    runoff = float(line.pop(0))
    etsw = float(line.pop(0))
    pptsw = float(line.pop(0))
    roughch = na
    roughbk = na
    # channel (and bank) roughness only read for Manning's-equation options
    if icalc in [1, 2]:
        roughch = float(line.pop(0))
        if icalc == 2:
            roughbk = float(line.pop(0))
    cdpth, fdpth, awdth, bwdth = na, na, na, na
    # power-function coefficients for icalc=3 consume the rest of the line
    if icalc == 3:
        cdpth, fdpth, awdth, bwdth = map(float, line)
    return nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \
           pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth, xyz
def _parse_6bc(line, icalc, nstrm, isfropt, reachinput, per=0):
    """Parse Data Set 6b for SFR2 package.
    See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info

    Parameters
    ----------
    line : str
        line read from SFR package input file

    Returns
    -------
    a list of length 9 containing all variables for Data Set 6b
    """
    na = 0
    # line = [s for s in line.strip().split() if s.isnumeric()]
    nvalues = sum([_isnumeric(s) for s in line_parse(line)])
    line = _get_dataset(line, [0] * nvalues)
    # defaults; which subset is read depends on isfropt/icalc/per below
    hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = [0.0] * 9
    if isfropt in [0, 4, 5] and icalc <= 0:
        # specified-stage segments: full hydraulic description on this line
        hcond = line.pop(0)
        thickm = line.pop(0)
        elevupdn = line.pop(0)
        width = line.pop(0)
        depth = line.pop(0)
    elif isfropt in [0, 4, 5] and icalc == 1:
        hcond = line.pop(0)
        # geometry is only given for the first stress period
        if per == 0:
            thickm = line.pop(0)
            elevupdn = line.pop(0)
            width = line.pop(0)  # depth is not read if icalc == 1; see table in online guide
            thts = _pop_item(line)
            thti = _pop_item(line)
            eps = _pop_item(line)
            if isfropt == 5:
                uhc = line.pop(0)
    elif isfropt in [0, 4, 5] and icalc >= 2:
        hcond = line.pop(0)
        if isfropt in [4, 5] and per > 0 and icalc == 2:
            pass
        else:
            thickm = line.pop(0)
            elevupdn = line.pop(0)
            if isfropt in [4, 5] and icalc == 2 and per == 0:
                # table in online guide suggests that the following items should be present in this case
                # but in the example
                thts = _pop_item(line)
                thti = _pop_item(line)
                eps = _pop_item(line)
                if isfropt == 5:
                    uhc = _pop_item(line)
            else:
                pass
    elif isfropt == 1 and icalc <= 1:
        # per-reach input: only width (and depth for specified stage)
        width = line.pop(0)
        if icalc <= 0:
            depth = line.pop(0)
    elif isfropt in [2, 3] and icalc <= 1:
        if per > 0:
            pass
        else:
            width = line.pop(0)
            if icalc <= 0:
                depth = line.pop(0)
    else:
        pass
    return hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc
| {
"repo_name": "mrustl/flopy",
"path": "flopy/modflow/mfsfr2.py",
"copies": "1",
"size": "94999",
"license": "bsd-3-clause",
"hash": -7441174444280513000,
"line_mean": 45.547047047,
"line_max": 192,
"alpha_frac": 0.5324266571,
"autogenerated": false,
"ratio": 4.004341595009273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5036768252109273,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aleivag'

# Fixes: the os / distutils imports were duplicated; the README file handle
# leaked when read() failed; both bare excepts narrowed to IOError so that
# only missing/unreadable files fall back to the defaults.
import os
from distutils.core import setup

from epifpm import __version__

_HERE = os.path.dirname(__file__)

# Long description comes from README.rst when present; packaging still
# works from a checkout without it.
try:
    with open(os.path.join(_HERE, 'README.rst')) as f:
        long_description = f.read()
except IOError:
    long_description = ''

# Install requirements are read from requirements.txt, one per line;
# a missing file simply means no requirements.
reqs = []
try:
    with open(os.path.join(_HERE, 'requirements.txt')) as f:
        reqs.extend(line.strip() for line in f)
except IOError:
    pass
# Register the package metadata with distutils.
# Fix: corrected the user-facing description typo "scaners" -> "scanners".
setup(
    name='epifpm',
    version=__version__,
    packages=['epifpm'],
    author='Alvaro Leiva',
    author_email='aleivag@gmail.com',
    url='https://github.com/Epi10/epifpm',
    classifiers=[
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities"
    ],
    keywords=['fingerprint', 'serial'],
    description='library to read from fingerprint scanners in python',
    long_description=long_description,
    install_requires=reqs,
    license='MIT'
)
| {
"repo_name": "Epi10/epifpm",
"path": "setup.py",
"copies": "1",
"size": "1181",
"license": "mit",
"hash": 3817331868641952000,
"line_mean": 23.6041666667,
"line_max": 80,
"alpha_frac": 0.6265876376,
"autogenerated": false,
"ratio": 3.6450617283950617,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9744321691097942,
"avg_score": 0.005465534979423869,
"num_lines": 48
} |
__author__ = 'aleivag'

import logging

import serial  # pyserial: UART link to the fingerprint sensor

from cStringIO import StringIO  # Python 2 only

# Every package sent to the sensor starts with this two-byte header.
HEADER = [0xEF, 0x01]

# Instruction codes for the sensor's command set.
PACKAGE_HANDSHAKE = 0x17  #: To greet (and posible ping) the fingerprint
PACKAGE_EMPTY = 0x0d  # clear the fingerprint library
PACKAGE_GETIMAGE = 0x01  # scan a finger into the image buffer
PACKAGE_IMAGE2TZ = 0x02  # convert image buffer to a character file
PACKAGE_REGMODEL = 0x05  # combine character files into a template
PACKAGE_RANDOM = 0x14  # request a random number from the sensor
PACKAGE_STORE = 0x06  # store a template in flash
PACKAGE_MATCH = 0x03  # compare the two character buffers
PACKAGE_SEARCH = 0x04  # search the library for a match
PACKAGE_TEMPLATE_NUM = 0x1d  # count of stored templates
PACKAGE_UP_IMAGE = 0x0A  # upload image buffer to host
PACKAGE_UP_CHAR = 0x08  # upload a character buffer to host
PACKAGE_GET_SYS_PARS = 0x0f  # read system parameter block
PACKAGE_DOWN_IMAGE = 0x0B  # download image data to the sensor

# Package identifiers (the byte after the device address).
PACKAGE_COMMAND = 0x01
PACKAGE_DATA = 0x02
PACKAGE_ACK = 0x07
PACKAGE_END_OF_DATA = 0x08

# Confirmation codes returned in ACK packages.
FINGERPRINT_OK = 0x00
FINGERPRINT_NOFINGER = 0x02
FINGERPRINT_ENROLLMISMATCH = 0x0A

#logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class Fingerprint(object):
def __init__(self, port='/dev/ttyAMA0', baudrate=57600, timeout=2):
self.password = 0
self.address = [0xFF, 0xFF, 0xFF, 0xFF]
self.serial = None
self.port = port
self.baudrate=baudrate
self.timeout=timeout
    def connect(self):
        # Open the serial link to the sensor using the stored settings.
        self.serial = serial.Serial(self.port, self.baudrate, timeout=self.timeout)

    def close(self):
        # Release the serial port.
        self.serial.close()

    def __enter__(self):
        # Context-manager support: open the port on entry.
        self.connect()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always close the port on exit, even when an exception occurred.
        self.close()
    def write(self, instruction_code, data, identifier=PACKAGE_COMMAND):
        """Frame and send one package to the sensor.

        Layout: header, 4-byte address, identifier, 2-byte length,
        optional instruction code, payload bytes, 2-byte checksum.
        """
        # length field counts instruction code + payload + 2 checksum bytes
        size = len(data) + 3
        packet_size = list(divmod(size, 256))  # split into high/low bytes
        # checksum covers identifier, length and content (not header/address)
        checksum = identifier + sum(packet_size) + (instruction_code or 0) + sum(data)
        checksum = list(divmod(checksum, 256))
        # NOTE(review): divmod only produces a valid 2-byte checksum while
        # the running sum stays below 65536 -- confirm for large payloads
        # Python 2: map/filter return lists; filter(None, ...) drops a
        # None instruction_code (data-only packages)
        buffer = map(
            lambda x: chr(x),
            HEADER + self.address + [identifier] + packet_size + filter(None, [instruction_code]) + data + checksum
        )
        self.last_write_package = buffer  # kept for debugging
        logger.debug('write package: %s' % repr(buffer))
        self.serial.write(''.join(buffer))
    def read(self):
        """Read one package from the sensor.

        Returns a dict with 'identifier', 'extra_data' (payload string) and,
        for ACK packages, 'confirmation_code'.
        """
        header = self.serial.read(2)
        addr = self.serial.read(4)
        pi = ord(self.serial.read(1))  # package identifier byte
        length = self.serial.read(2)
        # NOTE(review): sums the two length bytes instead of combining them
        # as high*256 + low -- only correct for packages shorter than 256
        # bytes; confirm against the sensor datasheet
        ilen = sum([ord(i) for i in length])
        edata = self.serial.read(ilen - 2)  # payload, excluding the checksum
        resp = {'identifier': pi}
        if pi == 0x07:  # ACK packages carry a confirmation code first
            resp['confirmation_code'] = ord(edata[0])
            edata = edata[1:]
        resp['extra_data'] = edata
        csum = self.serial.read(2)  # checksum bytes (read but not verified)
        self.last_read_package = [header, addr, pi, length, resp.get('confirmation_code'), edata, csum]
        logger.debug('read package: %s' % self.last_read_package)
        logger.debug('return read dict: %s' % resp)
        return resp
    def handshake(self):
        # Greet (and effectively ping) the sensor; returns its ACK package.
        self.write(instruction_code=PACKAGE_HANDSHAKE, data=[0])
        return self.read()
    def get_system_parameters(self):
        """Query the sensor's system parameter block and decode the payload
        into named entries alongside the raw response dict.
        """
        self.write(instruction_code=PACKAGE_GET_SYS_PARS, data=[])
        ret = self.read()
        # slice the payload into its documented fields
        # NOTE(review): most fields are sliced as single bytes -- confirm
        # the field widths against the sensor datasheet
        ret['Status register'] = ret['extra_data'][0:0 + 1]
        ret['System identifier code'] = ret['extra_data'][1:1 + 1]
        ret['Finger library size'] = ret['extra_data'][2:2 + 1]
        ret['Security level'] = ret['extra_data'][3:3 + 1]
        ret['Device address'] = ret['extra_data'][4:4 + 2]
        # packet-size code 0..3 maps to 32/64/128/256 bytes
        ret['Data packet size'] = [32, 64, 128, 256][ord(ret['extra_data'][6:6 + 1])]
        ret['Baud settings'] = ret['extra_data'][7:7 + 1]
        return ret
    def empty(self):
        # Clear the sensor's fingerprint library.
        # NOTE: Python 2 print statement; this module is not Python 3 ready.
        self.write(instruction_code=PACKAGE_EMPTY, data=[])
        print self.read()
    def get_image(self):
        """Get a single read from the sensor looking for a fingerprint and load it into a "ImageBuffer" if successful"""
        self.write(instruction_code=PACKAGE_GETIMAGE, data=[])
        return self.read()

    def get_image_until(self, condition=FINGERPRINT_OK):
        """ Will continuously lookup for a fingerprint from the sensor until a condition """
        # busy-polls the sensor; blocks until the confirmation code matches
        r = self.get_image()
        while r.get('confirmation_code') != condition:
            r = self.get_image()
        return r
    def up_image(self, fo=None):
        """ Get Image src from ImageBuffer """
        logger.info('UPLOAD IMAGE')
        self.write(instruction_code=PACKAGE_UP_IMAGE, data=[])
        resp = self.read()  # initial ACK
        # accumulate data packages until the end-of-data package (0x08)
        r = {'identifier': 0x00}
        datas = []
        while r['identifier'] != 0x08:
            r = self.read()
            datas.append(r['extra_data'])
            #resp['image'].write(r['extra_data'])
            logger.debug("get %s bytes" % len(r['extra_data']))
        # optionally mirror the raw bytes to a caller-supplied file object
        if fo: fo.write(''.join(datas))
        resp['image'] = StringIO(''.join(datas))
        resp['image-data'] = datas
        return resp
    def down_image(self, fo):
        """Stream an image from file object *fo* down to the sensor.

        Marked "Not finish" by the author -- note the sensor's final
        acknowledgement after the last packet is never read back.
        """
        logger.info('DOWNLOAD IMAGE')
        # packet payload size comes from the sensor's own parameters
        chunks = self.get_system_parameters()['Data packet size']
        self.write(instruction_code=PACKAGE_DOWN_IMAGE, data=[])
        self.read()
        rdata = []
        data = fo.read(chunks)
        while data:
            rdata.append(map(ord, data))
            data = fo.read(chunks)
        # every chunk but the last goes as DATA; the last as END_OF_DATA
        for idata in rdata[:-1]:
            self.write(instruction_code=None, data=idata, identifier=PACKAGE_DATA)
        self.write(instruction_code=None, data=rdata[-1], identifier=PACKAGE_END_OF_DATA)
#def up_char(self, fo, buffer, chunks=128):
# logger.info('uploading char')
# self.write(instruction_code=PACKAGE_UP_CHAR, data=[buffer])
# # add read sequence
    def image_2_tz(self, buffer):
        """Convert the capture in ImageBuffer to a char file in CharBuffer *buffer*.

        :param buffer: target char buffer id (presumably 1 or 2 -- confirm
            against the module datasheet)
        """
        self.write(instruction_code=PACKAGE_IMAGE2TZ, data=[buffer])
        return self.read()
    def up_char(self, buffer, fo=None):
        """Upload the char file in CharBuffer *buffer* into resp['char'].

        Data packets are read until the end-of-data identifier (0x08); the
        payload is also mirrored to *fo* when given.
        """
        self.write(instruction_code=PACKAGE_UP_CHAR, data=[buffer])
        resp = self.read()
        resp['char'] = StringIO()
        r = {'identifier': 0x00}
        while r['identifier'] != 0x08:
            r = self.read()
            resp['char'].write(r['extra_data'])
            if fo: fo.write(r['extra_data'])
        # rewind so callers can read the buffer from the start
        resp['char'].seek(0)
        return resp
    def match(self):
        """Compare CharBuffer 1 and 2; return the reply with a 'score' key.

        NOTE(review): the score is the plain sum of the reply bytes; if the
        device reports a 16-bit big-endian score this drops the positional
        weight of the high byte -- confirm against the datasheet.
        """
        self.write(instruction_code=PACKAGE_MATCH, data=[])
        resp = self.read()
        resp['score'] = sum(map(ord, resp['extra_data']))
        return resp
    def register_model(self):
        """Merge CharBuffer 1 and 2 into a single template (model)."""
        self.write(instruction_code=PACKAGE_REGMODEL, data=[])
        return self.read()
def store_model(self, id, buffer=0x01):
self.write(instruction_code=PACKAGE_STORE, data=[buffer] + list(divmod(id, 255)))
return self.read()
    def template_number(self):
        """Return the reply with 'number' = count of stored templates.

        NOTE(review): the count is the plain byte sum of extra_data; for a
        16-bit big-endian count this is wrong above 255 templates -- verify
        against the datasheet.
        """
        self.write(instruction_code=PACKAGE_TEMPLATE_NUM, data=[])
        resp = self.read()
        resp['number'] = sum(map(ord, resp['extra_data']))
        return resp
    def get_random_code(self):
        """Ask the sensor for a random number; reply gains a 'random' key.

        NOTE(review): as elsewhere, the value is a plain byte sum rather than
        a positional (big-endian) combination -- confirm intended semantics.
        """
        self.write(instruction_code=PACKAGE_RANDOM, data=[])
        resp = self.read()
        resp['random'] = sum(map(ord, resp['extra_data']))
        return resp
    def search(self, buffer, start_page=0, page_num=0x00a3):
        """Search the library for the template held in CharBuffer *buffer*.

        :param buffer: char buffer to match against (0x01 or 0x02)
        :param start_page: first library page to search
        :param page_num: number of pages to search (default 0x00a3 = 163)

        NOTE(review): ``divmod(..., 255)`` and the byte sums below look like
        they should be base-256 big-endian encode/decode -- values above 255
        will be wrong; confirm against the module datasheet. Also note the
        inconsistent reply keys: 'confirmation_desc' on success vs 'desc' on
        code 9.
        """
        self.write(instruction_code=PACKAGE_SEARCH, data=[buffer] + list(divmod(start_page, 255)) + list(divmod(page_num, 255)))
        resp = self.read()
        resp['page_id'] = sum(map(ord, resp['extra_data'][:2]))
        resp['score'] = sum(map(ord, resp['extra_data'][2:]))
        if resp['confirmation_code'] == 0:
            resp['confirmation_desc'] = 'OK'
        elif resp['confirmation_code'] == 9:
            resp['desc'] = "No matching in the library (both the PageID and matching score are 0)"
        return resp
def register_finger(id):
    """Enroll a finger: capture it twice, combine into a model, store at *id*.

    Raises Exception("No Match") when the two captures cannot be merged.
    """
    with Fingerprint() as f:
        print "place finger"
        f.get_image_until()
        f.image_2_tz(buffer=1)
        # wait until the finger is lifted before asking for the second capture
        print "remove your finger"
        f.get_image_until(condition=FINGERPRINT_NOFINGER)
        print "place finger again"
        f.get_image_until()
        f.image_2_tz(buffer=2)
        model = f.register_model()
        if model['confirmation_code'] != FINGERPRINT_OK:
            raise Exception("No Match")
        print f.store_model(id=id, buffer=1)
def validate_finger():
    """Capture one finger and search the library for a matching template."""
    with Fingerprint() as f:
        print "place finger"
        f.get_image_until()
        print f.image_2_tz(0x01)
        print f.search(buffer=0x01)
# Demo entry point: handshake, wipe the library, then block until a finger
# is successfully captured and print the reply.
if __name__ == '__main__':
    with Fingerprint() as f:
        f.handshake()
        f.empty()
        image = f.get_image_until()
        print image
| {
"repo_name": "Epi10/epifpm",
"path": "epifpm/zfm20.py",
"copies": "1",
"size": "8279",
"license": "mit",
"hash": -8618993402727876000,
"line_mean": 28.462633452,
"line_max": 128,
"alpha_frac": 0.5917381326,
"autogenerated": false,
"ratio": 3.4741921947125474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9557292329804012,
"avg_score": 0.0017275995017068519,
"num_lines": 281
} |
__author__ = 'aleivag'
import sys
import time
from csv import writer
import argparse
from multiprocessing import Process, Queue
class Generator(Process):
    """Produces work items from do_generate() and feeds the manager queue.

    The first ``simultaneous`` items are held back before the queue starts
    draining; one ``None`` sentinel per worker is appended at the end so
    every worker shuts down.
    """
    def __init__(self, manager):
        Process.__init__(self)
        self.manager = manager
        self.queue = manager.manager_queue
        self.small_pool = self.manager.work_finish
    def do_generate(self):
        """Return an iterable of work items; subclasses override this."""
        return []
    def run(self):
        # iter() accepts plain sequences as well as generators; the old
        # ``.next()`` call was Python-2-only and rejected non-generators
        work_iter = iter(self.do_generate())
        held_back = self.manager.p_args.simultaneous
        pending = []
        while True:
            try:
                work = next(work_iter)
            except StopIteration:
                break
            pending.append(work)
            if held_back:
                held_back -= 1
            else:
                while pending:
                    self.queue.put(pending.pop(0))
        # Flush whatever is still buffered. The original dropped these items
        # silently whenever the generator produced <= `simultaneous` items.
        while pending:
            self.queue.put(pending.pop(0))
        # one sentinel per worker
        for nid in range(self.manager.p_args.simultaneous):
            self.queue.put(None)
class Worker(Process):
    """Consumes work items from the manager queue, times do_work() on each,
    and pushes the annotated item onto the work-done queue.

    A ``None`` item is the shutdown sentinel: it is forwarded to the
    reporter and the worker exits.
    """
    def __init__(self, manager):
        Process.__init__(self)
        self.manager = manager
        self.in_queue = manager.manager_queue
        self.out_queue = manager.work_done_queue
    def do_work(self, work):
        """Process one work item; subclasses override. Must return a dict to
        merge into the timing record (conventionally including 'result')."""
        return work
    def run(self):
        while True:
            work = self.in_queue.get()
            if work is None:
                # propagate the sentinel so the reporter can count us done
                self.out_queue.put(None)
                return
            record = {'start': time.time()}
            try:
                record.update(self.do_work(work))
                record['stop'] = time.time()
                record['duration'] = record['stop'] - record['start']
            except Exception as e:  # was py2-only "except Exception, e"
                # on failure the timing fields are dropped on purpose; only
                # the failure flag and a one-line error message survive
                record = {'result': False, 'error': str(e).replace('\n', '-')}
            work.update(record)
            self.out_queue.put(work)
class Reporter(Process):
    """Collects finished work items from the done-queue and, once every
    worker has sent its ``None`` sentinel, writes one CSV summary row to
    stdout (name, concurrency, totals, success rate, max simultaneous
    requests, and ppi percentiles).

    NOTE(review): successful items are expected to carry 'result', 'ppi',
    'start' and 'stop' keys (set by Worker.do_work subclasses) -- confirm.
    """
    duration = 'duration'
    def __init__(self, manager):
        Process.__init__(self)
        self.manager = manager
        self.out_queue = manager.work_done_queue
    def run(self):
        # one None sentinel is expected per worker
        nones = self.manager.p_args.simultaneous
        results = []
        timeline = {}
        good, bad = 0., 0.
        max_sim = 0
        ppi_5, ppi_50, ppi_avg, ppi_80, ppi_95 = 0, 0, 0, 0, 0
        while nones:
            q = self.out_queue.get()
            if q is None:
                nones -= 1
                continue
            if q['result']:
                results.append(q['ppi'])
                # timeline holds +1 at each start time and -1 at each stop
                # time; a running sum over sorted keys gives the number of
                # in-flight requests at any moment
                timeline[q['start']] = timeline.get(q['start'], 0) + 1
                timeline[q['stop']] = timeline.get(q['stop'], 0) - 1
                acum = 0
                for k, v in sorted(timeline.items(), key= lambda x: x[0]):
                    acum += v
                    max_sim = max(acum, max_sim)
                good += 1.
            else:
                bad += 1.
        # NOTE(review): if no item at all was processed, tdone == 0 and the
        # pgood division below raises ZeroDivisionError -- confirm intended.
        tdone = good + bad
        pdone = tdone/self.manager.p_args.total*100
        pgood = good/tdone*100
        results.sort()
        if good:
            # approximate percentiles by direct index into the sorted list
            ppi_5 = results[int(good*0.05)]
            ppi_50 = results[int(good*0.5)]
            ppi_avg = sum(results) / good
            ppi_80 = results[int(good*0.8)]
            ppi_95 = results[int(good*0.95)]
        writer(sys.stdout).writerow(
            [
                self.manager.p_args.name,
                self.manager.p_args.simultaneous,
                self.manager.p_args.total,
                '%.1f%%' % pdone,
                '%.1f%%' % pgood,
                max_sim,
                '%.2f' % ppi_5,
                '%.2f' % ppi_50,
                '%.2f' % ppi_80,
                '%.2f' % ppi_95,
                '%.2f' % ppi_avg
            ]
        )
class Manager(object):
    """Wires a Generator, a pool of Workers and a Reporter together.

    NOTE(review): ``args`` is a class attribute, so CLI option registration
    is shared by every Manager in the process; instantiating two Managers
    re-registers the same options -- confirm single-instance use.
    """
    args = argparse.ArgumentParser(description="Stresser")
    def init_arguments(self):
        """Register the common CLI options; subclasses may extend."""
        self.args.add_argument('--simultaneous', '--sim', default=1, type=int)
        self.args.add_argument('--total', default=10, type=int)
        self.args.add_argument('--name', default="")
    def __init__(self):
        self.init_arguments()
        self.p_args = self.args.parse_args()
        self.work_finish = Queue()
        self.manager_queue = Queue()
        self.work_done_queue = Queue()
    def register_generator(self, generator=Generator):
        """Instantiate and remember the generator process."""
        self.generator = generator(self)
        return self.generator
    def regirster_workers(self, worker=Worker):
        """Instantiate one worker per --simultaneous slot.

        (Misspelled name kept for backward compatibility; prefer the
        register_workers alias below.)
        """
        self.workers = [
            worker(self) for nid in range(self.p_args.simultaneous)
        ]
        return self.workers
    # correctly spelled alias for the historical (typo'd) name above
    register_workers = regirster_workers
    def register_reporter(self, reporter=Reporter):
        """Instantiate and remember the reporter process."""
        self.reporter = reporter(self)
    def start(self):
        """Start generator, workers and reporter, then wait for all of them.

        Explicit loops instead of ``map(lambda x: x.start(), ...)``: map() is
        lazy on Python 3, so the side effects would never run there.
        """
        self.generator.start()
        for worker in self.workers:
            worker.start()
        self.reporter.start()
        #
        self.generator.join()
        for worker in self.workers:
            worker.join()
        self.reporter.join()
| {
"repo_name": "aleivag/stress",
"path": "stresslib/stress.py",
"copies": "1",
"size": "4841",
"license": "mit",
"hash": -686755892057190400,
"line_mean": 25.7458563536,
"line_max": 78,
"alpha_frac": 0.4955587688,
"autogenerated": false,
"ratio": 3.8728,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48683587687999996,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alejandro.Esquiva'
class AARConnector:
    """Small client for an AutomaticApiRest ``getData.php`` endpoint.

    Either pass a full ``url`` or the query parts (``table``, ``columns``,
    ``orderby``, ``way``, ``limit``, ``where``, ``opt``) and the URL is
    built by formatURL().

    Fix: the module never imported ``urllib`` or ``json``, so every network
    / JSON call raised NameError; the imports are now done locally in the
    methods that need them.
    """
    def __init__(self, **kwargs):
        self.url = kwargs.get("url", "")
        self.domain = kwargs.get("domain", "http://automaticapirest.info/demo/")
        self.table = kwargs.get("table", "")
        self.columns = kwargs.get("columns", "")
        self.orderby = kwargs.get("orderby", "")
        self.way = kwargs.get("way", "ASC")
        self.limit = kwargs.get("limit", "")
        self.where = kwargs.get("where", "")
        self.opt = kwargs.get("opt", "")
        if self.url == "":
            print(self.formatURL())
            self.url = self.formatURL()
    def getRawData(self):
        """Fetch the configured URL and return the raw JSON text."""
        import urllib.request  # local import: module has no top-level one
        request = urllib.request.Request(self.url)
        response = urllib.request.urlopen(request)
        jsonraw = response.read().decode('utf-8')
        return jsonraw
    def getJson(self):
        """Return the decoded JSON payload of the endpoint."""
        import json
        return json.loads(self.getRawData())
    def _dump(self, obj):
        """Pretty-print helper (indent 1, sorted keys)."""
        import json
        print(json.dumps(obj, indent=1, sort_keys=True))
    def printJson(self):
        """Pretty-print the whole JSON response."""
        self._dump(self.getJson())
    def getData(self):
        """Return only the 'data' section of the response."""
        return self.getJson()["data"]
    def printData(self):
        """Pretty-print only the 'data' section."""
        self._dump(self.getData())
    def getDBInfo(self):
        """Return only the 'dbInfo' section of the response."""
        return self.getJson()["dbInfo"]
    def printDBInfo(self):
        """Pretty-print only the 'dbInfo' section."""
        self._dump(self.getDBInfo())
    def formatURL(self):
        """Build the getData.php query URL from the configured fields."""
        import urllib.parse  # local import: module has no top-level one
        url = self.domain + "getData.php?t=" + self.table
        if self.columns != "":
            url = url + "&c=" + self.columns
        if self.orderby != "":
            url = url + "&o=" + self.orderby
        # the sort direction is always emitted
        if self.way != "ASC":
            url = url + "&s=" + self.way
        else:
            url = url + "&s=ASC"
        if self.limit != "":
            url = url + "&l=" + self.limit
        if self.where != "":
            url = url + "&w=" + urllib.parse.quote(self.where)
        if self.opt != "":
            url = url + "&opt=" + self.opt
        return url
| {
"repo_name": "alejandroesquiva/AutomaticApiRest-PythonConnector",
"path": "aarpy/AARConnector.py",
"copies": "1",
"size": "2036",
"license": "mit",
"hash": 2172136919043305500,
"line_mean": 28.0857142857,
"line_max": 79,
"alpha_frac": 0.5358546169,
"autogenerated": false,
"ratio": 3.597173144876325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4633027761776325,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alek Ratzloff <alekratz@gmail.com>'
from socket import socket
class ThinClient:
    """
    Minimal TCP client: connect, send, receive, close.
    """
    def __init__(self, port, host="127.0.0.1", recv_size=1024):
        self.port = port
        self.host = host
        self.recv_size = recv_size
        self.sock = None
    def connect(self):
        """
        Open a TCP connection to the configured host and port.
        """
        if self.sock:
            raise Exception("Client is already connected to a server")
        # build the socket first, publish it only once connected
        sock = socket()
        sock.connect((self.host, self.port))
        self.sock = sock
    def close(self):
        """
        Shut down the current connection and forget the socket.
        """
        self.__verify_connection()
        self.sock.close()
        self.sock = None
    def send(self, message):
        """
        Send a message over the open socket; str payloads are ASCII-encoded.
        :param message: the message or command to send to the server
        """
        self.__verify_connection()
        payload = message.encode('ascii') if type(message) is str else message
        self.sock.send(payload)
    def wait_receive(self):
        """
        Block until the server sends something; return it decoded as UTF-8.
        """
        self.__verify_connection()
        raw = self.sock.recv(self.recv_size)
        return raw.decode('utf-8')
    def send_receive(self, message):
        """
        One-shot request/response: connect, send, wait for the reply,
        disconnect, and return the server's response.
        """
        self.connect()
        self.send(message)
        reply = self.wait_receive()
        self.close()
        return reply
    def __verify_connection(self):
        """
        Raise if no socket is currently open.
        """
        if not self.sock:
            raise Exception("Client is not connected to the server")
class BasicThinClient(ThinClient):
    """ThinClient with defaults matching BasicThinServer (port 65000)."""
    def __init__(self, port=65000, host="127.0.0.1", recv_size=1024):
        super(BasicThinClient, self).__init__(port, host, recv_size)
| {
"repo_name": "alekratz/pythinclient",
"path": "pythinclient/client.py",
"copies": "1",
"size": "2352",
"license": "bsd-3-clause",
"hash": 167175308280535260,
"line_mean": 30.7837837838,
"line_max": 98,
"alpha_frac": 0.5948129252,
"autogenerated": false,
"ratio": 4.192513368983957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5287326294183957,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alek Ratzloff <alekratz@gmail.com>'
import abc
import sys
import os
from socket import socket, timeout
from os.path import exists
from threading import Thread
class ThinServer:
    """Abstract TCP command server; subclasses implement on_accept().

    Commands are dispatched by their first word through hooks registered
    with add_hook(). Can optionally daemonize itself with a PID lockfile.

    NOTE(review): ``__metaclass__`` is the Python-2 spelling; on Python 3 it
    is ignored, so @abc.abstractmethod is not enforced there -- confirm the
    intended Python version.
    """
    __metaclass__ = abc.ABCMeta
    def __init__(self, port, host='127.0.0.1', recv_size=1024, is_daemon=False, lockfile="/tmp/pythinclient.pid"):
        """
        Creates an instance of a server that listens for connections and commands from the thin client.
        :param port: the port to listen on
        :param host: the hostname to listen on. 127.0.0.1 by default
        :param recv_size: the size of the buffer to read. 1024 by default
        :param is_daemon: whether this thin server is a daemon. False by default
        :param lockfile: the file to use to hold the process ID of the daemon process
        """
        self.port = port
        self.host = host
        self.recv_size = recv_size
        self.sock = None
        self.is_daemon = is_daemon
        self.is_running = False
        self.lockfile = lockfile
        self.hooks = {}
        self.child_pid = -1
    def stop(self):
        """Ask the accept loop to exit after its current iteration."""
        assert self.is_running
        self.is_running = False
    def start(self):
        """Bind, listen and run the accept loop (forking first if daemonized)."""
        assert ((self.sock is None) == (not self.is_running))
        self.is_running = True
        # determine if this is a daemonized server, and check to see if the lockfile is already taken
        if self.is_daemon:
            if exists(self.lockfile):
                with open(self.lockfile, "r") as fp:
                    pid = fp.read()
                raise Exception("Daemonized server is already running with PID %s" % pid)
            # fork
            child = os.fork()
            if child == -1:
                raise Exception("Failed to fork for daemon")
            elif child == 0:
                # child section: serves forever and exits the process; note
                # the child never returns from start()
                self.sock = socket()
                self.sock.bind((self.host, self.port))
                self.sock.listen(1)
                try:
                    self._accept_loop()
                except KeyboardInterrupt:
                    # nothing to do here, just silently accept it
                    pass
                finally:
                    # clean up the lockfile and exit cleanly
                    os.remove(self.lockfile)
                    sys.exit(0)
            else:
                # parent section
                # create the lockfile and put the PID inside of it
                self.child_pid = child
                with open(self.lockfile, "w") as fp:
                    fp.write(str(child))
        else:
            # not a daemon. initialize like normal and run in this thread
            # NOTE(review): the 1s accept timeout (which lets stop() take
            # effect) is only set on this non-daemon path -- confirm whether
            # the daemon path should set it too
            self.sock = socket()
            self.sock.bind((self.host, self.port))
            self.sock.listen(1)
            self.sock.settimeout(1.0)
            self._accept_loop()
    def add_hook(self, command, method):
        """
        Adds a keyword command for the server to invoke a method.
        The method must take 3 arguments: the message, the connection, and the address.
        :param command: the command, as a string, that is handled
        :param method: the function that is called
        :return:
        """
        self.hooks[command] = method
    def _accept_loop(self):
        """
        Private helper method that accepts clients
        :return:
        """
        assert self.sock
        assert self.is_running
        while self.is_running:
            try:
                conn, addr = self.sock.accept()
                self.on_accept(conn, addr)
            except timeout:
                # do nothing; the timeout just lets us re-check is_running
                pass
    @abc.abstractmethod
    def on_accept(self, conn, addr):
        """
        Handles what happens when a connection is accepted to the thin server.
        :param conn: the socket connection that connected to the server
        :param addr: the address that connected to the server
        """
        return
    def on_receive(self, message, conn, addr):
        """
        Handles the receiving of a message from a client
        :param message: the message that was received
        :param conn: the socket connection that sent the message
        :param addr: the address of the connection that sent the message
        """
        # if the message has a length of zero, break out
        if len(message) == 0:
            return
        # convert the message back to a string
        message = message.decode('utf-8')
        # get the first word of the message, and use that to figure out what the command was
        # NOTE(review): a whitespace-only message makes split() return [] and
        # the [0] below raise IndexError -- confirm callers never send one
        command = message.split()[0]
        if command in self.hooks:
            try:
                self.hooks[command](message, conn, addr)
                result_message = b"\nOK"
            except Exception as ex:
                result_message = ("Server reported error: " + str(ex)).encode('ascii') \
                    + b"\nERR"
        else:
            result_message = b"Bad command" \
                b"\nERR"
        conn.send(result_message)
class BasicThinServer(ThinServer):
    """
    Synchronous thin server: each accepted connection is served with a single
    recv / dispatch / close cycle. Extend it by registering method hooks (see
    add_hook); pairs with the BasicThinClient.
    """
    def __init__(self, port=65000, host='127.0.0.1', recv_size=1024, is_daemon=False, lockfile="/tmp/pythinclient.pid"):
        super(BasicThinServer, self).__init__(port, host, recv_size, is_daemon, lockfile)
    def on_accept(self, conn, addr):
        """
        Serve one client: read a single message, dispatch it, then hang up.
        :param conn: the socket connection that connected to the server
        :param addr: the address that connected to the server
        """
        incoming = conn.recv(self.recv_size)
        self.on_receive(incoming, conn, addr)
        conn.close()
class AsyncThinServer(ThinServer):
    """Thin server that spawns one AsyncListener thread per accepted
    connection, so several clients can be served concurrently."""
    def __init__(self, port=65000, host='127.0.0.1', recv_size=1024, is_daemon=False, lockfile="/tmp/pythinclient.pid"):
        super(AsyncThinServer, self).__init__(port, host, recv_size, is_daemon, lockfile)
        # Connections are indexed by their address tuples
        self.__connections = {}
    def on_accept(self, conn, addr):
        """
        Handles what happens when a connection is accepted to the thin server.
        :param conn: the socket connection that connected to the server
        :param addr: the address that connected to the server
        """
        assert (addr not in self.__connections)
        # create an asynchronous listener for this connection
        listener = AsyncListener(conn, addr, self)
        self.__connections[addr] = listener
        listener.start()
    def on_receive(self, message, conn, addr):
        """Dispatch the message, then reap the listener if its socket closed."""
        # call the basic onreceive stuff
        super(AsyncThinServer, self).on_receive(message, conn, addr)
        # check to see if the listener is still alive; if not, remove it
        if not self.__connections[addr].alive:
            del self.__connections[addr]
        # TODO : log these when we can log stuff
        # print("deleting connection from %s" % str(addr))
        # print("%s connections to the server right now" % len(self.__connections))
class AsyncListener(Thread):
    """Per-connection receive thread that forwards messages to its server."""
    def __init__(self, conn, addr, thin_server):
        """
        Remember the client connection and the owning thin server.
        """
        super(AsyncListener, self).__init__()
        self.conn = conn
        self.addr = addr
        self.thin_server = thin_server
        self.alive = False
    def run(self):
        assert not self.alive
        self.__listen_loop()
    def __listen_loop(self):
        # Loop until the peer closes (empty recv). Note the final empty
        # message is still handed to on_receive so the server can clean up.
        self.alive = True
        while self.alive:
            data = self.conn.recv(self.thin_server.recv_size)
            if not data:
                self.alive = False
            self.thin_server.on_receive(data, self.conn, self.addr)
| {
"repo_name": "alekratz/pythinclient",
"path": "pythinclient/server.py",
"copies": "1",
"size": "8016",
"license": "bsd-3-clause",
"hash": -7787661052449574000,
"line_mean": 35.9400921659,
"line_max": 120,
"alpha_frac": 0.5782185629,
"autogenerated": false,
"ratio": 4.351791530944626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5430010093844625,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aleksandar Savkov'
import re
from os import makedirs
from pandas.io.html import read_html
# Scrape the Wikipedia list of medical roots/suffixes/prefixes and write the
# cleaned affixes out to four text files (with/without hyphens, prefix-only,
# suffix-only).
# Fix: regex patterns are now raw strings -- '\W'-style escapes in plain
# strings emit SyntaxWarning/DeprecationWarning on modern Python.

# output folder
dp = 'data/'

# make sure folder exists
try:
    makedirs(dp)
except OSError:
    pass  # dir exists

# output files
hiphen_affix_path = '%s/medaffix_with_hiphens.txt' % dp
affix_path = '%s/medaffix.txt' % dp
suffix_path = '%s/medsuffix.txt' % dp
prefix_path = '%s/medprefix.txt' % dp

# Wikipedia URL
url = 'http://en.wikipedia.org/wiki/List_of_medical_roots,_' \
      'suffixes_and_prefixes'

# parsed tables at this URL
tables = read_html(url, attrs={'class': 'wikitable'}, header=0)

# names of interesting columns in the tables
regular_keys = [
    'Affix',
    'Greek root in English',
    'Latin root in English',
    'Other root in English'
]

# former names of interesting columns, kept in case they are restored
ignoramus_keys = [
    'Preffix or suffix',
    'Preffix/suffix'
]

# all column names
keys = regular_keys + ignoramus_keys

# collecting all affix entries from every table/column that exists
entries = []
for t in tables:
    for k in keys:
        try:
            entries.extend(t[k])
        except KeyError:
            pass  # no biggie: not all keys and tables are interesting

# processed affixes
terms = []
for e in entries:
    # skip empty/one-character entries
    # NOTE(review): assumes every cell is a string; a NaN float cell would
    # make len() raise -- confirm against the parsed tables
    if len(e) < 2:
        continue
    # split possible comma separated sub-entries and clean them
    sub_entries = [part.strip() for part in e.split(',')]
    # expanding all entries with longer forms in braces
    # e.g. cry(o)- => cry-, cryo-
    for se in sub_entries:
        if re.search(r'\([^\)]+\)', se):
            short_entry = re.sub(r'\([^\)]+\)', '', se).split(' ')[0]
            long_entry = re.sub(r'\(([^\)]+)\)', r'\1', se).split(' ')[0]
            if short_entry:
                terms.append(short_entry)
            if long_entry:
                terms.append(long_entry)
        elif se:
            terms.append(se.split(' ')[0])

# writing to files: hyphenated, bare, prefixes (trailing '-'),
# suffixes (leading '-')
with open(hiphen_affix_path, 'w') as f:
    f.write('\n'.join(terms))
with open(affix_path, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in terms]))
with open(prefix_path, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in terms if x.endswith('-')]))
with open(suffix_path, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in terms if x.startswith('-')]))
| {
"repo_name": "savkov/MedAffix",
"path": "scripts/medaffix.py",
"copies": "1",
"size": "2361",
"license": "mit",
"hash": 8393681290960247000,
"line_mean": 25.5280898876,
"line_max": 80,
"alpha_frac": 0.5997458704,
"autogenerated": false,
"ratio": 3.177658142664872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4277404013064872,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aleksandar Savkov'
import re
import StringIO
def parse_ftvec_templ(self, s, r):
    """Parses a feature vector template string into a FeatureTemplate object.

    *Important*: if resources (e.g. embeddings) are used in the feature template
    they should be provided during the parsing in the `r` parameter in order to
    be prepacked as parameters to the feature extraction function.

    NOTE(review): module-level function with a ``self`` parameter -- it is
    evidently meant to be attached to a feature-template class; confirm.

    :param s: feature vector string
    :type s: str
    :param r: dictionary of resources
    :type r: dict
    :return: FeatureTemplate
    """
    # strip tabs/spaces, then split the vector on ';'
    # (raw regex strings avoid SyntaxWarning on modern Python)
    fts_str = re.sub(r'[\t ]', '', s).split(';')
    for ft in fts_str:
        # empty featues (...; ;feature:params)
        if ft.strip() == '':
            continue
        # no parameter features
        no_par = ':' not in ft
        # misplaced column without parameters
        no_par_end_col = ft.count(':') == 1 and ft.endswith(':')
        if no_par or no_par_end_col:
            fn = ft if no_par else ft[:-1]
            self.add_feature(fn)
            continue
        # function name & parameter values
        fn, v = ft.split(':', 1)
        # value matches: optional [window] followed by optional params
        m = re.match(r'(?:\[([0-9:,-]+)\])?(.+)?', v)
        # window range
        fw = parse_range(m.group(1)) if m.group(1) else None
        # function parameters
        fp = []
        # adding resources to the parameters if required
        if fn in r.keys():
            fp.append(r[fn])
        # adding function parameters if specified
        if m.group(2) is not None:
            fp.extend([x for x in m.group(2).split(',') if x])
        # name, window, parameters
        self.add_win_features(fn, fw, tuple(fp))
def parse_range(r):
    """Parses a range in string representation adhering to the following
    format:
    1:3,6,8:9 -> 1,2,3,6,8,9
    :param r: range string
    :type r: str
    """
    indices = []
    # comma-separated parts; each is either "a:b" (inclusive) or a number
    for part in (p.strip() for p in r.split(',')):
        if ':' in part:
            start, end = (int(x.strip()) for x in part.split(':'))
            indices.extend(range(start, end + 1))
        else:
            indices.append(int(part))
    return indices
def nrange(start, stop, step):
    """Returns the indices of n-grams in a context window. Works much like
    range(start, stop, step), but the stop index is inclusive, and indices
    are included only if an n-gram of length *step* still fits between the
    candidate index and the stop index.

    :param start: starting index
    :type start: int
    :param stop: stop index (inclusive)
    :type stop: int
    :param step: n-gram length
    :type step: int
    :return: n-gram indices from left to right
    :rtype: list of int
    """
    # the last admissible index i satisfies i + step <= stop + 1
    return list(range(start, stop - step + 2))
def parse_ng_range(fw, n):
    """Transforms context window index list to a context window n-gram index
    list.

    :param fw: context window (indices, assumed pre-sorted)
    :type fw: list of int
    :param n: n in n-grams
    :type n: int
    :return: n-gram indices
    :rtype: list of int
    """
    # split the window into runs of consecutive indices
    subranges = []
    run = []
    prev = None
    for i in fw:
        # idiom fix: identity comparison with None (was ``cur == None``)
        if prev is None or prev + 1 == i:
            run.append(i)
        else:
            subranges.append(run)
            run = [i]
        prev = i
    subranges.append(run)
    # expand each consecutive run into the n-gram start indices it admits
    ngram_indices = []
    for sr in subranges:
        ngram_indices.extend(nrange(sr[0], sr[-1], n))
    return ngram_indices
def gen_ft(r, ftt=0, n=1, idx=0):
    """Generate CRF++ unigram-template lines for a window range string.

    :param r: window range string understood by parse_range (e.g. "1:3,6")
    :param ftt: feature-table column index embedded in each macro
    :param n: n-gram length (one %x[...] macro per token of the n-gram)
    :param idx: starting number for the U-label counter
    :return: list of template lines like ``U00:%x[1,0]``
    """
    rng = parse_range(r)
    fts = []
    # assert kept (not ValueError) so callers' exception handling is unchanged
    assert n > 0, 'n needs to be a positive number.'
    for offset, ci in enumerate(rng):
        f = 'U%02d:' % (idx + offset)
        # fix: the inner loop used to shadow the enumerate counter `i`
        for j in range(n):
            f = '%s%s[%s,%s]' % (f, '%x', ci + j, ftt)
        fts.append(f)
    return fts
def to_crfpp_template(ftvec):
    """Convert a ';'-separated feature vector spec into a CRF++ template.

    A bare 'B' entry appends the bigram marker; 'bi...'/'tri...' prefixes
    select the n-gram length, and a '...pos' suffix selects column 1.
    """
    lines = []
    has_bigram = False
    for spec in ftvec.split(';'):
        if spec == 'B':
            has_bigram = True
            continue
        bracket = spec.find('[')
        name = spec[:bracket]
        window = spec[bracket + 1:spec.find(']')]
        if name.startswith('tri'):
            size = 3
        elif name.startswith('bi'):
            size = 2
        else:
            size = 1
        lines.extend(gen_ft(window, int(name.endswith('pos')), size, len(lines)))
    if has_bigram:
        lines.append('B')
    return '\n'.join(lines)
| {
"repo_name": "savkov/crfppftvec",
"path": "crfppftvec.py",
"copies": "1",
"size": "4253",
"license": "mit",
"hash": -5902834145117482000,
"line_mean": 24.7757575758,
"line_max": 80,
"alpha_frac": 0.5367975547,
"autogenerated": false,
"ratio": 3.4718367346938774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9505493290707805,
"avg_score": 0.0006281997372144122,
"num_lines": 165
} |
__author__ = 'Aleksandar Savkov'
import re
import warnings
import requests
from bs4 import BeautifulSoup
from os import makedirs
def get_next_cat_page(soup, affix_type):
    """Return the URL of the 'next 200' page of a Wiktionary category, or
    None when the current page is the last one.

    :param soup: BeautifulSoup document of the current category page
    :param affix_type: 'suffix' or 'prefix' (used to build the link title)
    """
    atags = soup.findAll(name='a',
                         attrs={'title': 'Category:English %ses' % affix_type})
    # fix: x.string is None for tags with nested markup, which made the
    # 'in' test raise TypeError -- guard before testing
    urls = [x['href'] for x in atags
            if x.string and 'next 200' in x.string and x['href']]
    if len(urls) > 2:
        warnings.warn('There is more than two url candidates.')
    elif len(urls) == 0:
        return None
    return 'https://en.wiktionary.org/%s' % urls[0]
def scrape_category(url, affix_type=None):
    """Collect affix page titles from a Wiktionary category page.

    Only titles made of lowercase letters and hyphens are kept. When
    *affix_type* is given, 'next 200' pagination is followed recursively.
    """
    html = requests.get(url).text
    soup = BeautifulSoup(html, 'html.parser')
    next_url = get_next_cat_page(soup, affix_type) if affix_type else None
    atags = soup.findAll(name='a')
    affixes = [x['title']
               for x
               in atags
               if x.has_attr('title') and re.match('^[a-z-]+$', x['title'])]
    if next_url:
        affixes.extend(scrape_category(next_url, affix_type))
    return affixes
# Scrape English affix categories from Wiktionary and write one text file
# per category (hyphenated and bare affixes, prefixes, suffixes, and the
# POS-specific suffix subcategories).
# Fix: the adjective- and adverb-forming scrapes were swapped -- `asfxs`
# (written to wikiadjsuffix.txt) was scraped from the adverb category and
# vice versa.

# output folder
dp = 'data/'

# make sure folder exists
try:
    makedirs(dp)
except OSError:
    pass  # dir exists

# output files
hiphen_affix_path = '%s/wikiaffix_with_hiphens.txt' % dp
affix_path = '%s/wikiaffix.txt' % dp
suffix_path = '%s/wikisuffix.txt' % dp
prefix_path = '%s/wikiprefix.txt' % dp
vsfxp = '%s/wikiverbsuffix.txt' % dp
nsfxp = '%s/wikinounsuffix.txt' % dp
asfxp = '%s/wikiadjsuffix.txt' % dp
rsfxp = '%s/wikiadvsuffix.txt' % dp

# Wiktionary Category URLs
suff_url = 'https://en.wiktionary.org/wiki/Category:English_suffixes'
pref_url = 'https://en.wiktionary.org/wiki/Category:English_prefixes'
verb_url = 'https://en.wiktionary.org/wiki/Category:' \
           'English_verb-forming_suffixes'
noun_url = 'https://en.wiktionary.org/wiki/Category:' \
           'English_noun-forming_suffixes'
infl_url = 'https://en.wiktionary.org/wiki/Category:' \
           'English_inflectional_suffixes'
adv_url = 'https://en.wiktionary.org/wiki/Category:' \
          'English_adverb-forming_suffixes'
adj_url = 'https://en.wiktionary.org/wiki/Category:' \
          'English_adjective-forming_suffixes'

# scraping the paginated top-level categories
sfxs = scrape_category(suff_url, affix_type='suffix')
prfxs = scrape_category(pref_url, affix_type='prefix')

# scraping the subcategories
vsfxs = scrape_category(verb_url)  # forming verbs
nsfxs = scrape_category(noun_url)  # forming nouns
isfxs = scrape_category(infl_url)  # inflectional
asfxs = scrape_category(adj_url)  # forming adjectives
rsfxs = scrape_category(adv_url)  # forming adverbs

# writing to files: prefixes end with '-', suffixes start with '-'
with open(hiphen_affix_path, 'w') as f:
    f.write('\n'.join(sfxs + prfxs))
with open(affix_path, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in sfxs + prfxs]))
with open(prefix_path, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in prfxs if x.endswith('-')]))
with open(suffix_path, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in sfxs if x.startswith('-')]))
with open(vsfxp, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in vsfxs if x.startswith('-')]))
with open(nsfxp, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in nsfxs if x.startswith('-')]))
with open(isfxp, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in isfxs if x.startswith('-')]))
with open(asfxp, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in asfxs if x.startswith('-')]))
with open(rsfxp, 'w') as f:
    f.write('\n'.join([x.replace('-', '') for x in rsfxs if x.startswith('-')]))
| {
"repo_name": "savkov/MedAffix",
"path": "scripts/wikiaffix.py",
"copies": "1",
"size": "3581",
"license": "mit",
"hash": -8129105397221860000,
"line_mean": 35.1717171717,
"line_max": 80,
"alpha_frac": 0.636693661,
"autogenerated": false,
"ratio": 2.8488464598249803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.398554012082498,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aleks'
# -*- coding: utf-8 -*-
import re
from bs4 import Tag
def _feature_video_title(tag):
    """Find the heading preceding *tag* and derive a video title from it.

    Walks backwards over the parse tree, skipping non-Tag nodes and <p>
    tags, and expects to land on an <h3>; its text (minus a leading
    "P.S."-style marker) becomes the title.

    :raises LookupError: when no suitable <h3> precedes the tag
    """
    previous = tag.previous_element
    while (previous is not None) and (type(previous) != Tag or previous.name == 'p'):
        previous = previous.previous_element
    # fix: the original dereferenced previous.name after the walk could have
    # exhausted the document (previous is None); treat that as "not found"
    if previous is None or previous.name != 'h3':
        raise LookupError()
    # strip a leading "P.S." / "PS" marker (raw string silences the invalid
    # escape warning; the pattern is unchanged)
    title = re.sub(r'^P\.?(\s+)?S\.?\s+', '', previous.string, 1)
    title = title.replace(u' - ', u' — ')
    return previous, title
def filter_feature_video(post, soup):
    """Extract the last feature video (YouTube iframe or <object> embed)
    into post['feature_video'] and strip it from the markup.

    :param post: post metadata dict, mutated in place
    :param soup: BeautifulSoup document of the post body, mutated in place
    :return: the (post, soup) pair
    """
    tags = soup.find_all('iframe', src=re.compile('youtube.com')) + soup.find_all('object')
    # walk candidates from the end of the document; the first one with a
    # usable preceding heading wins
    for tag in reversed(tags):
        try:
            previous, title = _feature_video_title(tag)
        except LookupError:
            continue
        # pull the video id out of the embed URL (last path segment for
        # iframes, segment before ?/& for legacy <object> embeds)
        if tag.name == 'iframe':
            match = re.search('/([^/]+)$', tag['src'])
        else:
            match = re.search('/([^/?&]+)[?&]', tag.embed['src'])
        if match:
            post['feature_video'] = {
                'id': match.group(1),
                'title': title
            }
            # remove both the embed and its heading from the body
            tag.decompose()
            previous.decompose()
            break
    return post, soup
| {
"repo_name": "nevkontakte/drupal2acrylamid",
"path": "drupal/filters/feature_video.py",
"copies": "1",
"size": "1155",
"license": "mit",
"hash": -8097336494852650000,
"line_mean": 24.0652173913,
"line_max": 91,
"alpha_frac": 0.5247181266,
"autogenerated": false,
"ratio": 3.707395498392283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4732113624992283,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.