text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'christoph'
from restapi.models import Hostname, AddressUpdate
from django.template import Context, loader, Template
from django.conf import settings
import os
import datetime
import subprocess
class DyndnsService(object):
    """Coordinates dyndns hostname records in the database with NSD zones."""

    def __init__(self):
        self.nsd = NsdService()

    def get_host(self, hostname):
        """Return the Hostname record for *hostname*, or None if unknown.

        Raises AssertionError when several rows share the hostname, since
        hostnames are expected to be unique.
        """
        hosts = list(Hostname.objects.filter(hostname=hostname))
        if not hosts:
            return None
        assert len(hosts) == 1, 'Multiple hosts with the same hostname {} found'.format(hostname)
        return hosts[0]

    def create_new_host(self, hostname, key_fingerprint):
        """Persist a new Hostname record and register its NSD zone."""
        new_host = Hostname(hostname=hostname, keyFingerprint=key_fingerprint)
        new_host.save()
        self.nsd.add_zone(self.zone_name(hostname))

    def update_host_addresses(self, host, ipv4, ipv6):
        """Record new addresses for *host* and reload its zone when they changed.

        The zone serial is YYYYMMDDnn, nn being the number of updates stored
        today.  COUNT(*) is used instead of len() so today's rows are not
        all fetched just to be counted.
        """
        last_update = AddressUpdate.objects.filter(hostname=host).order_by('-created')[:1]
        if not last_update or last_update[0].ipv4 != ipv4 or last_update[0].ipv6 != ipv6:
            AddressUpdate(ipv4=ipv4, ipv6=ipv6, hostname=host).save()
            today = datetime.datetime.utcnow().date()
            updates_today = AddressUpdate.objects.filter(created__gt=today).count()
            serial = today.strftime("%Y%m%d{0:02d}").format(updates_today)
            self.nsd.update_zone(self.zone_name(host.hostname), ipv4, ipv6, serial)

    def delete_host(self, host):
        """Delete the database record, then tear down the matching zone."""
        host.delete()
        self.nsd.delete_zone(self.zone_name(host.hostname))

    def zone_name(self, hostname):
        """Fully qualified zone name: '<hostname>.<ZONE_DOMAIN>'."""
        return '{}.{}'.format(hostname, settings.ZONE_DOMAIN)

    def find_hosts_by_key_fingerprint(self, key_fingerprint):
        """Queryset of all Hostname rows registered with *key_fingerprint*."""
        return Hostname.objects.filter(keyFingerprint=key_fingerprint)
class NsdService(object):
    """Thin wrapper around NSD zone files and the nsd-control binary."""

    def add_zone(self, zone_name):
        """Write an empty zone file and register the zone with NSD."""
        self.write_zone_file(zone_name, '', '', 0)
        self.nsd_control('addzone', zone_name, settings.ZONES_PATTERN)

    def update_zone(self, zone_name, ipv4, ipv6, serial):
        """Rewrite the zone file with new addresses and ask NSD to reload."""
        self.write_zone_file(zone_name, ipv4, ipv6, serial)
        self.nsd_control('reload', zone_name)

    def delete_zone(self, zone_name):
        """Deregister the zone from NSD and remove its file."""
        self.nsd_control('delzone', zone_name)
        self.delete_zone_file(zone_name)

    def _zone_file_path(self, zone_name):
        # Single place to build '<ZONES_DIRECTORY>/<zone>.zone'.
        return os.path.join(settings.ZONES_DIRECTORY, '{}.zone'.format(zone_name))

    def write_zone_file(self, zone_name, ipv4, ipv6, serial):
        """Render the zone template and (over)write the zone file."""
        result = loader.render_to_string(
            settings.ZONE_TEMPLATE,
            {'zonename': zone_name, 'ipv4': ipv4, 'ipv6': ipv6, 'serial': serial})
        # Context manager guarantees the handle is closed even if write fails
        # (the original open()/close() pair leaked on exceptions).
        with open(self._zone_file_path(zone_name), 'w') as zone_file:
            zone_file.write(result)

    def delete_zone_file(self, zone_name):
        """Remove the zone file if it exists; silently ignore a missing file."""
        zone_file = self._zone_file_path(zone_name)
        if os.path.isfile(zone_file):
            os.remove(zone_file)

    def nsd_control(self, *args):
        """Invoke nsd-control (optionally through sudo) with *args*.

        Does nothing when NSD_CONTROL_PATH is unset, which keeps test and
        development environments from shelling out.
        """
        if settings.NSD_CONTROL_PATH:
            if settings.SUDO_NSD_CONTROL:
                cmd = ['/usr/bin/sudo', settings.NSD_CONTROL_PATH]
            else:
                cmd = [settings.NSD_CONTROL_PATH]
            cmd.extend(args)
            subprocess.call(cmd)
| {
"repo_name": "cemonds/tinhat-dyndns-server",
"path": "restapi/services.py",
"copies": "1",
"size": "3279",
"license": "mit",
"hash": 4275618283869078500,
"line_mean": 38.0357142857,
"line_max": 132,
"alpha_frac": 0.6273254041,
"autogenerated": false,
"ratio": 3.4735169491525424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4600842353252542,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christoph'
import sqlite3
from flask import g
from contextlib import closing
import random
import string
import collections
def connect_db(app):
    """Open the app's configured SQLite database with name-based row access."""
    connection = sqlite3.connect(app.config['DATABASE'])
    connection.row_factory = sqlite3.Row
    return connection


def init_db(app):
    """Create the schema by running schema.sql against the configured database."""
    with closing(connect_db(app)) as db:
        with app.open_resource('schema.sql', mode='r') as schema:
            db.cursor().executescript(schema.read())
        db.commit()
def add_picture(db, picture_name, width, height, public):
    """Insert a picture row unless the filename is already present.

    Returns True when a new row was inserted, False otherwise.
    """
    cur = db.cursor()
    cur.execute("SELECT * from pictures WHERE pictures.filename=?", (picture_name, ))
    if cur.fetchone() is not None:
        return False
    cur.execute("INSERT INTO pictures (filename, width, height, public_viewable) VALUES (?, ?, ?, ?)",
                (picture_name, width, height, public))
    db.commit()
    return True
def alter_picture(db, picture_name, width=None, height=None, public=None):
    """Update any of width / height / public_viewable for an existing picture.

    Arguments left as None are not touched.
    """
    cur = db.cursor()
    statements = (
        ("UPDATE pictures SET width=? WHERE filename=? ", width),
        ("UPDATE pictures SET height=? WHERE filename=? ", height),
        ("UPDATE pictures SET public_viewable=? WHERE filename=? ", public),
    )
    for sql, value in statements:
        if value is not None:
            cur.execute(sql, (value, picture_name))
    db.commit()
def add_person(db, person_name, session_key=None):
    """Insert a person; a random six-letter session key is generated if absent.

    Returns True on insert, False when the name already exists.
    """
    if session_key is None:
        session_key = ''.join(random.choice(string.ascii_uppercase) for _ in range(6))
    cur = db.cursor()
    cur.execute("SELECT * from persons WHERE persons.name=?", (person_name, ))
    if cur.fetchone() is not None:
        return False
    cur.execute("INSERT INTO persons (name, session_key) VALUES (?, ?)", (person_name, session_key))
    db.commit()
    return True
def delete_person(db, person_name):
    """Remove the person row matching *person_name* (no-op when absent)."""
    db.cursor().execute("DELETE FROM persons WHERE name=?", (person_name, ))
    db.commit()
def merge_session_keys(db, persons, session_key=None):
    """Assign the same session key to every name in *persons*.

    A random six-letter key is generated when none is supplied.
    """
    if session_key is None:
        session_key = ''.join(random.choice(string.ascii_uppercase) for _ in range(6))
    cur = db.cursor()
    for name in persons:
        cur.execute("UPDATE persons SET session_key=? WHERE name=?", (session_key, name))
    db.commit()
def connect_person_with_picture(db, picture_name, person_name):
    """Link an existing person to an existing picture via personsOnPicture."""
    sql = """
    INSERT
    INTO personsOnPicture (picture_id, person_id)
    VALUES
    (
    (SELECT id from pictures WHERE filename=?),
    (SELECT id from persons WHERE name=?)
    );
    """
    db.cursor().execute(sql, (picture_name, person_name))
    db.commit()
def unconnect_person_from_picture(db, picture_name, person_name):
    """Remove the link between *person_name* and *picture_name*, if any."""
    sql = """
    DELETE FROM personsOnPicture
    WHERE id IN (
    SELECT personsOnPicture.id
    FROM personsOnPicture
    INNER JOIN pictures ON personsOnPicture.picture_id=pictures.id
    INNER JOIN persons ON personsOnPicture.person_id=persons.id
    WHERE pictures.filename=?
    AND persons.name=?
    )
    """
    db.cursor().execute(sql, (picture_name, person_name))
    db.commit()
def get_all_pictures_of_a_person(db, person_name):
    """Return rows (filename only) of every picture tagged with *person_name*,
    sorted by filename."""
    sql = """
    SELECT pictures.filename
    FROM personsOnPicture
    INNER JOIN pictures ON personsOnPicture.picture_id=pictures.id
    INNER JOIN persons ON personsOnPicture.person_id=persons.id
    WHERE
    persons.name=?
    ORDER BY pictures.filename;
    """
    return db.cursor().execute(sql, (person_name,)).fetchall()
def get_all_pictures_of_a_session_key(db, session_key):
    """Return (filename, width, height) rows of every picture linked to any
    person holding *session_key*, sorted by filename."""
    sql = """
    SELECT pictures.filename, pictures.width, pictures.height
    FROM personsOnPicture
    INNER JOIN pictures ON personsOnPicture.picture_id=pictures.id
    INNER JOIN persons ON personsOnPicture.person_id=persons.id
    WHERE
    persons.session_key=?
    ORDER BY pictures.filename;
    """
    return db.cursor().execute(sql, (session_key,)).fetchall()
def get_pictures_with_session_key_and_name(db, session_key, picture_name):
    """Return rows for *picture_name* only when it is visible to *session_key*
    (i.e. linked to a person holding that key)."""
    sql = """
    SELECT pictures.filename
    FROM personsOnPicture
    INNER JOIN pictures ON personsOnPicture.picture_id=pictures.id
    INNER JOIN persons ON personsOnPicture.person_id=persons.id
    WHERE
    persons.session_key=?
    AND
    pictures.filename=?;
    """
    return db.cursor().execute(sql, (session_key, picture_name)).fetchall()
def get_picture_data(db, picture_name):
    """Return the full row for *picture_name*, or None when it is unknown."""
    sql = """
    SELECT *
    FROM pictures
    WHERE
    pictures.filename=?;
    """
    return db.cursor().execute(sql, (picture_name, )).fetchone()
def get_all_persons_on_picture(db, picture_name):
    """Return rows (name only) of every person linked to *picture_name*."""
    sql = """
    SELECT persons.name
    FROM personsOnPicture
    INNER JOIN pictures ON personsOnPicture.picture_id=pictures.id
    INNER JOIN persons ON personsOnPicture.person_id=persons.id
    WHERE
    pictures.filename=?;
    """
    return db.cursor().execute(sql, (picture_name, )).fetchall()
def get_next_picture_name(db, picture_name):
    """Return the filename following *picture_name* in alphabetical order.

    Returns None when *picture_name* is the last picture — and also when it
    is not present at all.  The original implementation dereferenced an
    exhausted cursor (``fetchone()["filename"]`` on None) and crashed with a
    TypeError for unknown filenames.
    """
    rows = db.cursor().execute("""
    SELECT filename
    FROM pictures
    ORDER BY filename;
    """)
    for row in rows:
        if row["filename"] == picture_name:
            following = rows.fetchone()
            return None if following is None else following["filename"]
    return None
def get_prev_and_next_picture_name(db, picture_name, person_name=None):
    """Return (previous_filename, next_filename) around *picture_name*.

    When *person_name* is given, only that person's pictures are considered;
    otherwise all pictures.  Either element is None when there is no
    neighbour on that side, and (None, None) is returned when the picture is
    not in the result set at all.

    Bug fix: the original only assigned ``next_file`` when a following row
    existed, so the last picture (and an unknown picture) raised an
    UnboundLocalError at the return statement.
    """
    c = db.cursor()
    if person_name is None:
        db_result = c.execute("""
        SELECT filename
        FROM pictures
        ORDER BY filename;
        """)
    else:
        db_result = c.execute("""
        SELECT pictures.filename
        FROM personsOnPicture
        INNER JOIN pictures ON personsOnPicture.picture_id=pictures.id
        INNER JOIN persons ON personsOnPicture.person_id=persons.id
        WHERE
        persons.name=?
        ORDER BY pictures.filename;
        """, (person_name,))
    prev_name = None
    cur = db_result.fetchone()
    while cur is not None and cur["filename"] != picture_name:
        prev_name = cur["filename"]
        cur = db_result.fetchone()
    if cur is None:
        # picture_name never appeared in the result set
        return None, None
    following = db_result.fetchone()
    next_name = None if following is None else following["filename"]
    return prev_name, next_name
def get_all_picture_names(db):
    """Return an iterator over all picture filenames in alphabetical order."""
    rows = db.cursor().execute("""
    SELECT filename from pictures ORDER BY filename;""")
    return map(lambda row: row["filename"], rows)
def is_picture_public(db, picture_name):
    """True iff *picture_name* exists and is flagged public_viewable=1."""
    match = db.cursor().execute("""
    SELECT *
    FROM pictures
    WHERE
    filename=?
    AND
    public_viewable=1""", (picture_name, )).fetchone()
    return match is not None
def get_all_public_pictures(db):
    """Return full rows of every public_viewable picture, sorted by filename."""
    return db.cursor().execute("""
    SELECT *
    FROM pictures
    WHERE public_viewable=1
    ORDER BY filename""").fetchall()
def get_all_persons(db):
    """Return a cursor over (name, session_key), ordered by session key."""
    return db.cursor().execute("""
    SELECT name, session_key
    FROM persons
    ORDER BY session_key;
    """)
def rename_person(db, old_name, new_name):
    """Rename a person, carrying the session key and picture links over.

    Creates the new person with the old session key, repoints all
    personsOnPicture rows, then deletes the old person.
    """
    cur = db.cursor()
    old_row = cur.execute("SELECT session_key FROM persons WHERE name=?;" , (old_name, )).fetchone()
    add_person(db, new_name, old_row["session_key"])
    cur.execute("""
    UPDATE personsOnPicture
    SET person_id=(SELECT id FROM persons WHERE name=?)
    WHERE person_id=(SELECT id FROM persons WHERE name=?);
    """, (new_name, old_name))
    db.commit()
    delete_person(db, old_name)
def get_all_persons_and_numbers(db):
    """Return a cursor over (name, session_key, picture count) for every
    person that is linked to at least one picture."""
    return db.cursor().execute("""
    SELECT persons.name, persons.session_key, COUNT(*)
    FROM personsOnPicture
    INNER JOIN persons ON personsOnPicture.person_id=persons.id
    GROUP BY persons.name;
    """)


def get_all_persons_grouped_by_session_keys(db):
    """Map each session key to a list of {'name': ..., 'count': ...} dicts."""
    grouped = collections.defaultdict(list)
    for name, session_key, count in get_all_persons_and_numbers(db):
        grouped[session_key].append(dict(name=name, count=count))
    return grouped
def get_all_pictures_of_session_key_and_public(db, session_key):
    """Combine a session's own pictures with the public ones, de-duplicated.

    Session-key entries take precedence over public duplicates (first
    occurrence wins); every entry carries filename, file_path, width and
    height, and the result is sorted by filename.
    """
    combined = []
    for pic in get_all_pictures_of_a_session_key(db, session_key):
        combined.append(dict(filename=pic["filename"],
                             file_path=session_key + "/" + pic["filename"],
                             width=pic["width"],
                             height=pic["height"]))
    for pic in get_all_public_pictures(db):
        combined.append(dict(filename=pic["filename"],
                             file_path="all/" + pic["filename"],
                             width=pic["width"],
                             height=pic["height"]))
    # setdefault keeps the first entry seen per filename, mirroring the
    # original "seen set" loop.
    unique = {}
    for entry in combined:
        unique.setdefault(entry["filename"], entry)
    return sorted(unique.values(), key=lambda entry: entry["filename"])
| {
"repo_name": "hikhvar/fotoShare",
"path": "database/__init__.py",
"copies": "1",
"size": "9231",
"license": "mit",
"hash": -3237555303500923000,
"line_mean": 27.4030769231,
"line_max": 143,
"alpha_frac": 0.6152096198,
"autogenerated": false,
"ratio": 3.690923630547781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4806133250347781,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christoph'
"""
Copied from the python documentation https://docs.python.org/2/library/csv.html#csv-examples
"""
import csv, codecs, cStringIO
class UTF8Recoder:
    """
    Iterator that reads an encoded stream and reencodes the input to UTF-8
    """
    def __init__(self, f, encoding):
        # codecs.getreader(encoding) returns a StreamReader class; wrapping
        # the raw stream with it yields decoded (unicode) lines on iteration.
        self.reader = codecs.getreader(encoding)(f)
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol: decode one line from the source
        # encoding and hand it back re-encoded as UTF-8 bytes, which is the
        # only encoding the py2 csv module handles reliably.
        return self.reader.next().encode("utf-8")
class UnicodeReader:
    """
    A CSV reader which will iterate over lines in the CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Recode the stream to UTF-8 first; csv.reader in Python 2 only
        # works on byte streams.
        f = UTF8Recoder(f, encoding)
        self.reader = csv.reader(f, dialect=dialect, **kwds)
    def next(self):
        # Read one CSV row of UTF-8 byte strings and decode every cell
        # back to unicode for the caller.
        row = self.reader.next()
        return [unicode(s, "utf-8") for s in row]
    def __iter__(self):
        return self
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # Incremental encoder keeps state across writerow() calls, so
        # multi-byte target encodings are handled correctly.
        self.encoder = codecs.getincrementalencoder(encoding)()
    def writerow(self, row):
        # csv.writer only accepts byte strings in py2, so encode each cell.
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)
    def writerows(self, rows):
        # Convenience wrapper: emit several rows through writerow().
        for row in rows:
            self.writerow(row)
"repo_name": "hikhvar/fotoShare",
"path": "unicode_helper.py",
"copies": "1",
"size": "1913",
"license": "mit",
"hash": 3519121681618111000,
"line_mean": 28.90625,
"line_max": 96,
"alpha_frac": 0.6063774177,
"autogenerated": false,
"ratio": 3.7731755424063116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48795529601063115,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christoph'
import scrapy
from jtr_scrapper.items import JtrTeamRankingItem, JtrTournamentItem, JtrTournamentPartition
import datetime
import uuid
import utils
import Geohash as geohash
import geopy
import geopy.exc
import geopy.distance as geodistance
class Jtr_Spider(scrapy.Spider):
    """Scrapy spider for turniere.jugger.org (Python 2).

    Crawls the team ranking table, emits one JtrTeamRankingItem per team,
    then follows each team page and emits one JtrTournamentPartition per
    tournament the team attended.  Town names are geocoded via Nominatim
    (with an in-memory cache) to attach geohashes and distances.
    """
    name = "jtrspider"
    allowed_domains = ["turniere.jugger.org"]
    start_urls = [ "http://turniere.jugger.org/rank.team.php" ]
    # Shared across all requests of one crawl; Nominatim lookups are slow,
    # so results are cached per town name in location_cache.
    geo_locator = geopy.Nominatim()
    location_cache = {}

    def parse(self, response):
        # Only the team-ranking page ("Teamwertung") is processed; anything
        # else falls through and yields nothing.
        title = response.xpath('//div[@class="title"]/text()').extract_first()
        if title == "Teamwertung":
            return self.parse_starting_page(response)

    def parse_starting_page(self, response):
        # Rows without a ranking cell reuse the previous row's value (tied
        # ranks are omitted on the site), hence `ranking` persists across
        # loop iterations.
        # NOTE(review): if the very first data row had only 3 cells,
        # `ranking` would still be the int 0 and `.split` would fail —
        # presumably the site always ranks the first row; verify.
        ranking = 0
        for sel in response.xpath('//div[@class="content"]/table/tr'):
            team_link = sel.xpath('td/a/@href').extract_first()
            if team_link is not None:
                team_name = sel.xpath('td/a/text()').extract_first()
                data = sel.xpath('td/text()').extract()
                ranking_item = JtrTeamRankingItem()
                ranking_item['team_name'] = utils.unescape(team_name)
                if len(data) == 4:
                    ranking, city, tournaments, points = data
                else:
                    city, tournaments, points = data
                # "12 / 34." -> 12
                ranking_item['ranking'] = int(ranking.split("/")[0].strip().strip("."))
                ranking_item['hometown'] = utils.unescape(city)
                ranking_item['points'] = float(points)
                ranking_item['number_of_tournaments'] = utils.unescape(tournaments)
                ranking_item['crawl_date'] = datetime.datetime.now()
                yield ranking_item
                yield scrapy.Request(response.urljoin(team_link), callback=self.parse_team_site)

    def parse_team_site(self, response):
        # Page title has the form "<home town> - <team name>".
        team = response.xpath('//div[@class="title"]/text()').extract_first()
        for sel in response.xpath('//div[@class="content"]/table/tr'):
            tournament_link = sel.xpath('td/a/@href').extract_first()
            if tournament_link is not None:
                data = sel.xpath('td/text()').extract()
                tournament_name = sel.xpath('td/a/text()').extract_first()
                # Rows with fewer than 6 cells (e.g. unrated entries) are skipped.
                if len(data) == 6:
                    date, tournament_town, ranking, zf, tw, points = data
                    item = JtrTournamentPartition()
                    item['tournament_date'] = date
                    item['crawl_date'] = datetime.datetime.now()
                    item['ranking'] = int(ranking.split("/")[0].strip().strip("."))
                    # Split only on the first dash; team names may contain dashes.
                    home_town, team_name = team.split("-", 1)
                    item['team_name'] = utils.unescape(team_name.strip())
                    item['team_hometown'] = utils.unescape(home_town.strip())
                    item['tournament_town'] = utils.unescape(tournament_town)
                    item['tournament_name'] = utils.unescape(tournament_name)
                    # Geocode both towns (None on timeout) and derive
                    # geohashes plus the great-circle distance between them.
                    home_town = self._locate(home_town)
                    tournament_town = self._locate(tournament_town)
                    item["team_hometown_position"] = self._get_geohash(home_town)
                    item["tournament_town_position"] = self._get_geohash(tournament_town)
                    item["distance"] = self._get_distance(home_town, tournament_town)
                    yield item
                    #yield scrapy.Request(response.urljoin(tournament_link), callback=self.find_tournament_results)

    def find_tournament_results(self, response):
        # Follows the "Ergebnisse" (results) link of a tournament page.
        results_link = response.xpath('//a[@title="Ergebnisse"]/@href').extract_first()
        yield scrapy.Request(response.urljoin(results_link), callback=self.parse_tournament_results)

    def parse_tournament_results(self, response):
        # Not implemented yet; kept as a callback target for the request above.
        pass

    def _locate(self, town_name):
        # Geocode a town name, caching results; returns None when the
        # geocoder times out (and caches nothing in that case).
        town_name = utils.unescape(town_name.strip())
        if town_name not in self.location_cache:
            try:
                self.location_cache[town_name] = self.geo_locator.geocode(town_name)
            except geopy.exc.GeocoderTimedOut:
                print "Geocoder Timeout."
                return None
        return self.location_cache[town_name]

    def _get_geohash(self, town):
        # Encode a geopy location as a geohash string; None passes through.
        if town is not None:
            return geohash.encode(town.latitude, town.longitude)
        else:
            return None

    def _get_distance(self, town_a, town_b):
        # Great-circle distance in kilometers, or None when either town
        # could not be geocoded.
        if town_a is None or town_b is None:
            return None
        else:
            return geodistance.great_circle(town_a.point, town_b.point).kilometers
| {
"repo_name": "hikhvar/jtr_scrapper",
"path": "jtr_scrapper/spiders/jtr_spider.py",
"copies": "1",
"size": "4628",
"license": "mit",
"hash": -1772705891333194000,
"line_mean": 44.8217821782,
"line_max": 115,
"alpha_frac": 0.5814606742,
"autogenerated": false,
"ratio": 3.8374792703150913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4918939944515091,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christoph Statz, christoph.statz <at> tu-dresden.de"
import numpy as np
from pyvisfile.silo import SiloFile, \
DB_HDF5, DB_CLOBBER, DBOPT_CYCLE, DBOPT_DTIME, DB_COLLINEAR, \
DBOPT_XUNITS, DBOPT_YUNITS, DBOPT_XLABEL, DBOPT_YLABEL, \
DBOPT_UNITS, DB_NODECENT, DBObjectType, DB_LOCAL, \
DBOPT_HI_OFFSET
# Build a 2D collinear quadmesh split into 4 overlapping partitions, write
# one silo file per partition plus a master file with multimesh/multivar
# entries that stitch them together.
x_len = 50
y_len = 40
x = np.linspace(-10., 20., x_len)
y = np.linspace(20., 40., y_len)

# Funny, but in row-major y comes first.
data_global = np.zeros((y_len, x_len), dtype=np.float64, order="C")

# Partition bounds (one-cell overlap between neighbours).
x_from = [0, 19, 0, 19]
x_to = [21, 50, 21, 50]
y_from = [0, 0, 19, 19]
y_to = [21, 21, 40, 40]

# Mark the Ghostzones (at least in one direction).
hi_offset = [(1, 1), (0, 1), (1, 0), (0, 0)]

# Each quadrant carries a distinct constant value so the partitioning is
# visible when the multimesh is plotted.
data_global[:20, :20] = 0.
data_global[20:, :20] = 1.
data_global[:20, 20:] = 2.
data_global[20:, 20:] = 3.

mesh_names = []
var_names = []

for i in range(4):
    file_name = "example_%05d.silo" % i
    s = SiloFile(file_name,
                 mode=DB_CLOBBER,
                 filetype=DB_HDF5,
                 target=DB_LOCAL,
                 fileinfo=f"Example Silo {i:05d}.")
    axes = (x[x_from[i]:x_to[i]], y[y_from[i]:y_to[i]])
    # Copy necessary due to slicing!
    data = data_global[y_from[i]:y_to[i], x_from[i]:x_to[i]].copy()

    options = dict()
    options[DBOPT_CYCLE] = 99
    options[DBOPT_DTIME] = 0.99
    options[DBOPT_XLABEL] = "X"
    options[DBOPT_YLABEL] = "Y"
    options[DBOPT_XUNITS] = "a"
    options[DBOPT_YUNITS] = "b"
    options[DBOPT_HI_OFFSET] = hi_offset[i]

    mesh_name = "mesh"
    s.put_quadmesh(mesh_name, axes, coordtype=DB_COLLINEAR, optlist=options)
    mesh_names.append((f"{file_name}:{mesh_name}", DB_COLLINEAR))

    options = dict()
    options[DBOPT_UNITS] = "unit"
    var_name = "variable"
    s.put_quadvar1(var_name, mesh_name, data, data.shape,
                   centering=DB_NODECENT, optlist=options)
    # Bug fix: the multivar entry must reference the variable inside each
    # partition file, not the mesh (the original appended mesh_name here).
    var_names.append((f"{file_name}:{var_name}", DBObjectType.DB_QUADVAR))

# Master file: one multimesh/multivar object pointing into the 4 part files.
options = dict()
options[DBOPT_CYCLE] = 99
options[DBOPT_DTIME] = 0.99
options[DBOPT_XLABEL] = "xx"
options[DBOPT_YLABEL] = "yy"
options[DBOPT_XUNITS] = "a"
options[DBOPT_YUNITS] = "b"
s = SiloFile("example.silo",
             mode=DB_CLOBBER,
             filetype=DB_HDF5,
             target=DB_LOCAL,
             fileinfo="Example Metadata.")
s.put_multimesh("mesh", mesh_names, optlist=options)
s.put_multivar("scalar", var_names, optlist=options)
| {
"repo_name": "inducer/pyvisfile",
"path": "examples/multimesh_rowmajor.py",
"copies": "1",
"size": "2400",
"license": "mit",
"hash": -8538491446414694000,
"line_mean": 26.9069767442,
"line_max": 76,
"alpha_frac": 0.6125,
"autogenerated": false,
"ratio": 2.5396825396825395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8635462566224087,
"avg_score": 0.0033439946916905276,
"num_lines": 86
} |
# Jython/wsadmin script: test every IBM Connections data source via
# AdminControl.testConnection and report results in three buckets.
runDB = []      # data sources that answered successfully
errorDB = []    # data sources that exist but failed the connection test
notInstDB = []  # data sources that raised (i.e. not installed/configured)
dbs = ['FNOSDS', 'FNGCDDS', 'IBM_FORMS_DATA_SOURCE', 'activities', 'blogs', 'communities', 'dogear', 'files', 'forum', 'homepage', 'metrics', 'mobile', 'news', 'oauth provider', 'profiles', 'search', 'wikis'] # List of all databases to check
for db in dbs: # loop through databases
    ds = AdminConfig.getid( '/DataSource:' + db + '/' )
    try:
        checkDS = AdminControl.testConnection( ds )
        # WASX7217I is the wsadmin success message for a datasource test.
        if checkDS == "WASX7217I: Connection to provided datasource was successful." :
            # print 'Connect to %s was successful' % db
            runDB.append( db )
        else :
            errorDB.append( db )
            # print 'Error: %s is not available' % db
    except:
        # NOTE(review): comparing the list notInstDB to a string is always
        # true, so the guard never filters anything — presumably leftover
        # from an earlier version; verify intent.
        if notInstDB != "All DB checked ":
            notInstDB.append( db )
# Sort each bucket for a stable, readable report.
runDB.sort()
errorDB.sort()
notInstDB.sort()
print ''
print '\tConnection to DataSource successful: \n'
try:
    for db in runDB:
        print '\t\t' + db
except:
    print '\t\tNo running DB'
if notInstDB:
    print ''
    print '\tDB not installed: \n'
    try:
        for db in notInstDB:
            print'\t\t' + db
    except:
        print '\t\tAll DB checked'
if errorDB:
    print ''
    print '\tERROR connecting to: \n'
    try:
        for db in errorDB:
            print '\t\t' + db
    except:
        print '\t\tAll DB running\n'
print ''
| {
"repo_name": "stoeps13/ibmcnxscripting",
"path": "WebSphere/checkDataSource.py",
"copies": "1",
"size": "1479",
"license": "apache-2.0",
"hash": -6723606180777789000,
"line_mean": 25.4107142857,
"line_max": 244,
"alpha_frac": 0.5794455713,
"autogenerated": false,
"ratio": 3.2505494505494505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9175961300775892,
"avg_score": 0.030806744214711683,
"num_lines": 56
} |
import sys
import os
# Load all jython commands, when they are not loaded
# Probe whether the Connections admin commands are already registered in
# this wsadmin session; if not, source them via loadAll.py.
try:
    NewsActivityStreamService.listApplicationRegistrations()
except NameError:
    print "Connections Commands not loaded! Load now: "
    execfile("loadAll.py")
class cnxMenu:
    """Simple text menu: a list of {'text', 'func'} entries driven by index."""
    menuitems = []
    # Function to add menuitems
    def AddItem( self, text, function ):
        self.menuitems.append( {'text': text, 'func':function} )
    # Function for printing
    def Show( self ):
        # Render the numbered menu; c is the 1-based entry number.
        c = 1
        print '\n\tWebSphere and Connections Administration'
        print '\t----------------------------------------', '\n'
        for l in self.menuitems:
            print '\t',
            print c, l['text']
            c = c + 1
        print
    def Do( self, n ):
        # Invoke the handler of the n-th (0-based) menu entry.
        self.menuitems[n]["func"]()
# Thin wrappers: each menu entry simply runs the matching admin script with
# execfile so it executes in the current wsadmin/Jython namespace.
def cfgDataSource():
    execfile( "cfgDataSource.py" )
def cfgJ2EERoleBackup():
    execfile( "cfgJ2EERoleBackup.py" )
def cfgJ2EERoleRestore():
    execfile( "cfgJ2EERoleRestore.py" )
def cfgJ2EERolesRestricted():
    execfile( "cfgJ2EERolesRestricted.py" )
def cfgJ2EERolesUnrestricted():
    execfile( "cfgJ2EERolesUnrestricted.py" )
def cfgJ2EERoleGlobalModerator():
    execfile( "cfgJ2EERoleGlobalModerator.py" )
def cfgJ2EERoleMetricsReader():
    execfile( "cfgJ2EERoleMetricsReader.py" )
def cfgJ2EERoleMetricsReportRun():
    execfile( "cfgJ2EERoleMetricsReportRun.py" )
def cfgJ2EERoleSocialMail():
    execfile( "cfgJ2EERoleSocialMail.py" )
def cfgJVMHeap():
    execfile( "cfgJVMHeap.py" )
def cfgLogFiles():
    execfile( "cfgLogFiles.py" )
def cfgMonitoringPolicy():
    execfile( 'cfgMonitoringPolicy.py' )
def cfgJVMCustProp():
    execfile( 'cfgJVMCustProp.py' )
def checkAppStatus():
    execfile( 'checkAppStatus.py' )
def checkDataSource():
    execfile( 'checkDataSource.py' )
def checkJVMHeap():
    execfile( 'checkJVMHeap.py' )
def checkLogFiles():
    execfile( 'checkLogFiles.py' )
def checkPorts():
    execfile( 'checkPorts.py' )
def checkVariables():
    execfile( 'checkVariables.py' )
def cnxFilesPolicies():
    execfile( 'cnxFilesPolicies.py' )
def cnxLibraryPolicies():
    execfile( 'cnxLibraryPolicies.py' )
def cnxMemberCheckExIDByEmail():
    execfile( 'cnxMemberCheckExIDByEmail.py' )
def cnxMemberInactivateByEmail():
    execfile( 'cnxMemberInactivateByEmail.py' )
def cnxMemberDeactAndActByEmail():
    execfile( 'cnxMemberDeactAndActByEmail.py' )
def cnxMemberSyncAllByEXID():
    execfile( 'cnxMemberSyncAllByEXID.py' )
def cnxCommunitiesReparenting():
    execfile( 'cnxCommunitiesReparenting.py' )
# Sub-menu launchers
def cnxmenu_cfgtasks():
    execfile( 'cnxmenu_cfgtasks.py' )
def cnxmenu_useradmin():
    execfile( 'cnxmenu_useradmin.py' )
def cnxmenu_comm():
    execfile( 'cnxmenu_comm.py' )
def cnxmenu_checks():
    execfile( 'cnxmenu_checks.py' )
def bye():
    # Ends the session; the 'state' assignment here is local only — the
    # actual exit happens via sys.exit.
    print "bye"
    state = 'false'
    sys.exit( 0 )
if __name__ == "__main__":
    # Build the top-level menu.  The commented-out entries were moved into
    # the four sub-menus below and are kept here for reference.
    m = cnxMenu()
    # m.AddItem( "Configure DataSources (cfgDataSource.py)", cfgDataSource )
    # m.AddItem( 'Backup J2EE Roles of all Applications (cfgJ2EERoleBackup.py)', cfgJ2EERoleBackup )
    # m.AddItem( 'Restore J2EE Roles of all Applications (cfgJ2EERoleRestore.py)', cfgJ2EERoleRestore )
    # m.AddItem( 'Set J2EE Roles initially (restricted) (cfgJ2EERolesRestricted.py)', cfgJ2EERolesRestricted )
    # m.AddItem( 'Set J2EE Roles initially (unrestricted) (cfgJ2EERolesUnrestricted.py)', cfgJ2EERolesUnrestricted )
    # m.AddItem( 'Set J2EE Roles for Moderator Roles (cfgJ2EERoleGlobalModerator.py)', cfgJ2EERoleGlobalModerator )
    # m.AddItem( 'Set J2EE Role for Metrics Reader (cfgJ2EERoleMetricsReader.py)', cfgJ2EERoleMetricsReader )
    # m.AddItem( 'Set J2EE Role for Metrics Report Run (cfgJ2EERoleMetricsReportRun)', cfgJ2EERoleMetricsReportRun )
    # m.AddItem( 'Set J2EE Role for SocialMail (cfgJ2EERoleSocialMail)', cfgJ2EERoleSocialMail )
    # m.AddItem( 'Configure JVM Heap Sizes (cfgJVMHeap.py)', cfgJVMHeap )
    # m.AddItem( 'Configure SystemOut/Err Log Size (cfgLogFiles.py)', cfgLogFiles )
    # m.AddItem( 'Configure Monitoring Policy (cfgMonitoringPolicy.py)', cfgMonitoringPolicy )
    # m.AddItem( 'Check if all Apps are running (checkAppStatus.py)', checkAppStatus )
    # m.AddItem( 'Check Database connections (checkDataSource.py)', checkDataSource )
    # m.AddItem( 'Check JVM Heap Sizes (checkJVMHeap.py)', checkJVMHeap )
    # m.AddItem( 'Check SystemOut/Err Log Sizes (checkLogFiles.py)', checkLogFiles )
    # m.AddItem( 'Check / Show all used ports (checkPorts.py)', checkPorts )
    # m.AddItem( 'Show WebSphere Variables (checkVariables.py)', checkVariables )
    # m.AddItem( 'Work with Files Policies (cnxFilesPolicies.py)', cnxFilesPolicies )
    # m.AddItem( 'Work with Libraries (cnxLibraryPolicies.py)', cnxLibraryPolicies )
    # m.AddItem( 'Check External ID (all Apps & Profiles) (cnxMemberCheckExIDByEmail.py)', cnxMemberCheckExIDByEmail )
    # m.AddItem( 'Deactivate and Activate a User in one step (cnxMemberDeactAndActByEmail.py)', cnxMemberDeactAndActByEmail )
    # m.AddItem( 'Deactivate a User by email address (cnxMemberInactivateByEmail.py)', cnxMemberInactivateByEmail )
    # m.AddItem( 'Synchronize ExtID for all Users in all Apps (cnxMemberSyncAllByEXID.py)', cnxMemberSyncAllByEXID )
    # m.AddItem( 'Reparent/Move Communities (cnxCommunitiesReparenting.py)', cnxCommunitiesReparenting )
    m.AddItem( 'Menu - IBM Connections Configuration Tasks', cnxmenu_cfgtasks )
    m.AddItem( 'Menu - IBM Connections/WebSphere Check Tasks', cnxmenu_checks )
    m.AddItem( 'Menu - IBM Connections User Admin Tasks', cnxmenu_useradmin )
    m.AddItem( 'Menu - IBM Connections Community Admin Tasks', cnxmenu_comm )
    m.AddItem( "Exit", bye )
    state = 'True'
    # Main loop: show the menu, validate the (1-based) choice, dispatch.
    # The loop only ends via bye() calling sys.exit.
    while state == 'True':
        m.Show()
        ###########################
        ## Robust error handling ##
        ## only accept int       ##
        ###########################
        ## Wait for valid input in while...not ###
        is_valid=0
        while not is_valid :
            try :
                n = int ( raw_input('Enter your choice [1-5] : ') )
                # Accept 1..5 — matches the five entries registered above.
                if n < 6 and n > 0:
                    is_valid = 1 ## set it to 1 to validate input and to terminate the while..not loop
                else:
                    print ( "'%s' is not a valid menu option.") % n
            except ValueError, e :
                print ("'%s' is not a valid integer." % e.args[0].split(": ")[1])
        # n = input( "your choice> " )
        # Convert the 1-based user choice to the 0-based menu index.
        m.Do( n - 1 )
| {
"repo_name": "stoeps13/ibmcnxscripting",
"path": "WebSphere/cnxmenu.py",
"copies": "1",
"size": "6463",
"license": "apache-2.0",
"hash": 2495343851678926300,
"line_mean": 33.9351351351,
"line_max": 124,
"alpha_frac": 0.6818814792,
"autogenerated": false,
"ratio": 3.105718404613167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9162793286477098,
"avg_score": 0.02496131946721383,
"num_lines": 185
} |
#import sys
#import os
# Load all jython commands, when they are not loaded
# Probe whether the Connections admin commands are registered; source them
# via loadAll.py if this script was started in a fresh wsadmin session.
try:
    NewsActivityStreamService.listApplicationRegistrations()
except NameError:
    print "Connections Commands not loaded! Load now: "
    execfile("loadAll.py")
class cnxMenu_cfgtasks:
    """Text menu for configuration tasks; entries are {'text','func'} dicts."""
    menuitems = []
    # Function to add menuitems
    def AddItem( self, text, function ):
        self.menuitems.append( {'text': text, 'func':function} )
    # Function for printing
    def Show( self ):
        # Render the numbered menu; c is the 1-based entry number.
        c = 1
        print '\n\tWebSphere and Connections Administration - Checks Tasks'
        print '\t----------------------------------------', '\n'
        for l in self.menuitems:
            print '\t',
            print c, l['text']
            c = c + 1
        print
    def Do( self, n ):
        # Invoke the handler of the n-th (0-based) menu entry.
        self.menuitems[n]["func"]()
# Thin wrappers: each menu entry runs the matching admin script via execfile
# so it executes in the current wsadmin/Jython namespace.
def cfgDataSource():
    execfile( "cfgDataSource.py" )
def cfgJ2EERoleBackup():
    execfile( "cfgJ2EERoleBackup.py" )
def cfgJ2EERoleRestore():
    execfile( "cfgJ2EERoleRestore.py" )
def cfgJ2EERolesRestricted():
    execfile( "cfgJ2EERolesRestricted.py" )
def cfgJ2EERolesUnrestricted():
    execfile( "cfgJ2EERolesUnrestricted.py" )
def cfgJ2EERoleGlobalModerator():
    execfile( "cfgJ2EERoleGlobalModerator.py" )
def cfgJ2EERoleMetricsReader():
    execfile( "cfgJ2EERoleMetricsReader.py" )
def cfgJ2EERoleMetricsReportRun():
    execfile( "cfgJ2EERoleMetricsReportRun.py" )
def cfgJ2EERoleSocialMail():
    execfile( "cfgJ2EERoleSocialMail.py" )
def cfgJVMHeap():
    execfile( "cfgJVMHeap.py" )
def cfgJVMCustProp():
    execfile( "cfgJVMCustProp.py" )
def cfgLogFiles():
    execfile( "cfgLogFiles.py" )
def cfgMonitoringPolicy():
    execfile( 'cfgMonitoringPolicy.py' )
def cnxBackToMainMenu():
    # Returns to the top-level menu script.
    execfile( 'cnxmenu.py')
def bye():
    # Ends the session; the 'state' assignment is local only — the actual
    # exit happens via sys.exit.
    # NOTE(review): 'import sys' is commented out at the top of this file;
    # sys is presumably inherited from the cnxmenu.py execfile context —
    # verify this script is never run standalone.
    print "bye"
    state = 'false'
    sys.exit( 0 )
if __name__ == "__main__":
    # Build the configuration-tasks menu.
    # NOTE(review): cnxFilesPolicies / cnxLibraryPolicies are not defined in
    # this file; they resolve only when this script is execfile'd from
    # cnxmenu.py which defines them — verify standalone use is unsupported.
    m = cnxMenu_cfgtasks()
    m.AddItem( "Configure DataSources (cfgDataSource.py)", cfgDataSource )
    m.AddItem( 'Backup J2EE Roles of all Applications (cfgJ2EERoleBackup.py)', cfgJ2EERoleBackup )
    m.AddItem( 'Restore J2EE Roles of all Applications (cfgJ2EERoleRestore.py)', cfgJ2EERoleRestore )
    m.AddItem( 'Set J2EE Roles initially (restricted) (cfgJ2EERolesRestricted.py)', cfgJ2EERolesRestricted )
    m.AddItem( 'Set J2EE Roles initially (unrestricted) (cfgJ2EERolesUnrestricted.py)', cfgJ2EERolesUnrestricted )
    m.AddItem( 'Set J2EE Roles for Moderator Roles (cfgJ2EERoleGlobalModerator.py)', cfgJ2EERoleGlobalModerator )
    m.AddItem( 'Set J2EE Role for Metrics Reader (cfgJ2EERoleMetricsReader.py)', cfgJ2EERoleMetricsReader )
    m.AddItem( 'Set J2EE Role for Metrics Report Run (cfgJ2EERoleMetricsReportRun)', cfgJ2EERoleMetricsReportRun )
    m.AddItem( 'Set J2EE Role for SocialMail (cfgJ2EERoleSocialMail)', cfgJ2EERoleSocialMail )
    m.AddItem( 'Configure JVM Heap Sizes (cfgJVMHeap.py)', cfgJVMHeap )
    m.AddItem( 'Set Custom Parameter for Cache Issues in JVM (cfgJVMCustProp.py)', cfgJVMCustProp )
    m.AddItem( 'Configure SystemOut/Err Log Size (cfgLogFiles.py)', cfgLogFiles )
    m.AddItem( 'Configure Monitoring Policy (cfgMonitoringPolicy.py)', cfgMonitoringPolicy )
    m.AddItem( 'Work with Files Policies (cnxFilesPolicies.py)', cnxFilesPolicies )
    m.AddItem( 'Work with Libraries (cnxLibraryPolicies.py)', cnxLibraryPolicies )
    m.AddItem( 'Back to Main Menu (cnxmenu.py)', cnxBackToMainMenu )
    m.AddItem( "Exit", bye )
    state = 'True'
    # Main loop: show the menu, validate the (1-based) choice, dispatch.
    while state == 'True':
        m.Show()
        ###########################
        ## Robust error handling ##
        ## only accept int       ##
        ###########################
        ## Wait for valid input in while...not ###
        is_valid=0
        while not is_valid :
            try :
                n = int ( raw_input('Enter your choice [1-16] : ') )
                # NOTE(review): 17 entries are registered (16 + Exit) but
                # only 1..16 pass validation, so "Exit" (17) is unreachable —
                # looks like an off-by-one; verify.
                if n < 17 and n > 0:
                    is_valid = 1 ## set it to 1 to validate input and to terminate the while..not loop
                else:
                    print ( "'%s' is not a valid menu option.") % n
            except ValueError, e :
                print ("'%s' is not a valid integer." % e.args[0].split(": ")[1])
        # n = input( "your choice> " )
        # Convert the 1-based user choice to the 0-based menu index.
        m.Do( n - 1 )
| {
"repo_name": "stoeps13/ibmcnxscripting",
"path": "WebSphere/cnxmenu_cfgtasks.py",
"copies": "1",
"size": "4226",
"license": "apache-2.0",
"hash": 4865302241897416000,
"line_mean": 32.2755905512,
"line_max": 114,
"alpha_frac": 0.653099858,
"autogenerated": false,
"ratio": 3.1966717095310138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4349771567531014,
"avg_score": null,
"num_lines": null
} |
__author__= 'Christo Robison'

import numpy as np
import h5py
import matplotlib.pyplot as plt
import collections

# Load the 40 pre-computed k-means centroids used as the RBF centers.
f = h5py.File('mnist40_Centroids.h5','r')
centroids = f['mnist40_Centroids_numpy'][:]
f.close()

# Load the MNIST training examples and their labels (labels are digit
# strings -- convert_labels below compares them against str(j)).
f = h5py.File('mnist_train.h5','r')
mnist_train = f['data'][:]
mnist_train_labels = f['labels'][:]
f.close()

# Load the MNIST test examples and their labels.
f = h5py.File('mnist_test.h5','r')
mnist_test = f['data'][:]
mnist_test_labels = f['labels'][:]
f.close()

#print(np.shape(mnist_test))
#print(np.shape(mnist_train))
#print(np.shape(mnist_test_labels))
#print(np.shape(mnist_train_labels))
print(np.shape(centroids))
def computeRBFbetas(X, centers, labels):
    """Compute the RBF beta (width) coefficient for every centroid.

    For each centroid, the spread sigma is estimated as the mean Euclidean
    distance from all training examples in ``X`` to that centroid; the
    corresponding beta is then 1 / (2 * sigma**2).

    Args:
        X: 2-D array of training examples, one example per row.
        centers: 2-D array of centroids, one centroid per row.
        labels: unused; kept for interface compatibility with callers.

    Returns:
        1-D numpy array of beta values, one per centroid.
    """
    numNeurons = len(centers)
    sigmas = []
    for i in range(numNeurons):
        cen = centers[i, :]
        diff = X - cen
        # Bug fix: sum the squared differences across features (axis=1) so
        # each *example* yields one squared distance to the centroid.  The
        # original summed over axis=0, producing one value per feature,
        # which made the resulting sigma meaningless.
        sqrdDiff = np.sum(np.power(diff, 2), axis=1)
        distances = np.sqrt(sqrdDiff)
        sigmas.append(np.mean(distances))
        print('Computing RBF beta value for centroid %i.' % (i + 1))
    betas = np.divide(1, (np.multiply(2, np.power(sigmas, 2))))
    return betas
def computeRBFActivations(centers, betas, x):
    """Return the RBF activation of every centroid for one input vector.

    Each activation is exp(-beta_k * ||center_k - x||^2).
    """
    delta = centers - x
    squared_distances = (delta ** 2).sum(axis=1)
    return np.exp(-betas * squared_distances)
def trainRBFN(train_data, centroids, labels, n_classes, debug = False):
    """Train an RBF network on the given data.

    Computes per-centroid betas, the hidden-layer activation matrix (with a
    trailing bias column), and solves the normal equations for the output
    weights.  Optionally dumps the weights to 'RBF_Thetas.h5'.

    Returns:
        namedtuple with fields Theta, Betas, Activations, Centroids.
    """
    betas = computeRBFbetas(train_data, centroids, labels)
    print('Calculating activations for %i examples.' % len(train_data))
    hidden = [computeRBFActivations(centroids, betas, train_data[row, :])
              for row in range(len(train_data))]
    # Activation matrix augmented with a bias column of ones.
    addedBias = np.ones((len(train_data), len(centroids) + 1))
    addedBias[:, :-1] = hidden
    print('Done.')
    newLabels = convert_labels(labels, n_classes)
    # Least-squares output weights: theta = (A^T A)^+ A^T y
    design_t = np.transpose(addedBias)
    theta = np.dot(np.linalg.pinv(np.dot(design_t, addedBias)),
                   np.dot(design_t, newLabels))
    if debug:
        dump = h5py.File("RBF_Thetas.h5", "w")
        dump.create_dataset('labels', data=theta)
        dump.close()
    result = collections.namedtuple('trainRBF', ['Theta', 'Betas', 'Activations', 'Centroids'])
    return result(Theta=theta, Betas=betas, Activations=addedBias, Centroids=centroids)
def getThetas(activations, labels, n_classes): #not needed after all
    """Solve the least-squares output weights, once per class.

    Note: the computation does not depend on the loop index, so the list
    returned holds n_classes identical solutions (kept for parity with the
    original implementation).
    """
    out = []
    for _ in range(n_classes):
        activations_t = np.transpose(activations)
        solution = np.dot(np.linalg.pinv(np.dot(activations_t, activations)),
                          np.dot(activations_t, labels))
        out.append(solution)
    return out
def convert_labels(labels, n_classes, debug = False):
    """One-hot encode string digit labels into an (N, n_classes) 0/1 matrix.

    Column j holds 1 where labels == str(j).  When debug is True the matrix
    shape is printed and the result is dumped to 'mnist_newLabels.h5'.
    """
    for cls in range(n_classes):
        indicator = (labels == str(cls)).astype(int)
        if cls == 0:
            flat = indicator
            print(np.shape(flat))
        else:
            # Columns are accumulated end-to-end; the Fortran-order reshape
            # below turns them back into one column per class.
            flat = np.append(flat, indicator)
            print(indicator[:])
    one_hot = np.reshape(flat, [len(labels), n_classes], order='F')
    if debug: print(np.shape(one_hot))
    if debug:
        h5 = h5py.File("mnist_newLabels.h5", "w")
        h5.create_dataset('labels', data=one_hot)
        h5.close()
    return one_hot
def evaluateRBFN(Centroids, betas, theta, input, debug = False):
    """Evaluate the trained RBF network on a single test example.

    Args:
        Centroids: 2-D array of RBF centers, one per row.
        betas: per-centroid width coefficients.
        theta: output-layer weights (len(Centroids)+1 rows incl. bias).
        input: one test example (1-D feature vector).
        debug: when True, print the hidden-layer activations.

    Returns:
        Output-layer scores, one per class.
    """
    phis = computeRBFActivations(Centroids, betas, input)
    # Bug fix: size the bias-augmented vector from the Centroids *argument*
    # rather than the module-level `centroids` global that the original
    # accidentally referenced (which broke calls with other center sets).
    addedBias = np.ones(len(Centroids) + 1)
    # add a column of ones (bias values) to the end of our matrix
    addedBias[:-1] = phis
    if debug: print(phis)
    z = np.dot(np.transpose(theta), addedBias)
    return z
def softMax(scores):
    """Normalize raw scores into a probability distribution (softmax)."""
    exponentiated = np.exp(scores)
    return exponentiated / exponentiated.sum(axis=0)
# take centroids use them as value for each node (one for each centroid)
# calculate beta values (standard deviation of each centroid) and weights for output nodes
# one for each class, 10 in this case. One for each digit.
if __name__ == '__main__':
    #temp = computeRBFbetas(mnist_train, centroids, mnist_train_labels)
    #print(temp)
    #bar = computeRBFActivations(centroids,temp,mnist_train[1,:])
    #print(bar)
    # Train the network on the full training set (10 digit classes),
    # with debug=True so the weights are also dumped to HDF5.
    foo = trainRBFN(mnist_train, centroids, mnist_train_labels, 10, True)
    print(np.shape(foo))
    # Score every test example and pick the highest-scoring class.
    result = []
    for i in range(len(mnist_test)):
        res = evaluateRBFN(foo.Centroids, foo.Betas, foo.Theta, mnist_test[i, :])
        result.append(res)
    max_IDX = np.argmax(result, axis=1)
    # NOTE(review): map() returns a list only on Python 2; this elementwise
    # comparison against string labels assumes Python 2 semantics.
    temp = map(str, max_IDX)
    class_test = mnist_test_labels == temp
    class_test = class_test.astype(int)
    performance = np.mean(class_test)
    print('RBFN classified MNIST with %f percent accuracy.' %performance)
    # Visualize the learned output-layer weights as an image.
    for i in range(1):
        # NOTE(review): subplot(1, 1/1, i) is called with i == 0, but
        # matplotlib subplot indices start at 1 -- confirm this renders.
        plt.subplot(1, 1 / 1, i)
        plt.tick_params(
            axis='both',
            which='both',
            bottom='off',
            top='off',
            left='off',
            right='off',
            labelbottom='off',
            labelleft='off')
        imgp = plt.imshow(np.transpose(foo.Theta))
        plt.set_cmap('jet')
        plt.suptitle('Output layer Weights for %i node RBF Network.' %41)
plt.show() | {
"repo_name": "Crobisaur/KMeans_MNIST",
"path": "CpE520_HW6.py",
"copies": "1",
"size": "5138",
"license": "mit",
"hash": -8869534383570345000,
"line_mean": 31.9423076923,
"line_max": 92,
"alpha_frac": 0.6270922538,
"autogenerated": false,
"ratio": 3.1618461538461538,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4288938407646154,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christo Robison'
import numpy as np
import scipy as sci
import h5py
from PIL import Image
import os
import collections
'''THis program reads in the entire MNist dataset'''
def readMnist(path='/home/crob/Downloads/mnist/train'):
    """Load MNIST PNG digits from a directory tree into flat numpy rows.

    Walks `path` recursively; every *.png file is opened, converted to
    8-bit grayscale floats, and flattened to a 784-element row.  The label
    of an image is its parent directory path relative to `path`.

    Args:
        path: root directory containing one sub-directory per digit class.

    Returns:
        namedtuple 'examples' with fields:
            data:  (N, 784) float numpy array of pixel values.
            label: list of N label strings.
    """
    # Cleanup: removed the unused `dataDir` constant and a discarded
    # `np.hstack(im)` call (its result was never used) from the original.
    pixels = []
    labels = []
    for root, dirs, files in os.walk(path):
        for name in files:
            if name.endswith(".png"):
                # 'L' = 8-bit grayscale; 'f' = float array.
                im = np.array(Image.open(os.path.join(root, name)).convert('L'), 'f')
                pixels.append(np.reshape(im, 784))
                labels.append(root.replace(path + '/', ''))
    print(len(pixels))
    data = np.array(pixels)
    print(data.shape)
    out = collections.namedtuple('examples', ['data', 'label'])
    return out(data=data, label=labels)
if __name__ == '__main__':
    # Hard-coded local dataset location; adjust per machine.
    path = '/home/crob/Downloads/mnist'
    # Read the *test* split and dump it to an HDF5 file.
    s = readMnist(path+'/test')
    print(np.shape(s.data))
    f = h5py.File("mnist_test.h5","w")
    f.create_dataset('data', data=s.data)
    f.create_dataset('labels', data=s.label)
    f.close()
| {
"repo_name": "Crobisaur/KMeans_MNIST",
"path": "readMnist.py",
"copies": "1",
"size": "1326",
"license": "mit",
"hash": 838113359604378200,
"line_mean": 27.8260869565,
"line_max": 85,
"alpha_frac": 0.5467571644,
"autogenerated": false,
"ratio": 3.2106537530266346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9208965108167046,
"avg_score": 0.009689161851917684,
"num_lines": 46
} |
import numpy as np
import matplotlib.pyplot as plt
import skimage.draw as skdraw #Used for fast landscape triangle rastering
import scipy.misc #Used for downsampling rasterized images avoiding aliasing
import time #For timing kernel comparison
import sklearn.metrics.pairwise
##############################################################################
########## Plotting Functions ##########
##############################################################################
def plotDGM(dgm, color = 'b'):
    """Plot a persistence diagram: one point per (birth, death) pair plus the diagonal.

    dgm: iterable of (birth, death) pairs.
    color: matplotlib color character for the points.

    NOTE(review): `zip(*dgm)[0]` subscripts the result of zip(), which only
    works on Python 2 (where zip returns a list).
    """
    # Create Lists
    X = list(zip(*dgm)[0]);
    Y = list(zip(*dgm)[1]);
    # set axis values; the plot range is padded by a fifth of the data range
    axMin = min(min(X),min(Y));
    axMax = max(max(X),max(Y));
    axRange = axMax-axMin;
    # plot points
    plt.plot(X, Y,'%s.'%color)
    # plot the birth == death diagonal line
    plt.plot([axMin-axRange/5,axMax+axRange/5], [axMin-axRange/5, axMax+axRange/5],'k');
    # adjust axis
    plt.axis([axMin-axRange/5,axMax+axRange/5, axMin-axRange/5, axMax+axRange/5])
    # add labels
    plt.xlabel('Time of Birth')
    plt.ylabel('Time of Death')
def plotWassersteinMatching(I1, I2, matchidx):
    """Overlay two persistence diagrams and draw their Wasserstein matching.

    I1, I2: (N, 2) numpy arrays of persistence points (.dot is used below).
    matchidx: list of (i, j) index pairs as returned by getWassersteinDist;
        an index beyond a diagram's length denotes a match to the diagonal.

    NOTE(review): plt.hold() was removed from modern matplotlib; this module
    targets the older API.
    """
    plotDGM(I1, 'b')
    plt.hold(True)
    plotDGM(I2, 'r')
    # 45-degree rotation used to project points onto the diagonal.
    cp = np.cos(np.pi/4)
    sp = np.sin(np.pi/4)
    R = np.array([[cp, -sp], [sp, cp]])
    I1Rot = I1.dot(R)
    I2Rot = I2.dot(R)
    for index in matchidx:
        (i, j) = index
        if i >= I1.shape[0] and j >= I2.shape[0]:
            # Diagonal matched to diagonal: nothing to draw.
            continue
        if i >= I1.shape[0]:
            # Point j of I2 is matched to the diagonal: draw to its projection.
            diagElem = np.array([I2Rot[j, 0], 0])
            diagElem = diagElem.dot(R.T)
            plt.plot([I2[j, 0], diagElem[0]], [I2[j, 1], diagElem[1]], 'g')
        elif j >= I2.shape[0]:
            # Point i of I1 is matched to the diagonal.
            diagElem = np.array([I1Rot[i, 0], 0])
            diagElem = diagElem.dot(R.T)
            plt.plot([I1[i, 0], diagElem[0]], [I1[i, 1], diagElem[1]], 'g')
        else:
            # Point-to-point match.
            plt.plot([I1[i, 0], I2[j, 0]], [I1[i, 1], I2[j, 1]], 'g')
##############################################################################
########## Diagram Comparison Functions ##########
##############################################################################
#Assumes first two columns of S and T are the coordinates of the persistence
#points, but allows for other coordinate columns (which are ignored in
#diagonal matching)
def getWassersteinDist(S, T):
    """Wasserstein distance between persistence diagrams S and T.

    Builds an (N+M) x (N+M) cost matrix of point-to-point distances plus
    point-to-diagonal costs, then solves the assignment problem with the
    Hungarian algorithm.  Extra coordinate columns beyond the first two are
    ignored for the diagonal projection.

    Returns:
        (matchidx, matchdist, D): the matching as (row, col) pairs, the
        total matched cost, and the cost matrix as a list of lists.
    """
    import hungarian #Requires having compiled the library
    N = S.shape[0]
    M = T.shape[0]
    #Handle the cases where there are no points in the diagrams
    if N == 0:
        S = np.array([[0, 0]])
        N = 1
    if M == 0:
        T = np.array([[0, 0]])
        M = 1
    DUL = sklearn.metrics.pairwise.pairwise_distances(S, T)
    #Put diagonal elements into the matrix
    #Rotate the diagrams to make it easy to find the straight line
    #distance to the diagonal
    cp = np.cos(np.pi/4)
    sp = np.sin(np.pi/4)
    R = np.array([[cp, -sp], [sp, cp]])
    S = S[:, 0:2].dot(R)
    T = T[:, 0:2].dot(R)
    D = np.zeros((N+M, N+M))
    D[0:N, 0:M] = DUL
    # Upper-right block: cost of matching each S point to the diagonal
    # (its rotated y-coordinate); other entries carry the max cost so the
    # solver avoids them.
    UR = np.max(D)*np.ones((N, N))
    np.fill_diagonal(UR, S[:, 1])
    D[0:N, M:M+N] = UR
    # Lower-left block: likewise for T points.
    UL = np.max(D)*np.ones((M, M))
    np.fill_diagonal(UL, T[:, 1])
    D[N:M+N, 0:M] = UL
    D = D.tolist()
    #Run the hungarian algorithm
    matchidx = hungarian.lap(D)[0]
    matchidx = [(i, matchidx[i]) for i in range(len(matchidx))]
    matchdist = 0
    for pair in matchidx:
        (i, j) = pair
        matchdist += D[i][j]
    return (matchidx, matchdist, D)
#Do sorting and grabbing with the option to include birth times
#Zeropadding is also taken into consideration
def sortAndGrab(dgm, NBars = 10, BirthTimes = False):
    """Return the NBars largest persistence values of a diagram, zero-padded.

    Bars are sorted by persistence (death - birth) in decreasing order and
    truncated or zero-padded to exactly NBars entries.  When BirthTimes is
    True, the matching birth times (same ordering and padding) are appended,
    giving 2*NBars entries.  An empty diagram yields all zeros.
    """
    points = np.array(dgm)
    if points.size == 0:
        return np.zeros(NBars * 2 if BirthTimes else NBars)

    def _top_padded(values):
        # Keep at most NBars entries, padding with zeros when too short.
        head = values[:min(NBars, len(values))].flatten()
        if len(head) < NBars:
            head = np.append(head, np.zeros(NBars - len(head)))
        return head

    persistence = points[:, 1] - points[:, 0]
    order = np.argsort(-persistence).flatten()
    result = _top_padded(persistence[order])
    if BirthTimes:
        result = np.append(result, _top_padded(points[order, 0].flatten()))
    return result
def getLandscapeRasterized(dgm, xrange, yrange, UpFac = 10):
    """Rasterize a persistence diagram as an accumulation of triangles.

    Each point (b, d) is drawn as a filled triangle with base [b, d] on y=0
    and apex at ((b+d)/2, (d-b)/2); overlapping triangles accumulate counts.
    Rendering happens on an UpFac-times finer grid and is then downsampled
    to (yrange.size, xrange.size) to avoid aliasing.

    NOTE(review): scipy.misc.imresize was removed in SciPy 1.3, so this
    requires an older SciPy (or porting to another resize routine).
    """
    I = np.array(dgm)
    if I.size == 0:
        return np.zeros((yrange.size, xrange.size))
    NX = xrange.size
    NY = yrange.size
    #Rasterize on a finer grid and downsample
    NXFine = UpFac*NX
    NYFine = UpFac*NY
    xrangeup = np.linspace(xrange[0], xrange[-1], NXFine)
    yrangeup = np.linspace(yrange[0], yrange[-1], NYFine)
    dx = xrangeup[1] - xrangeup[0]
    dy = yrangeup[1] - yrangeup[0]
    Y = 0.5*(I[:, 1] - I[:, 0]) #Triangle tips
    L = np.zeros((NYFine, NXFine))
    for ii in range(I.shape[0]):
        # Triangle vertices in data coordinates, converted to pixel indices.
        x = [I[ii, 0], 0.5*np.sum(I[ii, 0:2]), I[ii, 1]]
        y = [0, Y[ii], 0]
        x = np.round((x - xrangeup[0])/dx)
        y = np.round((y - yrangeup[0])/dy)
        yidx, xidx = skdraw.polygon(y, x)
        #Allow for cropping: clamp indices that fall off the grid edge
        yidx = np.minimum(yidx, L.shape[0]-1)
        xidx = np.minimum(xidx, L.shape[1]-1)
        L[yidx, xidx] += 1
    L = scipy.misc.imresize(L, (NY, NX))
    return L
#Get a discretized verison of the solution of the heat flow equation
#described in the CVPR 2015 paper
def getHeatRasterized(dgm, sigma, xrange, yrange, UpFac = 10):
    """Discretized heat-flow solution for a diagram (CVPR 2015 kernel).

    Each point contributes a Gaussian minus a Gaussian at its mirror image
    across the diagonal (enforcing the zero boundary condition on y = x),
    normalized by 1/(4*pi*sigma).  Computed on an UpFac-times finer grid and
    downsampled to (yrange.size, xrange.size).

    NOTE(review): scipy.misc.imresize was removed in SciPy 1.3, and it also
    rescales values on output -- confirm downstream consumers expect that.
    """
    I = np.array(dgm)
    if I.size == 0:
        return np.zeros((yrange.size, xrange.size))
    NX = xrange.size
    NY = yrange.size
    #Rasterize on a finer grid and downsample
    NXFine = UpFac*NX
    NYFine = UpFac*NY
    xrangeup = np.linspace(xrange[0], xrange[-1], NXFine)
    yrangeup = np.linspace(yrange[0], yrange[-1], NYFine)
    X, Y = np.meshgrid(xrangeup, yrangeup)
    u = np.zeros(X.shape)
    for ii in range(I.shape[0]):
        u = u + np.exp(-( (X - I[ii, 0])**2 + (Y - I[ii, 1])**2 )/(4*sigma))
        #Now subtract mirror diagonal
        u = u - np.exp(-( (X - I[ii, 1])**2 + (Y - I[ii, 0])**2 )/(4*sigma))
    u = (1.0/(4*np.pi*sigma))*u
    u = scipy.misc.imresize(u, (NY, NX))
    return u
#Evaluate the continuous heat-based kernel between dgm1 and dgm2 (more correct
#than L2 on the discretized verison above but may be slower because can't exploit
#Octave's fast matrix multiplication when evaluating many, many kernels)
def evalHeatKernel(dgm1, dgm2, sigma):
    """Evaluate the continuous heat kernel between two persistence diagrams.

    Sums, over all point pairs (p, q), a Gaussian of (p - q) minus a
    Gaussian of p against q mirrored across the diagonal, normalized by
    1/(8*pi*sigma).
    """
    first = np.array(dgm1)
    second = np.array(dgm2)
    total = 0
    for i in range(first.shape[0]):
        p = first[i, 0:2]
        for j in range(second.shape[0]):
            q = second[j, 0:2]
            mirrored = second[j, 1::-1]  # q reflected across the diagonal
            direct = np.exp(-np.sum((p - q) ** 2) / (8 * sigma))
            reflected = np.exp(-np.sum((p - mirrored) ** 2) / (8 * sigma))
            total += direct - reflected
    return total / (8 * np.pi * sigma)
#Return the pseudo-metric between two diagrams based on the continuous
#heat kernel
def evalHeatDistance(dgm1, dgm2, sigma):
    """Heat-kernel pseudo-metric between two diagrams: sqrt(k11 + k22 - 2*k12)."""
    self_terms = evalHeatKernel(dgm1, dgm1, sigma) + evalHeatKernel(dgm2, dgm2, sigma)
    cross_term = evalHeatKernel(dgm1, dgm2, sigma)
    return np.sqrt(self_terms - 2 * cross_term)
| {
"repo_name": "ctralie/MorseSSM",
"path": "DGMTools.py",
"copies": "1",
"size": "7377",
"license": "apache-2.0",
"hash": -1384315117805682000,
"line_mean": 35.3399014778,
"line_max": 127,
"alpha_frac": 0.5519859021,
"autogenerated": false,
"ratio": 2.9215841584158415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39735700605158414,
"avg_score": null,
"num_lines": null
} |
import csv
import mysql.connector
import configSettings as cs
import collections
import pickle
import pandas as pd
import os.path
import philly_tweet_tools
################# import db config settings ##################
# Open the MySQL connection using credentials from configSettings.
cnx = mysql.connector.connect(user=cs.user, password=cs.password,
                              host=cs.host,
                              database=cs.database,
                              charset = 'utf8mb4')
# cursor for writing out to table
curOut = cnx.cursor(buffered=True)
# read in last 24 hours of data
df = pd.read_sql('SELECT * from docMatrix \
WHERE created_at >= (select date_sub(MAX(created_at),\
INTERVAL 24 hour)FROM docMatrix) ;', con=cnx)
df['frequency'] = df['frequency'].astype(int)
# import age and gender lexica
parent_directory = os.path.abspath(os.path.dirname(__file__))
age = pd.read_csv(parent_directory + '/lexica/emnlp14age.csv')
gender = pd.read_csv(parent_directory + '/lexica/emnlp14gender.csv')
# get intercept values
# NOTE(review): .loc[0, 'weight'] assumes the '_intercept' row sits at
# index label 0 in the lexicon CSV -- confirm the file layout.
age_intercept = age[age.term == '_intercept'].loc[0,'weight']
gender_intercept = gender[gender.term == '_intercept'].loc[0,'weight']
age.columns = ['age_term', 'age_weight']
gender.columns = ['gender_term', 'gender_weight']
# Join lexicon weights onto the word-frequency rows.
merged_df = pd.merge(df,age,how = 'left', \
                     left_on = 'word', right_on = 'age_term')
merged_df = pd.merge(merged_df,gender,how = 'left', \
                     left_on = 'word', right_on = 'gender_term')
# Score each tweet (grouped by id): weighted sum of lexicon weights by
# relative word frequency, plus the model intercept.
for name, group in merged_df.groupby('id'):
    # estimate age
    age_group = group.dropna(subset = ['age_weight'])
    if age_group.empty:
        estimated_age = age_intercept
    else:
        estimated_age = sum(age_group.age_weight*age_group.frequency/\
                            age_group.tweet_length) + age_intercept
    # estimate gender
    gender_group = group.dropna(subset = ['gender_weight'])
    if gender_group.empty:
        estimated_gender = gender_intercept
    else:
        estimated_gender = sum(gender_group.gender_weight*gender_group.frequency/\
                               gender_group.tweet_length) + gender_intercept
    # write data to table
    data = [float(estimated_gender),float(estimated_age), name]
    stmt = "INSERT IGNORE INTO demographics \
(gender, age, id) VALUES (%s, %s, %s)"
    curOut.execute(stmt, data)
cnx.commit()
cnx.close()
# get age and gender data and write to file for web app
philly_tweet_tools.get_gender_data()
philly_tweet_tools.get_age_data()
| {
"repo_name": "ctufts/philly_tweet_pulse",
"path": "mysql_python/updateDemographicTable.py",
"copies": "1",
"size": "2638",
"license": "mit",
"hash": -3252969724600779000,
"line_mean": 31.5679012346,
"line_max": 76,
"alpha_frac": 0.6758908264,
"autogenerated": false,
"ratio": 3.0252293577981653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42011201841981655,
"avg_score": null,
"num_lines": null
} |
import csv
import re
import happierfuntokenizing
import mysql.connector
import configSettings as cs
import collections
import pickle
import gensim
from nltk.tokenize import WhitespaceTokenizer
from nltk import WordNetLemmatizer, word_tokenize
from nltk.corpus import stopwords
import os.path
import json
################# read in tweets from main table ##################
# Open the MySQL connection using credentials from configSettings.
cnx = mysql.connector.connect(user=cs.user, password=cs.password,
                              host=cs.host,
                              database=cs.database,
                              charset = 'utf8mb4')
# cursor=cnx.cursor()
curIn = cnx.cursor(buffered=True)
curOut = cnx.cursor(buffered=True)
# change query to get tweet id and tweets and date time
# ignore retweets
query = ("SELECT id, created_at, tweet FROM tweetTable WHERE retweeted = 0 \
AND created_at >= (select date_sub(MAX(created_at), \
INTERVAL 24 hour)FROM tweetTable)")
curIn.execute(query)
############# place tweets in a list #################
# initialize lemmatizer()
lem = WordNetLemmatizer()
# initialize stopwords: NLTK English list plus banned words and local terms
stopwords = stopwords.words('english')
parent_directory = os.path.abspath(os.path.dirname(__file__))
bannedWords = json.loads(open(parent_directory + '/lexica/badWords.json').read())
stopwords = stopwords + bannedWords + ['philly','philadelphia','...']
#for sword in additional_stopwords:
#    stopwords.append(sword)
# create dataset to write to sql table #########
tok = happierfuntokenizing.Tokenizer()
tokenized_tweets = []
for id, created_at, tweet in curIn:
    # tokenize and get word frequency
    sentence = list(tok.tokenize(tweet.lower()))
    tweet_length = len(sentence)
    words = collections.Counter(sentence)
    # write data to word frequency table
    for key, value in words.items():
        data = [key,value, tweet_length, created_at, id]
        stmt = "INSERT IGNORE INTO docMatrix \
(word, frequency, tweet_length, \
created_at, id) VALUES (%s, %s, %s, %s, %s)"
        curOut.execute(stmt, data)
    # Build a filtered token list for topic modelling: drop stopwords,
    # short tokens, mentions, hashtags, URLs and non-alphanumeric tokens.
    #tokenized_tweets.append([s for s in sentence
    #                         if s not in stopwords and len(s) > 2 ])
    sentence_topic_model = WhitespaceTokenizer().tokenize(tweet.lower())
    temp = []
    for w in sentence_topic_model:
        if w not in stopwords and \
           len(w) > 2 and \
           not (re.match('@\\w',w)) and \
           not (re.match('#\\w',w)) and \
           not (re.match('htt\\w',w)) and \
           not (re.match('[^A-Za-z0-9]+', w)):
            temp += [words for words in word_tokenize(w) if \
                     len(words) > 2 and words not in stopwords and
                     not (re.match('[^A-Za-z0-9]+', words))]
    if temp:
        tokenized_tweets.append(temp)
cnx.commit()
cnx.close()
# create dictionary and corpus then write to pickle
dictionary = gensim.corpora.Dictionary(tokenized_tweets)
corpus = [dictionary.doc2bow(text) for text in tokenized_tweets]
########## write out corpus ###############
parent_directory = os.path.abspath(os.path.dirname(__file__))
file_Name = parent_directory + "/data/corpus"
# open the file for writing
fileObject = open(file_Name,'wb')
pickle.dump(corpus,fileObject)
fileObject.close()
### write out dictionary #################
file_Name = parent_directory + "/data/dictionary"
# open the file for writing
fileObject = open(file_Name,'wb')
pickle.dump(dictionary,fileObject)
fileObject.close()
| {
"repo_name": "ctufts/philly_tweet_pulse",
"path": "mysql_python/updateTopicTable.py",
"copies": "1",
"size": "3661",
"license": "mit",
"hash": -840383019105886700,
"line_mean": 31.3982300885,
"line_max": 81,
"alpha_frac": 0.6700355094,
"autogenerated": false,
"ratio": 3.1614853195164074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9138113371486734,
"avg_score": 0.038681491485934624,
"num_lines": 113
} |
__author__ = 'Chuck Martin'
import os
import shutil
from django.core.management.base import BaseCommand
from django.conf import settings
from django.template.loader import render_to_string
from data_port.forms import MessageForm
from data_port.get_LAN_ip_address import get_LAN_ip_address
class Command(BaseCommand):
    """Management command that bundles the app into a PhoneGap Build zip.

    Renders the message form to a static index.html, copies static assets
    and config.xml alongside it, and zips the result.  (Python 2 code:
    uses the `print` statement.)
    """
    help = 'Makes a zipfile that can be posted to PhoneGap Build.'

    def handle(self, *args, **options):
        # Recreate a clean staging directory under the project root.
        destination_dir = os.path.join(settings.SITE_PATH, 'phone_gap_bundle')
        if os.path.exists(destination_dir):
            shutil.rmtree(destination_dir)
        os.mkdir(destination_dir)

        # Build the page using the same template we used to render it to a web-page during local tests
        form = MessageForm()
        lan_ip_address = 'http://' + get_LAN_ip_address()
        rendered = render_to_string('data_port/message_form.html',
                                    {'STATIC_URL': '',
                                     'form': form,
                                     'IS_PHONEGAP': True,
                                     'LAN_IP_ADDRESS': lan_ip_address})
        fp = open(os.path.join(destination_dir, 'index.html'), 'w')
        fp.write(rendered)
        fp.close()

        # Copy the static files
        shutil.copytree(os.path.join(settings.SITE_PATH, 'static', 'phonegap'),
                        os.path.join(destination_dir, 'phonegap'))
        shutil.copyfile(os.path.join(settings.SITE_PATH, 'data_port', 'config.xml'),
                        os.path.join(destination_dir, 'config.xml'))

        # Zip it up and return it
        shutil.make_archive(destination_dir, 'zip', destination_dir)
        print 'Your zip file is at: ', destination_dir + '.zip'
| {
"repo_name": "cwurld/django-phonegap",
"path": "django_phonegap/data_port/management/commands/bundle_phonegap.py",
"copies": "1",
"size": "1740",
"license": "bsd-3-clause",
"hash": 3507171369397103000,
"line_mean": 39.4651162791,
"line_max": 102,
"alpha_frac": 0.5965517241,
"autogenerated": false,
"ratio": 3.918918918918919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004993824127076886,
"num_lines": 43
} |
__author__ = 'chuqiao'
import EventsPortal
from datetime import datetime
import logging
def logger():
    """
    Function that initialises logging system
    """
    # Intentionally rebinds the module-level name 'logger' from this
    # function to the configured Logger object (self-replacing initializer).
    global logger
    logger = logging.getLogger('adddata')
    # DEBUG is the lowest severity dispatched to the handlers below.
    logger.setLevel(logging.DEBUG)
    # One handler for the log file, one for the console.
    file_handler = logging.FileHandler('adddatatest.log')
    console_handler = logging.StreamHandler()
    # Shared timestamped record format for both destinations.
    record_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(record_format)
    console_handler.setFormatter(record_format)
    # Console handler first, then file handler (matches original order).
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
# EventsPortal.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events");
logger()
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/upcoming?state=published&field_type_tid=All", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull", "http://bioevents-portal.org/events","http://localhost:8983/solr/event_portal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","139.162.217.53:8983/solr/eventsportal/")
if __name__ == '__main__':
logger.info('start at %s' % datetime.now())
EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull",
"http://bioevents-portal.org/events",
"139.162.217.53:8983/solr/eventsportal/")
logger.info('finish at %s' % datetime.now()) | {
"repo_name": "elixirhub/events-portal-scraping-scripts",
"path": "AddDataTest.py",
"copies": "1",
"size": "2436",
"license": "mit",
"hash": 1921533156218916900,
"line_mean": 38.9508196721,
"line_max": 207,
"alpha_frac": 0.7085385878,
"autogenerated": false,
"ratio": 3.510086455331412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4718625043131412,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chuqiao'
from apscheduler.schedulers.blocking import BlockingScheduler
import logging
import SyncSolr
import sys
def logger():
    """
    Function that initialises logging system.

    Rebinds the module-level name `logger` (this function) to a configured
    logging.Logger writing DEBUG+ records to both the console and
    'ScheduleSyncTwoSolrs.log'.
    """
    global logger
    # create logger named 'updatesolr'
    logger = logging.getLogger('updatesolr')
    logger.setLevel(logging.DEBUG)
    # specifies the lowest severity that will be dispatched to the appropriate destination
    # create file handler which logs even debug messages
    fh = logging.FileHandler('ScheduleSyncTwoSolrs.log')
    # fh.setLevel(logging.WARN)
    # create console handler and set level to debug
    ch = logging.StreamHandler()
    # StreamHandler instances send messages to streams
    # ch.setLevel(logging.DEBUG)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(ch)
    logger.addHandler(fh)
def scheduleUpdateSolr(csvUrl,iannSolrUrl):
    """Run SyncSolr.init(csvUrl, iannSolrUrl) every 12 hours, forever.

    Args:
        csvUrl: Solr CSV export URL used as the sync source.
        iannSolrUrl: target iAnn Solr core URL.
    """
    import time  # bug fix: time.sleep() below was used without any import

    logger()
    logger.info('***Start updating every 12 hours***')
    sched = BlockingScheduler()
    sched.add_job(SyncSolr.init, 'interval', hours = 12, args=[csvUrl,iannSolrUrl])
    sched.start()
    # NOTE(review): BlockingScheduler.start() blocks, so the code below only
    # runs once the scheduler is shut down.
    logger.info('***Finished updating every 12 hours***')
    try:
        # Keeps the main thread alive.
        while True:
            time.sleep(20)
    except (KeyboardInterrupt, SystemExit):
        pass
# if len(sys.argv) == 3:
# args = sys.argv
# scheduleUpdateSolr(args[1],args[2])
# else:
# scheduleUpdateSolr(
#
# 'http://139.162.217.53:8983/solr/eventsportal/select?q=*:*&fl=eventId,name,alternateName,startDate,endDate,hostInstitution,description,eventType,keywords,topic,locationName,locationCity,locationCountry,locationPostcode,latitude,longitude,url,&rows=2147483647&wt=csv',
#
# 'http://localhost:8982/solr/iann'
# )
if __name__ == '__main__':
scheduleUpdateSolr("http://139.162.217.53:8983/solr/eventsportal/select?q=*:*&fl=eventId,name,alternateName,startDate,endDate,hostInstitution,description,eventType,keywords,topic,locationName,locationCity,locationCountry,locationPostcode,latitude,longitude,url,&rows=2147483647&wt=csv",
"http://iann.pro/solr/iann"
)
# scheduleUpdateSolr(sys.argv[1],sys.argv[2]) | {
"repo_name": "elixirhub/events-portal-scraping-scripts",
"path": "ScheduleSyncTwoSolrs.py",
"copies": "1",
"size": "2439",
"license": "mit",
"hash": 4308705427337492500,
"line_mean": 29.8860759494,
"line_max": 290,
"alpha_frac": 0.6908569086,
"autogenerated": false,
"ratio": 3.6294642857142856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9789400807347217,
"avg_score": 0.006184077393413536,
"num_lines": 79
} |
__author__ = 'chuqiao'
from geopy.geocoders import Nominatim
import EventsPortal
def getCoordinatesForAllData(sourceUrl,patternUrl):
    """Fetch all event docs and fill in missing coordinates via geocoding.

    For every document whose 'latitude' field is missing, geocodes the
    document's 'locationCity' with Nominatim and stores the result in the
    'latitudetest'/'longitudetest' fields.

    Args:
        sourceUrl: URL passed to EventsPortal.getAllEventsData.
        patternUrl: event-page URL pattern passed through to the scraper.

    Returns:
        The list of document dicts, mutated in place.
    """
    data = EventsPortal.getAllEventsData(sourceUrl,patternUrl)
    # Perf fix: create the geocoder once instead of once per document
    # (the original instantiated Nominatim inside the loop).
    geolocator = Nominatim()
    for docs in data:
        if docs['latitude'] is None:  # idiom fix: `is None`, not `== None`
            location = geolocator.geocode(docs['locationCity'])
            docs['latitudetest'] = location.latitude
            docs['longitudetest'] = location.longitude
    return data
def addAllDataToSolrFromUrl(sourceUrl,patternUrl,solrUrl):
    """
    add data to a Solr index crawling events from a URL
    (coordinates are filled in before indexing)
    """
    enriched = getCoordinatesForAllData(sourceUrl,patternUrl)
    EventsPortal.addDataToSolr(enriched, solrUrl)
if __name__ == '__main__':
addAllDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All",
"http://bioevents-portal.org/events",
"139.162.217.53:8983/solr/eventsportal/") | {
"repo_name": "elixirhub/events-portal-scraping-scripts",
"path": "GetCoordinatesForAllData.py",
"copies": "1",
"size": "1206",
"license": "mit",
"hash": 3228737382286825500,
"line_mean": 19.4576271186,
"line_max": 106,
"alpha_frac": 0.6343283582,
"autogenerated": false,
"ratio": 3.954098360655738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5088426718855737,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chuqiao'
import csv
import urllib2
import pysolr
import logging
import sys
reload(sys)
sys.setdefaultencoding('utf8')
logging.basicConfig(filename='syncsolr.log', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%d/%m/%Y %I:%M:%S', filemode='w', level=logging.DEBUG)
# def logger():
# """
# Function that initialises logging system
# """
# global logger
# # create logger with 'syncsolr'
# logger = logging.getLogger('syncsolr')
# logger.setLevel(logging.DEBUG)
#
# # specifies the lowest severity that will be dispatched to the appropriate destination
#
# # create file handler which logs even debug messages
# fh = logging.FileHandler('syncsolr.log')
# # fh.setLevel(logging.WARN)
#
# # create console handler and set level to debug
# ch = logging.StreamHandler()
# # StreamHandler instances send messages to streams
# # ch.setLevel(logging.DEBUG)
#
# # create formatter and add it to the handlers
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# fh.setFormatter(formatter)
# ch.setFormatter(formatter)
# # add the handlers to the logger
# logger.addHandler(ch)
# logger.addHandler(fh)
def init(csvUrl,iannSolrUrl):
    """Entry point: log the start marker and run one full synchronization."""
    # logger()
    logging.info('****Starting synchronizing***')
    syncSolr(csvUrl,iannSolrUrl)
def syncSolr(csvUrl,iannSolrUrl):
    """Replace the iAnn Solr index with the rows exported from csvUrl.

    Fetches and transforms the CSV export, wipes the target core, then
    re-adds all documents.  Failures are logged rather than raised.
    """
    try:
        logging.info("push data from a url of CSV file to IANN solr")
        # Bug fix: the original fetched the CSV twice (the first call's
        # result was discarded); fetch it only once.
        docs = getDataFromCsv(csvUrl)
        deleteDataInSolr(iannSolrUrl)
        pushToIannSolr(docs,iannSolrUrl)
        logging.info('***Finished synchronizing***')
    except Exception:
        logging.error('***Synchronize failed*** \n%s' % str(sys.exc_info()))
def getDataFromCsv(csvUrl):
    """
    Crawl the CSV export at csvUrl and map each row into the iAnn Solr
    document schema.  Returns a list of dicts, one per row, with empty
    fields removed.  (Python 2: uses urllib2 and dict.iteritems.)
    """
    url = csvUrl
    response = urllib2.urlopen(url)
    csvReader = csv.reader(response)
    # start from next remove the header
    csvReader.next()
    # create the new header (iAnn field names, positionally matching the
    # exported Solr columns)
    header = ['id', 'title', 'subtitle', 'start', 'end', 'provider', 'description',
              'category', 'keyword', 'field', 'venue', 'city', 'country', 'postcode','latitude','longitude',
              'link']
    data = []
    for column in csvReader:
        drow = dict(zip(header, column))
        # transfer different values: map portal categories onto iAnn's
        # 'meeting'/'course' vocabulary
        if drow['category'] == 'Receptions and networking':
            drow['category'] = "meeting"
        elif drow['category']== 'Meetings and conferences':
            drow['category'] = 'meeting'
        elif drow['category']=='Awards and prizegivings':
            drow['category'] ='meeting'
        else:
            drow['category'] ='course'
        # give the start date value to end date if the end date is none
        if drow['end'] != '':
            drow['end'] = drow['end']
        else:
            drow['end']= drow['start']
        # replace slash to none in keyword string, then split on commas
        keywordValue = drow['keyword']
        listKeywordValue = keywordValue.replace('\\,',',')
        myKeywordlist = listKeywordValue.replace(' ','').split(',')
        drow['keyword'] = myKeywordlist
        # same un-escaping and splitting for the 'field' column
        fieldValue = drow['field']
        listFieldValue = fieldValue.replace('\\,',',')
        myFieldlist = listFieldValue.replace(' ','').split(',')
        drow['field'] = myFieldlist
        # replace slash to none in Venue string
        venueValue = drow['venue']
        listVenueValue = venueValue.replace('\\,',',')
        drow['venue'] = listVenueValue
        # insert value events into category
        categoryValue = drow['category']
        listCategories = [categoryValue, "event"]
        drow['category'] = listCategories
        # remove the keys with the empty values
        drowRemoveEmptyValue = dict((k, v) for k, v in drow.iteritems() if v)
        # add dict to a data list
        data.append(drowRemoveEmptyValue)
    return data
def deleteDataInSolr(iannSolrUrl):
    """
    Remove every document from the target Solr core.
    """
    # solrUrl = 'http://localhost:8982/solr/iann'
    client = pysolr.Solr(iannSolrUrl, timeout=10)
    client.delete(q='*:*')
def pushToIannSolr(docs, iannSolrUrl):
    """Index *docs* (a list of document dicts) into the iAnn Solr core."""
    client = pysolr.Solr(iannSolrUrl, timeout=10)
    client.add(docs)
# Alternative CLI entry point kept from the original (disabled):
# if len(sys.argv) == 3:
#     args = sys.argv
#     init(args[1], args[2])
# else:
#     init(
#         "http://139.162.217.53:8983/solr/eventsportal/select?q=*:*&fl=eventId,name,alternateName,startDate,endDate,hostInstitution,description,eventType,keywords,topic,locationName,locationCity,locationCountry,locationPostcode,latitude,longitude,url,&rows=2147483647&wt=csv",
#         "http://iann.pro/solrdev/iann"
#     )
if __name__ == '__main__':
    # Sync the events-portal Solr CSV export into the production iAnn core.
    init(
        "http://139.162.217.53:8983/solr/eventsportal/select?q=*:*&fl=eventId,name,alternateName,startDate,endDate,hostInstitution,description,eventType,keywords,topic,locationName,locationCity,locationCountry,locationPostcode,latitude,longitude,url,&rows=2147483647&wt=csv",
        "http://iann.pro/solr/iann"
    )
"repo_name": "elixirhub/events-portal-scraping-scripts",
"path": "SyncSolr.py",
"copies": "1",
"size": "5368",
"license": "mit",
"hash": 7923437870240841000,
"line_mean": 28.5,
"line_max": 277,
"alpha_frac": 0.6128912072,
"autogenerated": false,
"ratio": 3.529257067718606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4642148274918606,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chuqiao'
from apscheduler.schedulers.blocking import BlockingScheduler
import EventsPortal
import sys
import logging
def logger():
    """Initialise the module-wide logging system.

    NOTE(review): by design this rebinds the module-level name ``logger``
    from this function to the Logger instance, so it can only be called once;
    afterwards the module uses ``logger.info(...)`` etc.
    """
    global logger
    log = logging.getLogger('scheduleAddData')
    # Lowest severity that will be dispatched to the attached handlers.
    log.setLevel(logging.DEBUG)
    shared_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # File handler captures everything, including debug messages.
    file_handler = logging.FileHandler('scheduleUpdateData.log')
    file_handler.setFormatter(shared_format)
    # Console handler mirrors the same records to the stream.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(shared_format)
    log.addHandler(console_handler)
    log.addHandler(file_handler)
    logger = log
def scheduleUpdateSolr(sourceUrl, patternUrl, solrUrl):
    """Run EventsPortal.addDataToSolrFromUrl every 12 hours until interrupted.

    :param sourceUrl: URL of the daily-events feed to scrape
    :param patternUrl: URL pattern for individual event pages
    :param solrUrl: target Solr core for the scraped data
    """
    logger()
    logger.info('***Start updating every 12 hours***')
    sched = BlockingScheduler()
    sched.add_job(EventsPortal.addDataToSolrFromUrl, 'interval', hours=12,
                  args=[sourceUrl, patternUrl, solrUrl])
    try:
        # BUGFIX: BlockingScheduler.start() blocks the calling thread until
        # shutdown, so the original busy-wait loop placed after it was
        # unreachable (and used a `time` module that was never imported).
        # The interrupt handling now wraps start() itself.
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        logger.error('Can not schedule add data to solr \n%s' % str(sys.exc_info()))
if __name__ == '__main__':
    # Start the 12-hourly update schedule against the production endpoints.
    # NOTE(review): the Solr URL carries no scheme -- confirm callers expect that.
    scheduleUpdateSolr(
        "http://bioevents.pro/dailyevents",
        "http://bioevents.pro/events",
        "139.162.217.53:8983/solr/eventsportal/"
    )
# scheduleUpdateSolr(sys.argv[1],sys.argv[2])
| {
"repo_name": "elixirhub/events-portal-scraping-scripts",
"path": "ScheduleUpdateData.py",
"copies": "1",
"size": "1884",
"license": "mit",
"hash": 2330816139601360000,
"line_mean": 23.7894736842,
"line_max": 111,
"alpha_frac": 0.656581741,
"autogenerated": false,
"ratio": 4.034261241970022,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018026809906399866,
"num_lines": 76
} |
__author__ = 'chuqiao'
import smtplib
import base64
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def viewlog(file):
    """Tail Eventsportal.log forever and e-mail the outcome of each update run.

    :param file: ignored (kept for backward compatibility); the log path is
        hard-coded, as in the original.
    """
    import time  # local import: this module has no top-level `time` import
    # BUGFIX: close the file handle (the original leaked it).
    with open("Eventsportal.log") as log_file:
        log_file.seek(0, 2)  # go to the end of the file: only watch new lines
        while True:
            line = log_file.readline()
            if not line:
                # BUGFIX: the original busy-spun at 100% CPU once it reached
                # EOF; sleep briefly before polling again.
                time.sleep(1.0)
                continue
            if "***Finished updating***" in line:
                mailUpdate()
            elif "***Updating failed***" in line:
                mailAlert()
def mailUpdate():
    """E-mail a success notice for the Bioevents Solr update via Gmail SMTP."""
    sender = 'bioeventsportal@gmail.com'
    recipient = 'info@bioevents.pro'
    note = MIMEMultipart()
    note['From'] = sender
    note['To'] = recipient
    note['Subject'] = "[Update-reports] Events portal update"
    body = '''The Bioevents Solr has been updated.
    '''
    note.attach(MIMEText(body, 'plain'))
    # SECURITY NOTE(review): hard-coded, base64-"obfuscated" credentials --
    # move them to configuration or environment variables.
    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    smtp.ehlo()
    smtp.starttls()
    smtp.login('bioeventsportal', base64.b64decode('YmlvZXZlbnRzMzIx'))
    smtp.sendmail(sender, recipient, note.as_string())
    smtp.quit()
def mailAlert():
    """E-mail a failure notice for the Bioevents Solr update via Gmail SMTP."""
    sender = 'bioeventsportal@gmail.com'
    recipient = 'info@bioevents.pro'
    note = MIMEMultipart()
    note['From'] = sender
    note['To'] = recipient
    note['Subject'] = "[Update-reports] Events portal update failed"
    body = '''The update for the Bioevents Solr has failed.
    '''
    note.attach(MIMEText(body, 'plain'))
    # SECURITY NOTE(review): hard-coded, base64-"obfuscated" credentials --
    # move them to configuration or environment variables.
    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    smtp.ehlo()
    smtp.starttls()
    smtp.login('bioeventsportal', base64.b64decode('YmlvZXZlbnRzMzIx'))
    smtp.sendmail(sender, recipient, note.as_string())
    smtp.quit()
if __name__ == '__main__':
    # BUGFIX: the original passed the undefined name `file` (the Python 2
    # builtin, removed in Python 3); viewlog ignores its argument anyway,
    # so pass the log path explicitly.
    viewlog("Eventsportal.log")
| {
"repo_name": "elixirhub/events-portal-scraping-scripts",
"path": "UpdateEmailNotification.py",
"copies": "1",
"size": "1782",
"license": "mit",
"hash": 6753773975781303000,
"line_mean": 22.76,
"line_max": 67,
"alpha_frac": 0.6301907969,
"autogenerated": false,
"ratio": 3.3559322033898304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9389982430490356,
"avg_score": 0.019228113959894818,
"num_lines": 75
} |
__author__ = 'chuqiao'
import smtplib
import base64
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def viewlog(file):
    """Tail syncsolr.log forever and e-mail the outcome of each sync run.

    :param file: ignored (kept for backward compatibility); the log path is
        hard-coded, as in the original.
    """
    import time  # local import: this module has no top-level `time` import
    # BUGFIX: close the file handle (the original leaked it).
    with open("syncsolr.log") as log_file:
        log_file.seek(0, 2)  # go to the end of the file: only watch new lines
        while True:
            line = log_file.readline()
            if not line:
                # BUGFIX: the original busy-spun at 100% CPU once it reached
                # EOF; sleep briefly before polling again.
                time.sleep(1.0)
                continue
            if "***Finished synchronizing***" in line:
                mailUpdate()
            elif "***Synchronize failed***" in line:
                mailAlert()
def mailUpdate():
    """E-mail a success notice for the iAnn/Bioevents Solr synchronisation."""
    sender = 'bioeventsportal@gmail.com'
    recipient = 'info@bioevents.pro'
    note = MIMEMultipart()
    note['From'] = sender
    note['To'] = recipient
    note['Subject'] = "[Sync-reports] Synchronise two Solrs"
    body = '''The IAnn Solr is now synchronised with the Bioevents Solr.
    '''
    note.attach(MIMEText(body, 'plain'))
    # SECURITY NOTE(review): hard-coded, base64-"obfuscated" credentials --
    # move them to configuration or environment variables.
    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    smtp.ehlo()
    smtp.starttls()
    smtp.login('bioeventsportal', base64.b64decode('YmlvZXZlbnRzMzIx'))
    smtp.sendmail(sender, recipient, note.as_string())
    smtp.quit()
def mailAlert():
    """E-mail a failure notice for the iAnn/Bioevents Solr synchronisation."""
    sender = 'bioeventsportal@gmail.com'
    recipient = 'info@bioevents.pro'
    note = MIMEMultipart()
    note['From'] = sender
    note['To'] = recipient
    note['Subject'] = "[Sync-reports]Synchronise two Solrs failed"
    body = '''The synchronisation of two Solrs failed.
    '''
    note.attach(MIMEText(body, 'plain'))
    # SECURITY NOTE(review): hard-coded, base64-"obfuscated" credentials --
    # move them to configuration or environment variables.
    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    smtp.ehlo()
    smtp.starttls()
    smtp.login('bioeventsportal', base64.b64decode('YmlvZXZlbnRzMzIx'))
    smtp.sendmail(sender, recipient, note.as_string())
    smtp.quit()
if __name__ == '__main__':
    # BUGFIX: the original passed the undefined name `file` (the Python 2
    # builtin, removed in Python 3); viewlog ignores its argument anyway,
    # so pass the log path explicitly.
    viewlog("syncsolr.log")
| {
"repo_name": "elixirhub/events-portal-scraping-scripts",
"path": "SyncEmailNotification.py",
"copies": "1",
"size": "1799",
"license": "mit",
"hash": -3037982041571672600,
"line_mean": 22.9866666667,
"line_max": 72,
"alpha_frac": 0.6336853808,
"autogenerated": false,
"ratio": 3.306985294117647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9358913212207163,
"avg_score": 0.01635149254209693,
"num_lines": 75
} |
__author__ = 'chy'
from rest_framework import serializers
from question_answer.models import Question, Answer, Tag
# -*- coding: utf-8 -*-
class QuestionListSerializer(serializers.HyperlinkedModelSerializer):
    """Compact Question representation for list endpoints."""
    # Read-only value computed by the model -- presumably a
    # Question.get_answer_count helper; verify it exists on the model.
    answers_count = serializers.Field(source='get_answer_count')

    class Meta:
        model = Question
        fields = ('id', 'title', 'trackback_url', 'answers_count')
class QuestionSerializer(serializers.HyperlinkedModelSerializer):
    """Full Question representation including nested answers and tags."""
    # Read-only values computed by model helper methods
    # (get_answer_rest / get_tag_rest / get_tags_id_rest) -- TODO confirm
    # those helpers exist on the Question model.
    answers = serializers.Field(source='get_answer_rest')
    author = serializers.PrimaryKeyRelatedField()
    tags = serializers.Field(source='get_tag_rest')
    tags_id = serializers.Field(source='get_tags_id_rest')

    class Meta:
        model = Question
        fields = ('id', 'title', 'trackback_url', 'author', 'content', 'original_author', 'create_date', 'edit_date', 'answers','tags',"tags_id")
class AnswerSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize an Answer; no explicit field list, so all model fields apply."""
    class Meta:
        model = Answer
class TagSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize a Tag with its id, title and timestamps."""
    class Meta:
        model = Tag
        fields = ('id', 'title', 'create_date', 'edit_date')
"repo_name": "caihaoyu/code_god",
"path": "question_answer/serializers.py",
"copies": "1",
"size": "1123",
"license": "mit",
"hash": 2386881558278041000,
"line_mean": 32.0588235294,
"line_max": 145,
"alpha_frac": 0.6936776492,
"autogenerated": false,
"ratio": 3.99644128113879,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016013603026536827,
"num_lines": 34
} |
__author__ = 'cindylamm'
import time
import logging
import config as conf
from tsdbquery import tsdbquery
from sparkprocess import sparkprocess
# ###############################################################################
# Defaults
#
# Configure root logging once at import time: a verbose format with line
# numbers in DEBUG mode, a terse one otherwise.
if conf.DEBUG:
    FORMAT = '%(asctime)-0s %(levelname)s %(message)s [at line %(lineno)d]'
    logging.basicConfig(level=logging.DEBUG, format=FORMAT, datefmt='%Y-%m-%dT%I:%M:%S')
else:
    FORMAT = '%(asctime)-0s %(message)s'
    logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt='%Y-%m-%dT%I:%M:%S')
# requests_log = logging.getLogger("requests")
# requests_log.setLevel(logging.ERROR)
# ###############################################################################
# Main script
#
if __name__ == '__main__':
    # 1. Pull raw location points from the time-series database (InfluxDB).
    dbConnector = tsdbquery.InfluxDbConnector()
    request = dbConnector.do_GET()
    pointsJsonList = dbConnector.getJsonlistFromRequest(request)
    # 2. Aggregate them with Spark, timing only the processing step.
    sparkProcessor = sparkprocess.SparkProcessor()
    if conf.DEBUG:
        sparkProcessor.write_json_to_file(conf.IN_FILE, pointsJsonList)
    startProcessingTime = time.time()
    startDate, endDate, size = sparkProcessor.process(pointsJsonList)
    endProcessingTime = time.time()
    processing_time = endProcessingTime - startProcessingTime
    # 3. Write a small run summary (date range, sizes, duration) to a file.
    info = {
        "startDate": startDate,
        "endDate": endDate,
        "aggregationInputSize": conf.BATCH_SIZE,
        "aggregationOutputSize": size,
        "processingTimeInSeconds": round(processing_time, 3)
    }
    sparkProcessor.write_json_to_file(conf.INFO_FILE, info)
| {
"repo_name": "clamm/spark-location-history",
"path": "src/process-data/process-data.py",
"copies": "1",
"size": "1564",
"license": "apache-2.0",
"hash": 8251940224899189000,
"line_mean": 27.962962963,
"line_max": 88,
"alpha_frac": 0.6227621483,
"autogenerated": false,
"ratio": 3.842751842751843,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.49655139910518425,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cirreth'
from libs import ownet
from plugins.SHomePlugin import SHomePlugin
import json
import logging
class OneWirePlugin(SHomePlugin):
    """SHome plugin that reads/writes 1-Wire sensors through an ownet server."""

    # how many times a read is retried before returning the last error text
    _READ_ATTEMPTS_COUNT = 3

    _host = None
    _port = None
    _connection = None

    def __init__(self, parameters):
        """Connect to the ownet server described by parameters['host'/'port']."""
        self._host = parameters['host']
        self._port = int(parameters['port'])
        logging.debug('OneWirePlugin initialization')
        self._connection = ownet.Connection(self._host, self._port)
        logging.debug('\nOneWirePlugin list dir:')
        for entry in self._connection.dir(''):
            logging.debug(entry)
        logging.debug('\n')

    def _read(self, address):
        """Read *address*, retrying on failure; 'dir X' lists a directory."""
        attempt = 0
        logging.debug('OneWirePlugin read address '+str(address)+' (attempt: '+str(attempt)+')')
        res = None
        while attempt < self._READ_ATTEMPTS_COUNT:
            try:
                if address.startswith('dir '):
                    # NOTE: slice kept from the original -- [5:] skips one
                    # extra character after the 4-char 'dir ' prefix.
                    return str(self.list(address[5:]))
                res = self._connection.read(address)
            except Exception as err:
                attempt += 1
                res = 'Exception occured: ' + str(err)
            else:
                break
        logging.debug('OneWirePlugin address '+address+' reading result : '+str(res))
        return res

    def _write(self, address, value):
        """Write *value* to *address* on the ownet server."""
        logging.debug('OneWirePlugin trying to write address ( '+address+' ) and value ( '+str(value)+' )...')
        res = self._connection.write(address, value)
        logging.debug('OneWirePlugin have writen value ' + str(value) + ' to address '+address+' with result : '+str(res))
        return 'Success'

    def call(self, reference, value):
        """Dispatch: a value means write, no value means read."""
        if value is None:
            return self._read(reference)
        return self._write(reference, value)

    def list(self, reference=''):
        """Return the decoded directory entries under *reference*."""
        return [entry.decode() for entry in self._connection.dir(reference)]
"repo_name": "Cirreth/shome",
"path": "plugins/onewire/OneWirePlugin.py",
"copies": "1",
"size": "1965",
"license": "mit",
"hash": -4570186932667019000,
"line_mean": 31.7666666667,
"line_max": 122,
"alpha_frac": 0.5745547074,
"autogenerated": false,
"ratio": 4.128151260504202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0034562821538642154,
"num_lines": 60
} |
__author__ = 'cirreth'
from plugins.SHomePlugin import SHomePlugin
import logging
import json
import pymysql
class DatabasePlugin(SHomePlugin):
    """SHome plugin that runs raw SQL against a MySQL database via pymysql.

    The connection is opened eagerly in __init__ and transparently re-opened
    once if it has gone away when a query is executed.
    """
    _host = None
    _login = None
    _password = None
    _database = None
    _connection = None
    _cursor = None

    def __init__(self, params):
        # expects keys: host, database, login, password
        self._host = params['host']
        self._database = params['database']
        self._login = params['login']
        self._password = params['password']
        logging.debug('Database initialization')
        self.__connect()

    def call(self, reference, values={}):
        # *reference* is the SQL text; *values* is accepted for plugin
        # interface compatibility but unused.
        # SECURITY NOTE(review): the SQL is executed verbatim -- callers
        # must not pass untrusted input.
        return self.execute(reference)

    def execute(self, request):
        # Run *request* and return all rows as dicts (or the error text).
        logging.debug('SQL: ( '+request+' )')
        try:
            self._cursor = self._connection.cursor(pymysql.cursors.DictCursor)
            logging.debug('Request result: '+str(self._cursor.execute(request)))
        except pymysql.OperationalError:
            # Connection dropped: reconnect once and retry the query.
            logging.warning('Connecting to database '+self._host+' : '+self._database)
            self.__connect()
            self._cursor.execute(request)
        try:
            return self._cursor.fetchall()
        except Exception as e:
            # Fetch failed: return the error text instead of raising.
            return str(e)

    def __connect(self):
        # (Re)open the connection in autocommit mode and cache a dict cursor.
        self._connection = pymysql.connect(host=self._host, user=self._login, passwd=self._password, db=self._database)
        self._connection.autocommit(True)
        self._cursor = self._connection.cursor(pymysql.cursors.DictCursor)

    def list(self, reference=''):
        # Listing is not meaningful for a SQL backend.
        raise NotImplementedError
| {
"repo_name": "Cirreth/shome",
"path": "plugins/database/DatabasePlugin.py",
"copies": "1",
"size": "1515",
"license": "mit",
"hash": 3594354991446144500,
"line_mean": 29.3,
"line_max": 119,
"alpha_frac": 0.6184818482,
"autogenerated": false,
"ratio": 4.243697478991597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5362179327191596,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cirreth'
import logging
from core.entities.Scenario import Scenario
class ActionProcessor:
def __init__(self, load_stored=True):
if load_stored:
self._scenarios = {scenario.name: scenario for scenario in Scenario.get_all()}
else:
self._scenarios = {}
def add_scenario(self, name, expression, description='', runoninit=False, published=False, save=False):
if name in self._scenarios:
raise Exception('Scenario with name '+name+' already exists')
self._scenarios[name] = Scenario(name, expression, description, runoninit, published)
if save:
self._scenarios[name].save()
def update_scenario(self, name, expression=None, description=None, runoninit=None, published=None):
if name not in self._scenarios:
raise Exception(name+' is not in scenarios')
scenario = self._scenarios[name]
scenario.expression = expression if expression else scenario.expression
scenario.description = description if description else scenario.description
scenario.runoninit = runoninit if runoninit else scenario.runoninit
scenario.published = published if published else scenario.published
scenario.save()
scenario.construct()
def get_scenario(self, name):
if name in self._scenarios:
return self._scenarios[name]
def delete_scenario(self, name):
scenario = Scenario.get(name)
if scenario:
scenario.delete()
del self._scenarios[name]
def execute(self, name, params={}):
if not isinstance(params, dict):
raise Exception('Parameters type must be dict')
if name in self._scenarios:
return self._scenarios[name].execute(params)
def list_all(self):
return self._scenarios.items() | {
"repo_name": "Cirreth/shome",
"path": "core/ActionProcessor.py",
"copies": "1",
"size": "1867",
"license": "mit",
"hash": 4147560557571044000,
"line_mean": 36.36,
"line_max": 107,
"alpha_frac": 0.6518478843,
"autogenerated": false,
"ratio": 4.520581113801453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001574450082288828,
"num_lines": 50
} |
__author__ = 'cirreth'
import logging
import time
from plugins.SHomePlugin import SHomePlugin
from libs.ledstrip import *
class LedStripPlugin(SHomePlugin):
    """SHome plugin driving an RGB LED strip, with optional smooth fades."""

    _ledstrip = None
    # current channel values (0-255), tracked to compute smooth transitions
    _cur_r = 0
    _cur_g = 0
    _cur_b = 0

    def __init__(self, parameters):
        self._ledstrip = RGB()

    def call(self, reference, values={}):
        """
        #000000 set instantly
        #000000:x smoothly, interval: x (float)
        """
        logging.debug('Ledstrip plugin: '+reference)
        spl = reference.split(':')
        color = spl[0]
        delay = float(spl[1]) if len(spl) == 2 else 0
        try:
            self.set_color(color, delay)
        except Exception as e:
            # On any failure, recreate the strip handle and set the color
            # instantly as a fallback.
            logging.error(e)
            self._ledstrip = RGB()
            self.set_color(color)

    def set_color(self, color: 'hex str (#000000)', interval=0):
        """Set the strip to *color*, fading over *interval* seconds if given."""
        rn = int(color[1:3], 16)
        gn = int(color[3:5], 16)
        bn = int(color[5:], 16)
        if interval:
            dr = (rn - self._cur_r)
            dg = (gn - self._cur_g)
            db = (bn - self._cur_b)
            nsteps = max(abs(dr), abs(dg), abs(db))
            # BUGFIX: the original divided by nsteps unconditionally and
            # raised ZeroDivisionError when the target equalled the current
            # color; skip the fade loop entirely in that case.
            if nsteps:
                stepdur = interval/nsteps
                if stepdur < 0.075:
                    stepdur = 0.075
                    # BUGFIX: clamp to at least one step -- for very short
                    # intervals int(interval/0.075) was 0, which crashed the
                    # per-channel division below.
                    nsteps = max(int(interval/0.075), 1)
                dr /= nsteps
                dg /= nsteps
                db /= nsteps
                for i in range(0, nsteps):
                    self._ledstrip.setColor(self.rgb_to_hex(self._cur_r+dr*i, self._cur_g+dg*i, self._cur_b+db*i))
                    time.sleep(stepdur)
        self._cur_r = rn
        self._cur_g = gn
        self._cur_b = bn
        self._ledstrip.setColor(color)

    @staticmethod
    def rgb_to_hex(r, g, b):
        """Format three 0-255 channel values as a '#rrggbb' string."""
        return '#'+format(int(r), '02x')+format(int(g), '02x')+format(int(b), '02x')

    def list(self, reference=''):
        return '#000000 set instantly\n#000000:0.0 set in interval 0.0'
| {
"repo_name": "Cirreth/shome",
"path": "plugins/ledstrip/LedStripPlugin.py",
"copies": "1",
"size": "1955",
"license": "mit",
"hash": -518975058909406500,
"line_mean": 28.546875,
"line_max": 110,
"alpha_frac": 0.4987212276,
"autogenerated": false,
"ratio": 3.417832167832168,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9389082712042166,
"avg_score": 0.005494136678000413,
"num_lines": 64
} |
__author__ = 'civic'
from serial import Serial
import time
from datetime import (
datetime,
timedelta
)
import serial
import math
from .msg import (
AlarmSetting,
StopButton,
TemperatureUnit,
ToneSet,
WorkStatus,
RequestMessage,
ResponseMessage,
InitRequest,
InitResponse,
DevInfoRequest,
DevInfoResponse,
ParamPutRequest,
ParamPutResponse,
DataHeaderRequest,
DataHeaderResponse,
DataBodyRequest,
DataBodyResponse,
ClockSetRequest,
ClockSetResponse,
DevNumRequest,
DevNumResponse,
UserInfoRequest,
UserInfoResponse,
)
import six
class Device:
    """Driver for an Elitech RC-4/RC-4HC/RC-5 data logger on a serial port.

    Every public method opens the serial port, performs one request/response
    exchange, closes the port, then sleeps ``wait_time`` seconds -- presumably
    to let the device settle between commands (confirm against protocol docs).
    """

    def __init__(self, serial_port, baudrate=115200, timeout=5):
        # serial_port=None allows constructing the object without hardware
        # (e.g. for tests); the port is opened lazily per request.
        if serial_port is not None:
            self._ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)
            self._ser.close()
        self.debug = False      # when True, hex-dump outgoing request bytes
        self.wait_time = 0.5    # pause after each exchange, in seconds
        self.encode = 'utf8'    # text encoding for string fields

    def _talk(self, request, response):
        """Send *request* over the already-open port and fill *response*.

        :type request: RequestMessage
        """
        ba = request.to_bytes()
        if (self.debug):
            # hex-dump the outgoing bytes, 16 per line (Python 2/3 compatible)
            print("\nba length={}".format(len(ba)))
            for i, b in enumerate(ba):
                if six.PY2:
                    six.print_("{:02X} ".format(ord(b)), sep='', end='')
                else:
                    six.print_("{:02X} ".format(b), end='')
                if (i + 1) % 16 == 0:
                    six.print_()
            six.print_()
        self._ser.write(ba)
        response.read(self._ser)
        return response

    def init(self):
        """Perform the initial handshake with the device.

        :rtype: InitResponse
        """
        req = InitRequest()
        try:
            self._ser.open()
            res = self._talk(req, InitResponse())
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return res

    def get_devinfo(self):
        """Read the device information block (model, intervals, counts...).

        :rtype: DevInfoResponse
        """
        req = DevInfoRequest()
        try:
            self._ser.open()
            res = self._talk(req, DevInfoResponse(self.encode))
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return res

    def update(self, req):
        """Write a parameter block to the device.

        :type req: ParamPutRequest
        :rtype: ParamPutResponse
        """
        try:
            self._ser.open()
            res = self._talk(req, ParamPutResponse())
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return res

    def get_data(self, callback=None, page_size=None):
        """Download all recorded samples, page by page.

        When *callback* is given, each page's tuples are passed to it and the
        method returns an empty list; otherwise all tuples are returned.

        :rtype:list[(int,datetime,float)]
        """
        devinfo = self.get_devinfo()
        header = self.get_data_header(devinfo.station_no)
        # Page size and words-per-sample depend on the device model.
        # NOTE(review): data_size is only assigned inside this branch, so
        # passing an explicit page_size would raise NameError below -- confirm
        # callers always pass page_size=None.
        if page_size is None:
            if devinfo.model_no == 40: # RC-4
                page_size = 100
                data_size = 1
            elif devinfo.model_no == 42: #RC-4HC
                page_size = 200
                data_size = 2
            elif devinfo.model_no == 50: #RC-5
                page_size = 500
                data_size = 1
            else:
                raise ValueError("Unknowm model_no (%d). can't decide page_size", devinfo.model_no)
        page = int(math.ceil(header.rec_count * data_size / float(page_size)))
        # sample spacing, taken from the device's recording interval
        dt = timedelta(hours=devinfo.rec_interval.hour,
                       minutes=devinfo.rec_interval.minute,
                       seconds=devinfo.rec_interval.second)
        data_list = []
        base_time = devinfo.start_time
        no = 1
        try:
            self._ser.open()
            for p in range(page):
                req = DataBodyRequest(devinfo.station_no, p)
                # the last page may be only partially filled
                count = page_size if (p+1) * page_size <= devinfo.rec_count * data_size else (devinfo.rec_count * data_size % page_size)
                res = DataBodyResponse(count)
                self._talk(req, res)
                if devinfo.model_no == 42:
                    # RC-4HC interleaves temperature and humidity words;
                    # values are tenths of a unit.
                    for rec_temp, rec_humi in zip(*[iter(res.records)] * 2):
                        data_list.append((no, base_time, rec_temp/10.0, rec_humi/10.0))
                        no += 1
                        base_time += dt
                else:
                    for rec in res.records:
                        data_list.append((no, base_time, rec/10.0))
                        no += 1
                        base_time += dt
                if callback is not None:
                    # stream page-by-page instead of accumulating everything
                    callback(data_list)
                    data_list = []
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return data_list

    def get_data_header(self, target_station_no):
        """Read the recording header (sample count etc.) for a station.

        :rtype: DataHeaderResponse
        """
        try:
            self._ser.open()
            req = DataHeaderRequest(target_station_no)
            res = self._talk(req, DataHeaderResponse())
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return res

    def set_clock(self, station_no, set_time=None):
        """Set the device clock; defaults to the host's current local time.

        :type station_no: int
        :type set_time: datetime
        :rtype:ClockSetResponse
        """
        try:
            self._ser.open()
            if set_time is None:
                set_time = datetime.now()
            req = ClockSetRequest(station_no, set_time)
            res = ClockSetResponse()
            self._talk(req, res)
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return res

    def set_device_number(self, station_no, device_number):
        """Write the device number string to the device.

        :type station_no: int
        :type device_number: string
        :rtype:DevNumResponse
        """
        try:
            self._ser.open()
            req = DevNumRequest(station_no)
            req.device_number = device_number
            res = self._talk(req, DevNumResponse())
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return res

    def set_user_info(self, station_no, user_info):
        """Write the free-form user info string to the device.

        :type station_no: int
        :type user_info: string
        :rtype: UserInfo
        """
        try:
            self._ser.open()
            req = UserInfoRequest(station_no, self.encode)
            req.user_info = user_info
            res = self._talk(req, UserInfoResponse())
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return res

    def raw_send(self, request_bytes, response_length):
        """Send raw bytes and return exactly *response_length* reply bytes.

        Builds ad-hoc request/response objects whose serialization and read
        hooks are replaced with closures over the raw arguments.
        """
        request = RequestMessage()
        request.to_bytes = lambda : request_bytes
        response = ResponseMessage()
        response.msg = None
        def __read(ser):
            response.msg = ser.read(response_length)
        response.read = __read
        try:
            self._ser.open()
            self._talk(request, response)
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return response.msg

    def get_latest(self, callback=None, page_size=None):
        """Fetch only the most recent sample (reads just the last data page).

        Returns (None, None, None) when the device has no recordings.

        :rtype:list[(int,datetime,float)]
        """
        devinfo = self.get_devinfo()
        if devinfo.rec_count == 0:
            return (None, None, None)
        header = self.get_data_header(devinfo.station_no)
        # NOTE(review): same model-dependent sizing (and same data_size
        # caveat) as in get_data above.
        if page_size is None:
            if devinfo.model_no == 40: # RC-4
                page_size = 100
                data_size = 1
            elif devinfo.model_no == 42: #RC-4HC
                page_size = 200
                data_size = 2
            elif devinfo.model_no == 50: #RC-5
                page_size = 500
                data_size = 1
            else:
                raise ValueError("Unknowm model_no (%d). can't decide page_size", devinfo.model_no)
        page = int(math.ceil(header.rec_count * data_size / float(page_size)))
        dt = timedelta(hours=devinfo.rec_interval.hour,
                       minutes=devinfo.rec_interval.minute,
                       seconds=devinfo.rec_interval.second)
        # timestamp of the last sample
        base_time = devinfo.start_time + dt * (header.rec_count-1)
        no = header.rec_count
        try:
            self._ser.open()
            p = page - 1  # request only the final page
            req = DataBodyRequest(devinfo.station_no, p)
            count = page_size if (p+1) * page_size <= devinfo.rec_count * data_size else (devinfo.rec_count * data_size % page_size)
            res = DataBodyResponse(count)
            self._talk(req, res)
            if devinfo.model_no == 42:
                # last temperature/humidity pair
                rec_temp, rec_humi = res.records[-2:]
                latest = (no, base_time, rec_temp/10.0, rec_humi/10.0)
            else:
                rec = res.records[-1]
                latest = (no, base_time, rec/10.0)
            if callback is not None:
                callback(latest)
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return latest
| {
"repo_name": "civic/elitech-datareader",
"path": "elitech/__init__.py",
"copies": "1",
"size": "8950",
"license": "mit",
"hash": -2347113941363696000,
"line_mean": 27.4126984127,
"line_max": 136,
"alpha_frac": 0.5037988827,
"autogenerated": false,
"ratio": 3.9973202322465387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5001119114946538,
"avg_score": null,
"num_lines": null
} |
__author__ = 'civil'
import tkinter as tk
import matplotlib
from tkinter import Tk, Menu, Toplevel, Button
from tkinter.filedialog import askopenfilename, asksaveasfile
from tkinter.messagebox import showerror
def donothing():
    """Placeholder menu command: open a child window with one inert button."""
    win = Toplevel(root)
    Button(win, text="Do nothing button").pack()
def open_cal():
    """Print the contents of the calculator script to stdout."""
    # BUGFIX: the original never closed the file handle; use a context manager.
    with open('scott0012.py') as cal_file:
        print(cal_file.read())
def load_file():
    """Ask the user for a file via a dialog and dump its contents to stdout.

    Shows an error dialog if the chosen file cannot be read.
    """
    fname = askopenfilename(filetypes=(("Template files", "*.tplate"),
                                       ("HTML files", "*.html;*.htm"),
                                       ("All files", "*.*"),
                                       ("Python files", "*.py")))
    if fname:
        try:
            print(fname)
            # BUGFIX: close the file handle (the original leaked it).
            with open(fname) as text_file:
                print(text_file.read())
        except Exception:  # BUGFIX: the original used a naked except
            showerror("Open Source File", "Failed to read file\n'%s'" % fname)
            return
def write_file():
    """Prompt for a save location and write a placeholder string to it."""
    target = asksaveasfile(mode='w', defaultextension=".txt")
    if target is None:
        # asksaveasfile returns None when the dialog is cancelled
        return
    target.write('haha')
    target.close()
# --- build the main window and its menu bar ---
root = Tk()
menubar = Menu(root)

filemenu = Menu(menubar, tearoff=0)
for item, handler in (('New', donothing), ('Open', load_file),
                      ('Save', donothing), ('Save as ...', write_file),
                      ('Close', donothing)):
    filemenu.add_command(label=item, command=handler)
filemenu.add_separator()
filemenu.add_command(label='Exit', command=root.quit)
menubar.add_cascade(label='File', menu=filemenu)

editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label='Undo', command=donothing)
editmenu.add_separator()
for item in ('Cut', 'Copy', 'Paste', 'Delete', 'Select All'):
    editmenu.add_command(label=item, command=donothing)
menubar.add_cascade(label='Edit', menu=editmenu)

helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label='Help Index', command=donothing)
helpmenu.add_command(label='About ...', command=donothing)
menubar.add_cascade(label='Help', menu=helpmenu)

# --- two demo buttons; both are packed afterwards, so the earlier place()
# calls are overridden by the pack geometry manager (kept from the original) ---
filewin = root
button = Button(filewin, text="graph")
button.place(x=50, y=50)
button2 = Button(filewin, text="calculator", command=open_cal)
button2.place(x=40, y=50)
button.pack()
button2.pack()

root.config()
root.config(menu=menubar)
root.mainloop()
"repo_name": "saintdragon2/python-3-lecture-2015",
"path": "civil-final/1st_presentation/4조/civil0531.py",
"copies": "1",
"size": "2834",
"license": "mit",
"hash": 4940356245414173000,
"line_mean": 30.1538461538,
"line_max": 91,
"alpha_frac": 0.6570218772,
"autogenerated": false,
"ratio": 3.2953488372093025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4452370714409303,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CJeon'
"""
clothing attributes를 효율적으로 관리하는 class
"""
class clothing_data(object):
    """Accessor for one row of clothing attributes.

    Wraps the raw attribute list and allows lookup by attribute name, either
    the long form ("Size") or the short alias ("sz").
    """

    # Maps both the long attribute name and its short alias to the index in
    # the underlying attribute list. Replaces the original if/elif chain.
    _ATTRIBUTE_INDEX = {
        'Dress_ID': 0, 'did': 0,
        'Style': 1, 'st': 1,
        'Price': 2, 'p': 2,
        'Rating': 3, 'r': 3,
        'Size': 4, 'sz': 4,
        'Season': 5, 'sn': 5,
        'NeckLine': 6, 'nl': 6,
        'SleeveLength': 7, 'sl': 7,
        'waiseline': 8, 'wl': 8,
        'Material': 9, 'm': 9,
        'FabricType': 10, 'ft': 10,
        'Decoration': 11, 'dc': 11,
        'Pattern Type': 12, 'pt': 12,
    }

    def __init__(self, attribute_list):
        self.attributes = attribute_list

    def __repr__(self):
        # Printing an instance shows all of its attributes.
        return str(self.get_attributes())

    def get_attributes(self, attribute_name=None):
        """Return attribute value(s) by name.

        :param attribute_name: None (returns the whole attribute list), a
            single name/alias string (ex. "Size"), or a list of such strings
            (ex. ["Size", "Rating"], returning a list of values).
        :return: the full list, a single value, or a list of values.
        :raises Exception: if a name is not a known attribute or alias.
        """
        if attribute_name is None:
            return self.attributes
        # isinstance replaces the original exact type() comparisons.
        assert isinstance(attribute_name, (list, str))
        if isinstance(attribute_name, list):
            # multiple attributes requested: resolve each one recursively
            return [self.get_attributes(a_name) for a_name in attribute_name]
        try:
            return self.attributes[self._ATTRIBUTE_INDEX[attribute_name]]
        except KeyError:
            raise Exception("No such attribute. ", attribute_name)
| {
"repo_name": "adobecs5/urp2015",
"path": "classes/clothing_data_class.py",
"copies": "1",
"size": "2582",
"license": "apache-2.0",
"hash": 843323254173984000,
"line_mean": 39.03125,
"line_max": 83,
"alpha_frac": 0.5846994536,
"autogenerated": false,
"ratio": 3.9659442724458205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.505064372604582,
"avg_score": null,
"num_lines": null
} |
# Brute-force search for digit assignments satisfying AB-CD=EF and EF+GH=PPP
# with all nine letters distinct. Expects a SparkContext `sc` in scope
# (e.g. the pyspark shell). Python 2 syntax (print statements).
import itertools
import time
from operator import mul
start_time = time.time()
# Return all 6-digit permutations meeting AB-CD=EF, output as (EF, (A,B,C,D,E,F))
# leading digits A, C, E must be non-zero; the dot-product with the weight
# vector encodes 10A+B-10C-D-10E-F == 0.
perm_1 = sc.parallelize(list(itertools.permutations([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 6)))\
    .filter(lambda x : x[0] > 0 and x[2] > 0 and x[4] >0)\
    .filter(lambda x : sum(map(mul, (10, 1, -10, -1, -10, -1), x))==0)\
    .map(lambda x : (10*x[4]+x[5], x))\
    .unpersist().cache()
# Return all 5-digit permutations meeting EF+GH=PPP, output as (EF, (E,F,G,H,P))
perm_2 = sc.parallelize(list(itertools.permutations([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 5)))\
    .filter(lambda y : sum(map(mul, (10, 1, 10, 1, -111), y))==0)\
    .filter(lambda y : y[0] > 0 and y[2] > 0 and y[4] >0)\
    .map(lambda y : (10*y[0]+y[1], y))\
    .unpersist().cache()
# Return all permutation meeting both conditions with distinct A,B,C,D,E,F,G,H,P
# Join by EF, then enforce distinctiveness
perm_ans = perm_1.join(perm_2).map(lambda kv : kv[1])\
    .filter(lambda z : len(set(z[0] + z[1])) == 9)\
    .map(lambda z : z[0][0:4] + z[1])\
    .unpersist().cache()
solutions = perm_ans.collect()
print "Solutions: %s" %solutions
print "Number of solutions: %d" %len(solutions)
print "Elapsed time(sec): %0.4f" %(time.time() - start_time)
print "Spark running on 2-node m3.xlarge cluster"
#Solutions: [(8, 6, 5, 4, 3, 2, 7, 9, 1), (9, 0, 6, 3, 2, 7, 8, 4, 1), (9, 5, 2, 7, 6, 8, 4, 3, 1), (8, 5, 4, 6, 3, 9, 7, 2, 1), (9, 0, 2, 7, 6, 3, 4, 8, 1)]
#Number of solutions: 5
#Elapsed time(sec): 0.9005
#Spark running on 2-node m3.xlarge cluster
| {
"repo_name": "dng8888/ABCDEFGHPPP",
"path": "spark-python-smartbruteforce/abcdefghppp.py",
"copies": "4",
"size": "1849",
"license": "mit",
"hash": 4119717318074119000,
"line_mean": 39.1956521739,
"line_max": 157,
"alpha_frac": 0.6122228231,
"autogenerated": false,
"ratio": 2.4752342704149934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5087457093514994,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cjm'
import logging
import requests
from subprocess import call
from scigraph.renderers.Renderer import Renderer
from graphviz import Digraph
# TODO: different implementations for different types
def safe(s):
    """Strip every ':' from `s` so the result is a legal graphviz node id."""
    return "".join(s.split(":"))
def apply_styles(graph, styles):
    """Merge the 'graph', 'nodes' and 'edges' sections of `styles` into the
    corresponding graphviz attribute dicts.  Missing or falsy sections are
    treated as empty.  Returns the same graph object for chaining."""
    graph.graph_attr.update(styles.get('graph') or {})
    graph.node_attr.update(styles.get('nodes') or {})
    graph.edge_attr.update(styles.get('edges') or {})
    return graph
class GraphVizRenderer(Renderer):
    """
    Render a BBOP-style graph object to a PNG via the graphviz package.

    NOTE(review): `g`, `styles` and `cat_attr_map` are class-level
    attributes, so the Digraph `g` is shared by every instance and every
    render() call accumulates into it -- confirm whether that is intended.
    """
    # Shared Digraph that all render() calls draw into (see class docstring).
    g = Digraph('G', format='png')
    # Default graphviz attribute sets for the graph, its nodes and its edges.
    styles = {
        'graph': {
            'label': 'A Fancy Graph',
            'fontsize': '16',
            'fontcolor': 'white',
            'bgcolor': '#333333',
            'rankdir': 'BT',
        },
        'nodes': {
            'fontname': 'Helvetica',
            'shape': 'hexagon',
            'fontcolor': 'white',
            'color': 'white',
            ##'style': 'filled',
            'fillcolor': '#006699',
        },
        'edges': {
            'style': 'dashed',
            'color': 'white',
            'arrowhead': 'open',
            'fontname': 'Courier',
            'fontsize': '12',
            'fontcolor': 'white',
        }
    }
    # Extra node attributes applied per node category (see add_node).
    cat_attr_map = {
        'disease' : {
            'style' : 'filled'
        }
    }
    def __init__(self):
        Renderer.__init__(self)
    def render(self, obj):
        """Draw every node and edge of `obj` (assumed BBOP graph with
        .nodes/.edges), write img/g.png, and open it (macOS `open`)."""
        ## ASSUME BBOP GRAPH
        apply_styles(self.g, self.styles)
        nodes = obj.nodes
        edges = obj.edges
        for node in nodes:
            self.add_node(node)
        for edge in edges:
            self.add_edge(edge)
        filename = self.g.render(filename='img/g')
        # macOS-specific viewer launch; fails/no-ops on other platforms.
        call(['open', 'img/g.png'])
        #print(filename)
    def add_node(self, node):
        """Add one node; its label is word-wrapped and any category-specific
        attributes from cat_attr_map are merged in."""
        node_attrs = {
            'label': self.format_label(node.label)
        }
        for c in node.meta.category_list:
            if c in self.cat_attr_map:
                attrs = self.cat_attr_map[c]
                for a in attrs:
                    node_attrs[a] = attrs[a]
        gn = self.g.node(safe(node.id), **node_attrs)
    def add_edge(self, edge):
        """Add one edge, labelled with its predicate."""
        edge_attrs = {
            'label': edge.predicate
        }
        self.g.edge(safe(edge.subject), safe(edge.target), **edge_attrs)
    def format_label(self, label):
        """Put each space-separated word on its own line so graphviz wraps
        long labels; None passes through unchanged."""
        if label is None:
            return None
        return "\n".join(label.split(" "))
| {
"repo_name": "SciGraph/py-SciGraph",
"path": "scigraph/renderers/GraphVizRenderer.py",
"copies": "1",
"size": "2552",
"license": "apache-2.0",
"hash": -8696371646109301000,
"line_mean": 23.0754716981,
"line_max": 72,
"alpha_frac": 0.4905956113,
"autogenerated": false,
"ratio": 3.7419354838709675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4732531095170967,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cjm'
import logging
import requests
from scigraph.model.BBOPGraph import *
from scigraph.model.Concept import *
from scigraph.model.EntityAnnotationResults import *
# TODO: modularize into vocab/graph/etc?
class SciGraph:
    """
    Thin HTTP client for a SciGraph REST service: graph neighbours,
    vocabulary search/autocomplete, and entity annotation.
    """
    def __init__(self, url=None):
        """url: service prefix ending in '/'; defaults to the demo host."""
        if url is not None:
            self.url_prefix = url
        else:
            self.url_prefix = "http://geoffrey.crbs.ucsd.edu:9000/scigraph/"
            #self.url_prefix = "http://datagraph.monarchinitiative.org/"
    def neighbors(self, id=None, params=None):
        """Fetch graph/neighbors for `id` and wrap the JSON in a BBOPGraph.

        params: optional dict of query parameters (depth, direction, ...).
        Mutable-default fix: the original used `params={}`.
        """
        response = self.get_response("graph/neighbors", id, "json", params or {})
        if response.status_code != 200:
            print("UH-OH:"+str(response))
        return BBOPGraph(response.json())
    def graph(self, id=None, params=None):
        """Convenience query: subClassOf closure (outgoing depth 20, incoming
        depth 1) merged with equivalentClass neighbours.

        `params` is accepted for interface compatibility but not used here.
        """
        g1 = self.neighbors(id, {'relationshipType': 'subClassOf', 'blankNodes': 'false', 'direction': 'OUTGOING', 'depth': 20})
        g2 = self.neighbors(id, {'relationshipType': 'subClassOf', 'direction': 'INCOMING', 'depth': 1})
        g3 = self.neighbors(id, {'relationshipType': 'equivalentClass', 'depth': 1})
        g1.merge(g2)
        g1.merge(g3)
        return g1
    def autocomplete(self, term=None):
        """Vocabulary autocomplete; returns the service's raw 'list' payload."""
        response = self.get_response("vocabulary/autocomplete", term)
        return response.json()['list']
    def search(self, term=None):
        """Vocabulary search; returns the hits wrapped as Concept objects."""
        response = self.get_response("vocabulary/search", term)
        return [Concept(r) for r in response.json()['concepts']]
    def annotate(self, content=None):
        """Entity annotation of free text.

        TODO: post not get -- long content will not fit in a URL.
        """
        response = self.get_response("annotations/entities", None, "json",
                                     {'content': content, 'longestOnly': True})
        return EntityAnnotationResults(response.json(), content)
    def get_response(self, path="", q=None, format=None, payload=None):
        """Build '<prefix><path>[/<q>][.<format>]' and GET it with `payload`
        as the query string; returns the raw requests.Response."""
        url = self.url_prefix + path
        if q is not None:
            url += "/" + q
        if format is not None:
            url = url + "." + format
        return requests.get(url, params=payload or {})
| {
"repo_name": "SciGraph/py-SciGraph",
"path": "scigraph/api/SciGraph.py",
"copies": "1",
"size": "2122",
"license": "apache-2.0",
"hash": 93287713574919380,
"line_mean": 33.2258064516,
"line_max": 123,
"alpha_frac": 0.6055607917,
"autogenerated": false,
"ratio": 3.696864111498258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4802424903198258,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cjm'
import logging
import requests
# TODO: consider an external model
class BBOPGraph:
    """
    In-memory graph of Node/Edge objects parsed from SciGraph's BBOP JSON
    ({'nodes': [...], 'edges': [...]}), with simple traversal helpers.
    """
    def __init__(self, obj=None):
        """obj: optional BBOP JSON dict; falsy builds an empty graph.

        BUG FIX: `nodemap` was a class-level dict, so every instance shared
        one node index (merging one graph polluted all others); it is now
        per-instance.  The old `obj={}` default also raised KeyError.
        """
        self.nodemap = {}
        self.nodes = []
        self.edges = []
        self.add_json_graph(obj)
    def add_json_graph(self, obj=None):
        """Merge a BBOP JSON dict into this graph; falsy input is a no-op."""
        if not obj:
            return
        for n in obj['nodes']:
            self.add_node(Node(n))
        for e in obj['edges']:
            self.add_edge(Edge(e))
    def add_node(self, n):
        """Add a Node and index it by id."""
        self.nodemap[n.id] = n
        self.nodes.append(n)
    def add_edge(self, e):
        """Add an Edge."""
        self.edges.append(e)
    def merge(self, g):
        """Copy every node and edge of graph `g` into this one."""
        for n in g.nodes:
            self.add_node(n)
        for e in g.edges:
            self.add_edge(e)
    def get_node(self, id):
        """Node with the given id (KeyError if absent)."""
        return self.nodemap[id]
    def get_label(self, id):
        """Label of the node with the given id."""
        return self.nodemap[id].label
    def get_root_nodes(self, relations=None):
        """Nodes with no outgoing edge (optionally only counting `relations`)."""
        return [n for n in self.nodes
                if not self.get_outgoing_edges(n.id, relations)]
    def get_leaf_nodes(self, relations=None):
        """Nodes with no incoming edge (optionally only counting `relations`)."""
        return [n for n in self.nodes
                if not self.get_incoming_edges(n.id, relations)]
    def get_outgoing_edges(self, nid, relations=None):
        """Edges whose subject is `nid`; empty/None `relations` matches any
        predicate."""
        relations = relations or []
        return [e for e in self.edges
                if e.subject == nid and (not relations or e.predicate in relations)]
    def get_incoming_edges(self, nid, relations=None):
        """Edges whose target is `nid`; empty/None `relations` matches any
        predicate."""
        relations = relations or []
        return [e for e in self.edges
                if e.target == nid and (not relations or e.predicate in relations)]
class Node:
    """A graph node: id, human-readable label, and Meta metadata."""
    def __init__(self, obj={}):
        # Pull the BBOP JSON fields ('id', 'lbl', 'meta') into attributes.
        self.id = obj['id']
        self.label = obj['lbl']
        self.meta = Meta(obj['meta'])
    def __str__(self):
        return '{0} "{1}"'.format(self.id, self.label)
class Edge:
    """A directed edge: subject -[predicate]-> target, parsed from BBOP
    JSON keys 'sub'/'pred'/'obj'."""
    def __init__(self, obj={}):
        self.subject = obj['sub']
        self.predicate = obj['pred']
        self.target = obj['obj']
    def __str__(self):
        return "{0}-[{1}]->{2}".format(self.subject, self.predicate, self.target)
class Meta:
    """Node metadata: required type list, optional category list, plus the
    raw property dict for anything else."""
    def __init__(self, obj={}):
        self.type_list = obj['types']
        # 'category' is optional in the payload; default to no categories.
        self.category_list = obj.get('category', [])
        self.pmap = obj
| {
"repo_name": "SciGraph/py-SciGraph",
"path": "scigraph/model/BBOPGraph.py",
"copies": "1",
"size": "2554",
"license": "apache-2.0",
"hash": -8387259188252382000,
"line_mean": 23.5576923077,
"line_max": 69,
"alpha_frac": 0.4960845732,
"autogenerated": false,
"ratio": 3.587078651685393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4583163224885393,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cjoakim'
from .constants import Constants
class Distance(object):
    """A distance value tagged with a unit of measure (miles, kilometers or
    yards), convertible between units via the Constants factors."""
    def __init__(self, dist=0.0, uom=Constants.uom_miles()):
        self.value = float(dist)
        self.uom = self.unit_of_measure(uom)
    def unit_of_measure(self, s):
        """Canonicalize a unit string: 'k' -> kilometers, 'y' -> yards,
        anything else -> miles (case/whitespace insensitive)."""
        key = str(s).strip().lower()
        if key == 'k':
            return Constants.uom_kilometers()
        if key == 'y':
            return Constants.uom_yards()
        return Constants.uom_miles()
    def is_miles(self):
        return self.uom == Constants.uom_miles()
    def is_kilometers(self):
        return self.uom == Constants.uom_kilometers()
    def is_yards(self):
        return self.uom == Constants.uom_yards()
    def as_miles(self):
        """This distance expressed in miles."""
        if self.is_miles():
            return self.value
        if self.is_kilometers():
            return self.value / Constants.kilometers_per_mile()
        return self.value / Constants.yards_per_mile()
    def as_kilometers(self):
        """This distance expressed in kilometers."""
        if self.is_miles():
            return self.value * Constants.kilometers_per_mile()
        if self.is_kilometers():
            return self.value
        return self.value / Constants.yards_per_kilometer()
    def as_yards(self):
        """This distance expressed in yards."""
        if self.is_miles():
            return self.value * Constants.yards_per_mile()
        if self.is_kilometers():
            return self.value * Constants.yards_per_kilometer()
        return self.value
    def _other_in_own_uom(self, other):
        # Express another Distance in this instance's unit of measure.
        if self.is_miles():
            return other.as_miles()
        if self.is_kilometers():
            return other.as_kilometers()
        return other.as_yards()
    def add(self, another_instance):
        """In-place add of another Distance (converted to this unit first)."""
        self.value = self.value + self._other_in_own_uom(another_instance)
    def subtract(self, another_instance):
        """In-place subtract of another Distance (converted to this unit)."""
        self.value = self.value - self._other_in_own_uom(another_instance)
    def __str__(self):
        return "<Distance value:{0} uom:{1}>".format(self.value, self.uom)
| {
"repo_name": "cjoakim/m26-py",
"path": "m26/distance.py",
"copies": "1",
"size": "2226",
"license": "mit",
"hash": -2812487091825601500,
"line_mean": 30.3521126761,
"line_max": 74,
"alpha_frac": 0.5759209344,
"autogenerated": false,
"ratio": 3.5903225806451613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9666243515045161,
"avg_score": 0,
"num_lines": 71
} |
__author__ = 'cjoakim'
from .distance import Distance
from .elapsed_time import ElapsedTime
from .speed import Speed
class RunWalkCalculator(object):
    """Average pace/speed and projected finish time for a run/walk interval
    strategy (run X at pace P1, walk Y at pace P2, repeated)."""
    @classmethod
    def calculate(cls, run_hhmmss, run_ppm, walk_hhmmss, walk_ppm, miles):
        """Return a dict echoing the inputs plus, when all inputs are
        truthy, avg_mph / avg_ppm / proj_time / proj_miles."""
        result = {
            'run_hhmmss': run_hhmmss,
            'run_ppm': run_ppm,
            'walk_hhmmss': walk_hhmmss,
            'walk_ppm': walk_ppm,
            'miles': float(miles),
        }
        if not (run_hhmmss and run_ppm and walk_hhmmss and walk_ppm and miles):
            # Any missing input: just echo what we were given.
            return result
        run_len = ElapsedTime(run_hhmmss)
        run_pace = ElapsedTime(run_ppm)
        walk_len = ElapsedTime(walk_hhmmss)
        walk_pace = ElapsedTime(walk_ppm)
        target = Distance(float(miles))
        one_mile = Distance(float(1.0))
        # Time-weighted blend of the two paces over one mile.
        total_secs = float(run_len.secs + walk_len.secs)
        run_fraction = float(run_len.secs / total_secs)
        walk_fraction = float(1.0 - run_fraction)
        blended_run_secs = float(run_fraction * run_pace.secs)
        blended_walk_secs = float(walk_fraction * walk_pace.secs)
        avg_pace = ElapsedTime(float(blended_run_secs + blended_walk_secs))
        avg_speed = Speed(one_mile, avg_pace)
        result['avg_mph'] = avg_speed.mph()
        result['avg_ppm'] = avg_speed.pace_per_mile()
        result['proj_time'] = avg_speed.projected_time(target)
        result['proj_miles'] = target.as_miles()
        return result
| {
"repo_name": "cjoakim/m26-py",
"path": "m26/run_walk_calculator.py",
"copies": "1",
"size": "1653",
"license": "mit",
"hash": -2439331850620752400,
"line_mean": 36.5681818182,
"line_max": 74,
"alpha_frac": 0.5940713854,
"autogenerated": false,
"ratio": 3.306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44000713854,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cjoakim'
import math
from .elapsed_time import ElapsedTime
class Speed(object):
    """Speed = Distance / ElapsedTime, with pace and race-projection helpers."""
    def __init__(self, d, et):
        self.dist = d    # an instance of Distance
        self.etime = et  # an instance of ElapsedTime
    def mph(self):
        """Miles per hour."""
        return self.dist.as_miles() / self.etime.hours()
    def kph(self):
        """Kilometers per hour."""
        return self.dist.as_kilometers() / self.etime.hours()
    def yph(self):
        """Yards per hour."""
        return self.dist.as_yards() / self.etime.hours()
    def pace_per_mile(self):
        """Pace formatted as 'M:SS.s' (seconds zero-padded, truncated to
        five characters)."""
        spm = self.seconds_per_mile()
        mm = math.floor(spm / 60.0)
        ss = spm - (mm * 60.0)
        if ss < 10:
            ss = "0{0}".format(ss)
        else:
            ss = "{0}".format(ss)
        if len(ss) > 5:
            ss = ss[0:5]
        return "{0}:{1}".format(mm, ss)
    def seconds_per_mile(self):
        """Seconds needed to cover one mile at this speed."""
        return float(self.etime.secs / self.dist.as_miles())
    def projected_time(self, another_distance, algorithm='simple'):
        """Project the finish time ('HH:MM:SS') for another Distance.

        algorithm='riegel' applies Riegel's fatigue model
        t2 = t1 * (d2/d1)**1.06; anything else assumes constant pace.
        """
        # BUG FIX: the original used `algorithm is 'riegel'`, which tests
        # object identity and only worked via CPython string interning (and
        # raises SyntaxWarning on Python >= 3.8).  Compare with `==`.
        if algorithm == 'riegel':
            t1 = float(self.etime.secs)
            d1 = self.dist.as_miles()
            d2 = another_distance.as_miles()
            t2 = t1 * math.pow(float(d2 / d1), float(1.06))
            return ElapsedTime(t2).as_hhmmss()
        secs = float(self.seconds_per_mile() * another_distance.as_miles())
        return ElapsedTime(secs).as_hhmmss()
    def age_graded(self, event_age, graded_age):
        """Scale this speed's time by the ratio of the two Age objects'
        max pulses, returning a new Speed."""
        ag_factor = event_age.max_pulse() / graded_age.max_pulse()
        graded_secs = float(self.etime.secs) * float(ag_factor)
        return Speed(self.dist, ElapsedTime(graded_secs))
    def __str__(self):
        template = "<Speed dist:{0} etime:{1}>"
        return template.format(self.dist, self.etime)
| {
"repo_name": "cjoakim/m26-py",
"path": "m26/speed.py",
"copies": "1",
"size": "1808",
"license": "mit",
"hash": 7663225684585853000,
"line_mean": 28.1612903226,
"line_max": 79,
"alpha_frac": 0.5508849558,
"autogenerated": false,
"ratio": 3.1334488734835357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4184333829283536,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cjoakim'
import math
from numbers import Number
from .constants import Constants
class ElapsedTime(object):
    """A duration, constructible from a number of seconds or from 'SS',
    'MM:SS' or 'HH:MM:SS' strings; stores hh/mm/ss parts plus total secs."""
    def __init__(self, val):
        self.secs = 0
        self.hh = 0
        self.mm = 0
        self.ss = 0
        val = val or 0
        if isinstance(val, Number):
            self.initialize_from_number(val)
        elif isinstance(val, str):
            self.initialize_from_string(val)
    def initialize_from_number(self, val):
        """Decompose a numeric seconds value into hh/mm/ss parts."""
        per_hour = Constants.seconds_per_hour()
        self.secs = float(val)
        self.hh = math.floor(self.secs / per_hour)
        remainder = self.secs - (self.hh * per_hour)
        self.mm = math.floor(remainder / 60.0)
        self.ss = remainder - (self.mm * 60.0)
    def initialize_from_string(self, val):
        """Parse 'SS', 'MM:SS' or 'HH:MM:SS'; four or more fields leave the
        parts at zero.  Blank input is ignored."""
        text = str(val).strip()
        if text:
            parts = [self.to_float(token) for token in text.split(':')]
            if len(parts) == 1:
                self.ss = parts[0]
            elif len(parts) == 2:
                self.mm, self.ss = parts
            elif len(parts) == 3:
                self.hh, self.mm, self.ss = parts
            self.secs = (self.hh * 3600.0) + (self.mm * 60.0) + self.ss
    def to_float(self, s):
        """Lenient float conversion; malformed fields count as 0.0."""
        try:
            return float(s)
        except ValueError:
            return float(0.0)
    def hours(self):
        """Total duration in hours."""
        return float(self.secs / Constants.seconds_per_hour())
    def as_hhmmss(self):
        """Format as zero-padded 'HH:MM:SS' (fractional parts truncated)."""
        return ":".join(self.zero_fill(part) for part in (self.hh, self.mm, self.ss))
    def zero_fill(self, n):
        """Two-digit, integer-truncated string for one time field."""
        return "0{0}".format(int(n)) if n < 10 else "{0}".format(int(n))
    def __str__(self):
        return "<ElapsedTime hh:{0} mm:{1} ss:{2} secs:{3}>".format(
            self.hh, self.mm, self.ss, self.secs)
| {
"repo_name": "cjoakim/m26-py",
"path": "m26/elapsed_time.py",
"copies": "1",
"size": "2100",
"license": "mit",
"hash": 5164572181177640000,
"line_mean": 27.3783783784,
"line_max": 68,
"alpha_frac": 0.5123809524,
"autogenerated": false,
"ratio": 3.3870967741935485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43994777265935486,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cjoakim'
class Age(object):
    """
    Instances of this class represent the age of a person, as a float value
    of their age in years, with simple heart-rate helpers.
    """
    def __init__(self, n=0.0):
        self.value = float(n)
    def max_pulse(self):
        """Estimated max heart rate: 220 - age, floored at 200 for under-20s."""
        return 200.0 if self.value < 20 else 220.0 - self.value
    def add(self, another_instance):
        """Add another Age in place; returns the (possibly unchanged) value."""
        if another_instance:
            self.value += another_instance.value
        return self.value
    def subtract(self, another_instance):
        """Subtract another Age in place; returns the (possibly unchanged) value."""
        if another_instance:
            self.value -= another_instance.value
        return self.value
    def training_zones(self):
        """Five heart-rate training zones at 95%..75% of max pulse,
        zone 1 being the hardest."""
        percentages = [0.95, 0.90, 0.85, 0.80, 0.75]
        return [
            {
                'zone': index + 1,
                'age': self.value,
                'max': self.max_pulse(),
                'pct_max': pct,
                'pulse': self.max_pulse() * pct,
            }
            for index, pct in enumerate(percentages)
        ]
    def __str__(self):
        return "<Age value:{0}>".format(self.value)
| {
"repo_name": "cjoakim/m26-py",
"path": "m26/age.py",
"copies": "1",
"size": "1171",
"license": "mit",
"hash": 1222010966062210600,
"line_mean": 26.880952381,
"line_max": 75,
"alpha_frac": 0.5328778822,
"autogenerated": false,
"ratio": 3.753205128205128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47860830104051283,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ckinsey'
import re
class StacheTag(object):
    """
    One parsed StacheTag: a whitespace-free word immediately followed by '{'.
    """
    def __init__(self, stachetag, *args, **kwargs):
        """
        Validate the textual stachetag and split off the bare tag word.

        Raises ValueError when the text is not word-plus-'{'.
        """
        if re.match(r'^\S+\{$', stachetag) is None:
            raise ValueError("Stachetag %s does not smell like a stachetag." % stachetag)
        self.stachetag = stachetag
        self.tag = stachetag[:-1]
class StacheWaxer(object):
    """
    Wraps a string and exposes its stachetags (word + '{') in several forms.
    """
    def __init__(self, string, *args, **kwargs):
        self._original_string = string
        self._stache_cache = None
    def __iter__(self):
        """Iterate lazily over the parsed StacheTag objects."""
        for stache in self.cached_staches():
            yield stache
    def get_tags(self):
        """Bare tag words, in order of appearance."""
        return [stache.tag for stache in self.cached_staches()]
    def get_stachetags(self):
        """Tags in full stachetag ('word{') form."""
        return [stache.stachetag for stache in self.cached_staches()]
    def get_hashtags(self):
        """The hashtag form is deliberately unsupported."""
        raise NotImplementedError("Ain't gonna happen baby!")
    def cached_staches(self):
        """Parse on first use and memoize the StacheTag list (lazy parsing)."""
        if self._stache_cache is None:
            matches = re.findall(r'\S+\{', self._original_string)
            self._stache_cache = [StacheTag(match) for match in matches]
        return self._stache_cache
    def format_staches(self, format_string=None):
        """
        Re-render the original string with each stachetag run through
        `format_string`, which may reference {tag} and {stachetag}, i.e.:
        > waxer.format_staches(format_string="<a href='#{tag}'>{stachetag}</a>")
        "string with <a href='#awesome'>awesome{</a> stachetags!"
        """
        if format_string is None:
            raise ValueError('StacheStash.format_tags requires a format_string argument!')
        replacement = format_string.format(**{'tag': '\\1', 'stachetag': '\\1{'})
        return re.sub(r'(\S+)\{', replacement, self._original_string)
| {
"repo_name": "heapsortjobs/python-stachetag",
"path": "stachetag/__init__.py",
"copies": "1",
"size": "2335",
"license": "mit",
"hash": 1220149922693318700,
"line_mean": 27.1325301205,
"line_max": 110,
"alpha_frac": 0.5841541756,
"autogenerated": false,
"ratio": 3.7240829346092506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4808237110209251,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ck'
import csv
import os
import sys
import json
import requests
from requests.auth import HTTPBasicAuth
# Base URL of Bintray's v1 REST API, shared by the helpers below.
bintray_api_root = "https://bintray.com/api/v1"
def read_prop_file(f):
    """Parse a 'key = value' properties file at path `f` into a dict.

    Each line is split on '=' via csv.reader and both sides are stripped
    of surrounding whitespace.

    BUG FIX: the original passed `open(f, 'r')` inline and never closed
    the handle; a context manager guarantees it is released.
    """
    with open(f, 'r') as handle:
        return dict((row[0].strip(), row[1].strip())
                    for row in csv.reader(handle, delimiter='='))
def create_package(repo_name, package_name):
    """POST a new package definition to Bintray's hmrc/<repo_name> repo.

    Bintray credentials ('user'/'password') are read from
    ~/.bintray/.credentials.  Returns the requests.Response.
    """
    endpoint = bintray_api_root + "/packages/hmrc/" + repo_name
    github_url = "https://github.com/hmrc/" + package_name
    credentials = read_prop_file(os.path.expanduser('~/.bintray/.credentials'))
    package_spec = {
        "name": package_name,
        "desc": package_name + " " + repo_name,
        "labels": [],
        "licenses": ["Apache-2.0"],
        "vcs_url": github_url,
        "website_url": github_url,
        "issue_tracker_url": github_url + "/issues",
        "github_repo": "hmrc/" + package_name,
        "public_download_numbers": True,
        "public_stats": True
    }
    return requests.post(
        endpoint,
        data=json.dumps(package_spec),
        auth=HTTPBasicAuth(credentials['user'], credentials['password']),
        headers={'content-type': 'application/json'})
def create_packages(package_name):
    """Create the package in both the 'releases' and 'release-candidates'
    Bintray repos.

    Stops at the first non-201 response and returns it; otherwise returns
    the last (successful) response.
    """
    last_response = None
    for repo_name in ('releases', 'release-candidates'):
        last_response = create_package(repo_name, package_name)
        if last_response.status_code != 201:
            return last_response
        print("created " + package_name + " in " + repo_name)
    return last_response
def check_exists_on_github(repository):
    """HEAD https://github.com/hmrc/<repository>; the caller inspects the
    response's status code (404 means the repo does not exist)."""
    repo_url = "https://github.com/hmrc/" + repository
    return requests.head(repo_url)
# --- Script entry: validate CLI args, verify the GitHub repo exists, then
# --- create the matching Bintray packages.  Runs at import time.
if len(sys.argv) != 2:
    print('usage: repository')
    sys.exit(-1)
new_repository = sys.argv[1]
print('checking github repository exists')
github_response = check_exists_on_github(new_repository)
if github_response.status_code == 404:
    print('failed to find github repository')
    sys.exit(-1)
print('creating %s on bintray' % new_repository)
bintray_response = create_packages(new_repository)
if bintray_response.status_code == 201:
    print('successfully created bintray repositories')
else:
    # NOTE(review): under Python 2 this comma-form print emits a tuple repr;
    # presumably Python 3 (or a single formatted string) was intended -- confirm.
    print('failed to create bintray repositories with', str(bintray_response.status_code), str(bintray_response.text))
    sys.exit(-1)
| {
"repo_name": "hmrc/kickstarters",
"path": "init-repository.py",
"copies": "2",
"size": "2353",
"license": "apache-2.0",
"hash": -1282165705763419000,
"line_mean": 28.049382716,
"line_max": 118,
"alpha_frac": 0.6417339567,
"autogenerated": false,
"ratio": 3.4705014749262535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112235431626254,
"avg_score": null,
"num_lines": null
} |
__author__ = 'clarkg1'
import time
from optparse import OptionParser
from top.render import Renderer
from prof.parse import ProfileLog
# Script entry point: tail a profiler log ("prof.log") and feed each entry to
# a Renderer (from top.render -- presumably a terminal/top-style UI; the exact
# rendering is defined elsewhere).  -r lets the user step through entries
# manually instead of streaming them.
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option('-f', '--file', dest='file', metavar='LOGFILE', help='target file (normally "/path/to/prof.log")', default='./prof.log')
    parser.add_option('-r', '--review', dest='review', help='manually advance through log entries to allow for easier review', action='store_true', default=False)
    (options, args) = parser.parse_args()
    log = ProfileLog(options.file)
    render = Renderer()
    # In review mode the renderer only pulls the next entry on user request.
    render.advance = options.review
    try:
        while not render.finished:
            entry = None
            if render.advance or render.fetch:
                entry = log.read()
            if render.fetch:
                # One-shot fetch request satisfied; clear the flag.
                render.fetch = False
            render.update(entry)
            if not entry:
                # Nothing new in the log yet; sleep briefly to avoid busy-looping.
                time.sleep(0.05)
        render.cleanup()
    except KeyboardInterrupt:
        # Ctrl-C: let the renderer restore the terminal before exiting.
        render.cleanup()
        pass
| {
"repo_name": "cubic1271/pybrof",
"path": "top.py",
"copies": "1",
"size": "1038",
"license": "mit",
"hash": -3779611215902435300,
"line_mean": 33.6,
"line_max": 162,
"alpha_frac": 0.6069364162,
"autogenerated": false,
"ratio": 4.119047619047619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5225984035247618,
"avg_score": null,
"num_lines": null
} |
__author__ = 'clarkg1'
import json
import os
import sh
import shutil
import sys
import threading
import time
import uuid
# TODO: Violating DRY because I'm too lazy to think about how to do this better ... :/
class OptionsEnabled:
    # Module-wide feature flags, flipped to True by the optional-import
    # probing below once psutil / mako import successfully.
    PSUTIL_ENABLED = False
    MAKO_ENABLED = False
def import_error(item):
    # Print guidance for a missing required module `item`, then terminate
    # the whole script with exit code -1.  (Python 2 print statements; this
    # module is Python 2 only.)
    print "Warning: unable to import `" + str(item) + "`"
    print "This is a fatal error, and this script will therefore terminate"
    print ""
    print "If using this script in conjunction with configure.py:"
    print "Please ensure that configure.py has been run *AND* that PYTHONPATH has been set to include the following:"
    print "* /path/to/pybrig/env/lib/python2.7/site-packages"
    print "* /path/to/pybrig/env/lib64/python2.7/site-packages"
    print ""
    print "For example:"
    print "export PYTHONPATH=/tmp/pybrig/env/lib/python2.7/site-packages:/tmp/pybrig/env/lib64/python2.7/site-packages"
    print ""
    print "Additionally, please be sure that LD_LIBRARY_PATH is set (e.g. to '/tmp/pybrig/env/lib') or that configure.py"
    print "was told to install packages into a known location (e.g. a directory listed in ld.so.conf)"
    sys.exit(-1)
# Optional-dependency probing: flip the OptionsEnabled flags when the import
# succeeds; otherwise import_error() prints setup guidance and exits.
try:
    import psutil
    OptionsEnabled.PSUTIL_ENABLED = True
except ImportError:
    import_error("psutil")
try:
    from mako.template import Template
    OptionsEnabled.MAKO_ENABLED = True
except ImportError:
    import_error("mako")
class TrialInfo(object):
    # Serializable record pairing a trial's UUID string with its name
    # (consumed by BenchmarkInfo.json()).
    def __init__(self, uuid, name):
        # NOTE: the `uuid` parameter shadows the stdlib uuid module in this scope.
        self.uuid = uuid
        self.name = name
class BenchmarkInfo(object):
    """Metadata for one benchmark run: a fresh UUID, the wall-clock start
    time, the originating system's UUID, and the trials executed."""
    def __init__(self, system_uuid=None):
        self.uuid = str(uuid.uuid4())
        self.time = time.time()
        self.system_uuid = system_uuid
        self.trials = []
    def add(self, trial):
        """Record a completed trial by its uuid and name."""
        self.trials.append(TrialInfo(str(trial.uuid), trial.name))
    def json(self):
        """The whole record as pretty-printed JSON, serializing nested
        objects via their attribute dicts."""
        return json.dumps(self, sort_keys=True, indent=4,
                          default=lambda obj: vars(obj))
class BenchmarkTrial(object):
    """Run one Bro benchmark trial: render a mako template into a fresh
    per-trial directory, invoke bro (via an `sh` command object) against a
    capture file, and save stdout/stderr there."""
    def __init__(self, basedir, template, bro, capture, realtime=False, bare=False, scripts=[]):
        # NOTE(review): `scripts=[]` is a mutable default argument; it is
        # never mutated here, but callers should not rely on it -- confirm.
        self.basedir = basedir      # root directory all trials run under
        self.template = template    # mako template filename under 'templates/'
        self.bro = bro              # callable (sh.Command-style) launching bro
        self.capture = capture      # pcap file passed to bro via -r
        self.realtime = realtime    # add --pseudo-realtime when True
        self.bare = bare            # add -b (bare mode) when True
        self.scripts = scripts      # extra script arguments appended to bro
        self.results = None
        self.uuid = None
        self.name = None
    def pushd(self, dirname):
        # Shell-style pushd: remember the cwd, then chdir into `dirname`.
        self.dirstack.append(os.getcwd())
        os.chdir(dirname)
        return self.dirstack
    def popd(self):
        # Shell-style popd: return to the most recently pushed directory.
        if len(self.dirstack) > 0:
            os.chdir(self.dirstack.pop())
        return self.dirstack
    def json(self):
        # Serialize self.results (set by external code) to pretty JSON.
        return json.dumps(self.results, default=lambda o: o.__dict__, sort_keys=True, indent=4)
    def execute(self, name, params, callback=None):
        # Run one trial called `name`, rendering the template with `params`
        # and invoking `callback(self)` (if given) before logs are removed.
        # NOTE(review): `map(...)` below only appends under Python 2 -- on
        # Python 3 the lazy map would never run; this module appears to be
        # Python 2 only (see the print statements above).
        self.uuid = uuid.uuid4()
        self.params = params
        # print "Opening template " + os.path.join(os.getcwd(), os.path.join('templates', self.template))
        run_script = Template(filename=os.path.join('templates', self.template))
        if not os.path.exists(self.basedir):
            os.makedirs(self.basedir)
        self.pushd(self.basedir)
        if(os.path.exists(name)):
            # print "Removing existing output directory: " + os.path.join(os.getcwd(), name)
            sh.rm('-fr', name)
        os.mkdir(name)
        self.pushd(name)
        # Render execution script template ...
        out_script = run_script.render(**self.params)
        # print "Launching trial in directory: " + os.getcwd()
        out_file = open(self.template, 'w')
        # ... to a file in our current directory.
        out_file.write(out_script)
        out_file.close()
        # Construct the bro argument string.
        args = []
        if(self.realtime):
            args.append('--pseudo-realtime')
        if(self.bare):
            args.append('-b')
        args.append('-r')
        args.append(self.capture)
        map(lambda x: args.append(x), self.scripts)
        # print "Launching bro ... "
        process = self.bro(args, _bg=True)
        process.wait()
        # Preserve bro's output alongside the trial artifacts.
        std_out = open('.stdout', 'w')
        std_out.write(process.stdout)
        std_out.close()
        std_err = open('.stderr', 'w')
        std_err.write(process.stderr)
        std_err.close()
        if callback:
            callback(self)
        # Clean up bro's *.log files, then restore the original cwd.
        sh.rm('-f', sh.glob('*.log'))
        self.popd()
        self.popd()
| {
"repo_name": "cubic1271/pybrig",
"path": "bro/benchmark/info/trial.py",
"copies": "1",
"size": "4449",
"license": "bsd-3-clause",
"hash": -2244199212615908900,
"line_mean": 29.4726027397,
"line_max": 121,
"alpha_frac": 0.6122724208,
"autogenerated": false,
"ratio": 3.628874388254486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9685774276600057,
"avg_score": 0.011074506490885894,
"num_lines": 146
} |
__author__ = 'clarkmatthew'
from prettytable import PrettyTable
from colorama import Fore, init
from subprocess import Popen, PIPE
import re
cmd_string = 'euca-describe-images --show-empty-fields -h'
cmd = cmd_string.split()
p = Popen(cmd, stdout=PIPE)
p_out, p_err = p.communicate()
if p.returncode:
print str(p_out)
raise RuntimeError('Cmd:"{0}" failed. Code:{1}. stderr:"{1}"'
.format(cmd_string, p.returncode, p_err))
lines = p_out.splitlines()
args = {}
arg = None
for line in lines:
help = ""
print 'Looking at line:' + str(line)
if not line.strip():
continue
if re.search('^\w', line):
print 'Parsing arg line: ' + str(line)
line = line.strip()
split_line = line.split()
arg = line[0]
help = " ".join(line[1:-1])
args[arg] = help
print 'got arg:"{0}", and help:"{1}"'.format(arg, args[arg])
else:
print 'parsing help line for arg:{0}, adding:{1}'.format(arg, line.strip())
args[arg] += line.strip()
'''
pt = PrettyTable()
for arg in args:
pt.add_row([arg, args[arg]])
print pt
'''
| {
"repo_name": "bigschwan/simplecli",
"path": "menutree/euca2ools/euca2ools_command.py",
"copies": "2",
"size": "1136",
"license": "mit",
"hash": 7695601673091688000,
"line_mean": 21.2745098039,
"line_max": 83,
"alpha_frac": 0.5845070423,
"autogenerated": false,
"ratio": 3.2364672364672367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9716943692594366,
"avg_score": 0.020806117234573797,
"num_lines": 51
} |
__author__ = 'clarkmatthew'
from simplecli.basemenu import BaseMenu, CliError
from subprocess import Popen, PIPE
import os
import glob
class pkg_mgr(object):
    """Abstract package-manager interface; subclasses implement the verbs
    on top of the shared cmd() runner."""
    def info(self, pkg):
        raise NotImplementedError()
    def install(self, pkg):
        raise NotImplementedError()
    def uninstall(self, pkg):
        raise NotImplementedError()
    def search(self, pkg):
        raise NotImplementedError()
    def cmd(self, cmd_string):
        """Run a whitespace-split command, returning its captured stdout.

        Raises RuntimeError (including stderr) on a non-zero exit code.
        """
        cmd = cmd_string.split()
        p = Popen(cmd, stdout=PIPE)
        p_out, p_err = p.communicate()
        if p.returncode:
            # Parenthesized print keeps this valid on Python 2 and 3.
            print(str(p_out))
            # BUG FIX: the stderr placeholder was '{1}' (duplicating the
            # return code); use '{2}' so p_err is reported.
            raise RuntimeError('Cmd:"{0}" failed. Code:{1}. stderr:"{2}"'
                               .format(cmd_string, p.returncode, p_err))
        return p_out
class yum(pkg_mgr):
    """pkg_mgr backed by the `yum` command-line tool."""
    def info(self, pkg):
        """Show package details via `yum info <pkg>`."""
        return self.cmd('yum info {0}'.format(pkg))
    def install(self, pkg):
        """Install the package via `yum install <pkg>`."""
        return self.cmd('yum install {0}'.format(pkg))
class Euca2oolsBase(BaseMenu):
    """Menu for installing/inspecting euca2ools via the host's package
    manager.  Several methods are unfinished stubs (see inline notes)."""
    name = None
    _summary = None
    _submenus = []
    _source_url = "https://github.com/eucalyptus/euca2ools.git"
    _latest_pkg_url = ""
    _epel_url = "http://downloads.eucalyptus.com/software/euca2ools/2.1/rhel/6/x86_64/epel-release-6.noarch.rpm"
    _repo_url = "http://downloads.eucalyptus.com/software/euca2ools/3.0/rhel/6/x86_64/euca2ools-release-3.0.noarch.rpm"
    _pkg_mgr = None
    @property
    def pkg_mgr(self):
        """Name of the first package manager (yum/apt/pip) found on PATH,
        cached after the first successful lookup.

        Raises RuntimeError when none of them is available.
        """
        if self._pkg_mgr:
            return self._pkg_mgr
        pms = ['yum', 'apt', 'pip']
        for pm in pms:
            # BUG FIX: the original iterated os.environ['PATHS'] -- a key
            # that normally does not exist (and would iterate characters if
            # it did).  Use PATH split on ':', as _find_euca2ools does.
            for path in os.environ['PATH'].split(':'):
                pm_path = os.path.join(path, pm)
                if os.path.isfile(pm_path) and os.access(pm_path, os.X_OK):
                    self._pkg_mgr = pm
                    return pm
        raise RuntimeError('No package manager found available to this user:'
                           + ",".join(pms))
    def show_package_info(self):
        # NOTE(review): unfinished stub -- Popen('') cannot succeed; left
        # failing at call time until it is implemented.
        cmd = "yum info "
        p = Popen('')
    def _preflight_checks(self):
        # BUG FIX: the original `if not config:` had an empty body (a syntax
        # error that broke the whole module); the intended handling is TBD.
        config = getattr(self.env, 'euc2ools', None)
        if not config:
            pass  # TODO: build/populate the euca2ools config here
    def _find_euca2ools(self):
        """Map each PATH directory to the list of euca* executables in it."""
        tools_paths = {}
        for path in os.environ['PATH'].split(':'):
            tools = glob.glob(os.path.join(path, 'euca*'))
            if tools:
                tools_paths[path] = tools
        return tools_paths
| {
"repo_name": "bigschwan/simplecli",
"path": "menutree/euca2ools/euca2oolsbase.py",
"copies": "2",
"size": "2364",
"license": "mit",
"hash": 6238456592523856000,
"line_mean": 24.6956521739,
"line_max": 119,
"alpha_frac": 0.5638747885,
"autogenerated": false,
"ratio": 3.4867256637168142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5050600452216814,
"avg_score": null,
"num_lines": null
} |
import json
class Namespace(object):
    """
    Convert dict (if provided) into attributes and return a somewhat
    generic object; nested dicts become nested Namespaces.
    """
    def __init__(self, newdict=None):
        if newdict:
            for key in newdict:
                value = newdict[key]
                try:
                    if isinstance(value, dict):
                        # BUG FIX: the original called
                        # setattr(self, Namespace(value), key) -- the name
                        # and value arguments were swapped, which raises
                        # TypeError (attribute names must be strings).
                        setattr(self, key, Namespace(value))
                    else:
                        setattr(self, key, value)
                except Exception:
                    # Parenthesized print keeps this valid on Python 2 and 3.
                    print('"{0}" ---> "{1}" , type: "{2}"'.format(key,
                                                                  value,
                                                                  type(value)))
                    raise
    def _get_keys(self):
        """Names of all attributes set on this namespace."""
        return vars(self).keys()
    def _to_json(self):
        """Serialize recursively (via each object's __dict__) to pretty JSON."""
        return json.dumps(self,
                          default=lambda o: o.__dict__,
                          sort_keys=True,
                          indent=4)
| {
"repo_name": "tbeckham/DeploymentManager",
"path": "config_manager/namespace.py",
"copies": "3",
"size": "1031",
"license": "apache-2.0",
"hash": 8788768164762655000,
"line_mean": 30.2424242424,
"line_max": 78,
"alpha_frac": 0.3860329777,
"autogenerated": false,
"ratio": 5.129353233830845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012626262626262625,
"num_lines": 33
} |
__author__ = 'clarkmatthew'
# Prefer setuptools; fall back to plain distutils when unavailable.
# NOTE(review): the fallback imports only `setup`, yet `find_packages`
# (a setuptools-only helper) is still used below -- the distutils path
# would raise NameError; confirm whether the fallback is still needed.
try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup
import simplecli

# Package metadata; the version number is sourced from simplecli.__version__.
setup(name = "simplecli",
      version = simplecli.__version__,
      description = "CLI framework and utilities for a "
                    "Hybrid Eucalyptus Cloud environment",
      long_description="CLI framework and utilities for a "
                       "Hybrid Eucalyptus Cloud environment",
      author = "Matt Clark",
      author_email = "matt.clark@eucalyptus.com",
      url = "http://open.eucalyptus.com",
      install_requires = [],
      packages = find_packages(),
      license = 'BSD (Simplified)',
      platforms = 'Posix; MacOS X;',
      classifiers = [ 'Development Status :: 3 - Alpha',
                      'Intended Audience :: System Administrators',
                      'License :: OSI Approved :: BSD License',
                      'Operating System :: OS Independent',
                      'Topic :: System :: Systems Administration',
                      ],
      )
| {
"repo_name": "nephomaniac/simplecli",
"path": "setup.py",
"copies": "2",
"size": "1067",
"license": "mit",
"hash": -231561721902276260,
"line_mean": 35.7931034483,
"line_max": 67,
"alpha_frac": 0.5716963449,
"autogenerated": false,
"ratio": 4.445833333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.021031298192204236,
"num_lines": 29
} |
__author__ = 'ClarkWong'
from app import db, api
from flask.ext.restful import reqparse, abort, Resource, fields, marshal_with, marshal
from models import User
# Marshalling schema used by flask-restful for User responses.
# NOTE(review): 'password' is part of the schema, so GET/PUT/POST responses
# return the stored password to clients -- confirm this is intended; it
# looks like a data leak.
user_fields = {
    'id' : fields.Integer,
    'name' : fields.String,
    'password' : fields.String,
    'privilege' : fields.Integer,
    'description' : fields.String
}
class UserApi(Resource):
    """REST resource for a single user: GET/PUT/DELETE /api/v1/users/<user_id>."""

    def __init__(self):
        # All arguments are optional here; PUT only applies the ones supplied.
        self.parser = reqparse.RequestParser()
        self.parser.add_argument('name', type=str, location='json')
        self.parser.add_argument('password', type=str, location='json')
        self.parser.add_argument('privilege', type=int, location='json')
        self.parser.add_argument('description', type=str, location='json')
        super(UserApi, self).__init__()

    @marshal_with(user_fields)
    def get(self, user_id):
        """Return the user with *user_id* (201), or 404 if it does not exist."""
        user = User.query.filter_by(id=user_id).first()
        if user:
            return user, 201
        else:
            abort(404, message='User {} not found'.format(user_id))

    def delete(self, user_id):
        """Delete the user with *user_id*; 404 if missing."""
        user = User.query.filter_by(id=user_id).first()
        if user:
            db.session.delete(user)
            db.session.commit()
            return { 'message' : 'Delete User {} succeed'.format(user_id)}, 201
        else:
            abort(404, message='User {} not found'.format(user_id))

    @marshal_with(user_fields)
    def put(self, user_id):
        """Partially update the user: only non-None parsed arguments are applied."""
        user = User.query.filter_by(id=user_id).first()
        if user:
            args = self.parser.parse_args()
            # Fix: `is not None` instead of `!= None`, and items() (works on
            # both Python 2 and 3) instead of the Python-2-only iteritems().
            for field, value in args.items():
                if value is not None:
                    setattr(user, field, value)
            db.session.commit()
            return user, 201
        else:
            abort(404, message='User {} not found'.format(user_id))
class UserListApi(Resource):
    """REST resource for the user collection: GET (list all) and POST (create)."""

    def __init__(self):
        # name/password/privilege are mandatory on POST; description is optional.
        self.parser = reqparse.RequestParser()
        for arg_name, arg_type, mandatory in (('name', str, True),
                                              ('password', str, True),
                                              ('privilege', int, True),
                                              ('description', str, False)):
            self.parser.add_argument(arg_name, type=arg_type,
                                     required=mandatory, location='json')
        super(UserListApi, self).__init__()

    def get(self):
        """Return every user, or 404 when the table is empty."""
        users = User.query.all()
        if not users:
            abort(404, message='No User at all')
        return [marshal(u, user_fields) for u in users]

    @marshal_with(user_fields)
    def post(self):
        """Create a new user from the parsed JSON body and return it (201)."""
        args = self.parser.parse_args()
        user = User(args['name'], args['password'],
                    args['privilege'], args['description'])
        db.session.add(user)
        db.session.commit()
        return user, 201
# URL routing: collection endpoint and single-user endpoint.
api.add_resource(UserListApi, '/api/v1/users', endpoint='userList')
api.add_resource(UserApi, '/api/v1/users/<user_id>', endpoint='user')
| {
"repo_name": "njuwangchen/TRMSdemo",
"path": "app/views.py",
"copies": "1",
"size": "2977",
"license": "mit",
"hash": 4194409517529127000,
"line_mean": 33.2183908046,
"line_max": 87,
"alpha_frac": 0.5975814578,
"autogenerated": false,
"ratio": 3.6889714993804215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9764582449678025,
"avg_score": 0.004394101500479242,
"num_lines": 87
} |
__author__ = 'Claude'
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from Web.views import is_admin
from Web.models import CreateApplication
from Web.models import DeleteApplication
from Web.models import PortApplication
from Web.models import OperationRecord
@login_required
def applications_view(request):
    """Render the current user's create/delete/port applications plus their
    operation history.

    The template receives the whole local namespace via locals(), so every
    local name here (including the *_list querysets) is part of the template
    contract -- do not rename without checking applications.html.
    """
    is_administer = is_admin(request.user)
    create_application_list = CreateApplication.objects.filter(applicant=request.user)
    create_applications = list()
    for create_application in create_application_list:
        create_applications.append(create_application.application_info())
    delete_application_list = DeleteApplication.objects.filter(applicant=request.user)
    delete_applications = list()
    for delete_application in delete_application_list:
        delete_applications.append(delete_application.application_info())
    port_application_list = PortApplication.objects.filter(applicant=request.user)
    port_applications = list()
    for port_application in port_application_list:
        port_applications.append(port_application.application_info())
    operation_list = OperationRecord.objects.filter(user=request.user)
    operations = list()
    for operation in operation_list:
        operations.append(operation.get_info())
    return render_to_response('applications.html', locals())
| {
"repo_name": "ClaudeZoo/iDashBoardWeb",
"path": "Web/application_views.py",
"copies": "2",
"size": "1389",
"license": "unlicense",
"hash": 4963300685352538000,
"line_mean": 41.0909090909,
"line_max": 86,
"alpha_frac": 0.7717782577,
"autogenerated": false,
"ratio": 4.30030959752322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6072087855223219,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Claude'
from django.utils import timezone
from django.shortcuts import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from Web.models import VM
from Web.models import MachineInfo
from Web.models import CreateApplication
from Web.models import DeleteApplication
def handle_notification(request):
    """Receive a host-manager callback reporting the outcome of a request.

    Currently only request_type == 'new' (VM creation) is handled: on
    success a MachineInfo + VM record is created; on failure the error text
    is stored on the application.  Returns an empty response when the
    referenced application no longer exists.
    NOTE(review): other request types fall through and return None
    (HTTP 500 under Django) -- confirm that is intended.
    """
    print(request.body)
    request_id = request.POST.get('request_id', '')
    request_type = request.POST.get('request_type', '')
    request_result = request.POST.get('request_result', '')
    port = request.POST.get('port', '')
    print("%s %s %s" % (request_id, request_type, request_result))
    if request_type == 'new':
        try:
            create_application = CreateApplication.objects.get(id=request_id)
            create_application.state = request_result
            if request_result == 'success':
                info = MachineInfo(last_connect_time=timezone.now(), wan_ip=create_application.host.info.wan_ip,
                                   ssh_port=port, os_info=create_application.os)
                info.save()
                vm = VM(info=info, state='Offline', user=create_application.applicant, host=create_application.host,
                       uuid=request.POST.get('vm_uuid', ''), name=request.POST.get('vm_name', ''), os=create_application.os,
                       memory=create_application.memory, vm_type=create_application.vm_type)
                vm.save()
            else:
                create_application.error = request.POST.get('error_information', '')
            # Persist the new state (and error text, if any) either way.
            create_application.save()
            return HttpResponse('hehe')
        except ObjectDoesNotExist:
            return HttpResponse('')
"repo_name": "ClaudeZoo/iDashBoardWeb",
"path": "Web/notification.py",
"copies": "2",
"size": "1685",
"license": "unlicense",
"hash": 8508097473920156000,
"line_mean": 47.1714285714,
"line_max": 125,
"alpha_frac": 0.6338278932,
"autogenerated": false,
"ratio": 4.2125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.58463278932,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Claude'
import datetime
import json
from django.shortcuts import render
from django.shortcuts import HttpResponse
from django.shortcuts import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from Web.models import VM
from Web.models import Host
@login_required
def refresh_homepage(request):
    # Placeholder endpoint -- not implemented yet (returns None / HTTP 500
    # if actually routed to).
    pass
@login_required
def refresh_vms(request):
    """Return the current VM status list as JSON under the 'data' key."""
    vm_info_list = get_vm_info_list()
    response = dict(data=vm_info_list)
    # NOTE(review): this puts 'Access-Control-Allow-Origin' inside the JSON
    # body, not in the HTTP response headers -- it does not enable CORS.
    # Confirm whether the frontend expects it in the payload.
    response['Access-Control-Allow-Origin'] = '*'
    return HttpResponse(json.dumps(response))
@login_required
def refresh_hosts(request):
    """Return the current host status list as JSON under the 'data' key."""
    host_info_list = get_host_info_list()
    response = dict(data=host_info_list)
    # NOTE(review): placed in the JSON body, not an HTTP header (see
    # refresh_vms) -- does not actually enable CORS.
    response['Access-Control-Allow-Origin'] = '*'
    return HttpResponse(json.dumps(response))
def get_vm_info_list():
    """Build a list of status dicts for every non-deleted VM.

    A VM counts as offline when its info record has not been updated in the
    last 60 seconds.  Disk/memory/CPU percentages fall back to '0%' when the
    raw report strings cannot be parsed.
    NOTE(review): `except Exception, e` is Python-2-only syntax; `e` is unused.
    """
    t = datetime.datetime.now()
    t -= datetime.timedelta(seconds=60)
    vms = VM.objects.exclude(state='deleted')
    vm_info_list = []
    for vm in vms:
        info = vm.info
        try:
            disk_used = get_disk_used(info.disk)
            # memory string: token 0 = total, token 1 = used (both like '123k')
            # -- assumed from the arithmetic here; TODO confirm report format.
            memory = str(int(float(info.memory.split()[1].rstrip('k')) /
                             float(info.memory.split()[0].rstrip('k')) * 100 + 0.5)) + '%'
            cpu = str(int(100 - float(info.percent_cpu.split()[3].split('%')[0]) + 0.5)) + '%'
        except Exception, e:
            disk_used = '0%'
            memory = '0%'
            cpu = '0%'
        finally:
            if info.last_connect_time < t:
                state = "offline"
            else:
                state = 'online'
            info_dict = dict(state=state, ip=info.lan_ip, wan_ip=info.wan_ip, port=info.ssh_port,
                             os=info.os_info[0:-8], vm_name=vm.name, Disk=disk_used, info_id=info.id,
                             Memory=memory, CPU=cpu, id=vm.id)
            vm_info_list.append(info_dict)
    return vm_info_list
def get_disk_used(disk_info):
    """Return a VM's overall disk usage as a percentage string (e.g. '42%').

    *disk_info* is a whitespace-separated report assumed to contain 7 header
    tokens followed by 6 tokens per disk, where token 2 of each group is the
    total size and token 3 the used amount ('12K'/'3M'/'1G' style) -- TODO
    confirm against the agent's report format.  Returns None implicitly for
    reports that do not match the expected token count (original behaviour).
    """
    disk_list = disk_info.split()
    used = 0
    size = 0
    if (len(disk_list) - 7) % 6 == 0:
        # Floor division keeps this an int on Python 3 as well
        # (`/` was only correct under Python 2).
        disk_number = (len(disk_list) - 7) // 6
        # Fix: include the last disk group -- the range previously stopped at
        # `disk_number`, silently dropping one disk; the host-side twin
        # get_host_disk_used already iterates to `disk_number + 1`.
        for index in range(1, disk_number + 1):
            size_info = disk_list[index*6 + 2]
            size = calculate_size(size_info, size)
            used_info = disk_list[index*6 + 3]
            used = calculate_size(used_info, used)
        if not size:
            # Guard against empty/malformed reports instead of raising
            # ZeroDivisionError.
            return '0%'
        return str(int(100 * used / size + 0.5)) + '%'
def get_host_info_list():
    """Build a list of status dicts for every host whose state is 'Online'.

    A host counts as Offline when its info record has not been updated in
    the last 60 seconds; parse failures degrade to '0%' metrics.
    """
    t = datetime.datetime.now()
    t -= datetime.timedelta(seconds=60)
    hosts = Host.objects.filter(state="Online")
    host_info_list = []
    for host in hosts:
        info = host.info
        try:
            disk_used = get_host_disk_used(info.disk)
            # Same memory/cpu string arithmetic as get_vm_info_list --
            # assumed token layout; TODO confirm report format.
            memory = str(int(float(info.memory.split()[1].rstrip('k'))
                             / float(info.memory.split()[0].rstrip('k')) * 100 + 0.5)) + '%'
            cpu = str(int(100 - float(info.percent_cpu.split()[3].split('%')[0]) + 0.5)) + '%'
        except Exception:
            disk_used = '0%'
            memory = '0%'
            cpu = '0%'
        finally:
            if info.last_connect_time < t:
                state = "Offline"
            else:
                state = 'Online'
            info_dict = dict(state=state, ip=info.lan_ip, wan_ip=info.wan_ip, os=info.os_info[0:-8],
                             Disk=disk_used, Memory=memory, CPU=cpu, id=host.id, info_id=info.id,)
            host_info_list.append(info_dict)
    return host_info_list
def get_host_disk_used(disk_info):
    """Return a host's overall disk usage as a percentage string (e.g. '42%').

    Same report layout as get_disk_used: 7 header tokens then 6 tokens per
    disk (token 2 = size, token 3 = used).  Returns None implicitly for
    reports that do not match the expected token count (original behaviour).
    """
    disk_list = disk_info.split()
    used = 0
    size = 0
    if (len(disk_list) - 7) % 6 == 0:
        # Fix: floor division so this stays an int on Python 3 as well
        # (`/` was only correct under Python 2, where it already floored).
        disk_number = (len(disk_list) - 7) // 6
        for index in range(1, disk_number+1):
            size_info = disk_list[index*6 + 2]
            size = calculate_size(size_info, size)
            used_info = disk_list[index*6 + 3]
            used = calculate_size(used_info, used)
        if not size:
            # Guard against empty/malformed reports instead of raising
            # ZeroDivisionError.
            return '0%'
        return str(int(100 * used / size + 0.5)) + '%'
def calculate_size(info, size):
    """Add the size token *info* ('12K', '3M', '1G' ...) to the running
    total *size*, expressed in KiB, and return the new total.

    Tokens with an unrecognized (or missing) unit suffix leave the total
    unchanged.
    """
    multipliers = {'K': 1, 'M': 1024, 'G': 1024 * 1024}
    unit = info[-1]
    if unit in multipliers:
        size += float(info[:-1]) * multipliers[unit]
    return size
| {
"repo_name": "littlepig2013/iDashBoardWeb",
"path": "Web/vm_views.py",
"copies": "2",
"size": "4180",
"license": "unlicense",
"hash": 2763241019495619000,
"line_mean": 32.44,
"line_max": 101,
"alpha_frac": 0.5476076555,
"autogenerated": false,
"ratio": 3.3682514101531025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4915859065653102,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Claude'
import json
from django.shortcuts import HttpResponse
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from Web.views import is_admin
from Web.models import CreateApplication
from Web.models import DeleteApplication
from Web.models import PortApplication
from Web.create_vm_views import create_vm
from Web.delete_vm_views import delete_vm
from Web.nat_views import execute_nat
@login_required
def audit_view(request):
    """Render all pending create/delete/port applications for review.

    The template receives the entire local namespace via locals(); the
    *_num counts and *_applications lists are part of the template contract.
    """
    is_administer = is_admin(request.user)
    create_application_list = CreateApplication.objects.filter(state='pending')
    create_application_num = create_application_list.count()
    create_applications = list()
    for create_application in create_application_list:
        create_applications.append(create_application.audit_info())
    delete_application_list = DeleteApplication.objects.filter(state='pending')
    delete_application_num = delete_application_list.count()
    delete_applications = list()
    for delete_application in delete_application_list:
        delete_applications.append(delete_application.audit_info())
    port_application_list = PortApplication.objects.filter(state='pending')
    port_application_num = port_application_list.count()
    port_applications = list()
    for port_application in port_application_list:
        port_applications.append(port_application.audit_info())
    return render_to_response('audit.html', locals())
def approve_single_creation(request):
    """Approve one pending CreateApplication: dispatch it via create_vm and
    record the reviewer.  Returns '1' on success, '0' if dispatch failed."""
    application_id = int(request.POST.get('id', ''))
    application = create_vm(application_id)
    if application:
        application.reviewer = request.user
        application.save()
        return HttpResponse('1')
    else:
        return HttpResponse('0')
def refuse_single_creation(request):
    """Reject one CreateApplication and record the reviewer."""
    application_id = int(request.POST.get('id', ''))
    application = CreateApplication.objects.get(id=application_id)
    application.state = 'rejected'
    application.reviewer = request.user
    application.save()
    return HttpResponse("1")
def approve_all_creation(request):
    """Approve a JSON-encoded list of CreateApplication ids.

    NOTE(review): unlike approve_single_creation this does not set
    application.reviewer -- confirm whether that is intended.
    """
    application_list = json.loads(request.body.decode())
    for application_id in application_list:
        create_vm(application_id)
    return HttpResponse('1')
def approve_single_delete(request):
    """Approve one pending DeleteApplication via delete_vm and record the reviewer.

    NOTE(review): the id is passed as a string (the sibling handlers cast it
    to int) -- confirm delete_vm accepts a string id.
    """
    application_id = request.POST.get('id', '')
    application = delete_vm(application_id)
    if application:
        application.reviewer = request.user
        application.save()
        return HttpResponse('1')
    else:
        return HttpResponse('0')
def refuse_single_delete(request):
    """Reject one DeleteApplication and record the reviewer."""
    application_id = int(request.POST.get('id', ''))
    application = DeleteApplication.objects.get(id=application_id)
    application.state = 'rejected'
    application.reviewer = request.user
    application.save()
    return HttpResponse("1")
def approve_all_delete(request):
    """Approve a JSON-encoded list of DeleteApplication ids.

    NOTE(review): does not set application.reviewer, unlike the single-item
    handler -- confirm whether that is intended.
    """
    application_list = json.loads(request.body.decode())
    for application_id in application_list:
        delete_vm(application_id)
    return HttpResponse('1')
def approve_single_nat(request):
    """Approve one pending PortApplication via execute_nat and record the
    reviewer.  Returns '1' on success, '0' if the application is missing."""
    application_id = int(request.POST.get('id', ''))
    application = execute_nat(application_id)
    if application:
        application.reviewer = request.user
        application.save()
        return HttpResponse("1")
    else:
        return HttpResponse("0")
def refuse_single_nat(request):
    """Reject one PortApplication and return its reserved host port to the
    host's free-port pool (apply_nat moved it free -> used at request time)."""
    application_id = int(request.POST.get('id', ''))
    application = PortApplication.objects.get(id=application_id)
    ports = json.loads(application.host.ports_info)
    ports['free'].append(application.host_port)
    ports['used'].remove(application.host_port)
    application.host.ports_info = json.dumps(ports)
    application.host.save()
    application.state = 'rejected'
    application.reviewer = request.user
    application.save()
    return HttpResponse("1")
def approve_all_nat(request):
    # Not implemented yet -- bulk NAT approval placeholder.
    pass
"repo_name": "ClaudeZoo/iDashBoardWeb",
"path": "Web/audit_views.py",
"copies": "2",
"size": "3890",
"license": "unlicense",
"hash": 1983228150465842400,
"line_mean": 31.9745762712,
"line_max": 79,
"alpha_frac": 0.7151670951,
"autogenerated": false,
"ratio": 4.052083333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5767250428433333,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Claude'
import json
from django.shortcuts import HttpResponse
from django.shortcuts import render_to_response
from django.shortcuts import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from Web.models import Host
from Web.models import CreateApplication
from Web.communication import communicate
from Web.views import is_admin
@login_required
def apply_new_vm(request):
    """Create a pending CreateApplication from the submitted form.

    Renders apply.html (template context is locals(), so the local names
    here are part of the template contract); redirects back to /apply/ when
    the mandatory fields are missing.
    """
    if request.POST.get('vm_type', '') and request.POST.get('os', '') and request.POST.get('memory', ''):
        os = request.POST.get('os', '')
        memory = int(request.POST.get('memory', ''))
        vm_type = request.POST.get('vm_type', '')
        reason = request.POST.get('reason', '')
        application = CreateApplication(applicant=request.user, vm_type=vm_type, os=os, memory=memory, reason=reason,
                                        state='pending')
        is_administer = is_admin(request.user)
        has_applied = 1
        application.save()
        return render_to_response('apply.html', locals())
    else:
        return HttpResponseRedirect('/apply/')
def create_vm(application_id):
    """Dispatch a CreateApplication to the online host with the most free ports.

    Reserves one free port on the chosen host and asks its manager daemon to
    create the VM.  Returns the (saved) application, or None when the
    application id does not exist.
    """
    host_list = Host.objects.filter(state='Online')
    try:
        application = CreateApplication.objects.get(id=application_id)
        if host_list.count() > 0:
            # Pick the host with the largest number of free ports.
            i, max_length, max_index = 0, 0, 0
            while i < host_list.count():
                ports = json.loads(host_list[i].ports_info)
                if len(ports["free"]) > max_length:
                    max_index = i
                    max_length = len(ports["free"])
                i += 1
            if max_length > 0:
                host = host_list[max_index]
                ports = json.loads(host.ports_info)
                # Reserve the first free port for the new VM.
                port = ports["free"][0]
                ports["free"].remove(port)
                ports["used"].append(port)
                request_dict = dict(request_id=application_id, request_type='new', port=port,
                                    request_userid=application.applicant.id, request_memory=application.memory)
                response = communicate(request_dict, host.ip, host.vm_manager_port)
                if response and response['request_response'] == 'received':
                    application.state = 'In line'
                    application.host = host
                    # NOTE(review): the port reservation is only persisted via
                    # host.save() in the no-response branch below -- confirm
                    # whether the success path should save the host as well.
                elif not response:
                    # Host unreachable: give the reserved port back.
                    ports["free"].append(port)
                    ports["used"].remove(port)
                    # Fix: the original read response['request_response'] here,
                    # but this branch only runs when response is falsy, so it
                    # always raised TypeError.
                    application.state = 'network_error'
                    host.ports_info = json.dumps(ports)
                    host.save()
            else:
                application.state = 'error'
                application.error = 'Run of out port'
        else:
            application.state = 'error'
            application.error = 'No host'
        application.save()
        return application
    except ObjectDoesNotExist:
        return None
| {
"repo_name": "ClaudeZoo/iDashBoardWeb",
"path": "Web/create_vm_views.py",
"copies": "2",
"size": "2975",
"license": "unlicense",
"hash": 7199266943134210000,
"line_mean": 40.9014084507,
"line_max": 117,
"alpha_frac": 0.5757983193,
"autogenerated": false,
"ratio": 4.407407407407407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5983205726707408,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Claude'
import json
from django.shortcuts import render_to_response
from django.shortcuts import HttpResponse
from django.contrib.auth.decorators import login_required
from Web.views import is_admin
from Web.models import MachineInfo
@login_required
def detail_view(request):
    """Render the machine-detail page (template context is locals())."""
    is_administer = is_admin(request.user)
    return render_to_response('detail.html', locals())
@login_required
def get_detail(request, vm_id):
    """Return a JSON blob of detailed metrics for the MachineInfo *vm_id*.

    The process table is parsed out of the stored `process` text (one `top`
    style row per line, 12+ columns assumed -- TODO confirm agent format).
    NOTE(review): `except Exception, e` is Python-2-only syntax, and parse
    errors are echoed back to the client via HttpResponse(e).
    """
    if request.user.is_authenticated():
        vmDetail = {}
        try:
            vm = MachineInfo.objects.filter(id=vm_id)
            if len(vm) != 0:
                vmDetail = {'data': vm_id, 'Access-Control-Allow-Origin': '*'}
                vmDetail['uName'] = vm[0].os_info[0:-8]
                vmDetail['cpuInfo'] = vm[0].cpu_info
                vmDetail['memory'] = vm[0].memory
                vmDetail['memory_swap'] = vm[0].swap
                vmDetail['cpuLoad'] = vm[0].percent_cpu
                vmDetail['tasks'] = vm[0].tasks
                vmDetail['userName'] = vm[0].username
                vmDetail['ipv4'] = vm[0].inet4
                vmDetail['ipv6'] = vm[0].inet6
                vmDetail['broadcast'] = vm[0].broadcast
                vmDetail['mask'] = vm[0].mask
                vmDetail['dns'] = vm[0].dns
                vmDetail['process'] = []
                if len(vm[0].process) != 0:
                    process = vm[0].process.split("\n")
                    pinfodic = {}
                    for p in process:
                        pinfo = p.split()
                        # Stop at the first row that is too short to parse.
                        if len(pinfo) < 12:
                            break
                        pinfodic['PID'] = pinfo[0]
                        pinfodic['USER'] = pinfo[1]
                        pinfodic['cpu'] = pinfo[8]
                        pinfodic['mem'] = pinfo[9]
                        pinfodic['cmd'] = pinfo[11]
                        vmDetail['process'].append(pinfodic.copy())
            else:
                vmDetail = {'IPAddress': [], 'stateInfo': []}
        except Exception, e:
            print e
            return HttpResponse(e)
        return HttpResponse(json.dumps(vmDetail))
    return render_to_response('index.html', locals())
| {
"repo_name": "littlepig2013/iDashBoardWeb",
"path": "Web/detail_views.py",
"copies": "2",
"size": "2213",
"license": "unlicense",
"hash": 5237553236311738000,
"line_mean": 38.5178571429,
"line_max": 78,
"alpha_frac": 0.4993221871,
"autogenerated": false,
"ratio": 4.016333938294011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 56
} |
__author__ = 'Claude'
import json
from django.utils import timezone
from django.shortcuts import HttpResponse
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from Web.models import Host
from Web.models import VM
from Web.models import PortApplication
from Web.communication import communicate
@login_required
def apply_nat(request):
    """Create a pending PortApplication for a NAT rule on the caller's VM.

    The requested host port is reserved (moved free -> used on the host)
    immediately at application time; refuse_single_nat returns it on reject.
    Returns '1' on success, '0' for unknown VMs or VMs owned by other users.
    NOTE(review): if host_port is not currently in the free list,
    list.remove raises ValueError and is not caught here -- confirm the
    frontend only offers ports from get_free_ports.
    """
    user = request.user
    print(request.body)
    vm_uuid = request.POST.get('uuid', '')
    protocol = request.POST.get('protocol', '')
    vm_port = int(request.POST.get('vm_port', ''))
    host_port = int(request.POST.get('host_port', ''))
    try:
        vm = VM.objects.get(uuid=vm_uuid)
        if vm.user == user:
            application = PortApplication(applicant=user, vm=vm, host=vm.host, protocol=protocol,
                                          vm_port=vm_port, host_port=host_port, state='pending')
            ports = json.loads(vm.host.ports_info)
            ports['free'].remove(host_port)
            ports['used'].append(host_port)
            vm.host.ports_info = json.dumps(ports)
            vm.host.save()
            application.save()
            return HttpResponse('1')
        else:
            return HttpResponse('0')
    except ObjectDoesNotExist:
        return HttpResponse('0')
def execute_nat(application_id):
    """Ask the VM's host manager to install an approved NAT rule.

    On success the rule is appended to the VM's stored nat_rules JSON list.
    Returns the (saved) application, or None when the id does not exist.
    """
    try:
        application = PortApplication.objects.get(id=application_id)
        host = application.vm.host
        request_dict = dict(request_id=application_id, request_type='add_nat_rule',
                            request_userid=application.applicant.id, protocol=application.protocol,
                            host_port=application.host_port, guest_port=application.vm_port,
                            vm_name=application.vm.name, vm_uuid=application.vm.uuid)
        response = communicate(request_dict, host.ip, host.vm_manager_port)
        if response and response['request_result'] == 'success':
            application.state = 'success'
            # Record the new rule on the VM.
            nat_rules = json.loads(application.vm.nat_rules)
            rule = dict(host_port=application.host_port, guest_port=application.vm_port, protocol=application.protocol)
            nat_rules.append(json.dumps(rule))
            application.vm.nat_rules = json.dumps(nat_rules)
            application.vm.save()
        elif response:
            # Fix: this branch was `elif not response:` and then indexed the
            # falsy response, always raising TypeError; a present-but-failed
            # response is the one that carries the failure details.
            application.state = response['request_result']
            application.error = response['error_information']
        else:
            # No response at all: the host could not be reached.
            application.state = 'network_error'
        application.save()
        return application
    except ObjectDoesNotExist:
        return None
def get_free_ports(request):
    """Return, as a JSON array, the free ports of the host given by ?host=<id>."""
    host = Host.objects.get(id=request.GET['host'])
    free_ports = json.loads(host.ports_info)["free"]
    return HttpResponse(json.dumps(free_ports))
| {
"repo_name": "littlepig2013/iDashBoardWeb",
"path": "Web/nat_views.py",
"copies": "2",
"size": "2872",
"license": "unlicense",
"hash": -9195149607707513000,
"line_mean": 40.0285714286,
"line_max": 119,
"alpha_frac": 0.6368384401,
"autogenerated": false,
"ratio": 4.085348506401138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5722186946501138,
"avg_score": null,
"num_lines": null
} |
__author__ = 'claudia'
from config import wmata
from math import hypot
from multiprocessing import Pool
def f_range(mini, maxi, step):
    """Yield mini, mini+step, mini+2*step, ... while the value stays below maxi.

    Works with floats as well as ints (unlike range()).
    """
    current = mini
    while current < maxi:
        yield current
        current = current + step
class DistanceCalculations:
    """Score every point of a lat/lon grid by proximity to WMATA stations.

    The grid spans [lat1, lat2) x [lon1, lon2) at resolution *res*; scoring
    is farmed out across 9 worker processes in point_map().
    """
    def __init__(self, lat1, lat2, lon1, lon2, res):
        self.lat1, self.lat2 = lat1, lat2
        self.lon1, self.lon2 = lon1, lon2
        self.res = res
        print(self.lat1, self.lat2, self.lon1, self.lon2, self.res)
        self.latlist = list(f_range(self.lat1, self.lat2, self.res))
        self.lonlist = list(f_range(self.lon1, self.lon2, self.res))
    def find_dist(self, stat, lata, longa):
        # Euclidean distance in raw degrees (no great-circle correction).
        dx = stat.location.lon - longa
        dy = stat.location.lat - lata
        return hypot(dx,dy)
    def find_add(self, line, stat, lata, longa):
        # Inverse-square contribution of one station, scaled by train
        # frequency (calc_trainfreq is the module-level stub below).
        wa = self.find_dist(stat, lata, longa)
        const = 1 / calc_trainfreq(line)
        return const/(wa**2)
    def point_calc(self, lata, longa):
        """Sum the contributions of every station on every line for one point."""
        i = 0.0
        for s in wmata.lines.all:
            for staa in wmata.lines[s].stations:
                i += self.find_add(s, staa, lata, longa)
        # print("calculated (" + str(lata) + "," + str(longa) + ")")
        return i
    def point_map(self):
        """Compute the full grid; returns {lat: {lon: score}}.

        Blocks on input() for confirmation before spawning the pool.
        """
        print("lat length " + str(len(self.latlist)))
        print("lon length " + str(len(self.lonlist)))
        print(len(self.latlist) * len(self.lonlist))
        input("Do this many points across 9 processors? (ctrl-C if u r scared) ")
        pool = Pool(9)
        single_lat_dict_list = pool.map(self.singlelat, self.latlist)
        return dict(zip(self.latlist, single_lat_dict_list))
    def singlelat(self, lat):
        """Score one full row of longitudes for the given latitude."""
        x = {lon: self.point_calc(lat, lon) for lon in self.lonlist}
        print("lat done: " + str(lat))
        return x
def calc_trainfreq(m):
    # Stub: train frequency per line is not modelled yet; every line
    # contributes equally.
    return 1
def samplerunbeltway():
    """Score the area inside the Beltway at 0.001-degree resolution.

    NOTE(review): the variable names look swapped -- values near 39 are
    latitudes but are stored in *_lon, and -76/-77 (longitudes) in *_lat.
    The DistanceCalculations call is consistent with this swap, so results
    may still be coherent; confirm before renaming.
    """
    max_lon = 39.008953
    min_lon = 38.823838
    max_lat = -76.872716
    min_lat = -77.204129
    resi = .001
    distance_calc_obj = DistanceCalculations(min_lat, max_lat, min_lon, max_lon, resi)
    return distance_calc_obj.point_map()
def sampleruninnercity():
    """Score the inner-city area at 0.001-degree resolution.

    NOTE(review): same lat/lon naming swap as samplerunbeltway; also
    min_lat = -77.982933 spans a full degree from max_lat (-76.982933) --
    possibly a typo for -77.0x; confirm.
    """
    max_lon = 38.951372
    min_lon = 38.869972
    max_lat = -76.982933
    min_lat = -77.982933
    resi = .001
    distance_calc_obj = DistanceCalculations(min_lat, max_lat, min_lon, max_lon, resi)
    return distance_calc_obj.point_map()
def sampleruntiny():
    """Score a small downtown test area at 0.001-degree resolution.
    (Same lat/lon naming swap as the other sample runs.)"""
    max_lon = 38.917349
    min_lon = 38.875307
    max_lat = -77.003089
    min_lat = -77.051867
    resi = .001
    distance_calc_obj = DistanceCalculations(min_lat, max_lat, min_lon, max_lon, resi)
    return distance_calc_obj.point_map()
def samplerunfullcity():
    """Score the full metro area at 0.001-degree resolution.
    (Same lat/lon naming swap as the other sample runs.)"""
    max_lon = 39.136673
    min_lon = 38.755377
    max_lat = -76.829635
    min_lat = -77.288315
    resi = .001
    distance_calc_obj = DistanceCalculations(min_lat, max_lat, min_lon, max_lon, resi)
    return distance_calc_obj.point_map()
if __name__ == '__main__':
    # Run the largest sample and dump one "lat,lon,score" row per grid
    # point to data.csv.
    data = samplerunfullcity()
    csvdata = []
    for lat in data:
        for lon in data[lat]:
            csvdata.append((lat, lon, data[lat][lon]))
    stringydata = "\n".join([",".join(list(map(str, i))) for i in csvdata])
    with open("data.csv", "w") as f:
        f.write(stringydata)
    print("Done")
"repo_name": "fwilson42/dchacks2015",
"path": "scripts/algo-old.py",
"copies": "1",
"size": "3267",
"license": "mit",
"hash": 5613251609260047000,
"line_mean": 28.9816513761,
"line_max": 86,
"alpha_frac": 0.595959596,
"autogenerated": false,
"ratio": 3.0390697674418603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41350293634418606,
"avg_score": null,
"num_lines": null
} |
__author__ = 'claudia'
from config import wmata
import json
# Build a combined JSON export of rail stations and bus stops, each with a
# weight used by the downstream heat-map.
data = {
    "metrobus": [],
    "metrorail": []
}
for line in wmata.lines.all:
    for staa in wmata.lines[line].stations:
        # Hard-coded headways in minutes per line: Red 4.5, Blue 12,
        # everything else 6.
        if line == 'RD':
            headway = 4.5
        elif line == 'BL':
            headway = 12
        else:
            headway = 6
        data["metrorail"].append({
            "lat": staa.location.lat,
            "lon": staa.location.lon,
            "headway": headway,
            "weight": 10/headway,
            "line": line,
            "name": staa.name,
            "station_code": staa.station_code
        })
# Bus stops come from a local WMATA API dump.
# NOTE(review): the file handle from open() is never closed here.
bus_stops = json.loads(open("bus-stops.json", "r").read())
bus_stops = bus_stops["Stops"]
for stop in bus_stops:
    data["metrobus"].append({
        "lat": float(stop["Lat"]),
        "lon": float(stop["Lon"]),
        # NOTE(review): under Python 2 len(...)/8 is integer division and
        # floors to 0 for stops with fewer than 8 routes -- confirm whether
        # a float weight was intended.
        "weight": len(stop["Routes"])/8,
        "routes": stop["Routes"],
        "name": stop["Name"],
        "stop_id": stop["StopID"]
    })
# Write the export twice: once into gen/ and once beside the script.
jsonexp = json.dumps(data)
print("Converted")
with open("gen/export.json", "w") as f:
    f.write(jsonexp)
with open("export.json", "w") as f:
    f.write(jsonexp)
print("Done")
"repo_name": "fwilson42/dchacks2015",
"path": "scripts/algo.py",
"copies": "1",
"size": "1155",
"license": "mit",
"hash": -8533517890982304000,
"line_mean": 24.6888888889,
"line_max": 58,
"alpha_frac": 0.5194805195,
"autogenerated": false,
"ratio": 3.1385869565217392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41580674760217395,
"avg_score": null,
"num_lines": null
} |
__author__ = 'claudia'
import markovmodels as mm
import re
import string
#from bs4 import BeautifulSoup
#import requests
# Module-level side effects: connect to the markovmodels database and
# (re)create the schema as soon as this module is imported.
data = mm.db
data.connect()
data.create_tables([mm.Subject, mm.Word, mm.WordLink])
print("created tables")
#def import_wiki(url):
# file_orig = open("temp_orig.txt", 'w')
# file_x = open("temp_x.txt", 'w')
# html_orig = requests.get(url).text
# bs_orig = BeautifulSoup(html_orig)
# article_text = bs_orig.get_text()
# file_orig.write(article_text)
# file_orig.close()
# links = bs_orig.findAll('a', href=re.compile(r'/wiki/*'))
# second_parts = [a['href'] for a in links]
# for x in second_parts:
# url_x = "http://wikipedia.org" + x
# html_x = requests.get(url_x).text
# bs_x = BeautifulSoup(html_x)
# x_text = bs_x.get_text()
# file_x.write(x_text)
# file_x.close()
# return "temp_orig.txt", "temp_x.txt"
def populate_list(text_file):
    """Read *text_file* and return its whitespace-separated tokens,
    lower-cased with all punctuation and digits stripped.

    Tokens consisting purely of punctuation/digits come back as empty
    strings (matching the original behaviour).
    """
    # Matches every punctuation character and digit; hoisted out of the loop.
    scrub = re.compile('[%s]' % re.escape(string.punctuation + string.digits))
    list_in = []
    # Fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(text_file) as f:
        for line in f:
            for word in line.split():
                list_in.append(scrub.sub('', word).lower())
    print("populated list from file")
    return list_in
def build_chain(list_in, weight, chained):
    """Accumulate weighted bigram counts from *list_in* into *chained*.

    *chained* maps word -> {next_word: total_weight}; each adjacent pair in
    *list_in* adds *weight* to its entry.  Mutates *chained* in place.
    """
    for current_word, next_word in zip(list_in, list_in[1:]):
        followers = chained.setdefault(current_word, {})
        followers[next_word] = followers.get(next_word, 0) + weight
    print("built chain")
class SubjectChain:
    """Markov chain for one subject, built from the local 'inputdata.txt'.

    NOTE(review): `chain` is a CLASS attribute, so every SubjectChain
    instance shares (and accumulates into) the same dict -- confirm whether
    it should be per-instance.  `main_wiki` is accepted but unused.
    """
    # Shared across all instances (see class docstring).
    chain = {}
    def __init__(self, subject, main_wiki):
        self.subj = subject
        build_chain(populate_list("inputdata.txt"), 1, self.chain)
def chain_into_database(ch):
    """Persist a SubjectChain's bigram weights as Word/WordLink rows.

    NOTE(review): the subject name is hard-coded to "Cyclotron", and the
    counter `n` is incremented but never used -- confirm intent.
    """
    sub = mm.Subject.create(name="Cyclotron")
    n = 0
    h = ch.chain.keys()
    for w in h:
        n += 1
        base = mm.get_word(w, sub)
        for cle in ch.chain[w].keys():
            wt = ch.chain[w][cle]
            su = mm.get_word(cle, sub)
            mm.WordLink.create(from_word=base, suggestion=su, weight=wt)
| {
"repo_name": "laudiacay/noteorious",
"path": "src/buildmarkov.py",
"copies": "1",
"size": "2154",
"license": "mit",
"hash": 1687796155896059000,
"line_mean": 27.3421052632,
"line_max": 106,
"alpha_frac": 0.5849582173,
"autogenerated": false,
"ratio": 2.9751381215469612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9050628861926568,
"avg_score": 0.0018934953840787148,
"num_lines": 76
} |
__author__ = 'Claudio'
import YamlHandler
class HTMLBuilder(YamlHandler.YamlHandler):
    """ The HTML Builder handles the skeleton: it loads the YAML skeleton
    via the YamlHandler base class and caches it; self.init tracks whether
    the cache holds a usable skeleton.
    """
    def __init__(self, path, file):
        YamlHandler.YamlHandler.__init__(self, path, file)
        self.__store_skeleton()
        # NOTE(review): __store_skeleton() just set init = True; it is reset
        # here and re-derived from the loaded skeleton -- confirm this
        # back-and-forth is intended.
        self.init = False
        if self.skeleton:
            self.init = True
    def __store_skeleton(self):
        # Cache the YAML document loaded by the base class.
        self.skeleton = self.get()
        self.init = True
    def refresh(self):
        # Re-load the skeleton only if it was never successfully stored.
        if not self.init:
            self.__store_skeleton()
    # NOTE(review): the string below is dead code kept as a reference for
    # the eventual HTML-assembly logic; it is evaluated as a bare class-level
    # string and discarded.
    """
    skeleton = get_skeleton(config['skeleton_path'], config['skeleton_file'])
    html_complete = ''.join(skeleton['default'])
    for css_object in skeleton['cssObjects']:
        html_complete = ''.join([html_complete, css_object])
    html_complete = append_politics(html_complete, skeleton['politics'])
    html_complete = assemble_description(html_complete, skeleton['description'],
                                         description, banner_list, header_complete)
    html_complete = gallery(html_complete, skeleton['gallery'], image_gallery, skeleton['remover'])
    if photo_gallery:
        html_complete = gallery(html_complete, skeleton['photos'], photo_gallery, skeleton['remover'])
    html_complete = ''.join([html_complete, skeleton['ending']])
    """
def main():
    """Smoke-test entry point: load the main ad skeleton and print it."""
    builder = HTMLBuilder('../../../ads_yaml', 'main.yml')
    # Fix: the original bound the result to `object`, shadowing the builtin,
    # and used the Python-2-only print statement.  A parenthesized
    # single-argument print behaves identically on Python 2 and 3.
    skeleton = builder.get()
    print(skeleton)

if __name__ == '__main__':
    main()
"repo_name": "claudiordgz/AdGenerator",
"path": "src/BuilderManager/Agents/HTMLBuilderAgent.py",
"copies": "1",
"size": "1443",
"license": "apache-2.0",
"hash": 3650683319300001000,
"line_mean": 31.8181818182,
"line_max": 102,
"alpha_frac": 0.6146916147,
"autogenerated": false,
"ratio": 3.9642857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5078977328985714,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Claudio'
import yaml
class ConnectionHandler(object):
    """ The Connection Handler must handle all the I/O
        operations to the User Communications model: loading the YAML-backed
        connection settings (ftp/dropbox/sugarsync/onedrive), exposing them
        as attributes, and writing modifications back.
    """
    def __init__(self):
        # Protocols whose settings were modified and must be written back.
        self.modify_lst = list()
        # has_<protocol> booleans mirrored into the model on save.
        self.flags = dict()
        # Dispatch table: protocol name -> method that folds its settings
        # back into self.model.
        self.__modify_model = { 'ftp' : self.__model_ftp,
                                'dropbox': self.__model_dropbox,
                                'sugarsync': self.__model_sugarsync,
                                'onedrive': self.__model_onedrive
                              }
    def get_model(self, yaml_file):
        """Load *yaml_file* into self.model and unpack it via setup().

        NOTE(review): the stream is not closed if yaml.load raises; also
        yaml.load on untrusted input is unsafe -- consider yaml.safe_load.
        """
        stream = open(yaml_file, 'r')
        self.model = yaml.load(stream)
        stream.close()
        self.setup()
    def save_model(self, yaml_file):
        """Write flags and any modified protocol settings back to *yaml_file*.

        NOTE(review): `self.modify` is only ever set by modify_ftp(); calling
        save_model before any modification raises AttributeError.  iteritems()
        and the bare `print self.model` are Python-2-only.
        """
        for key, val in self.flags.iteritems():
            self.model[key] = val
        if self.modify is True:
            for protocol in self.modify_lst:
                self.__modify_model[protocol]()
        print self.model
        with open(yaml_file, 'w') as yaml_fd:
            yaml_fd.write( yaml.dump(self.model, default_flow_style=False))
    def setup(self):
        """Unpack each protocol's settings from self.model, then clear it."""
        if self.model:
            self.get_ftp()
            self.get_dropbox()
            self.get_onedrive()
            self.get_sugarsync()
            # The raw model is discarded after unpacking; save_model
            # rebuilds it from flags + the per-protocol dicts.
            self.model.clear()
    def get_ftp(self):
        self.ftp = self.get_config('has_ftp', 'ftp')
    def modify_ftp(self, username, host, password, port):
        """Update the FTP settings and mark them for write-back."""
        self.ftp['username'] = username
        self.ftp['host'] = host
        self.ftp['password'] = password
        self.ftp['port'] = port
        if self.flags['has_ftp'] is not True:
            self.flags['has_ftp'] = True
        self.modify = True
        self.modify_lst.append('ftp')
    def get_dropbox(self):
        self.dropbox = self.get_config('has_dropbox', 'dropbox')
    def get_sugarsync(self):
        self.sugarsync = self.get_config('has_sugarsync', 'sugarsync')
    def get_onedrive(self):
        self.onedrive = self.get_config('has_onedrive', 'onedrive')
    def get_config(self, has_config_str, config_str):
        """Return the settings dict for one protocol, maintaining its flag.

        Missing/disabled protocols yield an empty dict; the flag defaults to
        '' when the model does not mention the protocol at all.
        """
        if has_config_str not in self.flags:
            self.flags[has_config_str] = ''
        if has_config_str in self.model:
            if self.model[has_config_str] is True:
                self.flags[has_config_str] = True
                return_val = self.model[config_str]
            else:
                self.flags[has_config_str] = False
                return_val = dict()
        return return_val
    def __model_ftp(self):
        # Fold the current FTP settings back into the model for saving.
        if 'ftp' not in self.model:
            self.model['ftp'] = dict()
        self.model['ftp']['username'] = self.ftp['username']
        self.model['ftp']['host'] = self.ftp['host']
        self.model['ftp']['password'] = self.ftp['password']
        self.model['ftp']['port'] = self.ftp['port']
    def __model_dropbox(self):
        # Write-back for dropbox not implemented yet.
        pass
    def __model_sugarsync(self):
        # Write-back for sugarsync not implemented yet.
        pass
    def __model_onedrive(self):
        # Write-back for onedrive not implemented yet.
        pass
"repo_name": "claudiordgz/AdGenerator",
"path": "src/Connection/ConnectionModelHandler.py",
"copies": "1",
"size": "2925",
"license": "apache-2.0",
"hash": -5160311608951619000,
"line_mean": 31.1538461538,
"line_max": 75,
"alpha_frac": 0.544957265,
"autogenerated": false,
"ratio": 3.6931818181818183,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4738139083181818,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Claudio'
""" In our implementation of the scale function (page 25), the body of the loop
executes the command data[j] = factor. We have discussed that numeric
types are immutable, and that use of the = operator in this context causes
the creation of a new instance (not the mutation of an existing instance).
How is it still possible, then, that our implementation of scale changes the
actual parameter sent by the caller?
"""
""" Had we implemented the scale function (page 25) as follows, does it work
properly?
def scale(data, factor):
for val in data:
val = factor
Explain why or why not.
"""
"""
MutableNum class from: http://blog.edwards-research.com/2013/09/mutable-numeric-types-in-python/
Allows you to pass the instance to a function, and with proper coding, allows you to modify the
value of the instance inside the function and have the modifications persist.
For example, consider:
> def foo(x): x *= 2
> x = 5
> foo(x)
> print(x)
This will print 5, not 10 like you may have hoped. Now using the MutableNum class:
> def foo(x): x *= 2
> x = MutableNum(5)
> foo(x)
> print(x)
This *will* print 10, as the modifications you made to x inside of the function foo will persist.
Note, however, that the following *will not* work:
> def bar(x): x = x * 2
> x = MutableNum(5)
> bar(x)
> print(x)
The difference being that [x *= 2] modifies the current variable x, while [x = x * 2] creates a new
variable x and assigns the result of the multiplication to it.
If, for some reason you can't use the compound operators ( +=, -=, *=, etc.), you can do something
like the following:
> def better(x):
> t = x
> t = t * 2
> # ... (Some operations on t) ...
>
> # End your function with a call to x.set()
> x.set(t)
"""
class MutableNum(object):
    """Mutable wrapper around a plain number.

    Arithmetic operators return NEW MutableNum instances; the compound
    assignment operators (+=, -=, ...) mutate this instance in place, which
    is what lets changes made inside a function persist for the caller.
    """
    __val__ = None  # the wrapped numeric value
    def __init__(self, v): self.__val__ = v
    # Comparison Methods
    def __eq__(self, x): return self.__val__ == x
    def __ne__(self, x): return self.__val__ != x
    def __lt__(self, x): return self.__val__ < x
    def __gt__(self, x): return self.__val__ > x
    def __le__(self, x): return self.__val__ <= x
    def __ge__(self, x): return self.__val__ >= x
    # FIX: the original compared self.__val__ > 0 instead of > x, so cmp()
    # reported any positive value as greater than everything.
    def __cmp__(self, x): return 0 if self.__val__ == x else 1 if self.__val__ > x else -1
    # Unary Ops
    def __pos__(self): return self.__class__(+self.__val__)
    def __neg__(self): return self.__class__(-self.__val__)
    def __abs__(self): return self.__class__(abs(self.__val__))
    # Bitwise Unary Ops
    def __invert__(self): return self.__class__(~self.__val__)
    # Arithmetic Binary Ops
    def __add__(self, x): return self.__class__(self.__val__ + x)
    def __sub__(self, x): return self.__class__(self.__val__ - x)
    def __mul__(self, x): return self.__class__(self.__val__ * x)
    def __div__(self, x): return self.__class__(self.__val__ / x)
    def __mod__(self, x): return self.__class__(self.__val__ % x)
    def __pow__(self, x): return self.__class__(self.__val__ ** x)
    def __floordiv__(self, x): return self.__class__(self.__val__ // x)
    def __divmod__(self, x): return self.__class__(divmod(self.__val__, x))
    def __truediv__(self, x): return self.__class__(self.__val__.__truediv__(x))
    # Reflected Arithmetic Binary Ops
    def __radd__(self, x): return self.__class__(x + self.__val__)
    def __rsub__(self, x): return self.__class__(x - self.__val__)
    def __rmul__(self, x): return self.__class__(x * self.__val__)
    def __rdiv__(self, x): return self.__class__(x / self.__val__)
    def __rmod__(self, x): return self.__class__(x % self.__val__)
    def __rpow__(self, x): return self.__class__(x ** self.__val__)
    def __rfloordiv__(self, x): return self.__class__(x // self.__val__)
    def __rdivmod__(self, x): return self.__class__(divmod(x, self.__val__))
    def __rtruediv__(self, x): return self.__class__(x.__truediv__(self.__val__))
    # Bitwise Binary Ops
    def __and__(self, x): return self.__class__(self.__val__ & x)
    def __or__(self, x): return self.__class__(self.__val__ | x)
    def __xor__(self, x): return self.__class__(self.__val__ ^ x)
    def __lshift__(self, x): return self.__class__(self.__val__ << x)
    def __rshift__(self, x): return self.__class__(self.__val__ >> x)
    # Reflected Bitwise Binary Ops
    def __rand__(self, x): return self.__class__(x & self.__val__)
    def __ror__(self, x): return self.__class__(x | self.__val__)
    def __rxor__(self, x): return self.__class__(x ^ self.__val__)
    def __rlshift__(self, x): return self.__class__(x << self.__val__)
    def __rrshift__(self, x): return self.__class__(x >> self.__val__)
    # Compound Assignment (mutate in place, return self)
    def __iadd__(self, x): self.__val__ += x; return self
    def __isub__(self, x): self.__val__ -= x; return self
    def __imul__(self, x): self.__val__ *= x; return self
    def __idiv__(self, x): self.__val__ /= x; return self
    def __imod__(self, x): self.__val__ %= x; return self
    def __ipow__(self, x): self.__val__ **= x; return self
    # Casts
    def __nonzero__(self): return self.__val__ != 0   # Python 2 truthiness
    __bool__ = __nonzero__                            # FIX: Python 3 alias, else bool() was always True
    def __int__(self): return self.__val__.__int__()               # XXX
    def __float__(self): return self.__val__.__float__()           # XXX
    def __long__(self): return self.__val__.__long__()             # XXX
    # Conversions
    def __oct__(self): return self.__val__.__oct__()               # XXX
    def __hex__(self): return self.__val__.__hex__()               # XXX
    def __str__(self): return self.__val__.__str__()               # XXX
    # Random Ops
    def __index__(self): return self.__val__.__index__()           # XXX
    def __trunc__(self): return self.__val__.__trunc__()           # XXX
    def __coerce__(self, x): return self.__val__.__coerce__(x)
    # Represenation
    # NOTE(review): %d truncates non-integral wrapped values - confirm that
    # only ints are ever wrapped, or switch to %r when touching this code.
    def __repr__(self): return "%s(%d)" % (self.__class__.__name__, self.__val__)
    # Define innertype, a function that returns the type of the inner value self.__val__
    def innertype(self): return type(self.__val__)
    # Define set, a function that you can use to set the value of the instance
    def set(self, x):
        """Rebind the wrapped value to *x* (a number or another MutableNum)."""
        if isinstance(x, self.__class__):
            self.__val__ = x.__val__
            return
        # FIX: referencing 'long' unconditionally raised NameError on Python 3.
        try:
            numeric = (int, long, float)  # Python 2: long is a distinct type
        except NameError:
            numeric = (int, float)        # Python 3: int is unbounded
        if isinstance(x, numeric):
            self.__val__ = x
        else:
            raise TypeError("expected a numeric type")
    # Pass anything else along to self.__val__
    def __getattr__(self, attr):
        # FIX: dropped the leftover debug print that wrote to stdout on every
        # delegated attribute lookup.
        return getattr(self.__val__, attr)
def scale(data, factor):
    """Multiply every element of the mutable sequence *data* in place by *factor*.

    The caller's list changes because each slot of the shared list object is
    reassigned (the list itself is never rebound).

    FIX: restored the trailing ``return data`` so callers receive the list;
    the original return line was corrupted in this copy of the file.
    """
    for j in range(len(data)):
        data[j] *= factor
    return data
return data | {
"repo_name": "claudiordgz/GoodrichTamassiaGoldwasser",
"path": "ch01/c116c1117.py",
"copies": "1",
"size": "6895",
"license": "mit",
"hash": -3282859988457104000,
"line_mean": 45.2818791946,
"line_max": 99,
"alpha_frac": 0.5444525018,
"autogenerated": false,
"ratio": 3.4876074860900355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45320599878900353,
"avg_score": null,
"num_lines": null
} |
__author__ = "ClaytonBat"
class Node:
    """One link of a singly linked list: a payload plus a next-pointer."""

    def __init__(self, initdata):
        self.data = initdata
        self.next = None

    def getData(self):
        """Return the payload held by this node."""
        return self.data

    def getNext(self):
        """Return the following node, or None at the end of the chain."""
        return self.next

    def setData(self, newdata):
        """Replace this node's payload with *newdata*."""
        self.data = newdata

    def setNext(self, newnext):
        """Re-point this node's next-link at *newnext*."""
        self.next = newnext
class UnorderedList:
    """Singly linked list: add() inserts at the head, append() at the tail."""

    def __init__(self):
        self.head = None

    def isEmpty(self):
        """Return True when the list holds no nodes."""
        return self.head is None

    def add(self, item):
        """Insert *item* at the head (O(1))."""
        temp = Node(item)
        temp.setNext(self.head)
        self.head = temp

    def size(self):
        """Count the nodes by walking the chain (O(n))."""
        current = self.head
        count = 0
        while current is not None:
            count = count + 1
            current = current.getNext()
        return count

    def search(self, item):
        """Return True when some node's payload equals *item*."""
        current = self.head
        while current is not None:
            if current.getData() == item:
                return True
            current = current.getNext()
        return False

    def remove(self, item):
        """Unlink the first node whose payload equals *item*.

        FIX: previously crashed with AttributeError when *item* was absent;
        a missing item now leaves the list unchanged.
        """
        current = self.head
        previous = None
        while current is not None:
            if current.getData() == item:
                break
            previous = current
            current = current.getNext()
        if current is None:
            return  # item not found - nothing to unlink
        if previous is None:
            self.head = current.getNext()
        else:
            previous.setNext(current.getNext())

    def append(self, item):
        """Insert *item* at the tail.

        FIX: previously crashed on an empty list and re-counted the whole
        list on every loop step (O(n^2)); now a single O(n) walk.
        """
        temp = Node(item)
        if self.head is None:
            self.head = temp
            return
        current = self.head
        while current.getNext() is not None:
            current = current.getNext()
        current.setNext(temp)
# Demo: head insertion yields list order 4 -> 3 -> 2 -> 1.
mylist = UnorderedList()
mylist.add(1)
mylist.add(2)
mylist.add(3)
mylist.add(4)
print(mylist.size())
print("*"*10)
# append() returns None, so this line prints 'None' before the new size.
print(mylist.append(5))
print(mylist.size())
print(mylist.search(5))
print(mylist.search(1)) | {
"repo_name": "mcsoo/Exercises",
"path": "Lists.py",
"copies": "1",
"size": "1928",
"license": "mit",
"hash": -2544061640521459000,
"line_mean": 20.1978021978,
"line_max": 47,
"alpha_frac": 0.5248962656,
"autogenerated": false,
"ratio": 3.9266802443991855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49515765099991854,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Clayton Daley'
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("twisted-pyro.tests_manual")
log.info("Loading Application")
import pyro.protocol
from Pyro4.socketutil import getInterfaceAddress
class Foo(object):
    """Trivial remote object used to exercise the Pyro server factory."""
    def ping(self):
        # Log the hit so the manual test shows the call arrived, then answer
        # with a fixed token the client can verify.
        log.info("Foo was pinged!!!")
        return "pong"
class Bar(object):
    """Second trivial remote object, distinguishable from Foo by its reply."""
    def ping(self):
        # Log the hit so the manual test shows the call arrived, then answer
        # with a fixed token the client can verify.
        log.info("Bar was pinged!!!")
        return "bong"
# Stand up the Pyro-over-Twisted server factory and listen on TCP port 5555.
factory = pyro.protocol.Pyro4ServerFactory()
tcp_port = pyro.protocol.reactor.listenTCP(5555, factory)
# Because the reactor is responsible for the actual TCP connection, we
# have to proactively route the data back into the factory to ensure
# that URIs generated by the factory include correct location data.
host = getInterfaceAddress("8.8.8.8")
port = tcp_port.getHost().port
log.debug("Setting PYRO Factory to host %s and port %d" % (host, port))
factory.setAddress(host, port)
# Register two sample objects and log the ids the factory hands back.
foo = Foo()
bar = Bar()
fooId = factory.register(foo)
log.info("fooId is %s" % fooId)
barId = factory.register(bar)
log.info("barId is %s" % barId)
log.info("PYRO available on tcp_port %d" % tcp_port.getHost().port)
pyro.protocol.reactor.run() | {
"repo_name": "claytondaley/TxPyro",
"path": "tests/tests_manual_protocol.py",
"copies": "1",
"size": "1173",
"license": "apache-2.0",
"hash": 8666788946730633000,
"line_mean": 27.6341463415,
"line_max": 71,
"alpha_frac": 0.7186700767,
"autogenerated": false,
"ratio": 3.2493074792243766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9452454815662961,
"avg_score": 0.0031045480522832436,
"num_lines": 41
} |
__author__ = 'Clayton Daley'
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("test_pyro")
log.info("Logger started")
import pyro.proxy
import Pyro4
import Pyro4.futures
from pprint import pprint, pformat
from twisted.internet import reactor
import twisted.internet.defer
uri = "PYRONAME:mod_52b107eb0947a7fc310000c6"
envelopeTo = "recipient@mail.com"
envelopeFrom = "sender@mail.com"
message = """MIME-Version: 1.0\r\nFrom: sender@mail.com\r\nSubject: Test Mail using Twisted\r\nTo: recipient@mail.com\r\nContent-Type: text/plain; charset="us-ascii"\r\nContent-Transfer-Encoding: 7bit\r\n\r\nContent of plaintext mail to be sent through Twisted. Thanks, Clayton\r\n.\r\n"""
def stop_reactor(*args):
    """Stop the Twisted reactor; extra args (Deferred callback results) are ignored."""
    reactor.stop()
def log_and_relay(response):
    """Log a value passing through a callback chain and forward it unchanged."""
    log.info("Callback responded %s" % str(response))
    return response
def run_native():
    """Exercise the remote object through Pyro4's own async proxy wrapper."""
    # NOTE(review): 'async' became a reserved word in Python 3.7, so this
    # attribute access is Python-2/<=3.6 only (Pyro4 later renamed the helper).
    moda_twist = Pyro4.async(Pyro4.Proxy(uri))
    log.info("Running test in Native")
    f = moda_twist.email_process_message(message, envelopeTo, envelopeFrom)
    log.info("Proxy returned type %s" % str(f.__class__))
    # NOTE that native code will not execute callbacks on error
    f.then(log_and_relay)
    return f
def run_deferred():
    """Exercise the remote object through the Twisted-Deferred Pyro proxy."""
    mod_twist = pyro.proxy.PyroPatientProxy(uri)
    modd_twist = pyro.proxy.PyroDeferredService(mod_twist)
    log.info("Running test in Deferred")
    d = modd_twist.email_process_message(message, envelopeTo, envelopeFrom)
    log.info("Proxy returned type %s" % str(d.__class__))
    # Log the outcome (success or failure), then shut the reactor down.
    d.addBoth(log_and_relay)
    d.addBoth(stop_reactor)
    return d
# This file is currently configured to be put in the parent directory of pyro.proxy
# For deferred, user must manually reactor.run() after d = run_deferred()
# Native does not require the reactor
# Native should propagate NamingError errors up through 'f' if the nameserver is unavailable
# Deferred should wait indefinitely for a nameserver | {
"repo_name": "claytondaley/TxPyro",
"path": "tests/tests_manual_proxy.py",
"copies": "1",
"size": "1919",
"license": "apache-2.0",
"hash": -4392532440357881300,
"line_mean": 34.5555555556,
"line_max": 289,
"alpha_frac": 0.73736321,
"autogenerated": false,
"ratio": 3.219798657718121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4457161867718121,
"avg_score": null,
"num_lines": null
} |
__author__ = 'clh'
import requests
import boto
# Config
# required
github_api_token = 'Your Github API Token'
#limit the search scope to your user account, or organization.
search_username = 'YourUserName or Org Name'
# optional, but you should have at least one of these.
aws_key_report_file = './aws_key_report.csv'
extra_token_file = './extra_tokens.csv'
def print_key_info(key):
    """Print one IAM access-key record (dict with access_key_id/status/user_name).

    FIX: parenthesised print works under both Python 2 and Python 3; the bare
    print statement was Python-2 only.
    """
    print("key_id:{0}, status:{1}, owner:{2}".format(key['access_key_id'], key['status'], key['user_name']))
def search_github(key):
    """Search GitHub code for one credential.

    *key* is a (description, token) tuple; only the token text is searched,
    scoped to `search_username`.
    """
    print("Searching Github for token:{0}".format(key[1].rstrip()))
    print("Token Description:{0}".format(key[0]))
    # FIX: the query previously interpolated the whole (description, token)
    # tuple - "('AWS:...', 'AKIA...')" - into 'q'; search the bare token text
    # instead, matching the token printed above.
    params = {'access_token': github_api_token, 'q': "{0} user:{1}".format(key[1].rstrip(), search_username)}
    r = requests.get('https://api.github.com/search/code', params=params)
    body = r.json()
    print('Found total count:{0}'.format(body['total_count']))
    for item in body['items']:
        print("Filename:{0}, link:{1}".format(item['name'], item['html_url']))
def main():
    """Collect AWS access-key ids via boto/IAM plus extra tokens from a CSV,
    then run a GitHub code search for each credential."""
    #Assumes that your AWS credentials are exported in the local environment.
    iam = boto.connect_iam()
    user_names = []
    if aws_key_report_file:
        # First CSV column of the credential report is the IAM user name.
        with open(aws_key_report_file) as report_fp:
            for line in report_fp:
                user_names.append(line.split(',')[0])
        #remove first two elements, they are the csv legend, and the <root_account>
        user_names = user_names[2:]
    else:
        print 'Skipping AWS credential check, no aws key report file specified'
    # Accumulates (description, token) pairs to search for.
    access_key_ids = []
    #do AWS key lookup.
    for user_name in user_names:
        keys = iam.get_all_access_keys(user_name)
        for key in keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']:
            print_key_info(key)
            access_key_ids.append(tuple(['AWS:' + key['user_name'], key['access_key_id']]))
    #Read in extra search terms
    if extra_token_file:
        # Each CSV line is expected as: description,token
        with open(extra_token_file) as report_fp:
            for line in report_fp:
                print line.rstrip()
                access_key_ids.append(line.split(','))
    else:
        print 'Skipping extra credential check, no csv file specified'
    for key in access_key_ids:
        print '\n'
        search_github(key)
if __name__ == '__main__':
main() | {
"repo_name": "codyhanson/github_cred_check",
"path": "github_cred_check.py",
"copies": "1",
"size": "2314",
"license": "mit",
"hash": -6179744931384365000,
"line_mean": 33.0441176471,
"line_max": 107,
"alpha_frac": 0.6253241141,
"autogenerated": false,
"ratio": 3.5007564296520424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9606568190083336,
"avg_score": 0.003902470733741285,
"num_lines": 68
} |
from flask import Flask, jsonify, abort, make_response, request, url_for
from flask_cors import CORS
from flask_mongoengine import MongoEngine
import datetime
app = Flask(__name__)
# Allow cross-origin requests to every /api/ route.
CORS(app, resources=r'/api/*', headers='Content-Type')
# app.config['CORS_HEADERS'] = 'Content-Type'
# MongoEngine connection to the local 'officelunch' database.
app.config['MONGODB_SETTINGS'] = {'db': 'officelunch'}
db = MongoEngine(app)
class User(db.Document):
    # Display name of the user.
    name = db.StringField(max_length=255, required=True)
    # Login identifier (see auth_user / get_login).
    email = db.EmailField(required=True)
    # NOTE(review): passwords appear to be stored and compared in plaintext
    # (see auth_user) - confirm and consider hashing before release.
    password = db.StringField(max_length=255, required=True)
    phone = db.StringField() # Does this need to be required? This will need to be checked somewhere else in the application since there is no mongo field for phone numbers
    # Application-level primary key: a stringified integer (see post_user).
    userid = db.StringField(max_length=255, required=True)
    member_of = db.ListField(db.StringField()) # list of lgids that the user belongs to
class Lgroup(db.Document):
    # Display name of the lunch group.
    name = db.StringField(max_length=255, required=True)
    # Application-level primary key: a stringified integer (see post_lunch_group).
    lgid = db.StringField(required=True)
    # Member records (shape defined by the client; stored as free-form dicts).
    users = db.ListField(db.DictField(), required=True)
    origin = db.DictField(required=True) # This will be a python dictionary storing lat/long
    tags = db.ListField(db.StringField(max_length=255), required=True)
    dest_list = db.ListField(db.DictField()) # This is a list of dictionaries holding lat/long and # of votes
    final_dest = db.DictField() # dictionary with lat/long of voting winner
    admin = db.StringField(max_length=255, required=True) # id of the group admin
    start_time = db.DateTimeField(default=datetime.datetime.now, required=True)
    end_time = db.DateTimeField() # end date of the event
def auth_user(usr_email, pwd):
    """Return the User document matching *usr_email*/*pwd*, or {"exists": "no"}."""
    matches = User.objects(email=usr_email, password=pwd)
    if matches:
        return matches[0]
    return {"exists": "no"}
@app.errorhandler(404)
def not_found(error):
    """Render 404s as a JSON body instead of Flask's default HTML page."""
    body = jsonify({'error': 'Not found'})
    return make_response(body, 404)
@app.route('/api/login', methods=['GET'])
def get_login():
    """Authenticate via ?username=&password= query args.

    Responds {'user': <document>} on success, {'user': {'exists': 'no'}} on
    bad credentials, and an empty body when either arg is missing.
    NOTE(review): credentials travel in the query string - confirm this is
    acceptable, or move login to a POST body over HTTPS.
    """
    data = ''
    if 'username' in request.args and 'password' in request.args:
        username = request.args['username']
        password = request.args['password']
        user = auth_user(username, password)
        data = jsonify({'user': user})
    resp = make_response(data)
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string; the old tuple assignment
    # produced an invalid "('POST', 'GET', 'OPTIONS')" value.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp
# Lunch Group information
@app.route('/api/groups', methods=['GET'])
def get_lunch_groups():
    """List every lunch group as JSON: {'group_list': [...]}."""
    # FIX: the old empty-database guard returned a bare dict instead of a
    # Flask response object; iterating an empty queryset yields the same
    # JSON shape ({'group_list': []}) without the special case.
    grp_lst = [grp for grp in Lgroup.objects]
    data = jsonify({'group_list': grp_lst})
    resp = make_response(data)
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp
@app.route('/api/group_by_name/<string:name>', methods=['GET'])
def get_lunch_group_by_name(name):
    """Look a lunch group up by display name.

    Responds {"exists": "no"} when no group matches, else {"group": <doc>}.
    """
    group = Lgroup.objects(name=name)
    if not group:
        data = jsonify({"exists": "no"})
    else:
        data = jsonify({"group": group[0]})
    resp = make_response(data)
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp
@app.route('/api/group/<string:gid>', methods=['GET'])
def get_lunch_group(gid):
    """Look a lunch group up by lgid.

    Responds {"exists": "no"} when no group matches, else {"group": <doc>}.
    """
    group = Lgroup.objects(lgid=gid)
    if not group:
        data = jsonify({"exists": "no"})
    else:
        data = jsonify({"group": group[0]})
    resp = make_response(data)
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp
@app.route('/api/group/<string:gid>', methods=['PUT'])
def put_lunch_group(gid):
    """Update an existing lunch group (looked up by lgid) field-by-field."""
    # 'name' is required in the body even for updates (kept for API
    # compatibility with existing clients).
    if not request.json or 'name' not in request.json:
        abort(400)
    group = Lgroup.objects(lgid=gid)
    if 'name' in request.json:
        group.update_one(name=request.json['name'])
    if 'users' in request.json:
        group.update_one(users=request.json['users'])
    if 'origin' in request.json:
        group.update_one(origin=request.json['origin'])
    if 'tags' in request.json:
        group.update_one(tags=request.json['tags'])
    if 'dest_list' in request.json:
        group.update_one(dest_list=request.json['dest_list'])
    if 'final_dest' in request.json:
        group.update_one(final_dest=request.json['final_dest'])
    if 'admin' in request.json:
        group.update_one(admin=request.json['admin'])
    if 'start_time' in request.json:
        group.update_one(start_time=request.json['start_time'])
    if 'end_time' in request.json:
        group.update_one(end_time=request.json['end_time'])
    print(group)
    # FIX: a MongoEngine QuerySet has no save() - the old group.save() call
    # raised AttributeError on every request; update_one() already persisted
    # each change above.
    resp = make_response(jsonify({'group': group}))
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp, 201
@app.route('/api/group', methods=['POST'])
def post_lunch_group():
    """Create a new lunch group from the JSON body; lgid is auto-assigned."""
    if not request.json or 'name' not in request.json:
        abort(400)
    # FIX: the old id generation indexed objects.all()[len - 1] and crashed
    # with IndexError on an empty collection; start at "1" in that case.
    # NOTE(review): this is still racy under concurrent POSTs - a mongo
    # sequence/counter would be safer.
    existing = Lgroup.objects.all()
    new_lgid = str(int(existing[len(existing) - 1].lgid) + 1) if existing else "1"
    group = Lgroup(
        name=request.json['name'] if 'name' in request.json else "",
        lgid=new_lgid,
        users=request.json['users'] if 'users' in request.json else {},
        origin=request.json['origin'] if 'origin' in request.json else {},
        tags=request.json['tags'] if 'tags' in request.json else [],
        dest_list=request.json['dest_list'] if 'dest_list' in request.json else [],
        final_dest=request.json['final_dest'] if 'final_dest' in request.json else {},
        admin=request.json['admin'] if 'admin' in request.json else "",
        start_time=request.json['start_time'] if 'start_time' in request.json else "",
        end_time=request.json['end_time'] if 'end_time' in request.json else ""
    )
    print(group)
    group.save()
    resp = make_response(jsonify({'group': group}))
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp, 201
def _group_field_response(gid, field):
    """Shared responder for the single-field group getters below.

    Looks the group up by lgid and responds {"exists": "no"} when no group
    matches, else {field: value}.
    """
    group = Lgroup.objects(lgid=gid)
    if not group:
        data = jsonify({"exists": "no"})
    else:
        data = jsonify({field: getattr(group[0], field)})
    resp = make_response(data)
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string; the old tuple assignment
    # produced an invalid "('POST', 'GET', 'OPTIONS')" value.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp


@app.route('/api/group/<string:gid>/name', methods=['GET'])
def get_group_name(gid):
    """Get a group's name by ID."""
    return _group_field_response(gid, 'name')


@app.route('/api/group/<string:gid>/users', methods=['GET'])
def get_group_users(gid):
    """Get a group's users by ID."""
    return _group_field_response(gid, 'users')


@app.route('/api/group/<string:gid>/origin', methods=['GET'])
def get_group_origin(gid):
    """Get a group's origin by ID."""
    return _group_field_response(gid, 'origin')


@app.route('/api/group/<string:gid>/tags', methods=['GET'])
def get_group_tags(gid):
    """Get a group's tags by ID."""
    return _group_field_response(gid, 'tags')


@app.route('/api/group/<string:gid>/dest_list', methods=['GET'])
def get_group_dest_list(gid):
    """Get a group's destination list by ID."""
    return _group_field_response(gid, 'dest_list')


@app.route('/api/group/<string:gid>/final_dest', methods=['GET'])
def get_group_final_dest(gid):
    """Get a group's final destination by ID."""
    return _group_field_response(gid, 'final_dest')


@app.route('/api/group/<string:gid>/admin', methods=['GET'])
def get_group_admin(gid):
    """Get a group's admin by ID."""
    return _group_field_response(gid, 'admin')


@app.route('/api/group/<string:gid>/start_time', methods=['GET'])
def get_group_start_time(gid):
    """Get a group's start time by ID."""
    return _group_field_response(gid, 'start_time')


@app.route('/api/group/<string:gid>/end_time', methods=['GET'])
def get_group_end_time(gid):
    """Get a group's end time by ID."""
    return _group_field_response(gid, 'end_time')
# User Information
@app.route('/api/users', methods=['GET'])
def get_users():
    """List every user as JSON: {'users': [...]}."""
    # FIX: the old empty-database guard returned a bare dict instead of a
    # Flask response; an empty queryset now yields {'users': []} instead.
    user_lst = []
    for user in User.objects:
        print(user)  # debug trace kept; converted from the Python-2 print statement
        user_lst.append(user)
    data = jsonify({'users': user_lst})
    resp = make_response(data)
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp
@app.route('/api/user_by_email/<string:usr_email>', methods=['GET'])
def get_user_by_email(usr_email):
    """Look a user up by email; responds {'user': <queryset>}."""
    # FIX: the old empty-database guard returned a bare dict instead of a
    # Flask response; an empty queryset serialises to {'user': []} anyway.
    user = User.objects(email=usr_email)
    data = jsonify({'user': user})
    resp = make_response(data)
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp


@app.route('/api/user/<string:uid>', methods=['GET'])
def get_user(uid):
    """Look a user up by userid; responds {'user': <queryset>}."""
    # FIX: same bare-dict guard removed as in get_user_by_email above.
    user = User.objects(userid=uid)
    data = jsonify({'user': user})
    resp = make_response(data)
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp
@app.route('/api/user/<string:uid>', methods=['PUT'])
def put_user(uid):
    """Update an existing user (looked up by userid) field-by-field."""
    if not request.json or 'email' not in request.json:
        abort(400)
    user = User.objects(userid=uid)
    if 'name' in request.json:
        user.update_one(name=request.json['name'])
    if 'email' in request.json:
        user.update_one(email=request.json['email'])
    if 'password' in request.json:
        user.update_one(password=request.json['password'])
    if 'phone' in request.json:
        user.update_one(phone=request.json['phone'])
    if 'member_of' in request.json:
        user.update_one(member_of=request.json['member_of'])
    # FIX: a MongoEngine QuerySet has no save() - the old user.save() call
    # raised AttributeError on every request; update_one() already persisted
    # each change above.
    resp = make_response(jsonify({'user': user}))
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp, 201
@app.route('/api/user', methods=['POST'])
def post_user():
    """Create a new user from the JSON body; userid is auto-assigned."""
    if not request.json or 'email' not in request.json:
        abort(400)
    # FIX: the old id generation indexed objects.all()[len - 1] and crashed
    # with IndexError on an empty collection; start at "1" in that case.
    # NOTE(review): still racy under concurrent POSTs.
    existing = User.objects.all()
    new_userid = str(int(existing[len(existing) - 1].userid) + 1) if existing else "1"
    user = User(
        name=request.json['name'] if 'name' in request.json else "",
        email=request.json['email'] if 'email' in request.json else "",
        password=request.json['password'] if 'password' in request.json else "",
        phone=request.json['phone'] if 'phone' in request.json else "",
        userid=new_userid,
        member_of=request.json['member_of'] if 'member_of' in request.json else []
    )
    user.save()
    resp = make_response(jsonify({'user': user}))
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string, not a tuple.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp, 201
def _user_field_response(uid, field, label):
    """Shared responder for the single-field user getters below.

    Looks the user up by userid and responds {"exists": "no"} when no user
    matches, else {label: value-of-field}.
    """
    user = User.objects(userid=uid)
    if not user:
        data = jsonify({"exists": "no"})
    else:
        data = jsonify({label: getattr(user[0], field)})
    resp = make_response(data)
    resp.mimetype = "application/json"
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # FIX: a header value must be a single string; the old tuple assignment
    # produced an invalid "('POST', 'GET', 'OPTIONS')" value.
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    return resp


@app.route('/api/user/<string:uid>/name', methods=['GET'])
def get_user_name(uid):
    """Get a user's name by ID (JSON key 'username')."""
    return _user_field_response(uid, 'name', 'username')


@app.route('/api/user/<string:uid>/email', methods=['GET'])
def get_user_email(uid):
    """Get a user's email by ID."""
    return _user_field_response(uid, 'email', 'email')


@app.route('/api/user/<string:uid>/phone', methods=['GET'])
def get_user_phone(uid):
    """Get a user's phone number by ID."""
    return _user_field_response(uid, 'phone', 'phone')


@app.route('/api/user/<string:uid>/groups', methods=['GET'])
def get_user_groups(uid):
    """Get a user's group memberships by ID (JSON key 'groups')."""
    return _user_field_response(uid, 'member_of', 'groups')
if __name__ == '__main__':
    # Development entry point.
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to the
    # whole network - confirm this never runs in production.
    app.run(host='0.0.0.0', debug=True)
| {
"repo_name": "OfficeLunch/OfficeLunch",
"path": "officelunch_api/rest_api.py",
"copies": "1",
"size": "16551",
"license": "mit",
"hash": 7034252528209153000,
"line_mean": 30.2873345936,
"line_max": 173,
"alpha_frac": 0.6316234669,
"autogenerated": false,
"ratio": 3.447406790252031,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45790302571520314,
"avg_score": null,
"num_lines": null
} |
__author__ = 'clipo'
import csv
import argparse
import logging as logger
import operator
import itertools
import os
import networkx as nx
import networkx.algorithms.isomorphism as iso
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
class continuityAnalysis():
    def __init__(self):
        """Set up empty containers, the two NetworkX graphs and the static
        dimension/classification lookup tables for projectile-point traits."""
        self.columnNames = []
        self.labels=[]
        # taxon label -> character-state string (one digit per dimension)
        self.taxa={}
        self.countOfTaxa=0
        # NOTE(review): 'is_directed' here is just a stored graph *attribute*;
        # it does not make nx.Graph directed (that would need nx.DiGraph).
        self.graph=nx.Graph(name="MaximumGraph", GraphID=1, is_directed=False)
        self.minMaxGraph=nx.Graph(name="MaximumParsimony", GraphID=2, is_directed=False)
        self.outputDirectory =""
        # values treated as "unset" when validating CLI options
        self.FalseList=[None,0,False,"None","0","False"]
        # dimension number -> human-readable dimension name
        self.dimensions={1:"Location of Maximum Blade Width",
                         2: "Base Shape",
                         3: "Basal-Indentation",
                         4: "Constriction Ratio",
                         5: "Outer Tang Angle",
                         6: "Tang-Tip Shape",
                         7: "Fluting",
                         8: "Length/Width Ratio"}
        #print self.dimensions[2]
        # dimension number -> {state digit -> state description}
        self.classification={1:{1:"Proximal Quarter",2:"Secondmost Proximal Quarter",3:"Thirdmost Proximal Quarter",4:"Distal Quarter"},
                             2:{1:"Arc/Round",2:"Normal Curve",3:"Triangular", 4:"Folsomoid",5:"Flat",6:"Convex"},
                             3:{1:"No indentation",2:"Shallow",3:"Deep",4:"Very Deep"},
                             4:{1:"1.0",2:"0.90-0.99",3:"0.80-0.89",4:"0.70-0.79",5:"0.60-0.69",6:"0.50-0.59"},
                             5:{1:"93-115",2:"88-92",3:"81-87",4:"66-80",5:"51-65",6:"<=50"},
                             6:{1:"Pointed",2:"Round",3:"Blunt"},
                             7:{1:"Absent",2:"Present"},
                             8:{1:"1.00-1.99",2:"2.00-2.99",3:"3.00-3.99",4:"4.00-4.99",5:"5.00-5.99",6:">=6.00"}}
def openFile(self, filename):
try:
logger.debug("trying to open: %s ", filename)
file = open(filename, 'r')
except csv.Error as e:
logger.error("Cannot open %s. Error: %s", filename, e)
sys.exit('file %s does not open: %s') % ( filename, e)
reader = csv.reader(file, delimiter=' ', quotechar='|')
for row in reader:
row = map(str, row)
label = row[0]
self.labels.append(label)
row.pop(0)
self.taxa[label]=str(row[0])
#print "characters: ", str(row[0])
self.countOfTaxa += 1
return True
    def saveGraph(self, graph, filename):
        """Write *graph* to *filename* in GML format.

        NOTE(review): this method is shadowed by the second saveGraph defined
        further down the class (which strips the last 4 chars of the filename
        and appends '.gml'); only that later definition is ever bound.
        """
        nx.write_gml(graph, filename)
def all_pairs(self, lst):
return list((itertools.permutations(lst, 2)))
def compareTaxa(self, taxa1, taxa2):
#print self.taxa[taxa1], "-", self.taxa[taxa2]
numberDiff = 0
numberSame = 0
count = 0
dimensionsChanged =[]
traitsChanged=[]
traitsSame=[]
dimensionsSame=[]
for t in self.taxa[taxa1]:
#print t, "-", self.taxa[taxa2][count]
if t == self.taxa[taxa2][count]:
numberSame += 1
#print "dimensions: ",self.dimensions[count+1]
dimensionsChanged.append(self.dimensions[count+1])
change = self.dimensions[count+1]+":"+self.classification[count+1][int(self.taxa[taxa2][count])]+"->"+self.classification[count+1][int(t)]
traitsChanged.append(change)
else:
#print "dimensions: ",self.dimensions[count+1]
dimensionsChanged.append(self.dimensions[count+1])
change = self.dimensions[count+1]+":"+self.classification[count+1][int(self.taxa[taxa2][count])]+"->"+self.classification[count+1][int(t)]
traitsChanged.append(change)
count += 1
#print "1: :", self.taxa[taxa1], "2: ", self.taxa[taxa2], "-->", number
#print "dimensionsChanged: ", dimensionsChanged
#print "traitsChanged: ", traitsChanged
return numberDiff, dimensionsChanged,traitsChanged,numberSame, dimensionsSame, traitsSame,
    def createGraph(self):
        """Build the fully connected 'maximum' graph: one node per taxon and
        one edge per ordered pair of taxa, annotated with comparison data."""
        allPairs = self.all_pairs(self.labels)
        for pairs in allPairs:
            num,dim,traits, numsame, dimsame, traitssame=self.compareTaxa(pairs[0],pairs[1])
            stuffChanged=str(dim)+"=>"+str(traits)
            #print "stuff:", stuffChanged
            if pairs[0] not in self.graph.nodes():
                self.graph.add_node(pairs[0], name=pairs[0], characterTraits=self.taxa[pairs[0]], connectedTo=pairs[1])
            if pairs[1] not in self.graph.nodes():
                self.graph.add_node(pairs[1], name=pairs[1], characterTraits=self.taxa[pairs[1]], connectedTo=pairs[0])
            # NOTE(review): 'weight' stores the entire 6-tuple returned by a
            # *second* compareTaxa() call (not a scalar); createMinMaxGraph()
            # later unpacks weight[0..2]. The comparison is thus done twice
            # per pair.
            self.graph.add_edge(pairs[0], pairs[1],
                                weight=self.compareTaxa(pairs[0],pairs[1]),
                                dims=dim,
                                traits=traits,
                                stuffChanged=stuffChanged)
            #print stuffChanged.strip('[]')
    def saveGraph(self, graph, filename):
        """Write *graph* as GML, replacing the last 4 chars of *filename*
        (assumed to be an extension like '.txt') with '.gml'.

        NOTE(review): this second definition shadows the earlier saveGraph in
        this class; it is the one actually used at runtime.
        """
        nx.write_gml(graph, filename[:-4]+".gml")
    ## from a "summed" graph, create a "min max" solution -- using Counts
    def createMinMaxGraph(self):
        """Derive the 'maximum parsimony' graph from self.graph by visiting
        edges in descending weight order and adding an edge whenever the two
        taxa are not yet connected (or another edge of equal weight was just
        added at this level).

        NOTE(review): uses dict.iteritems() and Graph.edges_iter(), so this
        requires Python 2 and an old networkx release.
        """
        ## first need to find the pairs with the maximum occurrence, then we work down from there until all of the
        ## nodes are included
        ## the weight
        maxWeight = 0  # NOTE(review): never used
        pairsHash = {}
        traitList={}
        dimList={}
        stuffChanged={}
        for e in self.graph.edges_iter():
            d = self.graph.get_edge_data(*e)
            fromTaxa = e[0]
            toTaxa = e[1]
            #print d['weight']
            #print "weight: ", d['weight'][0]
            # 'weight' is the 6-tuple stored by createGraph(); index 0 is the
            # difference count, 1 the dimensions, 2 the trait strings.
            currentWeight = int(d['weight'][0])
            dimensions=d['weight'][1]
            traits=d['weight'][2]
            #stuff=d['weight'][3]
            pairsHash[fromTaxa + "*" + toTaxa] = currentWeight
            label = fromTaxa + "*" + toTaxa
            traitList[label]=traits
            dimList[label]=dimensions
            #stuffChanged[label]=stuff
        matchOnThisLevel = False
        currentValue = 0
        for key, value in sorted(pairsHash.iteritems(), key=operator.itemgetter(1), reverse=True):
            #print key, "->", value
            # avoid a zero weight so inverseweight (1/value) below is defined
            if value==0:
                value=.0000000000001
            if currentValue == 0:
                currentValue = value
            elif value < currentValue:
                matchOnThisLevel = False ## we need to match all the connections with equivalent weights (otherwise we
                ## would stop after the nodes are included the first time which would be arbitrary)
                ## here we set the flag to false.
            taxa1, taxa2 = key.split("*")
            #print ass1, "-", ass2, "---",value
            if taxa1 not in self.minMaxGraph.nodes():
                self.minMaxGraph.add_node(taxa1, name=taxa1,characterTraits=self.taxa[taxa1])
            if taxa2 not in self.minMaxGraph.nodes():
                self.minMaxGraph.add_node(taxa2, name=taxa2, characterTraits=self.taxa[taxa2])
            if nx.has_path(self.minMaxGraph, taxa1, taxa2) == False or matchOnThisLevel == True:
                matchOnThisLevel = True ## setting this true allows us to match the condition that at least one match was
                ## made at this level
                self.minMaxGraph.add_path([taxa1, taxa2], weight=value, dimensions=str(dimList[key]).strip('[]'),
                                          traits=str(traitList[key]).strip('[]'),
                                          #traitChanged=str(stuffChanged[key].strip('[]')),
                                          inverseweight=(1/value ))
    ## Output to file and to the screen
    def graphOutput(self):
        """Save the min-max graph as GML and render it on screen with
        matplotlib, using a graphviz layout."""
        graph=self.minMaxGraph
        ## Now make the graphic for set of graphs
        plt.rcParams['text.usetex'] = False
        basefilename = os.path.basename(self.args['inputfile'])[:-4]
        newfilename = self.args['outputdirectory'] + "/" + basefilename + "-out.vna"
        gmlfilename = self.args['outputdirectory'] + "/" + basefilename + "-out.gml"
        self.saveGraph(graph, gmlfilename)
        plt.figure(newfilename, figsize=(8, 8))
        # make sure the graphviz binaries are findable by pygraphviz/pydot
        os.environ["PATH"] += ":/usr/local/bin:/usr/local/opt/graphViz/:"
        # NOTE(review): nx.graphviz_layout was removed in modern networkx
        # (now nx.nx_agraph.graphviz_layout) — this targets an old release.
        pos = nx.graphviz_layout(graph)
        edgewidth = []
        ### Note the weights here are biased where the *small* differences are the largest (since its max value - diff)
        weights = nx.get_edge_attributes(graph, 'weight')
        for w in weights:
            edgewidth.append(weights[w])
        maxValue = max(edgewidth)
        widths = []
        for w in edgewidth:
            widths.append(((maxValue - w) + 1) * 5)
        assemblageSizes = []
        # NOTE(review): no node is ever given a 'size' attribute in this
        # class, so 'sizes' is empty and assemblageSizes stays unused.
        sizes = nx.get_node_attributes(graph, 'size')
        #print sizes
        for s in sizes:
            #print sizes[s]
            assemblageSizes.append(sizes[s])
        nx.draw_networkx_edges(graph, pos, alpha=0.3, width=widths)
        sizes = nx.get_node_attributes(graph, 'size')
        nx.draw_networkx_nodes(graph, pos, node_color='w', alpha=0.4)
        nx.draw_networkx_edges(graph, pos, alpha=0.4, node_size=0, width=1, edge_color='k')
        nx.draw_networkx_labels(graph, pos, fontsize=10)
        # NOTE(review): 'font' is built but never passed to any draw call.
        font = {'fontname': 'Helvetica',
                'color': 'k',
                'fontweight': 'bold',
                'fontsize': 10}
        plt.axis('off')
        #plt.savefig(newfilename, dpi=150)
        plt.show()
    def checkMinimumRequirements(self):
        """Abort unless graphviz layout support is importable and the
        required --inputfile option was supplied."""
        try:
            from networkx import graphviz_layout
        except ImportError:
            raise ImportError(
                "This function needs Graphviz and either PyGraphviz or Pydot. Please install GraphViz from http://www.graphviz.org/")
        # NOTE(review): 'sys' is never imported explicitly in this file; it is
        # presumably pulled in via 'from pylab import *' — confirm.
        if self.args['inputfile'] in self.FalseList:
            sys.exit("Inputfile is a required input value: --inputfile=../testdata/testdata.txt")
def addOptions(self, oldargs):
self.args = {'debug': None, 'inputfile': None, 'outputdirectory': None, 'pdf':None,'separator':None, 'missing':None, 'similarity':None,
'header':None}
for a in oldargs:
#print a
self.args[a] = oldargs[a]
    def process(self,args):
        """Top-level pipeline: validate options, read the taxa file, build the
        maximum and min-max graphs, then display/save the result."""
        self.addOptions(args)
        self.checkMinimumRequirements()
        self.openFile(self.args['inputfile'])
        self.createGraph()
        self.createMinMaxGraph()
        self.graphOutput()
        #self.saveGraph(self.minMaxGraph,self.args['inputfile'])
if __name__ == "__main__":
    # CLI entry point: parse options and run the full continuity analysis.
    parser = argparse.ArgumentParser(description='Conduct a continuity analysis')
    #parser.add_argument('--debug', '-d', default=None, help='Sets the DEBUG flag for massive amounts of annotated output.')
    parser.add_argument('--inputfile','-f', required=True,
                        help="The file to be analyzed (.txt file) ")
    parser.add_argument('--outputdirectory', '-o', default=".", help="directory in which output files should be written.")
    #parser.add_argument('--separator','-s', default="tab",
    #help="The type of separator between characters (space, tab, none) ")
    #parser.add_argument('--missing','-m',default=None, help='What to do with missing values (?) (e.g., estimate, none)')
    #parser.add_argument('--similarity','-si',default="similarity", help="Use similarity or dissimlarity")
    #parser.add_argument('--header','-hd', default=None, help='Whether or not there is a header (None, Yes)')
    args={}
    try:
        args = vars(parser.parse_args())
    # Fix: 'except IOError, msg' is Python-2-only syntax (a SyntaxError on
    # Python 3); the 'as' form works on Python 2.6+ and 3.x alike.
    except IOError as msg:
        parser.error(str(msg))
        sys.exit()
    ca = continuityAnalysis()
    results = ca.process(args)
''''
From the command line:
python ./continuityAnalysis.py --inputfile=../testdata/pfg.txt
As a module:
from continuityAnalysis import continuityAnalysis
ca = continuityAnalysis()
args = {}
args['inputfile'] = "../testdata/testdata-5.txt"
args['screen'] = 1
args['debug'] = 1
args['graphs'] = 1
results = ca.process(args)
'''''
"repo_name": "mmadsen/characterstate-network",
"path": "buildCharacterstateNetwork.py",
"copies": "1",
"size": "12003",
"license": "apache-2.0",
"hash": 2763904985399182000,
"line_mean": 39.5540540541,
"line_max": 154,
"alpha_frac": 0.5799383487,
"autogenerated": false,
"ratio": 3.6740128558310374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9683770997825056,
"avg_score": 0.01403604134119614,
"num_lines": 296
} |
__author__ = 'clobo'
from collections import namedtuple #!
from pyperform import ComparisonBenchmark
MyNamedTuple = namedtuple('ButtonStatus', ('name', 'age', 'gender')) #!
class MyClass(object): #!
    """Plain __slots__-based class benchmarked against namedtuple and dict.

    The trailing '#!' comments are pyperform markers: they tag lines to be
    included in the benchmark's setup source, so they must not be removed.
    """
    __slots__ = ('name', 'age', 'gender')
    def __init__(self, name, age, gender):
        self.name = name
        self.age = age
        self.gender = gender
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Creation Speed Tests
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
@ComparisonBenchmark('class')
def create_class():
    # Benchmark group 'class': construct 1000 slotted-class instances.
    for i in xrange(1000):
        c = MyClass('calvin', 25, 'male')
@ComparisonBenchmark('class')
def create_namedtuple():
    # Benchmark group 'class': construct 1000 namedtuple instances.
    for i in xrange(1000):
        c = MyNamedTuple('calvin', 25, 'male')
@ComparisonBenchmark('class')
def create_dict():
    # Benchmark group 'class': construct 1000 plain dict records.
    for i in xrange(1000):
        c = {'name': 'calvin', 'age': 25, 'gender': 'male'}
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Lookup Speed Tests
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
@ComparisonBenchmark('lookup')
def class_attr_lookup():
    # Benchmark group 'lookup': 10000 rounds of attribute access on a
    # slotted-class instance.
    c = MyClass('calvin', 25, 'male')
    for i in xrange(10000):
        name, age, gender = c.name, c.age, c.gender
@ComparisonBenchmark('lookup')
def namedtuple_attr_lookup():
    # Benchmark group 'lookup': 10000 rounds of attribute access on a
    # namedtuple instance.
    c = MyNamedTuple('calvin', 25, 'male')
    for i in xrange(10000):
        name, age, gender = c.name, c.age, c.gender
@ComparisonBenchmark('lookup')
def dict_attr_lookup():
    # Benchmark group 'lookup': 10000 rounds of key access on a dict record.
    c = {'name': 'calvin', 'age': 25, 'gender': 'male'}
    for i in xrange(10000):
        name, age, gender = c['name'], c['age'], c['gender']
ComparisonBenchmark.summarize('class')
ComparisonBenchmark.summarize('lookup')
| {
"repo_name": "lobocv/pyperform",
"path": "examples/dict_vs_class_vs_namedtuple.py",
"copies": "1",
"size": "1721",
"license": "mit",
"hash": -5709625545324312000,
"line_mean": 24.3088235294,
"line_max": 72,
"alpha_frac": 0.5258570598,
"autogenerated": false,
"ratio": 3.677350427350427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9697591553855203,
"avg_score": 0.0011231866590448378,
"num_lines": 68
} |
__author__ = 'clobo'
import logging
class CustomLogLevel(object):
    """Decorator that registers a custom logging level.

    Decorating a formatter function ``f(logger, msg, *args, **kwargs) ->
    (msg, args, kwargs)`` registers *name* at *level* and installs a
    ``logger.<name.lower()>(msg, ...)`` method on the chosen logger (and on
    the ``logging`` module itself when the root logger is used).
    """
    def __init__(self, level, name, logger_name=None):
        self.level = level          # numeric level value
        self.name = name            # level display name (e.g. 'MYLEVEL')
        self.logger_name = logger_name
        if logger_name is None:
            self.logger = logging.getLogger()
        else:
            self.logger = logging.getLogger(logger_name)
    def __call__(self, customlevel):
        """
        Wrap the decorated function to take care of the setting up of the custom log level.
        """
        # Add the new custom level to the list of known levels
        logging.addLevelName(self.level, self.name)
        def _wrapper(msg, *args, **kwargs):
            # Check if the currently set level allows this log level to print.
            # Fix: the original referenced the free variable 'level', which is
            # undefined here (it only worked when a global 'level' happened to
            # exist); the instance's own level must be used.
            if self.logger.isEnabledFor(self.level):
                _msg, _args, _kwargs = customlevel(self.logger, msg, *args, **kwargs)
                self.logger.log(self.level, _msg, *_args, **_kwargs)
        # Create function bindings in the logger or if using the root logger, setup the bindings to allow
        # calls to logging.mycustomlevel() much like logging.info(), logging.debug() etc.
        setattr(self.logger, self.name.lower(), _wrapper)
        if self.logger_name is None:
            setattr(logging, self.name.lower(), _wrapper)
        return customlevel
def new_log_level(level, name, logger_name=None):
    """
    Quick way to create a custom log level that behaves like the default levels in the logging module.
    :param level: level number
    :param name: level name
    :param logger_name: optional logger name
    """
    # Register the level with a pass-through formatter: message, args and
    # kwargs are forwarded to Logger.log() unchanged.
    @CustomLogLevel(level, name, logger_name)
    def _default_template(logger, msg, *args, **kwargs):
        return msg, args, kwargs
if __name__ == '__main__':
    # Demo: register a custom 'MYLEVEL' level between DEBUG and INFO on the
    # root logger and exercise it below and above the active threshold.
    level = logging.INFO-5
    name = 'MYLEVEL'
    # logger_name = 'mylogger'
    logger_name = None
    @CustomLogLevel(level, name, logger_name=logger_name)
    def myloglevel(logger, msg, *args, **kwargs):
        return 'This is a custom level: %s' % msg, args, kwargs
    # create_new_level(level, name, logger_name=logger_name)
    logging.basicConfig()
    if logger_name:
        l = logging.getLogger(logger_name)
        logger = l
    else:
        # root logger: CustomLogLevel also installs logging.mylevel()
        l = logging.getLogger()
        logger = logging
    l.setLevel(logging.INFO)
    logger.info('this is a test')
    # 'mylevel' is name.lower() — the attribute CustomLogLevel installs.
    logger.mylevel('this is a test')
    l.setLevel(level)
    logger.info('this is a test')
    logger.mylevel('this is a test')
| {
"repo_name": "lobocv/pyperform",
"path": "pyperform/customlogger.py",
"copies": "1",
"size": "2485",
"license": "mit",
"hash": 6387122239963750000,
"line_mean": 30.0625,
"line_max": 105,
"alpha_frac": 0.6201207243,
"autogenerated": false,
"ratio": 3.894984326018809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5015105050318809,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
_author__ = 'cmantas'
from Node import Node
from VM import get_all_vms
from json import loads, dumps
from os import remove
from os.path import isfile
from lib.persistance_module import get_script_text, env_vars
from lib.tiramola_logging import get_logger
from threading import Thread
from lib.Cluster import *
class Clients(Cluster):
"""
Represents the Clients Cluster
"""
orchestrator = None # the VM to which the others report to
# the flavor and image for this cluster's VMs
flavor = env_vars["client_flavor"]
image = env_vars["cassandra_base_image"]
    def __init__(self):
        """Configure the cluster identity, save-file path and logger for the
        'clients' cluster on top of the generic Cluster setup."""
        super(Clients, self).__init__()
        self.cluster_name = "clients"
        self.node_type = "client"
        # the save file for saving/reloading the active cluster
        self.save_file = home+"files/saved_%s_cluster.json" % self.cluster_name
        # the logger for this file
        self.log = get_logger('CLIENTS', 'INFO', logfile=home+'files/logs/Coordinator.log')
    def find_orchestrator(self):
        """Scan all active nodes and bind the first one whose name contains
        'orchestrator' to the module-level 'orchestrator' global.

        NOTE(review): this sets the *module* global (used by
        inject_hosts_files), not the Clients.orchestrator class attribute
        declared above — that attribute is never updated.
        """
        in_nodes = Node.get_all_nodes(check_active=True)
        for n in in_nodes:
            if "orchestrator" in n.name:
                global orchestrator
                orchestrator = n
                return
    def resume_cluster(self):
        """
        Re-loads the cluster representation based on the VMs pre-existing on the IaaS and the 'save_file'
        """
        self.log.info("Loading info from the IaaS")
        if not isfile(self.save_file):
            self.log.info("No existing created cluster")
            saved_nodes = []
        else:
            # NOTE(review): the handle from open() is never closed here
            saved_cluster = loads(open(self.save_file, 'r').read())
            saved_nodes = saved_cluster['clients']
        in_nodes = Node.get_all_nodes(check_active=True)
        for n in in_nodes:
            # nodes not in the save file are either the orchestrator or
            # unrelated VMs; only previously saved nodes rejoin the cluster
            if n.name not in saved_nodes:
                if "orchestrator" in n.name:
                    global orchestrator
                    orchestrator = n
                    self.log.debug('Found orchestrator %s' % n.name)
                continue
            else:
                self.all_nodes.append(n)
        #sort nodes by name
        self.all_nodes.sort(key=lambda x: x.name)
def save_cluster(self):
"""
Creates/Saves the 'save_file'
:return:
"""
cluster = dict()
cluster["clients"] = [c.name for c in self.all_nodes]
string = dumps(cluster, indent=3)
f = open(self.save_file, 'w+')
f.write(string)
    def create_cluster(self, count=1):
        """Create *count* fresh client VMs (each with a public IPv4), persist
        the topology, wait for SSH readiness and wire up the host files."""
        self.all_nodes = []
        for i in range(count):
            self.all_nodes.append(Node(self.cluster_name, node_type=self.node_type, number="%02d" % (i+1), create=True, IPv4=True,
                                       flavor=self.flavor, image=self.image))
        #save the cluster to file
        self.save_cluster()
        #wait until everybody is ready
        self.wait_everybody()
        self.find_orchestrator()
        self.inject_hosts_files()
        self.log.info('Every node is ready for SSH')
    def inject_hosts_files(self):
        """
        Creates a mapping of hostname -> IP for all the nodes in the cluster and injects it to all Nodes so that they
        know each other by hostname. Also restarts the ganglia daemons
        :return:
        """
        self.log.info("Injecting host files")
        hosts = dict()
        for i in self.all_nodes:
            hosts[i.name] = i.get_public_addr()
        #add the host names to etc/hosts
        # NOTE(review): 'orchestrator' is the module-level global set by
        # find_orchestrator()/resume_cluster(); this fails if neither ran.
        orchestrator.inject_hostnames(hosts, delete=self.cluster_name)
        for i in self.all_nodes:
            i.inject_hostnames(hosts, delete=self.cluster_name)
        self.all_nodes[0].run_command("service ganglia-monitor restart; service gmetad restart", silent=True)
        orchestrator.run_command("service ganglia-monitor restart; service gmetad restart", silent=True)
def add_nodes(self, count=1):
"""
Adds a node to the cassandra cluster. Refreshes the hosts in all nodes
:return:
"""
self.log.info('Adding %d nodes' % count)
new_nodes = []
Node.flavor = env_vars['client_flavor']
for i in range(count):
#check if cluster did not previously exist
if i == 0 and len(self.all_nodes) == 0:
# give a floating IPv4 to the first node only
new_guy = Node(self.cluster_name, '', len(self.all_nodes)+1, create=True, IPv4=True)
else:
new_guy = Node(self.cluster_name, node_type="", number=len(self.all_nodes)+1, create=True)
self.all_nodes.append(new_guy)
new_nodes.append(new_guy)
self.save_cluster()
for n in new_nodes:
n.wait_ready()
#inject host files to everybody
n.inject_hostnames(self.get_hosts(private=True), delete=self.cluster_name)
n.bootstrap()
self.log.info("Node %s is live " % new_guy.name)
#inform all
self.inject_hosts_files()
def remove_nodes(self, count=1):
"""
Removes a node from the cassandra cluster. Refreshes the hosts in all nodes
:return:
"""
for i in range(count):
dead_guy = self.all_nodes.pop()
self.log.info("Removing node %s" % dead_guy.name)
dead_guy.decommission()
self.log.info("Client %s is removed" % dead_guy.name)
self.save_cluster()
self.inject_hosts_files()
    def run(self, params):
        """
        Bootstrap the client nodes, point them at the given servers and start
        one of three workloads: 'stress', 'sinusoid' or 'load'.
        :param params: dict with 'type', 'servers' and workload-specific keys
        """
        self.bootstrap_cluster()
        run_type = params['type']
        servers = params['servers']
        self.update_hostfiles(servers)
        #choose type of run and do necessary actions
        if run_type=='stress':
            for c in self.all_nodes:
                load_command = get_script_text(self.cluster_name, self.node_type, "run")
                self.log.info("running stress workload on %s" % c.name)
                c.run_command(load_command, silent=True)
        elif run_type == 'sinusoid':
            # NOTE(review): 'global env_vars' is a no-op here — env_vars is
            # only read, never rebound.
            global env_vars
            # spread the requested target/offset evenly across the clients
            target = int(params['target']) / len(self.all_nodes)
            offset = int(params['offset']) / len(self.all_nodes)
            period = 60*int(params['period'])
            threads = int(env_vars['client_threads'])
            for c in self.all_nodes:
                load_command = get_script_text(self.cluster_name, self.node_type, "run_sin") % (target, offset, period, threads)
                #load_command += get_script_text(cluster_name, "", "run_sin") % (target, offset, period)
                self.log.info("running sinusoid on %s" % c.name)
                c.run_command(load_command, silent=True)
        elif run_type == 'load':
            record_count = int(params['records'])
            start = 0
            # each client loads an equal contiguous slice of the key space
            step = record_count/len(self.all_nodes)
            threads = []
            for c in self.all_nodes:
                #load_command = get_script_text(self.cluster_name, self.node_type, "load") % (str(record_count), str(step), str(start))
                load_command = get_script_text(self.cluster_name, self.node_type, "load").format(record_count, step, start)
                #load_command += get_script_text(cluster_name, "", "load") % (str(record_count), str(step), str(start))
                self.log.info("running load phase on %s for %d of %d records" % (c.name, step, record_count))
                t = Thread(target=c.run_command, args=(load_command,) )
                threads.append(t)
                t.start()
                start += step
            self.log.info("waiting for load phase to finish in clients")
            for t in threads:
                t.join()
            self.log.info("load finished")
    def destroy_all(self):
        """
        Destroys all the VMs in the cluster (not the orchestrator)
        """
        self.log.info("Destroying the %s cluster" % self.cluster_name)
        for n in self.all_nodes:
            n.destroy()
        # forget the saved cluster state so the next run starts clean
        remove(self.save_file)
# Module-level singleton: importing this module immediately resumes any
# previously created clients cluster from the IaaS and the save file.
my_Clients = Clients()
# always runs
my_Clients.resume_cluster()
| {
"repo_name": "cmantas/tiramola_v3",
"path": "ClientsCluster.py",
"copies": "1",
"size": "8124",
"license": "apache-2.0",
"hash": 3529025877205748700,
"line_mean": 36.4377880184,
"line_max": 135,
"alpha_frac": 0.5738552437,
"autogenerated": false,
"ratio": 3.742054352832796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9793267635132497,
"avg_score": 0.004528392280059928,
"num_lines": 217
} |
__author__ = 'cmantas'
from CassandraNode import CassandraNode as Node, get_script_text
from CassandraNode import get_all_nodes
from VM import Timer, get_all_vms
from time import sleep
from json import loads, dumps
from os import remove
from os.path import isfile
orchestrator = None
seeds = [] # the seed node(s) of the casssandra cluster !!! ONLY ONE IS SUPPORTED !!!
nodes = [] # the rest of the nodes of the Cassandra cluster
clients = [] # the clients of the cluster
stash = []
seed_name = "cassandra_seednode"
node_name = "cassandra_node_"
client_name = "cassandra_client_"
save_file = "files/saved_cluster.json"
def create_cluster(worker_count=0, client_count=0):
"""
Creates a Cassandra Cluster with a single Seed Node and 'worker_count' other nodes
:param worker_count: the number of the nodes to create-apart from the seednode
"""
#create the seed node
seeds.append(Node(seed_name, node_type="SEED", create=True))
#create the rest of the nodes
for i in range(worker_count):
name = node_name+str(len(nodes)+1)
nodes.append(Node(name, create=True))
for i in range(client_count):
name = client_name+str(len(clients)+1)
clients.append(Node(name, node_type="CLIENT", create=True))
#wait until everybody is ready
wait_everybody()
inject_hosts_files()
print "CLUSTER: Every node is ready for SSH"
save_cluster()
def wait_everybody():
for i in seeds + nodes + clients:
i.vm.wait_ready()
def bootstrap_cluster():
""" Runs the necessary boostrap commnands to each of the Seed Node and the other nodes """
print "CLUSTER: Running bootstrap scripts"
#bootstrap the seed node
seeds[0].bootstrap()
#bootstrap the rest of the nodes
for n in nodes+clients:
n.bootstrap(params={"seednode": seeds[0].vm.get_private_addr()})
print "CLUSTER: READY!!"
def resume_cluster():
"""
Re-Creates the cluster representation based on the VMs that already exist on the IaaS
:param worker_count the number of the nodes to include in the cluster
"""
find_orhcestrator()
if not isfile(save_file):
print "CLUSTER: No existing created cluster"
return
saved_cluster = loads(open(save_file, 'r').read())
saved_nodes = saved_cluster['nodes']
nodes[:] = []
seeds[:] = []
in_seeds, in_nodes, in_clients = get_all_nodes(check_active=True)
#check that all saved nodes actually exist and exit if not\
for n in saved_nodes:
if n not in [i.name for i in in_nodes]:
print "CLUSTER: ERROR, node %s does actually exist in the cloud, re-create the cluster" % n
remove(save_file)
exit(-1)
for n in in_nodes:
if n.name not in saved_nodes: in_nodes.remove(n)
nodes.extend(in_nodes)
seeds.extend(in_seeds)
clients.extend(in_clients)
def save_cluster():
    """Persist the current cluster topology (seeds/nodes/clients names) to
    'save_file' as JSON."""
    cluster = dict()
    cluster["seeds"] = [s.name for s in seeds]
    cluster["nodes"] = [n.name for n in nodes]
    cluster["clients"] = [c.name for c in clients]
    cluster['note'] = "only the nodes are acually used"
    string = dumps(cluster, indent=3)
    # Fix: close the file handle deterministically (the original leaked it).
    with open(save_file, 'w+') as f:
        f.write(string)
def kill_clients():
print "CLUSTER: Killing clients"
for c in clients: c.kill()
def kill_nodes():
print "CLUSTER: Killing cassandra nodes"
for n in seeds+nodes+stash:
n.kill()
def kill_all():
# kill 'em all
kill_clients()
kill_nodes()
def inject_hosts_files():
print "CLUSTER: Injectin host files"
hosts = dict()
for i in seeds+nodes + clients:
hosts[i.name] = i.vm.get_private_addr()
#add the host names to etc/hosts
orchestrator.inject_hostnames(hosts)
for i in seeds+nodes+clients:
i.vm.inject_hostnames(hosts)
seeds[0].vm.run_command("service ganglia-monitor restart")
orchestrator.run_command("service ganglia-monitor restart")
def find_orhcestrator():
    """Locate the VM whose name contains 'orchestrator' and bind it to the
    module-level 'orchestrator' global.

    NOTE(review): the function name has a typo ('orhcestrator'), but it is
    called by this exact name at module level, so renaming would break that
    caller.
    """
    vms = get_all_vms()
    for vm in vms:
        if "orchestrator" in vm.name:
            global orchestrator
            orchestrator = vm
            return
def add_node():
    """Add a cassandra node: reuse a stashed (previously decommissioned) VM
    if one is available, otherwise create a fresh one; then re-inject host
    files, bootstrap it and persist the topology."""
    name = node_name+str(len(nodes)+1)
    print "CLUSTER: Adding node %s" % name
    if not len(stash) == 0:
        # NOTE(review): a reused stash node keeps its *old* VM name, so the
        # 'name' computed above (and printed below) may not match the VM.
        new_guy = stash[0]
        del stash[0]
    else:
        new_guy = Node(name, create=True)
    nodes.append(new_guy)
    new_guy.vm.wait_ready()
    #inject host files to everybody
    inject_hosts_files()
    new_guy.bootstrap()
    print "CLUSTER: Node %s is live " % (name)
    save_cluster()
def remove_node():
dead_guy = nodes[-1]
print "CLUSTER: Removing node %s" % dead_guy
dead_guy.decommission()
stash[:] = [nodes.pop()] + stash
print "CLUSTER: Node %s is removed" % dead_guy
save_cluster()
def run_load_phase(record_count):
#first inject the hosts file
host_text = ""
for h in seeds+nodes: host_text += h.vm.get_private_addr()+"\n"
start = 0
step = record_count/len(clients)
for c in clients:
load_command = "echo '%s' > /opt/hosts;" % host_text
load_command += get_script_text("ycsb_load") % (str(record_count), str(step), str(start), c.name[-1:])
print "CLUSTER: running load phase on %s" % c.name
c.vm.run_command(load_command, silent=True)
start += step
def run_sinusoid(target_total, offset_total, period):
target = target_total / len(clients)
offset = offset_total / len(clients)
#first inject the hosts file
host_text = ""
for h in seeds+nodes: host_text += h.vm.get_private_addr()+"\n"
start = 0
for c in clients:
load_command = "echo '%s' > /opt/hosts;" % host_text
load_command += get_script_text("ycsb_run_sin") % (target, offset, period, c.name[-1:])
print "CLUSTER: running workload on %s" % c.name
c.vm.run_command(load_command, silent=True)
def destroy_all():
for n in seeds+nodes+stash+clients:
n.vm.destroy()
remove(save_file)
def cluster_info():
    """
    returns the available nodes and their addresses
    :return: human-readable multi-line string, orchestrator first, one
             'name : public_ip , private_ip' line per node
    """
    rv = orchestrator.name+ "\t:\t"+orchestrator.get_public_addr()+ "\t,\t"+ orchestrator.get_private_addr()+ "\n"
    for n in seeds+nodes+clients:
        rv += n.name+ "\t:\t"+n.vm.get_public_addr()+"\t,\t"+n.vm.get_private_addr()+ "\n"
    return rv
#=============================== MAIN ==========================
#create_cluster(worker_count=1, client_count=2)
#resume active cluster
resume_cluster()
#kill all previous processes
# kill_all()
# #bootstrap cluster from scratch
# bootstrap_cluster()
# run_load_phase(100000)
# print "waiting 20 seconds for load phase to finish"
# sleep(20)
# run_sinusoid(target_total=200, offset_total=100, period=60)
# print "waiting to add node"
# sleep(30)
# add_node()
# print "waiting to remove node"
# sleep(30)
# remove_node()
#
# print "FINISED (%d seconds)" % timer.stop()
| {
"repo_name": "cmantas/cluster_python_tool",
"path": "CassandraCluster.py",
"copies": "1",
"size": "6930",
"license": "apache-2.0",
"hash": -6097949900996507000,
"line_mean": 28.6153846154,
"line_max": 114,
"alpha_frac": 0.6349206349,
"autogenerated": false,
"ratio": 3.3397590361445784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4474679671044578,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from kamaki.clients import ClientError
from kamaki.clients.astakos import AstakosClient
from kamaki.clients.astakos import getLogger
from kamaki.clients.cyclades import CycladesClient, CycladesNetworkClient
#http://www.synnefo.org/docs/kamaki/latest/developers/code.html#the-client-api-ref
from sys import stderr
from os.path import abspath
from base64 import b64encode
from lib.persistance_module import *
from logging import ERROR
USER = "cmantas"
#retrieve the credentials for the specified users
AUTHENTICATION_URL, TOKEN = get_credentials(USER)
synnefo_user = AstakosClient(AUTHENTICATION_URL, TOKEN)
synnefo_user.logger.setLevel(ERROR)
getLogger().setLevel(ERROR)
cyclades_endpoints = synnefo_user.get_service_endpoints("compute")
CYCLADES_URL = cyclades_endpoints['publicURL']
cyclades_client = CycladesClient(CYCLADES_URL, TOKEN)
cyclades_net_client = CycladesNetworkClient(CYCLADES_URL, TOKEN)
pub_keys_path = 'keys/just_a_key.pub'
priv_keys_path = 'keys/just_a_key'
#creates a "personality"
def personality(username):
    """
    Build the kamaki 'personality' list that injects the public SSH key and
    an SSH client config into a newly created VM.

    :param username: unused; kept for interface compatibility
    NOTE: the key is read from the module-level pub_keys_path.
    """
    personality = []
    with open(abspath(pub_keys_path)) as f:
        personality.append(dict(
            contents=b64encode(f.read()),
            path='/root/.ssh/authorized_keys',
            # Fix: 0o600 — the legacy literal 0600 is a SyntaxError on
            # Python 3; the 0o form works on Python 2.6+ as well.
            owner='root', group='root', mode=0o600))
    personality.append(dict(
        contents=b64encode('StrictHostKeyChecking no'),
        path='/root/.ssh/config',
        owner='root', group='root', mode=0o600))
    return personality
def get_addreses(vm_id):
    """Return a list of {'version', 'ip', 'type'} dicts, one per address on
    every NIC of the given VM. (Name typo 'addreses' kept — it is the
    public interface.)"""
    nics = cyclades_client.get_server_nics(vm_id)
    addresses = nics["addresses"]
    rv = []
    for a in addresses:
        for addr in addresses[a]:
            rv.append({'version': addr['version'], 'ip': addr['addr'], 'type': addr['OS-EXT-IPS:type']})
    # NOTE(review): 'kati' is unused and this extra API round-trip looks like
    # leftover debugging — confirm before removing.
    kati = cyclades_client.servers_ips_get(vm_id)
    return rv
def create_vm(name, flavor_id, image_id, IPv4, logger):
"""
Creates this VM in the okeanos through kamaki
"""
networks = [{'uuid': env_vars['cassandra_network_id']}]
if IPv4: networks.append({'uuid': 2216})
vm_id = -1
try:
logger.info("creating flavor %d, image %s" % (flavor_id, image_id))
my_dict = cyclades_client.create_server(name, flavor_id, image_id, personality=personality('root'),
networks=networks)
vm_id = my_dict['id']
except ClientError as e:
logger.error("failed to create server with kamaki")
logger.error(e)
# print('Error: %s' % e)
# if e.status:
# print('- error code: %s' % e.status)
# if e.details:
# for detail in e.details:
# print('- %s' % detail)
raise Exception("Failed creating server")
return vm_id
def get_vm_status(vm_id):
return cyclades_client.wait_server(vm_id)
def shutdown_vm(vm_id):
cyclades_client.shutdown_server(vm_id)
def startup_vm(vm_id):
    """Start a stopped VM and report whether it reached the ACTIVE state."""
    cyclades_client.start_server(vm_id)
    return cyclades_client.wait_server(vm_id) == "ACTIVE"
def destroy_vm(vm_id):
cyclades_client.delete_server(vm_id)
def get_vm_details(vm_id):
    """Return the basic details of a VM as a dict with keys
    'name', 'id', 'flavor_id' and 'image_id'."""
    details = cyclades_client.get_server_details(vm_id)
    return {
        'name': details['name'],
        'id': details['id'],
        'flavor_id': details['flavor']['id'],
        'image_id': details['image']['id'],
    }
def get_all_vm_ids():
    """Return the ids of all VMs visible to the cyclades client."""
    return [server['id'] for server in cyclades_client.list_servers()]
| {
"repo_name": "cmantas/tiramola_v3",
"path": "lib/connector_okeanos.py",
"copies": "1",
"size": "3840",
"license": "apache-2.0",
"hash": -2107679832938864600,
"line_mean": 30.7355371901,
"line_max": 107,
"alpha_frac": 0.653125,
"autogenerated": false,
"ratio": 3.194675540765391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4347800540765391,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from kamaki.clients import ClientError
from kamaki.clients.astakos import AstakosClient
from kamaki.clients.astakos import getLogger #not working fuck yeah
# okeanos bullshit
#from kamaki.clients.astakos import CachedAstakosClient as AstakosClient
from kamaki.clients.cyclades import CycladesClient, CycladesNetworkClient
#http://www.synnefo.org/docs/kamaki/latest/developers/code.html#the-client-api-ref
from sys import stderr
from os.path import abspath
from base64 import b64encode
from lib.persistance_module import *
from logging import ERROR
# the user whose stored credentials are used for every IaaS call below
USER = "cmantas"
#retrieve the credentials for the specified users
AUTHENTICATION_URL, TOKEN = get_credentials(USER)
# authenticate against Astakos and silence its chatty loggers
synnefo_user = AstakosClient(AUTHENTICATION_URL, TOKEN)
synnefo_user.logger.setLevel(ERROR)
getLogger().setLevel(ERROR)
# discover the compute endpoint and build the cyclades clients from it
cyclades_endpoints = synnefo_user.get_service_endpoints("compute")
CYCLADES_URL = cyclades_endpoints['publicURL']
cyclades_client = CycladesClient(CYCLADES_URL, TOKEN)
cyclades_net_client = CycladesNetworkClient(CYCLADES_URL, TOKEN)
# key pair injected into newly created VMs (paths relative to the CWD)
pub_keys_path = 'keys/just_a_key.pub'
priv_keys_path = 'keys/just_a_key'
#creates a "personality"
def personality(username):
    """
    Build the kamaki "personality" list that seeds a new VM with SSH access.

    Installs the module-level public key (pub_keys_path) into root's
    authorized_keys and disables StrictHostKeyChecking for root.
    NOTE(review): the 'username' argument is never used -- files are always
    installed for root; confirm whether per-user support was intended.
    :return: a list of file-injection dicts as expected by create_server
    """
    personality = []
    with open(abspath(pub_keys_path)) as f:
        personality.append(dict(
            contents=b64encode(f.read()),
            path='/root/.ssh/authorized_keys',
            owner='root', group='root', mode=0600))
    personality.append(dict(
        contents=b64encode('StrictHostKeyChecking no'),
        path='/root/.ssh/config',
        owner='root', group='root', mode=0600))
    return personality
def get_addreses(vm_id):
    """
    Return the VM's network interfaces as a list of dicts with the
    'version' (4/6), 'ip' and 'type' keys.
    """
    nics = cyclades_client.get_server_nics(vm_id)
    addresses = nics["addresses"]
    rv = []
    for net in addresses:
        for addr in addresses[net]:
            rv.append({'version': addr['version'], 'ip': addr['addr'], 'type': addr['OS-EXT-IPS:type']})
    # dropped the leftover debug call to servers_ips_get(): its result
    # ('kati') was never used and cost an extra round-trip to the IaaS
    return rv
def create_vm(name, flavor_id, image_id, log_path):
    """
    Creates this VM in the okeanos through kamaki
    :param name: display name of the new server
    :param flavor_id: IaaS flavor to use
    :param image_id: IaaS image to boot from
    :param log_path: if truthy, the raw server dict is dumped there as JSON
    :return: the id of the newly created server
    :raises ClientError: re-raised after a message when kamaki fails
    """
    try:
        # attach the VM to the pre-configured private network
        net_id = env_vars['cassandra_network_id']
        my_dict = cyclades_client.create_server(name, flavor_id, image_id, personality=personality('root'),
                                                networks=[{'uuid': net_id}])
        vm_id = my_dict['id']
    except ClientError:
        stderr.write('Failed while creating server %s' % name)
        raise
    if log_path:
        # keep the full server description around for debugging
        with open(abspath(log_path), 'w+') as f:
            from json import dump
            dump(my_dict, f, indent=2)
    return vm_id
def get_vm_status(vm_id):
    """Return the VM's status string as reported by the cyclades client.

    wait_server blocks until the IaaS reports a settled state for the server.
    """
    return cyclades_client.wait_server(vm_id)
def shutdown_vm(vm_id):
    """Ask the IaaS to shut the given server down (does not wait for completion)."""
    cyclades_client.shutdown_server(vm_id)
def startup_vm(vm_id):
    """Start the VM and wait for its state to settle.

    :param vm_id: the IaaS id of the server to start
    :return: True iff the VM reaches the ACTIVE state
    """
    # the response of start_server was assigned to an unused local before;
    # wait_server yields the state we actually care about
    cyclades_client.start_server(vm_id)
    new_status = cyclades_client.wait_server(vm_id)
    return new_status == "ACTIVE"
def destroy_vm(vm_id):
    """Ask the IaaS to delete the given server (does not wait for completion)."""
    cyclades_client.delete_server(vm_id)
def get_vm_details(vm_id):
    """Fetch the server's details and return a flat dict with the
    'name', 'id', 'flavor_id' and 'image_id' keys."""
    details = cyclades_client.get_server_details(vm_id)
    return {
        'name': details['name'],
        'id': details['id'],
        'flavor_id': details['flavor']['id'],
        'image_id': details['image']['id'],
    }
def get_all_vm_ids():
    """Return a list with the ids of all the servers visible to this account."""
    # comprehension instead of the manual append loop
    return [server['id'] for server in cyclades_client.list_servers()]
| {
"repo_name": "cmantas/cluster_python_tool",
"path": "lib/connector_okeanos.py",
"copies": "1",
"size": "3683",
"license": "apache-2.0",
"hash": -2739490330299988000,
"line_mean": 30.2118644068,
"line_max": 107,
"alpha_frac": 0.6679337497,
"autogenerated": false,
"ratio": 3.1586620926243567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43265958423243567,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from lib.tiramola_logging import get_logger
from multiprocessing import Process
from lib.persistance_module import get_script_text, home, env_vars
class Cluster(object):
    """Generic representation of a set of cooperating VM Nodes.

    Subclass-friendly base: keeps the node list, runs shell scripts on the
    nodes (optionally in parallel via multiprocessing) and offers basic
    lifecycle helpers.
    """
    # the logger for this file
    log = get_logger('CLUSTER', 'DEBUG', logfile=home+'files/logs/Coordinator.log')

    def __init__(self):
        # the Node instances that belong to this cluster
        self.all_nodes = []
        self.log = Cluster.log
        # the name of the cluster is used as a prefix for the VM names
        self.cluster_name = "cluster"
        pass

    @staticmethod
    def wait_proc(proc, node, timeout, log=None):
        """
        Waits for a process to finish running for a given timeout and throws an exception if not finished
        :param proc: the multiprocessing.Process to join
        :param node: the Node the process works on (used for messages only)
        :param timeout: seconds to wait before giving up
        :param log: optional logger for progress/error messages
        :raises Exception: if the process is still alive after the timeout
        """
        proc.join(timeout)
        #check if it has not finished yet fail if so
        if proc.is_alive():
            if not log is None:
                log.error("Timeout occurred for process")
            proc.terminate()
            raise Exception("Script timed out for "+node.name)
        elif not log is None: log.debug(node.name+" DONE")

    @staticmethod
    def run_script(script_content, nodes, serial=True, timeout=600, log=None):
        """
        Runs a script to the specified VMs
        :param script_content: the shell script text to run on each node
        :param nodes: the Node instances to run the script on
        :param serial: if True wait for each node before starting the next
        :param timeout: per-node timeout in seconds
        :param log: optional logger
        :return: None
        """
        if not log is None:
            log.info('Running a script to %d nodes' % len(nodes))
        procs = []
        #start the procs that add the nodes
        for node in nodes:
            p = Process(target=node.run_command, args=(script_content,))
            procs.append(p)
            p.start()
            if serial:
                # if adding in serial, wait each proc
                if not log is None:log.debug("waiting for node #"+node.name)
                Cluster.wait_proc(p, node, timeout)
        if not serial:
            #wait for all the procs to finish in parallel
            if not log is None:log.debug("Waiting for all the procs to finish")
            for i in range(len(nodes)):
                Cluster.wait_proc(procs[i], nodes[i], timeout)
        if not log is None: log.info("Finished running script")

    def run_to_all(self, script_content, serial=True, timeout=600):
        """
        Runs a script to all the nodes in the cluster
        :param script_content: the shell script text to run
        :param serial: if True run one node at a time
        :param timeout: per-node timeout in seconds
        """
        self.run_script(script_content, self.all_nodes, serial, timeout, self.log)

    def wait_everybody(self):
        """
        Waits for all the Nodes in the cluster to be SSH-able
        """
        self.log.info('Waiting for SSH on all nodes')
        for i in self.all_nodes:
            i.wait_ready()

    def bootstrap_cluster(self):
        """
        Runs the necessary boostrap commnands to each of the Seed Node and the other nodes,
        then re-injects the /etc/hosts mapping so the nodes see each other by name.
        """
        for n in self.all_nodes:
            n.bootstrap()
        self.inject_hosts_files()

    def kill_nodes(self):
        """
        Runs the kill scripts for all the nodes in the cluster
        """
        self.log.info("Killing nodes")
        for n in self.all_nodes:
            n.kill()

    def update_hostfiles(self, servers):
        # push an updated hostname list to /opt/hosts on every node
        # (used by the YCSB clients); can be disabled via env_vars
        if not env_vars["update_hostfiles"]:
            self.log.info("Not updtading ycsb client host files")
            return
        self.log.info("updating hostfiles")
        # generate ycsb-specific hosts file text
        host_text = ""
        # the seed alias must not appear in the client host list
        if "cassandra_seednode" in servers.keys(): del servers["cassandra_seednode"]
        #generate the "hosts" text for YCSB
        for key, value in servers.iteritems(): host_text += value+"\n"
        host_text = host_text[:-1]  # remove trailing EOL
        #DEBUG keep just one host
        #host_text = servers["cassandra_node_01"]
        command = "echo '%s' > /opt/hosts;" % host_text
        self.run_script(command, self.all_nodes, serial=False)

    def get_hosts(self, string=False, private=False):
        """
        Produces a mapping of hostname-->IP for the nodes in the cluster
        :param string: if True the output is a string able to be appended in /etc/hosts
                       (NOTE(review): not implemented here -- a dict is always returned)
        :param private: if True report the private instead of the public addresses
        :return: a dict of hostnames-->IPs
        """
        hosts = dict()
        for i in self.all_nodes:
            if private:
                hosts[i.name] = i.get_private_addr()
            else:
                hosts[i.name] = i.get_public_addr()
        return hosts

    def node_count(self):
        # number of nodes currently in the cluster
        return len(self.all_nodes)

    def exists(self):
        # True iff the cluster has at least one node
        if len(self.all_nodes) == 0:
            return False
        else:
            return True
def get_monitoring_endpoint(self):
"""
returns the IP of the node that has the monitoring data we want
"""
return self.all_nodes[0].get_public_addr() | {
"repo_name": "cmantas/tiramola_v3",
"path": "lib/Cluster.py",
"copies": "1",
"size": "4957",
"license": "apache-2.0",
"hash": -8607584156181632000,
"line_mean": 30.7820512821,
"line_max": 105,
"alpha_frac": 0.5757514626,
"autogenerated": false,
"ratio": 4.056464811783961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004879383469206129,
"num_lines": 156
} |
__author__ = 'cmantas'
from Node import Node
from VM import Timer
from json import loads, dumps
from os import remove
from os.path import isfile
from lib.persistance_module import get_script_text, env_vars
from lib.tiramola_logging import get_logger
from multiprocessing import Process
from lib.Cluster import Cluster
orchestrator = None # the VM to which the others report to
seeds = [] # the seed node(s) of the casssandra cluster !!! ONLY ONE IS SUPPORTED !!!
nodes = [] # the rest of the nodes of the Cassandra cluster
stash = [] # list of the Nodes that are available (active) but not used
# the name of the cluster is used as a prefix for the VM names
cluster_name = env_vars['active_cluster_name']
# the save file for saving/reloading the active cluster
save_file = "files/saved_%s_cluster.json" % cluster_name
# the flavor and image for the VMs used int the cluster
Node.flavor = env_vars["default_flavor"]
Node.image = env_vars["cassandra_base_image"]
log = get_logger('CASSANDRA', 'DEBUG', logfile='files/logs/Coordinator.log')
def create_cluster(worker_count=0):
    """
    Creates a Cassandra Cluster with a single Seed Node and 'worker_count' other nodes
    :param worker_count: the number of the nodes to create-apart from the seednode
    """
    global nodes, stash, seeds
    nodes = []
    seeds = []
    stash = []
    #create the seed node
    seeds.append(Node(cluster_name, node_type="seed", number=0, create=True, IPv4=True))
    #create the rest of the nodes; they start in the stash (created but unused)
    for i in range(worker_count):
        stash.append(Node(cluster_name, node_type="node", number="%02d" % (i+1), create=True, IPv4=True))
    #save the cluster to file
    save_cluster()
    #wait until everybody is ready
    # NOTE(review): Cluster.wait_nodes is not defined on the Cluster class
    # visible in this snapshot -- confirm the helper exists
    Cluster.wait_nodes(seeds+nodes)
    find_orchestrator()
    inject_hosts_files()
    log.info('Every node is ready for SSH')
def wait_everybody():
    """Block until every seed and worker node in the cluster accepts SSH."""
    log.info('Waiting for SSH on all nodes')
    for node in seeds + nodes:
        node.wait_ready()
def bootstrap_cluster(used):
    """
    Runs the necessary boostrap commnands to each of the Seed Node and the other nodes
    :param used: total number of nodes (including the seed) that should be active
    """
    global stash, nodes
    #drecrement used to accommodate for the seednode
    used -= 1
    # promote the first 'used' stashed nodes to active cluster members
    nodes = stash[:used]
    stash = stash[used:]
    inject_hosts_files()
    log.info("Running bootstrap scripts")
    #bootstrap the seed node
    seeds[0].bootstrap()
    #bootstrap the rest of the nodes
    for n in nodes:
        n.bootstrap()
    save_cluster()
    log.info("READY!!")
def find_orchestrator():
    """Scan the active IaaS VMs and remember the one whose name marks it as the orchestrator."""
    global orchestrator
    for candidate in Node.get_all_nodes(check_active=True):
        if "orchestrator" in candidate.name:
            orchestrator = candidate
            return
def resume_cluster():
    """
    Re-loads the cluster representation based on the VMs pre-existing on the IaaS and the 'save_file'

    Exits the process (after deleting the save file) if the save file
    references a VM that no longer exists on the cloud.
    """
    log.info("Loading info from the IaaS")
    global nodes, seeds, stash
    if not isfile(save_file):
        log.info("No existing created cluster")
        return
    # read the saved cluster description; 'with' closes the handle that the
    # previous loads(open(...).read()) form leaked
    with open(save_file, 'r') as f:
        saved_cluster = loads(f.read())
    saved_nodes = list(set(saved_cluster['nodes']))
    saved_seeds = list(set(saved_cluster['seeds']))
    saved_stash = list(set(saved_cluster['stash']))
    nodes[:] = []
    seeds[:] = []
    in_nodes = Node.get_all_nodes(check_active=True)
    #check that all saved nodes actually exist
    for n in saved_nodes:
        if n not in [i.name for i in in_nodes]:
            # fixed message: the saved node is MISSING from the cloud
            log.error("node %s does not actually exist in the cloud, re-create the cluster" % n)
            remove(save_file)
            exit(-1)
    for n in in_nodes:
        if n.name not in saved_nodes+saved_seeds:
            # not an active member: keep it stashed / recognise the orchestrator
            if n.name in saved_stash:
                stash.append(n)
            if "orchestrator" in n.name:
                global orchestrator
                orchestrator = n
            continue
        else:
            if n.type == "seed":
                seeds.append(n)
            elif n.type == "node": nodes.append(n)
    #sort nodes by name
    nodes.sort(key=lambda x: x.name)
    stash.sort(key=lambda x: x.name)
def save_cluster():
    """
    Creates/Saves the 'save_file' describing the current seeds/nodes/stash
    :return: None
    """
    cluster = dict()
    cluster["seeds"] = [s.name for s in seeds]
    cluster["nodes"] = [n.name for n in nodes]
    cluster["stash"] = [c.name for c in stash]
    string = dumps(cluster, indent=3)
    # context manager so the handle is flushed and closed -- the original
    # opened the file and never closed it
    with open(save_file, 'w+') as f:
        f.write(string)
def kill_nodes():
    """
    Runs the kill scripts for all the nodes in the cluster
    (one process per node, in parallel) and moves every worker to the stash.
    """
    log.info("Killing cassandra nodes")
    global seeds, nodes, stash
    procs = []
    # fire off one kill per node concurrently
    for n in seeds+nodes+stash:
        p=Process(target=n.kill, args=())
        procs.append(p)
        p.start()
    # all former worker nodes are now unused
    stash = nodes + stash
    nodes = []
    save_cluster()
    for p in procs:
        p.join()
def inject_hosts_files():
    """
    Creates a mapping of hostname -> IP for all the nodes in the cluster and injects it to all Nodes so that they
    know each other by hostname. Also restarts the ganglia daemons
    :return: None
    """
    log.info("Injecting host files")
    hosts = get_hosts(private=env_vars["private_network"], include_stash=True)
    #add the host names to etc/hosts
    orchestrator.inject_hostnames(hosts, delete=cluster_name)
    for i in seeds+nodes:
        i.inject_hostnames(hosts, delete=cluster_name)
    # restart monitoring so it picks up the refreshed hostnames
    seeds[0].run_command("service ganglia-monitor restart; service gmetad restart", silent=True)
    orchestrator.run_command("service ganglia-monitor restart; service gmetad restart", silent=True)
def add_one_node(stash_index):
    """
    Helper function for add_nodes: activates the stashed node at 'stash_index'
    (waits for SSH, refreshes its hosts file and bootstraps it).
    :raises Exception: if the stash is empty (creating fresh VMs is not implemented)
    """
    if not len(stash) == 0:
        new_guy = stash[stash_index]
        log.debug("Using %s from my stash" % new_guy.name)
    else:
        raise Exception("Adding a node out of stash is not implemented yet")
        #new_guy = Node(cluster_name, 'node', str(len(nodes)+1), create=True)
    new_guy.wait_ready()
    new_guy.inject_hostnames(get_hosts(private=env_vars["private_network"], include_stash=True), delete=cluster_name)
    new_guy.bootstrap()
def wait_node(proc):
    """Join 'proc' with the configured add-node timeout; raise if it is still running."""
    proc.join(env_vars['add_node_timeout'])
    # still alive after join() means the timeout expired
    if not proc.is_alive():
        return
    log.error("Timeout occurred for adding a node, exiting")
    proc.terminate()
    raise Exception("Node ADD was timed out for one node")
def add_nodes(count=1):
    """
    Adds a node to the cassandra cluster. Refreshes the hosts in all nodes
    :param count: how many stashed nodes to activate
    :return: None
    """
    log.info('Adding %d nodes' % count)
    global nodes, seeds, stash
    procs = []
    #start the procs that add the nodes
    for i in range(count):
        p = Process(target=add_one_node, args=(i,))
        procs.append(p)
        p.start()
        if (env_vars["serial_add"]):
            # if adding in serial, wait each proc
            log.info("waiting for node #"+str(i))
            wait_node(p)
    if(not env_vars["serial_add"]):
        #wait for all the procs to finish if adding in parallel
        log.debug("Waiting for all the procs to finish adding")
        for p in procs:
            wait_node(p)
    #housekeeping for the stash and nodes list
    nodes += stash[:count]
    del stash[:count]
    #save the current cluster state
    save_cluster()
    #inform all
    inject_hosts_files()
    log.info("Finished adding %d nodes" % count)
def remove_nodes(count=1):
    """
    Removes a node from the cassandra cluster. Refreshes the hosts in all nodes
    :param count: how many nodes to decommission (popped from the end of 'nodes')
    :return: None
    """
    # decommission policy: "KEEP" returns the VM to the stash; otherwise it is dropped
    action = env_vars['cassandra_decommission_action']
    for i in range(count):
        dead_guy = nodes.pop()
        log.info("Removing node %s" % dead_guy.name)
        if action == "KEEP":
            stash[:] = [dead_guy] + stash
        dead_guy.decommission()
        log.info("Node %s is removed" % dead_guy.name)
    save_cluster()
    inject_hosts_files()
def destroy_all():
    """Destroy every VM belonging to this cluster (the orchestrator survives) and drop the save file."""
    log.info("Destroying the %s cluster" % cluster_name)
    for doomed in seeds + nodes + stash:
        doomed.destroy()
    remove(save_file)
def get_hosts(string=False, private=False, include_stash=False):
    """
    Produces a mapping of hostname-->IP for the nodes in the cluster
    :param string: if True the output is a string able to be appended in /etc/hosts
                   (NOTE(review): not implemented -- a dict is always returned)
    :param private: if True report the private instead of the public addresses
    :param include_stash: if True also include the stashed (inactive) nodes
    :return: a dict of hostnames-->IPs
    """
    # choose the address accessor once instead of branching per node;
    # collapses the duplicated if/else that also appeared for the seed alias
    if private:
        addr_of = lambda n: n.get_private_addr()
    else:
        addr_of = lambda n: n.get_public_addr()
    all_nodes = seeds + nodes
    if include_stash:
        all_nodes += stash
    hosts = dict()
    for i in all_nodes:
        hosts[i.name] = addr_of(i)
    # the seed is additionally reachable under a well-known alias
    hosts['cassandra_seednode'] = addr_of(seeds[0])
    return hosts
def node_count():
    """Number of active members (seed + workers) of the cluster."""
    return len(seeds) + len(nodes)
def exists():
    """True iff the cluster currently has any active members (seed or worker)."""
    # idiomatic truthiness instead of the verbose if/else on len()
    return bool(seeds or nodes)
def get_monitoring_endpoint():
    """
    returns the IP of the node that has the monitoring data we want
    (the seed node's public address)
    """
    seed = seeds[0]
    return seed.get_public_addr()
def repair_cluster():
    """Run 'nodetool repair' for the ycsb keyspace on every active node, in parallel."""
    repair_command = "nodetool repair ycsb"
    Cluster.run_script(repair_command, seeds + nodes, serial=False)
def set_cluster_size(count):
    """Add or remove nodes until the cluster has exactly 'count' active members."""
    surplus = node_count() - count
    if surplus > 0:
        log.info("Will remove %d nodes to match cluster size: %d" % (surplus, count))
        remove_nodes(surplus)
    elif surplus < 0:
        deficit = -surplus
        log.info("Will add %d nodes to match cluster size: %d" % (deficit, count))
        add_nodes(deficit)
def compaction():
    """Run 'nodetool compact' for the ycsb keyspace on all nodes and log the duration."""
    command = "nodetool compact ycsb"
    log.info("Running compaction")
    timer = Timer.get_timer()
    Cluster.run_script(command, seeds+nodes, serial=False)
    # fixed log format string: the parenthesis was never closed
    log.info("Done compacting ({0} sec)".format(timer.stop()))
#=============================== MAIN ==========================
################ INIT actions ###########
resume_cluster()
########################################
| {
"repo_name": "cmantas/tiramola_v3",
"path": "CassandraCluster.py",
"copies": "1",
"size": "10124",
"license": "apache-2.0",
"hash": -190637696955258660,
"line_mean": 28.7764705882,
"line_max": 117,
"alpha_frac": 0.6174436981,
"autogenerated": false,
"ratio": 3.5710758377425043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46885195358425047,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from sqlite3 import connect
from tools import myplot, show
from ConsoleBackend import ConsoleBackend
import traceback
class SQLiteBackend(ConsoleBackend):
    """ConsoleBackend that also persists experiment reports in a SQLite database."""
    def __init__(self, sql_file):
        # path of the sqlite database file
        self.file=sql_file
        self.connection = connect(self.file)
        self.cursor = self.connection.cursor()
        #aliases
        self.commit= self.connection.commit
        self.execute = self.cursor.execute
    def report(self, experiment_name, **kwargs):
        """Insert one row into the 'experiment_name' table; kwargs map column->value.

        WARNING(review): the INSERT statement is built with string concatenation,
        so both the table name and the values are injectable -- only use with
        trusted, programmatic input.
        """
        # make metrics timeline a string
        if "metrics_timeline" in kwargs:
            kwargs['metrics_timeline'] = '"' + str(kwargs['metrics_timeline'] )+'"'
        key_string="( "
        value_string="( "
        for k, v in kwargs.iteritems():
            key_string += str(k) + ","
            value_string+=str(v) + ","
        # swap the trailing comma for the closing parenthesis
        key_string = key_string[:-1] + ')'
        value_string = value_string[:-1]+')'
        query = 'INSERT INTO '+ experiment_name+key_string + " VALUES "+ value_string
        #print query
        try:
            self.execute(query)
            self.commit()
        except:
            # best-effort by design: a failed report must not kill the experiment,
            # so the error is printed (with traceback) and swallowed
            print "Query failed!"
            print "query was: "+query
            print(traceback.format_exc())
    def query(self, experiment, query, tuples=True):
        """Run a raw SQL query; return a tuple of rows, or the raw cursor if tuples=False."""
        # ignoring experiment name because it is included in the query
        rows = self.execute(query)
        # return rows as tupple(list) rather than a lazy cursor
        if tuples: return tuple(rows)
        else: return rows
    @staticmethod
    def dict_factory(cursor, row):
        # map a single sqlite row to a {column_name: value} dict
        d = {}
        for idx, col in enumerate(cursor.description):
            d[col[0]] = row[idx]
        return d
    def plot_query(self, experiment, query, **kwargs):
        """Run a query, plot its columns with myplot/show and return the transposed rows."""
        rows = self.query(experiment, query)
        # transpose the rows
        rows_transposed = zip(*rows)
        # plot the result
        myplot(*rows_transposed, **kwargs)
        show()
        return rows_transposed
    def dict_query(self, experiment, query):
        """Run a query and return its rows as a list of column->value dicts."""
        # a factory from rows-->dict
        rows =tuple(self.query(experiment, query))
        # r= rows.fetchone()
        # return self.dict_factory(self.cursor, rows[0])
        return map(lambda t:self.dict_factory(self.cursor, t), rows)
    def __str__(self):
        return super(SQLiteBackend, self).__str__() + "({0})".format(self.file)
| {
"repo_name": "project-asap/IReS-Platform",
"path": "asap-tools/monitoring/lib/SQLiteBackend.py",
"copies": "1",
"size": "2352",
"license": "apache-2.0",
"hash": 5019332883007767000,
"line_mean": 28.4,
"line_max": 85,
"alpha_frac": 0.5824829932,
"autogenerated": false,
"ratio": 4.048192771084337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5130675764284337,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from sys import stderr
from os.path import exists
from os import mkdir
from scp_utils import *
import ntpath
import thread
from lib.persistance_module import env_vars
#choose the IaaS provider
infrastructure = env_vars['iaas']
if infrastructure == 'okeanos':
from lib import connector_okeanos as iaas
if infrastructure == 'openstack':
from lib import connector_eucalyptus as iaas
LOGS_DIR = "files/VM_logs"
ATTEMPT_INTERVAL = 2
class VM:
class Address:
"""
Helper class that represents an IP address
"""
def __init__(self, version, ip, in_type):
self.version = version
self.ip = ip
self.type = in_type
def __str__(self):
rv = "%s IPv%d: %s" % (self.type, self.version, self.ip)
return rv
def __init__(self, name, flavor_id, image_id, create=False, wait=False, log_path=LOGS_DIR):
"""
VM class constructor
"""
#set attributes
self.created = False
self.name = name
self.flavor_id = flavor_id
self.log_path = log_path
self.image_id = image_id
self.public_addresses = []
self.log_path = log_path
self.addresses = []
self.id = -1
if create:
self.create(wait)
def load_addresses(self):
"""
loads the IP interfaces from the IaaS
:return:
"""
addr_list = iaas.get_addreses(self.id)
for a in addr_list:
addr = self.Address(a['version'], a['ip'], a['type'])
self.addresses.append(addr)
def from_dict(self, in_dict):
"""
creates a VM from dictionary containing 'name' and 'id' reccords
"""
self.name = in_dict['name']
self.id = in_dict['id']
def create(self, wait):
print ("VM: creating '"+self.name+"'"),
if wait:
print "(sync)"
self.create_sync()
self.wait_ready()
else:
print "(async)"
thread.start_new_thread(self.create_sync, ())
def create_sync(self):
"""
Creates this VM in the underlying IaaS provider
"""
#start the timer
timer = Timer()
timer.start()
self.id = iaas.create_vm(self.name, self.flavor_id, self.image_id, LOGS_DIR+"/%s.log" % self.name)
new_status = iaas.get_vm_status(self.id)
delta = timer.stop()
print 'VM: IaaS status for "%s" is now %s (took %d sec)' % (self.name, new_status, delta )
self.created = True
self.load_addresses()
def shutdown(self):
"""
Issues the 'shutdown' command to the IaaS provider
"""
print 'VM: Shutting down "%s" (id: %d)' % (self.name, self.id)
return iaas.shutdown_vm(self.id)
def startup(self):
"""
boots up an existing VM instance in okeanos
:return: true if VM exist false if not
"""
if not self.created: return False;
print 'VM: starting up "%s" (id: %d)' % (self.name, self.id)
return iaas.startup_vm(self.id)
def destroy(self):
"""Issues the 'destory' command to the IaaS provider """
print "VM: Destroying %s" % self.name
iaas.destroy_vm(self.id)
def __str__(self):
text = ''
text += '========== VM '+self.name+" ===========\n"
text += "ID: "+str(self.id)+'\n'
text += 'host: %s\n' % self.get_host()
text += "Addresses (%s):" % len(self.addresses)
for a in self.addresses:
text += " [" + str(a) + "],"
text += "\nCloud Status: %s\n" % self.get_cloud_status()
return text
@staticmethod
def vm_from_dict(in_dict):
"""
creates a VM instance from a synnefo "server" dict
:param in_dict: "server" or "server details" dictionary from synnefo
:return: a VM instance for an existing vm
"""
vm_id, name, flavor_id, image_id = in_dict['id'], in_dict['name'], in_dict['flavor_id'], in_dict['image_id']
rv = VM(name, flavor_id, image_id)
rv.created = True
rv.id = vm_id
rv.load_addresses()
return rv
@staticmethod
def from_id(vm_id):
""" creates a VM instance from the VM id """
vm_dict = iaas.get_vm_details(vm_id)
return VM.vm_from_dict(vm_dict)
def get_cloud_status(self):
return iaas.get_vm_status(self.id)
def run_command(self, command, user='root', indent=0, prefix="\t$: ", silent=False):
"""
runs a command to this VM if it actually exists
:param command:
:param user:
:return:
"""
if not self.created:
stderr.write('this VM does not exist (yet),'
' so you cannot run commands on it')
return "ERROR"
if not silent:
print "VM: [%s] running SSH command \"%s\"" % (self.name, command)
return run_ssh_command(self.get_public_addr(), user, command, indent, prefix)
def put_files(self, files, user='root', remote_path='.', recursive=False):
"""
Puts a file or a list of files to this VM
"""
put_file_scp(self.get_host(), user, files, remote_path, recursive)
def run_files(self, files):
"""
puts a file in the VM and then runs it
:param files:
:return:
"""
self.put_files(files)
filename = ''
remote_path = ''
if not isinstance(files, (list, tuple)):
head, tail = ntpath.split(files)
filename = tail or ntpath.basename(head)
remote_path = "~/scripts/" + filename
else:
for f in files:
head, tail = ntpath.split(f)
short_fname = (tail or ntpath.basename(head))
filename += short_fname + ' '
remote_path += "~/scripts/"+short_fname+"; "
#generate the command that runs the desired scripts
command = 'chmod +x %s; ' \
'mkdir -p scripts;' \
'mv %s ~/scripts/ 2>/dev/null;' \
'%s'\
% (filename, filename, remote_path)
return self.run_command(command)
def wait_ready(self):
"""
Waits until it is able to run SSH commands on the VM or a timeout is reached
"""
success = False
attempts = 0
if not self.created:
while not self.created: sleep(3)
print "VM: [%s] waiting for SSH deamon (addr: %s)" % (self.name, self.get_public_addr())
#time to stop trying
end_time = datetime.now()+timedelta(seconds=ssh_giveup_timeout)
timer = Timer()
timer.start()
#print("VM: Trying ssh, attempt "),
while not success:
#if(attempts%5 == 0): print ("%d" % attempts),
attempts += 1
if test_ssh(self.get_public_addr(), 'root'):
success = True
else:
if datetime.now() > end_time:
break
sleep(ATTEMPT_INTERVAL)
if success: print ("VM: %s now ready" % self.name),
else: print("VM: %s FAIL to be SSH-able" % self.name),
print (" (took " + str(timer.stop())+" sec)")
return success
def get_public_addr(self):
""" Returns a publicly accessible IP address !!! for now, only checks for IPv6+fixed !!!"""
if len(self.addresses) == 0:
self.load_addresses()
for i in self.addresses:
if i.type == "fixed" and i.version == 6:
return i.ip
return None
def get_private_addr(self):
#find fixed ip
for i in self.addresses:
if i.version == 4 and i.type == "fixed":
return i.ip
def inject_hostnames(self, hostnames):
#add some default hostnames
hostnames["localhost"] = "127.0.0.1"
hostnames["ip6-localhost ip6-loopback"] = "::1"
hostnames["ip6-localnet"] = "fe00::0"
hostnames["ip6-mcastprefix"] = "ff00::0"
hostnames["ip6-allnodes"] = "ff02::1"
hostnames["ip6-allrouters"] = "ff02::2"
text=""
for host in hostnames.keys():
text += "\n%s %s" % (hostnames[host], host)
self.run_command("echo '## AUTO GENERATED #### \n%s' > /etc/hosts; echo %s >/etc/hostname" % (text, self.name), silent=True)
def get_all_vms(check_active=False):
    """
    Creates VM instances for all the VMs of the user available in the IaaS
    :param check_active: if True, VMs whose cloud status is not ACTIVE are skipped
    """
    vms = []
    for an_id in iaas.get_all_vm_ids():
        candidate = VM.vm_from_dict(iaas.get_vm_details(an_id))
        if check_active and candidate.get_cloud_status() != "ACTIVE":
            continue
        vms.append(candidate)
    return vms
if not exists(LOGS_DIR):
mkdir(LOGS_DIR)
class Timer():
    """
    Helper class that gives the ablility to measure time between events
    """
    def __init__(self):
        # True while a measurement is in progress
        self.started = False
        # epoch milliseconds at which start() was called (0 when idle)
        self.start_time = 0
    def start(self):
        """Begin a measurement; the timer must not already be running."""
        assert self.started is False, " Timer already started"
        self.started = True
        self.start_time = int(round(time() * 1000))
    def stop(self):
        """End the measurement and return the elapsed time in seconds (float)."""
        end_time = int(round(time() * 1000))
        assert self.started is True, " Timer had not been started"
        start_time = self.start_time
        self.start_time = 0
        # reset the running flag so the timer is reusable -- the original left
        # it True, which made start() assert on any second use
        self.started = False
        return float(end_time - start_time)/1000
    @staticmethod
    def get_timer():
        """Create a Timer and start it immediately."""
        timer = Timer()
        timer.start()
        return timer
"repo_name": "cmantas/cluster_python_tool",
"path": "VM.py",
"copies": "1",
"size": "9679",
"license": "apache-2.0",
"hash": -4126158221563864000,
"line_mean": 31.3745819398,
"line_max": 132,
"alpha_frac": 0.5366256845,
"autogenerated": false,
"ratio": 3.6732447817836813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4709870466283681,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from sys import stderr
from os.path import exists
from os import mkdir, remove, makedirs
from time import time, sleep
import ntpath
import thread
from lib.scp_utils import *
from lib.persistance_module import env_vars, home
from lib.tiramola_logging import get_logger
#choose the IaaS provider
infrastructure = env_vars['iaas']
if infrastructure == 'okeanos':
from lib import connector_okeanos as iaas
if infrastructure == 'openstack':
from lib import connector_eucalyptus as iaas
LOGS_DIR = home+"files/VM_logs"
ATTEMPT_INTERVAL = 2
class VM (object):
class Address:
"""
Helper class that represents an IP address
"""
def __init__(self, version, ip, in_type):
self.version = version
self.ip = ip
self.type = in_type
def __str__(self):
rv = "%s IPv%d: %s" % (self.type, self.version, self.ip)
return rv
def __init__(self, name, flavor_id, image_id, create=False, wait=False, IPv4=False, log_path=LOGS_DIR):
"""
VM class constructor
"""
#set attributes
self.created = False
self.name = name
self.flavor_id = flavor_id
self.log_path = log_path
self.image_id = image_id
self.public_addresses = []
self.addresses = []
self.id = -1
self.IPv4 = IPv4
if not exists(LOGS_DIR):
makedirs(LOGS_DIR)
self.logfile = "%s/%s.log" % (LOGS_DIR, self.name)
self.log = get_logger('[%s]' % self.name, 'INFO', logfile=self.logfile)
if create:
self.create(wait)
def load_addresses(self):
"""
loads the IP interfaces from the IaaS
:return:
"""
addr_list = iaas.get_addreses(self.id)
for a in addr_list:
addr = self.Address(a['version'], a['ip'], a['type'])
self.addresses.append(addr)
def from_dict(self, in_dict):
"""
creates a VM from dictionary containing 'name' and 'id' reccords
"""
self.name = in_dict['name']
self.id = in_dict['id']
def create(self, wait=False):
if wait:
self.log.info("creating (synchronously)")
self.create_sync()
self.wait_ready()
else:
self.log.info("creating (async)")
thread.start_new_thread(self.create_sync, ())
def create_sync(self):
"""
Creates this VM in the underlying IaaS provider
"""
#start the timer
timer = Timer()
timer.start()
try:
self.id = iaas.create_vm(self.name, self.flavor_id, self.image_id, self.IPv4, self.log)
except Exception as e:
self.log.error(e)
exit(-1)
new_status = iaas.get_vm_status(self.id)
delta = timer.stop()
if new_status == 'ERROR':
self.log.error("IaaS creation FAILED")
return
self.log.info('IaaS status is now %s (took %d sec)' % (new_status, delta ) )
self.created = True
self.load_addresses()
def shutdown(self):
"""
Issues the 'shutdown' command to the IaaS provider
"""
self.log.info('Shutting down (id: %d)' % self.id)
return iaas.shutdown_vm(self.id)
def startup(self):
"""
boots up an existing VM instance in okeanos
:return: true if VM exist false if not
"""
if not self.created: return False;
self.log.info('starting up (id: %d)' % self.id)
return iaas.startup_vm(self.id)
def destroy(self):
"""Issues the 'destory' command to the IaaS provider """
self.log.info("Destroying ")
iaas.destroy_vm(self.id)
#delete the logfile
try:
remove(self.logfile)
except: pass
def __str__(self):
text = ''
text += '========== VM '+self.name+" ===========\n"
text += "ID: "+str(self.id)+'\n'
text += "Addresses (%s):" % len(self.addresses)
for a in self.addresses:
text += " [" + str(a) + "],"
text += "\nCloud Status: %s\n" % self.get_cloud_status()
return text
@staticmethod
def vm_from_dict(in_dict):
"""
creates a VM instance from a synnefo "server" dict
:param in_dict: "server" or "server details" dictionary from synnefo
:return: a VM instance for an existing vm
"""
vm_id, name, flavor_id, image_id = in_dict['id'], in_dict['name'].strip(), in_dict['flavor_id'], in_dict['image_id']
rv = VM(name, flavor_id, image_id)
rv.created = True
rv.id = vm_id
rv.load_addresses()
return rv
@staticmethod
def from_id(vm_id):
""" creates a VM instance from the VM id """
vm_dict = iaas.get_vm_details(vm_id)
return VM.vm_from_dict(vm_dict)
def get_cloud_status(self):
return iaas.get_vm_status(self.id)
def run_command(self, command, user='root', indent=0, prefix="\t$: ", silent=False):
"""
runs a command to this VM if it actually exists
:param command:
:param user:
:return:
"""
if not self.created:
stderr.write('this VM does not exist (yet),'
' so you cannot run commands on it')
return "ERROR"
self.log.debug("running SSH command:\n\n%s\n\n" % reindent(command, 5))
rv = run_ssh_command(self.get_public_addr(), user, command, indent, prefix, logger=self.log)
if rv is not None:
self.log.debug("command returned:\n\n %s\n\n" % rv)
return rv
def put_files(self, files, user='root', remote_path='.', recursive=False):
"""
Puts a file or a list of files to this VM
"""
put_file_scp(self.get_public_addr(), user, files, remote_path, recursive)
def run_files(self, files):
"""
puts a file in the VM and then runs it
:param files:
:return:
"""
self.put_files(files)
filename = ''
remote_path = ''
if not isinstance(files, (list, tuple)):
head, tail = ntpath.split(files)
filename = tail or ntpath.basename(head)
remote_path = "~/scripts/" + filename
else:
for f in files:
head, tail = ntpath.split(f)
short_fname = (tail or ntpath.basename(head))
filename += short_fname + ' '
remote_path += "~/scripts/"+short_fname+"; "
#generate the command that runs the desired scripts
command = 'chmod +x %s; ' \
'mkdir -p scripts;' \
'mv %s ~/scripts/ 2>/dev/null;' \
'%s'\
% (filename, filename, remote_path)
return self.run_command(command)
    def wait_ready(self):
        """
        Waits until it is able to run SSH commands on the VM or a timeout is reached

        :return: True if the SSH daemon answered before the deadline, else False
        """
        success = False
        attempts = 0
        if not self.created:
            self.log.debug("Not active yet. Sleeping")
            # NOTE(review): this loop only terminates if another thread flips
            # self.created -- confirm a concurrent creator exists
            while not self.created: sleep(3)
        self.log.debug("Waiting for SSH daemon (%s)" % self.get_public_addr())
        #time to stop trying
        end_time = datetime.now()+timedelta(seconds=env_vars['ssh_giveup_timeout'])
        self.log.debug("end time:"+str(end_time))
        timer = Timer()
        timer.start()
        #self.log.info(("VM: Trying ssh, attempt "),
        while not success:
            #if(attempts%5 == 0): self.log.info( ("%d" % attempts),
            attempts += 1
            self.log.debug("ssh attempt:"+str(attempts))
            if test_ssh(self.get_public_addr(), 'root', logger=self.log):
                success = True
            else:
                # give up once the deadline has passed, otherwise pause and retry
                if datetime.now() > end_time:
                    break
                sleep(ATTEMPT_INTERVAL)
        delta = timer.stop()
        if success:
            self.log.debug("now ready (took %d sec)" % delta)
        else:
            self.log.error(" FAILED to be SSH-able (after %d sec)" % delta)
        return success
def get_public_addr(self):
""" Returns a publicly accessible IP address !!! for now, only checks for IPv6+fixed !!!"""
rv = None
if len(self.addresses) == 0:
self.load_addresses()
for i in self.addresses:
if i.type == "fixed" and i.version == 6:
rv = i.ip
#if there is a floating IP available, return this one
for i in self.addresses:
if i.type == "floating" and i.version == 4:
rv = i.ip
return rv
def get_private_addr(self):
if len(self.addresses) == 0:
self.load_addresses()
#find fixed ip
for i in self.addresses:
if i.version == 4 and i.type == "fixed":
return i.ip
def get_all_vms(check_active=False):
    """
    Creates VM instances for all the VMs of the user available in the IaaS
    :param check_active: when True, VMs whose cloud status is not "ACTIVE" are skipped
    """
    log = get_logger("VM [static]", 'INFO')
    log.debug("getting all VMs")
    result = []
    for vid in iaas.get_all_vm_ids():
        candidate = VM.vm_from_dict(iaas.get_vm_details(vid))
        if check_active and candidate.get_cloud_status() != "ACTIVE":
            continue
        result.append(candidate)
    return result
# make sure the log directory exists before any logger tries to write into it
if not exists(LOGS_DIR):
    mkdir(LOGS_DIR)
class Timer():
    """
    Helper class that gives the ablility to measure time between events
    """
    def __init__(self):
        # not running yet; start_time is kept as integer milliseconds
        self.started = False
        self.start_time = 0

    def start(self):
        """Arm the timer; it must not already be running."""
        assert self.started is False, " Timer already started"
        self.started = True
        self.start_time = int(round(time() * 1000))

    def stop(self):
        """Disarm the timer and return the elapsed time in (float) seconds."""
        stop_ms = int(round(time() * 1000))
        assert self.started is True, " Timer had not been started"
        begin_ms = self.start_time
        self.start_time = 0
        self.started = False
        return (stop_ms - begin_ms) / 1000.0

    @staticmethod
    def get_timer():
        """Convenience constructor: returns a Timer that is already running."""
        timer = Timer()
        timer.start()
return timer | {
"repo_name": "cmantas/tiramola_v3",
"path": "VM.py",
"copies": "1",
"size": "10280",
"license": "apache-2.0",
"hash": -6994011133846125000,
"line_mean": 31.128125,
"line_max": 124,
"alpha_frac": 0.5402723735,
"autogenerated": false,
"ratio": 3.7273386511965194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9756578363092192,
"avg_score": 0.002206532320865676,
"num_lines": 320
} |
__author__ = 'cmantas'
from time import sleep
import CassandraCluster as Servers
from ClientsCluster import my_Clients as Clients
from lib.persistance_module import env_vars, home
from Monitoring import MonitorVms
from new_decision_module import RLDecisionMaker as DM
from lib.tiramola_logging import get_logger
from time import time
from os import remove
from threading import Thread
####### STATIC VARS ###############
my_logger = get_logger('COORDINATOR', 'INFO', logfile=home+'files/logs/Coordinator.log')
my_logger.debug("--------- NEW RUN -----------------")
#the (pending) decision at the present moment
decision = None
# thread handle of the decision currently being implemented (None = idle)
running_process=None
#get the endpoint for the monitoring system
monitor_clients = MonitorVms(Clients.get_monitoring_endpoint())
monitor_servers = MonitorVms(Servers.get_monitoring_endpoint())
# set by the worker thread when an action fails; checked by the main loop
error = None
#check if cluster exists
if Servers.exists():
    my_logger.info( "Cluster exists using it as is")
    #make sure no workload is running
else:
    my_logger.error("Create the cluster first and then run the coordinator")
    exit(-1)
def implement_decision():
    """
    Used to asynchronously implement the decision that has been updated by the run function

    Reads the module-global `decision` ({'action': 'ADD'|'REMOVE'|..., 'count': n}),
    applies it to the server cluster and updates the decision module state.
    On failure the exception is stored in the global `error` for the main loop.
    """
    # BUG FIX: `running_process` must be declared global -- the assignment at
    # the end of this function used to create a function-local name, so the
    # module-level handle was never reset once a decision finished.
    global decision, running_process
    action = decision["action"]
    count = decision['count']
    try:
        if action == "ADD":
            decision_module.pending_action = action
            my_logger.debug("Will add %d nodes" % count)
            Servers.add_nodes(count)
            # artificially delay the decision in order to discard transient measurements
            my_logger.info("Sleeping! (artificial delay)")
            sleep(200 +env_vars['extra_decision_delay_per_node']*count)
        elif action == "REMOVE":
            decision_module.pending_action = action
            my_logger.debug("Will remove %d nodes" % count)
            Servers.remove_nodes(count)
        #not supposed to be here for pass decsion
        else:
            return
        #update the hosts files in clients
        Clients.update_hostfiles(Servers.get_hosts())
        # update the state
        decision_module.pending_action = None
        decision_module.currentState = Servers.node_count()
    except Exception as e:
        #in case the action failed, record it in the global error var
        global error
        error = e
    # mark the coordinator as idle again
    running_process = None
def check_for_error():
    """Re-raise, in the caller's thread, any error recorded by the worker thread."""
    global error
    if error is None:
        return
    my_logger.error("I detected an error in a previous action. Raising exception")
    my_logger.error("Message:" + str(error))
    # let the failed worker thread finish before propagating its exception
    running_process.join()
    raise error
def run(timeout=None):
    """
    Runs cluster with automatic decision taking
    @param timeout: the time in seconds this run should last (None = run forever)
    """
    my_logger.debug("run: Time starts now, the experiment will take %s sec" % (str(timeout)))
    # convert relative timeout to absolute time
    if not timeout is None: timeout = time() + timeout
    #set global error to None
    global error
    error = None
    # init the decision module
    global decision_module
    decision_module = DM(Servers)
    # the time interval between metrics refresh
    metrics_interval = env_vars["metric_fetch_interval"]
    # main loop that fetches metric and takes decisions
    while (timeout is None) or (time() <= timeout):
        # propagate any failure of the previous (asynchronous) action
        check_for_error()
        sleep(metrics_interval)
        # refresh the metrics
        client_metrics = monitor_clients.refreshMetrics()
        server_metrics = monitor_servers.refreshMetrics()
        #take a decision based on the new metrics
        global decision
        decision = decision_module.take_decision(client_metrics, server_metrics)
        # asynchronously implement that decision
        if decision is None or decision["action"] == "PASS":
            continue
        global running_process
        # wait for the previous action thread (if any) before starting a new one
        if not running_process is None:
            running_process.join()
        running_process = Thread(target=implement_decision, args=())
        running_process.start()
    # DONE
    # join the running_process
    if not running_process is None: running_process.join()
    my_logger.info(" run is finished")
def train():
    """
    Runs a training phase in order to collect a training set of metrics for the given cluster

    Overrides several env_vars with the "training_vars" profile, rebuilds the
    cluster from scratch, then for every cluster size between min and max runs
    one workload period and manually adds one node.
    """
    #change the gain function for training purposes
    env_vars['gain'] = '0'
    # load the training vars into the regular enviroment vars
    t_vars = env_vars["training_vars"]
    env_vars['decision_interval'] = t_vars['decision_interval']
    env_vars['period'] = t_vars['period']
    env_vars['max_cluster_size'] = t_vars['max_cluster_size']
    env_vars['min_cluster_size'] = t_vars['min_cluster_size']
    env_vars["add_nodes"] = 1
    env_vars["rem_nodes"] = 1
    env_vars["measurements_file"] = env_vars["training_file"]
    env_vars['decision_threshold'] = 0
    # remove the old measurements/training file so that it is replaced
    try:remove(env_vars["measurements_file"])
    except: pass
    # # Sanity-Check the nodecount
    # if Servers.node_count() != t_vars['min_cluster_size']:
    #     my_logger.error("TRAINING: Start training with the Minimum cluster size, %d (now:%d)" %(t_vars['min_cluster_size'], Servers.node_count()))
    #     exit()
    # start from a clean slate: kill clients and servers, then re-bootstrap
    Clients.kill_nodes()
    Servers.kill_nodes()
    Servers.bootstrap_cluster(t_vars['min_cluster_size'])
    svr_hosts = Servers.get_hosts(private=env_vars["private_network"])
    Clients.run({'type': 'load', 'servers': Servers.get_hosts(), 'records': t_vars['records']})
    #create the parameters dictionary for the training phase
    params = {'type': 'sinusoid', 'servers': svr_hosts, 'target': t_vars['target_load'],
              'offset': t_vars['offset_load'], 'period': t_vars['period']}
    # init the decision module
    global decision_module
    decision_module = DM(Servers)
    #the time interval between metrics refresh
    metrics_interval = env_vars["metric_fetch_interval"]
    # run 1 period of workload for each of the the states between min and max cluster size
    for i in range(env_vars['max_cluster_size'] - t_vars['min_cluster_size'] + 1):
        my_logger.info("iteration "+str(i))
        #run the workload with the specified params to the clients
        Clients.run(params)
        #This should only decide to add a node after a period is passed
        global decision
        #run for 1 period
        timeout = time() + 60*env_vars['period']
        while time() <= timeout:
            #fetch metrics and takes decisions
            sleep(metrics_interval)
            # refresh the metrics
            client_metrics = monitor_clients.refreshMetrics()
            server_metrics = monitor_servers.refreshMetrics()
            #only refresh metrics
            decision_module.take_decision(client_metrics, server_metrics)
        #manually add a node
        decision = {"action": "ADD", 'count':1}
        # synchronously implement that decision
        implement_decision()
    #stop the clients after one period has passed
    Clients.kill_nodes()
    my_logger.info("TRAINING DONE")
def test_vars():
    # debug helper: dump two env_vars values (Python 2 print statements)
    print env_vars['gain']
    print env_vars['max_cluster_size']
| {
"repo_name": "cmantas/tiramola_v3",
"path": "Coordinator.py",
"copies": "1",
"size": "7202",
"license": "apache-2.0",
"hash": 7686149717960657000,
"line_mean": 33.4593301435,
"line_max": 148,
"alpha_frac": 0.655234657,
"autogenerated": false,
"ratio": 4.059751972942503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5214986629942503,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from tools import *
from json import loads
# fetch the stored metrics JSON of one mahout k-means run from the results DB
ms = take_single("select metrics from mahout_kmeans_text where k=15 and documents=90300 and dimensions=53235;")[0]
mj = loads(ms)
# color cycle consumed by next() for the successive plot lines
cols = iter(["#727272", '#f1595f', '#79c36a', '#599ad3', '#f9a65a','#9e66ab','#cd7058', '#d77fb3'])
def timeline2vaslues(fieldname, metrics):
    """
    Split a [(timestamp, metrics_dict), ...] series into two parallel lists:
    the timestamps and the values of a single field.
    """
    times = [stamp for stamp, _ in metrics]
    values = [snapshot[fieldname] for _, snapshot in metrics]
    return times, values
def sum_timeline_vals(fieldnames, metrics):
    """
    Like timeline2vaslues, but each value is the sum of several fields.

    NOTE: fields whose name starts with "kbps" are cast to int *in place*
    inside the metrics dicts (behavior preserved from the original).
    """
    times = []
    values = []
    for stamp, snapshot in metrics:
        times.append(stamp)
        total = 0
        for field in fieldnames:
            if field.startswith("kbps"):
                snapshot[field] = int(snapshot[field])
            total += snapshot[field]
        values.append(total)
    return times, values
# figure()
# CPU and memory share the left axis (percentages)
fig, ax1 = plt.subplots()
times, values = timeline2vaslues("cpu", mj)
d, = ax1.plot(times, values, color=next(cols))
ax1.set_ylabel('percentage (%)')
times, values = timeline2vaslues("mem", mj)
a, = ax1.plot(times, values, color=next(cols))
# disk and network I/O go on a second y-axis (KB/s)
ax2 = ax1.twinx()
times, values = sum_timeline_vals(["kbps_read", "kbps_write"], mj)
ax2.set_ylabel("KB/s")
b, = ax2.plot(times, values, color=next(cols))
times, values = sum_timeline_vals(["net_in", "net_out"], mj)
c, = ax2.plot(times, values, color=next(cols))
plt.title("Mahout K-means Cluster Metrics")
plt.legend([d, a, b,c], ["CPU", "MEM", "Disk IO", "Net IO"], loc=3)
show() | {
"repo_name": "project-asap/IReS-Platform",
"path": "asap-tools/experiments/depricated/handler/metrics.py",
"copies": "1",
"size": "1434",
"license": "apache-2.0",
"hash": -2237060242500615200,
"line_mean": 25.0909090909,
"line_max": 115,
"alpha_frac": 0.6164574616,
"autogenerated": false,
"ratio": 2.822834645669291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8866458557077594,
"avg_score": 0.01456671003833924,
"num_lines": 55
} |
__author__ = 'cmantas'
from tools import *
def draw_kmeans_forDF(engine, minDF, list_k= [5,10,15,20]):
    """Plot one k-means curve per k value, at a fixed minDF, for the given engine."""
    caption = engine.title() + " K-means (minDF={})".format(minDF)
    for k_val in list_k:
        draw_single_kmeans(engine, k_val, minDF, title=caption, hide_minDF=True)
def draw_kmeans_forK(engine, k, list_minDF= [10,60,110,160]):
    """Plot one k-means curve per minDF value, at a fixed k, for the given engine."""
    caption = engine.title() + " K-means (K={})".format(k)
    for df in list_minDF:
        draw_single_kmeans(engine, k, df, title=caption)
k_list= [5,10,15,20];
# print list_k
# multi_graph("mahout_kmeans_text", "documents/1000", "avg(time/1000)", cond_producer("k", [5,10,15,20]), groupBy="documents", title="Mahout K-Means Documents vs Time", where="minDF=10")
draw_kmeans_forDF("mahout", 10)
figure()
draw_kmeans_forDF("spark", 10)
show()
draw_kmeans_forK("mahout", 10)
figure()
draw_kmeans_forK("weka", 10)
show()
exit()
# NOTE(review): exit() above terminates the script -- everything below this
# point is unreachable dead code (kept for reference)
# Spark Kmeans
# multi_graph("spark_kmeans_text", "avg(documents/1000)", "time/1000", cond_producer("minDF=10 and k", k_list), groupBy="documents", title="Spark Documents vs Time")
#
# exit()
# multi_graph("mahout_kmeans_text", "input_size/1048576", "time", ["k=5", "k=15", "k=15", "k=20"], groupBy="documents")
# exit()
# Weka Kmeans
# figure()
# for k in k_list:
#     draw_single_kmeans("weka", k, 10)
# minDF vs time
## K-means
k=10
figure()
draw_single_kmeans("weka", k, 10,title="K-Means WEKA, Documents vs Time")
draw_single_kmeans("weka", k, 60)
draw_single_kmeans("weka", k, 110, where_extra=" weka_tfidf.documents<5500")
draw_single_kmeans("weka", k, 160)
show()
exit() | {
"repo_name": "project-asap/IReS-Platform",
"path": "asap-tools/experiments/depricated/handler/draw_kmeans_experiments.py",
"copies": "1",
"size": "1524",
"license": "apache-2.0",
"hash": 5589209333649970000,
"line_mean": 22.1060606061,
"line_max": 186,
"alpha_frac": 0.6509186352,
"autogenerated": false,
"ratio": 2.54,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36909186352,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from tools import *
def draw_single_move(frome, toe, minDf, **kwargs):
    """
    Plot the cost of moving data from engine `frome` to engine `toe` at one
    minDF value; weka endpoints map to 'arff' in the table name.
    """
    if frome == "weka":
        table = 'arff2' + toe
    elif toe == "weka":
        table = frome + '2arff'
    else:
        table = frome + "2" + toe
    query = join_query({'table': table, 'tfidf_table': frome + "_tfidf", 'minDF': minDf})
    plot_from_query(query, **kwargs)
def draw_many_moves(frome, toe, minDf_list=[10, 60, 110, 160]):
    """Draw, in a fresh figure, one move-cost curve per minDF value."""
    figure()
    for df in minDf_list:
        opts = {
            'label': "minDF=" + str(df),
            'title': "Move " + frome.title() + " to " + toe.title(),
            'ylabel': "time (sec)",
            'xlabel': 'documents/1000',
        }
        draw_single_move(frome, toe, df, **opts)
# plot mahout->weka and mahout->spark move costs, then stop
# NOTE(review): exit() here ends the script at import time, so the
# definitions and calls further down this file never run
draw_many_moves("mahout","weka")
draw_many_moves("mahout","spark")
show()
exit()
def docs_vs_time(mover, tfidf, list, **kwargs):
    # plot document count (x, in thousands) versus time (y, seconds)
    # NOTE: the parameter `list` shadows the builtin; kept for interface compatibility
    join_multi(mover, tfidf,"documents/1000", "time/1000", list, **kwargs)
def size_vs_time(mover, tfidf, list, **kwargs):
    # plot input size (x, in MiB) versus time (y, seconds)
    join_multi(mover, tfidf,"input_size/1048576", "time/1000", list, **kwargs)
# docs_vs_time("mahout2spark", "mahout_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Mahout 2 Spark", xlabel="documents/1000", ylabel="time (sec)")
# size_vs_time("mahout2spark", "mahout_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Mahout 2 Spark", xlabel="size (MB)", ylabel="time (sec)")
#
# size_vs_time("mahout2arff", "mahout_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Mahout to arff", xlabel="size (MB)", ylabel="time (sec)")
# size_vs_time("spark2mahout", "spark_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Spark to Mahout", xlabel="size (MB)", ylabel="time (sec)")
# spark->arff is the only move still plotted here (and only if the exit() above is removed)
size_vs_time("spark2arff", "spark_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Spark to arff", xlabel="size (MB)", ylabel="time (sec)")
# # # multi_graph_query(query, cond_producer("minDF", [10, 60, 110, 160]), )
show()
# figure()
# rx, ry = query2lists("select input_size/1048576, time/1000 from mahout2spark order by input_size ")
# myplot(rx,ry)
# show()
exit()
| {
"repo_name": "project-asap/IReS-Platform",
"path": "asap-tools/experiments/depricated/handler/draw_movers.py",
"copies": "1",
"size": "2135",
"license": "apache-2.0",
"hash": 8377254932109308000,
"line_mean": 35.186440678,
"line_max": 165,
"alpha_frac": 0.631381733,
"autogenerated": false,
"ratio": 2.769130998702983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8854751987622527,
"avg_score": 0.009152148816091419,
"num_lines": 59
} |
__author__ = 'cmantas'
import datetime
from ConsoleBackend import ConsoleBackend
from lib.tools import myplot, show, mycast
from pymongo import MongoClient
def cast_dict(a):
    """
    given a dictionary of string-->string, it creates another dictionary with the same keys and the casted values of
    the original as its values values
    :param a: a dictionary of string-->string
    :return: a new dict with the same keys and mycast()-ed values
    """
    return dict((key, mycast(value)) for key, value in a.iteritems())
class MongoBackend(ConsoleBackend):
    """
    ConsoleBackend implementation backed by MongoDB.  Each experiment maps to
    a collection (named after the experiment) in the 'metrics' database, with
    one document per reported measurement.
    """

    def __init__(self, host='localhost', port=27017):
        """Connect to MongoDB; None arguments fall back to localhost:27017."""
        # set defaults again (callers may explicitly pass None)
        self.host = 'localhost' if host is None else host
        self.port = 27017 if port is None else int(port)
        self.client = MongoClient(self.host, self.port)
        # using the metrics db
        self.db = self.client.metrics

    def _get_collection(self, collection):
        # dict-style access replaces the original eval("self.db.<name>"):
        # same pymongo semantics, but safe for arbitrary collection names
        return self.db[collection]

    def query(self, experiment, query, sort=None, tupples=True):
        """Run dict_query() and strip the keys, returning only the value lists."""
        dict_result = self.dict_query(experiment, query, sort)
        return map(lambda d: d.values(), dict_result)

    @staticmethod
    def parse_query(query):
        """
        Normalize a query given as a string, a dict, or a (selection, projection)
        tuple into the (selection, projection) pair pymongo expects.
        """
        # eval query if it is string
        # NOTE(review): eval of a query *string* is kept for backwards
        # compatibility -- never feed it untrusted input
        if type(query) is str: q = eval(query)
        else: q = query
        if type(q) is tuple:
            selection = q[0]
            projection = q[1]
        elif type(q) is dict:
            selection = q
            projection = None
        else:
            raise Exception("I cannot handle that kind of query: "+str(type(q)))
        return selection,projection

    def dict_query(self, experiment, query, sort=None):
        """Run a find() for `query` and return the matching documents (as dicts)."""
        selection, projection = self.parse_query(query)
        return self.find(experiment, selection, projection, sort=sort)

    def report(self, experiment_name, **kwargs):
        """Store one measurement document (values auto-cast) for an experiment."""
        # cast the dict values into their respective types
        casted = cast_dict(kwargs)
        # using the experiment name as a collection (was eval-based before)
        self._get_collection(experiment_name).insert_one(casted)

    def find(self, experiment, selection={}, projection=None, tuples=True, sort=None):
        """Raw find() on the experiment's collection, optionally sorted/materialized."""
        # NOTE: the mutable default `selection={}` is safe here because it is
        # never mutated, only passed through to pymongo
        collection = self._get_collection(experiment)
        if sort is None:
            rows = collection.find(selection, projection)
        else:
            rows = collection.find(selection, projection).sort(sort)
        if tuples: return tuple(rows)
        else: return rows

    def aggregate(self, experiment, aggregation, tuples=True):
        """Run an aggregation pipeline on the experiment's collection."""
        collection = self._get_collection(experiment)
        rows = collection.aggregate(aggregation)
        # (an unreachable trailing `return rows` was removed here)
        if tuples: return tuple(rows)
        else: return rows

    def __str__(self):
        return "MongoBackend({0},{1})".format(self.host, self.port)

    def plot_query(self, experiment, query, sort=None, show_plot=True, **kwargs):
        """Plot the rows matched by `query`; extra kwargs go to myplot()."""
        # make sure that _id column is not returned
        selection, projection = self.parse_query(query)
        # BUG FIX: dict-only queries yield projection=None and the original
        # crashed on the item assignment below
        if projection is None:
            projection = {}
        projection['_id'] = 0
        rows = self.query(experiment, (selection,projection), sort)
        # transpose the rows
        rows_transposed = zip(*rows)
        # plot the result
        myplot(*rows_transposed, **kwargs)
        # show the plot
        if show_plot:show()

    def plot_aggregation(self, experiment, query, show_plot=True, **kwargs):
        """Plot the rows produced by an aggregation; returns the transposed rows."""
        rows = self.aggregate(experiment, query)
        # rows from dict to tuple
        rows = map(lambda d:d.values(), rows)
        # transpose the rows
        rows_transposed = zip(*rows)
        # plot the result
        myplot(*rows_transposed, **kwargs)
        # show the plot
        if show_plot:
            show()
        return rows_transposed
| {
"repo_name": "project-asap/IReS-Platform",
"path": "asap-tools/monitoring/lib/MongoBackend.py",
"copies": "1",
"size": "3929",
"license": "apache-2.0",
"hash": -3791131042456369700,
"line_mean": 29.6953125,
"line_max": 116,
"alpha_frac": 0.6105879359,
"autogenerated": false,
"ratio": 4.140147523709167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5250735459609167,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
import matplotlib.pyplot as plt
from ast import literal_eval
from time import time, sleep
from json import load, dumps
from os.path import isfile
try:
    plt.style.use('fivethirtyeight')
except:
    # print "You could get prettier graphs with matplotlib > 1.4"
    # older matplotlib has no style support; silently fall back to defaults
    pass
from matplotlib.pyplot import figure, show
def myplot(*args, **kwargs):
    """
    Thin wrapper around plt.plot that additionally understands 'title',
    'xlabel' and 'ylabel' keyword arguments, enables the grid and places
    the legend at the upper left.
    """
    # pop our extra kwargs so plt.plot never sees them; a sentinel keeps the
    # original "key present" semantics even for explicit None values
    _missing = object()
    title = kwargs.pop("title", _missing)
    if title is not _missing:
        plt.title(title)
    xlabel = kwargs.pop("xlabel", _missing)
    if xlabel is not _missing:
        plt.xlabel(xlabel)
    ylabel = kwargs.pop("ylabel", _missing)
    if ylabel is not _missing:
        plt.ylabel(ylabel)
    plt.grid(True)
    # plt.grid(which='both')
    # plt.grid(which='minor', alpha=0.2)
    plt.plot(*args, **kwargs)
    plt.legend(loc = 'upper left')
def mycast(a):
    """
    given a string, it returns its casted value to the correct type or the string itself if it can't be evaluated
    if the input is a list or a dict it recursively calls itself on the input collection's (keys and) values
    :param a: the input string
    :return: the evaluated 'casted' result
    """
    if isinstance(a, dict):
        return dict((mycast(k), mycast(v)) for k, v in a.iteritems())
    if isinstance(a, list):
        return map(mycast, a)
    try:
        return literal_eval(a)
    except:
        # not a Python literal: hand the string back unchanged
        return a
def wait_for_file(filepath, timeout):
    """ Keep waiting for a file to appear unless a timeout is reached
    :param filepath: the file to wait for
    :param timeout: seconds before giving up; on timeout the process exits
    :return: void
    """
    end_time= time() + timeout
    #wait
    # poll every 200ms until the file exists or the deadline passes
    while not isfile(filepath) and time()<end_time:
        sleep(0.2)
    # if after wait no file then trouble
    if not isfile(filepath):
        print "ERROR: waited for monitoring data file, but timed out"
        exit()
def _collect_json(metrics_file, timeout=3):
try:
# wait for the metrics file to be created (timeout secs)
if timeout: wait_for_file(metrics_file, timeout)
# collect the saved metrics from metrics file
with open(metrics_file) as f:
metrics = load(f)
return metrics
except:
#print 'Could not collect the metrics'
return {}
| {
"repo_name": "project-asap/IReS-Platform",
"path": "asap-tools/monitoring/lib/tools.py",
"copies": "1",
"size": "2356",
"license": "apache-2.0",
"hash": -8779479438703075000,
"line_mean": 27.0476190476,
"line_max": 113,
"alpha_frac": 0.6213921902,
"autogenerated": false,
"ratio": 3.787781350482315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4909173540682315,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
import sys
from CassandraCluster import *
args = sys.argv
# dispatch on the first CLI token (Python 2 print statements throughout)
if args[1] == "load_data":
    record_count = int(args[2])
    print "CLI: Loading %d records in the cluster" % record_count
    run_load_phase(record_count)
elif args[1] == "run_sinusoid":
    args = args[2:]
    # scan for "key value" pairs; note: `i += 1` has no effect on a Python
    # for-loop's iteration variable, so value tokens are also examined (harmless)
    for i in range(len(args)):
        if args[i] == "target":
            target = int(args[i+1])
            i += 1
        elif args[i] == "period":
            period = int(args[i+1])
            i += 1
        elif args[i] == "offset":
            offset = int(args[i+1])
            i += 1
    print "CLI: running sinusoid for target=%d, offset=%d, period=%d" % (target, offset, period)
    run_sinusoid(target, offset, period)
elif args[1] == "create_cluster":
    args = args[2:]
    for i in range(len(args)):
        if args[i] == "nodes":
            nodes = int(args[i+1])
            i += 1
        elif args[i] == "clients":
            clients = int(args[i+1])
            i += 1
    print "CLI: creating cluster with %d nodes and %d clients" % (nodes, clients)
    # NOTE(review): passes nodes-1 -- presumably one node is created implicitly; confirm
    create_cluster(nodes-1, clients)
elif args[1] == "kill_workload":
    kill_clients()
elif args[1] == "kill_nodes":
    kill_nodes()
elif args[1] == "cluster":
    print "\n\n=================================== CLUSTER ============================================\n"
    print cluster_info()
elif args[1] == "bootstrap_cluster":
    bootstrap_cluster()
elif args[1] == "destroy_all":
    destroy_all()
else:
    print """============== USAGE ==================
    tiramola cluster (lists all the nodes)
    tiramola create_cluster nodes 2 clients 2
    tiramola bootstrap_cluster
    tiramola load_data 100000
    tiramola run_sinusoid target 100 offset 80 period 60
    tiramola kill_workload
    tiramola destroy all"""
| {
"repo_name": "cmantas/cluster_python_tool",
"path": "cli.py",
"copies": "1",
"size": "1778",
"license": "apache-2.0",
"hash": -1967723078958664000,
"line_mean": 30.75,
"line_max": 110,
"alpha_frac": 0.5478065242,
"autogenerated": false,
"ratio": 3.3931297709923665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44409362951923664,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
import sys
from lib.tiramola_logging import get_logger
from os import remove, mkdir
from shutil import move
# raw CLI tokens; raw_args[1] is the action name, the rest are key=value options
raw_args = sys.argv
# parsed key->value options (value True for bare flags), filled by parse_args()
args = dict()
def parse_args():
    """
    Parse raw_args: the first token is the chosen action; every following
    "key=value" token is stored in the global `args` dict (bare tokens map
    to True).  Returns the action name.
    """
    chosen_function = raw_args[1]
    global args
    for token in raw_args[2:]:
        key, sep, value = token.partition("=")
        if not sep:
            # no '=' present: treat the whole token as a boolean flag
            args[token] = True
        else:
            args[key] = value
    return chosen_function
# module-wide logger shared by all CLI actions below
log = get_logger("CLI", 'INFO')
############################## AVAILABLE ACTIONS #######################################
def info():
    # print the CLI usage help (Python 2 print statement; string kept verbatim)
    print """============== USAGE ==================
    tiramola hosts
    tiramola private_hosts
    tiramola create_cluster nodes=2 clients=2
    tiramola bootstrap_cluster used=8
    tiramola load_data records=100000
    tiramola run_sinusoid target=100 offset=80 period=60 #period time in minutes
    tiramola add_nodes [count=2]
    tiramola remove_nodes [count=2]
    tiramola kill_workload
    tiramola kill_nodes
    tiramola destroy_servers
    tiramola add_clients count=2
    tiramola train
    tiramola auto_pilot time=60 #time in minutes
    tiramola set_cluster_size count=5
    tiramola watch
    """
def load_data():
    """CLI action: preload `records` rows into the cluster via the client nodes."""
    try:
        record_count = int(args["records"])
        log.info("Loading %d records in the cluster" % record_count)
        import CassandraCluster, ClientsCluster
        svr_hosts = CassandraCluster.get_hosts(private=True)
        args['type'] = 'load'
        args['servers'] = svr_hosts
        ClientsCluster.run(args)
    except KeyError as e:
        log.info("record_count requires argument %s" % e.args[0])
def run_sinusoid():
    """CLI action: start a sinusoidal workload (target/offset ops, period in minutes)."""
    try:
        global target, period, offset
        target = int(args["target"])
        # CLI period is given in minutes; the clients expect seconds
        period = 60 * int(args["period"])
        args["period"] = period
        offset = int(args["offset"])
        log.info("running sinusoid for target=%d, offset=%d, period=%d sec" % (target, offset, period))
        import CassandraCluster
        from ClientsCluster import my_Clients
        svr_hosts = CassandraCluster.get_hosts(private=False)
        args['type'] = 'sinusoid'
        args['servers'] = svr_hosts
        my_Clients.run(args)
    except KeyError as e:
        log.info("run_sinusoid requires argument %s" % e.args[0])
def run_stress():
    """CLI action: start the constant 'stress' workload on the client nodes."""
    log.info("running stress workload" )
    import CassandraCluster
    from ClientsCluster import my_Clients
    svr_hosts = CassandraCluster.get_hosts(private=False)
    params = {'type':'stress', 'servers': svr_hosts}
    my_Clients.run(params)
def create_cluster():
    """CLI action: create a server cluster with `nodes` machines."""
    try:
        nodes = int(args["nodes"])
        log.info("creating cluster with %d nodes " % nodes)
        import CassandraCluster
        # NOTE(review): passes nodes-1 -- presumably one node is created implicitly; confirm
        CassandraCluster.create_cluster(nodes-1)
    except KeyError as e:
        log.info("create_cluster requires argument %s" % e.args[0])
def create_clients():
    """CLI action: create `nodes` client machines."""
    try:
        nodes = int(args["nodes"])
        log.info("creating %d client nodes " % nodes)
        from ClientsCluster import my_Clients
        my_Clients.create_cluster(nodes)
    except KeyError as e:
        log.info("create_clients requires argument %s" % e.args[0])
def add_clients():
    """CLI action: add `count` (default 1) client nodes."""
    count = int(args['count']) if "count" in args else 1
    log.info("adding %d clients" % count)
    from ClientsCluster import my_Clients
    my_Clients.add_nodes(count)
def remove_clients():
    """CLI action: remove `count` (default 1) client nodes."""
    count = int(args['count']) if "count" in args else 1
    log.info("removing %d clients" % count)
    from ClientsCluster import my_Clients
    my_Clients.remove_nodes(count)
def kill_workload():
    """CLI action: stop the workload running on the client nodes."""
    log.info("killing workload")
    from ClientsCluster import my_Clients
    my_Clients.kill_nodes()
def kill_nodes():
    """CLI action: stop the Cassandra server nodes."""
    log.info("killing cassandra nodes")
    import CassandraCluster
    CassandraCluster.kill_nodes()
def bootstrap_cluster():
    """CLI action: bootstrap the Cassandra cluster with `used` active nodes."""
    try:
        used = int(args['used'])
        log.info('Bootstraping Cluster with %d nodes' % used)
        import CassandraCluster
        CassandraCluster.bootstrap_cluster(used)
    except KeyError as e:
        log.error("bootstrap_cluster requires argument %s" % e.args[0])
def destroy_servers():
    """CLI action: destroy all server machines."""
    import CassandraCluster
    CassandraCluster.destroy_all()
def destroy_clients():
    """CLI action: destroy all client machines."""
    from ClientsCluster import my_Clients
    my_Clients.destroy_all()
def hosts():
    """CLI action: print one "<value> <key>" line per known server/client host."""
    import CassandraCluster, ClientsCluster
    svr_hosts = CassandraCluster.get_hosts(include_stash=True)
    clnt_hosts = ClientsCluster.get_hosts()
    # merge both dicts (Python 2: dict.items() returns concatenable lists)
    hosts = dict(svr_hosts.items() + clnt_hosts.items())
    rv = ""
    for h in hosts.keys():
        rv += hosts[h] + " " + h + "\n"
    print rv
def private_hosts():
    """CLI action: like hosts(), but lists the private-network addresses."""
    import CassandraCluster
    hosts = CassandraCluster.get_hosts(include_clients=True, private=True)
    rv = ""
    for h in hosts.keys():
        rv += hosts[h] + " " + h + "\n"
    print rv
def add_nodes():
    """CLI action: add `count` (default 1) Cassandra server nodes."""
    count = int(args['count']) if "count" in args else 1
    import CassandraCluster
    CassandraCluster.add_nodes(count)
def remove_nodes():
    """CLI action: remove `count` (default 1) Cassandra server nodes."""
    count = int(args['count']) if "count" in args else 1
    import CassandraCluster
    CassandraCluster.remove_nodes(count)
def train():
    """CLI action: run the Coordinator's training routine (starts a workload itself)."""
    log.info(" Will run training routine. WARNING: will start workload automatically")
    import Coordinator
    Coordinator.train()
def auto_pilot():
    """CLI action: run the automatic provisioning loop for `time` minutes."""
    log.info("Running Tiramola Auto Provisioning super algorithm")
    global minutes, env_vars
    try:
        minutes = int(args['time'])
    except KeyError as e:
        log.error("auto_pilot requires argument %s" % e.args[0])
        return
    # Coordinator.run expects seconds
    secs = 60 * minutes
    import Coordinator
    Coordinator.run(secs)
def monitor():
    """CLI action: run the Coordinator loop for observation only."""
    from lib.persistance_module import env_vars
    log.info("simply monitoring")
    global env_vars
    # gain '0' apparently disables scaling so the loop only gathers metrics
    # (matches the "simply monitoring" log message) -- confirm
    env_vars["gain"] = '0'
    import Coordinator
    Coordinator.run()
def simulate():
    """CLI action: replay the training set through the decision maker and plot it."""
    # start from a clean measurements file
    try:
        remove("files/measurements/measurements.txt")
    except:
        pass
    from new_decision_module import RLDecisionMaker
    fsm = RLDecisionMaker("localhost", 8)
    fsm.simulate_training_set()
    from lib.draw_experiment import draw_exp
    try:
        mkdir("files/measurements/simulation/")
    except:
        pass
    # move the produced measurements into the simulation dir and draw them
    move("files/measurements/measurements.txt", "files/measurements/simulation/measurements.txt")
    draw_exp("files/measurements/simulation/measurements.txt")
def run_experiments():
    """CLI action: run the experiments described in the given `file`."""
    try:
        experiment_file = args['file']
    except KeyError as e:
        log.error("run_experiments requires argument %s" % e.args[0])
        return
    import Experiment
    Experiment.run_experiments(experiment_file)
def repair():
    """CLI action: run a repair on the Cassandra cluster."""
    import CassandraCluster
    CassandraCluster.repair_cluster()
def set_cluster_size():
    """CLI action: resize the cluster to exactly `count` nodes."""
    try:
        count = int(args['count'])
    except KeyError as e:
        log.error("set cluster size requires argument %s" % e.args[0])
        return
    import CassandraCluster
    CassandraCluster.set_cluster_size(count)
def watch():
    """CLI action: watch the "experiments" directory and run incoming experiment files."""
    import Experiment
    Experiment.watch("experiments", Experiment.run_experiments_from_string)
############################ MAIN ################################################
function = parse_args()
try:
#just call the appropriate function with eval!
eval(function+"()")
except NameError as ne:
log.error("No such action")
print str(ne)
info()
| {
"repo_name": "cmantas/tiramola_v3",
"path": "cli.py",
"copies": "1",
"size": "7438",
"license": "apache-2.0",
"hash": 2393308565163120600,
"line_mean": 24.8263888889,
"line_max": 103,
"alpha_frac": 0.6304114009,
"autogenerated": false,
"ratio": 3.6931479642502483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9811292703399228,
"avg_score": 0.0024533323502040297,
"num_lines": 288
} |
__author__ = 'cmantas'
import traceback
from lib.tiramola_logging import get_logger
from os import remove, mkdir, listdir
from shutil import move, copy
from time import strftime
from os.path import isdir, isfile, join, exists, basename
from random import random
from json import load, dumps, loads
from lib.persistance_module import env_vars, reload_env_vars, home
from time import sleep
from sys import exc_info
from ClientsCluster import my_Clients as ClientsCluster
import CassandraCluster
from VM import Timer
## global logger
log = get_logger("EXPERIMENT", 'INFO', logfile=home+'files/logs/Coordinator.log')
# scratch dict shared by the experiment helpers
o_ev = {}
# all experiment output lands under this directory
measurements_dir = "files/measurements"
def list_files(dir_path):
    """
    lists all files (not dirs) in a given directory
    :param dir_path: the directory to scan
    :return: list of plain-file names (not full paths)
    """
    found = []
    for entry in listdir(dir_path):
        if isfile(join(dir_path, entry)):
            found.append(entry)
    return found
def wait_get_one(dir_path):
    """
    Block until `dir_path` contains at least one regular file and return the
    path of one of them (polls once per second).
    """
    files = list_files(dir_path)
    print_once = True
    while len(files)==0:
        # announce the wait only once (Python 2 print statement)
        if print_once:
            print "Waiting for files..."
            print_once = False
        sleep(1)
        files = list_files(dir_path)
    fname = dir_path + "/" + files.pop()
    return fname
def watch(dir_path, callback):
    """
    Watch *dir_path* forever: whenever a file appears, read its contents,
    archive the file into <dir_path>/done and invoke callback(contents).
    :param dir_path: directory to poll for new files
    :param callback: function invoked with each file's text contents
    :return: never returns
    """
    while True:
        fname = wait_get_one(dir_path)
        # FIX: the original called `f.close` without parentheses, so the file
        # handle was never actually closed; use a context manager instead
        with open(fname, 'r') as f:
            contents = f.read()
        done_dir = dir_path+"/done"
        if not exists(done_dir):
            mkdir(done_dir)
        # drop any stale copy so move() cannot fail on an existing target
        try:
            remove(done_dir+"/"+basename(fname))
        except:
            pass
        move(fname, done_dir)
        callback(contents)
def run_sinusoid(target, period, offset):
    """Start a sinusoid-shaped client workload against the public Cassandra host IPs."""
    params = {
        'type': 'sinusoid',
        'target': target,
        'period': period,
        'offset': offset,
        'servers': CassandraCluster.get_hosts(private=False),
    }
    ClientsCluster.run(params)
def run_stress():
    """Start a constant 'stress' client workload against the private Cassandra host IPs."""
    log.info("running stress workload")
    hosts = CassandraCluster.get_hosts(private=True)
    ClientsCluster.run({'type': 'stress', 'servers': hosts})
def experiment(name, target, period, offset, periods_count):
    """
    runs a full experiment and outputs the the results to directory inside the measurements dir
    :param name: experiment name (also the results sub-directory name)
    :param target: peak target load of the sinusoid workload
    :param period: period of one workload cycle (minutes; Coordinator runs 60*period seconds)
    :param offset: baseline offset of the sinusoid workload
    :param periods_count: how many workload periods to run
    :return: True on success, False if the provisioning run failed
    """
    #delete any previous measurements
    try:
        remove("%s/measurements.txt" % measurements_dir)
    except:
        pass
    #empty the contents of the coordinator.log
    try:
        open('files/logs/Coordinator.log', 'w+').close()
    except:
        pass
    # kill the workload (in case there were leftover clients running)
    log.info("killing workload")
    ClientsCluster.kill_nodes()
    # create a directory for the experiment results
    dir_path = measurements_dir+"/"+name
    # avoid clobbering a previous run of the same name: add a random suffix
    if isdir(dir_path): dir_path += "_"+str(int(random()*1000))
    try:mkdir(dir_path)
    except:log.error("Could not create experiment directory");exit(-1)
    success = False
    try:
        # actually run the tiramola automatic provisioning algorithm
        try:
            import Coordinator
            for i in range(periods_count):
                CassandraCluster.compaction()
                run_sinusoid(target, period, offset)
                log.info("Running the Coordinator for period " + str(i))
                Coordinator.run(60 * period)
            log.info("killing workload")
            ClientsCluster.kill_nodes()
            success = True
        except:
            # the provisioning run failed: record the traceback in the results dir
            print traceback.format_exc()
            traceback.print_exc(file=open(dir_path+"/errors", "w+"))
        if not success:
            log.info(" killing workload")
            ClientsCluster.kill_nodes()
        # move the measurements file
        move("files/measurements/measurements.txt", dir_path)
        # move the predictions file
        if isfile("files/measurements/predictions.txt"): move("files/measurements/predictions.txt", dir_path)
        # NOTE(review): period/60 looks inconsistent with Coordinator.run(60*period)
        # above (period appears to be in minutes); confirm the intended units.
        info_long = "target = %d\noffset = %d\nperiod = %dmin\nperiods = %dmin\ndate = %s" %\
                    (target, offset, period/60, periods_count, strftime('%b%d-%H:%M'))
        # env_vars / o_ev are module-level globals (o_ev holds the experiment's overrides)
        global env_vars
        info_long += "\ngain = " + env_vars['gain']
        info_long += "\ndecision_interval = " + str(env_vars['decision_interval'])
        info_long += "\ndecision_threshold = " + str(int(float(env_vars['decision_threshold'])*100)) + "%"
        try:
            global o_ev
            info_long += "\n" + dumps(o_ev, indent=3)
        except:
            pass
        #write information to file
        with open (dir_path+"/info", 'w+') as f:
            f.write(info_long)
        # move the Coordinator log
        try:
            copy("files/logs/Coordinator.log", dir_path)
        except:
            pass
        #draw the result graphs
        from lib.draw_experiment import draw_exp
        try:
            draw_exp(dir_path+"/measurements.txt")
        except:
            traceback.print_exc(file=open(dir_path+"/errors", "w+"))
        log.info("EXPERIMENT DONE: Result measurements in: "+dir_path)
    except:
        traceback.print_exc(file=open(dir_path+"/errors", "w+"))
    return success
def simulate():
    """Run the RL decision module offline on the training set and plot the result."""
    # clear any stale measurements so the simulation starts fresh
    try:
        remove("files/measurements/measurements.txt")
    except:
        pass
    from new_decision_module import RLDecisionMaker
    decision_maker = RLDecisionMaker("localhost", 8)
    decision_maker.simulate_training_set()
    from lib.draw_experiment import draw_exp
    # make sure the simulation output directory exists
    try:
        mkdir("files/measurements/simulation/")
    except:
        pass
    move("files/measurements/measurements.txt", "files/measurements/simulation/measurements.txt")
    draw_exp("files/measurements/simulation/measurements.txt")
def clean_start():
success = False
while not success:
try:
#clean-start the cluster by default or if clean is True
CassandraCluster.kill_nodes()
used = env_vars["min_cluster_size"]
CassandraCluster.bootstrap_cluster(used)
#load_data
svr_hosts = CassandraCluster.get_hosts()
args = {'type': 'load', 'servers': svr_hosts, 'records': env_vars['records']}
ClientsCluster.run(args)
success = True
except:
print "Unexpected error on clean:" + str(exc_info()[0])
print traceback.format_exc()
log.error("Failed to clean, restarting")
sleep(120)
def run_experiments(experiment_file):
    """
    loads the experiments from a file to a list and runs them in batch
    :param experiment_file: path of a JSON file holding a list of experiment dicts
    :return:
    """
    # FIX: close the file handle (the original leaked the bare open() result)
    with open(experiment_file) as f:
        exp_list = load(f)
    run_batch_experiments(exp_list)
def run_experiments_from_string(string_exp):
    """Parse a JSON string describing a list of experiments and run them in batch."""
    run_batch_experiments(loads(string_exp))
def run_batch_experiments(exp_list):
    """
    runs a batch of experiments as specified to the experiment file
    :param exp_list: list of experiment dicts ('env_vars', 'name', 'workload',
                     'periods_count', optional 'simulation'/'clean' flags)
    :return:
    """
    #run each one of the experiments
    log.info("running batch experiments")
    for exp in exp_list:
        # overwrite the given env_vars
        # NOTE(review): a local `from ... import env_vars` followed by a
        # `global env_vars` declaration is only tolerated by Python 2
        # (SyntaxWarning); it is a SyntaxError in Python 3.
        from lib.persistance_module import env_vars
        reload_env_vars()
        global o_ev, env_vars
        # o_ev keeps this experiment's overrides so experiment() can dump them
        o_ev = exp['env_vars']
        env_vars.update(o_ev)
        if 'simulation' in exp and exp['simulation']:
            simulate()
        else:
            target = int(exp['workload']["target"])
            period = int(exp['workload']["period"])
            offset = int(exp['workload']["offset"])
            periods_count = int(exp["periods_count"])
            name = exp['name']
            #run the experiment
            # retry a failing experiment up to 5 times, cleaning in between
            tries = 5
            success = False
            while not success and tries > 0:
                # clean-start unless the experiment explicitly sets clean=False
                if (not ('clean' in exp)) or bool(exp['clean']):
                    clean_start()
                else:
                    #make sure the cluster is at its min size
                    CassandraCluster.set_cluster_size(env_vars["min_cluster_size"])
                success = experiment(name, target, period, offset, periods_count)
                if not success:
                    log.info("Experiment failed, sleeping 10mins and Retrying")
                    sleep(600)
                    tries -=1
if __name__ == '__main__':
    # manual smoke test: run the experiment descriptions found in test.json
    print 'testing experiments creation'
    run_experiments("test.json")
| {
"repo_name": "cmantas/tiramola_v3",
"path": "Experiment.py",
"copies": "1",
"size": "8839",
"license": "apache-2.0",
"hash": -3012856640105823700,
"line_mean": 29.9055944056,
"line_max": 111,
"alpha_frac": 0.5970132368,
"autogenerated": false,
"ratio": 3.974370503597122,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5071383740397122,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
filename = 'files/measurements/clean-metrics.txt'
class Metric:
    """One training-set sample: cluster size (state), incoming load (lambda),
    measured throughput and latency."""

    def __init__(self, state, lamda, throughput, latency):
        # state / lambda / throughput are stored as the nearest integers,
        # latency is kept as a float
        self.state = int(round(float(state)))
        self.lamda = int(round(float(lamda)))
        self.throughput = int(round(float(throughput)))
        self.latency = float(latency)

    def __str__(self):
        return "state: {:d}, lambda: {:d}, throughput: {:d}, latency: {:f}".format(
            self.state, self.lamda, self.throughput, self.latency)
metrics = []
#parse metrics
for line in open(filename, 'r'):
ll = line.split()
metric = Metric(ll[0], ll[1], ll[2], ll[3])
metrics.append(metric)
min_lambda = (min(metrics, key=lambda x: x.lamda)).lamda
max_lambda = (max(metrics, key=lambda x: x.lamda)).lamda
min_state = (min(metrics, key=lambda x: x.state)).state
max_state = (max(metrics, key=lambda x: x.state)).state
#sort my lambda
#metrics.sort(key=lambda x: x.lamda)
#emprical
tp_per_node = 800
representative_metrics = []
# clusterize by lambda
min_cluster_lambda = 0
for state in range(min_state, max_state+1):
max_cluster_lambda = tp_per_node * state
cluster = [m for m in metrics if (m.lamda<max_cluster_lambda and m.lamda>=min_cluster_lambda)]
min_cluster_lambda = max_cluster_lambda
#process this cluster
if len(cluster) == 0:
continue
min_cluster_state = (min(metrics, key=lambda x: x.state)).state
max_cluster_state = (max(metrics, key=lambda x: x.state)).state
#for each of the states in this cluster
for state in range(min_cluster_state, max_cluster_state):
#find the metrics in this state
state_metrics = [m for m in cluster if m.state == state]
#process latencies for this state
min_state_latency = (min(state_metrics, key=lambda x: x.latency)).latency
max_state_latency = (max(state_metrics, key=lambda x: x.latency)).latency
avg_latency = sum( [m.latency for m in state_metrics ]) / len(state_metrics)
median_latency = state_metrics[len(state_metrics)/2].latency
print 'State %2s. Latency: min %3.3f, avg %3.3f, median %3.3f, max %3.3f' % \
(state, min_state_latency, avg_latency, median_latency, max_state_latency)
#process throughput for this state
avg_throughput = sum([m.throughput for m in state_metrics]) / len(state_metrics)
print "average throughput = " + str(avg_throughput)
#keep only one representative metric
representative_metrics.append( Metric(state, (max_cluster_lambda+min_cluster_lambda)/2, avg_throughput, median_latency))
## export the representative metrics to json list
exported_metrics = []
for m in representative_metrics:
exported_metrics.append({"lambda": m.lamda, "state": m.state, 'throughput': m.throughput, 'latency': m.latency})
from json import dump
dump(exported_metrics, file("files/measurements/processed_metrics.json", "w+"), indent=3 ) | {
"repo_name": "cmantas/tiramola_v3",
"path": "obsolete/process_training_set_test.py",
"copies": "1",
"size": "2952",
"license": "apache-2.0",
"hash": -6069783286410424000,
"line_mean": 32.5568181818,
"line_max": 128,
"alpha_frac": 0.6629403794,
"autogenerated": false,
"ratio": 3.272727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9397172153928633,
"avg_score": 0.0076990996397278,
"num_lines": 88
} |
__author__ = 'cmantas'
from sklearn.preprocessing import LabelEncoder
from pyspark.mllib.classification import LabeledPoint
import joblib
from sys import maxint
input_fname = "/home/cmantas/Data/Result_W2V_IMR_New.csv"
test1_fname = "/home/cmantas/Data/Result_W2V_IMR_test1.csv"
wanted_lines = 100
def get_labels_from_imr_dataset(line):
    """
    Parse one line of the IMR csv dataset, keeping only the two labels.
    :param line: a ';'-separated csv line
    :return: (label1, label2) as ints, or None when the second field is not numeric
    """
    fields = line.split(";")
    if not fields[1].isdigit():
        return
    return int(fields[1]), int(fields[2])
def parse_imr_line(line):
    """
    Parse one line of the IMR csv dataset.
    :param line: a ';'-separated csv line
    :return: ((label1, label2), features) where labels are ints and features
             are the remaining fields as floats; None for non-data lines
    """
    fields = line.split(";")
    if not fields[1].isdigit():
        return
    labels = (int(fields[1]), int(fields[2]))
    features = [float(value) for value in fields[3:]]
    return (labels, features)
def parse_imr_line_encoding_labels(L1_encoder, L2_encoder, line):
    """
    Parse one IMR csv line and re-encode its labels to consecutive class IDs.
    :param L1_encoder: the LabelEncoder for the first label
    :param L2_encoder: the LabelEncoder for the second label
    :param line: a ';'-separated csv line
    :return: ((encoded_label1, encoded_label2), features), or None for non-data lines
    """
    parsed = parse_imr_line(line)
    if parsed is None:
        return
    raw_labels, features = parsed[0], parsed[1]
    encoded = (L1_encoder.transform(raw_labels[0]), L2_encoder.transform(raw_labels[1]))
    return (encoded, features)
def create_labeled_point( labels_and_features, wanted_category):
    """
    Build a LabeledPoint from a ((label1, label2), features) tuple, using the
    label selected by *wanted_category* (0 or 1) as the point's label.
    :param labels_and_features: tuple of (label pair, feature vector)
    :param wanted_category: index of the label to keep
    """
    label_pair = labels_and_features[0]
    feature_vector = labels_and_features[1]
    return LabeledPoint(label_pair[wanted_category], feature_vector)
def get_labels_from_csv_dataset(raw_data_rrd):
    """
    Collect the sets of distinct cat1 and cat2 labels found in an RDD of IMR csv lines.
    :param raw_data_rrd: RDD of raw csv lines
    :return: (labels_1, labels_2) lists of distinct label values
    """
    # py3-compatible lambdas (the original used py2-only tuple unpacking)
    pairs = raw_data_rrd.map(get_labels_from_imr_dataset).filter(lambda p: p is not None)
    labels_1 = pairs.map(lambda p: p[0]).distinct().collect()
    labels_2 = pairs.map(lambda p: p[1]).distinct().collect()
    return labels_1, labels_2
def create_label_encoders(input_csv_file):
    """Fit one LabelEncoder per label column of the dataset (an RDD of csv lines)."""
    labels_1, labels_2 = get_labels_from_csv_dataset(input_csv_file)
    encoder_1 = LabelEncoder()
    encoder_1.fit(labels_1)
    encoder_2 = LabelEncoder()
    encoder_2.fit(labels_2)
    return encoder_1, encoder_2
def store_label_encoders(enc1, enc2, le_path):
    """Persist both fitted LabelEncoders together as one joblib file at *le_path*."""
    joblib.dump((enc1, enc2), le_path)
def load_label_encoders(le_path):
    """Load the (cat1, cat2) LabelEncoder pair stored by store_label_encoders."""
    encoders = joblib.load(le_path)
    return encoders[0], encoders[1]
def read_file_and_get_labels(imr_fname, wanted_lines=maxint):
    """
    Scan (at most *wanted_lines* lines of) an IMR csv file and collect its labels.
    :param imr_fname: path of the csv file to read
    :param wanted_lines: stop after this many lines (default: unlimited)
    :return: (labels1, labels2) lists of the label values found
    """
    labels1 = []
    labels2 = []
    line_count = 0
    # FIX: the original opened the module-level `test1_fname` and silently
    # ignored the `imr_fname` argument
    with open(imr_fname) as f:
        for line in f:
            # limit the read lines (kept as in the original: the check runs
            # after the increment, so at most wanted_lines-1 lines are parsed)
            line_count += 1
            if line_count >= wanted_lines: break
            labels = get_labels_from_imr_dataset(line)
            if labels is None: continue
            labels1.append(labels[0])
            labels2.append(labels[1])
    return labels1, labels2
def do_encoding(input_fname, output_fname, line_encoding_function, wanted_lines=None):
    """
    Encode every line of *input_fname* with *line_encoding_function* and write
    the re-mapped lines to *output_fname*.
    FIXES vs original: the output file was opened on input_fname (truncating the
    input!), and the second label was written as a copy of the first.
    :param input_fname: path of the csv file to read
    :param output_fname: path of the encoded output file
    :param line_encoding_function: line -> ((label1, label2), features) or None
    :param wanted_lines: stop after this many lines (default: unlimited;
           replaces the py2-only sys.maxint default, backward-compatibly)
    """
    if wanted_lines is None:
        wanted_lines = float("inf")
    line_count = 0
    # FIX: write to output_fname (the original clobbered input_fname) and
    # close both handles deterministically
    with open(output_fname, "w+") as fout:
        with open(input_fname) as fin:
            for line in fin:
                # limit the read lines (check-after-increment kept as before)
                line_count += 1
                if line_count >= wanted_lines:
                    break
                # pass the line to the encoding function
                point = line_encoding_function(line)
                if point is None:
                    continue
                # construct the output string and write it
                # FIX: second field is label2 (point[0][1]), not label1 again
                string_labels = str(point[0][0]) + ";" + str(point[0][1]) + ";"
                string_features = [str(f) for f in point[1]]
                re_mapped_line = "0.0;" + string_labels + ";".join(string_features)
                fout.write(re_mapped_line + "\n")
# Phase 1: collect the raw label values from the dataset.
print "Phase1:",
# NOTE(review): read_file_and_get_labels ignores its filename argument and
# reads the module-level test1_fname (see its body) -- confirm which file is intended.
labels1, labels2 = read_file_and_get_labels(input_fname, wanted_lines)
# create the label encoders (mapping from big_integer_labels -> consecutive_small_integer_label
L1_encoder = LabelEncoder();L1_encoder.fit(labels1)
L2_encoder = LabelEncoder();L2_encoder.fit(labels2)
print L1_encoder.get_params()
print "DONE \nLabel counts of",wanted_lines,"lines are: \n\t" , len(L1_encoder.classes_), len(L2_encoder.classes_)
print "\tmax class label values: ", max(L1_encoder.classes_), max(L2_encoder.classes_)
# with open("labels_1.csv","w+") as l1f:
#     # convert to list of strings
#     label_strings = map(str, L1_encoder.classes_.tolist())
#     l1f.write(",".join(label_strings))
#
# with open("labels_2.csv","w+") as l1f:
#     label_strings = map(str, L2_encoder.classes_.tolist())
#     l1f.write(",".join(label_strings))
# a function that encodes labels based on L1, L2 encoders
encoding_closure = lambda l: parse_imr_line_encoding_labels(L1_encoder, L2_encoder, l)
# Phase 2: re-encode the dataset using the fitted encoders.
do_encoding(input_fname, test1_fname, encoding_closure)
print " OK"
| {
"repo_name": "project-asap/IReS-Platform",
"path": "asap-tools/spark/IMR_tools_old.py",
"copies": "1",
"size": "5394",
"license": "apache-2.0",
"hash": 2310007588811610600,
"line_mean": 29.4745762712,
"line_max": 114,
"alpha_frac": 0.6492398962,
"autogenerated": false,
"ratio": 3.1710758377425043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4320315733942504,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from VM import VM, Timer, LOGS_DIR
from VM import get_all_vms
from lib.persistance_module import env_vars, get_script_text
from lib.tiramola_logging import get_logger
class Node (VM):
    """
    Class that represents a node in a cluster. Extends VM.
    Node names follow the "<cluster>_<type>_<number>" convention.
    """

    def __init__(self, cluster_name='', node_type='', number=0, create=False, IPv4=False, wait=False, vm=None, flavor=None, image=None):
        """
        Creates a Node object.
        :param cluster_name: name of the cluster the node belongs to
        :param node_type: the type of the node
        :param number: index of the node inside its cluster
        :param create: if True then the actual VM will be created
        :param IPv4: passed through to VM (request a public IPv4)
        :param wait: passed through to VM (block until the VM is up)
        :param vm: used to create a Node from a pre-existing VM
        :param flavor: IaaS flavor; defaults to env_vars["default_flavor"]
        :param image: IaaS image; defaults to env_vars["default_image"]
        """
        self.bootstrapped = False
        self.name = cluster_name + "_" + node_type + "_" + str(number)
        self.type = node_type
        self.number = number
        self.cluster_name = cluster_name
        if flavor is None:
            self.flavor = env_vars["default_flavor"]
        else:
            self.flavor = flavor
        if image is None:
            self.image = env_vars["default_image"]
        else:
            self.image = image
        if not vm is None:
            # init a node from a pre-existing VM
            self.from_vm(vm)
        else:
            # create a (new) VM for this node
            super(Node, self).__init__(self.name, self.flavor, self.image, IPv4=IPv4, create=create, wait=wait)

    def __str__(self):
        rv = "Node || name: %s, type: %s" % (self.name, self.type)
        return rv

    def from_vm(self, vm):
        """
        Creates a Node from a pre-existing VM.
        :param vm: the VM to wrap
        :return:
        """
        self.name = vm.name
        # FIX: adopt the VM's logger *before* its first use -- the original
        # assigned self.log only after calling self.log.error() below, which
        # raised AttributeError for non-created VMs
        self.log = vm.log
        if not vm.created:
            self.log.error("this VM is not created, so you can't create a node from it")
            return
        super(Node, self).__init__(self.name, self.flavor, self.image, IPv4=vm.IPv4)
        self.id = vm.id
        self.created = True
        # recover type and cluster from the "<cluster>_<type>_<number>" name
        tail = self.name[self.name.find("_")+1:]
        self.type = tail[:tail.find("_")]
        self.cluster_name = self.name[:self.name.find("_")]
        self.log.debug("cluster = "+self.cluster_name)
        self.addresses = vm.addresses

    def bootstrap(self):
        """
        Runs the required bootstrap scripts on the node and times the run.
        """
        self.log.debug("Running bootstrap script")
        command = get_script_text(self.cluster_name, self.type, "bootstrap")
        timer = Timer.get_timer()
        rv = self.run_command(command)
        self.log.debug("command returned:\n"+str(rv))
        self.log.info("now bootstrapped (took %d sec)" % timer.stop())
        self.bootstrapped = True

    def decommission(self):
        """
        Decommissions the node from the cluster, then applies the cluster's
        configured decommission action (KEEP / SHUTDOWN / DESTROY).
        :return:
        """
        self.log.info( "running decommission script")
        command = get_script_text(self.cluster_name, self.type, "decommission")
        timer = Timer.get_timer()
        self.run_command(command, silent=True)
        action = env_vars["%s_decommission_action" % self.cluster_name]
        if action == "KEEP": pass
        elif action == "SHUTDOWN": self.shutdown()
        elif action == "DESTROY": self.destroy()
        self.log.info( "now decommissioned (took %d sec)" % (timer.stop()))

    def kill(self):
        """
        Runs the required scripts to kill the application being run in the cluster.
        """
        self.log.debug ( "running kill script")
        command = get_script_text(self.cluster_name, self.type, "kill")
        self.run_command(command, silent=True)

    def inject_hostnames(self, hostnames, delete=None):
        """
        Appends hostnames to the /etc/hosts file of the node so that it includes
        the given hostname-->IP entries; also writes the node's own name to /etc/hostname.
        If delete is specified, lines containing that string are removed from /etc/hosts first.
        :param hostnames: a mapping of hostnames-->IPs
        :param delete: substring whose matching /etc/hosts lines are removed
        """
        text = ""
        if not delete is None:
            # delete the required entries: the first sed blanks every matching
            # line (g copies the empty hold space over it), the second sed then
            # removes all empty lines
            self.run_command('sed -i "/.*%s.*/g" /etc/hosts; sed -i "/^$/d" /etc/hosts' % delete)
        for host in hostnames.keys():
            text += "\n%s %s" % (hostnames[host], host)
        self.run_command("echo '%s' >> /etc/hosts; echo %s >/etc/hostname" %
                         (text, self.name), silent=True)

    def get_status(self):
        """
        Gets the status of the node, combining IaaS status and hooks inside the VM
        TODO: specify status script and run to get status
        :return: "stopped", or the stripped output of `ctool status`
        """
        if self.get_cloud_status() != "ACTIVE":
            return "stopped"
        # wait for the vm to be ready and SSH-able
        self.wait_ready()
        status = self.run_command("ctool status", indent=0, prefix='')
        return status.strip()

    @staticmethod
    def get_all_nodes(cluster_name="", check_active=False):
        """
        Returns a Node instance for each one of the VMs running in the cluster
        :param cluster_name: only return Nodes of the specified cluster (whose name starts with 'cluster_name')
        :param check_active: if true only return VMs whose IaaS status is 'ACTIVE'
        :return: list of Node objects
        """
        vms = get_all_vms(check_active=check_active)
        nodes = []
        for vm in vms:
            if (cluster_name != "") and (not vm.name.startswith(cluster_name)):
                continue
            nodes.append(Node(vm=vm))
        return nodes
| {
"repo_name": "cmantas/tiramola_v3",
"path": "Node.py",
"copies": "1",
"size": "5541",
"license": "apache-2.0",
"hash": -1703996145183386400,
"line_mean": 35.94,
"line_max": 136,
"alpha_frac": 0.5746255189,
"autogenerated": false,
"ratio": 3.8399168399168397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49145423588168397,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
from VM import VM, Timer
from VM import get_all_vms
from lib.persistance_module import env_vars, get_script_text
class CassandraNode:
"""
Class that represents a node in a cassandra cluster. Can be of type 'SEED' or 'REGULAR' (default)
"""
#static vars
image = env_vars["cassandra_base_image"]
def __init__(self, name=None, node_type="REGULAR", create=False, vm=None):
"""
Creates a CassandraNode object.
:param name:
:param node_type: if "SEED" then will be treated as seednode
:param create: if True then the actual VM will be created
:param vm: if not None then this CassandraNode will be created from an existing vm
"""
self.bootstraped = False
self.name = name
self.type = node_type
self.vm = None
if not vm is None:
# init a node from a VM
self.from_vm(vm)
if create:
self.create()
def __str__(self):
rv = "Cassandra Node || name: %s, type: %s" % (self.name, self.type)
return rv
def create(self):
"""
creates the VM that this Cassandra Node will run on
:return:
"""
flavor = env_vars["cassandra_%s_flavor" % self.type]
#create the VM
self.vm = VM(self.name, flavor, self.image, create=True)
def from_vm(self, vm):
"""
Creates a CassandraNode from an existing VM
:param vm:
:return:
"""
if not vm.created:
print "this VM is not created, so you cann't create a node from it"
self.name = vm.name
self.vm = vm
if "seed" in vm.name:
self.type = "SEED"
elif "client" in vm.name:
self.type = "CLIENT"
else:
self.type = "REGULAR"
def bootstrap(self, params = None):
"""
Bootstraps a node with the rest of the Casandra cluster
"""
command = ""
print "NODE: [%s] running bootstrap scripts" % self.name
if self.type == "SEED":
command += get_script_text("cassandra_seednode_bootstrap")
elif self.type == "CLIENT":
if self.name.endswith('1'):
command += get_script_text("ganglia_endpoint")
command += get_script_text("cassandra_client_bootstrap")
else:
command = get_script_text("cassandra_node_bootstrap")
timer = Timer.get_timer()
self.vm.run_command(command, silent=True)
print "NODE: %s is now bootstrapped (took %d sec)" % (self.name, timer.stop())
self.bootstraped = True
def decommission(self):
"""
Cecommissions a node from the Cassandra Cluster
:return:
"""
print "NODE: Decommissioning node: " + self.name
keyspace = env_vars['keyspace']
timer = Timer.get_timer()
self.vm.run_command("nodetool repair -h %s %s" % (self.name, keyspace))
self.vm.run_command("nodetool decommission")
print "NODE: %s is decommissioned (took %d secs)" % (self.name, timer.stop())
#self.vm.shutdown()
def kill(self):
command = get_script_text("cassandra_kill")
self.vm.run_command(command, silent=True)
def get_status(self):
"""
Gets the status of the node as far as Cassandra is concerned (uses hooks inside of VM)
:return:
"""
if self.vm.get_cloud_status() != "ACTIVE":
return "stopped"
#wait for the vm to be ready and SSH-able
self.vm.wait_ready()
status = self.vm.run_command("ctool status", indent=0, prefix='')
return status.strip()
def inject_server_hosts(self, hosts):
text = ""
for h in hosts:
text += h + "\n"
print "injecting: \n"+text
self.vm.run_command("echo '%s' > /opt/hosts")
def __str__(self):
str = "Node %s" % self.name
return str
def get_all_nodes(check_active=False):
    """
    Wrap every 'cassandra'-prefixed VM in a CassandraNode and group the results.
    :param check_active: if True, only consider VMs whose IaaS status is ACTIVE
    :return: (seeds, regular nodes, clients) lists
    """
    seeds, regular, clients = [], [], []
    for vm in get_all_vms(check_active=check_active):
        if not vm.name.startswith("cassandra"):
            continue
        node = CassandraNode(vm=vm)
        if node.type == 'SEED':
            seeds.append(node)
        elif node.type == "CLIENT":
            clients.append(node)
        else:
            regular.append(node)
    return seeds, regular, clients
| {
"repo_name": "cmantas/cluster_python_tool",
"path": "CassandraNode.py",
"copies": "1",
"size": "4457",
"license": "apache-2.0",
"hash": -7305861535435517000,
"line_mean": 30.8357142857,
"line_max": 101,
"alpha_frac": 0.5611397801,
"autogenerated": false,
"ratio": 3.7611814345991563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9770192725738582,
"avg_score": 0.010425697792114882,
"num_lines": 140
} |
__author__ = 'cmantas'
import argparse
from os import system
# CLI: load a local directory of text documents into HDFS as a sequence file.
parser = argparse.ArgumentParser(description='runs TF/IDF on a directory of text docs')
# parser.add_argument("-o","--operation", help="Operation: Load or Save", required=True)
parser.add_argument("-i","--input", help="the local input directory", required=True)
parser.add_argument("-do", "--distributed_output", help="the output file in HDFS", required=False)
# parser.add_argument("-t", "--type", help="the output file format, seq or text", required=False)
args = parser.parse_args()
docs_dir = args.input
d_out = args.distributed_output
# remove any previous output so saveAsSequenceFile cannot fail on an existing path
system("hdfs dfs -rm -r %s"%d_out)
d_out = "hdfs://master:9000/" + d_out
from pyspark import SparkContext, RDD, SparkConf
# NOTE(review): __setattr__ returns None, so `conf` is None here -- and the
# SparkContext below is built without it anyway; confirm whether the
# validateOutputSpecs setting was ever meant to take effect.
conf = SparkConf().setAppName("Move").__setattr__("spark.hadoop.validateOutputSpecs", "false")
sc = SparkContext("local", appName="Text-Loader")
# if args.operation not in ["save", "load"]:
#     raise Exception("Wrong args")
#     exit()
# if "save" in args.operation:
#     if args.type not in ["text", "seq"]:
#         raise Exception("The output type can be either seq(Sequence file) or text(Text file)")
#         exit()
# read every file in the directory as (path, contents) pairs and store them
documents = sc.wholeTextFiles(docs_dir)
documents.saveAsSequenceFile(d_out)
# else:
#     seq_file = sc.sequenceFile("hdfs://master:9000/"+args.input)
#     text = seq_file.collect()
#     for line in text:
#         print line[1].strip()
| {
"repo_name": "project-asap/IReS-Platform",
"path": "asap-tools/spark/text_loader.py",
"copies": "1",
"size": "1405",
"license": "apache-2.0",
"hash": 6106496425426773000,
"line_mean": 27.6734693878,
"line_max": 100,
"alpha_frac": 0.6775800712,
"autogenerated": false,
"ratio": 3.252314814814815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9411234371590443,
"avg_score": 0.003732102884874385,
"num_lines": 49
} |
__author__ = 'cmantas'
import argparse
# NOTE(review): the description says "kmeans" but this script trains/applies an
# SVM (see SVMWithSGD below) -- the text is likely copy-pasted.
parser = argparse.ArgumentParser(description='runs kmeans on spark for .csv files')
parser.add_argument('operation', help='"train" or "classify"')
parser.add_argument("-it","--iterations", help="the max iterations of the algorithm", type=int, default=3)
parser.add_argument("-i", "--input", help="the input data path", required=True)
parser.add_argument("-mf", "--model-file", help="path of the model file", required=True)
args = parser.parse_args()
from re import split
from pyspark import SparkContext, SparkConf
from pyspark.mllib.classification import SVMWithSGD, SVMModel
from pyspark.mllib.regression import LabeledPoint
from common import *
def parse_point(line):
    """
    Parse one data line (space/comma/semicolon separated floats) into a
    LabeledPoint whose label is the first value.
    :param line: raw text line
    :return: LabeledPoint(label, features)
    """
    fields = [float(token) for token in split(' |,|;', line)]
    return LabeledPoint(fields[0], fields[1:])
def train_model(training_data, iterations, model_file_path, calculate_error=True):
    """
    Trains an SVM model (SGD) and saves it.
    :param training_data: path/HDFS url of the training csv
    :param iterations: max SGD iterations
    :param model_file_path: where the trained model is saved
    :param calculate_error: if True, also compute and print the training error
    :return:
    """
    parsed_data = sc.textFile(training_data).map(parse_point)
    # Build the model
    model = SVMWithSGD.train(parsed_data, iterations=iterations)
    # Save the model
    model.save(sc, model_file_path)
    print "Model saved in: ", model_file_path
    if calculate_error:
        #predictions: pair each point's true label with the model's prediction
        labelsAndPreds = parsed_data.map(lambda p: (p.label, model.predict(p.features)))
        # fraction of misclassified points (py2-only tuple-unpacking lambda)
        trainErr = labelsAndPreds.filter(lambda (v, p): v != p).count() / float(parsed_data.count())
        print("============Training Error = " + str(trainErr))
def classify_with_model(input_data_path, model_file_path):
    """Load a saved SVMModel and write its predictions for the input data to 'predictions'."""
    points = sc.textFile(input_data_path).map(parse_point)
    svm = SVMModel.load(sc, model_file_path)
    predicted = points.map(lambda p: svm.predict(p.features))
    predicted.saveAsTextFile("predictions")
if __name__ == "__main__":
    model_file_path = to_hdfs_url(args.model_file)
    operation = args.operation
    # choose the operation
    if operation.lower() == "train":
        training_data = to_hdfs_url(args.input)
        iterations = args.iterations
        # init the spark context (only if no ambient `sc` exists, e.g. pyspark shell)
        if "sc" not in globals():
            print "=== INIT sc"
            conf = SparkConf()
            # allow overwriting pre-existing output paths
            conf.set("spark.hadoop.validateOutputSpecs", "false")
            sc = SparkContext( appName="SVM Train", conf=conf)
        print "=== TRAINING"
        train_model(training_data, iterations, model_file_path)
        print "=== TRAINED"
    elif args.operation.lower() == "classify":
        input_file_path = to_hdfs_url(args.input)
        # init the spark context
        if "sc" not in globals():
            conf = SparkConf()
            conf.set("spark.hadoop.validateOutputSpecs", "false")
            sc = SparkContext( appName="SVM Classification", conf=conf)
        classify_with_model(input_file_path, model_file_path)
    else:
        print "I do not know operation '%s'" % args.operation
| {
"repo_name": "project-asap/IReS-Platform",
"path": "asap-tools/spark/svm_spark.py",
"copies": "1",
"size": "3086",
"license": "apache-2.0",
"hash": -6986023773304116000,
"line_mean": 33.2888888889,
"line_max": 106,
"alpha_frac": 0.6461438756,
"autogenerated": false,
"ratio": 3.65207100591716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.479821488151716,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
import logging, sys, logging.handlers
configured_loggers = []
def get_logger(name, level='INFO', show_level=False, show_time=False, logfile=None):
    """
    Create (or fetch) a named logger with a stdout handler and an optional file handler.
    A name is configured only once; later calls return the existing logger unchanged.
    :param name: logger name
    :param level: console log-level name, e.g. 'DEBUG' or 'INFO'
    :param show_level: prefix console messages with the level name
    :param show_time: prefix console messages with a timestamp
    :param logfile: if given, also log everything (DEBUG+) to this file
    :return: the configured logging.Logger
    """
    new_logger = logging.getLogger(name)
    # skip configuration if already configured (avoids duplicate handlers)
    if name in configured_loggers:
        return new_logger
    new_logger.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler(sys.stdout)
    # construct the console format
    console_format = '|%(name)19s: %(message)s'
    if show_level: console_format = '[%(levelname)s] ' + console_format
    if show_time: console_format = '%(asctime)-15s - ' + console_format
    formatter = logging.Formatter(console_format, "%b%d %H:%M:%S")
    # add console handler
    console_handler.setFormatter(formatter)
    # FIX: resolve the level name with getattr instead of eval()-ing generated
    # code (same AttributeError on an invalid name, without executing strings)
    console_handler.setLevel(getattr(logging, level))
    new_logger.addHandler(console_handler)
    # different handler for the logfile: always DEBUG, timestamped format
    if logfile is not None:
        file_handler = logging.FileHandler(logfile)
        file_handler.setLevel(logging.DEBUG)
        fformat = '%(asctime)-15s[%(levelname)5s] %(name)20s: %(message)s'
        fformatter = logging.Formatter(fformat, "%b%d %H:%M:%S")
        file_handler.setFormatter(fformatter)
        new_logger.addHandler(file_handler)
    # don't bubble records up to the root logger
    new_logger.propagate = False
    configured_loggers.append(name)
    return new_logger
def get_logger_test(name):
    """Build a logger that echoes INFO+ to stdout and writes DEBUG+ (timestamped) to 'test.log'."""
    lg = logging.getLogger(name)
    lg.name = name
    lg.setLevel(logging.DEBUG)
    # console handler: INFO and above
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('[%(levelname)s] - %(name)s: %(message)s'))
    lg.addHandler(console)
    # file handler: everything, with timestamps
    to_file = logging.FileHandler('test.log')
    to_file.setLevel(logging.DEBUG)
    to_file.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s - %(name)s: %(message)s'))
    lg.addHandler(to_file)
    return lg
if __name__ == "__main__":
    # manual smoke test of the console logger (debug should be filtered at INFO)
    logger = get_logger("tralalo")
    logger.debug("debug message")
    logger.info("this is an info message")
"repo_name": "cmantas/tiramola_v3",
"path": "lib/tiramola_logging.py",
"copies": "1",
"size": "2296",
"license": "apache-2.0",
"hash": 7519820989774407000,
"line_mean": 30.9027777778,
"line_max": 88,
"alpha_frac": 0.6781358885,
"autogenerated": false,
"ratio": 3.776315789473684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4954451677973684,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
#python ssh lib
import paramiko
import string
import sys
from socket import error as socketError
sys.path.append('lib/scp.py')
from lib.scp import SCPClient
from datetime import datetime, timedelta
from lib.persistance_module import env_vars, home
from time import time
import sys, traceback
ssh_timeout = 10
def reindent(s, numSpaces, prefix=''):
    """
    Re-indent every line of *s*: strip its leading whitespace, then prepend
    *numSpaces* spaces followed by *prefix*.
    (FIX: uses str methods instead of the py2-only `string` module functions,
    which no longer exist in Python 3; behavior is identical.)
    :param s: multi-line text
    :param numSpaces: number of leading spaces per line
    :param prefix: extra prefix inserted after the spaces
    :return: the re-indented text
    """
    pad = (numSpaces * ' ') + prefix
    return '\n'.join(pad + line.lstrip() for line in s.split('\n'))
def run_ssh_command(host, user, command, indent=1, prefix="$: ", logger=None):
    """
    runs a command via ssh to the specified host

    :param host: host name / IP to connect to
    :param user: SSH user name
    :param command: shell command to execute remotely
    :param indent: spaces to indent each output line (passed to reindent)
    :param prefix: prefix prepended to each output line
    :param logger: optional logger for timing/diagnostic messages
    :return: combined stdout+stderr, re-indented; None if the connection
             or command failed (the bare except below swallows everything)
    """
    # NOTE(review): ssh_giveup_timeout is read but never used — connect()
    # below uses the module-level ssh_timeout instead. Confirm which one
    # is intended.
    ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
    private_key = paramiko.RSAKey.from_private_key_file(home+env_vars["priv_key_path"])
    ssh = paramiko.SSHClient()
    # auto-accept unknown host keys (no known_hosts verification)
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if not logger is None:
        logger.debug("Connecting to SSH")
    timer = Timer.get_timer()
    try:
        ssh.connect(host, username=user, timeout=ssh_timeout, pkey=private_key, allow_agent=False, look_for_keys=False)
        if not logger is None:
            logger.debug("connected in %d sec. now Running SSH command" % timer.stop())
            timer.start()
        ### EXECUTE THE COMMAND ###
        stdin, stdout, stderr = ssh.exec_command(command)
        # drain stdout first, then stderr, into one buffer
        ret = ''
        for line in stdout:
            ret += line
        for line in stderr:
            ret += line
        # close the ssh connection
        ssh.close()
        if not logger is None:
            logger.debug("SSH command took %d sec" % timer.stop())
        return reindent(ret, indent, prefix=prefix)
    except:
        # NOTE(review): bare except — any failure (auth, timeout, DNS)
        # logs and returns None; callers must handle a None result.
        if not logger is None:
            logger.error("Could not connect to "+ str(host))
        traceback.print_exc()
def put_file_scp (host, user, files, remote_path='.', recursive=False):
    """
    puts the specified file to the specified host via SCP

    :param host: host name / IP to connect to
    :param user: SSH user name
    :param files: file path or list of paths to upload
    :param remote_path: destination path on the remote host
    :param recursive: whether to copy directories recursively
    :return: None
    """
    ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
    # NOTE(review): unlike run_ssh_command, the key path is NOT prefixed
    # with `home` here — confirm whether env_vars["priv_key_path"] is
    # absolute in this deployment.
    private_key = paramiko.RSAKey.from_private_key_file(env_vars["priv_key_path"])
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, username=user, timeout=ssh_giveup_timeout, pkey=private_key)
    try:
        # try/finally guarantees the SSH connection is closed even if the
        # transfer raises (the original leaked the connection on failure)
        scpc = SCPClient(ssh.get_transport())
        scpc.put(files, remote_path, recursive)
    finally:
        ssh.close()
def test_ssh(host, user, logger=None):
    """Return True iff an SSH echo round-trip to *host* succeeds.

    :param host: host name / IP to probe
    :param user: SSH user name
    :param logger: optional logger forwarded to run_ssh_command
    :return: True on success, False otherwise
    """
    try:
        rv = run_ssh_command(host, user, 'echo success', logger=logger)
    except Exception:
        return False
    # run_ssh_command swallows its own errors and returns None on failure,
    # so a truthy result containing the echoed marker is the real success
    # signal (the original returned True unconditionally here).
    return bool(rv) and 'success' in rv
class Timer():
    """
    Helper class that gives the ability to measure time between events
    """
    def __init__(self):
        self.started = False   # guards against double start / stop-without-start
        self.start_time = 0    # epoch milliseconds captured by start()

    def start(self):
        """Begin timing; raises Exception if the timer is already running."""
        if self.started is True:
            raise Exception("timer already started")
        self.started = True
        self.start_time = int(round(time() * 1000))

    def stop(self):
        """Return elapsed seconds since start() and reset the timer.

        Returns 0.0 (after printing a warning) if start() was never called.
        """
        end_time = int(round(time() * 1000))
        if self.started is False:
            # parenthesized print works under both Python 2 and 3
            # (the original py2-only statement broke py3 parsing)
            print(" Timer had not been started")
            return 0.0
        start_time = self.start_time
        self.start_time = 0
        self.started = False
        return float(end_time - start_time)/1000

    @staticmethod
    def get_timer():
        """Create and start a Timer in a single call."""
        timer = Timer()
        timer.start()
        return timer
"repo_name": "cmantas/tiramola_v3",
"path": "lib/scp_utils.py",
"copies": "1",
"size": "3752",
"license": "apache-2.0",
"hash": 7833926596271534000,
"line_mean": 28.5511811024,
"line_max": 119,
"alpha_frac": 0.6202025586,
"autogenerated": false,
"ratio": 3.667644183773216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4787846742373216,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3 as lite
import sys, json
from datetime import datetime
from os import path
# Project base directory: one level above this module's directory.
home = path.dirname(path.realpath(__file__))+"/../"
# JSON data files, all resolved relative to `home`.
ENV_VARS_FILE = home+'files/env_vars.json'
OPENSTACK_NAMES_FILE = home +'files/openstack_names.json'
SCRIPTS_FILE = home+'files/scripts.json'
PRED_VARS_FILE = home+'files/pred_vars.json'
# NOTE(review): unlike the paths above, db_file is relative to the CWD.
db_file = "files/persistance.db"
# Shared mutable mapping of environment variables; populated at import
# time below and refreshed in place by reload_env_vars().
env_vars = {}
def reload_env_vars():
    """Re-read ENV_VARS_FILE and merge its contents into the shared
    env_vars dict in place (existing keys are overwritten)."""
    global env_vars
    # context manager closes the file handle (the original leaked it)
    with open(ENV_VARS_FILE, 'r') as fh:
        env_vars.update(json.load(fh))
#load the env vars from file
# (with-blocks close the handles; the original `open(...).read()` calls
# leaked one file descriptor per load)
with open(ENV_VARS_FILE, 'r') as _fh:
    env_vars = json.load(_fh)
#load the openstack names from file
with open(OPENSTACK_NAMES_FILE, 'r') as _fh:
    openstack_names = json.load(_fh)
#load the prediction vars from file
with open(PRED_VARS_FILE, 'r') as _fh:
    pred_vars = json.load(_fh)
def get_credentials(user):
    """
    Retrieve the authentication url and authentication token for the given user.

    :param user: the user name for whom the credentials will be loaded
    :return: url, token
    """
    auth_url = env_vars["auth_url"]
    auth_token = env_vars[user + "_token"]
    return auth_url, auth_token
def get_script_text(cluster, node_type, script_type):
    """Return the text of the provisioning script registered in
    SCRIPTS_FILE under the key "<cluster>_<node_type>_<script_type>",
    with a trailing newline appended.

    :param cluster: cluster name component of the lookup key
    :param node_type: node-type component of the lookup key
    :param script_type: script-type component of the lookup key
    :return: script contents plus "\n"
    :raises KeyError: if the composed key is not in SCRIPTS_FILE
    """
    # with-blocks close both handles (the original leaked them) and we no
    # longer shadow the `file` builtin
    with open(SCRIPTS_FILE, 'r') as fh:
        scripts = json.load(fh)
    filename = home + scripts[cluster + "_" + node_type + "_" + script_type]
    with open(filename, 'r') as fh:
        return fh.read() + "\n"
def save_openstack_names():
    """Persist the in-memory openstack_names mapping to its JSON file."""
    with open(OPENSTACK_NAMES_FILE, 'w') as out:
        json.dump(openstack_names, out, indent=3)
| {
"repo_name": "cmantas/tiramola_v3",
"path": "lib/persistance_module.py",
"copies": "1",
"size": "1532",
"license": "apache-2.0",
"hash": -2887049403545663000,
"line_mean": 25.8771929825,
"line_max": 81,
"alpha_frac": 0.6788511749,
"autogenerated": false,
"ratio": 3.1718426501035197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.435069382500352,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cmantas'
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3 as lite
import sys, json
from datetime import datetime
# Data files, resolved relative to the current working directory.
ENV_VARS_FILE = 'files/env_vars.json'
OPENSTACK_NAMES_FILE = 'files/openstack_names.json'
db_file = "files/persistance.db"
#load the env vars from file
# (with-blocks close the handles; the original open(...).read() calls
# leaked one file descriptor per load)
with open(ENV_VARS_FILE, 'r') as _fh:
    env_vars = json.load(_fh)
#load the openstack names from file
with open(OPENSTACK_NAMES_FILE, 'r') as _fh:
    openstack_names = json.load(_fh)
def executescript(script):
    """Run a multi-statement SQL script against db_file.

    Commits on success, rolls back and prints the error on failure.

    :param script: SQL script text (may contain multiple statements)
    :return: None
    """
    con = None  # so error/cleanup paths never touch an unbound name
    try:
        con = lite.connect(db_file)
        cur = con.cursor()
        cur.executescript(script)
        con.commit()
    except lite.Error as e:
        # `as e` works on Python 2.6+ AND 3 (the original `, e` form was
        # py2-only); the original also raised NameError here when
        # connect() itself failed, because `con` was never bound.
        if con: con.rollback()
        print("Error %s:" % e.args[0])
    finally:
        if con:
            con.close()
# INIT the tables
#executescript("CREATE TABLE IF NOT EXISTS ROLES(VMID INTEGER PRIMARY KEY, Role TEXT)")
def execute_query(query):
    """Run a single SELECT against db_file and return all rows.

    :param query: SQL query text
    :return: list of row tuples, or None if the query failed
    """
    con = None  # so error/cleanup paths never touch an unbound name
    try:
        con = lite.connect(db_file)
        cur = con.cursor()
        cur.execute(query)
        return cur.fetchall()
    except lite.Error as e:
        # `as e` is py2.6+/py3 compatible (original `, e` was py2-only);
        # con=None init fixes the NameError when connect() itself failed
        if con: con.rollback()
        print("Error %s:" % e.args[0])
        return None
    finally:
        if con: con.close()
def execute_lookup(query):
    """Return the first row of *query*'s result, or None.

    :param query: SQL query text
    :return: first row tuple; None when there are no rows OR the query
             failed (execute_query returns None on error — the original
             crashed with TypeError iterating that None)
    """
    rows = execute_query(query)
    if not rows:
        return None
    return rows[0]
def get_credentials(user):
    """
    Retrieve the authentication url and authentication token for the given user.

    :param user: the user name for whom the credentials will be loaded
    :return: url, token
    """
    return env_vars["auth_url"], env_vars[user + "_token"]
def store_openstack_name(vm_id, name):
    """
    Record the OpenStack name for a VM and persist the whole mapping.

    :param vm_id: VM identifier used as the dictionary key
    :param name: OpenStack name to associate with the VM
    :return: None
    """
    openstack_names[vm_id] = name
    with open(OPENSTACK_NAMES_FILE, 'w') as out:
        json.dump(openstack_names, out, indent=3)
def get_script_text(name):
    """Return the contents of the script file whose path is stored in
    env_vars[name], with a trailing newline appended.

    :param name: env_vars key holding the script's file path
    :return: file contents plus "\n"
    """
    # with-block closes the handle (original leaked it) and no longer
    # shadows the `file` builtin
    with open(env_vars[name], 'r') as fh:
        return fh.read() + "\n"
| {
"repo_name": "cmantas/cluster_python_tool",
"path": "lib/persistance_module.py",
"copies": "1",
"size": "2075",
"license": "apache-2.0",
"hash": 8469714468918350000,
"line_mean": 22.3146067416,
"line_max": 87,
"alpha_frac": 0.6187951807,
"autogenerated": false,
"ratio": 3.446843853820598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9490666017469019,
"avg_score": 0.01499460341031569,
"num_lines": 89
} |
__author__ = 'cmiller'
'''
Tool for anim exporting and importing between rigs for Hulk Show and expandable to others.
Also houses similar tools for baking final animation for Lighting export
1. Option to delete the keys
'''
# Imports
import os, os.path, json, sys
from maya import OpenMayaUI as omUI, cmds
from PySide import QtGui, QtCore, QtUiTools
from shiboken import wrapInstance
myDir = os.path.dirname(os.path.abspath(__file__))
myFile = os.path.join(myDir, 'CMiller_MafTools.ui')
class ExImFuncs(object):
    """Core export/import logic for moving animation between rigs via
    .animMAF (JSON) files, plus baking helpers used for Lighting export.
    Designed for use inside Maya (all operations go through maya.cmds).
    """
    def __init__(self):
        # selectPref(tso=1): enable selection-order tracking — presumably
        # needed so selection-dependent operations are deterministic
        # (TODO confirm against the workflows that rely on it)
        cmds.selectPref(tso=1)
        self.importOnly = False
        # full path of the currently open scene ('' if the scene was
        # never saved) — used by getSavePath to derive the animMaf dir
        self.__FullPath__ = cmds.file(q=1, sn=1)
    ##########################
    #
    # Functions
    #
    ##########################
def setAnim(self, par='', ctlData={}, startFrame=0.0, endFrame=1.0, animLayer=""):
    """ Applies animation data to a hierarchy of controllers.
    :param par: Top level node to start from.
    :param ctlData: Dictionary containing relevant keyframe data per object and attribute.
    :param startFrame: Start frame of the animation.
    :param endFrame: Last frame of the animation.
    :param animLayer: Optional argument to apply data to an Animation Layer.
    :return: None
    """
    # NOTE(review): mutable default for ctlData is shared across calls if
    # ever mutated — safe only while this method treats it as read-only.
    parSplit = par.split(":")[-1].split("|")[-1]
    print parSplit
    if parSplit in ctlData.keys():
        print "par found: " + parSplit
        attrs = ctlData[parSplit]
        for attr in attrs.keys():
            shortAttr = attr.split('.')[-1]
            fullAttr = par + "." + shortAttr  # NOTE(review): unused
            if animLayer:
                # ensure the object is a member of the target anim layer
                cmds.select(par, r=1)
                cmds.animLayer(animLayer, e=1, aso=1)
                cmds.select(cl=1)
            # fixed-order list of single-key dicts written by getAnim
            tD = attrs[attr][0]
            vD = attrs[attr][1]
            weightedTanD = attrs[attr][2]
            inTanD = attrs[attr][3]
            outTanD = attrs[attr][4]
            lockTanD = attrs[attr][5]
            weightLockD = attrs[attr][6]
            inAngleD = attrs[attr][7]
            outAngleD = attrs[attr][8]
            inWeightD = attrs[attr][9]
            outWeightD = attrs[attr][10]
            if cmds.attributeQuery(shortAttr, node=par, ex=1):
                # first pass: set the keyframes themselves
                for ii in xrange(len(tD['time'])):
                    if startFrame <= tD['time'][ii] <= endFrame:
                        if animLayer:
                            cmds.setKeyframe(par, t=tD['time'][ii], v=float(vD['value'][ii]), at=shortAttr,
                                             al=animLayer, breakdown=False, hierarchy='none', controlPoints=False,
                                             shape=False)
                        else:
                            cmds.setKeyframe(par, t=tD['time'][ii], v=float(vD['value'][ii]), at=shortAttr,
                                             breakdown=False, hierarchy='none', controlPoints=False, shape=False)
                if weightedTanD:
                    try:
                        cmds.keyTangent(par, e=1, wt=int(weightedTanD['weightedTan'][0]), at=shortAttr)
                    except:
                        pass
                # second pass: restore per-key tangent angles/weights/locks
                for ii in xrange(len(tD['time'])):
                    if startFrame <= tD['time'][ii] <= endFrame:
                        try:
                            cmds.keyTangent(par + "." + shortAttr, e=1, t=(tD['time'][ii], tD['time'][ii]),
                                            ia=inAngleD['inAngle'][ii], iw=inWeightD['inWeight'][ii],
                                            oa=outAngleD['outAngle'][ii], ow=outWeightD['outWeight'][ii])
                            cmds.keyTangent(par + "." + shortAttr, e=1, t=(tD['time'][ii], tD['time'][ii]),
                                            itt=inTanD['inTan'][ii], ott=outTanD['outTan'][ii])
                            cmds.keyTangent(par + "." + shortAttr, e=1, t=(tD['time'][ii], tD['time'][ii]),
                                            lock=lockTanD['lockTan'][ii])
                            if weightedTanD['weightedTan'][0]:
                                cmds.keyTangent(par + "." + shortAttr, e=1, t=(tD['time'][ii], tD['time'][ii]),
                                                lock=weightLockD['weightLock'][ii])
                        except:
                            print "tangent wtf at " + parSplit + "." + shortAttr
    print "done with " + parSplit
def getAnim(self, par='', startFrame=0.0, endFrame=1.0):
    """ Queries an object for relevant keyframe animation data.
    :param par: Object to query.
    :param startFrame: Start frame to query animation from.
    :param endFrame: Last frame to query animation on.
    :return: Dictionary of the attributes and their values.
    """
    attrsKeyable = cmds.listAnimatable(par)
    attrDict = {}
    for attr in attrsKeyable:
        # interleaved [time0, value0, time1, value1, ...] list
        tv = cmds.keyframe(attr, q=1, timeChange=1, valueChange=1, a=1, t=(startFrame, endFrame))
        shortAttr = attr.split(':')[-1].split('|')[-1]
        if not tv:
            # attribute has no keys in range: record its static value at
            # startFrame as a single pseudo-key
            sVal = cmds.getAttr(attr, t=startFrame)
            tv = [startFrame, sVal]
        weightedTan = cmds.keyTangent(attr, q=1, weightedTangents=1, t=(startFrame, endFrame))
        inTan = cmds.keyTangent(attr, q=1, itt=1, t=(startFrame, endFrame))
        outTan = cmds.keyTangent(attr, q=1, ott=1, t=(startFrame, endFrame))
        lockTan = cmds.keyTangent(attr, q=1, lock=1, t=(startFrame, endFrame))
        weightLock = cmds.keyTangent(attr, q=1, weightLock=1, t=(startFrame, endFrame))
        inAngle = cmds.keyTangent(attr, q=1, inAngle=1, t=(startFrame, endFrame))
        outAngle = cmds.keyTangent(attr, q=1, outAngle=1, t=(startFrame, endFrame))
        inWeight = cmds.keyTangent(attr, q=1, inWeight=1, t=(startFrame, endFrame))
        outWeight = cmds.keyTangent(attr, q=1, outWeight=1, t=(startFrame, endFrame))
        # fixed-order list layout consumed positionally by setAnim
        attrDict[shortAttr] = [{'time': tv[0::2]}, {'value': tv[1::2]}, {'weightedTan': weightedTan},
                               {'inTan': inTan}, {'outTan': outTan}, {'lockTan': lockTan},
                               {'weightLock': weightLock}, {'inAngle': inAngle}, {'outAngle': outAngle},
                               {'inWeight': inWeight}, {'outWeight': outWeight}]
    return attrDict
def constraintBake(self, obj, ex='none'):
    """ Bakes down the constraints on an object.
    :param obj: Target object.
    :param ex: Valid arguments (hierarchy) are 'above', 'below', 'both', 'none'
    :return: None
    """
    startFrame = int(cmds.playbackOptions(q=1, min=1))
    endFrame = int(cmds.playbackOptions(q=1, max=1))
    # c=1 returns alternating (plug, connected-node) pairs, so the even
    # slots are the constrained sides and the odd slots the constraints
    cons = cmds.listConnections(obj, type="constraint", c=1)
    if cons:
        cmds.bakeResults(cons[0::2], t=(startFrame, endFrame), sm=1, hi=ex)
        # de-duplicate before deleting the (now redundant) constraints
        conList = list(set(cons[1::2]))
        cmds.delete(conList)
def exportAnim(self, variant=""):
    """ Exports animation on the selected object(s) to an .animMAF file.
    :param variant: Optional argument for a modified name.
    :return: Path to the .animMAF file.
    """
    curSelection = cmds.ls(sl=1)
    for topNode in curSelection:
        savePath, startFrame, endFrame, aeDirPath = self.getSavePath(topNode, variant)
        if os.path.exists(savePath):
            myChoice = cmds.confirmDialog(title='File Exists!!',
                                          message='This wip version already has an animMAF file. Do you want to overwrite?',
                                          button=['Yes', 'No'], defaultButton='No', cancelButton='No',
                                          dismissString='No')
            if myChoice == 'No':
                # NOTE(review): sys.exit in a GUI session kills the whole
                # interpreter, not just this export — confirm intent
                sys.exit(0)
        cmds.warning('Currently Writing Out Frames %d to %d for object %s. You have not crashed.' % (
            startFrame, endFrame, topNode))
        cmds.refresh()
        masterDict = {}
        ctlDict = {}
        # rest position of the top node, stored under '_init' in the file
        initT = cmds.getAttr(topNode + ".t")
        initR = cmds.getAttr(topNode + ".r")
        initS = cmds.getAttr(topNode + ".s")
        initPos = initT + initR + initS
        parList = cmds.listRelatives(topNode, ad=1, f=1, type="transform")
        # parList = list(set([cmds.listRelatives(i,f=1,p=1)[0] for i in hi]))
        for par in parList:
            self.constraintBake(par)
            # off = cmds.listRelatives(par,p=1)[0]
            shortPar = par.split(':')[-1].split('|')[-1]
            # shortOff = off.split(':')[-1].split('|')[-1]
            if shortPar == 'MASTER_CONTROL':
                # if the top node sat at origin, take the init pose from
                # the master control at startFrame instead
                if initT == [(0.0, 0.0, 0.0)]:
                    initT = cmds.getAttr(par + ".t", t=startFrame)
                    initR = cmds.getAttr(par + ".r", t=startFrame)
                    initS = cmds.getAttr(par + ".s", t=startFrame)
                    initPos = initT + initR + initS
            elif "tranRot_CTL" in shortPar:
                if initT == [(0.0, 0.0, 0.0)]:
                    initT = cmds.getAttr(par + ".t", t=startFrame)
                    initR = cmds.getAttr(par + ".r", t=startFrame)
                    initS = cmds.getAttr(par + ".s", t=startFrame)
                    initPos = initT + initR + initS
            '''
            So somewhere in here, I need to check if the offset is constrained, and bake it if so.
            Or maybe just have an option. But these people generally don't bake the constraints down.
            1st world python problems...
            '''
            # is animated?
            numKeys = cmds.keyframe(par, q=1, kc=1, t=(startFrame, endFrame))
            if numKeys > 0:
                # animated
                print shortPar
                shortParAttrDict = self.getAnim(par, startFrame, endFrame)
                ctlDict[shortPar] = shortParAttrDict
            '''
            offKeys = cmds.keyframe(off, q=1, kc=1, t=(startFrame, endFrame))
            if offKeys > 0:
                shortOffAttrDict = self.getAnim(off,startFrame,endFrame)
                ctlDict[shortOff] = shortOffAttrDict
            # attrDict.keys()
            # ctlDict.keys() ctlDict['x_ctrl']
            # masterDict.keys()
            '''
        topNodeShort = topNode.split(":")[-1]
        masterDict[topNodeShort] = ctlDict
        masterDict['_init'] = initPos
        with open(savePath, 'w') as file:
            data = json.dump(masterDict, file)
        print(savePath)
        # NOTE(review): returning inside the loop means only the FIRST
        # selected object is ever exported — confirm whether multi-object
        # export was intended.
        return savePath
def importAnim(self, animLayer='', murderKeys=False, dataFile=None):
    """ Imports animation from an .animMAF file to the selected object.
    :param animLayer: Optional argument for Animation Layer to import on.
    :param murderKeys: Whether or not to delete pre-existing keyframes.
    :param dataFile: The .animMAF file to reference.
    :return: None
    """
    topNode = cmds.ls(sl=1)[0]
    startFrame = int(cmds.playbackOptions(q=1, min=1))
    endFrame = int(cmds.playbackOptions(q=1, max=1))
    # savePath, startFrame, endFrame, aeDirPath = self.getFilePath(topNode)
    if dataFile:
        # pre-loaded data: dataFile is [initPos, ctlData] (see UI caller)
        initPos = dataFile[0]
        ctlData = dataFile[1]
    else:
        # otherwise prompt the user for an .animMAF file
        savePath = cmds.fileDialog2(ds=2, fm=1, ff='MAF Files (*.animMAF)')[0]
        with open(savePath, 'r') as file:
            data = json.load(file)
        # the file holds exactly two keys: '_init' and the top-node entry;
        # NOTE(review): data.keys()[0] indexing is Python-2-only
        if data.keys()[0] == '_init':
            initPos = data[data.keys()[0]]
            ctlData = data[data.keys()[1]]
        else:
            initPos = data[data.keys()[1]]
            ctlData = data[data.keys()[0]]
    parList = cmds.listRelatives(cmds.ls(sl=1)[0], ad=1, f=1, type="transform")
    # parList = list(set([cmds.listRelatives(i,f=1,p=1)[0] for i in hi]))
    for par in parList:
        shortPar = par.split(':')[-1]
        if shortPar == 'MASTER_CONTROL':
            # restore the stored rest pose (t/r/s triples) on the master
            cmds.setAttr(par + '.t', initPos[0][0], initPos[0][1], initPos[0][2])
            cmds.setAttr(par + '.r', initPos[1][0], initPos[1][1], initPos[1][2])
            cmds.setAttr(par + '.s', initPos[2][0], initPos[2][1], initPos[2][2])
        elif "tranRot_CTL" in shortPar:
            cmds.setAttr(par + '.t', initPos[0][0], initPos[0][1], initPos[0][2])
            cmds.setAttr(par + '.r', initPos[1][0], initPos[1][1], initPos[1][2])
            cmds.setAttr(par + '.s', initPos[2][0], initPos[2][1], initPos[2][2])
        # off = cmds.listRelatives(par,p=1)[0]
        if murderKeys:
            # wipe existing keys in range before applying the new ones
            cmds.cutKey(par, time=(startFrame, endFrame), cl=1, option="keys")
            # cmds.cutKey( off, time=(startFrame,endFrame), cl=1, option="keys")
        self.setAnim(par, ctlData, startFrame, endFrame, animLayer)
        # self.setAnim(off,ctlData,startFrame,endFrame,animLayer)
    print "IMPORT COMPLETE!"
def replaceTarget(self, dataFile, old, new):
    """ Replaces object name/target in .animMAF file.
    :param dataFile: The .animMAF data dict to modify (in place).
    :param old: Original object name to replace.
    :param new: New object name to replace with.
    :raises KeyError: if *old* is not present in dataFile.
    :return: The modified .animMAF data and the sorted key list from it.
    """
    # move the entry to its new key (mutates dataFile in place)
    dataFile[new] = dataFile.pop(old)
    # sorted() returns a fresh sorted list on both Python 2 and 3;
    # the original `dataFile.keys(); ctlList.sort()` was py2-only
    # (dict views have no .sort() in py3) with identical results
    ctlList = sorted(dataFile)
    return dataFile, ctlList
def getSavePath(self, obj, variant=""):
    """ Attempt to find the save path to an .animMAF file for the currently selected object.
    :param obj: Object to find/create a save path for.
    :param variant: Optional argument for a modified name.
    :return: Path to the .animMaf file, first frame, last frame, base directory || None.
    """
    startFrame = int(cmds.playbackOptions(q=1, min=1))
    endFrame = int(cmds.playbackOptions(q=1, max=1))
    # NOTE(review): USERNAME is a Windows env var; returns None on
    # Linux/macOS — confirm deployment platform
    usr = os.getenv('USERNAME')
    if self.__FullPath__:
        # derive an "animMaf" folder next to the saved scene file
        baseDir = os.path.dirname(self.__FullPath__)
        aeDirPath = baseDir + "/animMaf/"
        if not os.path.isdir(aeDirPath):
            os.makedirs(aeDirPath)
        savePath = "%s%s_%s_%s.animMAF" % (aeDirPath, obj, str(variant), usr)
        return savePath, startFrame, endFrame, aeDirPath
    else:
        # unsaved scene: no base directory to write into
        cmds.warning("Please save the scene to set a working directory")
        return None
def processStart(self):
    """ Function for working with and baking out rigid objects.
    :return: None
    """
    self.topNode = cmds.ls(sl=1)[0]
    self.startFrame = int(cmds.playbackOptions(q=1, min=1))
    self.endFrame = int(cmds.playbackOptions(q=1, max=1))
    # only rigid props (see checkRigid) get baked and exported
    if self.checkRigid(self.topNode):
        self.bakeObjectsAction(self.topNode)
        cmds.refresh()
        # NOTE(review): exportBakedDataToFile is marked UNDER CONSTRUCTION
        # and currently raises — see its body.
        self.exportBakedDataToFile(self.topNode)
def checkRigid(self, topNode):
    """ Determines if an object is a rigid prop (does not have a skinCluster).
    :param topNode: Object to check.
    :return: True or False.
    """
    pNode = cmds.listRelatives(topNode, p=1)[0]
    if pNode.split(":")[-1] == "Model":
        # We need to check where the constraint driver is
        kids = cmds.listRelatives(pNode, c=1)
        ctl = ""
        for kid in kids:
            if "parentConstraint" in kid:
                modelCon = kid
                ctl = [x for x in list(set(cmds.listConnections(kid, s=1, d=0))) if "CTL" in x][0]
                tgtWgt = cmds.parentConstraint(kid, q=1, wal=1)[0]
                # We've determined this is a Generic prop rig and the constraint is on a parent node
                # We have the constraint as well as
                break
        if ctl:
            # There is a constraint on the Model group, we'll need to emulate it.
            self.tempCon = cmds.parentConstraint(ctl, topNode, mo=1)
            cmds.setAttr("%s.%s" % (modelCon, tgtWgt), 0)
    conns = cmds.listConnections(topNode)
    if conns:
        if len([x for x in conns if "parentConstraint" in x]) > 0:
            # A parent constraint is attached to the object now, we may proceed
            return True
        else:
            # Either a constraint is lower in the hierarchy, or this object is not even rigged. Let's quickly check:
            if len([p for p in cmds.listRelatives(topNode, p=1, f=1)[0].split("|") if "Rig" in p]) > 0:
                # We have a rig, let's proceed
                return True
            else:
                # This isn't even a rigged object unless someone named it wrong. We need to abort and alert the user
                cmds.warning("Object does not appear to be rigged. Skipping.")
                return False
    # NOTE(review): when the object has no connections at all this method
    # falls through and returns None (falsy) without warning the user.
def bakeObjectsAction(self, obj):
    """ Bakes down the animation of an object.
    :param obj: Object to bake keyframes on.
    :return: Number of baked channels.
    """
    # bake over the range captured by processStart; hi="below" bakes the
    # whole hierarchy under obj
    numBaked = cmds.bakeResults(obj, simulation=1, t=(self.startFrame, self.endFrame), hi="below", sb=1, dic=1,
                                sac=0,
                                pok=1, ral=0, bol=0, mr=1, cp=0, s=1)
    print (str(numBaked) + " channels baked")
    return numBaked
def exportBakedDataToFile(self, topNode, variant=""):
    """ **UNDER CONSTRUCTION**
    """
    pass
    # NOTE(review): `pass` is a no-op, so everything below still executes;
    # self.getFilePath() does not exist on this class (getSavePath does),
    # so calling this method currently raises AttributeError.
    # range star to end
    # make dict
    masterDict = {}
    ctlDict = {}
    savePath, startFrame, endFrame, aeDirPath = self.getFilePath(topNode, variant)
    initT = cmds.getAttr(topNode + ".t")
    initR = cmds.getAttr(topNode + ".r")
    initS = cmds.getAttr(topNode + ".s")
    initPos = initT + initR + initS
    # add to dict with key time
    hi = cmds.listRelatives(topNode, ad=1, f=1)
    parList = list(set([cmds.listRelatives(i, f=1, p=1)[0] for i in hi]))
    for par in parList:
        shortParAttrDict = self.getAnim(par, startFrame, endFrame)
        ctlDict[par] = shortParAttrDict
    masterDict[topNode] = ctlDict
    masterDict['_init'] = initPos
    with open(savePath, 'w') as file:
        data = json.dump(masterDict, file)
    print(savePath)
    return savePath
def bakeOutWorldData(self):
    """ **UNDER CONSTRUCTION**
    """
    pass
    # NOTE(review): the code below still executes after `pass` and
    # references parList / startFrame / endFrame, which are never defined
    # in this scope — calling this method raises NameError. The
    # statements after `return bakeList` are unreachable.
    bakeList = []
    conList = []
    for par in parList:
        ctl = cmds.circle(n="%s" % (par.split("|")[-1].split(":")[-1]))[0]
        pCon = cmds.parentConstraint(par, ctl, mo=0)[0]
        bakeList.append(ctl)
        conList.append(pCon)
    print "Ready to bake"
    cmds.bakeResults(bakeList, t=(startFrame, endFrame))
    return bakeList
    print "finished bake"
    # parent to world
    # rename to orig
    # bake
    # createCurves for every obj
    # parent to world
    # rename to orig
    # bake
    # cmds.file(savePath, f=1, typ="animExport", es=1, options="precision=8;intValue=17;nodeNames=1;verboseUnits=0;whichRange=2;range=%d:%d;options=keys;hierarchy=below;controlPoints=0;shapes=1;helpPictures=0;useChannelBox=0;copyKeyCmd=-animation objects -time >%d:%d> -float >%d:%d> -option keys -hierarchy below -controlPoints 0 -shape 1 "%(startFrame,endFrame,startFrame,endFrame,startFrame,endFrame))
'''
################################################
~User Interface~
################################################
'''
class KeyPressEater(QtCore.QObject):
    """ I'm just fixing the stupid QT bugs in Maya where you lose focus with a modifier key.
    """
    def eventFilter(self, obj, event):
        """ Override the eventFilter to keep focus on windows by ignoring the first press of certain keys.
        """
        if event.type() == QtCore.QEvent.KeyPress:
            # Filter out Shift, Control, Alt
            if event.key() in [QtCore.Qt.Key_Shift, QtCore.Qt.Key_Control, QtCore.Qt.Key_Alt, QtCore.Qt.Key_CapsLock,
                               QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter]:
                # swallow the event so the window keeps focus
                return True
            else:
                # Standard event processing
                return QtCore.QObject.eventFilter(self, obj, event)
        # NOTE(review): non-KeyPress events fall through returning None,
        # which Qt treats as "not filtered".
def addFilter(ui):
    """ Push the event filter into the UI.
    """
    # the filter is parented to `ui`, so it stays alive as long as the
    # widget does
    keyPressEater = KeyPressEater(ui)
    ui.installEventFilter(keyPressEater)
def getMayaWindow():
    """ Return Maya's main window.

    Returns None implicitly when no main-window pointer is available
    (e.g. batch mode).
    """
    ptr = omUI.MQtUtil.mainWindow()
    if ptr is not None:
        # NOTE(review): `long` is Python-2-only; wrapInstance needs an
        # integer pointer value
        return wrapInstance(long(ptr), QtGui.QMainWindow)
class cmmAnimExportToolUI(QtGui.QDialog, ExImFuncs):
    """Qt dialog front-end for ExImFuncs: loads CMiller_MafTools.ui and
    wires its buttons to the export/import/replace/save actions."""
    def __init__(self, parent=getMayaWindow()):
        """Initialize the class, load the UI file.
        """
        # NOTE(review): the default `parent` is evaluated once at import
        # time, not per call — confirm this is intended.
        self.loadedData = None
        self.dataFile = None
        super(cmmAnimExportToolUI, self).__init__(parent)
        # loaded .animMAF state (also reset above, pre-super, for safety)
        self.loadedData = None
        self.dataFile = None
        self.loadedInit = None
        self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
        self.loader = QtUiTools.QUiLoader(self)
        self.UI = self.loader.load(myFile, self)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.ExImFuncs = ExImFuncs()
        addFilter(self.UI)
        # Connect the elements
        self.UI.curDirContents_pushButton.clicked.connect(self.dirListing)
        self.UI.exportAnim_pushButton.clicked.connect(self.exportButtonAction)
        self.UI.importAnim_pushButton.clicked.connect(self.importButtonAction)
        # self.keyPressEvent(self.UI.keyPressEvent)
        self.UI.loadMAFData_pushButton.clicked.connect(self.loadButtonAction)
        self.UI.replaceMAFData_pushButton.clicked.connect(self.replaceButtonAction)
        self.UI.saveMAFData_pushButton.clicked.connect(self.saveButtonAction)
        if self.ExImFuncs.importOnly == True:
            self.UI.exportTab.setEnabled(False)
        print(self.UI.keyPressEvent)
        # self.UI.keyPressEvent(self.keyPressEvent())
        '''
        self.UI.curDirContents_listWidget.clicked.connect(self.saveWeights)
        self.UI.loadReg_btn.clicked.connect(self.loadWeights)
        self.UI.loadWorld_btn.clicked.connect(self.loadWorldWeights)
        self.UI.refresh_btn.clicked.connect(self.refreshNamespaceUI)
        self.refreshNamespaceUI()
        '''
        # Show the window
        self.UI.show()
def exportButtonAction(self):
    """ GUI command variant of exportAnim.
    """
    var = self.UI.fileAppend_lineEdit.text()
    # world = self.UI.worldSpaceBake_checkBox.isChecked()
    fp = self.ExImFuncs.exportAnim(var)
    if fp:
        # show the result as a clickable link to the containing folder
        self.UI.outputPath_label.setText('<a href="%s">%s</a>' % (str("/".join(fp.split('/')[:-1])), str(fp)))
def importButtonAction(self):
    """ GUI command variant of importAnim.
    """
    animLayer = self.UI.targetAnimLayer_lineEdit.text()  # get this val from UI
    delKeys = self.UI.deleteAnim_checkBox.isChecked()
    if self.loadedData:
        # use the pre-loaded MAF data, then clear the loaded state so a
        # stale file cannot be re-applied by accident
        self.ExImFuncs.importAnim(animLayer, delKeys, [self.loadedInit, self.loadedData])
        self.loadedData = None
        self.dataFile = None
        self.loadedInit = None
        self.UI.loadedMAF_listWidget.clear()
        self.UI.saveMAFData_pushButton.setEnabled(False)
    else:
        # no pre-loaded data: importAnim will prompt for a file
        self.ExImFuncs.importAnim(animLayer, delKeys)
def loadButtonAction(self):
    """ Load an .animMAF file into the UI.
    :return: None
    """
    savePath = cmds.fileDialog2(ds=2, fm=1, ff='MAF Files (*.animMAF)')[0]
    with open(savePath, 'r') as file:
        data = json.load(file)
    # the file holds exactly two keys: '_init' and the top-node entry;
    # NOTE(review): data.keys()[0] indexing is Python-2-only (list keys)
    if data.keys()[0] == '_init':
        initPos = data[data.keys()[0]]
        ctlData = data[data.keys()[1]]
    else:
        initPos = data[data.keys()[1]]
        ctlData = data[data.keys()[0]]
    ctlList = ctlData.keys()
    ctlList.sort()
    self.loadedListPopulate(ctlList)
    # stash the parsed data for import/replace/save actions
    self.loadedData = ctlData
    self.loadedInit = initPos
    self.dataFile = data
    # enable save BUTTON
    self.UI.saveMAFData_pushButton.setEnabled(True)
def saveButtonAction(self):
    """ GUI command hook for saveNewMAF.
    :return: None
    """
    savePath = cmds.fileDialog2(ds=2, fm=1, ff='MAF Files (*.animMAF)')[0]
    newMasterDict = {}
    # key the saved data under the currently selected node's short name
    topNode = cmds.ls(sl=1)[0]
    topNodeShort = topNode.split(":")[-1]
    newMasterDict[topNodeShort] = self.loadedData
    newMasterDict['_init'] = self.loadedInit
    self.saveNewMAF(savePath, newMasterDict)
def saveNewMAF(self, dataFile, data):
    """ Saves/Overwrites an .animMAF file.
    :param dataFile: The .animMaf file path to save to.
    :param data: The data to serialize into the specified file.
    :return: None
    """
    # serialize the whole data dict as JSON, replacing any existing file
    with open(dataFile, 'w') as out:
        json.dump(data, out)
def loadedListPopulate(self, ctlList):
    """ Adds the passed controller list into the UI.
    :param ctlList: List of controllers to display.
    :return: None
    """
    # replace (not append to) the current list contents
    self.UI.loadedMAF_listWidget.clear()
    self.UI.loadedMAF_listWidget.addItems(ctlList)
def replaceButtonAction(self):
    """ Runs functions to modify the loaded .animMAF file.
    :return: None
    """
    # rename the selected controller entry to the name typed in the field
    oldObj = self.UI.loadedMAF_listWidget.currentItem().text()
    newObj = self.UI.replaceMAFData_lineEdit.text()
    newData, ctlList = self.ExImFuncs.replaceTarget(self.loadedData, oldObj, newObj)
    self.UI.replaceMAFData_lineEdit.clear()
    self.loadedData = newData
    self.loadedListPopulate(ctlList)
def dirListing(self):
    """ Lists all .animMAF files for the current scene in the UI.
    :return: None
    """
    # getSavePath returns (savePath, startFrame, endFrame, aeDirPath) or
    # None when the scene was never saved
    fpReturns = self.ExImFuncs.getSavePath("None")
    if fpReturns:
        if os.path.exists(fpReturns[3]):
            dirList = os.listdir(fpReturns[3])
            self.UI.curDirContents_listWidget.clear()
            for i in dirList:
                if i.endswith(".animMAF"):
                    self.UI.curDirContents_listWidget.addItem(i)
        else:
            # animMaf folder does not exist yet: show a placeholder
            self.UI.curDirContents_listWidget.clear()
            self.UI.curDirContents_listWidget.addItem("-None-")
def run():
    """ Run the UI.
    """
    global cmmAnimExportToolWin
    # close any previous instance before opening a fresh one
    try:
        cmmAnimExportToolWin.close()
    except:
        pass
    cmmAnimExportToolWin = cmmAnimExportToolUI()
| {
"repo_name": "CzarOfTheUniverse/MayaPythonTools",
"path": "CMiller_MafTools/CMiller_MafTools.py",
"copies": "1",
"size": "27850",
"license": "mit",
"hash": -4473608612826845000,
"line_mean": 38.9558823529,
"line_max": 408,
"alpha_frac": 0.533070018,
"autogenerated": false,
"ratio": 3.8626907073509016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48957607253509017,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cobedien'
import requests
import json
# APIC controller base URL and login payload.
# NOTE(review): hard-coded admin credentials in source, sent over plain
# HTTP — move to a config file / environment variables.
base_url = 'http://10.0.243.20/api/'
name_pwd = {'aaaUser': {'attributes': {'name': 'admin', 'pwd': 'cisco.123'}}}
json_credentials = json.dumps(name_pwd)
# Authenticate and extract the session token from the login response.
login_url = base_url + 'aaaLogin.json'
post_response = requests.post(login_url, data=json_credentials)
auth = json.loads(post_response.text)
login_attributes = auth['imdata'][0]['aaaLogin']['attributes']
auth_token = login_attributes['token']
# The token is passed back as the APIC-Cookie on subsequent requests.
cookies = {}
cookies['APIC-Cookie'] = auth_token
# Alternative endpoints kept for reference/experimentation:
#sensor_url = base_url + 'topology/pod-1/node-202/sys/ch/supslot-1/sup/sensor-3.json'
#sensor_url = base_url + 'mo/topology/pod-1/node-202/sys/ch/supslot-1/sup/sensor-3/HDeqptTemp5min-0.json'
#sensor_url = base_url + 'mo/topology/HDfabricOverallHealth5min-0.json'
#sensor_url = base_url + 'mo/topology/pod-1/node-1.json'
#sensor_url = base_url + 'mo/uni/tn-THD/HDfvOverallHealth15min-0.json'
#sensor_url = base_url + 'node/class/fvTenant.json'
sensor_url = base_url + 'node/mo/topology/health.json'
print sensor_url
# Query overall fabric health and print the current score.
get_response = requests.get(sensor_url, cookies=cookies, verify=False)
data = get_response.json()
print data['imdata'][0]['fabricHealthTotal']['attributes']['cur']
#print data
#print data['imdata'][0]['fvTenant']['attributes']['name']
"repo_name": "tecdct2941/nxos_dashboard",
"path": "test_uri.py",
"copies": "1",
"size": "1257",
"license": "apache-2.0",
"hash": 3891488434091734500,
"line_mean": 36,
"line_max": 105,
"alpha_frac": 0.7128082737,
"autogenerated": false,
"ratio": 2.7565789473684212,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3969387221068421,
"avg_score": null,
"num_lines": null
} |
__author__ = "Coda Hale <coda.hale@gmail.com>"
__date__ = "2008-03-19"
__version__ = "1.0"
__credits__ = """
Copyright (c) 2008 Coda Hale
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
A set of classes which make writing map/reduce tasks for Hadoop easy.
An example job, which reads countries from a Tab-Separated Value file and
outputs the number of times each country appears:
country_count.py:
from collections import defaultdict
from hadoop import Job
from hadoop.parsers import TSVParser
COUNTRY_COLUMN = 3
class CountryCount(Job):
def __init__(self):
super(CountryCount, self).__init__()
self.map_parser = TSVParser
def map(self, key, values, collector):
collector.collect(values[COUNTRY_COLUMN], 1)
def reduce(self, keys_and_values, collector):
countries = defaultdict(int)
for country, count in keys_and_values:
countries[country] += int(count)
for country, count in countries.iteritems():
collector.collect(country, count)
To run locally:
cat data.tsv | python country_count.py --map | python country_count.py --reduce
To run via Hadoop Streaming:
bin/hadoop jar contrib/streaming/hadoop-streaming-0.16.0.jar \\
-input my_countries.tsv -output country_counts \\
-mapper "country_count.py --map" -reducer "country_count.py --reduce"
"""
import sys
from hadoop.collectors import KeyValueCollector
from hadoop.parsers import LineParser, KeyValueParser
from hadoop.runner import Runner
class Job(object):
"""
The main Hadoop class. Your job classes should descend from this and
implement map() and reduce().
"""
def __init__(self):
"""
Creates a new job instance.
Override this to change the parser and collector types for your map()
and reduce() methods. They default to:
map_parser = LineParser
map_collector = KeyValueCollector
reduce_parser = KeyValueParser
reduce_collector = KeyValueCollector
"""
super(Job, self).__init__()
self.map_parser, self.map_collector = LineParser, KeyValueCollector
self.reduce_parser, self.reduce_collector = KeyValueParser, KeyValueCollector
def start_map(self, parser_stream=sys.stdin, collector_stream=sys.stdout):
"""
Starts the mapping process. Should only be called by the Runner.
"""
parser = self.map_parser(parser_stream)
collector = self.map_collector(collector_stream)
for data in parser:
if isinstance(data, tuple):
self.map(*(data + (collector,)))
else:
self.map(*(data, collector))
def start_reduce(self, parser_stream=sys.stdin, collector_stream=sys.stdout):
"""
Starts the reducing process. Should only be called by the Runner.
"""
parser = self.reduce_parser(parser_stream)
collector = self.reduce_collector(collector_stream)
self.reduce(parser, collector=collector)
def map(self, line, collector):
"""
Given a set of input values, generates a set of intermediate values and
passes them to the reducer.
This method *must* accept a named argument, collector, which is used to
pass intermediate values to the reducers.
The rest of the method signature depends on the output of the parser.
"""
raise NotImplementedError('map() is not implemented in this class')
def reduce(self, iterator, collector):
"""
Given a set of keys and intermediate values, reduces and collects the
final values.
This method *must* accept a named argument, collector, which is used to
collect final values.
"""
raise NotImplementedError('reduce() is not implemented in this class')
@classmethod
def main(cls, arguments=sys.argv, runner=Runner):
"""
Maps, reduces, or displays help based on command-line arguments.
"""
runner(cls).main(arguments) | {
"repo_name": "codahale/hadoop-streaming",
"path": "python/lib/hadoop/__init__.py",
"copies": "1",
"size": "4976",
"license": "mit",
"hash": 3708776750673975000,
"line_mean": 34.55,
"line_max": 83,
"alpha_frac": 0.6927250804,
"autogenerated": false,
"ratio": 4.274914089347079,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5467639169747078,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.