text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'Charles'
from collections import defaultdict
from warpper import check_files
from model import IMAGE_OUTPUT_MODEL
from datetime import datetime
import logging
import sys
import image
import numpy as np
# Force UTF-8 as the process-wide default codec (Python 2 idiom: the
# setdefaultencoding attribute is removed by site.py, hence the reload).
reload(sys)
sys.setdefaultencoding('utf-8')
# Configure the root logger at DEBUG, mirrored to a per-day log file and to
# the console with the same format.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
date_tag = datetime.now().strftime("%Y-%m-%d")
logFormatter = logging.Formatter("%(asctime)s [%(threadName)s] [%(levelname)s] %(message)s")
fileHandler = logging.FileHandler("../logs/Main%s.log" % date_tag)
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
@check_files("image_output_file")
def stat_webp_compress(image_output_file):
    """Aggregate WebP compression ratios per real image type.

    Reads tab-separated rows (29 fields expected), sums original body sizes
    (field 11) and the compressed sizes at qualities 50/70/75 (addressed
    from the row tail), grouped by the real type in field 12, then prints
    one TSV row per type: type, count, original bytes, and (bytes, ratio)
    at q75, q70, q50.
    """
    overall_statistic = defaultdict(int)
    real_type_count_statistic = defaultdict(int)
    ori_size_statistic = defaultdict(int)
    compress_size_statistic_50 = defaultdict(int)
    compress_size_statistic_70 = defaultdict(int)
    compress_size_statistic_75 = defaultdict(int)
    with open(image_output_file) as r_handler:
        for line in r_handler:
            try:
                if line and line.strip():
                    overall_statistic['all'] += 1
                    line = line.strip()
                    terms = line.split('\t')
                    # a well-formed row has exactly 29 tab-separated fields
                    if len(terms) != 29:
                        overall_statistic['format_wrong'] += 1
                        continue
                    overall_statistic['right'] += 1
                    # image_model = IMAGE_OUTPUT_MODEL(terms)
                    # print image_model.len_response_body
                    # print image_model.compress_size
                    len_response_body = int(terms[11])
                    # compressed sizes sit at fixed offsets from the row end
                    compress_size_50 = int(terms[-8])
                    compress_size_70 = int(terms[-5])
                    compress_size_75 = int(terms[-2])
                    real_type_count_statistic[terms[12]] += 1
                    ori_size_statistic[terms[12]] += len_response_body
                    compress_size_statistic_50[terms[12]] += compress_size_50
                    compress_size_statistic_70[terms[12]] += compress_size_70
                    compress_size_statistic_75[terms[12]] += compress_size_75
            except Exception as e:
                # malformed numeric fields etc. are counted, logged, and skipped
                overall_statistic['error'] += 1
                logging.error("error {} in line {}".format(e, line))
    logging.info("[STAT] overstat is {}".format(overall_statistic))
    for item in ori_size_statistic:
        print "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(item, real_type_count_statistic[item],
                                                          ori_size_statistic[item],
                                                          compress_size_statistic_75[item],
                                                          float(compress_size_statistic_75[item]) / ori_size_statistic[item],
                                                          compress_size_statistic_70[item],
                                                          float(compress_size_statistic_70[item]) / ori_size_statistic[item],
                                                          compress_size_statistic_50[item],
                                                          float(compress_size_statistic_50[item]) / ori_size_statistic[item],
                                                          )
@check_files("image_output_file")
def stat_non_webp_runtime(image_output_file):
overall_statistic = defaultdict(int)
real_type_count_statistic = defaultdict(int)
cwebp_runtime_statistic = defaultdict(float)
dwebp_runtime_statistic = defaultdict(float)
ziproxy_runtime_statistic = defaultdict(float)
with open(image_output_file) as r_handler:
for line in r_handler:
try:
if line and line.strip():
overall_statistic['all'] += 1
line = line.strip()
terms = line.split('\t')
if len(terms) != 23:
overall_statistic['format_wrong'] += 1
continue
overall_statistic['right'] += 1
image_model = IMAGE_OUTPUT_MODEL(terms)
try:
cwebp_runtime = float(image_model.cwebp_runtime)
except ValueError as e:
# print e
cwebp_runtime = 0.0
try:
dwebp_runtime = float(image_model.dwebp_runtime)
except ValueError as e:
# print e
dwebp_runtime = 0.0
try:
ziproxy_runtime = float(image_model.ziproxy_runtime)
except ValueError as e:
# print e
ziproxy_runtime = 0.0
real_type_count_statistic[image_model.real_type] += 1
cwebp_runtime_statistic[image_model.real_type] += cwebp_runtime
dwebp_runtime_statistic[image_model.real_type] += dwebp_runtime
ziproxy_runtime_statistic[image_model.real_type] += ziproxy_runtime
except Exception as e:
overall_statistic['error'] += 1
logging.error("error {} in line {}".format(e, line))
logging.info("[STAT] overstat is {}".format(overall_statistic))
for item in real_type_count_statistic:
print "{}\t{}\t{}\t{}\t{}".format(item, real_type_count_statistic[item],
cwebp_runtime_statistic[item], dwebp_runtime_statistic[item],
ziproxy_runtime_statistic[item])
@check_files("image_output_file")
def statistic_ssim(image_output_file):
    """Average SSIM and compressed size per pixel-size bucket.

    Reads tab-separated rows (32 fields expected), keeps jpeg/webp/png rows,
    buckets them via image.image_pixel_type_detection(width, height), and
    prints per-bucket averages for pixel count, original size, SSIM, and
    compressed size at qualities 75/70/50.
    """
    overall_statistic = defaultdict(int)
    count_statistic = defaultdict(int)
    ori_size_statistic = defaultdict(int)
    ori_pixel_statistic = defaultdict(int)
    ssim75_statistic = defaultdict(float)
    ssim70_statistic = defaultdict(float)
    ssim50_statistic = defaultdict(float)
    compressed75_size_statistic = defaultdict(int)
    compressed70_size_statistic = defaultdict(int)
    compressed50_size_statistic = defaultdict(int)
    with open(image_output_file) as r_handler:
        for line in r_handler:
            try:
                if line and line.strip():
                    overall_statistic['all'] += 1
                    line = line.strip()
                    terms = line.split('\t')
                    if len(terms) != 32:
                        overall_statistic['format_wrong'] += 1
                        continue
                    overall_statistic['right'] += 1
                    image_model = IMAGE_OUTPUT_MODEL(terms)
                    if image_model.real_type not in ['jpeg','webp','png']:
                        continue
                    # 'weight' is the model's field name; presumably width — confirm in model.py
                    pixel_type = image.image_pixel_type_detection(image_model.weight, image_model.height)
                    count_statistic[pixel_type] += 1
                    ori_pixel_statistic[pixel_type] += image_model.weight * image_model.height
                    ori_size_statistic[pixel_type] += image_model.len_response_body
                    # if image_model.real_type == 'png' and high_ssim < 0.1:
                    #     compressed_size_statistic[pixel_type, 'high'] += ori_size
                    #     compressed_size_statistic[pixel_type, 'median'] += ori_size
                    #     compressed_size_statistic[pixel_type, 'low'] += ori_size
                    #
                    #     ssim_statistic[pixel_type, 'high'] += 1
                    #     ssim_statistic[pixel_type, 'median'] += 1
                    #     ssim_statistic[pixel_type, 'low'] += 1
                    # keys are (pixel_type, quality) tuples even though each
                    # dict holds a single quality; kept for compatibility
                    compressed75_size_statistic[pixel_type, '75'] += image_model.length_75
                    compressed70_size_statistic[pixel_type, '70'] += image_model.length_70
                    compressed50_size_statistic[pixel_type, '50'] += image_model.length_50
                    ssim75_statistic[pixel_type, '75'] += image_model.ssim_75
                    ssim70_statistic[pixel_type, '70'] += image_model.ssim_70
                    ssim50_statistic[pixel_type, '50'] += image_model.ssim_50
            except Exception as e:
                overall_statistic['error'] += 1
                # logging.error("error {} in line {}".format(e, line))
    logging.info("[STAT] overstat is {}".format(overall_statistic))
    # logging.info("[STAT] ori_pixel_statistic is {}".format(ori_pixel_statistic))
    # logging.info("[STAT] ori_size_statistic is {}".format(ori_size_statistic))
    # logging.info("[STAT] compressed_size_statistic is {}".format(compressed_size_statistic))
    # logging.info("[STAT] ssim_statistic is {}".format(ssim_statistic))
    for pixel_type in ['Tiny', 'Small', 'Middle', 'Large']:
        # for real_type in ['jpeg', 'png', 'gif', 'bmp']:
        p = pixel_type
        size = count_statistic[p]
        # '-' placeholder keeps the TSV shape when a bucket is empty
        avg_pixel = ori_pixel_statistic[p] / size if size > 0 else '-'
        avg_size = ori_size_statistic[p] / size if size > 0 else '-'
        avg_ssim_75 = ssim75_statistic[p, '75'] / size if size > 0 else '-'
        avg_ssim_70 = ssim70_statistic[p, '70'] / size if size > 0 else '-'
        avg_ssim_50 = ssim50_statistic[p, '50'] / size if size > 0 else '-'
        avg_compressed_75 = compressed75_size_statistic[p, '75'] / size if size > 0 else '-'
        avg_compressed_70 = compressed70_size_statistic[p, '70'] / size if size > 0 else '-'
        avg_compressed_50 = compressed50_size_statistic[p, '50'] / size if size > 0 else '-'
        print "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(p,size,avg_pixel,avg_size,avg_ssim_75,avg_ssim_70,avg_ssim_50,avg_compressed_75,avg_compressed_70,avg_compressed_50)
@check_files("image_output_file")
def statistic_size_total(image_output_file):
size = defaultdict(int)
with open(image_output_file) as r_handler:
for line in r_handler:
terms = line.split('\t')
if terms[13] in ['jpeg','png','webp']:
type = image.image_pixel_type_detection(int(terms[15]),int(terms[16]))
size[type] += 1
print size
@check_files("image_output_file")
def statistic_ziproxy_all_ssim(image_output_file):
    """Print size-ratio and SSIM curves across ziproxy qualities 5..95 per pixel bucket."""
    # image_type, pixel_type
    count_statistic = defaultdict(int) # count for image_type,pixel_type
    size_statistic = {}
    ssim_statistic = {}
    with open(image_output_file) as r_handler:
        try:
            # NOTE(review): the try wraps the whole loop, so the first
            # malformed line aborts the rest of the scan (and 'line' is
            # unbound in the except clause for an empty file) — confirm
            # this is intended.
            for line in r_handler:
                if line and line.strip():
                    line = line.strip()
                    terms = line.split('\t')
                    pixel_type = image.image_pixel_type_detection(int(terms[14]), int(terms[15]))
                    # image_type = terms[12]
                    count_statistic[(pixel_type,)] += 1
                    # quality columns are interleaved in the row; pic_idx maps
                    # column order back to the quality value
                    pic_idx = [65,35,5,70,40,10,75,45,15,80,50,20,85,55,25,90,60,30,95]
                    for id, pic_size in zip(pic_idx ,terms[17::2],):
                        if (pixel_type,) not in size_statistic:
                            size_statistic[(pixel_type,)] = defaultdict(int)
                        size_statistic[(pixel_type,)][id] += int(pic_size)
                    # slot 100 accumulates the original (uncompressed) size
                    size_statistic[(pixel_type,)][100] += float(terms[11])
                    for id, pic_ssim in zip(pic_idx ,terms[18::2],):
                        if (pixel_type,) not in ssim_statistic:
                            ssim_statistic[(pixel_type,)] = defaultdict(int)
                        # if float(pic_ssim) <= 0.1:
                        #     pic_ssim = 0.95
                        ssim_statistic[(pixel_type,)][id] += float(pic_ssim)
        except Exception as e:
            print e,line
    # for item, count in count_statistic.iteritems():
    #     print item,count
    # for item, count in size_statistic.iteritems():
    #     print item, count
    print 'size ratio:'
    for item, l in size_statistic.iteritems():
        # compressed size at each quality over the total original size
        result = "\t".join([ str(l[i]/float(l[100])) for i in range(5,100,5)])
        print "{}\t{}".format(item,result)
    print 'ssim'
    for item, l in ssim_statistic.iteritems():
        # mean SSIM at each quality for this bucket
        result = "\t".join([ str(l[i]/float(count_statistic[item])) for i in range(5,100,5)])
        print "{}\t{}".format(item,result)
@check_files("image_output_file","content_type_file")
def statistic_table3(image_output_file,content_type_file):
    """Traffic breakdown by content-type category.

    content_type_file maps a key (field -3 of the data rows) to a category
    name; 'text' responses whose encoding field mentions gzip/identity/deflate
    are re-labelled 'zip'.  Prints one row per category: name, count ratio,
    traffic ratio, median size, mean size.
    """
    key_map = {}
    with open(content_type_file) as rr_handler:
        for line in rr_handler:
            line = line.strip()
            terms = line.split('\t')
            key_map[terms[0]] = terms[1]
    print len(key_map)
    statistic = defaultdict(int)
    type_count_statistic = defaultdict(int) # count for
    type_traffic_statistic = defaultdict(int)
    type_median_statistic = {}
    # pre-seed every known category so .append() below never KeyErrors
    for w in ['zip', 'text', 'image', '-', 'octet-stream', 'other', 'video', 'audio','total']:
        type_median_statistic[w] = []
    with open(image_output_file) as r_handler:
        try:
            # NOTE(review): the try wraps the whole loop, so the first bad
            # line aborts the remaining file — confirm this is intended.
            for line in r_handler:
                statistic['total'] += 1
                if line and line.strip():
                    line = line.strip()
                    terms = line.split('\t')
                    if len(terms) != 12:
                        statistic['format_w'] += 1
                        continue
                    if terms[-3] not in key_map:
                        statistic['miss'] += 1
                        continue
                    # compressed text is counted under its own 'zip' category
                    if key_map[terms[-3]] == 'text' and ("gzip" in terms[4] or "identity" in terms[4] or "deflate" in terms[4]):
                        type_count_statistic['zip'] += 1
                        type_traffic_statistic['zip'] += int(terms[-2])
                        type_median_statistic['zip'].append(int(terms[-2]))
                        type_count_statistic['total'] += 1
                        type_traffic_statistic['total'] += int(terms[-2])
                        type_median_statistic['total'].append(int(terms[-2]))
                    else:
                        type_count_statistic[key_map[terms[-3]]] += 1
                        type_traffic_statistic[key_map[terms[-3]]] += int(terms[-2])
                        type_median_statistic[key_map[terms[-3]]].append(int(terms[-2]))
                        type_count_statistic['total'] += 1
                        type_traffic_statistic['total'] += int(terms[-2])
                        type_median_statistic['total'].append(int(terms[-2]))
        except Exception as e:
            statistic['error'] += 1
    print statistic
    for k in type_count_statistic:
        key = k
        number_ratio = type_count_statistic[k]/float(type_count_statistic['total'])
        traffic_ratio = type_traffic_statistic[k]/float(type_traffic_statistic['total'])
        med = median(type_median_statistic[k])
        avg = type_traffic_statistic[k] / float(type_count_statistic[k])
        print "{}\t{}\t{}\t{}\t{}".format(key,number_ratio,traffic_ratio,med,avg)
def median(lst):
    """Return the upper median of *lst*, or None for an empty list."""
    if not lst:
        return
    ordered = sorted(lst)
    return ordered[len(ordered) // 2]
if __name__ == "__main__":
# statistic_ssim(image_output_file=sys.argv[1])
# stat_webp_compress(image_output_file=sys.argv[1])
#statistic_ziproxy_all_ssim(image_output_file=sys.argv[1])
statistic_table3(image_output_file=sys.argv[1],content_type_file=sys.argv[2]) | {
"repo_name": "CharlesZhong/Mobile-Celluar-Measure",
"path": "http_parser/statistic.py",
"copies": "1",
"size": "15519",
"license": "mit",
"hash": 3112415513610587000,
"line_mean": 39.5221932115,
"line_max": 178,
"alpha_frac": 0.5365036407,
"autogenerated": false,
"ratio": 3.9700690713737528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5006572712073752,
"avg_score": null,
"num_lines": null
} |
__author__ = 'charles'
from flask import Flask
from flask import render_template
from flask import request
from flask import send_file
import logging
from cards_generator import generate_output_file
#from google.appengine.api.logservice import logservice
#from werkzeug import secure_filename
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
    """Render the upload form; on POST, convert the upload and send back an xlsx.

    GET returns the home template; POST reads the uploaded workbook, runs it
    through generate_output_file, and streams the result as an attachment.
    """
    logging.debug("Loading Home page")
    if request.method == 'POST':
        logging.info("Processing file ...")
        # renamed from 'file' to stop shadowing the builtin
        uploaded_file = request.files['file']
        output_file = generate_output_file(uploaded_file)
        # rewind so send_file streams from the beginning of the buffer
        output_file.seek(0)
        logging.info("Successfully processing file !")
        return send_file(output_file, mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
                         as_attachment=True, attachment_filename='output.xlsx')
    else:
        return render_template('home.html')
@app.errorhandler(500)
def internal_error(error):
    """Log an unhandled server error and fall back to the home template."""
    # lazy %-args: formatting happens only if the record is emitted
    logging.error("Error : %s", error)
    return render_template('home.html', error=error)
if __name__ == '__main__':
app.run(debug=True)
| {
"repo_name": "xebia-france/agile-cards-generator",
"path": "webapp.py",
"copies": "1",
"size": "1107",
"license": "mit",
"hash": -2916159652181887000,
"line_mean": 27.3846153846,
"line_max": 115,
"alpha_frac": 0.6784101174,
"autogenerated": false,
"ratio": 3.84375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50221601174,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charles'
from httplib import HTTPResponse
class HTTP_Requset(object):
    """One parsed HTTP request (class-name typo kept: it is the public interface).

    Falsy or absent headers are normalised to the '-' placeholder.
    """

    def __init__(self, header_keys, len_request, len_request_body):
        # .get(): a header missing from the dict now behaves like an empty
        # header ('-') instead of raising KeyError
        self.user_token, self.user_conf = self.parse_X_QB(header_keys.get('X-QB'))
        self.accept = header_keys.get('Accept') or '-'
        self.accept_encoding = header_keys.get('Accept-Encoding') or '-'
        self.len_request = len_request
        self.len_request_body = len_request_body

    def parse_X_QB(self, X_QB):
        """Split the custom 'X-QB' header (4 '^'-separated parts) into (token, conf).

        Returns ('-', '-') for a falsy header or a wrong part count.
        """
        user_token, user_conf = '-', '-'
        if X_QB:
            terms = X_QB.split('^')
            if len(terms) == 4:
                user_token = terms[0].strip() if terms[0] else '-'
                user_conf = terms[2].strip() if terms[2] else '-'
        return user_token, user_conf

    def __repr__(self):
        # TSV row; user_conf is not included in the output
        return "{}\t{}\t{}\t{}\t{}".format(self.user_token,
                                           self.accept, self.accept_encoding,
                                           self.len_request, self.len_request_body)
class HTTP_Response(object):
    """One parsed HTTP response (field-name typo 'len_repsonse' kept for callers).

    Falsy or absent headers are normalised to the '-' placeholder.
    """

    def __init__(self, status_code, header_keys, len_repsonse, len_response_body):
        self.status_code = status_code
        # .get(): missing headers behave like empty ones instead of raising KeyError
        self.content_length = header_keys.get('Content-Length') or '-'
        self.content_type = header_keys.get('Content-Type') or '-'
        self.len_repsonse = len_repsonse
        self.len_response_body = len_response_body

    def __repr__(self):
        # TSV row mirroring HTTP_Requset.__repr__
        return "{}\t{}\t{}\t{}\t{}".format(self.status_code,
                                           self.content_length, self.content_type,
                                           self.len_repsonse, self.len_response_body)
class Image_Model(object):
    """Decoded-image record plus an optional recompression result.

    Compression fields start as the '-' placeholder and are filled in by
    set_zip() once the image has been recompressed.
    """

    def __init__(self, real_type, md5, weight, height, pixel_count, ):
        self.real_type = real_type
        self.md5 = md5
        self.weight = weight
        self.height = height
        self.pixel_count = pixel_count
        # not yet compressed
        self.quality = '-'
        self.compress_md5 = '-'
        self.compress_size = '-'

    def set_zip(self, compress_md5, compress_size):
        """Record the recompressed image's checksum and size."""
        self.compress_md5 = compress_md5
        self.compress_size = compress_size

    def __repr__(self):
        fields = (self.real_type, self.md5,
                  self.weight, self.height, self.pixel_count,
                  self.quality,
                  self.compress_md5, self.compress_size)
        return "\t".join("{}".format(value) for value in fields)
class IMAGE_OUTPUT_MODEL():
    """Typed view over one tab-separated image-pipeline output row.

    Fields 0-19 describe the request/response and the original image; the
    last twelve fields carry (length, md5, runtime, ssim) for qualities
    75, 70 and 50, addressed from the row's tail.  A '-' marks a missing
    measurement and is normalised to 0 for numeric fields.
    """

    def __init__(self, terms):
        def as_int(raw):
            # '-' means "not measured"
            return 0 if raw == '-' else int(raw)

        def as_float(raw):
            return 0 if raw == '-' else float(raw)

        # request side
        self.requset_time = terms[0]    # (sic) attribute name kept for callers
        self.response_time = terms[1]
        self.user_token = terms[2]
        # self.user_conf = terms[3]
        self.accept = terms[3]
        self.accept_encoding = terms[4]
        self.len_request = terms[5]
        self.len_request_body = terms[6]
        # response side
        self.status_code = terms[7]
        self.content_length = terms[8]
        self.content_type = terms[9]
        self.len_repsonse = terms[10]   # (sic) attribute name kept for callers
        self.len_response_body = int(terms[11])
        # decoded image properties
        self.real_type = terms[12]
        self.md5 = terms[13]
        self.weight = int(terms[14])    # presumably the width — confirm with the writer
        self.height = int(terms[15])
        self.pixel_count = int(terms[16])
        self.quality = terms[17]
        self.compress_md5 = terms[18]
        self.compress_size = terms[19]
        # per-quality results, fixed offsets from the end of the row
        self.length_75 = as_int(terms[-12])
        self.md5_75 = terms[-11]
        self.runtime_75 = as_float(terms[-10])
        self.ssim_75 = as_float(terms[-9])
        self.length_70 = as_int(terms[-8])
        self.md5_70 = terms[-7]
        self.runtime_70 = as_float(terms[-6])
        self.ssim_70 = as_float(terms[-5])
        self.length_50 = as_int(terms[-4])
        self.md5_50 = terms[-3]
        self.runtime_50 = as_float(terms[-2])
        self.ssim_50 = as_float(terms[-1])
        # self.cwebp_runtime = terms[20]
        # self.dwebp_runtime = terms[21]
# self.ziproxy_runtime = terms[22] | {
"repo_name": "CharlesZhong/Mobile-Celluar-Measure",
"path": "http_parser/model.py",
"copies": "1",
"size": "4322",
"license": "mit",
"hash": -4436949113195415000,
"line_mean": 37.5982142857,
"line_max": 104,
"alpha_frac": 0.5340120315,
"autogenerated": false,
"ratio": 3.545529122231337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9572890535636206,
"avg_score": 0.0013301236190262464,
"num_lines": 112
} |
__author__ = 'charles'
import argparse
import os
import sys
import re
class bcolors:
    """ANSI escape sequences for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'      # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_bytes(ba, start, end, match_start, match_end, columns):
    """Print a hex dump of ba[start:end], *columns* octets per row.

    Bytes in [match_start, match_end) are highlighted in blue; each row ends
    with an ASCII gutter for the previous row's bytes.  *start* and *end* are
    assumed to be row-aligned by the caller — TODO confirm.
    """
    hexStr = ''
    for i in range(start, end):
        if i % columns == 0:
            # row boundary: close an active highlight before the gutter
            if match_start <= i <= match_end:
                hexStr += bcolors.ENDC
            if i > start:
                # ASCII gutter for the row just completed
                # NOTE(review): `32 < x` excludes the space byte (0x20), so
                # spaces render as '.' — confirm that is intended
                hexStr += ' | '
                hexStr += ''.join(map(chr, [x if 32 < x < 127 else ord('.') for x in ba[i-columns:i]]))
            # start the next row with its offset label
            hexStr += format('\n[%08x] ' % i)
            # re-open the highlight if the match spans the row break
            if match_end >= i >= match_start:
                hexStr += bcolors.OKBLUE
        elif i % 2 == 0:
            # group octets in pairs
            hexStr += ' '
        if i == match_start:
            hexStr += bcolors.OKBLUE
        if i == match_end:
            hexStr += bcolors.ENDC
        hexStr += format("%0.2x" % ba[i])
    print(hexStr)
def main():
    """CLI entry point: find every occurrence of a byte pattern in a file and hex-dump its context."""
    parser = argparse.ArgumentParser(description='Finds a sequence of bytes in the specified file.')
    parser.add_argument('-r', '--raw-bytes', required=True,
                        help='sequence of bytes to search for (e.g., "\x00\x04"')
    parser.add_argument('-i', '--input', required=True,
                        help='file to search in')
    parser.add_argument('-g', '--toprint', default=32,
                        help='number of surrounding bytes to print (default 32)', type=int)
    parser.add_argument('-c', '--columns', default=16,
                        help='number of octets per line (default 16)', type=int)
    args = parser.parse_args()
    if not os.path.exists(args.input):
        print('Path {} not found.'.format(args.input))
        sys.exit(1)
    if not args.columns % 2 == 0:
        print('Columns must be a multiple of 2.')
        sys.exit(1)
    # NOTE(review): a shell argument like "\x00" usually arrives as literal
    # backslash text; it is UTF-8 encoded here, not unescaped, and is then
    # compiled as a *regex* — confirm the intended semantics.
    ba = bytes(args.raw_bytes, 'utf-8')
    regex = re.compile(ba)
    # was: open(...).read() with no close — a context manager releases the
    # file handle deterministically
    with open(args.input, 'rb') as fh:
        bytes_read = fh.read()
    for match in re.finditer(regex, bytes_read):
        start = match.span()[0]
        end = match.span()[1]
        # widen the window, then snap it to column boundaries
        start = max(0, start - args.toprint)
        start -= start % args.columns
        end += args.toprint
        end += args.columns - (end % args.columns)
        end = min(end, len(bytes_read))
        print_bytes(bytes_read, start, end, match.span()[0], match.span()[1], args.columns)
if __name__ == '__main__':
main() | {
"repo_name": "afrocheese/find_bytes",
"path": "find_bytes.py",
"copies": "1",
"size": "2488",
"license": "apache-2.0",
"hash": 2638892539311944700,
"line_mean": 31.3246753247,
"line_max": 103,
"alpha_frac": 0.5325562701,
"autogenerated": false,
"ratio": 3.445983379501385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9454919439310024,
"avg_score": 0.004724042058272179,
"num_lines": 77
} |
__author__ = 'Charles'
import os
import copy
from . import macro
class Module(object):
    """A buildable unit: a named target plus its compilers and dependencies.

    NOTE(review): compilers/dependencies/macrosdefined are *class-level*
    mutable attributes, shared by all instances until an instance assigns
    its own (override() does exactly that) — confirm the sharing is
    intended before relying on it.
    """

    # Details
    name = ""
    target = ""
    path = ""
    # Needed
    compilers = dict()
    dependencies = set()  # module objects
    # macros seen so far while parsing includes (shared across instances)
    macrosdefined = set()

    def __init__(self, name, target):
        self.name = name
        self.target = target

    def override(self, subcobj=None):
        """Copy this module's compilers/dependencies onto *subcobj* when it
        is an instance of this class (or of a subclass)."""
        # Bug fix: issubclass() requires a class as its second argument; the
        # original passed the *instance* (self), which raises TypeError on
        # every call.  Compare against self's class instead.
        if subcobj is not None and issubclass(subcobj.__class__, self.__class__):
            subcobj.compilers = copy.deepcopy(self.compilers)
            subcobj.dependencies = copy.deepcopy(self.dependencies)

    def solve_dependencies(self, func):
        """Map *func* over the dependency set (e.g. name -> Module object)."""
        self.dependencies = set(map(func, self.dependencies))

    def build(self, link):
        """Build this module; *link* is the collection of modules to link against."""
        self.make(set(link))

    def make(self, link):
        """Hook for subclasses: perform the actual build.  No-op here."""
        pass

    def collect(self, ext, parse_inc_sep=os.sep, parse_inc_stat="#include"):
        """Gather every *ext* source under self.path plus its include closure.

        Returns (all_name, inc_data): the object-file names and, per source,
        a space-joined string of the file and everything it includes.
        """
        all_name = list()
        inc_data = dict()
        for name, fullname, root in macro.walk(self.path, ext):
            inc = ["%s "%fullname]
            all_name.append(fullname+".o") #[:-1-len(ext)]
            for i in self.parse_include(root, name, os.sep): inc.append(i)
            inc_data[fullname] = (" ".join(inc))
        return all_name, inc_data

    def parse_include(self, path, name, sep=os.sep, include_stat="#include"):
        """Yield, recursively, every quoted include reachable from path/name.

        Honours #define/#undef/#ifdef/#ifndef/#endif against the shared
        macrosdefined set; angle-bracket includes are skipped.
        """
        skipping = False
        with open(path+sep+name, "r") as f:
            for i in f.readlines():
                if skipping:
                    # inside a false conditional block: ignore until #endif
                    if "#endif" in i:
                        skipping = False
                    continue
                if "#define" in i:
                    self.macrosdefined.add(i.split()[1])
                    continue
                if "#ifndef" in i:
                    if i.split()[1] in self.macrosdefined:
                        skipping = True
                    continue
                if "#ifdef" in i:
                    if i.split()[1] not in self.macrosdefined:
                        skipping = True
                    continue
                if "#undef" in i:
                    if i.split()[1] in self.macrosdefined:
                        self.macrosdefined.remove(i.split()[1])
                if include_stat in i:
                    # only quoted includes are followed; <...> is a system header
                    if '<' in i or '>' in i:
                        continue
                    elif '\'' in i:
                        t = '\''
                    elif '\"' in i:
                        t = '\"'
                    else:continue
                    # extract the text between the quote characters
                    i = i[i.index(t)+1:]
                    i = i[:i.index(t)]
                    # resolve the relative include path against *path*
                    li = i.split(sep)
                    p = path.split(sep)
                    for j in li:
                        if j == "..":
                            p.pop()
                        elif j == ".":
                            pass
                        else:
                            p.append(j)
                    yield sep.join(p)
                    # recurse into the included file
                    for j in self.parse_include(sep.join(p[:-1]), p[-1], sep, include_stat): yield j
| {
"repo_name": "chen-charles/sysbd",
"path": "sysbd/module.py",
"copies": "1",
"size": "2969",
"license": "mit",
"hash": 410617296560472100,
"line_mean": 29.9270833333,
"line_max": 100,
"alpha_frac": 0.4338160997,
"autogenerated": false,
"ratio": 4.385524372230428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5319340471930428,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charles'
import os
from . import macro
#import macro
class Directory(object):
    """A node in an in-memory directory tree.

    Constructing a node registers it with its parent; getPath() walks up
    through the parents to build the filesystem path.
    """

    def __init__(self, name, parentDir, isModule=False):
        self.name = name
        self.isModule = bool(isModule)
        self.children = set()
        self.parent = parentDir
        # register this node with its parent on construction
        self.parent.add(self)

    def getPath(self):
        """Return the full path: the parent's path plus this node's name."""
        return os.sep.join((self.parent.getPath(), self.name))

    def add(self, directory):
        """Attach *directory* as a child of this node."""
        self.children.add(directory)
class DirectoryRoot(Directory):
    """Tree root: anchors getPath() at a fixed filesystem path."""

    def __init__(self, name, fullpath, isModule=False):
        # A root is its own parent, which makes Directory.__init__ register
        # the node inside its own children set — clear that self-reference.
        Directory.__init__(self, name, self, isModule)
        self.fullpath = fullpath
        self.children.clear()

    def getPath(self):
        """The root's path is the fixed fullpath, not derived from a parent."""
        return self.fullpath
def buildTree(ppath):
    """Materialize the directory hierarchy under *ppath* and return its root node."""
    nodes = dict()
    nodes[ppath] = DirectoryRoot("", ppath)
    for dirpath, dirs, files in os.walk(ppath):
        if dirpath == ppath:
            continue
        parts = dirpath.split(os.sep)
        # os.walk is top-down, so the parent node always exists already
        nodes[dirpath] = Directory(parts[-1], nodes[os.sep.join(parts[:-1])])
    return nodes[ppath]
def buildModuleTree(ppath, modulefname="module.ini"):
    """Like buildTree, but flags nodes whose directory contains *modulefname*.

    Returns the full path -> Directory mapping (not just the root).
    """
    nodes = dict()
    nodes[ppath] = DirectoryRoot("", ppath)
    for dirpath, dirs, files in os.walk(ppath):
        has_module = modulefname in files
        if dirpath == ppath:
            if has_module:
                nodes[dirpath].isModule = True
            continue
        parts = dirpath.split(os.sep)
        # os.walk is top-down, so the parent node always exists already
        nodes[dirpath] = Directory(parts[-1], nodes[os.sep.join(parts[:-1])])
        if has_module:
            nodes[dirpath].isModule = True
    return nodes
def dwalk(ppath, modulefname="module.ini"):
    """os.walk over *ppath* that prunes sub-directories which are modules.

    Yields (dirpath, dirs, files) like os.walk; *dirs* is edited in place so
    os.walk never descends into pruned module directories.
    """
    mTree = buildModuleTree(ppath, modulefname)
    for dirpath, dirs, files in os.walk(ppath):
        # Bug fix: the original iterated over *dirs* itself (t = dirs is an
        # alias, not a copy) while calling dirs.remove(), which skips the
        # entry after each removal — two adjacent module directories meant
        # the second one was never pruned.  Iterate over a snapshot and keep
        # the removals in place so os.walk's pruning still works.
        for i in list(dirs):
            if mTree[os.path.join(dirpath, i)].isModule:
                dirs.remove(i)
        yield dirpath, dirs, files
def dwalkext(path, ext, modulefname="module.ini"):
    """Yield (name, fullpath, root) for every file with extension *ext* found by dwalk."""
    wanted = ext.lower()
    for root, dirs, files in dwalk(path, modulefname):
        for name in files:
            # compare extensions case-insensitively
            if name.split(".")[-1].lower() == wanted:
                yield name, os.path.join(root, name), root
| {
"repo_name": "chen-charles/sysbd",
"path": "sysbd/directory.py",
"copies": "1",
"size": "1870",
"license": "mit",
"hash": -4714995162490486000,
"line_mean": 26.1014492754,
"line_max": 107,
"alpha_frac": 0.6989304813,
"autogenerated": false,
"ratio": 2.9588607594936707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41577912407936707,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charles'
# Build driver: locates every module.ini under PROJECTPATH and runs either the
# module's own .pybuild script or the bundled default, then builds every
# registered module.  Any failure pauses for the user before exiting non-zero.
try:
    import os
    import sysbd
    import envir
    import traceback
    import inspect
    print("PROJECTPATH:", envir.PROJECTPATH, end="\n\n")
    # add dependencies as user-defined macros, then solve them during compile time
    # bdr = builder.Builder(mod)
    # bdr.build(envir.solve_dependencies)
    # for name, fullname, root in sysbd.macro.walk(envir.PROJECTPATH, "pybuild"):
    #     print("pybuild found:", fullname, os.path.dirname(fullname))
    #     with open(fullname) as f: exec(f.read(), {"envir": envir, "sysbd": sysbd, "fpath": os.path.dirname(fullname)})
    for name, fullname, root in sysbd.macro.walk(envir.PROJECTPATH, "ini"):
        if name == "module.ini":
            print("module found:", fullname, os.path.dirname(fullname))
            # look for a module-local *.pybuild next to module.ini
            filenames = os.listdir(os.path.dirname(fullname))
            found = None
            for filename in filenames:
                # NOTE(review): os.path.isfile(filename) resolves against the
                # current working directory, not the module directory — this
                # looks like it only works when CWD matches; confirm.
                if os.path.isfile(filename) and filename.endswith('.pybuild'):
                    found = os.path.dirname(fullname)+os.sep+filename
            # NOTE(review): exec() runs the pybuild file with full privileges;
            # only trusted project files should ever reach this point.
            if found is not None:
                print("not using default pybuild")
                with open(found) as f: exec(f.read(), {"envir": envir, "sysbd": sysbd, "fpath": os.path.dirname(fullname)})
            else:
                print("using default pybuild")
                # fall back to pybuild.default shipped next to this script
                if "DEFAULTPYBUILD" not in globals() or "DEFAULTPYBUILD" not in locals():
                    with open(os.path.dirname(inspect.getfile(inspect.currentframe()))+os.sep+"pybuild.default") as f: exec(f.read(), {"envir": envir, "sysbd": sysbd, "fpath": os.path.dirname(fullname)})
                else:
                    exec(DEFAULTPYBUILD, {"envir": envir, "sysbd": sysbd, "fpath": os.path.dirname(fullname)})
    # you can always redefine the build behaviour
    for i in envir.MODULEMAP.values():
        bdr = sysbd.builder.Builder(i, envir.solve_dependencies)
        bdr.build()
        sysbd.builder.MODULEBUILT.clear()
except Exception as err:
    # pause so a double-click launch shows the traceback before the window closes
    input("\n\nFailed. \n\n%s\n"%traceback.format_exc())
    exit(1)
print("\n\nSucceeded. \n")
| {
"repo_name": "chen-charles/sysbd",
"path": "build.py",
"copies": "1",
"size": "1870",
"license": "mit",
"hash": -5153221422475887000,
"line_mean": 32.3928571429,
"line_max": 188,
"alpha_frac": 0.6903743316,
"autogenerated": false,
"ratio": 3.040650406504065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9012496940888843,
"avg_score": 0.043705559443044203,
"num_lines": 56
} |
__author__ = 'Charlie'
# Attempt at Mahendran and Vedaldi's Understanding Deep Image Representations by Inverting them
import numpy as np
import tensorflow as tf
import scipy.io
import scipy.misc
from datetime import datetime
import os, sys, inspect
utils_path = os.path.abspath(
os.path.realpath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_path not in sys.path:
sys.path.insert(0, utils_path)
import TensorflowUtils as utils
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("image_path", "", """Path to image to be inverted""")
tf.flags.DEFINE_string("model_dir", "Models_zoo/", """Path to the VGGNet model mat file""")
tf.flags.DEFINE_string("log_dir", "logs/ImageInversion_logs/", """Path to save logs and checkpoint if needed""")
DATA_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
LEARNING_RATE = 1e0
MAX_ITERATIONS = 10000
INVERT_LAYER = 'conv5_1'
def get_model_data():
    """Load the pre-trained VGG-19 MatConvNet file from FLAGS.model_dir.

    Raises IOError when the .mat file has not been downloaded yet.
    """
    filepath = os.path.join(FLAGS.model_dir, DATA_URL.split("/")[-1])
    if not os.path.exists(filepath):
        raise IOError("VGG Model not found!")
    return scipy.io.loadmat(filepath)
def get_image(image_dir):
    """Read an image file as float32 with a leading batch axis: (1, H, W, C)."""
    pixels = scipy.misc.imread(image_dir)
    return pixels.astype(np.float32).reshape((1,) + pixels.shape)
def vgg_net(weights, image):
    """Build the VGG-19 feature stack over *image* with max pooling.

    weights: the squeezed 'layers' array from the MatConvNet .mat file.
    Returns a dict mapping layer name -> output tensor of that layer.
    """
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        # the first four characters select the layer kind
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = utils.max_pool_2x2(current)
        elif kind == 'norm':
            # unreachable with the layer list above; kept for completeness
            current = tf.nn.lrn(current, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        net[name] = current

    assert len(net) == len(layers)
    return net
def main(argv=None):
    """Invert a VGG-19 feature map: optimize a random image whose INVERT_LAYER
    activations match those of FLAGS.image_path (Mahendran & Vedaldi).

    Writes the running best to invert_check.png and the final best to output.png.
    """
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    invert_image = get_image(FLAGS.image_path)
    print invert_image.shape
    # per-channel mean of the VGG training data, used to (un)center images
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    processed_image = utils.process_image(invert_image, mean_pixel).astype(np.float32)
    weights = np.squeeze(model_data['layers'])
    # target features from the real image
    invert_net = vgg_net(weights, processed_image)
    # the optimized variable: a noise image with the same shape as the input
    dummy_image = utils.weight_variable(invert_image.shape, stddev=np.std(invert_image) * 0.1)
    tf.histogram_summary("Image Output", dummy_image)
    image_net = vgg_net(weights, dummy_image)
    with tf.Session() as sess:
        # evaluate the (constant) target activations once
        invert_layer_features = invert_net[INVERT_LAYER].eval()
        # mean squared feature distance at the inverted layer
        loss = 2 * tf.nn.l2_loss(image_net[INVERT_LAYER] - invert_layer_features) / invert_layer_features.size
        tf.scalar_summary("Loss", loss)
        summary_op = tf.merge_all_summaries()
        train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

        best_loss = float('inf')
        best = None
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir)
        sess.run(tf.initialize_all_variables())
        for i in range(1, MAX_ITERATIONS):
            train_op.run()

            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d' % (i)),
                print(' total loss: %g' % this_loss)
                summary_writer.add_summary(summary_op.eval(), global_step=i)
                if this_loss < best_loss:
                    # keep the best image so far and checkpoint it to disk
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("invert_check.png", output)

        output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
        scipy.misc.imsave("output.png", output)
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "ImageArt/ImageInversion.py",
"copies": "1",
"size": "4731",
"license": "mit",
"hash": 5583885898626082000,
"line_mean": 34.0444444444,
"line_max": 112,
"alpha_frac": 0.6081166772,
"autogenerated": false,
"ratio": 3.1666666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42747833438666666,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charlie'
# Implementation based on neural style paper
import numpy as np
import tensorflow as tf
import scipy.io
import scipy.misc
from datetime import datetime
import os, sys, inspect
utils_path = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_path not in sys.path:
sys.path.insert(0, utils_path)
import TensorflowUtils as utils
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("model_dir", "Models_zoo/", """Path to the VGG model mat file""")
tf.flags.DEFINE_string("content_path", "", """Path to content image to be drawn in different style""")
tf.flags.DEFINE_string("style_path", "", """Path to style image to use""")
tf.flags.DEFINE_string("log_dir", "logs/Neural_style_logs/", """Path to save logs and checkpoint if needed""")
DATA_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
CONTENT_WEIGHT = 2e-2
CONTENT_LAYERS = ('relu2_2',)
STYLE_WEIGHT = 2e-1
STYLE_LAYERS = ('relu1_2', 'relu2_2', 'relu3_3','relu4_2')
VARIATION_WEIGHT = 1
LEARNING_RATE = 1e-2
MOMENTUM = 0.9
MAX_ITERATIONS = int(1 + 1e5)
def get_model_data():
    """Load the VGG-19 matconvnet weights from FLAGS.model_dir.

    Raises:
        IOError: when the .mat file has not been downloaded yet.
    """
    mat_path = os.path.join(FLAGS.model_dir, DATA_URL.split("/")[-1])
    if not os.path.exists(mat_path):
        raise IOError("VGG Model not found!")
    return scipy.io.loadmat(mat_path)
def get_image(image_dir):
    """Read an image file and return it as a float32 batch of one.

    The returned array has shape (1, height, width, channels).
    """
    pixels = scipy.misc.imread(image_dir)
    print(pixels.shape)
    return pixels.astype(np.float32).reshape((1,) + pixels.shape)
def vgg_net(weights, image):
    """Build the VGG-19 feature-extraction graph on top of `image`.

    weights: the squeezed 'layers' array from the matconvnet VGG-19 .mat
        file, indexed in the same order as `layers` below.
    image: input tensor (already mean-subtracted) — assumes shape
        (1, H, W, 3); TODO confirm against callers.
    Returns a dict mapping each layer name to its output tensor.
    """
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )
    net = {}
    current = image
    for i, name in enumerate(layers):
        # First four characters of the layer name select the op type.
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            # Average pooling (common choice for style transfer).
            current = utils.avg_pool_2x2(current)
        net[name] = current
    assert len(net) == len(layers)
    return net
def main(argv=None):
    """Run neural style transfer.

    Loads VGG-19, extracts content features from FLAGS.content_path and
    style Gram matrices from FLAGS.style_path, then optimizes a randomly
    initialized image against content + style + total-variation losses,
    periodically saving the best image found so far.
    """
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    # Mean pixel used by matconvnet's preprocessing (subtracted from inputs).
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = np.squeeze(model_data['layers'])
    content_image = get_image(FLAGS.content_path)
    print content_image.shape
    processed_content = utils.process_image(content_image, mean_pixel).astype(np.float32)
    style_image = get_image(FLAGS.style_path)
    processed_style = utils.process_image(style_image, mean_pixel).astype(np.float32)
    # Three copies of the VGG graph: fixed content, fixed style, and the
    # trainable image being optimized.
    content_net = vgg_net(weights, processed_content)
    style_net = vgg_net(weights, processed_style)
    dummy_image = utils.weight_variable(content_image.shape, stddev=np.std(content_image) * 0.1)
    image_net = vgg_net(weights, dummy_image)
    with tf.Session() as sess:
        # Content loss: L2 distance to the (pre-evaluated, constant)
        # content features at each CONTENT_LAYERS entry.
        content_losses = []
        for layer in CONTENT_LAYERS:
            feature = content_net[layer].eval()
            content_losses.append(tf.nn.l2_loss(image_net[layer] - feature))
        content_loss = CONTENT_WEIGHT * reduce(tf.add, content_losses)
        # Style loss: match Gram matrices of the optimized image to the
        # style image's Gram matrices at each STYLE_LAYERS entry.
        style_losses = []
        for layer in STYLE_LAYERS:
            features = style_net[layer].eval()
            features = np.reshape(features, (-1, features.shape[3]))
            style_gram = np.matmul(features.T, features) / features.size
            image_layer = image_net[layer]
            _, height, width, number = map(lambda i: i.value, image_layer.get_shape())
            size = height * width * number
            feats = tf.reshape(image_layer, (-1, number))
            image_gram = tf.matmul(tf.transpose(feats), feats) / size
            style_losses.append(0.5*tf.nn.l2_loss(image_gram - style_gram))
        style_loss = STYLE_WEIGHT * reduce(tf.add, style_losses)
        # Total-variation loss: penalize differences between neighbouring
        # pixels (vertical and horizontal) for smoothness.
        tv_y_size = utils.get_tensor_size(dummy_image[:, 1:, :, :])
        tv_x_size = utils.get_tensor_size(dummy_image[:, :, 1:, :])
        tv_loss = VARIATION_WEIGHT * (
            (tf.nn.l2_loss(dummy_image[:, 1:, :, :] - dummy_image[:, :content_image.shape[1] - 1, :, :]) /
             tv_y_size) +
            (tf.nn.l2_loss(dummy_image[:, :, 1:, :] - dummy_image[:, :, :content_image.shape[2] - 1, :]) /
             tv_x_size))
        loss = content_loss + style_loss + tv_loss
        train_step = tf.train.MomentumOptimizer(LEARNING_RATE,MOMENTUM).minimize(loss)
        best_loss = float('inf')
        best = None
        sess.run(tf.initialize_all_variables())
        for i in range(1, MAX_ITERATIONS):
            train_step.run()
            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d' % (i)),
                print(' total loss: %g' % this_loss)
                # Track the best image seen and checkpoint it to disk.
                if this_loss < best_loss:
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("output_check.png", output)
            if i % 100 == 0 or i == MAX_ITERATIONS - 1:
                print(' content loss: %g' % content_loss.eval()),
                print(' style loss: %g' % style_loss.eval()),
                print(' tv loss: %g' % tv_loss.eval())
        # Final result: undo preprocessing on the best image and save it.
        output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
        scipy.misc.imsave("output.png", output)
if __name__ == "__main__":
tf.app.run()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "ImageArt/NeuralStyle.py",
"copies": "1",
"size": "6289",
"license": "mit",
"hash": -2803190280620934700,
"line_mean": 35.3526011561,
"line_max": 125,
"alpha_frac": 0.5905549372,
"autogenerated": false,
"ratio": 3.1940071102082275,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9274931827070272,
"avg_score": 0.0019260440675912603,
"num_lines": 173
} |
__author__ = 'Charlie'
# Implementation draws details from https://github.com/hardmaru/cppn-tensorflow
import numpy as np
import tensorflow as tf
import os, sys, inspect
# import scipy.misc as misc
import matplotlib.pyplot as plt
utils_folder = os.path.abspath(
os.path.realpath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_folder not in sys.path:
sys.path.insert(0, utils_folder)
import TensorflowUtils as utils
# Command-line flags controlling CPPN output size and network breadth.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("image_size", "256", "Image size")
tf.flags.DEFINE_integer("genome_length", "10", "Length of genome to be given as input")
tf.flags.DEFINE_integer("net_size", "32", "Breadth of hidden units")
tf.flags.DEFINE_string("mode", "color", "Color/ Gray image output")
def show_image(image):
    """Display a generated CPPN image with matplotlib.

    image: float array with values in [0, 1]; reshaped to
        (IMAGE_SIZE, IMAGE_SIZE) when NUM_CHANNELS == 1, otherwise shown
        as an (H, W, 3) color image.
    """
    plt.subplot(1, 1, 1)
    if NUM_CHANNELS == 1:
        # Bug fix: 'Greysarr ' is not a valid matplotlib colormap name and
        # raises ValueError; the intended grayscale colormap is 'Greys'.
        plt.imshow(255 * image.reshape(IMAGE_SIZE, IMAGE_SIZE), cmap='Greys', interpolation='nearest')
    else:
        plt.imshow(255 * image, interpolation='nearest')
    plt.axis('off')
    plt.show()
def mlp(inputs, output_dimension, scope=""):
    """Single fully connected (linear) layer: inputs @ W + b.

    The `scope` argument is accepted for API compatibility but unused.
    """
    input_dim = inputs.get_shape().as_list()[1]
    weights = utils.weight_variable([input_dim, output_dimension])
    bias = utils.bias_variable([output_dimension])
    return tf.matmul(inputs, weights) + bias
def generate_image(x, y, r, z):
    """Map per-pixel coordinates (x, y, r) and a latent vector z through
    the CPPN and return an image batch with shape
    (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS).
    """
    flat_points = BATCH_SIZE * IMAGE_SIZE * IMAGE_SIZE
    with tf.name_scope("input_map") as scope:
        # Project each coordinate plane to net_size features; keep the
        # x, y, r order so variable creation order is unchanged.
        x_linear = mlp(tf.reshape(x, [flat_points, 1]), FLAGS.net_size)
        y_linear = mlp(tf.reshape(y, [flat_points, 1]), FLAGS.net_size)
        r_linear = mlp(tf.reshape(r, [flat_points, 1]), FLAGS.net_size)
    with tf.name_scope("z_input") as scope:
        # Broadcast the latent vector to every pixel position.
        z_tiled = tf.reshape(z, (BATCH_SIZE, 1, Z_DIMENSION)) * tf.ones((IMAGE_SIZE * IMAGE_SIZE, 1),
                                                                        dtype=tf.float32)
        z_linear = mlp(tf.reshape(z_tiled, [flat_points, Z_DIMENSION]), FLAGS.net_size)
    with tf.name_scope("hidden") as scope:
        # One additive mixing layer followed by three tanh MLP layers.
        hidden = tf.nn.tanh(x_linear + y_linear + r_linear + z_linear)
        for _ in range(3):
            hidden = tf.nn.tanh(mlp(hidden, FLAGS.net_size))
    with tf.name_scope("output") as scope:
        pixels = tf.sigmoid(mlp(hidden, NUM_CHANNELS))
    return tf.reshape(pixels, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
def get_coordinates():
    """Build per-pixel (x, y, r) input planes for the CPPN.

    Each returned array has shape (BATCH_SIZE, IMAGE_SIZE**2, 1): x and y
    are uniform grids over [-1, 1] and r is the distance of each pixel
    from the image centre.
    """
    axis = -1 + np.arange(IMAGE_SIZE) * 2.0 / (IMAGE_SIZE - 1)  # uniform [-1, 1]
    x_grid, y_grid = np.meshgrid(axis, axis)
    r_grid = np.sqrt(x_grid ** 2 + y_grid ** 2)

    def _batched(plane):
        # Replicate the flattened plane for every batch entry and add a
        # trailing unit feature dimension.
        return np.tile(plane.flatten(), (BATCH_SIZE, 1))[:, :, np.newaxis]

    return _batched(x_grid), _batched(y_grid), _batched(r_grid)
def main(argv=None):
    """Sample one random genome, run it through the CPPN and display
    the resulting image."""
    print "Setting up variables..."
    # Placeholders: latent genome z plus flattened pixel coordinates.
    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIMENSION])
    x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE * IMAGE_SIZE, 1])
    y = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE * IMAGE_SIZE, 1])
    r = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE * IMAGE_SIZE, 1])
    gen_image = generate_image(x, y, r, z)
    # Random genome drawn from a standard normal distribution.
    z_vec = np.random.normal(size=(BATCH_SIZE, Z_DIMENSION))
    # z_vec = np.random.uniform(-1.0, 1.0, size=(BATCH_SIZE, Z_DIMENSION)).astype(np.float32)
    # with open("genome.csv", "a") as f:
    #     np.savetxt(f, z_vec, delimiter=",")
    x_vec, y_vec, r_vec = get_coordinates()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        feed = {x: x_vec, y: y_vec, r: r_vec, z: z_vec}
        image = sess.run(gen_image, feed_dict=feed)
        show_image(image[0])
if __name__ == "__main__":
    # Resolve module-level constants from flags before running main().
    IMAGE_SIZE = FLAGS.image_size
    NUM_CHANNELS = 3 if FLAGS.mode == "color" else 1  # gray output uses one channel
    BATCH_SIZE = 1
    Z_DIMENSION = FLAGS.genome_length
    tf.app.run()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "GenerativeNetworks/SimpleCPPN.py",
"copies": "1",
"size": "4415",
"license": "mit",
"hash": -1713713630714220300,
"line_mean": 35.4876033058,
"line_max": 117,
"alpha_frac": 0.6201585504,
"autogenerated": false,
"ratio": 2.9007884362680683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4020946986668068,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charlie'
# Implementation to deep dream with VGG net
import tensorflow as tf
import numpy as np
import scipy.io
import scipy.misc
from datetime import datetime
import os, sys, inspect
utils_path = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_path not in sys.path:
sys.path.insert(0, utils_path)
import TensorflowUtils as utils
# Command-line flags and deep-dream hyper-parameters.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("image_path", "", """Path to image to be dreamed""")
tf.flags.DEFINE_string("model_dir", "Models_zoo/", """Path to the VGGNet model mat file""")
tf.flags.DEFINE_string("logs_dir", "logs/Deepdream_logs/", """Path to save logs and checkpoint if needed""")
# Pre-trained VGG-19 weights in matconvnet .mat format.
DATA_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
# Gradient-ascent step size and per-octave iteration count.
LEARNING_RATE = 2
MAX_ITERATIONS = 1
# Layer and feature channel whose mean activation is maximized.
DREAM_LAYER = "conv5_2"
DREAM_FEATURE = 128
def get_model_data():
    """Return the parsed VGG-19 .mat model located under FLAGS.model_dir.

    Raises:
        IOError: when the model file is missing.
    """
    model_file = os.path.join(FLAGS.model_dir, DATA_URL.split("/")[-1])
    if not os.path.exists(model_file):
        raise IOError("VGGNet Model not found!")
    return scipy.io.loadmat(model_file)
def get_image(image_dir):
    """Load an image file as float32 with a leading batch dimension of 1."""
    raw = scipy.misc.imread(image_dir)
    return raw.astype(np.float32)[np.newaxis, ...]
def save_image(filename, image, mean_pixel):
    """Undo mean-pixel preprocessing, clamp to [0, 255] and write to disk."""
    restored = utils.unprocess_image(image, mean_pixel)
    clipped = np.clip(restored, 0, 255).astype(np.uint8)
    scipy.misc.imsave(filename, clipped)
    print("Image saved!")
def vgg_net(weights, image):
    """Build the VGG-19 graph on `image` for deep dreaming.

    weights: the squeezed 'layers' array from the matconvnet VGG-19 .mat file.
    image: input tensor (already mean-subtracted).
    Returns a dict mapping each layer name to its output tensor.
    """
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )
    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            # Max pooling here (unlike the neural-style variant's avg pool).
            current = utils.max_pool_2x2(current)
        elif kind == 'norm':
            # NOTE(review): no name in `layers` starts with 'norm', so this
            # branch is currently unreachable dead code.
            current = tf.nn.lrn(current, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        net[name] = current
    assert len(net) == len(layers)
    return net
def resize_image(image, size):
    """Resize a (1, H, W, C) image batch to `size`; returns float32 with
    the batch dimension restored."""
    resized = scipy.misc.imresize(image[0], size).astype(np.float32)
    return resized[np.newaxis, ...]
def deepdream_image(model_params, image, octave_scale=1.4, no_of_octave=4):
    """Run multi-octave deep dream on `image` and save checkpoints.

    model_params: dict with 'weights' (VGG layers) and 'mean_pixel'.
    image: (1, H, W, 3) float array from get_image().
    octave_scale: downscale factor between octaves.
    no_of_octave: number of image scales processed per outer iteration.
    """
    filename = "%s_deepdream_%s.jpg" % (os.path.splitext((FLAGS.image_path.split("/")[-1]))[0], DREAM_LAYER)
    processed_image = utils.process_image(image, model_params["mean_pixel"]).astype(np.float32)
    input_image = tf.placeholder(tf.float32)
    dream_net = vgg_net(model_params["weights"], input_image)

    def calc_grad_tiled(img, gradient, tile_size=512):
        """Evaluate `gradient` over `img` tile by tile (bounds memory use);
        a random roll each call avoids visible tile seams. Closes over
        `sess` and `input_image` from the enclosing scope."""
        sz = tile_size
        h, w = img.shape[1:3]
        sx, sy = np.random.randint(sz, size=2)
        img_shift = np.roll(np.roll(img, sx, 2), sy, 1)
        gradient_val = np.zeros_like(img)
        for y in xrange(0, max(h - sz // 2, sz), sz):
            for x in xrange(0, max(w - sz // 2, sz), sz):
                sub_img = img_shift[:, y:y + sz, x:x + sz]
                g = sess.run(gradient, {input_image: sub_img})
                gradient_val[:, y:y + sz, x:x + sz] = g
        # Undo the roll so gradients line up with the original image.
        return np.roll(np.roll(gradient_val, -sx, 2), -sy, 1)

    step = LEARNING_RATE
    feature = DREAM_FEATURE
    with tf.Session() as sess:
        # Objective: mean activation of one feature channel in DREAM_LAYER.
        dream_layer_features = dream_net[DREAM_LAYER][:, :, :, feature]
        feature_score = tf.reduce_mean(dream_layer_features)
        grad_op = tf.gradients(feature_score, input_image)[0]
        # Offset keeps values well inside VGG's active range during ascent.
        dummy_image = processed_image.copy()+100.0
        for itr in xrange(5):
            # Build an octave pyramid: repeatedly downscale, remembering the
            # high-frequency residual lost at each step.
            octaves = []
            for i in xrange(no_of_octave - 1):
                hw = dummy_image.shape[1:3]
                lo = resize_image(dummy_image, np.int32(np.float32(hw) / octave_scale))
                hi = dummy_image - resize_image(dummy_image, hw)
                dummy_image = lo
                octaves.append(hi)
            # Ascend from coarse to fine, re-adding residual detail.
            for octave in xrange(no_of_octave):
                if octave > 0:
                    hi = octaves[-octave]
                    dummy_image = resize_image(dummy_image, hi.shape[1:3]) + hi
                for i in xrange(MAX_ITERATIONS):
                    grad = calc_grad_tiled(dummy_image, grad_op)
                    # Normalize step by the mean gradient magnitude.
                    dummy_image += grad * (step / (np.abs(grad).mean() + 1e-8))
                    print '.',
                print "."
            # step /= 2.0  # halfing step size every itr
            feature += 15  # drift to a different feature channel each pass
            temp_file = "%d_%s" % (itr, filename)
            output = dummy_image.reshape(processed_image.shape[1:]) - 100.0
            save_image(os.path.join(FLAGS.logs_dir, "checkpoints", temp_file), output, model_params["mean_pixel"])
def main(argv=None):
    """Download VGG-19 if needed, load the input image and deep-dream it."""
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    dream_image = get_image(FLAGS.image_path)
    # dream_image = np.random.uniform(size=(1, 300, 300, 3)) + 100.0
    print dream_image.shape
    model_params = {}
    # Mean pixel from matconvnet's normalization block; subtracted during
    # preprocessing and added back on save.
    mean = model_data['normalization'][0][0][0]
    model_params["mean_pixel"] = np.mean(mean, axis=(0, 1))
    model_params["weights"] = np.squeeze(model_data['layers'])
    deepdream_image(model_params, dream_image, no_of_octave=3)
if __name__ == "__main__":
tf.app.run()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "ImageArt/DeepDream.py",
"copies": "1",
"size": "6231",
"license": "mit",
"hash": -3418248685045084700,
"line_mean": 35.0173410405,
"line_max": 114,
"alpha_frac": 0.5856202857,
"autogenerated": false,
"ratio": 3.1030876494023905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41887079351023904,
"avg_score": null,
"num_lines": null
} |
__author__ = 'charlie'
import numpy as np
import os
import random
from six.moves import cPickle as pickle
from tensorflow.python.platform import gfile
import glob
import TensorflowUtils as utils
DATA_URL = 'http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip'
def read_dataset(data_dir):
    """Return (training_records, validation_records) for MIT Scene Parsing.

    On first use the dataset archive is downloaded and extracted, and the
    file lists are cached to a pickle inside `data_dir`; later calls just
    reload that cache.
    """
    cache_path = os.path.join(data_dir, "MITSceneParsing.pickle")
    if os.path.exists(cache_path):
        print("Found pickle file!")
    else:
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_zipfile=True)
        dataset_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
        records = create_image_lists(os.path.join(data_dir, dataset_folder))
        print("Pickling ...")
        with open(cache_path, 'wb') as f:
            pickle.dump(records, f, pickle.HIGHEST_PROTOCOL)
    with open(cache_path, 'rb') as f:
        records = pickle.load(f)
    return records['training'], records['validation']
def create_image_lists(image_dir):
    """Pair jpgs under <image_dir>/images/{training,validation} with png
    annotations; return {'training': [...], 'validation': [...]} of
    {'image', 'annotation', 'filename'} records (shuffled), or None if
    `image_dir` is missing.
    """
    if not gfile.Exists(image_dir):
        print("Image directory '" + image_dir + "' not found.")
        return None
    directories = ['training', 'validation']
    image_list = {}
    for directory in directories:
        file_list = []
        image_list[directory] = []
        file_glob = os.path.join(image_dir, "images", directory, '*.' + 'jpg')
        file_list.extend(glob.glob(file_glob))
        #print(file_list)
        if not file_list:
            print('No files found')
        else:
            for f in file_list:
                filename = os.path.splitext(f.split("/")[-1])[0]
                filename = filename.replace("images", "annotations")
                #annotation_file = os.path.join(image_dir, "annotations", directory, filename + '.png')
                # NOTE(review): annotations are looked up relative to
                # image_dir with "ADEChallengeData2016" stripped — presumably
                # a custom directory layout; verify against the dataset on
                # disk (the commented line above is the standard layout).
                annotation_file = os.path.join(image_dir.replace("ADEChallengeData2016", ""), filename + '.png')
                # Debug print — noisy for large datasets.
                print("annotation_file:" + annotation_file)
                if os.path.exists(annotation_file):
                    record = {'image': f, 'annotation': annotation_file, 'filename': filename}
                    image_list[directory].append(record)
                else:
                    print("Annotation file not found for %s - Skipping" % filename)
        random.shuffle(image_list[directory])
        no_of_images = len(image_list[directory])
        print ('No. of %s files: %d' % (directory, no_of_images))
        # Debug print of the whole listing — very noisy for large datasets.
        print(image_list)
    return image_list
| {
"repo_name": "DeepSegment/FCN-GoogLeNet",
"path": "read_MITSceneParsingData.py",
"copies": "1",
"size": "2629",
"license": "mit",
"hash": 5069208943157499000,
"line_mean": 36.0281690141,
"line_max": 112,
"alpha_frac": 0.6135412704,
"autogenerated": false,
"ratio": 3.8435672514619883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9943893029571815,
"avg_score": 0.00264309845803445,
"num_lines": 71
} |
__author__ = 'charlie'
import numpy as np
import os
import random
from six.moves import cPickle as pickle
from tensorflow.python.platform import gfile
import glob
import TensorflowUtils as utils
# DATA_URL = 'http://sceneparsing.csail.mit.edu/data/ADEChallengeData2016.zip'
DATA_URL = 'http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip'
def read_dataset(data_dir):
    """Load (training, validation) record lists for MIT Scene Parsing,
    building and pickling them on the first call.

    Returns:
        tuple: (training_records, validation_records).
    """
    pickle_filepath = os.path.join(data_dir, "MITSceneParsing.pickle")
    if not os.path.exists(pickle_filepath):
        # First run: fetch the archive and index it, then cache the index.
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_zipfile=True)
        scene_parsing_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
        file_lists = create_image_lists(os.path.join(data_dir, scene_parsing_folder))
        print("Pickling ...")
        with open(pickle_filepath, 'wb') as f:
            pickle.dump(file_lists, f, pickle.HIGHEST_PROTOCOL)
    else:
        print("Found pickle file!")
    with open(pickle_filepath, 'rb') as f:
        file_lists = pickle.load(f)
    return file_lists['training'], file_lists['validation']
def create_image_lists(image_dir):
    """Scan <image_dir>/images/{training,validation} for jpgs and pair each
    with its annotation png under <image_dir>/annotations/<split>.

    Returns:
        dict: {'training': [...], 'validation': [...]} of records
        {'image', 'annotation', 'filename'}, shuffled per split; or None
        when `image_dir` does not exist.
    """
    if not gfile.Exists(image_dir):
        print("Image directory '" + image_dir + "' not found.")
        return None
    image_list = {}
    for directory in ('training', 'validation'):
        records = []
        jpg_pattern = os.path.join(image_dir, "images", directory, '*.' + 'jpg')
        matches = glob.glob(jpg_pattern)
        if not matches:
            print('No files found')
        else:
            for image_path in matches:
                base = os.path.splitext(image_path.split("/")[-1])[0]
                annotation_path = os.path.join(image_dir, "annotations", directory, base + '.png')
                if not os.path.exists(annotation_path):
                    print("Annotation file not found for %s - Skipping" % base)
                    continue
                records.append({'image': image_path, 'annotation': annotation_path, 'filename': base})
        random.shuffle(records)
        image_list[directory] = records
        print ('No. of %s files: %d' % (directory, len(records)))
    return image_list
| {
"repo_name": "shekkizh/FCN.tensorflow",
"path": "read_MITSceneParsingData.py",
"copies": "2",
"size": "2416",
"license": "mit",
"hash": 3316867346930717000,
"line_mean": 35.6060606061,
"line_max": 102,
"alpha_frac": 0.6225165563,
"autogenerated": false,
"ratio": 3.751552795031056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5374069351331056,
"avg_score": null,
"num_lines": null
} |
__author__ = 'charlie'
import numpy as np
import os, sys, inspect
import random
from six.moves import cPickle as pickle
from tensorflow.python.platform import gfile
import glob
utils_path = os.path.abspath(
os.path.realpath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_path not in sys.path:
sys.path.insert(0, utils_path)
import TensorflowUtils as utils
DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
def read_dataset(data_dir):
pickle_filename = "flowers_data.pickle"
pickle_filepath = os.path.join(data_dir, pickle_filename)
print('##########gadgsdgdsgdsfe44765534')
print('os.path.exists(pickle_filepath) =', os.path.exists(pickle_filepath) )
if not os.path.exists(pickle_filepath):
utils.maybe_download_and_extract(data_dir, DATA_URL, is_tarfile=True)
flower_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
result = create_image_lists(os.path.join(data_dir, flower_folder) , testing_percentage=0.15, validation_percentage=0.15 )
print "Training set: %d" % len(result['train'])
print "Test set: %d" % len(result['test'])
print "Validation set: %d" % len(result['validation'])
print "Pickling ..."
with open(pickle_filepath, 'wb') as f:
pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
else:
print "Found pickle file!"
with open(pickle_filepath, 'rb') as f:
result = pickle.load(f)
training_images = result['train']
testing_images = result['test']
validation_images = result['validation']
del result
print ("Training: %d, Validation: %d, Test: %d" % (
len(training_images), len(validation_images), len(testing_images)))
return training_images, testing_images, validation_images
def create_image_lists(image_dir, testing_percentage=0.15, validation_percentage=0.15 ):
    """
    Code modified from tensorflow/tensorflow/examples/image_retraining.

    Walk the class sub-directories of `image_dir`, collect all jpg/jpeg
    files, shuffle them and split into train/test/validation lists.

    Returns a dict {'train': [...], 'test': [...], 'validation': [...]}
    of file paths, or None when `image_dir` does not exist.
    """
    if not gfile.Exists(image_dir):
        print("Image directory '" + image_dir + "' not found.")
        return None
    training_images = []
    sub_dirs = [x[0] for x in os.walk(image_dir)]
    # The root directory comes first, so skip it.
    is_root_dir = True
    for sub_dir in sub_dirs:
        if is_root_dir:
            is_root_dir = False
            continue
        # NOTE(review): on case-insensitive filesystems 'jpg' and 'JPG'
        # globs match the same files, so images may be listed twice —
        # verify on the target platform.
        extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
        file_list = []
        dir_name = os.path.basename(sub_dir)
        if dir_name == image_dir:
            continue
        print("Looking for images in '" + dir_name + "'")
        for extension in extensions:
            file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
            file_list.extend(glob.glob(file_glob))
        if not file_list:
            print('No files found')
            continue
        print "No. of files found: %d" % len(file_list)
        training_images.extend([f for f in file_list])
    # Shuffle once, then carve validation and test slices off the front.
    random.shuffle(training_images)
    no_of_images = len(training_images)
    validation_offset = int(validation_percentage * no_of_images)
    validation_images = training_images[:validation_offset]
    test_offset = int(testing_percentage * no_of_images)
    testing_images = training_images[validation_offset:validation_offset + test_offset]
    training_images = training_images[validation_offset + test_offset:]
    result = {
        'train': training_images,
        'test': testing_images,
        'validation': validation_images,
    }
    return result
| {
"repo_name": "BerenLuthien/HyperColumns_ImageColorization",
"path": "read_FlowersDataset.py",
"copies": "1",
"size": "3558",
"license": "bsd-3-clause",
"hash": -1541117904404964400,
"line_mean": 36.8510638298,
"line_max": 129,
"alpha_frac": 0.6385609893,
"autogenerated": false,
"ratio": 3.619532044760936,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9737759064528998,
"avg_score": 0.004066793906387551,
"num_lines": 94
} |
__author__ = 'Charlie'
import numpy as np
import tensorflow as tf
import os, sys, inspect
import scipy.io
import scipy.misc as misc
from datetime import datetime
utils_folder = os.path.abspath(
os.path.realpath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_folder not in sys.path:
sys.path.insert(0, utils_folder)
import TensorflowUtils as utils
import AnalogyDataLoader
# Command-line flags and training hyper-parameters for the image-analogy model.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "Data_zoo/Analogy_data/", "Path to analogy data")
tf.flags.DEFINE_string("logs_dir", "logs/Analogy_logs/", "Path to logs")
tf.flags.DEFINE_string("mode", "train", "Model mode - train/test")
tf.flags.DEFINE_integer("batch_size", "64", "Batch size for training")
MAX_ITERATIONS = 1 + int(1e5)
IMAGE_SIZE = 48
LEARNING_RATE = 1e-3
# Relative weights of the analogy-embedding loss and L2 regularization
# in the total objective (see main()).
ANALOGY_COEFF = 1
REGULARIZER = 1e-6
DATA_URL = "http://www-personal.umich.edu/~reedscot/files/nips2015-analogy-data.tar.gz"
def add_to_regularization_and_summary(var):
    """Attach a histogram summary for `var` and add its L2 norm to the
    'reg_loss' collection consumed by the regularization term in main()."""
    tf.histogram_summary(var.op.name, var)
    tf.add_to_collection("reg_loss", tf.nn.l2_loss(var))
def encoder_conv(image):
    """Encode an image batch into a 512-d embedding.

    Four stride-2 tanh convolutions (3→32→64→128→256 channels) each halve
    the spatial size, followed by one fully connected layer. Assumes the
    input is (batch, IMAGE_SIZE, IMAGE_SIZE, 3).
    """
    with tf.name_scope("enc_conv1") as scope:
        W_conv1 = utils.weight_variable([3, 3, 3, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        h_conv1 = tf.nn.tanh(utils.conv2d_strided(image, W_conv1, b_conv1))
    with tf.name_scope("enc_conv2") as scope:
        W_conv2 = utils.weight_variable([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        h_conv2 = tf.nn.tanh(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))
    with tf.name_scope("enc_conv3") as scope:
        W_conv3 = utils.weight_variable([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        h_conv3 = tf.nn.tanh(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))
    with tf.name_scope("enc_conv4") as scope:
        W_conv4 = utils.weight_variable([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        h_conv4 = tf.nn.tanh(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))
    with tf.name_scope("enc_fc") as scope:
        # Spatial size after four stride-2 convolutions.
        image_size = IMAGE_SIZE // 16
        h_conv4_flatten = tf.reshape(h_conv4, [-1, image_size * image_size * 256])
        W_fc5 = utils.weight_variable([image_size * image_size * 256, 512], name="W_fc5")
        b_fc5 = utils.bias_variable([512], name="b_fc5")
        encoder_val = tf.matmul(h_conv4_flatten, W_fc5) + b_fc5
    return encoder_val
def decoder_conv(embedding):
    """Decode a 512-d embedding back into an image batch.

    Mirrors encoder_conv: one fully connected layer, then four stride-2
    transposed convolutions (256→128→64→32→3 channels) that double the
    spatial size up to (IMAGE_SIZE, IMAGE_SIZE, 3). The final layer has no
    activation — raw pixel predictions.
    """
    image_size = IMAGE_SIZE // 16
    with tf.name_scope("dec_fc") as scope:
        W_fc1 = utils.weight_variable([512, image_size * image_size * 256], name="W_fc1")
        b_fc1 = utils.bias_variable([image_size * image_size * 256], name="b_fc1")
        h_fc1 = tf.nn.relu(tf.matmul(embedding, W_fc1) + b_fc1)
    with tf.name_scope("dec_conv1") as scope:
        # Dynamic batch dimension: output shapes are built with tf.pack.
        h_reshaped = tf.reshape(h_fc1, tf.pack([tf.shape(h_fc1)[0], image_size, image_size, 256]))
        W_conv_t1 = utils.weight_variable([3, 3, 128, 256], name="W_conv_t1")
        b_conv_t1 = utils.bias_variable([128], name="b_conv_t1")
        deconv_shape = tf.pack([tf.shape(h_fc1)[0], 2 * image_size, 2 * image_size, 128])
        h_conv_t1 = tf.nn.relu(
            utils.conv2d_transpose_strided(h_reshaped, W_conv_t1, b_conv_t1, output_shape=deconv_shape))
    with tf.name_scope("dec_conv2") as scope:
        W_conv_t2 = utils.weight_variable([3, 3, 64, 128], name="W_conv_t2")
        b_conv_t2 = utils.bias_variable([64], name="b_conv_t2")
        deconv_shape = tf.pack([tf.shape(h_conv_t1)[0], 4 * image_size, 4 * image_size, 64])
        h_conv_t2 = tf.nn.relu(
            utils.conv2d_transpose_strided(h_conv_t1, W_conv_t2, b_conv_t2, output_shape=deconv_shape))
    with tf.name_scope("dec_conv3") as scope:
        W_conv_t3 = utils.weight_variable([3, 3, 32, 64], name="W_conv_t3")
        b_conv_t3 = utils.bias_variable([32], name="b_conv_t3")
        deconv_shape = tf.pack([tf.shape(h_conv_t2)[0], 8 * image_size, 8 * image_size, 32])
        h_conv_t3 = tf.nn.relu(
            utils.conv2d_transpose_strided(h_conv_t2, W_conv_t3, b_conv_t3, output_shape=deconv_shape))
    with tf.name_scope("dec_conv4") as scope:
        W_conv_t4 = utils.weight_variable([3, 3, 3, 32], name="W_conv_t4")
        b_conv_t4 = utils.bias_variable([3], name="b_conv_t4")
        deconv_shape = tf.pack([tf.shape(h_conv_t3)[0], IMAGE_SIZE, IMAGE_SIZE, 3])
        pred_image = utils.conv2d_transpose_strided(h_conv_t3, W_conv_t4, b_conv_t4, output_shape=deconv_shape)
    return pred_image
def read_train_inputs(loader):
    """Fetch the next training batch from the loader (Python 2 iterator
    protocol; presumably yields (A, B, C, D) arrays — confirm against
    AnalogyDataLoader)."""
    return loader.next()
def read_eval_inputs(loader):
    """Return the loader's fixed 'rotate' analogy test set used for evaluation."""
    return loader.tests['rotate']
def train(loss, step):
    """Build the training op: momentum SGD minimizing `loss`, incrementing
    `step` (the global step variable) on each run."""
    # learning_rate = tf.train.exponential_decay(LEARNING_RATE, global_step=step, decay_steps=MAX_ITERATIONS / 4,
    #                                            decay_rate=0.99)
    # return tf.train.RMSPropOptimizer(learning_rate, 0.99, momentum=0.9).minimize(loss, global_step=step)
    return tf.train.MomentumOptimizer(LEARNING_RATE, 0.9).minimize(loss, global_step=step)
def main(argv=None):
    """Train the deep visual analogy network: given images A:B :: C:?,
    predict D by decoding enc(C) plus a learned transformation of
    (enc(B) - enc(A), enc(C)).
    """
    global_step = tf.Variable(0, trainable=False)
    # The four analogy images: A is to B as C is to D (ground truth).
    img_A = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
    img_B = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
    img_C = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
    img_D = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
    tf.image_summary("A", img_A, max_images=2)
    tf.image_summary("B", img_B, max_images=2)
    tf.image_summary("C", img_C, max_images=2)
    tf.image_summary("Ground_truth", img_D, max_images=2)
    print "Setting up encoder.."
    # All four images share one encoder (reused variables).
    with tf.variable_scope("encoder") as scope:
        enc_A = encoder_conv(img_A)
        scope.reuse_variables()
        enc_B = encoder_conv(img_B)
        enc_C = encoder_conv(img_C)
        enc_D = encoder_conv(img_D)
    print "Setting up analogy calc.."
    # analogy calculation: 2-layer MLP on [enc_B - enc_A, enc_C].
    analogy_input = tf.concat(1, [enc_B - enc_A, enc_C])
    W_analogy1 = utils.weight_variable([1024, 512], name="W_analogy1")
    b_analogy1 = utils.bias_variable([512], name="b_analogy1")
    analogy_fc1 = tf.nn.relu(tf.matmul(analogy_input, W_analogy1) + b_analogy1)
    W_analogy2 = utils.weight_variable([512, 512], name="W_analogy2")
    b_analogy2 = utils.bias_variable([512], name="b_analogy2")
    analogy_fc2 = tf.nn.relu(tf.matmul(analogy_fc1, W_analogy2) + b_analogy2)
    pred = decoder_conv(enc_C + analogy_fc2)
    tf.image_summary("Pred_image", pred, max_images=2)
    print "Setting up regularization/ summary variables..."
    for var in tf.trainable_variables():
        add_to_regularization_and_summary(var)
    print "Loss and train setup..."
    # loss1: pixel reconstruction; loss2: analogy consistency in embedding
    # space; loss3: L2 weight regularization collected above.
    loss1 = tf.sqrt(2*tf.nn.l2_loss(pred - img_D)) / FLAGS.batch_size
    tf.scalar_summary("image_loss", loss1)
    loss2 = tf.sqrt(2* tf.nn.l2_loss(enc_D - enc_C - analogy_fc2)) / FLAGS.batch_size
    tf.scalar_summary("analogy_loss", loss2)
    loss3 = tf.add_n(tf.get_collection("reg_loss"))
    tf.scalar_summary("regularization", loss3)
    total_loss = loss1 + ANALOGY_COEFF * loss2 + REGULARIZER * loss3
    tf.scalar_summary("Total_loss", total_loss)
    train_op = train(total_loss, global_step)
    summary_op = tf.merge_all_summaries()
    utils.maybe_download_and_extract(FLAGS.data_dir, DATA_URL, is_tarfile=True)
    print "Initializing Loader class..."
    loader = AnalogyDataLoader.Loader(FLAGS.data_dir, FLAGS.batch_size)
    # Fixed evaluation batch reused throughout training.
    eval_A, eval_B, eval_C, eval_D = read_eval_inputs(loader)
    eval_feed = {img_A: eval_A, img_B: eval_B, img_C: eval_C, img_D: eval_D}
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        print "Setting up summary and saver..."
        summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)
        saver = tf.train.Saver()
        # Resume from the latest checkpoint when one exists.
        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored!"
        for step in xrange(MAX_ITERATIONS):
            A, B, C, D = read_train_inputs(loader)
            feed_dict = {img_A: A, img_B: B, img_C: C, img_D: D}
            if step % 1000 == 0:
                eval_loss = sess.run([loss1, loss2, loss3, total_loss], feed_dict=eval_feed)
                print "Evaluation: (Image loss %f, Variation loss %f, Reg loss %f) total loss %f" % tuple(eval_loss)
            sess.run(train_op, feed_dict=feed_dict)
            if step % 100 == 0:
                [loss_val, summary_str] = sess.run([total_loss, summary_op], feed_dict=feed_dict)
                print "%s Step %d: Training loss %f" % (datetime.now(), step, loss_val)
                summary_writer.add_summary(summary_str, global_step=step)
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=step)
if __name__ == "__main__":
tf.app.run()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "GenerativeNetworks/ImageAnalogy.py",
"copies": "1",
"size": "9032",
"license": "mit",
"hash": 8847596187631892000,
"line_mean": 42.4230769231,
"line_max": 116,
"alpha_frac": 0.6335252436,
"autogenerated": false,
"ratio": 2.8251485767907414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3958673820390741,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charlie'
import numpy as np
import tensorflow as tf
import scipy.misc as misc
import os, sys, argparse, inspect
import random
utils_path = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_path not in sys.path:
sys.path.insert(0, utils_path)
import TensorflowUtils as utils
ap = argparse.ArgumentParser("Train a network to guess RGB of image")
ap.add_argument("-i", "--image", required=True, help="Path to image")
args = vars(ap.parse_args())
LOG_DIR = "logs/NeuralArtist_logs/"
NEURONS_PER_LAYER = 20
LEARNING_RATE = 1e-4
MOMENTUM_RATE = 0.9
MAX_ITERATIONS = 100000
current_index = 0
def get_next_batch(batch_size):
    """Return the next `batch_size` (coordinate, pixel) training pairs.

    Walks the image pixels sequentially via the module-level cursor
    `current_index`, wrapping back to 0 after the last pixel.
    """
    global current_index
    coord_rows = []
    pixel_rows = []
    for _ in range(batch_size):
        coord_rows.append(input_value[current_index, :])
        pixel_rows.append(image_reshape[current_index, :])
        current_index = (current_index + 1) % image_size
    return coord_rows, pixel_rows
def inference(inputs):
    """Map (row, col) pixel coordinates to predicted color values.

    Eight ReLU hidden layers of NEURONS_PER_LAYER units each, followed by a
    linear output layer of `channels` units (`channels` is a module-level
    global set from the loaded image). Histogram summaries are recorded for
    every weight and bias tensor.
    """
    with tf.name_scope("input"):
        W1 = utils.weight_variable([2, NEURONS_PER_LAYER], name="weights_1")
        b1 = utils.bias_variable([NEURONS_PER_LAYER], name="bias_1")
        tf.histogram_summary("W1", W1)
        tf.histogram_summary("b1", b1)
        h1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(inputs, W1), b1))
    with tf.name_scope("hidden2"):
        W2 = utils.weight_variable([NEURONS_PER_LAYER, NEURONS_PER_LAYER], name="weights_2")
        b2 = utils.bias_variable([NEURONS_PER_LAYER], name="bias_2")
        tf.histogram_summary("W2", W2)
        tf.histogram_summary("b2", b2)
        h2 = tf.nn.relu(tf.matmul(h1, W2) + b2)
    with tf.name_scope("hidden3"):
        W3 = utils.weight_variable([NEURONS_PER_LAYER, NEURONS_PER_LAYER], name="weights_3")
        b3 = utils.bias_variable([NEURONS_PER_LAYER], name="bias_3")
        tf.histogram_summary("W3", W3)
        tf.histogram_summary("b3", b3)
        h3 = tf.nn.relu(tf.matmul(h2, W3) + b3)
    with tf.name_scope("hidden4"):
        W4 = utils.weight_variable([NEURONS_PER_LAYER, NEURONS_PER_LAYER], name="weights_4")
        b4 = utils.bias_variable([NEURONS_PER_LAYER], name="bias_4")
        tf.histogram_summary("W4", W4)
        tf.histogram_summary("b4", b4)
        h4 = tf.nn.relu(tf.matmul(h3, W4) + b4)
    with tf.name_scope("hidden5"):
        W5 = utils.weight_variable([NEURONS_PER_LAYER, NEURONS_PER_LAYER], name="weights_5")
        b5 = utils.bias_variable([NEURONS_PER_LAYER], name="bias_5")
        tf.histogram_summary("W5", W5)
        tf.histogram_summary("b5", b5)
        h5 = tf.nn.relu(tf.matmul(h4, W5) + b5)
    with tf.name_scope("hidden6"):
        W6 = utils.weight_variable([NEURONS_PER_LAYER, NEURONS_PER_LAYER], name="weights_6")
        b6 = utils.bias_variable([NEURONS_PER_LAYER], name="bias_6")
        tf.histogram_summary("W6", W6)
        tf.histogram_summary("b6", b6)
        h6 = tf.nn.relu(tf.matmul(h5, W6) + b6)
    with tf.name_scope("hidden7"):
        W7 = utils.weight_variable([NEURONS_PER_LAYER, NEURONS_PER_LAYER], name="weights_7")
        b7 = utils.bias_variable([NEURONS_PER_LAYER], name="bias_7")
        # BUG FIX: these summaries previously logged W6/b6 under the "W7"/"b7" tags
        tf.histogram_summary("W7", W7)
        tf.histogram_summary("b7", b7)
        h7 = tf.nn.relu(tf.matmul(h6, W7) + b7)
    with tf.name_scope("hidden8"):
        W8 = utils.weight_variable([NEURONS_PER_LAYER, NEURONS_PER_LAYER], name="weights_8")
        b8 = utils.bias_variable([NEURONS_PER_LAYER], name="bias_8")
        # BUG FIX: these summaries previously logged W6/b6 under the "W8"/"b8" tags
        tf.histogram_summary("W8", W8)
        tf.histogram_summary("b8", b8)
        h8 = tf.nn.relu(tf.matmul(h7, W8) + b8)
    with tf.name_scope("output"):
        W9 = utils.weight_variable([NEURONS_PER_LAYER, channels], name="weights_9")
        b9 = utils.bias_variable([channels], name="bias_9")
        tf.histogram_summary("W9", W9)
        tf.histogram_summary("b9", b9)
        # linear output: color values are regressed directly, no activation
        pred = tf.matmul(h8, W9) + b9
    return pred
def loss(pred, actual):
    """Root-sum-square pixel error between prediction and target, scaled by image size."""
    squared_error = 2 * tf.nn.l2_loss(tf.sub(pred, actual))
    loss_val = tf.sqrt(squared_error) / image_size
    tf.scalar_summary("loss", loss_val)
    return loss_val
def train(loss_val):
    """Create the training op: Adam with the fixed module-level LEARNING_RATE."""
    optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
    return optimizer.minimize(loss_val)
def main(argv=None):
    """Train the coordinate->color network on the globally loaded image.

    Saves a preview PNG whenever training loss improves, checkpoints every
    1000 steps, and writes the final rendered image at the end.
    """
    best_loss = float('Inf')
    with tf.Graph().as_default():
        inputs = tf.placeholder(tf.float32, shape=(None, 2))   # (row, col) pixel coordinates
        preds = tf.placeholder(tf.float32, shape=(None, 3))    # target RGB values
        pred_val = inference(inputs)
        loss_val = loss(pred_val, preds)
        train_op = train(loss_val)
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(LOG_DIR)
        with tf.Session() as sess:
            sess.run(tf.initialize_all_variables())
            saver = tf.train.Saver()
            # resume from the latest checkpoint if one exists
            ckpt = tf.train.get_checkpoint_state(LOG_DIR)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            for step in xrange(MAX_ITERATIONS):
                batch_input, batch_pred = get_next_batch(BATCH_SIZE)
                feed_dict = {inputs: batch_input, preds: batch_pred}
                sess.run(train_op, feed_dict=feed_dict)
                if (step % 100 == 0):
                    this_loss, summary_str = sess.run([loss_val, summary_op], feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)
                    print("Step %d, Loss %g" % (step, this_loss))
                    # render the whole image whenever loss improves, as a visual checkpoint
                    if (this_loss < best_loss):
                        pred_image = sess.run(pred_val, feed_dict={inputs: input_value})
                        pred_image = np.reshape(pred_image,
                                                image.shape)  # utils.unprocess_image(np.reshape(pred_image, image.shape), mean_pixel)
                        misc.imsave("neural_artist_check.png", pred_image)
                        best_loss = this_loss
                if step%1000 == 0:
                    saver.save(sess, LOG_DIR + "model.ckpt", global_step=step)
            # final full-image render after training completes
            best_image = sess.run(pred_val, feed_dict={inputs: input_value})
            best_image = np.reshape(best_image,
                                    image.shape)  # utils.unprocess_image(np.reshape(best_image, image.shape), mean_pixel)
            misc.imsave("neural_artist.png", best_image)
if __name__ == "__main__":
    # Load and resize the target image; the module-level globals defined below
    # (image, channels, image_size, image_reshape, input_value, BATCH_SIZE)
    # are read by get_next_batch/inference/loss/main.
    image = misc.imread(args["image"])
    image = misc.imresize(image, (225,225))
    # image = np.array([[[0, 0, 0], [1, 1, 1]], [[2, 2, 2], [3, 3, 3]]])
    height, width, channels = image.shape
    image_size = height * width
    # mean_pixel = np.mean(image, axis=(0, 1))
    # processed_image = utils.process_image(image, mean_pixel)
    # one row per pixel, row-major, float32 for the network target
    image_reshape = np.reshape(image, (-1, channels)).astype(np.float32)
    BATCH_SIZE = image_size/5  # Python 2 integer division: one fifth of the pixels per batch
    print image_reshape.shape
    # one (row, col) coordinate per pixel, in the same row-major order as image_reshape
    input_list = []
    for i in range(height):
        for j in range(width):
            input_list.append([i, j])
    input_value = np.array(input_list)
    print input_value.shape
    input_value = input_value.astype(np.float32)
    main()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "ImageArt/NeuralArtist.py",
"copies": "1",
"size": "7166",
"license": "mit",
"hash": 3885256271954654000,
"line_mean": 39.0335195531,
"line_max": 134,
"alpha_frac": 0.5979626012,
"autogenerated": false,
"ratio": 3.14989010989011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.424785271109011,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charlie'
import os, sys
import tarfile
import tensorflow as tf
from tensorflow.python.platform import gfile
from six.moves import urllib
import numpy as np
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('model_dir', 'Models_zoo/imagenet',
"""Path to classify_image_graph_def.pb, """)
tf.app.flags.DEFINE_string('image1', '',
"""Path to image 1.""")
tf.app.flags.DEFINE_string('image2', '',
"""Path to image 2.""")
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
IMAGE_SIZE = 299
IMAGE_DEPTH = 3
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape'
BOTTLENECK_TENSOR_SIZE = 2048
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents'
def ensure_name_has_port(tensor_name):
    """Return `tensor_name` with an explicit output port, defaulting to ':0'."""
    if ':' in tensor_name:
        return tensor_name
    return tensor_name + ':0'
def maybe_download_and_extract():
    """Download the Inception model tarball into FLAGS.model_dir (if absent) and extract it.

    Skips the download when the tarball already exists; extraction runs
    unconditionally so a previously downloaded archive is still unpacked.
    """
    dest_directory = FLAGS.model_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # carriage return keeps the progress report on a single console line
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, reporthook=_progress)
        print()
        statinfo = os.stat(filepath)
        # typo fix: "Succesfully" -> "Successfully"
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def create_inception_graph():
    """Load the frozen Inception GraphDef from FLAGS.model_dir into the default graph."""
    graph_path = os.path.join(FLAGS.model_dir, 'classify_image_graph_def.pb')
    with tf.gfile.FastGFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
def get_unit_hypersphere_vector(feature):
    """Mean-center `feature` and scale it to unit Euclidean (Frobenius) norm."""
    centered = np.subtract(feature, np.mean(feature))
    return np.divide(centered, np.linalg.norm(centered))
def find_similarity(sess, image1, image2):
    """Return a cosine-style similarity of two images' Inception bottleneck features.

    Each image's 2048-d bottleneck activation is mean-centered and unit
    normalized, so the dot product lies in [-1, 1]. Returns -1 when either
    file cannot be found.
    """
    if not gfile.Exists(image1):
        print ("Cannot find image 1 %s" % image1)
        return -1
    if not gfile.Exists(image2):
        print ("Cannot find image 2 %s" % image2)
        return -1
    image_data1 = gfile.FastGFile(image1, 'rb').read()
    image_data2 = gfile.FastGFile(image2, 'rb').read()
    bottleneck_tensor = sess.graph.get_tensor_by_name(ensure_name_has_port(BOTTLENECK_TENSOR_NAME))
    # feed raw JPEG bytes directly; the graph decodes them itself
    feature1 = sess.run(bottleneck_tensor,
                        {ensure_name_has_port(JPEG_DATA_TENSOR_NAME): image_data1})
    normalized_feature1 = get_unit_hypersphere_vector(feature1)
    print np.count_nonzero(feature1)
    print np.max(feature1)
    feature2 = sess.run(bottleneck_tensor,
                        {ensure_name_has_port(JPEG_DATA_TENSOR_NAME): image_data2})
    normalized_feature2 = get_unit_hypersphere_vector(feature2)
    print np.count_nonzero(feature2)
    print np.max(feature2)
    # dot product of two unit vectors == cosine similarity
    return np.sum(normalized_feature1 * normalized_feature2)
    # float_similarity = tf.cast(tf.sub(feature1,feature2), dtype=tf.float32)
    # l2_dist = tf.mul(2.0, tf.nn.l2_loss(float_similarity))
    # # return tf.div(tf.sqrt(l2_dist),BOTTLENECK_TENSOR_SIZE)
    # return tf.sqrt(l2_dist)
def main(argv=None):
    """Fetch/load the Inception graph and print the similarity of the two flag images."""
    maybe_download_and_extract()
    create_inception_graph()
    with tf.Session() as sess:
        diff = find_similarity(sess, FLAGS.image1, FLAGS.image2)
        # print ("The two images vary by %.5f" % sess.run(diff))
        print ("Similarity score of the two images: %.5f" % diff)


if __name__ == "__main__":
    tf.app.run()  # parses flags, then calls main()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "Misc/FindInceptionSimilarity.py",
"copies": "1",
"size": "3836",
"license": "mit",
"hash": 8449568167106041000,
"line_mean": 37.7474747475,
"line_max": 126,
"alpha_frac": 0.6504171011,
"autogenerated": false,
"ratio": 3.2316764953664703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.438209359646647,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charlie'
import pandas as pd
import numpy as np
import os, sys, inspect
from six.moves import cPickle as pickle
import scipy.misc as misc
IMAGE_SIZE = 96
NUM_LABELS = 30
VALIDATION_PERCENT = 0.1 # use 10 percent of training images for validation
IMAGE_LOCATION_NORM = IMAGE_SIZE / 2
np.random.seed(0)
def read_data(data_dir, force=False):
pickle_file = os.path.join(data_dir, "FaceDetectionData.pickle")
if force or not os.path.exists(pickle_file):
train_filename = os.path.join(data_dir, "training.csv")
data_frame = pd.read_csv(train_filename)
cols = data_frame.columns[:-1]
np.savetxt(os.path.join(data_dir, "column_labels.txt"), cols.values, fmt="%s")
data_frame['Image'] = data_frame['Image'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
data_frame = data_frame.dropna()
print "Reading training.csv ..."
# scale data to a 1x1 image with pixel values 0-1
train_images = np.vstack(data_frame['Image']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
train_labels = (data_frame[cols].values - IMAGE_LOCATION_NORM) / float(IMAGE_LOCATION_NORM)
permutations = np.random.permutation(train_images.shape[0])
train_images = train_images[permutations]
train_labels = train_labels[permutations]
validation_percent = int(train_images.shape[0] * VALIDATION_PERCENT)
validation_images = train_images[:validation_percent]
validation_labels = train_labels[:validation_percent]
train_images = train_images[validation_percent:]
train_labels = train_labels[validation_percent:]
print "Reading test.csv ..."
test_filename = os.path.join(data_dir, "test.csv")
data_frame = pd.read_csv(test_filename)
data_frame['Image'] = data_frame['Image'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
data_frame = data_frame.dropna()
test_images = np.vstack(data_frame['Image']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
with open(pickle_file, "wb") as file:
try:
print 'Picking ...'
save = {
"train_images": train_images,
"train_labels": train_labels,
"validation_images": validation_images,
"validation_labels": validation_labels,
"test_images": test_images,
}
pickle.dump(save, file, pickle.HIGHEST_PROTOCOL)
except:
print("Unable to pickle file :/")
with open(pickle_file, "rb") as file:
save = pickle.load(file)
train_images = save["train_images"]
train_labels = save["train_labels"]
validation_images = save["validation_images"]
validation_labels = save["validation_labels"]
test_images = save["test_images"]
return train_images, train_labels, validation_images, validation_labels, test_images
def save_sample_result(X, y, save_dir):
    """Burn keypoint markers (black dots) into images and save them under save_dir/checkpoints."""
    for i in range(X.shape[0]):
        fn = os.path.join(save_dir, "checkpoints", "%d.jpg" % i)
        # labels come in (x, y) pairs: pt1 is the row coordinate, pt2 the column
        for j in range(0, y.shape[1], 2):
            pt1 = y[i, j + 1]
            pt2 = y[i, j]
            # NOTE(review): pt1/pt2 appear to be floats (labels are scaled pixel
            # coordinates); float slice indices are rejected by modern numpy --
            # confirm they are rounded/cast upstream or this relied on old numpy.
            X[i, pt1 - 1:pt1 + 1, pt2 - 1:pt2 + 1] = 0
        misc.imsave(fn, X[i, :, :, 0])
def kaggle_submission_format(test_images, test_labels, data_dir):
test_labels *= IMAGE_LOCATION_NORM
test_labels += IMAGE_LOCATION_NORM
test_labels = test_labels.clip(0, 96)
save_sample_result(test_images[0:16], test_labels[0:16], data_dir)
save_sample_result(test_images[0:16], test_labels[0:16], data_dir)
lookup_filename = os.path.join(data_dir, "IdLookupTable.csv")
lookup_table = pd.read_csv(lookup_filename)
values = []
cols = np.genfromtxt(os.path.join(data_dir, "column_labels.txt"), dtype=str)
for index, row in lookup_table.iterrows():
values.append((
row['RowId'],
test_labels[row.ImageId - 1][np.where(cols == row.FeatureName)[0][0]],
))
submission = pd.DataFrame(values, columns=('RowId', 'Location'))
submission.to_csv(os.path.join(data_dir, 'submission.csv'), index=False)
print "Submission created!"
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "FaceDetection/FaceDetectionDataUtils.py",
"copies": "1",
"size": "4210",
"license": "mit",
"hash": -5156132960241312000,
"line_mean": 39.0952380952,
"line_max": 100,
"alpha_frac": 0.6137767221,
"autogenerated": false,
"ratio": 3.4621710526315788,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45759477747315785,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charlie'
import tensorflow as tf
import os, sys, inspect
import numpy as np
import tensorflow.examples.tutorials.mnist as mnist
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
utils_folder = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_folder not in sys.path:
sys.path.insert(0, utils_folder)
import TensorflowUtils as utils
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", "256", "Train batch size")
tf.flags.DEFINE_string("logs_dir", "logs/MNIST_logs/", "Path to logs dir")
tf.flags.DEFINE_float("regularization", "1e-4", "Regularization multiplier value")
IMAGE_SIZE = 28
MAX_ITERATIONS = 20001
LEARNING_RATE = 1e-3
NUM_LABELS = 10
COLORS = np.random.rand(NUM_LABELS)
def add_to_reg_loss_and_summary(W, b):
    """Record histogram summaries for W and b and add their L2 norms to the 'losses' collection."""
    tf.histogram_summary(W.name, W)
    tf.histogram_summary(b.name, b)
    for reg_term in (tf.nn.l2_loss(W), tf.nn.l2_loss(b)):
        tf.add_to_collection("losses", reg_term)
def inference_fc(image):
    """Fully connected autoencoder: 784 -> 50 -> 50 -> 3 -> 50 -> 50 -> 784.

    All hidden layers use tanh; the final layer is linear. Each layer's
    weights/biases are logged and L2-regularized via add_to_reg_loss_and_summary.
    Returns (h_fc3, pred): the 3-D bottleneck encoding and the reconstruction.
    """
    with tf.name_scope("fc1") as scope:
        W_fc1 = utils.weight_variable([IMAGE_SIZE * IMAGE_SIZE, 50], name="W_fc1")
        b_fc1 = utils.bias_variable([50], name="b_fc1")
        add_to_reg_loss_and_summary(W_fc1, b_fc1)
        h_fc1 = tf.nn.tanh(tf.matmul(image, W_fc1) + b_fc1)
    with tf.name_scope("fc2") as scope:
        W_fc2 = utils.weight_variable([50, 50], name="W_fc2")
        b_fc2 = utils.bias_variable([50], name="b_fc2")
        add_to_reg_loss_and_summary(W_fc2, b_fc2)
        h_fc2 = tf.nn.tanh(tf.matmul(h_fc1, W_fc2) + b_fc2)
    with tf.name_scope("fc3") as scope:
        # 3-D bottleneck: the compressed representation returned to the caller
        W_fc3 = utils.weight_variable([50, 3], name="W_fc3")
        b_fc3 = utils.bias_variable([3], name="b_fc3")
        add_to_reg_loss_and_summary(W_fc3, b_fc3)
        h_fc3 = tf.nn.tanh(tf.matmul(h_fc2, W_fc3) + b_fc3)
    with tf.name_scope("fc4") as scope:
        W_fc4 = utils.weight_variable([3, 50], name="W_fc4")
        b_fc4 = utils.bias_variable([50], name="b_fc4")
        add_to_reg_loss_and_summary(W_fc4, b_fc4)
        h_fc4 = tf.nn.tanh(tf.matmul(h_fc3, W_fc4) + b_fc4)
    with tf.name_scope("fc5") as scope:
        W_fc5 = utils.weight_variable([50, 50], name="W_fc5")
        b_fc5 = utils.bias_variable([50], name="b_fc5")
        add_to_reg_loss_and_summary(W_fc5, b_fc5)
        h_fc5 = tf.nn.tanh(tf.matmul(h_fc4, W_fc5) + b_fc5)
    # h_fc_dropout = tf.nn.dropout(h_fc5, 0.5)
    with tf.name_scope("fc6") as scope:
        W_fc6 = utils.weight_variable([50, IMAGE_SIZE * IMAGE_SIZE], name="W_fc6")
        b_fc6 = utils.bias_variable([IMAGE_SIZE * IMAGE_SIZE], name="b_fc6")
        add_to_reg_loss_and_summary(W_fc6, b_fc6)
        # linear output: reconstruction logits compared against the input pixels
        pred = tf.matmul(h_fc5, W_fc6) + b_fc6
    return h_fc3, pred
def inference_conv(image):
    """Convolutional encoder, UNFINISHED: builds conv1-conv4 but has no decoder and returns None."""
    # incomplete :/
    image_reshaped = tf.reshape(image, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable([3, 3, 1, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        add_to_reg_loss_and_summary(W_conv1, b_conv1)
        h_conv1 = tf.nn.tanh(utils.conv2d_basic(image_reshaped, W_conv1, b_conv1))
    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        add_to_reg_loss_and_summary(W_conv2, b_conv2)
        h_conv2 = tf.nn.tanh(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))
    with tf.name_scope("conv3") as scope:
        W_conv3 = utils.weight_variable([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        add_to_reg_loss_and_summary(W_conv3, b_conv3)
        h_conv3 = tf.nn.tanh(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))
    with tf.name_scope("conv4") as scope:
        W_conv4 = utils.weight_variable([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        add_to_reg_loss_and_summary(W_conv4, b_conv4)
        h_conv4 = tf.nn.tanh(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))
def main(argv=None):
    """Train the FC autoencoder on MNIST; periodically dump 3-D encodings of the test set."""
    print "Reading MNIST data..."
    data = mnist.input_data.read_data_sets("MNIST_data", one_hot=True)
    images = tf.placeholder(tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE])
    tf.image_summary("Input", tf.reshape(images, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), max_images=1)
    print "Setting up inference..."
    encoded, output_image = inference_fc(images)
    tf.image_summary("Output", tf.reshape(output_image, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), max_images=1)
    print "Loss setup..."
    # reconstruction loss + L2 weight regularization accumulated in the 'losses' collection
    loss1 = tf.nn.l2_loss(tf.sub(output_image, images)) / (IMAGE_SIZE * IMAGE_SIZE)
    loss2 = tf.add_n(tf.get_collection("losses"))
    loss = loss1 + FLAGS.regularization * loss2
    tf.scalar_summary("Loss", loss)
    tf.scalar_summary("Encoder_loss", loss1)
    tf.scalar_summary("Reg_loss", loss2)
    print "Setting up optimizer..."
    train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
    print 'Setting up graph summary...'
    summary_op = tf.merge_all_summaries()
    # print "Creating matplot fig"
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    with tf.Session() as sess:
        summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph_def)
        print "Creating saver..."
        saver = tf.train.Saver()
        sess.run(tf.initialize_all_variables())
        # resume from the latest checkpoint if one exists
        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored..."
        for step in xrange(MAX_ITERATIONS):
            batch_image, batch_label = data.train.next_batch(FLAGS.batch_size)
            feed_dict = {images: batch_image}
            if step % 100 == 0:
                summary_str, loss_val = sess.run([summary_op, loss], feed_dict=feed_dict)
                print "Step %d Train loss %f" % (step, loss_val)
                summary_writer.add_summary(summary_str, global_step=step)
            if step % 1000 == 0:
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=step)
                # dump the bottleneck encodings of the whole test set with digit labels appended
                test_compression = sess.run(encoded, feed_dict={images: data.test.images})
                labels = np.argmax(data.test.labels, axis=1).reshape((-1, 1))  # used by the commented plot below
                write_file = os.path.join(FLAGS.logs_dir, "checkpoint%d.txt" % step)
                write_arr = np.hstack((test_compression, np.argmax(data.test.labels, axis=1).reshape((-1, 1))))
                np.savetxt(write_file, write_arr)
                # ax.clear()
                # ax.scatter(test_compression[:, 0], test_compression[:, 1], test_compression[:, 2], s=10,
                #            c=COLORS[labels], marker='o')
                # plt.show()
            sess.run(train_op, feed_dict=feed_dict)


if __name__ == "__main__":
    tf.app.run()  # parses flags, then calls main()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "Unsupervised_learning/MNISTAutoEncoder.py",
"copies": "1",
"size": "6987",
"license": "mit",
"hash": -8601272841587165000,
"line_mean": 41.8650306748,
"line_max": 111,
"alpha_frac": 0.6142836697,
"autogenerated": false,
"ratio": 2.877677100494234,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3991960770194234,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charlie'
# Layer visualization based on deep dream code in tensorflow for VGG net
import tensorflow as tf
import numpy as np
import scipy.io
import scipy.misc
from datetime import datetime
import os, sys, inspect
utils_path = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_path not in sys.path:
sys.path.insert(0, utils_path)
import TensorflowUtils as utils
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("visualize_layer", "conv5_1", "Layer to visualize")
tf.flags.DEFINE_integer("visualize_filter", "0", """filter to visualize in a layer""")
tf.flags.DEFINE_string("model_dir", "Models_zoo/", """Path to the VGGNet model mat file""")
tf.flags.DEFINE_string("logs_dir", "logs/Visualization_logs/", """Path to save logs and checkpoint if needed""")
DATA_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
LEARNING_RATE = 1.5
MAX_ITERATIONS = 10
VISUALIZE_LAYER = FLAGS.visualize_layer # Dream layers are usually conv layers
VISUALIZE_FEATURE = FLAGS.visualize_filter
def get_model_data():
    """Load the VGG-19 .mat weights from FLAGS.model_dir; raise IOError if the file is missing."""
    mat_name = DATA_URL.split("/")[-1]
    mat_path = os.path.join(FLAGS.model_dir, mat_name)
    if not os.path.exists(mat_path):
        raise IOError("VGGNet Model not found!")
    return scipy.io.loadmat(mat_path)
def get_image(image_dir):
    """Read an image file and return it as float32 with a leading batch dimension of 1."""
    raw = scipy.misc.imread(image_dir)
    batched_shape = (1,) + raw.shape
    return np.ndarray.reshape(raw.astype(np.float32), batched_shape)
def save_image(filename, image, mean_pixel):
output = utils.unprocess_image(image, mean_pixel)
output = np.uint8(np.clip(output, 0, 255))
scipy.misc.imsave(filename, output)
print "Image saved!"
def vgg_net(weights, image):
    """Build the VGG-19 convolutional feature layers on top of `image`.

    `weights` is the squeezed 'layers' array from the matconvnet .mat file.
    Returns a dict mapping each layer name to its output tensor.
    """
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )
    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]  # layer type is encoded in the name prefix
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = utils.max_pool_2x2(current)
        elif kind == 'norm':
            # no 'norm' entry appears in `layers` above; branch kept for parity with other VGG builders
            current = tf.nn.lrn(current, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        net[name] = current
    assert len(net) == len(layers)
    return net
def visualize_layer(model_params):
    """Deep-dream style visualization of one VGG filter (VISUALIZE_LAYER/VISUALIZE_FEATURE).

    Starts from random noise and performs multi-octave gradient ascent on the
    mean activation of the chosen filter, then saves the result to FLAGS.logs_dir.
    """
    image_shape = (299, 299, 3)
    input_image = tf.placeholder(tf.float32)
    # start from near-uniform noise shifted into a typical pixel-value range
    dummy_image = np.random.uniform(size=image_shape) + 100.0
    tf.histogram_summary("Image_Output", dummy_image)
    image_net = vgg_net(model_params["weights"], input_image)

    def resize_image(image, size):
        # bilinear resize of a single HWC image via a temporary batch dimension
        expanded_image = tf.expand_dims(image, 0)
        return tf.image.resize_bilinear(expanded_image, size)[0,:,:,:]

    def calc_grad_tiled(img, grad_op, tile_size=512):
        # Compute the gradient over the image in tiles, with a random roll so
        # tile seams move between calls; uses `sess` from the enclosing scope.
        sz = tile_size
        h, w = img.shape[:2]
        sx, sy = np.random.randint(sz, size=2)
        img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
        grad_val = np.zeros_like(img)
        for y in xrange(0, max(h-sz//2, sz),sz):
            for x in xrange(0, max(w-sz//2, sz),sz):
                sub = img_shift[y:y+sz,x:x+sz]
                g = sess.run(grad_op, {input_image:sub})
                grad_val[y:y+sz,x:x+sz] = g
        return np.roll(np.roll(grad_val, -sx, 1), -sy, 0)

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        visualize_layer_feature = image_net[VISUALIZE_LAYER][:, :, :, VISUALIZE_FEATURE]
        # objective: mean activation of the chosen filter map
        feature_score = tf.reduce_mean(visualize_layer_feature)
        gradient = tf.gradients(feature_score, input_image)[0]
        # for itr in xrange(MAX_ITERATIONS):
        #     grad, score = sess.run([gradient, feature_score], feed_dict={input_image: dummy_image})
        #
        #     grad /= grad.std() + 1e-8
        #     dummy_image += LEARNING_RATE * grad
        #     print "Score %f" % score
        # split the image into a Laplacian-like pyramid of `octave_n` levels
        octave_n = 4
        octave_scale = 1.4
        octaves = []
        for i in xrange(octave_n - 1):
            hw = dummy_image.shape[:2]
            lo = resize_image(dummy_image, np.int32(np.float32(hw) / octave_scale))
            hi = dummy_image - resize_image(dummy_image, hw)  # high-frequency residual
            dummy_image = lo
            dummy_image = dummy_image.eval()
            octaves.append(hi)
        # ascend from the coarsest octave up, re-adding the stored detail at each level
        for octave in xrange(octave_n):
            if octave > 0:
                hi = octaves[-octave].eval()
                dummy_image = resize_image(dummy_image, hi.shape[:2]) + hi
                dummy_image = dummy_image.eval()
            for i in xrange(MAX_ITERATIONS):
                expanded_image = np.expand_dims(dummy_image, 0)
                # grad = sess.run(gradient, {input_image: expanded_image})
                grad = calc_grad_tiled(expanded_image, gradient)[0]
                # normalized step: scale by the mean absolute gradient
                dummy_image += grad * (LEARNING_RATE / (np.abs(grad).mean() + 1e-7))
                print '.',
        output = dummy_image.reshape(image_shape)
        filename = "visualization_%s_%d.jpg" % (VISUALIZE_LAYER, VISUALIZE_FEATURE)
        save_image(os.path.join(FLAGS.logs_dir, filename), output, model_params["mean_pixel"])
def main(argv=None):
    """Ensure the VGG weights are present, unpack normalization + weights, and run visualization."""
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    model_params = {}
    mean = model_data['normalization'][0][0][0]
    model_params["mean_pixel"] = np.mean(mean, axis=(0, 1))
    model_params["weights"] = np.squeeze(model_data['layers'])
    visualize_layer(model_params)


if __name__ == "__main__":
    tf.app.run()  # parses flags, then calls main()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "ImageArt/LayerVisualization.py",
"copies": "1",
"size": "6296",
"license": "mit",
"hash": 7650208080421785000,
"line_mean": 36.0352941176,
"line_max": 112,
"alpha_frac": 0.5967280813,
"autogenerated": false,
"ratio": 3.1276701440635866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4224398225363587,
"avg_score": null,
"num_lines": null
} |
__author__ = "charlie"
import numpy as np
import tensorflow as tf
import os, sys, inspect
from datetime import datetime
utils_path = os.path.abspath(
os.path.realpath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_path not in sys.path:
sys.path.insert(0, utils_path)
import TensorflowUtils as utils
import Dataset_Reader.read_LabeledFacesWild as read_LabeledFacesWild
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "Data_zoo/Faces_lfw/", "Path to dataset")
tf.flags.DEFINE_string("logs_dir", "logs/ContextInpainting_logs/", "path to logs")
tf.flags.DEFINE_integer("batch_size", "64", "batch size")
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 2000
MAX_ITERATIONS = int(1e5 + 1)
LEARNING_RATE = 1e-3
IMAGE_SIZE = 128
def encoder(dataset, train_mode):
    """Encode a batch of images into a 1024-d embedding.

    Five stride-2 conv layers (32 -> 512 channels), each followed by batch
    norm (controlled by `train_mode`) and ReLU, then one fully connected
    layer. Returns the linear embedding tensor.
    """
    with tf.variable_scope("Encoder"):
        with tf.name_scope("enc_conv1") as scope:
            W_conv1 = utils.weight_variable_xavier_initialized([3, 3, 3, 32], name="W_conv1")
            b_conv1 = utils.bias_variable([32], name="b_conv1")
            h_conv1 = utils.conv2d_strided(dataset, W_conv1, b_conv1)
            h_bn1 = utils.batch_norm(h_conv1, 32, train_mode, scope="conv1_bn")
            h_relu1 = tf.nn.relu(h_bn1)
        with tf.name_scope("enc_conv2") as scope:
            W_conv2 = utils.weight_variable_xavier_initialized([3, 3, 32, 64], name="W_conv2")
            b_conv2 = utils.bias_variable([64], name="b_conv2")
            h_conv2 = utils.conv2d_strided(h_relu1, W_conv2, b_conv2)
            h_bn2 = utils.batch_norm(h_conv2, 64, train_mode, scope="conv2_bn")
            h_relu2 = tf.nn.relu(h_bn2)
        with tf.name_scope("enc_conv3") as scope:
            W_conv3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128], name="W_conv3")
            b_conv3 = utils.bias_variable([128], name="b_conv3")
            h_conv3 = utils.conv2d_strided(h_relu2, W_conv3, b_conv3)
            h_bn3 = utils.batch_norm(h_conv3, 128, train_mode, scope="conv3_bn")
            h_relu3 = tf.nn.relu(h_bn3)
        with tf.name_scope("enc_conv4") as scope:
            W_conv4 = utils.weight_variable_xavier_initialized([3, 3, 128, 256], name="W_conv4")
            b_conv4 = utils.bias_variable([256], name="b_conv4")
            h_conv4 = utils.conv2d_strided(h_relu3, W_conv4, b_conv4)
            h_bn4 = utils.batch_norm(h_conv4, 256, train_mode, scope="conv4_bn")
            h_relu4 = tf.nn.relu(h_bn4)
        with tf.name_scope("enc_conv5") as scope:
            W_conv5 = utils.weight_variable_xavier_initialized([3, 3, 256, 512], name="W_conv5")
            b_conv5 = utils.bias_variable([512], name="b_conv5")
            h_conv5 = utils.conv2d_strided(h_relu4, W_conv5, b_conv5)
            h_bn5 = utils.batch_norm(h_conv5, 512, train_mode, scope="conv5_bn")
            h_relu5 = tf.nn.relu(h_bn5)
        with tf.name_scope("enc_fc") as scope:
            # five stride-2 convs each halve the spatial size: IMAGE_SIZE / 2**5
            image_size = IMAGE_SIZE // 32
            h_relu5_flatten = tf.reshape(h_relu5, [-1, image_size * image_size * 512])
            W_fc = utils.weight_variable([image_size * image_size * 512, 1024], name="W_fc")
            b_fc = utils.bias_variable([1024], name="b_fc")
            # linear embedding; no activation on the bottleneck
            encoder_val = tf.matmul(h_relu5_flatten, W_fc) + b_fc
    return encoder_val
def inpainter(embedding, train_mode):
    """Decode a 1024-d embedding back into a predicted image patch.

    One fully connected layer reshaped to a spatial tensor, then four
    stride-2 transposed convs (512 -> 3 channels), each but the last with
    batch norm (controlled by `train_mode`) and ReLU. Output spatial size is
    16 * (IMAGE_SIZE // 32) per side; the final layer is linear.
    """
    with tf.variable_scope("context_inpainter"):
        image_size = IMAGE_SIZE // 32  # matches the encoder's final spatial size
        with tf.name_scope("dec_fc") as scope:
            W_fc = utils.weight_variable([1024, image_size * image_size * 512], name="W_fc")
            b_fc = utils.bias_variable([image_size * image_size * 512], name="b_fc")
            h_fc = tf.nn.relu(tf.matmul(embedding, W_fc) + b_fc)
        with tf.name_scope("dec_conv1") as scope:
            # reshape the flat FC output back to a (batch, h, w, 512) feature map
            h_reshaped = tf.reshape(h_fc, tf.pack([tf.shape(h_fc)[0], image_size, image_size, 512]))
            W_conv_t1 = utils.weight_variable_xavier_initialized([3, 3, 256, 512], name="W_conv_t1")
            b_conv_t1 = utils.bias_variable([256], name="b_conv_t1")
            deconv_shape = tf.pack([tf.shape(h_reshaped)[0], 2 * image_size, 2 * image_size, 256])
            h_conv_t1 = utils.conv2d_transpose_strided(h_reshaped, W_conv_t1, b_conv_t1, output_shape=deconv_shape)
            h_bn_t1 = utils.batch_norm(h_conv_t1, 256, train_mode, scope="conv_t1_bn")
            h_relu_t1 = tf.nn.relu(h_bn_t1)
        with tf.name_scope("dec_conv2") as scope:
            W_conv_t2 = utils.weight_variable_xavier_initialized([3, 3, 128, 256], name="W_conv_t2")
            b_conv_t2 = utils.bias_variable([128], name="b_conv_t2")
            deconv_shape = tf.pack([tf.shape(h_relu_t1)[0], 4 * image_size, 4 * image_size, 128])
            h_conv_t2 = utils.conv2d_transpose_strided(h_relu_t1, W_conv_t2, b_conv_t2, output_shape=deconv_shape)
            h_bn_t2 = utils.batch_norm(h_conv_t2, 128, train_mode, scope="conv_t2_bn")
            h_relu_t2 = tf.nn.relu(h_bn_t2)
        with tf.name_scope("dec_conv3") as scope:
            W_conv_t3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128], name="W_conv_t3")
            b_conv_t3 = utils.bias_variable([64], name="b_conv_t3")
            deconv_shape = tf.pack([tf.shape(h_relu_t2)[0], 8 * image_size, 8 * image_size, 64])
            h_conv_t3 = utils.conv2d_transpose_strided(h_relu_t2, W_conv_t3, b_conv_t3, output_shape=deconv_shape)
            h_bn_t3 = utils.batch_norm(h_conv_t3, 64, train_mode, scope="conv_t3_bn")
            h_relu_t3 = tf.nn.relu(h_bn_t3)
        with tf.name_scope("dec_conv4") as scope:
            W_conv_t4 = utils.weight_variable_xavier_initialized([3, 3, 3, 64], name="W_conv_t4")
            b_conv_t4 = utils.bias_variable([3], name="b_conv_t4")
            deconv_shape = tf.pack([tf.shape(h_relu_t3)[0], 16 * image_size, 16 * image_size, 3])
            # linear output: predicted RGB patch, no batch norm or activation
            pred_image = utils.conv2d_transpose_strided(h_relu_t3, W_conv_t4, b_conv_t4, output_shape=deconv_shape)
    return pred_image
def loss(pred, real):
    """Root-sum-square reconstruction error between prediction and target, scaled per batch."""
    diff = tf.sub(pred, real)
    loss_val = tf.sqrt(2 * tf.nn.l2_loss(diff)) / FLAGS.batch_size
    tf.scalar_summary("Loss_objective", loss_val)
    return loss_val
def train(loss_val, step):
    """Adam optimizer with an exponentially decaying learning rate.

    Returns the learning-rate tensor (for logging) and the train op.
    """
    decay_steps = 0.4 * MAX_ITERATIONS
    learning_rate = tf.train.exponential_decay(LEARNING_RATE, step, decay_steps, 0.99)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.minimize(loss_val, global_step=step)
    return learning_rate, train_op
def get_next_batch(dataset, step):
    """Placeholder batch fetcher; currently yields empty image/clip lists.

    Kept for interface compatibility — batching is done by the queue
    pipeline in read_input_queue instead.
    """
    return [], []
def _read_input(filename_queue):
    """Read and decode one JPEG from the queue and build an inpainting pair.

    Returns a DataRecord with:
      image         - full IMAGE_SIZE x IMAGE_SIZE crop, float32
      clipped_image - the central IMAGE_SIZE/2 square cut from it
      input_image   - the image with that central square zeroed out
    """
    class DataRecord(object):
        pass
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    record = DataRecord()
    decoded_image = tf.image.decode_jpeg(value, channels=3)
    decoded_image.set_shape([DATASET_IMAGE_SIZE, DATASET_IMAGE_SIZE, 3])
    decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
    record.image = tf.image.resize_image_with_crop_or_pad(decoded_image_as_float, IMAGE_SIZE, IMAGE_SIZE)
    # Central square; offsets/sizes rely on Python 2 integer division of IMAGE_SIZE.
    record.clipped_image = tf.slice(record.image, [IMAGE_SIZE / 4, IMAGE_SIZE / 4, 0],
                                    [IMAGE_SIZE / 2, IMAGE_SIZE / 2, 3])
    # Pad the clipped center back to full size, then subtract it so the
    # network input has the center region blanked (zeros there).
    padded_image = tf.image.resize_image_with_crop_or_pad(record.clipped_image, IMAGE_SIZE, IMAGE_SIZE)
    record.input_image = tf.sub(record.image, padded_image)
    print record.input_image.get_shape()
    print record.clipped_image.get_shape()
    return record
def read_input_queue(filename_queue):
read_input = _read_input(filename_queue)
num_preprocess_threads = 8
min_queue_examples = int(0.4 * NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN)
print "Shuffling"
input_image, clipped_image = tf.train.shuffle_batch([read_input.input_image, read_input.clipped_image],
batch_size=FLAGS.batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * FLAGS.batch_size,
min_after_dequeue=min_queue_examples)
input_image = (input_image - 128) / 255.0
clipped_image = (clipped_image - 128) / 255.0
return input_image, clipped_image
def main(argv=None):
print "Setting up dataset..."
train_files, testing_files, validation_files = read_LabeledFacesWild.read_lfw_dataset(FLAGS.data_dir)
filename_queue = tf.train.string_input_producer(train_files)
input_image, clipped_image = read_input_queue(filename_queue)
phase_train = tf.placeholder(tf.bool)
global_step = tf.Variable(0, trainable=False)
print "Setting up inference model..."
embedding = encoder(input_image, phase_train)
tf.image_summary("Input_image", input_image, max_images=1)
pred_image = inpainter(embedding, phase_train)
tf.image_summary("Ground_truth", clipped_image, max_images=1)
tf.image_summary("Pred_image", pred_image, max_images=1)
reconst_loss = loss(pred_image, clipped_image)
learning_rate, train_op = train(reconst_loss, global_step)
print "Setting up summary and saver..."
summary_op = tf.merge_all_summaries()
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print "Model restored!"
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess, coord)
try:
for step in xrange(MAX_ITERATIONS):
while coord.should_stop():
print "Coordinator should stop!"
break
feed_dict = {phase_train: True}
if step % 100 == 0:
train_loss, summary_str = sess.run([reconst_loss, summary_op], feed_dict=feed_dict)
print "Step: %d, Train loss: %g" % (step, train_loss)
summary_writer.add_summary(summary_str, global_step=step)
if step % 1000 == 0:
lr = sess.run(learning_rate)
print "%s ===> Learning Rate: %f" % (datetime.now(), lr)
saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=step)
sess.run(train_op, feed_dict=feed_dict)
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
finally:
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
if __name__ == "__main__":
    # DATASET_IMAGE_SIZE must be bound before main() builds the input pipeline.
    DATASET_IMAGE_SIZE = read_LabeledFacesWild.IMAGE_SIZE
    tf.app.run()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "ContextEncoder/ContextInpainting.py",
"copies": "1",
"size": "10794",
"license": "mit",
"hash": -6370957698208667000,
"line_mean": 45.7272727273,
"line_max": 115,
"alpha_frac": 0.6081156198,
"autogenerated": false,
"ratio": 3.085763293310463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9181969393408187,
"avg_score": 0.0023819039404552444,
"num_lines": 231
} |
__author__ = 'Charlie'
import random
import os, sys
import tensorflow as tf
from datetime import datetime
import numpy as np
from six.moves import urllib
import tarfile
import csv
import hashlib
from tensorflow.python.client import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
# Command-line flags: data locations, training hyper-parameters, run mode.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_image_dir', "Yelp_Data/train_photos", """ Path to train image directory""")
tf.app.flags.DEFINE_string('train_data_dir', "Yelp_Data/train_data", """ Path to other train data""")
tf.app.flags.DEFINE_string('test_image_dir', "Yelp_Data/test_photos", """ Path to test image directory""")
tf.app.flags.DEFINE_string('test_data_dir', "Yelp_Data/test_data", """ Path to other test data""")
tf.app.flags.DEFINE_string('train_dir', 'Yelp_logs/',
                           """Where to save the trained graph's labels.""")
tf.app.flags.DEFINE_integer('batch_size', 512,
                            """How many images to train on at a time.""")
tf.app.flags.DEFINE_integer('validation_percentage', 10,
                            """What percentage of images to use as a validation set.""")
tf.app.flags.DEFINE_integer('train_steps', 100000, """No. of training steps """)
# File-system cache locations.
tf.app.flags.DEFINE_string('model_dir', 'Models_zoo/imagenet',
                           """Path to classify_image_graph_def.pb, """)
tf.app.flags.DEFINE_string(
    'bottleneck_dir', 'Yelp_Data/train_bottleneck',
    """Path to cache bottleneck layer values as files.""")
tf.app.flags.DEFINE_string('mode', "train", """Mode: train / test""")
# Pre-trained Inception model: download URL, input geometry, and the
# graph tensor names used to extract bottleneck features.
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
IMAGE_SIZE = 299
IMAGE_DEPTH = 3
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape'
BOTTLENECK_TENSOR_SIZE = 2048
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents'
# Yelp labels are a 9-way multi-label classification.
NUM_CLASSES = 9
def maybe_download_and_extract():
    """Download and unpack the Inception model tarball if not already cached."""
    dest_directory = FLAGS.model_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    archive_name = DATA_URL.split('/')[-1]
    archive_path = os.path.join(dest_directory, archive_name)
    if not os.path.exists(archive_path):
        def _progress(count, block_size, total_size):
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' % (archive_name, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        archive_path, _ = urllib.request.urlretrieve(DATA_URL, archive_path, reporthook=_progress)
        print()
        statinfo = os.stat(archive_path)
        print('Succesfully downloaded', archive_name, statinfo.st_size, 'bytes.')
    tarfile.open(archive_path, 'r:gz').extractall(dest_directory)
def create_inception_graph():
    """Load the pre-trained Inception GraphDef into the default graph."""
    graph_path = os.path.join(FLAGS.model_dir, 'classify_image_graph_def.pb')
    with tf.gfile.FastGFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
def read_csv_files():
    """Load the Yelp label maps.

    Returns (photo_dict, biz_dict): photo id -> business id, and
    business id -> multi-hot label row of shape (1, NUM_CLASSES).
    """
    def create_labels(index_string):
        # index_string is a space-separated list of class indices (may be empty).
        label = np.zeros((1, NUM_CLASSES), dtype=np.float32)
        parts = index_string.split(" ")
        if index_string and parts[0]:
            label[:, map(int, parts)] = 1
        return label
    with open(os.path.join(FLAGS.train_data_dir, "train_photo_to_biz_ids.csv"), 'r') as photo_map_file:
        photo_map_file.next()  # skip header row
        photo_dict = {row[0]: row[1] for row in csv.reader(photo_map_file)}
    with open(os.path.join(FLAGS.train_data_dir, "train.csv"), 'r') as biz_map_file:
        biz_map_file.next()  # skip header row
        biz_dict = {row[0]: create_labels(row[1]) for row in csv.reader(biz_map_file)}
    return photo_dict, biz_dict
def ensure_name_has_port(tensor_name):
    """Return tensor_name with an explicit output index, defaulting to ':0'."""
    return tensor_name if ':' in tensor_name else tensor_name + ':0'
def get_image_records():
    """Collect train/eval ImageRecords from FLAGS.train_image_dir.

    Each record carries .filename and .image_name (filename without
    extension). Roughly FLAGS.validation_percentage percent of the files
    land in the eval list via independent random draws.

    Returns:
        (train_records, eval_records) lists of ImageRecord.
    """
    class ImageRecord:
        pass
    train_records = []
    eval_records = []
    for _, _, files in os.walk(FLAGS.train_image_dir):
        filenames = list(files)
        filenames_length = len(filenames)
        eval_percentage_threshold = (FLAGS.validation_percentage / 100.0) * filenames_length
        # Renamed loop variable from `file`, which shadowed the builtin.
        for filename in filenames:
            record = ImageRecord()
            record.filename = filename
            record.image_name = os.path.splitext(filename)[0]
            # Independent draw per file approximates the requested split.
            if random.randrange(filenames_length) < eval_percentage_threshold:
                eval_records.append(record)
            else:
                train_records.append(record)
    return train_records, eval_records
def get_train_image_path(image_record):
    """Absolute path of the training photo for this record."""
    return os.path.join(FLAGS.train_image_dir, image_record.filename)
def get_bottleneck_path(image_record):
    """Path of the cached bottleneck-values text file for this record."""
    return os.path.join(FLAGS.bottleneck_dir, image_record.image_name + '.txt')
def get_train_image_data(image_record):
    """Read the raw JPEG bytes for a training image.

    Returns the undecoded file contents; the Inception graph's
    DecodeJpeg/contents input takes the raw string directly.
    """
    # Context manager closes the handle promptly; the original leaked it
    # until garbage collection. (Also dropped dead commented-out code.)
    with gfile.FastGFile(get_train_image_path(image_record), 'rb') as f:
        return f.read()
def get_or_create_bottleneck_value(sess, image_record):
    """Return the Inception bottleneck vector for an image, caching on disk.

    Computes and writes the cache file on a miss; the value is always read
    back from the file so hits and misses take the same code path.
    """
    cache_path = get_bottleneck_path(image_record)
    if not os.path.exists(cache_path):
        # Cache miss: run the raw JPEG through Inception up to the bottleneck.
        bottleneck_tensor = sess.graph.get_tensor_by_name(ensure_name_has_port(BOTTLENECK_TENSOR_NAME))
        jpeg_data = get_train_image_data(image_record)
        values = np.squeeze(
            sess.run(bottleneck_tensor, feed_dict={ensure_name_has_port(JPEG_DATA_TENSOR_NAME): jpeg_data}))
        with open(cache_path, 'w') as cache_file:
            cache_file.write(','.join(str(v) for v in values))
    with open(cache_path, 'r') as cache_file:
        return [float(v) for v in cache_file.read().split(',')]
def create_bottleneck_cache(sess, records_list):
bottlenecks_created = 0
for image_record in records_list:
try:
get_or_create_bottleneck_value(sess, image_record)
bottlenecks_created += 1
if bottlenecks_created % 5000 == 0:
print "%s bottlenecks created: %d" % (datetime.now(), bottlenecks_created)
except:
print "Unable to use image: %s" % image_record.image_name
records_list.remove(image_record)
def get_random_bottlenecks(sess, records_list, batch_size, photo_biz_dict, biz_label_dict):
    """Sample a random batch (with replacement) of bottlenecks and labels.

    Labels are looked up photo -> business -> multi-hot label row.
    """
    bottlenecks = []
    labels = []
    for _ in range(batch_size):
        image_record = records_list[random.randrange(len(records_list))]
        bottlenecks.append(get_or_create_bottleneck_value(sess, image_record))
        labels.append(biz_label_dict[photo_biz_dict[image_record.image_name]])
    return bottlenecks, np.squeeze(labels)
def inference(graph):
    """Two fully connected layers on top of the Inception bottleneck.

    Returns unscaled logits of width NUM_CLASSES.
    """
    bottleneck_tensor = graph.get_tensor_by_name(ensure_name_has_port(BOTTLENECK_TENSOR_NAME))
    # Hidden layer: bottleneck -> bottleneck-sized ReLU features.
    hidden_w = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, BOTTLENECK_TENSOR_SIZE], stddev=0.001),
                           name='pre_final_layer_weights')
    tf.histogram_summary(hidden_w.name, hidden_w)
    hidden_b = tf.Variable(tf.zeros([BOTTLENECK_TENSOR_SIZE]), name='pre_final_layer_biases')
    tf.histogram_summary(hidden_b.name, hidden_b)
    hidden = tf.nn.relu(tf.nn.bias_add(tf.matmul(bottleneck_tensor, hidden_w), hidden_b))
    # Output layer: features -> NUM_CLASSES logits.
    out_w = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, NUM_CLASSES], stddev=0.001),
                        name='final_layer_weights')
    tf.histogram_summary(out_w.name, out_w)
    out_b = tf.Variable(tf.zeros([NUM_CLASSES]), name='final_layer_biases')
    tf.histogram_summary(out_b.name, out_b)
    return tf.nn.bias_add(tf.matmul(hidden, out_w), out_b)
def losses(logits_linear, labels):
    """Mean softmax cross-entropy between logits and multi-hot labels."""
    per_example = tf.nn.softmax_cross_entropy_with_logits(logits_linear, labels, name="cross_entropy")
    mean_loss = tf.reduce_mean(per_example)
    tf.scalar_summary("Loss", mean_loss)
    return mean_loss
def train(loss, global_step):
    """Single Adam step at a fixed 1e-6 learning rate."""
    optimizer = tf.train.AdamOptimizer(1e-6)
    return optimizer.minimize(loss, global_step=global_step)
def evaluation(logits_linear, ground_truth):
    """Mean softmax cross-entropy on eval data, exported as a summary."""
    per_example = tf.nn.softmax_cross_entropy_with_logits(logits_linear, ground_truth, name="eval_entropy")
    eval_step = tf.reduce_mean(per_example)
    tf.scalar_summary("Eval_entropy", eval_step)
    return eval_step
def main(argv=None):
    """Fine-tune a small classifier head on cached Inception bottlenecks.

    Downloads the Inception model, loads the Yelp CSV label maps, caches
    bottleneck vectors for every train/eval photo, then trains the head
    defined in inference(), periodically evaluating and checkpointing.
    """
    maybe_download_and_extract()
    photo_biz_dict, biz_label_dict = read_csv_files()
    train_image_records, eval_image_records = get_image_records()
    with tf.Session() as sess:
        create_inception_graph()
        global_step = tf.Variable(0, trainable=False)
        print "Creating bottleneck cache for training images..."
        create_bottleneck_cache(sess, train_image_records)
        print "Creating bottleneck cache for eval images..."
        create_bottleneck_cache(sess, eval_image_records)
        logits_linear = inference(sess.graph)
        print "Inference"
        label_placeholder = tf.placeholder(tf.float32, (None, NUM_CLASSES), name="ground_truth_tensor")
        loss = losses(logits_linear, label_placeholder)
        print "Loss"
        train_op = train(loss, global_step)
        print "Train"
        eval_op = evaluation(logits_linear, label_placeholder)
        # Training feeds cached bottleneck vectors straight into this tensor,
        # bypassing the Inception image-decoding pipeline.
        bottleneck_tensor = sess.graph.get_tensor_by_name(ensure_name_has_port(BOTTLENECK_TENSOR_NAME))
        summary_op = tf.merge_all_summaries()
        saver = tf.train.Saver(tf.all_variables())
        init = tf.initialize_all_variables()
        sess.run(init)
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, graph_def=sess.graph_def)
        for step in xrange(FLAGS.train_steps):
            train_bottlenecks, train_labels = get_random_bottlenecks(sess, train_image_records, FLAGS.batch_size,
                                                                     photo_biz_dict, biz_label_dict)
            train_feed = {bottleneck_tensor: train_bottlenecks, label_placeholder: train_labels}
            sess.run(train_op, feed_dict=train_feed)
            if step % 100 == 0:
                cross_entropy, summary_str = sess.run([loss, summary_op], feed_dict=train_feed)
                summary_writer.add_summary(summary_str, step)
                str_log = '%s step:%d, Train Cross Entropy: %0.2f' % (datetime.now(), step, cross_entropy)
                print str_log
            if step % 5000 == 0:
                # Evaluate on the entire eval split, then checkpoint.
                eval_bottlenecks, eval_labels = get_random_bottlenecks(sess, eval_image_records, len(eval_image_records),
                                                                       photo_biz_dict, biz_label_dict)
                eval_entropy = sess.run(eval_op,
                                        feed_dict={bottleneck_tensor: eval_bottlenecks, label_placeholder: eval_labels})
                print "Eval Entropy %0.2f" % eval_entropy
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
if __name__ == "__main__":
    # tf.app.run parses FLAGS and then calls main().
    tf.app.run()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "Misc/YelpRestaurantClassification2.py",
"copies": "1",
"size": "11897",
"license": "mit",
"hash": 4436966991620708400,
"line_mean": 38.1348684211,
"line_max": 121,
"alpha_frac": 0.6482306464,
"autogenerated": false,
"ratio": 3.4484057971014495,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9573810174377481,
"avg_score": 0.004565253824793764,
"num_lines": 304
} |
__author__ = 'Charlie'
import tensorflow as tf
import os, sys
from six.moves import urllib
import tarfile
import time
from datetime import datetime
# Command-line configuration for the deblurring experiment.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', 'CIFAR10_Data/cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_string('train_dir', 'Deblurring_logs/Deblurring_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
# CIFAR-10 dataset constants.
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
IMAGE_SIZE = 32
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def activation_summary(x):
    """Attach histogram and sparsity summaries for an activation tensor."""
    tag = x.op.name
    tf.histogram_summary(tag + '/activations', x)
    tf.scalar_summary(tag + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
    """Create (or fetch) a variable pinned to CPU memory.

    Args:
        name: name of the variable
        shape: list of ints
        initializer: initializer for the variable
    Returns:
        Variable tensor placed on /cpu:0
    """
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer)
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Create a truncated-normal-initialized variable, optionally with L2 decay.

    The decay term wd * l2_loss(var) is added to the 'losses' collection
    only when wd is truthy; note that wd=0.0 (as the callers here pass)
    therefore skips adding a zero-valued decay op.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of the truncated Gaussian
        wd: L2 weight-decay multiplier, or None/0.0 for no decay
    Returns:
        Variable tensor
    """
    var = _variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        decay_term = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', decay_term)
    return var
def read_cifar10(filename_queue):
    """Read one CIFAR-10 record and synthesize a blurred copy of it.

    Returns a CIFAR10Record with:
      uint8image  - original [height, width, depth] uint8 image
      noise_image - the same image blurred by a random-strength 5x5 convolution
    """
    class CIFAR10Record(object):
        pass
    result = CIFAR10Record()
    # CIFAR-10 binary format: 1 label byte followed by a 32x32x3 image stored
    # depth-major. See http://www.cs.toronto.edu/~kriz/cifar.html.
    label_bytes = 1  # 2 for CIFAR-100
    result.height = IMAGE_SIZE
    result.width = IMAGE_SIZE
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes
    # No header/footer in the CIFAR-10 format, so header_bytes and
    # footer_bytes stay at their default of 0.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)
    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_bytes = tf.decode_raw(value, tf.uint8)
    # Skip the label byte; reshape [depth * height * width] -> [depth, height,
    # width], then transpose to [height, width, depth].
    depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),
                             [result.depth, result.height, result.width])
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    image4d = tf.cast(tf.reshape(result.uint8image, [-1, result.height, result.width, result.depth]), dtype=tf.float32)
    # Random-strength blur kernel.
    W = tf.truncated_normal((5, 5, 3, 3), stddev=tf.random_uniform([1]))
    # Bug fix: the original called conv2d_basic(), which is neither defined nor
    # imported anywhere in this file (NameError at graph build time). Inline
    # the equivalent stride-1 SAME convolution; SAME padding is required so the
    # reshape back to [height, width, depth] below is valid.
    blurred = tf.nn.conv2d(image4d, W, strides=[1, 1, 1, 1], padding="SAME")
    result.noise_image = tf.reshape(blurred, [result.height, result.width, result.depth])
    return result
def maybe_download_and_extract():
    """Fetch and unpack the CIFAR-10 binary tarball if it is not cached."""
    target_dir = FLAGS.data_dir
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    archive_name = DATA_URL.split('/')[-1]
    archive_path = os.path.join(target_dir, archive_name)
    if not os.path.exists(archive_path):
        def _report(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (archive_name,
                                                             float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        archive_path, _ = urllib.request.urlretrieve(DATA_URL, archive_path,
                                                     reporthook=_report)
        print()
        statinfo = os.stat(archive_path)
        print('Successfully downloaded', archive_name, statinfo.st_size, 'bytes.')
    tarfile.open(archive_path, 'r:gz').extractall(target_dir)
def inputs():
    """Build shuffled (noisy, reference) CIFAR-10 training batches."""
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    # Queue of shard filenames feeding the record reader.
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = read_cifar10(filename_queue)
    thread_count = 16
    min_examples = int(0.4 * NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN)
    input_images, ref_images = tf.train.shuffle_batch(
        [read_input.noise_image, read_input.uint8image],
        batch_size=FLAGS.batch_size, num_threads=thread_count,
        capacity=min_examples + 3 * FLAGS.batch_size,
        min_after_dequeue=min_examples)
    tf.image_summary("Input_Noise_images", input_images)
    tf.image_summary("Ref_images", ref_images)
    return input_images, ref_images
def inference(images):
    """Three-layer convolutional net mapping noisy images to deblurred RGB.

    conv(5x5,64) + LRN -> conv(1x1,32) + LRN -> conv(5x5,3) output.
    """
    # Bug fix: conv2d_basic() is neither defined nor imported in this file
    # (NameError at graph construction). Use the equivalent stride-1
    # SAME-padded convolution directly so spatial size is preserved.
    def _conv(x, kernel):
        return tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding="SAME")

    with tf.variable_scope("conv1") as scope:
        kernel = _variable_with_weight_decay("weights", [5, 5, 3, 64], stddev=1e-4, wd=0.0)
        conv = _conv(images, kernel)
        bias = _variable_on_cpu("bias", [64], tf.constant_initializer(0.0))
        h_conv1 = tf.nn.relu(conv + bias, name=scope.name)
        activation_summary(h_conv1)
    # norm1: local response normalization, as in the CIFAR-10 tutorial net.
    norm1 = tf.nn.lrn(h_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm1')
    with tf.variable_scope("conv2") as scope:
        kernel = _variable_with_weight_decay("weights", [1, 1, 64, 32], stddev=1e-4, wd=0.0)
        conv = _conv(norm1, kernel)
        bias = _variable_on_cpu("bias", [32], tf.constant_initializer(0.0))
        h_conv2 = tf.nn.relu(conv + bias, name=scope.name)
        activation_summary(h_conv2)
    # norm2
    norm2 = tf.nn.lrn(h_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm2')
    with tf.variable_scope("output") as scope:
        kernel = _variable_with_weight_decay("weights", [5, 5, 32, 3], stddev=1e-4, wd=0.0)
        conv = _conv(norm2, kernel)
        bias = _variable_on_cpu("bias", [3], tf.constant_initializer(0.0))
        result = tf.nn.bias_add(conv, bias, name=scope.name)
    return result
def loss(pred, ref):
    """Per-pixel-normalized L2 loss plus any terms in the 'losses' collection."""
    square_error = tf.nn.l2_loss(tf.sub(pred, ref))
    normalizer = 3 * IMAGE_SIZE * IMAGE_SIZE
    l2_loss = tf.div(tf.cast(square_error, dtype=tf.float32), normalizer, name="L2_Loss")
    tf.add_to_collection("losses", l2_loss)
    return tf.add_n(tf.get_collection("losses"), name="Total_loss")
def train(total_loss, global_step):
    """Adam step at a fixed 1e-3 LR; also records the total-loss summary."""
    tf.scalar_summary("Total_loss", total_loss)
    optimizer = tf.train.AdamOptimizer(1e-3)
    return optimizer.minimize(total_loss, global_step=global_step)
def main(argv=None):
    """Train the deblurring network on CIFAR-10 with synthetic blur.

    Downloads the dataset if needed, rebuilds train_dir from scratch, then
    runs FLAGS.max_steps optimization steps, logging throughput/loss,
    writing summaries, and checkpointing periodically.
    """
    maybe_download_and_extract()
    # Start each run from a clean log/checkpoint directory.
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        input_images, ref_images = inputs()
        pred_images = inference(input_images)
        total_loss = loss(pred_images, tf.cast(ref_images, dtype=tf.float32))
        train_op = train(total_loss, global_step)
        saver = tf.train.Saver(tf.all_variables())
        summary_op = tf.merge_all_summaries()
        init = tf.initialize_all_variables()
        # Bug fix: the original created tf.Session() without ever closing it;
        # the context manager releases the session's resources on exit.
        with tf.Session() as sess:
            sess.run(init)
            tf.train.start_queue_runners(sess=sess)
            summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, graph_def=sess.graph_def)
            for step in xrange(FLAGS.max_steps):
                start_time = time.time()
                _, loss_value = sess.run([train_op, total_loss])
                duration = time.time() - start_time
                if step % 10 == 0:
                    num_examples_per_step = FLAGS.batch_size
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = float(duration)
                    format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                                  'sec/batch)')
                    print (format_str % (datetime.now(), step, loss_value,
                                         examples_per_sec, sec_per_batch))
                if step % 100 == 0:
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str, step)
                # Save the model checkpoint periodically.
                if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                    checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
if __name__ == "__main__":
    # tf.app.run parses FLAGS and then calls main().
    tf.app.run()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "Misc/Deblurring.py",
"copies": "1",
"size": "10291",
"license": "mit",
"hash": -6244239061862444,
"line_mean": 38.1292775665,
"line_max": 119,
"alpha_frac": 0.6141288505,
"autogenerated": false,
"ratio": 3.5461750516884907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9635449369233351,
"avg_score": 0.004970906591027896,
"num_lines": 263
} |
__author__ = 'Charlie'
# Placeholder for implementation of justins generative neural style
import tensorflow as tf
import numpy as np
import scipy.io
import scipy.misc
from datetime import datetime
import os, sys, inspect
# Make the repository root importable so TensorflowUtils can be loaded
# regardless of the current working directory.
utils_path = os.path.realpath(
    os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_path not in sys.path:
    sys.path.insert(0, utils_path)
import TensorflowUtils as utils
# Command-line configuration.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("model_dir", "Models_zoo/", """Path to the VGG model mat file""")
tf.flags.DEFINE_string("data_dir", "Data_zoo/CIFAR10_data/", """Path to the CIFAR10 data""")
tf.flags.DEFINE_string("style_path", "", """Path to style image to use""")
tf.flags.DEFINE_string("mode", "train", "Network mode train/ test")
tf.flags.DEFINE_string("test_image_path", "", "Path to test image - read only if mode is test")
tf.flags.DEFINE_integer("batch_size", "128", "Batch size for training")
tf.flags.DEFINE_string("log_dir", "logs/GenerativeNeural_style/", """Path to save logs and checkpoint if needed""")
# Model/data sources and loss hyper-parameters for the style transfer net.
MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
CONTENT_WEIGHT = 2e-2
CONTENT_LAYER = 'relu2_2'
STYLE_WEIGHT = 2e-2
STYLE_LAYERS = ('relu1_2', 'relu2_2', 'relu3_3')
VARIATION_WEIGHT = 1e-3
LEARNING_RATE = 1e-3
MAX_ITERATIONS = 20001
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 20000
IMAGE_SIZE = 32
def get_model_data():
    """Load the VGG-19 matconvnet weights; raises IOError if the file is absent."""
    mat_file = os.path.join(FLAGS.model_dir, MODEL_URL.split("/")[-1])
    if not os.path.exists(mat_file):
        raise IOError("VGG Model not found!")
    return scipy.io.loadmat(mat_file)
def vgg_net(weights, image):
    """Build the truncated VGG-19 feature extractor from matconvnet weights.

    Returns a dict mapping each layer name to its output tensor.
    """
    # Layers up to relu3_3; deeper VGG layers are intentionally unused here.
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3'
    )
    net = {}
    current = image
    for idx, name in enumerate(layers):
        layer_kind = name[:4]
        if layer_kind == 'conv':
            kernels, bias = weights[idx][0][0][0][0]
            # matconvnet stores kernels [width, height, in, out];
            # tensorflow expects [height, width, in, out].
            current = utils.conv2d_basic(current,
                                         np.transpose(kernels, (1, 0, 2, 3)),
                                         bias.reshape(-1))
        elif layer_kind == 'relu':
            current = tf.nn.relu(current)
        elif layer_kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current
    assert len(net) == len(layers)
    return net
def read_cifar10(model_params, filename_queue):
    """Read one CIFAR-10 image and precompute its VGG content features.

    model_params carries 'mean_pixel' and 'weights' extracted from the VGG
    .mat file (see main). The returned record holds the normalized image
    and the CONTENT_LAYER activations of its 255-scaled version.
    """
    class CIFAR10Record(object):
        pass
    result = CIFAR10Record()
    label_bytes = 1  # 2 for CIFAR-100
    result.height = IMAGE_SIZE
    result.width = IMAGE_SIZE
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)
    record_bytes = tf.decode_raw(value, tf.uint8)
    # Skip the label byte; reshape depth-major, then transpose to HWC below.
    depth_major = tf.cast(tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),
                                     [result.depth, result.height, result.width]), tf.float32)
    # process_image presumably subtracts mean_pixel (see TensorflowUtils) —
    # TODO confirm; result is then scaled down to roughly [0, 1].
    result.image = utils.process_image(tf.transpose(depth_major, [1, 2, 0]), model_params['mean_pixel']) / 255.0
    # The VGG pass runs on the 255-scaled, batch-of-1 version of the image.
    extended_image = 255 * tf.reshape(result.image, (1, result.height, result.width, result.depth))
    result.net = vgg_net(model_params["weights"], extended_image)
    content_feature = result.net[CONTENT_LAYER]
    result.content_features = content_feature
    return result
def get_image(image_dir):
    """Load an image from disk as float32 with a leading batch axis of 1."""
    image = scipy.misc.imread(image_dir).astype(np.float32)
    return image.reshape((1,) + image.shape)
def read_input(model_params):
    """Build network inputs according to FLAGS.mode.

    test mode: returns (processed test image, None).
    train mode: returns shuffled CIFAR-10 batches of
    (normalized image, VGG content features).
    """
    if FLAGS.mode == "test":
        content_image = get_image(FLAGS.test_image_path)
        print content_image.shape
        # Same normalization as the training pipeline in read_cifar10.
        processed_content = utils.process_image(content_image, model_params["mean_pixel"]).astype(np.float32) / 255.0
        return processed_content, None
    else:
        data_directory = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
        filenames = [os.path.join(data_directory, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
        for f in filenames:
            if not tf.gfile.Exists(f):
                raise ValueError('Failed to find file: ' + f)
        filename_queue = tf.train.string_input_producer(filenames)
        print "Reading cifar10 data"
        read_input = read_cifar10(model_params, filename_queue)
        num_preprocess_threads = 8
        min_queue_examples = int(0.4 * NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN)
        print "Shuffling train batch"
        input_images, input_content_features = tf.train.shuffle_batch([read_input.image, read_input.content_features],
                                                                      batch_size=FLAGS.batch_size,
                                                                      num_threads=num_preprocess_threads,
                                                                      capacity=min_queue_examples + 3 * FLAGS.batch_size,
                                                                      min_after_dequeue=min_queue_examples)
        return input_images, input_content_features
def inference_res(input_image):
    """Residual generator: conv + LRN, four bottleneck units, tanh conv output."""
    conv_w = utils.weight_variable([3, 3, 3, 32])
    conv_b = utils.bias_variable([32])
    features = tf.nn.relu(utils.conv2d_basic(input_image, conv_w, conv_b))
    normed = utils.local_response_norm(features)
    # Two down-striding bottlenecks followed by two up-striding ones.
    res = utils.bottleneck_unit(normed, 16, 16, down_stride=True, name="res_1")
    res = utils.bottleneck_unit(res, 8, 8, down_stride=True, name="res_2")
    res = utils.bottleneck_unit(res, 16, 16, up_stride=True, name="res_3")
    res = utils.bottleneck_unit(res, 32, 32, up_stride=True, name="res_4")
    out_w = utils.weight_variable([3, 3, 32, 3])
    out_b = utils.bias_variable([3])
    return tf.nn.tanh(utils.conv2d_basic(res, out_w, out_b))
def inference_strided(input_image):
    """Image-transform network: 9x9 conv, two strided downsampling convs,
    two transpose-conv upsampling layers, and a 9x9 tanh output conv.

    Refactored to remove the six-fold duplication of weight/bias/summary
    creation; summary tags ("W1".."b6") are unchanged.
    """
    def _params(shape, bias_size, tag):
        # One weight/bias pair with histogram summaries under the
        # original tags.
        W = utils.weight_variable(shape)
        b = utils.bias_variable([bias_size])
        tf.histogram_summary("W" + tag, W)
        tf.histogram_summary("b" + tag, b)
        return W, b

    W1, b1 = _params([9, 9, 3, 32], 32, "1")
    h_conv1 = tf.nn.relu(utils.conv2d_basic(input_image, W1, b1))
    W2, b2 = _params([3, 3, 32, 64], 64, "2")
    h_conv2 = tf.nn.relu(utils.conv2d_strided(h_conv1, W2, b2))
    W3, b3 = _params([3, 3, 64, 128], 128, "3")
    h_conv3 = tf.nn.relu(utils.conv2d_strided(h_conv2, W3, b3))
    # Upsampling: transpose-conv weights are [h, w, out_channels, in_channels],
    # so the bias size matches the third dimension here.
    W4, b4 = _params([3, 3, 64, 128], 64, "4")
    h_conv4 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv3, W4, b4))
    W5, b5 = _params([3, 3, 32, 64], 32, "5")
    h_conv5 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv4, W5, b5))
    W6, b6 = _params([9, 9, 32, 3], 3, "6")
    pred_image = tf.nn.tanh(utils.conv2d_basic(h_conv5, W6, b6))
    return pred_image
def test(sess, output_image, mean_pixel):
    """Run the generator once and save the result to output.jpg."""
    generated = sess.run(output_image)
    restored = utils.unprocess_image(generated.reshape(generated.shape[1:]), mean_pixel).astype(np.float32)
    scipy.misc.imsave("output.jpg", restored)
def main(argv=None):
    """Build the neural-style graph, then run either test or training mode."""
    # Fetch the pretrained VGG model and the dataset if not already present.
    utils.maybe_download_and_extract(FLAGS.model_dir, MODEL_URL)
    utils.maybe_download_and_extract(FLAGS.data_dir, DATA_URL, is_tarfile=True)
    model_data = get_model_data()
    model_params = {}
    # Mean pixel of the VGG training data, used to zero-centre inputs.
    mean = model_data['normalization'][0][0][0]
    model_params['mean_pixel'] = np.mean(mean, axis=(0, 1))
    model_params['weights'] = np.squeeze(model_data['layers'])
    style_image = get_image(FLAGS.style_path)
    processed_style = utils.process_image(style_image, model_params['mean_pixel']).astype(np.float32)
    style_net = vgg_net(model_params['weights'], processed_style)
    tf.image_summary("Style_Image", style_image)

    with tf.Session() as sess:
        # Precompute the style image's Gram matrices once, outside training.
        print "Evaluating style features..."
        style_features = {}
        for layer in STYLE_LAYERS:
            features = style_net[layer].eval()
            features = np.reshape(features, (-1, features.shape[3]))
            style_gram = np.matmul(features.T, features) / features.size
            style_features[layer] = style_gram

        print "Reading image inputs"
        input_image, input_content = read_input(model_params)

        print "Setting up inference"
        # Generator output is in [-1, 1] (tanh); scale to pixel range.
        output_image = 255 * inference_strided(input_image)

        print "Creating saver.."
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(FLAGS.log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored..."

        # In test mode only an image is produced; no losses are built.
        if FLAGS.mode == "test":
            test(sess, output_image, model_params['mean_pixel'])
            return

        # Content loss: L2 distance between generated and content activations.
        print "Calculating content loss..."
        image_net = vgg_net(model_params['weights'], output_image)
        content_loss = CONTENT_WEIGHT * tf.nn.l2_loss(image_net[CONTENT_LAYER] - input_content) / utils.get_tensor_size(
            input_content)
        print content_loss.get_shape()
        tf.scalar_summary("Content_loss", content_loss)

        # Style loss: Gram-matrix distance accumulated over the style layers.
        print "Calculating style loss..."
        style_losses = []
        for layer in STYLE_LAYERS:
            image_layer = image_net[layer]
            _, height, width, number = map(lambda i: i.value, image_layer.get_shape())
            size = height * width * number
            feats = tf.reshape(image_layer, (-1, number))
            image_gram = tf.matmul(tf.transpose(feats), feats) / size
            style_losses.append(0.5 * tf.nn.l2_loss(image_gram - style_features[layer]))
        style_loss = STYLE_WEIGHT * reduce(tf.add, style_losses)
        print style_loss.get_shape()
        tf.scalar_summary("Style_loss", style_loss)

        # Total-variation loss: penalises differences between neighbouring
        # pixels to encourage spatial smoothness.
        print "Calculating variational loss..."
        tv_y_size = utils.get_tensor_size(output_image[:, 1:, :, :])
        tv_x_size = utils.get_tensor_size(output_image[:, :, 1:, :])
        tv_loss = VARIATION_WEIGHT * (
            (tf.nn.l2_loss(output_image[:, 1:, :, :] - output_image[:, :IMAGE_SIZE - 1, :, :]) /
             tv_y_size) +
            (tf.nn.l2_loss(output_image[:, :, 1:, :] - output_image[:, :, :IMAGE_SIZE - 1, :]) /
             tv_x_size))
        print tv_loss.get_shape()
        tf.scalar_summary("Variation_loss", tv_loss)

        loss = content_loss + style_loss + tv_loss
        tf.scalar_summary("Total_loss", loss)

        print "Setting up train operation..."
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

        print "Setting up summary write"
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph_def)
        summary_op = tf.merge_all_summaries()

        print "initializing all variables"
        sess.run(tf.initialize_all_variables())
        # Input pipeline uses queue runners; start them before training.
        tf.train.start_queue_runners(sess=sess)

        print "Running training..."
        for step in range(MAX_ITERATIONS):
            # Every 10 steps: log total loss and write summaries.
            if step % 10 == 0:
                this_loss, summary_str = sess.run([loss, summary_op])
                summary_writer.add_summary(summary_str, global_step=step)
                print('%s : Step %d' % (datetime.now(), step)),
                print('total loss: %g' % this_loss)
                # Every 100 steps: also log component losses and checkpoint.
                if step % 100 == 0:
                    print ("Step %d" % step),
                    print(' content loss: %g' % content_loss.eval()),
                    print(' style loss: %g' % style_loss.eval()),
                    print(' tv loss: %g' % tv_loss.eval())
                    saver.save(sess, FLAGS.log_dir + "model.ckpt", global_step=step)
            sess.run(train_step)
# Entry point: tf.app.run() parses flags and then calls main().
if __name__ == "__main__":
    tf.app.run()
| {
"repo_name": "shekkizh/TensorflowProjects",
"path": "GenerativeNetworks/GenerativeNeuralStyle.py",
"copies": "1",
"size": "12757",
"license": "mit",
"hash": -117780039498001390,
"line_mean": 37.6575757576,
"line_max": 121,
"alpha_frac": 0.6080583209,
"autogenerated": false,
"ratio": 3.2230924709449216,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43311507918449216,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charlie'
# Utils used with the TensorFlow implementation
import tensorflow as tf
import numpy as np
import os, sys
from six.moves import urllib
import tarfile
import zipfile
from skimage import io, color
import scipy.io
def maybe_download_and_extract(dir_path, url_name, is_tarfile=False, is_zipfile=False):
    """Download url_name into dir_path if absent, optionally extracting it.

    :param dir_path: destination directory (created if missing)
    :param url_name: URL to fetch; the last path component is the filename
    :param is_tarfile: extract the download as a gzipped tar archive
    :param is_zipfile: extract the download as a zip archive
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    filename = url_name.split('/')[-1]
    filepath = os.path.join(dir_path, filename)
    # Skip download (and extraction) entirely when the file is already there.
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()

        filepath, _ = urllib.request.urlretrieve(url_name, filepath, reporthook=_progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
        if is_tarfile:
            # Close the archive deterministically instead of leaking the handle.
            with tarfile.open(filepath, 'r:gz') as tar:
                tar.extractall(dir_path)
        elif is_zipfile:
            with zipfile.ZipFile(filepath) as zf:
                zf.extractall(dir_path)
def get_model_data(dir_path, model_url):
    """Fetch (if needed) and load the VGG .mat model file from dir_path."""
    maybe_download_and_extract(dir_path, model_url)
    model_file = os.path.join(dir_path, model_url.split("/")[-1])
    if not os.path.exists(model_file):
        raise IOError("VGG Model not found!")
    return scipy.io.loadmat(model_file)
def save_image(image, save_dir, name):
    """
    Save an image after converting it from Lab to RGB colour space.
    :param image: image to save (Lab colour space)
    :param save_dir: directory to save the image in
    :param name: filename prefix (".png" is appended)
    :return:
    """
    rgb_image = color.lab2rgb(image)
    io.imsave(os.path.join(save_dir, name + ".png"), rgb_image)
def get_variable_set_not_trainable(weights, name):
    """Create a frozen (non-trainable) variable initialised from weights."""
    initializer = tf.constant_initializer(weights, dtype=tf.float32)
    return tf.get_variable(name=name, initializer=initializer, shape=weights.shape, trainable=False)
def weight_variable_not_trainable(shape, stddev=0.02, name=None):
    """Truncated-normal weight tensor that is excluded from training."""
    init = tf.truncated_normal(shape, stddev=stddev)
    if name is not None:
        return tf.get_variable(name, initializer=init, trainable=False)
    return tf.Variable(init, trainable=False)
def bias_variable_not_trainable(shape, name=None):
    """Zero-initialised bias tensor that is excluded from training."""
    init = tf.constant(0.0, shape=shape)
    if name is not None:
        return tf.get_variable(name, initializer=init, trainable=False)
    return tf.Variable(init, trainable=False)
def get_variable(weights, name):
    """Create a trainable variable initialised from a pretrained array."""
    initializer = tf.constant_initializer(weights, dtype=tf.float32)
    return tf.get_variable(name=name, initializer=initializer, shape=weights.shape)
def weight_variable(shape, stddev=0.02, name=None):
    """Truncated-normal weight tensor; named variables use get_variable."""
    init = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(init) if name is None else tf.get_variable(name, initializer=init)
def bias_variable(shape, name=None):
    """Zero-initialised bias tensor; named variables use get_variable."""
    init = tf.constant(0.0, shape=shape)
    return tf.Variable(init) if name is None else tf.get_variable(name, initializer=init)
def get_tensor_size(tensor):
    """Return the element count implied by tensor's static shape.

    Multiplies the `.value` of every dimension; an empty shape yields 1.
    """
    from operator import mul
    from functools import reduce  # BUG FIX: `reduce` is not a builtin on Python 3
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)
def conv2d_basic(x, W, bias):
    """Stride-1 SAME-padded 2-D convolution followed by a bias add."""
    return tf.nn.bias_add(tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME"), bias)
def conv2d_strided(x, W, b):
    """Stride-2 SAME-padded convolution plus bias (halves spatial dims)."""
    return tf.nn.bias_add(tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding="SAME"), b)
def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    """Transpose (fractionally strided) convolution with a bias add.

    When output_shape is omitted it is derived from x's static shape:
    spatial dims are doubled and the channel count comes from W's
    output-channel axis (index 2 for transpose-conv kernels).
    """
    if output_shape is None:
        inferred = x.get_shape().as_list()
        inferred[1] *= 2
        inferred[2] *= 2
        inferred[3] = W.get_shape().as_list()[2]
        output_shape = inferred
    deconv = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding="SAME")
    return tf.nn.bias_add(deconv, b)
def leaky_relu(x, alpha=0.2, name=""):
    """Leaky ReLU: elementwise max(alpha * x, x)."""
    scaled = alpha * x
    return tf.maximum(scaled, x, name)
def max_pool_2x2(x):
    """2x2 max pooling, stride 2, SAME padding (halves spatial dims)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")
def avg_pool_2x2(x):
    """2x2 average pooling, stride 2, SAME padding (halves spatial dims)."""
    window = [1, 2, 2, 1]
    return tf.nn.avg_pool(x, ksize=window, strides=window, padding="SAME")
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5, stddev=0.02):
    """Batch-normalise x over (N, H, W), using EMA statistics at inference.

    Code taken from http://stackoverflow.com/a/34634291/2267819

    :param x: 4-D input tensor
    :param n_out: number of channels (size of beta/gamma)
    :param phase_train: boolean tensor - True selects batch statistics
    :param decay: EMA decay for the running mean/variance
    """
    with tf.variable_scope(scope):
        # Learnable per-channel shift (beta) and scale (gamma).
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, stddev),
                                trainable=True)
        # Channel-wise moments over batch and spatial axes.
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        # Running statistics for inference.
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            # Update the moving averages, then hand back the batch moments.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Training: batch moments (with EMA update); inference: EMA values.
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
def process_image(image, mean_pixel):
    """Zero-centre an image by subtracting the dataset mean pixel."""
    return image - mean_pixel
def unprocess_image(image, mean_pixel):
    """Undo process_image by adding the dataset mean pixel back."""
    return image + mean_pixel
def add_to_regularization_and_summary(var):
    """Histogram-summarise var and add its L2 norm to the reg_loss collection."""
    if var is None:
        return
    tf.summary.histogram(var.op.name, var)
    tf.add_to_collection("reg_loss", tf.nn.l2_loss(var))
def add_activation_summary(var):
    """Record histogram and sparsity summaries for an activation tensor."""
    tf.summary.histogram(var.op.name + "/activation", var)
    # BUG FIX: tf.scalar_summary was removed in TF 1.0; use the tf.summary
    # API already used by the sibling helpers in this module.
    tf.summary.scalar(var.op.name + "/sparsity", tf.nn.zero_fraction(var))
def add_gradient_summary(grad, var):
    """Record a histogram summary for a gradient, skipping missing grads."""
    if grad is None:
        return
    tf.summary.histogram(var.op.name + "/gradient", grad)
"""
The residual code below is taken and modified
from https://github.com/tensorflow/models/blob/master/resnet/resnet_model.py
"""
def residual_block(x, in_filter, out_filter, stride, phase_train, is_conv=True, leakiness=0.0,
                   activate_before_residual=False):
    """Residual unit with 2 sub layers."""
    if activate_before_residual:
        with tf.variable_scope('shared_activation'):
            # Pre-activation applied BEFORE the shortcut is captured.
            # NOTE(review): batch_norm is given out_filter channels here while
            # x still has in_filter channels - verify when in_filter != out_filter.
            x = batch_norm(x, out_filter, phase_train, scope="init_bn")
            x = leaky_relu(x, alpha=leakiness, name="lrelu")
            orig_x = x
    else:
        with tf.variable_scope('residual_only_activation'):
            # Shortcut keeps the raw input; activation feeds only the residual path.
            orig_x = x
            x = batch_norm(x, out_filter, phase_train, scope="init_bn")
            x = leaky_relu(x, alpha=leakiness, name="lrelu")
    with tf.variable_scope('sub1'):
        # First 3x3 (transpose-)conv applies the unit's stride.
        if is_conv:
            x = conv_no_bias('conv1', x, 3, in_filter, out_filter, stride)
        else:
            x = conv_transpose_no_bias('conv_t1', x, 3, in_filter, out_filter, stride)
    with tf.variable_scope('sub2'):
        x = batch_norm(x, out_filter, phase_train, scope="bn2")
        x = tf.nn.relu(x, "relu")
        # Second 3x3 (transpose-)conv is always stride 1.
        if is_conv:
            x = conv_no_bias('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
        else:
            # NOTE(review): uses in_filter channels here, unlike the conv
            # branch which uses out_filter - confirm this asymmetry is intended.
            x = conv_transpose_no_bias('conv_t2', x, 3, in_filter, in_filter, [1, 1, 1, 1])
    with tf.variable_scope('sub_add'):
        # Match the shortcut's spatial and channel shape before adding.
        if in_filter != out_filter:
            if is_conv:
                orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
            else:
                orig_x = tf.nn.fractional_avg_pool(orig_x, stride)  # Available only in tf 0.11 - not tested
            # Zero-pad the channel axis symmetrically up to out_filter.
            orig_x = tf.pad(
                orig_x, [[0, 0], [0, 0], [0, 0],
                         [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
        x += orig_x
    # tf.logging.info('image after unit %s', x.get_shape())
    return x
def bottleneck_residual_block(x, in_filter, out_filter, stride, phase_train, is_conv=True, leakiness=0.0,
                              activate_before_residual=False):
    """Bottleneck resisual unit with 3 sub layers."""
    # NOTE(review): `out_filter / 4` is float division on Python 3 and would
    # produce a non-integer channel count; confirm this file targets Python 2
    # or apply // division.
    if activate_before_residual:
        with tf.variable_scope('common_bn_relu'):
            # Pre-activation applied before the shortcut is captured.
            x = batch_norm(x, out_filter, phase_train, scope="init_bn")
            x = leaky_relu(x, alpha=leakiness, name="lrelu")
            orig_x = x
    else:
        with tf.variable_scope('residual_bn_relu'):
            orig_x = x
            x = batch_norm(x, out_filter, phase_train, scope="init_bn")
            x = leaky_relu(x, alpha=leakiness, name="lrelu")
    with tf.variable_scope('sub1'):
        # 1x1 reduce to a quarter of the output channels (applies stride).
        if is_conv:
            x = conv_no_bias('conv1', x, 1, in_filter, out_filter / 4, stride)
        else:
            x = conv_transpose_no_bias('conv_t1', x, 1, out_filter / 4, out_filter, stride)
    with tf.variable_scope('sub2'):
        # 3x3 conv at the reduced width.
        x = batch_norm(x, out_filter, phase_train, scope="bn2")
        x = leaky_relu(x, alpha=leakiness, name="lrelu")
        if is_conv:
            x = conv_no_bias('conv2', x, 3, out_filter / 4, out_filter / 4, [1, 1, 1, 1])
        else:
            x = conv_transpose_no_bias('conv_t2', x, 3, out_filter / 4, out_filter / 4, [1, 1, 1, 1])
    with tf.variable_scope('sub3'):
        # 1x1 expand back to the full output width.
        x = batch_norm(x, out_filter, phase_train, scope="bn3")
        x = leaky_relu(x, alpha=leakiness, name="lrelu")
        if is_conv:
            x = conv_no_bias('conv3', x, 1, out_filter / 4, out_filter, [1, 1, 1, 1])
        else:
            # NOTE(review): channel arguments look inconsistent with the conv
            # branch (in_filter -> out_filter/4) - verify the transpose path.
            x = conv_transpose_no_bias('conv_t3', x, 1, in_filter, out_filter / 4, [1, 1, 1, 1])
    with tf.variable_scope('sub_add'):
        # 1x1 projection shortcut when the channel count changes.
        if in_filter != out_filter:
            if is_conv:
                orig_x = conv_no_bias('project', orig_x, 1, in_filter, out_filter, stride)
            else:
                orig_x = conv_transpose_no_bias('project', orig_x, 1, in_filter, out_filter, stride)
        x += orig_x
    # tf.logging.info('image after unit %s', x.get_shape())
    return x
def conv_no_bias(name, x, filter_size, in_filters, out_filters, strides):
    """Bias-free SAME convolution with He-style random-normal init."""
    with tf.variable_scope(name):
        fan_out = filter_size * filter_size * out_filters
        kernel = tf.get_variable(
            'DW', [filter_size, filter_size, in_filters, out_filters],
            tf.float32,
            initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0 / fan_out)))
        return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def conv_transpose_no_bias(name, x, filter_size, in_filters, out_filters, strides):
    """Bias-free SAME transpose convolution with random-normal init.

    The output shape is derived from x's static shape: spatial dims are
    scaled by the stride and the channel axis becomes in_filters (the
    output-channel axis of a transpose-conv kernel).
    """
    with tf.variable_scope(name):
        n = filter_size * filter_size * out_filters
        kernel = tf.get_variable(
            'DW', [filter_size, filter_size, in_filters, out_filters],
            tf.float32, initializer=tf.random_normal_initializer(
                stddev=np.sqrt(2.0 / n)))
        # BUG FIX: tf.shape(x) returns a Tensor, which does not support item
        # assignment (the original crashed at graph-build time). Build the
        # output shape from the static shape instead, mirroring
        # conv2d_transpose_strided in this module.
        output_shape = x.get_shape().as_list()
        output_shape[1] *= strides[1]
        output_shape[2] *= strides[2]
        output_shape[3] = in_filters
        return tf.nn.conv2d_transpose(x, kernel, output_shape=output_shape, strides=strides, padding='SAME')
| {
"repo_name": "BerenLuthien/HyperColumns_ImageColorization",
"path": "TensorflowUtils.py",
"copies": "1",
"size": "11368",
"license": "bsd-3-clause",
"hash": 6939041719194776000,
"line_mean": 35.5530546624,
"line_max": 115,
"alpha_frac": 0.6031843772,
"autogenerated": false,
"ratio": 3.245218384242078,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4348402761442078,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charlie'
# Utils used with the TensorFlow implementation
import tensorflow as tf
import numpy as np
import scipy.misc as misc
import os, sys
from six.moves import urllib
import tarfile
import zipfile
import scipy.io
def get_model_data(dir_path, model_url):
    """Fetch (if needed) and load the VGG .mat model file from dir_path."""
    maybe_download_and_extract(dir_path, model_url)
    model_file = os.path.join(dir_path, model_url.split("/")[-1])
    if not os.path.exists(model_file):
        raise IOError("VGG Model not found!")
    return scipy.io.loadmat(model_file)
def maybe_download_and_extract(dir_path, url_name, is_tarfile=False, is_zipfile=False):
    """Download url_name into dir_path if absent, optionally extracting it.

    :param dir_path: destination directory (created if missing)
    :param url_name: URL to fetch; the last path component is the filename
    :param is_tarfile: extract the download as a gzipped tar archive
    :param is_zipfile: extract the download as a zip archive
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    filename = url_name.split('/')[-1]
    filepath = os.path.join(dir_path, filename)
    # Skip download (and extraction) entirely when the file is already there.
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()

        filepath, _ = urllib.request.urlretrieve(url_name, filepath, reporthook=_progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
        if is_tarfile:
            # Close the archive deterministically instead of leaking the handle.
            with tarfile.open(filepath, 'r:gz') as tar:
                tar.extractall(dir_path)
        elif is_zipfile:
            with zipfile.ZipFile(filepath) as zf:
                zf.extractall(dir_path)
def save_image(image, save_dir, name, mean=None):
    """
    Save image, unprocessing (adding the mean back) first when a mean is given.
    :param mean: mean pixel to add back, or None to save as-is
    :param image: image array to save
    :param save_dir: directory to write into
    :param name: filename prefix (".png" is appended)
    :return:
    """
    # BUG FIX: `if mean:` raises "truth value of an array is ambiguous" when
    # the mean is an ndarray (as produced by np.mean elsewhere in this
    # project); test for presence explicitly instead.
    if mean is not None:
        image = unprocess_image(image, mean)
    misc.imsave(os.path.join(save_dir, name + ".png"), image)
# as described at Sec. 4.2
def get_variable(weights, name):
    """Create a variable initialised from pretrained weights.

    For 'conv1_1_w' the input-channel axis is doubled: the first half is
    the pretrained kernel and the second half is random-normal noise.
    """
    if name == 'conv1_1_w':
        k1, k2, in_ch, out_ch = weights.shape
        widened = np.random.normal(0.0, 1.0, size=(k1, k2, 2 * in_ch, out_ch))
        widened[:, :, 0:in_ch, :] = weights
        init = tf.constant_initializer(widened, dtype=tf.float32)
        return tf.get_variable(name=name, initializer=init, shape=widened.shape)
    init = tf.constant_initializer(weights, dtype=tf.float32)
    return tf.get_variable(name=name, initializer=init, shape=weights.shape)
def weight_variable(shape, stddev=0.02, name=None):
    """Truncated-normal weight tensor; named variables use get_variable."""
    init = tf.truncated_normal(shape, stddev=stddev)
    if name is not None:
        return tf.get_variable(name, initializer=init)
    return tf.Variable(init)
def bias_variable(shape, name=None):
    """Zero-initialised bias tensor; named variables use get_variable."""
    init = tf.constant(0.0, shape=shape)
    if name is not None:
        return tf.get_variable(name, initializer=init)
    return tf.Variable(init)
def get_tensor_size(tensor):
    """Return the element count implied by tensor's static shape.

    Multiplies the `.value` of every dimension; an empty shape yields 1.
    """
    from operator import mul
    from functools import reduce  # BUG FIX: `reduce` is not a builtin on Python 3
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)
def conv2d_basic(x, W, bias):
    """Stride-1 SAME-padded 2-D convolution followed by a bias add."""
    feature_map = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
    return tf.nn.bias_add(feature_map, bias)
def conv2d_strided(x, W, b):
    """Stride-2 SAME-padded convolution plus bias (halves spatial dims)."""
    feature_map = tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding="SAME")
    return tf.nn.bias_add(feature_map, b)
def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    """Transpose (fractionally strided) convolution with a bias add.

    When output_shape is omitted it is derived from x's static shape:
    spatial dims are doubled and the channel count comes from W's
    output-channel axis (index 2 for transpose-conv kernels).
    """
    if output_shape is None:
        inferred = x.get_shape().as_list()
        inferred[1] *= 2
        inferred[2] *= 2
        inferred[3] = W.get_shape().as_list()[2]
        output_shape = inferred
    deconv = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding="SAME")
    return tf.nn.bias_add(deconv, b)
def leaky_relu(x, alpha=0.0, name=""):
    """Leaky ReLU; with the default alpha=0.0 this is a plain ReLU."""
    scaled = alpha * x
    return tf.maximum(scaled, x, name)
def max_pool_2x2(x):
    """2x2 max pooling, stride 2, SAME padding (halves spatial dims)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")
def avg_pool_2x2(x):
    """2x2 average pooling, stride 2, SAME padding (halves spatial dims)."""
    window = [1, 2, 2, 1]
    return tf.nn.avg_pool(x, ksize=window, strides=window, padding="SAME")
def local_response_norm(x):
    """Local response normalisation with fixed AlexNet-style parameters."""
    return tf.nn.lrn(x, depth_radius=5, alpha=1e-4, beta=0.75, bias=2)
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """Batch-normalise x over (N, H, W), using EMA statistics at inference.

    Code taken from http://stackoverflow.com/a/34634291/2267819

    :param x: 4-D input tensor
    :param n_out: number of channels (size of beta/gamma)
    :param phase_train: boolean tensor - True selects batch statistics
    :param decay: EMA decay for the running mean/variance
    """
    with tf.variable_scope(scope):
        # Learnable per-channel shift (beta) and scale (gamma).
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        # Channel-wise moments over batch and spatial axes.
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        # Running statistics for inference.
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            # Update the moving averages, then hand back the batch moments.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Training: batch moments (with EMA update); inference: EMA values.
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
def process_image(image, mean_pixel):
    """Zero-centre an image by subtracting the dataset mean pixel."""
    return image - mean_pixel
def unprocess_image(image, mean_pixel):
    """Undo process_image by adding the dataset mean pixel back."""
    return image + mean_pixel
def bottleneck_unit(x, out_chan1, out_chan2, down_stride=False, up_stride=False, name=None):
    """
    Modified implementation from github ry?!
    """
    # NOTE(review): the inner helpers below look inconsistent with their call
    # sites: conv_transpose's parameter is `out_channel` but it is called with
    # `out_chans=`; both helpers convolve the enclosing `x` instead of their
    # `tensor` argument; `shape[-1] = out_channel` item-assigns into an int;
    # and bn() takes two arguments but is called with three. Several paths
    # would raise at graph-build time - flagged here, not changed.

    def conv_transpose(tensor, out_channel, shape, strides, name=None):
        # Transpose conv whose output shape is taken from the input's shape.
        out_shape = tensor.get_shape().as_list()
        in_channel = out_shape[-1]
        kernel = weight_variable([shape, shape, out_channel, in_channel], name=name)
        # NOTE(review): `shape` is an int (filter size) here; this assignment
        # raises TypeError - presumably out_shape[-1] was intended.
        shape[-1] = out_channel
        return tf.nn.conv2d_transpose(x, kernel, output_shape=out_shape, strides=[1, strides, strides, 1],
                                      padding='SAME', name='conv_transpose')

    def conv(tensor, out_chans, shape, strides, name=None):
        # Plain SAME convolution; NOTE(review): convolves `x`, not `tensor`.
        in_channel = tensor.get_shape().as_list()[-1]
        kernel = weight_variable([shape, shape, in_channel, out_chans], name=name)
        return tf.nn.conv2d(x, kernel, strides=[1, strides, strides, 1], padding='SAME', name='conv')

    def bn(tensor, name=None):
        """
        :param tensor: 4D tensor input
        :param name: name of the operation
        :return: local response normalized tensor - not using batch normalization :(
        """
        return tf.nn.lrn(tensor, depth_radius=5, bias=2, alpha=1e-4, beta=0.75, name=name)

    in_chans = x.get_shape().as_list()[3]

    # A stride of 2 is used for both the down- and up-sampling variants.
    if down_stride or up_stride:
        first_stride = 2
    else:
        first_stride = 1

    with tf.variable_scope('res%s' % name):
        if in_chans == out_chan2:
            # Identity shortcut when channel counts already match.
            b1 = x
        else:
            with tf.variable_scope('branch1'):
                # 1x1 projection shortcut to match the output channel count.
                if up_stride:
                    b1 = conv_transpose(x, out_chans=out_chan2, shape=1, strides=first_stride,
                                        name='res%s_branch1' % name)
                else:
                    b1 = conv(x, out_chans=out_chan2, shape=1, strides=first_stride, name='res%s_branch1' % name)
                # NOTE(review): bn() only accepts (tensor, name); the extra
                # scale name argument would raise TypeError.
                b1 = bn(b1, 'bn%s_branch1' % name, 'scale%s_branch1' % name)

        with tf.variable_scope('branch2a'):
            # 1x1 reduce to out_chan1.
            if up_stride:
                b2 = conv_transpose(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            else:
                b2 = conv(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            b2 = bn(b2, 'bn%s_branch2a' % name, 'scale%s_branch2a' % name)
            b2 = tf.nn.relu(b2, name='relu')

        with tf.variable_scope('branch2b'):
            # 3x3 conv at the reduced width.
            b2 = conv(b2, out_chans=out_chan1, shape=3, strides=1, name='res%s_branch2b' % name)
            b2 = bn(b2, 'bn%s_branch2b' % name, 'scale%s_branch2b' % name)
            b2 = tf.nn.relu(b2, name='relu')

        with tf.variable_scope('branch2c'):
            # 1x1 expand back to out_chan2.
            b2 = conv(b2, out_chans=out_chan2, shape=1, strides=1, name='res%s_branch2c' % name)
            b2 = bn(b2, 'bn%s_branch2c' % name, 'scale%s_branch2c' % name)

        # Residual addition followed by the unit's final activation.
        x = b1 + b2
        return tf.nn.relu(x, name='relu')
def add_to_regularization_and_summary(var):
    """Histogram-summarise var and add its L2 norm to the reg_loss collection."""
    if var is None:
        return
    tf.histogram_summary(var.op.name, var)
    tf.add_to_collection("reg_loss", tf.nn.l2_loss(var))
def add_activation_summary(var):
    """Record histogram and sparsity summaries for an activation tensor."""
    if var is None:
        return
    tf.histogram_summary(var.op.name + "/activation", var)
    tf.scalar_summary(var.op.name + "/sparsity", tf.nn.zero_fraction(var))
def add_gradient_summary(grad, var):
    """Record a histogram summary for a gradient, skipping missing grads."""
    if grad is None:
        return
    tf.histogram_summary(var.op.name + "/gradient", grad)
| {
"repo_name": "PetroWu/AutoPortraitMatting",
"path": "TensorflowUtils_plus.py",
"copies": "1",
"size": "8879",
"license": "apache-2.0",
"hash": 1369384595148125000,
"line_mean": 35.093495935,
"line_max": 120,
"alpha_frac": 0.6029958329,
"autogenerated": false,
"ratio": 3.1722043586995357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9267329312679051,
"avg_score": 0.001574175784096897,
"num_lines": 246
} |
__author__ = 'charm ship jo'
import pygame
# Initialise all pygame modules up-front; the Font and display objects
# created below require this.
pygame.init()
class Window:
    """Top-level pygame window driving a page-callback main loop."""

    def __init__(self, caption, size, flags=0, depth=0, fps=30):
        self.fps = fps
        self.screen = pygame.display.set_mode(size, flags, depth)
        pygame.display.set_caption(caption)
        self.clock = pygame.time.Clock()
        # Window_Open goes False on QUIT; Run lets a page stop the loop.
        self.Window_Open = True
        self.Run = True
        self.updatefunc = None
        self.eventfunc = None

    def SetPage(self, updatefunc=None, eventfunc=None):
        """Install the per-frame draw callback and the event callback."""
        self.updatefunc = updatefunc
        self.eventfunc = eventfunc

    def Flip(self):
        """Run the main loop until the window closes or Run is cleared."""
        self.Run = True
        while self.Run and self.Window_Open:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.Window_Open = False
                elif self.eventfunc is not None:
                    self.eventfunc(event)
            if self.updatefunc is not None:
                self.updatefunc(self.screen)
            pygame.display.flip()
            self.clock.tick(self.fps)
class Button:
    """Clickable rectangular button with hover highlight and pressed state."""

    def __init__(self, text, pos, size=(100, 30), color=(0, 0, 200), hilight=(0, 200, 200)):
        self.normal = color
        self.hilight = hilight
        self.rect = pygame.Rect(pos, size)
        self.mouseover = False
        self.text = text
        self.font = pygame.font.Font(None, 24)
        self.text_image = self.font.render(text, 1, (255, 255, 255))
        text_w, text_h = self.font.size(text)
        # Centre the caption inside the button rectangle.
        self.text_pos = (pos[0] + size[0] / 2 - text_w / 2, pos[1] + size[1] / 2 - text_h / 2)
        self.buttondown = False

    def Draw(self, surface):
        """Draw the 2-px bevel border, the face colour and the caption."""
        outer = self.rect.inflate(2, 2)
        inner = self.rect.inflate(1, 1)
        # Bevel colours flip when the button is pressed.
        border = ((0, 0, 0), (255, 255, 255)) if self.buttondown else ((255, 255, 255), (0, 0, 0))
        pygame.draw.rect(surface, border[0], outer)
        pygame.draw.rect(surface, border[1], inner)
        face = self.hilight if self.mouseover else self.normal
        pygame.draw.rect(surface, face, self.rect)
        surface.blit(self.text_image, self.text_pos)

    def Update(self, event):
        """Track the hover state from a mouse-motion event."""
        mouse_x, mouse_y = event.pos
        left, top, width, height = self.rect
        self.mouseover = (left < mouse_x < left + width) and (top < mouse_y < top + height)
        # Leaving the button cancels a pending press.
        if not self.mouseover:
            self.buttondown = False

    def MouseDown(self, event):
        """Arm the button when pressed while hovered."""
        if self.mouseover:
            self.buttondown = True

    def Click(self, event):
        """Fire on mouse-up: render the caption into the global frame."""
        if self.buttondown and self.mouseover:
            self.buttondown = False
            # NOTE: relies on the module-level `frame` created in __main__.
            frame.push = frame.font.render(self.text, 1, (100, 0, 200))
class Main:
    """Title page: two buttons plus an optional text blit after a click."""

    def __init__(self):
        self.button = Button('Game Start', (280, 450))
        self.button2 = Button('Exit', (430, 450), (100, 30), (200, 0, 0), (200, 0, 200))
        self.font = pygame.font.Font(None, 18)
        # Text surface set by Button.Click; drawn once present.
        self.push = None

    def Update(self, surface):
        """Clear to white, draw both buttons and any click message."""
        surface.fill((255, 255, 255))
        for btn in (self.button, self.button2):
            btn.Draw(surface)
        if self.push is not None:
            surface.blit(self.push, (400, 300))

    def Event(self, event):
        """Dispatch mouse events to both buttons."""
        handlers = {
            pygame.MOUSEBUTTONDOWN: 'MouseDown',
            pygame.MOUSEBUTTONUP: 'Click',
            pygame.MOUSEMOTION: 'Update',
        }
        method = handlers.get(event.type)
        if method is not None:
            getattr(self.button, method)(event)
            getattr(self.button2, method)(event)
if __name__ == '__main__':
    # Build the window and title page, wire the callbacks, run the loop,
    # and shut pygame down cleanly when the window closes.
    window = Window('charm ship jo game!',(800,600))
    frame = Main()
    window.SetPage(frame.Update,frame.Event)
    window.Flip()
    pygame.quit()
| {
"repo_name": "saintdragon2/python-3-lecture-2015",
"path": "civil-final/1st_presentation/9조/10.py",
"copies": "1",
"size": "3580",
"license": "mit",
"hash": 4634117050519224000,
"line_mean": 28.1056910569,
"line_max": 81,
"alpha_frac": 0.6005586592,
"autogenerated": false,
"ratio": 3.296500920810313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4397059580010313,
"avg_score": null,
"num_lines": null
} |
__author__ = "Chase Roberts"
__maintainers__ = ["Chase Roberts"]
import json
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth import authenticate
from deck.views import *
from deck.models import *
class DeckTest(TestCase):
    """Integration tests for the deck views: new, draw, shuffle and piles."""

    def setUp(self):
        # Factory for synthetic GET requests passed straight to the views.
        self.request_factory = RequestFactory()

    def test_flow(self):
        """Full lifecycle: new deck -> draw -> shuffle -> pile add/draw."""
        # A brand-new deck reports success and is not shuffled.
        request = self.request_factory.get("/", {})
        response = new_deck(request)
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        self.assertEqual(resp['shuffled'], False)
        deck_id = resp['deck_id']
        # An unshuffled deck deals in order: first card is the ace of spades.
        request = self.request_factory.get("/", {})
        response = draw(request, deck_id)
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        ace = resp['cards'][0]
        self.assertEqual(ace['suit'], 'SPADES')
        self.assertEqual(ace['value'], 'ACE')
        self.assertEqual(ace['code'], 'AS')
        self.assertEqual(resp['remaining'], 51)
        # Shuffling returns the drawn card to the deck (back to 52).
        request = self.request_factory.get("/", {})
        response = shuffle(request, deck_id)
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        self.assertEqual(resp['shuffled'], True)
        self.assertEqual(resp['remaining'], 52)
        # Drawing honours the count parameter.
        request = self.request_factory.get("/", {"count":10})
        response = draw(request, deck_id)
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        self.assertEqual(resp['remaining'], 42)
        self.assertEqual(len(resp['cards']), 10)
        cards = resp['cards']
        card0 = cards[0]
        card1 = cards[1]
        # Move two of the drawn cards into a named pile.
        request = self.request_factory.get("/", {"cards":card0['code']+','+card1['code']})
        response = add_to_pile(request, deck_id, 'chase')
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        self.assertEqual(resp['remaining'], 42)
        piles = resp['piles']
        self.assertEqual(piles['chase']['remaining'], 2)
        # Draw a specific card back out of the pile.
        request = self.request_factory.get("/", {"cards":card0['code']})
        response = draw_from_pile(request, deck_id, 'chase')
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        cards = resp['cards']
        self.assertEqual(cards[0]['code'], card0['code'])
        piles = resp['piles']
        self.assertEqual(piles['chase']['remaining'], 1)
        # Drawing without naming a card takes the remaining one.
        request = self.request_factory.get("/", {})
        response = draw_from_pile(request, deck_id, 'chase')
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        cards = resp['cards']
        self.assertEqual(cards[0]['code'], card1['code'])
        piles = resp['piles']
        self.assertEqual(piles['chase']['remaining'], 0)

    def test_partial_deck(self):
        """Partial decks hold exactly the requested cards, even reshuffled."""
        #test to make sure a new partial deck is returned when requested
        request = self.request_factory.get("/", {'cards':'AC,AD,AH,AS'})
        response = shuffle(request)
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        self.assertEqual(resp['shuffled'], True)
        deck_id = resp['deck_id']
        self.assertEqual(resp['remaining'], 4)
        #draw 4 cards and make sure they match the input data (and verify deck is empty)
        request = self.request_factory.get("/", {'count':4})
        response = draw(request, deck_id)
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        one, two, three, four = False, False, False, False
        for card in resp['cards']:
            if card['code'] == 'AS':
                one = True
            elif card['code'] == 'AD':
                two = True
            elif card['code'] == 'AH':
                three = True
            elif card['code'] == 'AC':
                four = True
        self.assertEqual(resp['remaining'], 0)
        self.assertEqual(one, True)
        self.assertEqual(two, True)
        self.assertEqual(three, True)
        self.assertEqual(four, True)
        #verify that reshuffling a partial deck returns a partial deck
        request = self.request_factory.get("/", {'cards':'KC,KD,KH,KS'})
        response = shuffle(request)
        resp = json.loads(response.content.decode('utf-8'))
        deck_id = resp['deck_id']
        reshuffleRequest = self.request_factory.get("/", {})
        response = shuffle(reshuffleRequest, deck_id)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['remaining'], 4)

    def test_draw_new(self):
        """Drawing without a deck id implicitly creates a fresh full deck."""
        request = self.request_factory.get("/", {'count':5})
        response = draw(request)
        self.assertEqual(response.status_code, 200)
        resp = json.loads(response.content.decode('utf-8'))
        self.assertEqual(resp['success'], True)
        self.assertEqual(resp['remaining'], 47)
| {
"repo_name": "gavinmcgimpsey/deckofcards",
"path": "deck/tests.py",
"copies": "2",
"size": "5594",
"license": "mit",
"hash": -1257555009063426800,
"line_mean": 40.1323529412,
"line_max": 90,
"alpha_frac": 0.6022524133,
"autogenerated": false,
"ratio": 3.852617079889807,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5454869493189807,
"avg_score": null,
"num_lines": null
} |
__author__ = ['Chatziargyriou Eleftheria <ele.hatzy@gmail.com>']
__license__ = 'MIT License'
import re
from cltk.stem.middle_english.stem import affix_stemmer
"""
The hyphenation/syllabification algorithm is based on the typical syllable
structure model of onset/nucleus/coda. An additional problem arises with the
distinction between long and short vowels, since many scribal traditions used
identical graphemes for both. The Great Vowel Shift, dating back to the
early stages of ME, poses an additional problem.
"""
# Single graphemes treated as (short) vowel nuclei.
SHORT_VOWELS = ['a', 'e', 'i', 'o', 'u', 'y', 'æ']
# Two-letter graphemes treated as long-vowel nuclei.
LONG_VOWELS = ['aa', 'ee', 'oo', 'ou', 'ow', 'ae']
# NOTE(review): despite the names, DIPHTHONGS/TRIPHTHONGS hold two- and
# three-letter CONSONANT digraphs/clusters that the syllabifier treats as
# indivisible units - they are not vowel diphthongs.
DIPHTHONGS = ['th', 'gh', 'ht', 'ch']
TRIPHTHONGS = ['ght', 'ghl']
CONSONANTS = ['b', 'c', 'd', 'f', 'g', 'h', 'l', 'm', 'n', 'p', 'r', 's', 't', 'x', 'ð', 'þ', 'ƿ']
#Soundex replacement rules
# Maps graphemes to Soundex-style digit classes.
dict_SE = {
    'p': '1',
    'b': '1',
    'f': '1',
    'v': '1',
    'gh':'1',
    'sh':'2',
    't': '2',
    'd': '2',
    's': '2',
    'z': '2',
    'r': '2',  # NOTE(review): duplicate key - silently overridden by 'r': '5' below; confirm which class 'r' should map to.
    'k': '2',
    'g': '2',
    'w': '2',
    'l': '3',
    'm': '4',
    'n': '4',
    'r': '5'
}
class Word:
    def __init__(self, word):
        """Wrap a Middle English word for syllabification/stress analysis."""
        self.word = word
        # Cached output of syllabify(); None until that method runs.
        self.syllabified = None
        # Cached stress marking; presumably filled by a stress method not
        # visible in this chunk - TODO confirm. None until then.
        self.stressed = None
def syllabify(self):
"""
Syllabification module for Middle English.
Throughout the early 11th-14th century, ME went through a process of
loss of gemination. Originally, the syllable preceding a geminate was
a closed one. The method assumes any occurring geminates will be
separated like in Modern English (working both as coda of first syllable
and onset of the other).
The algorithm also takes into account the shortening of vowels before a
cluster of consonants which took place at the earlier stages of the
language.
Returns:
list: string list containing the syllables of the given word
Examples:
>>> Word('heldis').syllabify()
['hel', 'dis']
>>> Word('greef').syllabify()
['greef']
Once you syllabify the word, the result will be saved as a class
variable
>>> word = Word('commaundyd')
>>> word.syllabify()
['com', 'mau', 'ndyd']
>>> word.syllabified
['com', 'mau', 'ndyd']
"""
# Array holding the index of each given syllable
ind = []
i = 0
# Iterate through letters of word searching for the nuclei
while i < len(self.word) - 1:
if self.word[i] in SHORT_VOWELS:
nucleus = ''
# Find cluster of vowels
while self.word[i] in SHORT_VOWELS and i < len(self.word) - 1:
nucleus += self.word[i]
i += 1
try:
# Check whether it is suceeded by a geminant
if self.word[i] == self.word[i + 1]:
ind.append(i)
i += 2
continue
elif sum(c not in CONSONANTS for c in self.word[i:i + 3]) == 0:
ind.append(i - 1 if self.word[i:i + 3] in TRIPHTHONGS else i)
i += 3
continue
except IndexError:
pass
if nucleus in SHORT_VOWELS:
ind.append(i - 1 if self.word[i:i + 2] in DIPHTHONGS else i)
continue
else:
ind.append(i - 1)
continue
i += 1
# Check whether the last syllable should be merged with the previous one
try:
if ind[-1] in [len(self.word) - 2, len(self.word) - 1]:
ind = ind[:-(1 + (ind[-2] == len(self.word) - 2))]
except IndexError:
if ind[-1] in [len(self.word) - 2, len(self.word) - 1]:
ind = ind[:-1]
self.syllabified = self.word
for n, k in enumerate(ind):
self.syllabified = self.syllabified[:k + n + 1] + "." + self.syllabified[k + n + 1:]
# Check whether the last syllable lacks a vowel nucleus
self.syllabified = self.syllabified.split(".")
if sum(map(lambda x: x in SHORT_VOWELS, self.syllabified[-1])) == 0:
self.syllabified[-2] += self.syllabified[-1]
self.syllabified = self.syllabified[:-1]
return self.syllabified
def syllabified_str(self, separator="."):
"""
Returns:
str: Syllabified word in string format
Examples:
>>> Word('conseil').syllabified_str()
'con.seil'
You can also specify the separator('.' by default)
>>> Word('sikerly').syllabified_str(separator = '-')
'sik-er-ly'
"""
return separator.join(self.syllabified if self.syllabified else self.syllabify())
def stresser(self, stress_rule='FSR'):
"""
Args:
:param stress_rule: Stress Rule, valid options:
'FSR': French Stress Rule, stress falls on the ultima, unless
it contains schwa (ends with e), in which case the penult is
stressed
'GSR': Germanic Stress Rule, stress falls on the first syllable
of the stemm. Note that the accuracy of the function directly
depends on that of the stemmer.
'LSR': Latin Stress Rule, stress falls on the penult if its
heavy, else, if it has more than two syllables on the
antepenult, else on the ultima.
Returns:
list: A list containing the separate syllable, where the stressed
syllable is prefixed by ' . Monosyllabic words are left unchanged,
since stress indicates relative emphasis.
Examples:
>>> Word('beren').stresser(stress_rule = "FSR")
['ber', "'en"]
>>> Word('prendre').stresser(stress_rule = "FSR")
["'pren", 'dre']
>>> Word('yisterday').stresser(stress_rule = "GSR")
['yi', 'ster', "'day"]
>>> Word('day').stresser(stress_rule = "GSR")
['day']
>>> Word('mervelus').stresser(stress_rule = "LSR")
["'mer", 'vel', 'us']
>>> Word('verbum').stresser(stress_rule = "LSR")
['ver', "'bum"]
"""
# Syllabify word
if not self.syllabified:
self.syllabify()
# Check whether word is monosyllabic
if len(self.syllabified) == 1:
return self.syllabified
if stress_rule == 'FSR':
# Check whether ultima ends in e
if self.syllabified[-1][-1] == 'e':
return self.syllabified[:-2] + ['\'{0}'.format(self.syllabified[-2])] + self.syllabified[-1:]
else:
return self.syllabified[:-1] + ['\'{0}'.format(self.syllabified[-1])]
elif stress_rule == 'GSR':
# The word striped of suffixes
st_word = affix_stemmer([self.word], strip_suf=False)
affix = self.word[:len(self.word) - len(st_word)]
# Syllabify stripped word and affix
syl_word = Word(st_word).syllabify()
# Add stress
syl_word = ['\'{0}'.format(syl_word[0])] + syl_word[1:]
if affix:
affix = Word(affix).syllabify()
syl_word = affix + syl_word
return syl_word
elif stress_rule == 'LSR':
# Check whether penult is heavy (contains more than one mora)
if sum(map(lambda x: x in SHORT_VOWELS, self.syllabified[-1])) > 1:
return self.syllabified[:-2] + ['\'{0}'.format(self.syllabified[-2])] + self.syllabified[-1:]
elif len(self.syllabified) > 2:
return self.syllabified[:-3] + ['\'{0}'.format(self.syllabified[-3])] + self.syllabified[-2:]
else:
return self.syllabified[:-1] + ['\'{0}'.format(self.syllabified[-1])]
def phonetic_indexing(self, p = "SE"):
"""
Args:
:param p: Specifies the phonetic indexing method
SE: Soundex variant for MHG
Returns:
str: Encoded string corresponding to the word's phonetic
representation
"""
if p == "SE":
return self._Soundex()
def _Soundex(self):
"""
The Soundex phonetic indexing algorithm adapted to ME phonology.
Algorithm:
Let w the original word and W the resulting one
1) Capitalize the first letter of w and append it to W
2) Apply the following replacement rules
p, b, f, v, gh (non-nasal fricatives) -> 1
t, d, s, sh, z, r, k, g, w (non-nasal alveolars and velars) -> 2
l (alveolar lateral) -> 3
m, n (nasals) -> 4
r (alveolar approximant) -> 5
3) Concetate multiple occurrences of numbers into one
4) Remove non-numerical characters
Notes:
/h/ was thought to be either a voiceless or velar fricative
when occurring in the coda with its most used grapheme being <gh>.
Those phonemes either disappeared, resulting in the lengthening
of preceding vowel clusters, or were developed into /f/ as evident
by modern spelling (e.g. 'enough': /ɪˈnʌf/ and 'though': /ðəʊ/)
Examples:
>>> Word("midel").phonetic_indexing(p="SE")
'M230'
>>> Word("myddle").phonetic_indexing(p="SE")
'M230'
>>> Word("might").phonetic_indexing(p="SE")
'M120'
>>> Word("myghtely").phonetic_indexing(p="SE")
'M123'
"""
word = self.word[1:]
for w, val in zip(dict_SE.keys(), dict_SE.values()):
word = word.replace(w, val)
#Remove multiple adjacent occurences of digit
word = re.sub(r"(\d)\1+", r"\1", word)
# Strip remaining letters
word = re.sub(r"[a-zðþƿ]+", "", word)
# Add trailing zeroes and return
return (self.word[0].upper() + word + "0" * 3)[:4]
| {
"repo_name": "LBenzahia/cltk",
"path": "cltk/phonology/middle_english/transcription.py",
"copies": "1",
"size": "10342",
"license": "mit",
"hash": -3009798491542365000,
"line_mean": 29.649851632,
"line_max": 109,
"alpha_frac": 0.5199922548,
"autogenerated": false,
"ratio": 3.5373287671232876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4557321021923288,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chaya D. Stern'
from pymol import stored, cmd
import os
import errno
def torsion_drive(atom1, atom2, atom3, atom4, interval, selection, path, mol_name):
    """
    Generate input pdbs for a torsion scan of the selected dihedral, sampled
    every `interval` degrees from 0 to 360 inclusive.

    :param atom1: name of atom 1 of dihedral
    :param atom2: name of atom 2 of dihedral
    :param atom3: name of atom 3 of dihedral
    :param atom4: name of atom 4 of dihedral
    :param interval: int or float (in degrees) of intervals to generate torsion scan for
    :param selection: name of selection for molecule
    :param path: path to where pdb files should be saved
    :param mol_name: name of molecule to append to filename
    """
    # Qualify each atom name with the molecule selection for pymol.
    atom1 = selection + " and name " + atom1
    atom2 = selection + " and name " + atom2
    atom3 = selection + " and name " + atom3
    atom4 = selection + " and name " + atom4
    for angle in range(0, 360 + int(interval), int(interval)):
        angle_dir = '%s/%i' % (path, angle)
        try:
            os.makedirs(angle_dir)
        except OSError as exc:
            # Only swallow the error when the *angle* directory already exists
            # (the original checked the parent `path`, which always exists).
            if exc.errno == errno.EEXIST and os.path.isdir(angle_dir):
                pass
            else:
                raise
        cmd.set_dihedral(atom1, atom2, atom3, atom4, angle)
        filename = '%s/%i/%s_%i.pdb' % (path, angle, mol_name, angle)
        cmd.save(filename, selection, 1)
cmd.extend("torsion_drive", torsion_drive) | {
"repo_name": "ChayaSt/torsionfit",
"path": "torsionfit/qmscan/generate_dihedral.py",
"copies": "4",
"size": "1411",
"license": "mit",
"hash": -833706337376347400,
"line_mean": 36.1578947368,
"line_max": 103,
"alpha_frac": 0.6371367824,
"autogenerated": false,
"ratio": 3.6744791666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6311615949066666,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chaya D. Stern'
import numpy as np
import logging
import sys
verbose = False
def RMSE(scanSet, db):
    '''
    Root-mean-square error between each sampled MM energy vector and the QM energies.

    :param scanSet: TorsionScanSet exposing a ``qm_energy`` array of length N
    :param db: pymc database exposing a ``trace('mm_energy')`` of sampled MM energy vectors
    :return: numpy array of rmse, one entry per sample in the trace
    '''
    N = len(scanSet.qm_energy)
    # ||mm - qm|| / sqrt(N) is the RMSE over the N scan points.
    return np.array([np.linalg.norm(energy - scanSet.qm_energy) / np.sqrt(N)
                     for energy in db.trace('mm_energy')[:]])
def logger(name='torsionFit', pattern='%(asctime)s %(levelname)s %(name)s: %(message)s',
           date_format='%H:%M:%S', handler=None):
    """
    Retrieves the logger instance associated to the given name

    :param name: The name of the logger instance
    :param pattern: The associated pattern
    :param date_format: The date format to be used in the pattern
    :param handler: The logging handler; when None a new StreamHandler on
        stdout is created for this logger
    :return: The logger
    """
    if handler is None:
        # Create the handler here rather than as a default argument: a default
        # would be instantiated once at import time and shared by every logger,
        # so each call would re-configure the same handler object.
        handler = logging.StreamHandler(sys.stdout)
    _logger = logging.getLogger(name)
    _logger.setLevel(log_level(verbose))
    if not _logger.handlers:
        # Only attach a handler the first time this logger is requested.
        formatter = logging.Formatter(pattern, date_format)
        handler.setFormatter(formatter)
        handler.setLevel(log_level(verbose))
        _logger.addHandler(handler)
    _logger.propagate = False
    return _logger
def log_level(verbose=verbose):
    """Map the module verbosity flag to a logging level."""
    return logging.DEBUG if verbose else logging.INFO
| {
"repo_name": "ChayaSt/torsionfit",
"path": "torsionfit/utils.py",
"copies": "4",
"size": "1415",
"license": "mit",
"hash": -2497664416041538000,
"line_mean": 26.7450980392,
"line_max": 88,
"alpha_frac": 0.6508833922,
"autogenerated": false,
"ratio": 3.723684210526316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6374567602726317,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chaya D. Stern'
import pandas as pd
import numpy as np
from simtk.unit import Quantity, nanometers, kilojoules_per_mole
from cclib.parser import Gaussian, Psi
from cclib.parser.utils import convertor
import mdtraj as md
from parmed.charmm import CharmmPsfFile, CharmmParameterSet
import parmed
from torsionfit.database import DataBase
from copy import deepcopy
from fnmatch import fnmatch
import os
import re
import warnings
import itertools
def to_optimize(param, stream, penalty=10):
    """ returns a list of dihedrals to optimize and updates CharmmParameterSet
    with stream files

    Parameters
    ----------
    param : CharmmParameterSet
    stream: list of stream files
    penalty: int for CGenFF penalty cutoff (Default = 10)

    Returns list of tuples containing dihedrals to optimize
    """
    if not isinstance(stream, list):
        stream = [stream]
    # Snapshot the dihedral types present *before* reading the stream files so
    # only the newly-added (CGenFF analogy) parameters are considered.
    # A set gives O(1) membership tests below.
    keys = set(param.dihedral_types.keys())
    for stream_file in stream:
        param.read_stream_file(stream_file)
    params_to_optimize = [k for k in param.dihedral_types.keys()
                          if k not in keys and param.dihedral_types[k].penalty >= penalty]
    # Deduplicate torsion types that differ only by direction (A-B-C-D vs D-C-B-A).
    written = set()
    for t in params_to_optimize:
        if t in written or tuple(reversed(t)) in written:
            continue
        written.add(t)
    return list(written)
def parse_psi4_log(logfiles, structure):
    """
    Parses output of psi4 torsion scan script

    :param logfiles: list of str
        logfiles of psi4 script
    :param structure: str
        Charmm psf file of structure
    :return:
        QMDataBase
    """
    topology = md.load_psf(structure)
    structure = CharmmPsfFile(structure)
    # Accumulators over all log files.
    positions = np.ndarray((0, topology.n_atoms, 3))
    qm_energies = np.ndarray(0)
    torsions = np.ndarray((0, 4), dtype=int)
    directions = np.ndarray(0, dtype=int)
    angles = np.ndarray(0, dtype=float)
    if not isinstance(logfiles, list):
        logfiles = [logfiles]
    for file in logfiles:
        qm = np.ndarray(0)
        fi = open(file, 'r')
        # check if log file is complete (a 'Relative' section was written)
        complete = False
        for line in fi:
            if line.startswith('Relative'):
                complete = True
        fi.seek(0)
        section = None
        torsion = np.ndarray((1, 4), dtype=int)
        angle = np.ndarray(0, dtype=float)
        for line in fi:
            # Flag if structure is optimized
            optimized = False
            if line.startswith('Optimizer'):
                optimized = True
                # Store Dihedral and position of optimized structures
                fi.next()
                l = filter(None, fi.next().strip().split(' '))
                dih = round(float(l[-2]))
                try:
                    t = l[-6:-2]
                    for i in range(len(t)):
                        torsion[0][i] = int(t[i]) - 1
                    torsions = np.append(torsions, torsion, axis=0)
                except ValueError:
                    pass
                angle = np.append(angle, dih)
                fi.next()
                pos = filter(None, re.split("[, \[\]]", fi.next().strip()))
                pos = [float(i) for i in pos]
                pos = np.asarray(pos).reshape((-1, 3))
                # convert angstroms to nanometers
                positions = np.append(positions, pos[np.newaxis]*0.1, axis=0)
            if not complete and optimized:
                # Incomplete file: fall back to the per-structure 'Energy' line.
                for line in fi:
                    if line.startswith('Energy'):
                        energy = filter(None, line.strip().split(' '))[-1]
                        # Convert Hartree to KJ/mol
                        energy = float(energy)*2625.5
                        qm = np.append(qm, energy)
                        break
            if line.startswith('Relative'):
                section = 'Energy'
                fi.next()
                continue
            if section == 'Energy':
                line = filter(None, line.strip().split(' '))
                if line != []:
                    dih = round(float(line[0]))
                    if dih in angle:
                        # Only save energies of optimized structures
                        qm_energies = np.append(qm_energies, float(line[-1]))
        # BUGFIX: `qm.size is not 0` compared object identity and only worked
        # by accident of CPython's small-int cache; use a value comparison.
        if qm.size != 0:
            qm = qm - min(qm)
            qm_energies = np.append(qm_energies, qm)
        fi.close()
        angles = np.append(angles, angle, axis=0)
    # BUGFIX: the original positional call passed `directions` into the
    # qm_energies slot and `qm_energies` into the steps slot of
    # QMDataBase.__init__; pass everything by keyword.
    return QMDataBase(positions=positions, topology=topology, structure=structure, torsions=torsions,
                      directions=directions, angles=angles, qm_energies=qm_energies)
def parse_psi4_out(oufiles_dir, structure, pattern="*.out"):
    """
    Parse psi4 out files from distributed torsion scan (there are many output files, one for each structure)

    :param oufiles_dir: str
        path to directory where the psi4 output files are
    :param structure: str
        path to psf, mol2 or pbd file of structure
    :param pattern: str
        pattern for psi4 output file. Default is *.out
    :return: QMDataBase
    """
    # Check extension of structure file
    if structure.endswith('psf'):
        topology = md.load_psf(structure)
        structure = CharmmPsfFile(structure)
    else:
        topology = md.load(structure).topology
        structure = parmed.load_file(structure)
    positions = np.ndarray((0, topology.n_atoms, 3))
    qm_energies = np.ndarray(0)
    torsions = np.ndarray((0, 4), dtype=int)
    angles = np.ndarray(0, dtype=float)
    optimized = np.ndarray(0, dtype=bool)
    # Map each scanned torsion (from the filename) to its list of output files.
    out_files = {}
    for path, subdir, files in os.walk(oufiles_dir):
        for name in files:
            if fnmatch(name, pattern):
                if name.startswith('timer'):
                    continue
                name_split = name.split('_')
                try:
                    torsion_angle = (name_split[1] + '_' + name_split[2] + '_' + name_split[3] + '_' + name_split[4])
                except IndexError:
                    warnings.warn("Do you only have one torsion scan? The output files will be treated as one scan")
                    torsion_angle = 'only_one_scan'
                out_files.setdefault(torsion_angle, [])
                # BUGFIX: do not rebind the os.walk loop variable `path` —
                # the original did, so every file after the first in a
                # directory was joined onto the previous *file* path.
                full_path = os.path.join(os.getcwd(), path, name)
                out_files[torsion_angle].append(full_path)
    # Sort files in increasing angles order for each torsion
    sorted_files = []
    dih_angles = []
    for tor in out_files:
        dih_angle = []
        for file in out_files[tor]:
            dih_angle.append(int(file.split('_')[-1].split('.')[0]))
        sorted_files.append([out_file for (angle, out_file) in sorted(zip(dih_angle, out_files[tor]))])
        dih_angle.sort()
        dih_angles.append(dih_angle)
    if not out_files:
        raise Exception("There are no psi4 output files. Did you choose the right directory?")
    # Parse files
    for f in itertools.chain.from_iterable(sorted_files):
        torsion = np.ndarray((1, 4), dtype=int)
        fi = open(f, 'r')
        for line in fi:
            if line.startswith('dih_string'):
                # Atom indices of the scanned dihedral (psi4 is 1-based).
                t = line.strip().split('"')[1].split(' ')[:4]
                for i in range(len(t)):
                    torsion[0][i] = int(t[i]) - 1
                torsions = np.append(torsions, torsion, axis=0)
        fi.close()
        optimizer = True
        log = Psi(f)
        data = log.parse()
        try:
            # Existence of optdone marks a finished geometry optimization.
            data.optdone
        except AttributeError:
            optimizer = False
            warnings.warn("Warning: Optimizer failed for {}".format(f))
        optimized = np.append(optimized, optimizer)
        # convert angstroms to nanometers; keep only the final geometry
        positions = np.append(positions, data.atomcoords[-1][np.newaxis]*0.1, axis=0)
        # Try MP2 energies. Otherwise take SCF energies
        try:
            qm_energy = convertor(data.mpenergies[-1], "eV", "kJmol-1")
        except AttributeError:
            try:
                qm_energy = convertor(np.array([data.scfenergies[-1]]), "eV", "kJmol-1")
            except AttributeError:
                warnings.warn("Warning: Check if the file terminated before completing SCF")
                qm_energy = np.array([np.nan])
        qm_energies = np.append(qm_energies, qm_energy, axis=0)
    # Subtract lowest energy to find relative energies
    qm_energies = qm_energies - min(qm_energies)
    angles = np.asarray(list(itertools.chain.from_iterable(dih_angles)))
    return QMDataBase(positions=positions, topology=topology, structure=structure, torsions=torsions, angles=angles,
                      qm_energies=qm_energies, optimized=optimized)
def parse_gauss(logfiles, structure):
    """ Parses Gaussian09 torsion-scan log file

    parameters
    ----------
    logfiles: str or list of str
        Name of Gaussian 09 torsion scan log file
    structure: charmm psf file

    returns
    -------
    QMDataBase
    """
    topology = md.load_psf(structure)
    structure = CharmmPsfFile(structure)
    positions = np.ndarray((0, topology.n_atoms, 3))
    qm_energies = np.ndarray(0)
    torsions = np.ndarray((0, 4), dtype=int)
    directions = np.ndarray(0, dtype=int)
    steps = np.ndarray((0, 3), dtype=int)
    if not isinstance(logfiles, list):
        logfiles = [logfiles]
    for file in logfiles:
        direction = np.ndarray(1)
        torsion = np.ndarray((1, 4), dtype=int)
        step = np.ndarray((0, 3), dtype=int)
        # Token positions on the "Step" line: scan point, step number, total.
        index = (2, 12, -1)
        log = Gaussian(file)
        data = log.parse()
        # convert angstroms to nanometers
        positions = np.append(positions, data.atomcoords*0.1, axis=0)
        # Only add qm energies for structures that converged (because cclib throws out those coords but not other info)
        qm_energies = np.append(qm_energies, (convertor(data.scfenergies[:len(data.atomcoords)], "eV", "kJmol-1") -
                                              min(convertor(data.scfenergies[:len(data.atomcoords)], "eV", "kJmol-1"))), axis=0)
        fi = open(file, 'r')
        for line in fi:
            if re.search(' Scan ', line):
                # Scanned dihedral atom numbers, e.g. "D(1,2,3,4)".
                t = line.split()[2].split(',')
                t[0] = t[0][-1]
                t[-1] = t[-1][0]
                for i in range(len(t)):
                    torsion[0][i] = (int(t[i]) - 1)
            if re.search('^ D ', line):
                # Scan direction from the sign of the step size.
                d = line.split()[-1]
                if d[0] == '-':
                    direction[0] = 0
                elif d[0] == '1':
                    direction[0] = 1
            if re.search('Step', line):
                try:
                    point = np.array(([int(line.rsplit()[j]) for j in index]))
                    point = point[np.newaxis, :]
                    step = np.append(step, point, axis=0)
                except (IndexError, ValueError):
                    # Not every "Step" line carries integer scan fields; the
                    # original bare `except:` also hid unrelated errors.
                    pass
        fi.close()
        # only add scan points from converged structures
        steps = np.append(steps, step[:len(data.atomcoords)], axis=0)
        for i in range(len(data.atomcoords)):
            torsions = np.append(torsions, torsion, axis=0)
            directions = np.append(directions, direction, axis=0)
        del log
        del data
    return QMDataBase(positions=positions, topology=topology, structure=structure, torsions=torsions, steps=steps,
                      qm_energies=qm_energies, directions=directions)
class QMDataBase(DataBase):
    """Container object for torsion scans.

    A QMDataBase should be constructed by loading Gaussian 09 torsion scan log
    files or psi4 output files from disk with an mdtraj.Topology object.

    Attributes
    ----------
    structure: ParmEd.Structure
    qm_energy: simtk.unit.Quantity((n_frames), unit=kilojoule/mole)
    mm_energy: simtk.unit.Quantity((n_frames), unit=kilojoule/mole)
    delta_energy: simtk.unit.Quantity((n_frames), unit=kilojoule/mole)
    torsion_index: {np.ndarray, shape(n_frames, 4)}
    step: {np.ndarray, shape(n_frame, 3)}
    direction: {np.ndarray, shape(n_frame)}. 0 = negative, 1 = positive
    """

    def __init__(self, positions, topology, structure, torsions, qm_energies, angles=None, steps=None, directions=None,
                 optimized=None, time=None):
        """Create new QMDataBase object"""
        super(QMDataBase, self).__init__(positions, topology, structure, time)
        self.qm_energy = Quantity(value=qm_energies, unit=kilojoules_per_mole)
        self.initial_mm = Quantity()
        self.delta_energy = Quantity()
        self.torsion_index = torsions
        self.direction = directions
        self.steps = steps
        self.angles = angles
        self.optimized = optimized
        # Precalculated torsion angles per dihedral type (see build_phis).
        self.phis = {}

    def compute_energy(self, param, offset=None, platform=None):
        """ Computes energy for a given structure with a given parameter set

        Parameters
        ----------
        param: parmed.charmm.CharmmParameterSet
        offset: simtk.unit.Quantity, optional
            energy offset added to the shifted MM energies
        platform: simtk.openmm.Platform to evaluate energy on (if None, will select automatically)
        """
        # Remember whether this is the first MM evaluation so the initial
        # energies can be kept for later comparison.
        save = not self._have_mm_energy
        # calculate energy
        super(QMDataBase, self).compute_energy(param, platform)
        # Subtract off minimum of mm_energy and add offset
        energy_unit = kilojoules_per_mole
        min_energy = self.mm_energy.min()
        self.mm_energy -= min_energy
        if save:
            self.initial_mm = deepcopy(self.mm_energy)
        if offset:
            offset = Quantity(value=offset.value, unit=energy_unit)
            self.mm_energy += offset
        self.delta_energy = (self.qm_energy - self.mm_energy)

    def to_dataframe(self, psi4=True):
        """ convert TorsionScanSet to pandas dataframe

        Parameters
        ----------
        psi4 : bool
            Flag if QM log file is from psi4. Default True.

        Returns
        -------
        pandas.DataFrame with one row per frame
        """
        if len(self.mm_energy) == self.n_frames and len(self.delta_energy) == self.n_frames:
            mm_energy = self.mm_energy
            delta_energy = self.delta_energy
        else:
            # MM energies not computed yet: fill with NaN placeholders.
            mm_energy = [float('nan') for _ in range(self.n_frames)]
            delta_energy = [float('nan') for _ in range(self.n_frames)]
        if psi4:
            data = [(self.torsion_index[i], self.angles[i], self.qm_energy[i], mm_energy[i], delta_energy[i],
                     self.optimized[i]) for i in range(self.n_frames)]
            columns = ['Torsion', 'Torsion angle', 'QM energy (KJ/mol)', 'MM energy (KJ/mol)', 'Delta energy (KJ/mol)',
                       'Optimized']
        else:
            data = [(self.torsion_index[i], self.direction[i], self.steps[i], self.qm_energy[i], mm_energy[i],
                     delta_energy[i]) for i in range(self.n_frames)]
            columns = ['Torsion', 'direction', 'steps', 'QM energy (KJ/mol)', 'MM energy (KJ/mol)',
                       'Delta energy (KJ/mol)']
        torsion_set = pd.DataFrame(data, columns=columns)
        return torsion_set

    def extract_geom_opt(self):
        """
        Extracts optimized geometry for Gaussian torsion scan.

        Returns
        -------
        New QMDataBase holding only the final frame of each scan point
        """
        key = []
        for i, step in enumerate(self.steps):
            try:
                # A change in scan-point number means frame i was the final
                # (optimized) geometry of its scan point.
                if step[1] != self.steps[i+1][1]:
                    key.append(i)
            except IndexError:
                # The last frame always closes the last scan point.
                key.append(i)
        new_torsionScanSet = self.slice(key)
        return new_torsionScanSet

    def remove_nonoptimized(self):
        """
        Remove configurations where optimizer failed

        Returns
        -------
        new QMDataBase with only optimized structures
        """
        key = [i for i, opt in enumerate(self.optimized) if opt]
        return self.slice(key)

    @property
    def _have_mm_energy(self):
        # BUGFIX: `len(...) is not 0` compared object identity and only worked
        # by accident of CPython's small-int cache; use a value comparison.
        return len(self.mm_energy) != 0

    def __getitem__(self, key):
        "Get a slice of this trajectory"
        return self.slice(key)

    def slice(self, key, copy=True):
        """Slice trajectory, by extracting one or more frames into a separate object

        This method can also be called using index bracket notation, i.e
        `traj[1] == traj.slice(1)`

        Parameters
        ----------
        key : {int, np.ndarray, slice}
            The slice to take. Can be either an int, a list of ints, or a slice
            object.
        copy : bool, default=True
            Copy the arrays after slicing. If you set this to false, then if
            you modify a slice, you'll modify the original array since they
            point to the same data.
        """
        xyz = self.xyz[key]
        time = self.time[key]
        torsions = self.torsion_index[key]
        qm_energy = self.qm_energy[key]
        # BUGFIX: the original only assigned these inside `if ... is not None`
        # / `if copy:` blocks, so copy=False (or a None attribute without
        # copy) raised NameError at construction time below.
        direction = self.direction[key] if self.direction is not None else None
        optimized = self.optimized[key] if self.optimized is not None else None
        steps = self.steps[key] if self.steps is not None else None
        angles = self.angles[key] if self.angles is not None else None
        topology = self._topology
        structure = self.structure
        unitcell_lengths, unitcell_angles = None, None
        if self.unitcell_angles is not None:
            unitcell_angles = self.unitcell_angles[key]
        if self.unitcell_lengths is not None:
            unitcell_lengths = self.unitcell_lengths[key]
        if copy:
            xyz = xyz.copy()
            time = time.copy()
            topology = deepcopy(self._topology)
            structure = deepcopy(self.structure)
            torsions = torsions.copy()
            qm_energy = qm_energy.copy()
            if direction is not None:
                direction = direction.copy()
            if optimized is not None:
                optimized = optimized.copy()
            if steps is not None:
                steps = steps.copy()
            if angles is not None:
                angles = angles.copy()
            if unitcell_angles is not None:
                unitcell_angles = unitcell_angles.copy()
            if unitcell_lengths is not None:
                unitcell_lengths = unitcell_lengths.copy()
        newtraj = self.__class__(
            positions=xyz, topology=topology, structure=structure, torsions=torsions, directions=direction, steps=steps,
            qm_energies=qm_energy, optimized=optimized, angles=angles, time=time)
        if self._rmsd_traces is not None:
            newtraj._rmsd_traces = np.array(self._rmsd_traces[key],
                                            ndmin=1, copy=True)
        return newtraj

    def build_phis(self, to_optimize=None):
        """
        Build a dictionary of phis for specified dihedrals in the molecules for all frames in the qm db.

        Parameters
        ----------
        to_optimize : list of dihedral types to calculate phis for
            Default is None. When None, it will calculate phis for all dihedral types in molecule
        """
        type_list = to_optimize
        if type_list is None:
            type_list = []
            for torsion_type in self.structure.dihedrals:
                t = (torsion_type.atom1.type, torsion_type.atom2.type, torsion_type.atom3.type, torsion_type.atom4.type)
                type_list.append(t)
        type_frequency = {}
        for t in type_list:
            # Canonicalize direction so A-B-C-D and D-C-B-A count as one type.
            if t[0] >= t[-1]:
                t = tuple(reversed(t))
            try:
                type_frequency[t] += 1
            except KeyError:
                type_frequency[t] = 1
        # sanity check
        if len(self.structure.dihedrals) != sum(type_frequency.values()):
            warnings.warn("type frequency values don't sum up to number of dihedral")
        self.phis = {t_type: [[] for i in range(self.n_frames)] for t_type in type_frequency}
        for i in range(self.n_frames):
            for dihedral in self.structure.dihedrals:
                atom = dihedral.atom1
                bond_atom = dihedral.atom2
                angle_atom = dihedral.atom3
                torsion_atom = dihedral.atom4
                torsion_type = (atom.type, bond_atom.type, angle_atom.type, torsion_atom.type)
                try:
                    self._append_phi(i, torsion_type, atom, bond_atom, angle_atom, torsion_atom)
                except KeyError:
                    warnings.warn("torsion {} is not in list of phis to precalculate but is in the structure. "
                                  "Are you sure you did not want to fit it?".format(torsion_type))
        # Convert to np.array
        for t in self.phis:
            self.phis[t] = np.array(self.phis[t])

    def _append_phi(self, i, torsion_type, atom, bond_atom, angle_atom, torsion_atom):
        """
        Helper function to try to append a calculated phi angle to existing list for a torsion type

        Parameters
        ----------
        i : int
            frame for which phi is being calculated.
        torsion_type : tuple of strings
        atom : parmed atom type
            first atom in dihedral
        bond_atom : parmed atomtype
            second atom in dihedral
        angle_atom : parmed atomtype
            third atom in dihedral
        torsion_atom : parmed atomtype
            fourth atom in dihedral
        """
        try:
            self.phis[torsion_type][i].append(self._cartesian_to_phi(atom, bond_atom, angle_atom, torsion_atom,
                                                                     i))
        except KeyError:
            # The canonical key may be stored in the reversed direction.
            self.phis[tuple(reversed(torsion_type))][i].append(self._cartesian_to_phi(atom, bond_atom,
                                                                                      angle_atom, torsion_atom, i))

    def _cartesian_to_phi(self, atom, bond_atom, angle_atom, torsion_atom, i):
        """
        measures torsion angle for a specific torsion

        Parameters
        ----------
        atom : parmed atom
        bond_atom : parmed atom
        angle_atom : parmed atom
        torsion_atom : parmed atom
        i : int
            index for configuration (frame)

        Returns
        -------
        phi: float;
            torsion angle in radians
        """
        atom1_coords = self.positions[i][atom.idx]
        bond_coords = self.positions[i][bond_atom.idx]
        angle_coords = self.positions[i][angle_atom.idx]
        torsion_coords = self.positions[i][torsion_atom.idx]
        a = atom1_coords - bond_coords
        b = angle_coords - bond_coords
        # 3-4 bond
        c = angle_coords - torsion_coords
        a_u = a / np.linalg.norm(a)
        b_u = b / np.linalg.norm(b)
        c_u = c / np.linalg.norm(c)
        # Normals of the two planes spanned by (1-2, 2-3) and (2-3, 3-4).
        plane1 = np.cross(a_u, b_u)
        plane2 = np.cross(b_u, c_u)
        # (the original computed this line twice; once is enough)
        cos_phi = np.dot(plane1, plane2) / (np.linalg.norm(plane1)*np.linalg.norm(plane2))
        # Clamp rounding error so arccos stays in its domain.
        if cos_phi < -1.0:
            cos_phi = -1.0
        elif cos_phi > 1.0:
            cos_phi = 1.0
        phi = np.arccos(cos_phi)
        # Recover the sign of the dihedral from the orientation of plane2.
        if np.dot(a, plane2) <= 0:
            phi = -phi
        return phi
#def Fourier_series(self, K, n, phase): | {
"repo_name": "ChayaSt/torsionfit",
"path": "torsionfit/database/qmdatabase.py",
"copies": "4",
"size": "27227",
"license": "mit",
"hash": -3142209354035950600,
"line_mean": 36.4010989011,
"line_max": 128,
"alpha_frac": 0.5479487274,
"autogenerated": false,
"ratio": 3.927726485862666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6475675213262666,
"avg_score": null,
"num_lines": null
} |
import sys
import os
import subprocess
class ADB:
    # Thin Python wrapper around the Android Debug Bridge (adb) command-line tool.
    PYADB_VERSION = "0.1.4"
    # Result of the most recent adb invocation: raw stdout, raw stderr, exit code.
    _output = None
    _error = None
    _return = 0
    # Known device serials and the currently targeted device (None = unset).
    _devices = None
    _target = None
    # reboot modes
    REBOOT_RECOVERY = 1
    REBOOT_BOOTLOADER = 2
    # default TCP/IP port
    DEFAULT_TCP_PORT = 5555
    # default TCP/IP host
    DEFAULT_TCP_HOST = "localhost"
def pyadb_version(self):
return self.PYADB_VERSION
    def __init__(self, adb_path=None):
        """:param adb_path: filesystem path to the adb binary; None until set."""
        self.__adb_path = adb_path
    def __clean__(self):
        """Reset the stored result of the previous adb command."""
        self._output = None
        self._error = None
        self._return = 0
def __parse_output__(self, outstr):
ret = None
if len(outstr) > 0:
ret = outstr.splitlines()
return ret
def __build_command__(self, cmd):
ret = None
if self._devices is not None and len(self._devices) > 1 and self._target is None:
self._error = "Must set target device first"
self._return = 1
return ret
# Modified function to directly return command set for Popen
#
# Unfortunately, there is something odd going on and the argument list is not being properly
# converted to a string on the windows 7 test systems. To accomodate, this block explitely
# detects windows vs. non-windows and builds the OS dependent command output
#
# Command in 'list' format: Thanks to Gil Rozenberg for reporting the issue
#
if sys.platform.startswith('win'):
ret = self.__adb_path + " "
if self._target is not None:
ret += "-s " + self._target + " "
if type(cmd) is list:
ret += ' '.join(cmd)
else:
ret += cmd
else:
ret = [self.__adb_path]
if self._target is not None:
ret += ["-s", self._target]
if type(cmd) is list:
for i in cmd:
ret.append(i)
else:
ret += [cmd]
return ret
def get_output(self):
return self._output
def get_error(self):
return self._error
def get_return_code(self):
return self._return
def last_failed(self):
"""
Did the last command fail?
"""
if self._output is None and self._error is not None and self._return:
return True
return False
def run_cmd(self, cmd, run_in_background=False):
"""
Runs a command by using adb tool ($ adb <cmd>)
"""
self.__clean__()
if self.__adb_path is None:
self._error = "ADB path not set"
self._return = 1
return
# For compat of windows
cmd_list = self.__build_command__(cmd)
if run_in_background is False:
adb_proc = subprocess.Popen(
cmd_list,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False
)
else:
subprocess.Popen(
cmd_list,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False
)
return
(self._output, self._error) = adb_proc.communicate()
self._return = adb_proc.returncode
if len(self._output) == 0:
self._output = None
if len(self._error) == 0:
self._error = None
return
def get_version(self):
"""
Returns ADB tool version
adb version
"""
self.run_cmd("version")
ret = self._output.split()[-1:][0]
return ret
def check_path(self):
"""
Intuitive way to verify the ADB path
"""
if self.get_version() is None:
return False
return True
def set_adb_path(self, adb_path):
"""
Sets ADB tool absolute path
"""
if os.path.isfile(adb_path) is False:
return False
self.__adb_path = adb_path
return True
def get_adb_path(self):
"""
Returns ADB tool path
"""
return self.__adb_path
def start_server(self):
"""
Starts ADB server
adb start-server
"""
self.__clean__()
self.run_cmd('start-server')
return self._output
def kill_server(self):
"""
Kills ADB server
adb kill-server
"""
self.__clean__()
self.run_cmd('kill-server')
def restart_server(self):
"""
Restarts ADB server
"""
self.kill_server()
return self.start_server()
def restore_file(self, file_name):
"""
Restore device contents from the <file> backup archive
adb restore <file>
"""
self.__clean__()
self.run_cmd(['restore', file_name])
return self._output
def wait_for_device(self):
"""
Blocks until device is online
adb wait-for-device
"""
self.__clean__()
self.run_cmd('wait-for-device')
return self._output
def get_help(self):
"""
Returns ADB help
adb help
"""
self.__clean__()
self.run_cmd('help')
return self._output
def get_devices(self):
"""
Returns a list of connected devices
adb devices
"""
error = 0
self.run_cmd('devices')
if self._error is not None:
return ''
self._devices = self._output.decode().partition('\n')[2].replace('device', '').split()
if self._devices[1:] == ['no', 'permissions']:
error = 2
self._devices = None
return error, self._devices
def set_target_device(self, device):
"""
Select the device to work with
"""
self.__clean__()
if device is None or device not in self._devices:
self._error = 'Must get device list first'
self._return = 1
return False
self._target = device
return True
def get_target_device(self):
"""
Returns the selected device to work with
"""
return self._target
def get_state(self):
"""
Get ADB state
adb get-state
"""
self.__clean__()
self.run_cmd('get-state')
return self._output
def get_serialno(self):
"""
Get serialno from target device
adb get-serialno
"""
self.__clean__()
self.run_cmd('get-serialno')
return self._output
def reboot_device(self, mode):
"""
Reboot the target device
adb reboot recovery/bootloader
"""
self.__clean__()
if mode not in (self.REBOOT_RECOVERY, self.REBOOT_BOOTLOADER):
self._error = "mode must be REBOOT_RECOVERY/REBOOT_BOOTLOADER"
self._return = 1
return self._output
self.run_cmd(["reboot", "%s" % "recovery" if mode == self.REBOOT_RECOVERY else "bootloader"])
return self._output
def set_adb_root(self):
"""
restarts the adbd daemon with root permissions
adb root
"""
self.__clean__()
self.run_cmd('root')
return self._output
def set_system_rw(self):
"""
Mounts /system as rw
adb remount
"""
self.__clean__()
self.run_cmd("remount")
return self._output
def get_remote_file(self, remote, local):
"""
Pulls a remote file
adb pull remote local
"""
self.__clean__()
self.run_cmd(['pull', remote, local])
if self._error is not None and "bytes in" in self._error.decode('utf-8'):
self._output = self._error
self._error = None
return self._output
def push_local_file(self, local, remote):
"""
Push a local file
adb push local remote
"""
self.__clean__()
self.run_cmd(['push', local, remote])
return self._output
def shell_command(self, cmd, run_in_background=False):
"""
Executes a shell command
adb shell <cmd>
"""
self.__clean__()
self.run_cmd(['shell', cmd], run_in_background)
return self._output
def listen_usb(self):
"""
Restarts the adbd daemon listening on USB
adb usb
"""
self.__clean__()
self.run_cmd("usb")
return self._output
def listen_tcp(self, port=DEFAULT_TCP_PORT):
"""
Restarts the adbd daemon listening on the specified port
adb tcpip <port>
"""
self.__clean__()
self.run_cmd(['tcpip', port])
return self._output
def get_bugreport(self):
"""
Return all information from the device that should be included in a bug report
adb bugreport
"""
self.__clean__()
self.run_cmd("bugreport")
return self._output
def get_jdwp(self):
"""
List PIDs of processes hosting a JDWP transport
adb jdwp
"""
self.__clean__()
self.run_cmd("jdwp")
return self._output
def get_logcat(self, lcfilter=""):
"""
View device log
adb logcat <filter>
"""
self.__clean__()
self.run_cmd(['logcat', lcfilter])
return self._output
def run_emulator(self, cmd=""):
"""
Run emulator console command
"""
self.__clean__()
self.run_cmd(['emu', cmd])
return self._output
def connect_remote(self, host=DEFAULT_TCP_HOST, port=DEFAULT_TCP_PORT):
"""
Connect to a device via TCP/IP
adb connect host:port
"""
self.__clean__()
self.run_cmd(['connect', "%s:%s" % (host, port)])
return self._output
def disconnect_remote(self, host=DEFAULT_TCP_HOST, port=DEFAULT_TCP_PORT):
"""
Disconnect from a TCP/IP device
adb disconnect host:port
"""
self.__clean__()
self.run_cmd(['disconnect', "%s:%s" % (host, port)])
return self._output
def ppp_over_usb(self, tty=None, params=""):
"""
Run PPP over USB
adb ppp <tty> <params>
"""
self.__clean__()
if tty is None:
return self._output
cmd = ["ppp", tty]
if params != "":
cmd += params
self.run_cmd(cmd)
return self._output
def sync_directory(self, directory=""):
"""
Copy host->device only if changed (-l means list but don't copy)
adb sync <dir>
"""
self.__clean__()
self.run_cmd(['sync', directory])
return self._output
def forward_socket(self, local=None, remote=None):
"""
Forward socket connections
adb forward <local> <remote>
"""
self.__clean__()
if local is None or remote is None:
return self._output
self.run_cmd(['forward', local, remote])
return self._output
def uninstall(self, package=None, keepdata=False):
"""
Remove this app package from the device
adb uninstall [-k] package
"""
self.__clean__()
if package is None:
return self._output
cmd = ['uninstall', "%s" % (package if keepdata is True else "-k %s" % package)]
self.run_cmd(cmd)
return self._output
def install(self, fwdlock=False, reinstall=False, sdcard=False, pkgapp=None):
"""
Push this package file to the device and install it
adb install [-l] [-r] [-s] <file>
-l -> forward-lock the app
-r -> reinstall the app, keeping its data
-s -> install on sdcard instead of internal storage
"""
self.__clean__()
if pkgapp is None:
return self._output
cmd = "install "
if fwdlock is True:
cmd += "-l "
if reinstall is True:
cmd += "-r "
if sdcard is True:
cmd += "-s "
self.run_cmd([cmd, pkgapp])
return self._output
def find_binary(self, name=None):
"""
Look for a binary file on the device
"""
self.shell_command(['which', name])
if self._output is None: # not found
self._error = "'%s' was not found" % name
elif self._output.strip() == "which: not found": # 'which' binary not available
self._output = None
self._error = "which binary not found"
else:
self._output = self._output.strip()
return self._output
| {
"repo_name": "casschin/pyadb",
"path": "pyadb/adb.py",
"copies": "1",
"size": "13746",
"license": "bsd-2-clause",
"hash": 3745135573042152400,
"line_mean": 25.3824701195,
"line_max": 101,
"alpha_frac": 0.4898879674,
"autogenerated": false,
"ratio": 4.425627817128139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004393166412806414,
"num_lines": 502
} |
try:
import sys
import os
import subprocess
except ImportError,e:
# should never be reached
print "[f] Required module missing. %s" % e.args[0]
sys.exit(-1)
class ADB():
    """Wrapper around the Android Debug Bridge (adb) tool (Python 2 era).

    Each command is executed through subprocess; the output/error/return
    code of the last invocation are cached on the instance and exposed via
    get_output()/get_error()/get_return_code().
    """

    PYADB_VERSION = "0.1.4"

    # Cached results of the most recent adb invocation.
    __adb_path = None
    __output = None
    __error = None
    __return = 0
    # Known devices (populated by get_devices()) and the selected serial.
    __devices = None
    __target = None

    # reboot modes
    REBOOT_RECOVERY = 1
    REBOOT_BOOTLOADER = 2
    # default TCP/IP port
    DEFAULT_TCP_PORT = 5555
    # default TCP/IP host
    DEFAULT_TCP_HOST = "localhost"

    def pyadb_version(self):
        """Return the version of this wrapper (not of the adb binary)."""
        return self.PYADB_VERSION

    def __init__(self, adb_path=None):
        # Path to the adb binary; may also be set later via set_adb_path().
        self.__adb_path = adb_path

    def __clean__(self):
        """Reset the cached output/error/return code of the last command."""
        self.__output = None
        self.__error = None
        self.__return = 0

    def __parse_output__(self, outstr):
        """Split raw command output into a list of lines, or None if empty."""
        ret = None
        if outstr and len(outstr) > 0:
            ret = outstr.splitlines()
        return ret

    def __build_command__(self, cmd):
        """Build the subprocess argument for ``adb [-s target] <cmd>``.

        Returns a single string on Windows and an argument list elsewhere
        (Windows 7 systems were observed to mishandle the list form —
        thanks to Gil Rozenberg for reporting the issue).  Returns None and
        records the error state when several devices are connected but no
        target has been selected.
        """
        if self.__devices is not None and len(self.__devices) > 1 and self.__target is None:
            self.__error = "Must set target device first"
            self.__return = 1
            return None
        if sys.platform.startswith('win'):
            ret = self.__adb_path + " "
            if self.__target is not None:
                ret += "-s " + self.__target + " "
            if type(cmd) == type([]):
                ret += ' '.join(cmd)
            else:
                ret += cmd
        else:
            ret = [self.__adb_path]
            if self.__target is not None:
                ret += ["-s", self.__target]
            if type(cmd) == type([]):
                for i in cmd:
                    ret.append(i)
            else:
                ret += [cmd]
        return ret

    def get_output(self):
        """Return stdout of the last command, or None if empty."""
        return self.__output

    def get_error(self):
        """Return stderr of the last command, or None if empty."""
        return self.__error

    def get_return_code(self):
        """Return the exit status of the last command."""
        return self.__return

    def lastFailed(self):
        """
        Did the last command fail?
        """
        if self.__output is None and self.__error is not None and self.__return:
            return True
        return False

    def run_cmd(self, cmd):
        """
        Runs a command by using adb tool ($ adb <cmd>)
        """
        self.__clean__()
        if self.__adb_path is None:
            self.__error = "ADB path not set"
            self.__return = 1
            return
        # For compat of windows
        cmd_list = self.__build_command__(cmd)
        if cmd_list is None:
            # __build_command__ already recorded the error state.
            return
        try:
            adb_proc = subprocess.Popen(cmd_list, stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE, shell=False)
            (self.__output, self.__error) = adb_proc.communicate()
            self.__return = adb_proc.returncode
            if len(self.__output) == 0:
                self.__output = None
            if len(self.__error) == 0:
                self.__error = None
        except Exception as err:
            # Record the failure instead of silently swallowing it with a
            # bare "except: pass" (which hid e.g. a missing binary).
            self.__error = str(err)
            self.__return = 1
        return

    def get_version(self):
        """
        Returns ADB tool version
        adb version
        """
        self.run_cmd("version")
        try:
            ret = self.__output.split()[-1:][0]
        except Exception:
            ret = None
        return ret

    def check_path(self):
        """
        Intuitive way to verify the ADB path
        """
        if self.get_version() is None:
            return False
        return True

    def set_adb_path(self, adb_path):
        """
        Sets ADB tool absolute path
        """
        if os.path.isfile(adb_path) is False:
            return False
        self.__adb_path = adb_path
        return True

    def get_adb_path(self):
        """
        Returns ADB tool path
        """
        return self.__adb_path

    def start_server(self):
        """
        Starts ADB server
        adb start-server
        """
        self.__clean__()
        self.run_cmd('start-server')
        return self.__output

    def kill_server(self):
        """
        Kills ADB server
        adb kill-server
        """
        self.__clean__()
        self.run_cmd('kill-server')

    def restart_server(self):
        """
        Restarts ADB server
        """
        self.kill_server()
        return self.start_server()

    def restore_file(self, file_name):
        """
        Restore device contents from the <file> backup archive
        adb restore <file>
        """
        self.__clean__()
        self.run_cmd(['restore', file_name])
        return self.__output

    def wait_for_device(self):
        """
        Blocks until device is online
        adb wait-for-device
        """
        self.__clean__()
        self.run_cmd('wait-for-device')
        return self.__output

    def get_help(self):
        """
        Returns ADB help
        adb help
        """
        self.__clean__()
        self.run_cmd('help')
        return self.__output

    def get_devices(self):
        """
        Returns a list of connected devices
        adb devices

        Returns (error, devices): error is 0 on success, 1 when the output
        could not be parsed, 2 when adb reports missing permissions.
        """
        error = 0
        self.run_cmd("devices")
        if self.__error is not None:
            # NOTE(review): historical API quirk — returns '' (not a tuple)
            # on error; kept for backward compatibility.
            return ''
        try:
            self.__devices = self.__output.partition('\n')[2].replace('device', '').split()
            if self.__devices[1:] == ['no', 'permissions']:
                error = 2
                self.__devices = None
        except Exception:
            self.__devices = None
            error = 1
        return (error, self.__devices)

    def set_target_device(self, device):
        """
        Select the device to work with
        """
        self.__clean__()
        if device is None or self.__devices is None or device not in self.__devices:
            self.__error = 'Must get device list first'
            self.__return = 1
            return False
        self.__target = device
        return True

    def get_target_device(self):
        """
        Returns the selected device to work with
        """
        return self.__target

    def get_state(self):
        """
        Get ADB state
        adb get-state
        """
        self.__clean__()
        self.run_cmd('get-state')
        return self.__output

    def get_serialno(self):
        """
        Get serialno from target device
        adb get-serialno
        """
        self.__clean__()
        self.run_cmd('get-serialno')
        return self.__output

    def reboot_device(self, mode):
        """
        Reboot the target device
        adb reboot recovery/bootloader
        """
        self.__clean__()
        if mode not in (self.REBOOT_RECOVERY, self.REBOOT_BOOTLOADER):
            self.__error = "mode must be REBOOT_RECOVERY/REBOOT_BOOTLOADER"
            self.__return = 1
            return self.__output
        self.run_cmd(["reboot", "%s" % "recovery" if mode == self.REBOOT_RECOVERY else "bootloader"])
        return self.__output

    def set_adb_root(self):
        """
        restarts the adbd daemon with root permissions
        adb root
        """
        self.__clean__()
        self.run_cmd('root')
        return self.__output

    def set_system_rw(self):
        """
        Mounts /system as rw
        adb remount
        """
        self.__clean__()
        self.run_cmd("remount")
        return self.__output

    def get_remote_file(self, remote, local):
        """
        Pulls a remote file
        adb pull remote local
        """
        self.__clean__()
        self.run_cmd(['pull', remote, local])
        # adb reports transfer statistics on stderr; treat that as success.
        if self.__error is not None and "bytes in" in self.__error:
            self.__output = self.__error
            self.__error = None
        return self.__output

    def push_local_file(self, local, remote):
        """
        Push a local file
        adb push local remote
        """
        self.__clean__()
        self.run_cmd(['push', local, remote])
        return self.__output

    def shell_command(self, cmd):
        """
        Executes a shell command
        adb shell <cmd>
        """
        self.__clean__()
        # Flatten list arguments so they do not end up nested inside the
        # subprocess argv (Popen rejects a list within the argv list).
        if type(cmd) == type([]):
            self.run_cmd(['shell'] + cmd)
        else:
            self.run_cmd(['shell', cmd])
        return self.__output

    def listen_usb(self):
        """
        Restarts the adbd daemon listening on USB
        adb usb
        """
        self.__clean__()
        self.run_cmd("usb")
        return self.__output

    def listen_tcp(self, port=DEFAULT_TCP_PORT):
        """
        Restarts the adbd daemon listening on the specified port
        adb tcpip <port>
        """
        self.__clean__()
        # str() so an int port does not break argv construction.
        self.run_cmd(['tcpip', str(port)])
        return self.__output

    def get_bugreport(self):
        """
        Return all information from the device that should be included in a bug report
        adb bugreport
        """
        self.__clean__()
        self.run_cmd("bugreport")
        return self.__output

    def get_jdwp(self):
        """
        List PIDs of processes hosting a JDWP transport
        adb jdwp
        """
        self.__clean__()
        self.run_cmd("jdwp")
        return self.__output

    def get_logcat(self, lcfilter=""):
        """
        View device log
        adb logcat <filter>
        """
        self.__clean__()
        self.run_cmd(['logcat', lcfilter])
        return self.__output

    def run_emulator(self, cmd=""):
        """
        Run emulator console command
        """
        self.__clean__()
        self.run_cmd(['emu', cmd])
        return self.__output

    def connect_remote(self, host=DEFAULT_TCP_HOST, port=DEFAULT_TCP_PORT):
        """
        Connect to a device via TCP/IP
        adb connect host:port
        """
        self.__clean__()
        self.run_cmd(['connect', "%s:%s" % (host, port)])
        return self.__output

    def disconnect_remote(self, host=DEFAULT_TCP_HOST, port=DEFAULT_TCP_PORT):
        """
        Disconnect from a TCP/IP device
        adb disconnect host:port
        """
        self.__clean__()
        self.run_cmd(['disconnect', "%s:%s" % (host, port)])
        return self.__output

    def ppp_over_usb(self, tty=None, params=""):
        """
        Run PPP over USB
        adb ppp <tty> <params>
        """
        self.__clean__()
        if tty is None:
            return self.__output
        cmd = ["ppp", tty]
        if params != "":
            # Split so each option becomes its own argv element; "cmd +=
            # params" extended the list character by character.
            cmd += params.split()
        self.run_cmd(cmd)
        return self.__output

    def sync_directory(self, directory=""):
        """
        Copy host->device only if changed (-l means list but don't copy)
        adb sync <dir>
        """
        self.__clean__()
        self.run_cmd(['sync', directory])
        return self.__output

    def forward_socket(self, local=None, remote=None):
        """
        Forward socket connections
        adb forward <local> <remote>
        """
        self.__clean__()
        if local is None or remote is None:
            return self.__output
        self.run_cmd(['forward', local, remote])
        return self.__output

    def uninstall(self, package=None, keepdata=False):
        """
        Remove this app package from the device
        adb uninstall [-k] package
        """
        self.__clean__()
        if package is None:
            return self.__output
        cmd = ['uninstall']
        if keepdata:
            # -k keeps the app's data and cache; the previous code added it
            # when keepdata was False (inverted) and fused it with the
            # package name into a single argv element.
            cmd.append('-k')
        cmd.append(package)
        self.run_cmd(cmd)
        return self.__output

    def install(self, fwdlock=False, reinstall=False, sdcard=False, pkgapp=None):
        """
        Push this package file to the device and install it
        adb install [-l] [-r] [-s] <file>
        -l -> forward-lock the app
        -r -> reinstall the app, keeping its data
        -s -> install on sdcard instead of internal storage
        """
        self.__clean__()
        if pkgapp is None:
            return self.__output
        # Build a proper argv list; the previous code passed all flags as a
        # single "install -l -r " string element.
        cmd = ['install']
        if fwdlock is True:
            cmd.append('-l')
        if reinstall is True:
            cmd.append('-r')
        if sdcard is True:
            cmd.append('-s')
        cmd.append(pkgapp)
        self.run_cmd(cmd)
        return self.__output

    def find_binary(self, name=None):
        """
        Look for a binary file on the device
        """
        self.shell_command(['which', name])
        if self.__output is None:  # not found
            self.__error = "'%s' was not found" % name
        elif self.__output.strip() == "which: not found":  # 'which' binary not available
            self.__output = None
            self.__error = "which binary not found"
        else:
            self.__output = self.__output.strip()
        return self.__output
| {
"repo_name": "ohyeah521/pyadb",
"path": "pyadb/adb.py",
"copies": "1",
"size": "13863",
"license": "bsd-2-clause",
"hash": -1161467678908281000,
"line_mean": 25.505952381,
"line_max": 101,
"alpha_frac": 0.4823631249,
"autogenerated": false,
"ratio": 4.441845562319769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009248461010249372,
"num_lines": 504
} |
import logging
import sys
import textwrap
import time
from logging.handlers import RotatingFileHandler
from bson.json_util import DEFAULT_JSON_OPTIONS
from pymongo import MongoClient, errors
from bson import json_util
from datetime import datetime
from elasticsearch import Elasticsearch
from pymongo.errors import CursorNotFound, AutoReconnect
from ssl import SSLWantReadError
# Logging config
def set_logging():
    """Configure console logging plus a rotating file log for this script."""
    global logger
    fmt = "%(asctime)s - %(lineno)d - %(funcName)s - %(levelname)s - %(message)s"
    logging.basicConfig(format=fmt, level=logging.INFO)
    # Represent BSON datetimes in ISO-8601 when dumping to JSON.
    DEFAULT_JSON_OPTIONS.datetime_representation = 2
    logger = logging.getLogger("yeti_to_elastic")
    # You may change here the path for the log file
    file_handler = RotatingFileHandler("yeti_to_elastic.log", maxBytes=20000, backupCount=5)
    file_handler.setFormatter(logging.Formatter(fmt))
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
class YetiFeedSender(object):
    def __init__(
        self,
        elastic_index,
        excluded_feeds=None,
        mongo_client=None,
        mongo_hostname="localhost",
        elastic_instance=None,
        elastic_hostname=None,
        elastic_port=9200,
        elastic_user=None,
        elastic_pass=None,
        elastic_use_ssl=None,
        elastic_verify_certs=None,
    ):
        """
        This class connects to YETI's MongoDB and to Elasticsearch.
        It parses the observable collection in YETI's MongoDB and sends to Elasticsearch.
        :param elastic_index: Elastic Stack index name.
        :param excluded_feeds: Set that includes feeds to exclude from indexing.
        :param mongo_client: Mongodb client.
        :param mongo_hostname: Mongodb hostname.
        :param elastic_instance: Elastic Stack connection instance.
        :param elastic_hostname: Elastic Stack hostname.
        :param elastic_port: Elastic Stack indexing port.
        :param elastic_user: Elastic Stack user.
        :param elastic_pass: Elastic Stack password.
        :param elastic_use_ssl: Boolean. Flag to determine if the connection to Elastic Stack should use SSL.
        :param elastic_verify_certs: Boolean. Flag to determine if the connection to Elastic Stack should verify the certificate.
        """
        self.elastic_index = elastic_index
        # Default to a fresh set per instance; `excluded_feeds=set()` as a
        # default argument would be shared across calls (mutable default).
        self.excluded_feeds = set() if excluded_feeds is None else excluded_feeds
        if mongo_client:
            self.mongo_client = mongo_client
        else:
            self.create_mongo_connection(mongo_hostname)
        if elastic_instance:
            self.elastic_instance = elastic_instance
        else:
            self.create_elastic_connection(
                elastic_hostname,
                elastic_port,
                use_ssl=elastic_use_ssl,
                verify_certs=elastic_verify_certs,
                username=elastic_user,
                password=elastic_pass,
            )

    def create_mongo_connection(self, hostname="localhost"):
        """
        Creates a connection to YETI's MongoDB.
        :param hostname: Hostname to connect to. Default is "localhost"
        :return: None
        """
        try:
            # Try connecting to MongoDB for 10ms
            self.mongo_client = MongoClient(
                "mongodb://{}:27017/".format(hostname), serverSelectionTimeoutMS=10
            )
            # Force a round-trip so a bad host fails here, not later.
            self.mongo_client.server_info()
        except errors.ServerSelectionTimeoutError as mongo_conn_err:
            logger.exception(
                (
                    "MongoDB connection issue occurred. "
                    "Error message: " + str(mongo_conn_err)
                )
            )
            sys.exit(1)

    def create_elastic_connection(
        self,
        hostname,
        port,
        use_ssl=True,
        verify_certs=False,
        username=None,
        password=None,
    ):
        """
        Creates an Elasticsearch connection.
        :param hostname: Elasticsearch hostname/ip address
        :param port: Elasticsearch indexing port
        :param use_ssl: Is the server uses ssl or not
        :param verify_certs: Should the request verify the certification
        :param username: Username in order to connect to Elasticsearch
        :param password: Password in order to connect to Elasticsearch
        :return: None
        """
        # Build the optional keyword arguments once instead of four
        # near-identical constructor calls.
        kwargs = {"hosts": [{"host": hostname, "port": port}]}
        if username and password:
            kwargs["http_auth"] = (username, password)
        if use_ssl:
            kwargs["use_ssl"] = use_ssl
            kwargs["verify_certs"] = verify_certs
        self.elastic_instance = Elasticsearch(**kwargs)
        # Check if there is a connection to elastic
        if not self.elastic_instance.ping():
            logger.error("Elastic Stack connection issue occurred.")
            raise ConnectionError

    @staticmethod
    def format_observable(observable, excluded_feeds=()):
        """
        Formats an observable to Elasticsearch accepted structure
        :param observable: observable dict
        :param excluded_feeds: excluded_feeds set
        :return: deserialized_json str ("" when every context feed is excluded)
        """
        formatted_dict = dict()
        formatted_dict["@timestamp"] = datetime.now().isoformat()
        # Loop observable dictionary
        for key in observable.keys():
            if key == "_id":
                formatted_dict["id_generation_time"] = observable[
                    key
                ].generation_time.isoformat()
            elif key == "parsed_url":
                for parsed_url_key in observable[key].keys():
                    formatted_dict["parsed_url.{}".format(parsed_url_key)] = observable[
                        key
                    ][parsed_url_key]
            elif key == "created":
                formatted_dict["created"] = observable[key].isoformat()
            elif key == "_cls":
                formatted_dict["cls"] = observable[key]
            elif key == "tags":
                # Convert the tag timestamps to ISO strings in place.
                for tag in observable[key]:
                    tag["first_seen"] = tag["first_seen"].isoformat()
                    tag["last_seen"] = tag["last_seen"].isoformat()
                formatted_dict[key] = observable[key]
            elif key == "last_tagged":
                formatted_dict[key] = observable[key].isoformat()
            elif key == "context":
                # Bug fix: the original removed entries from the list while
                # iterating it, which silently skipped the entry following
                # each removal. Build a filtered copy instead.
                kept = [
                    entry
                    for entry in observable[key]
                    if entry["source"] not in excluded_feeds
                ]
                observable[key] = kept
                # If we excluded all feeds, return an empty string
                if not kept:
                    logger.warning(
                        "The value: {} from the date {} was not indexed".format(
                            observable["value"], formatted_dict.get("created")
                        )
                    )
                    return ""
                formatted_dict[key] = kept
            else:
                # Check for doc values of FILES.
                # If it's a FILE, remove the "FILE:" prefix from the value
                if key == "value" and str(observable[key]).startswith("FILE:"):
                    observable[key] = observable[key][5:]
                formatted_dict[key] = observable[key]
        # Format the dict to json. Supports mongodb structure representation
        json_to_elastic = json_util.dumps(formatted_dict)
        return json_to_elastic

    def extract_and_send(self, elastic_index=None):
        """
        This method extracts data out of the mongodb and sends in to elasticsearch.
        :param elastic_index: Used if there is a need to change the elastic index
        :return: None
        """
        if elastic_index:
            self.elastic_index = elastic_index
        db = self.mongo_client.yeti
        observables = db.observable
        processed = 0
        while True:
            try:
                # Loop observables; skip() resumes after a lost cursor.
                for observable in observables.find(no_cursor_timeout=True).skip(
                    processed
                ):
                    processed += 1
                    json_to_index = self.format_observable(
                        observable, excluded_feeds=self.excluded_feeds
                    )
                    # If the json to index is empty, don't index
                    if not json_to_index:
                        continue
                    # Bug fix: response was initialized to "" once outside
                    # the loop, so a failed first index() call crashed on
                    # str.get and also leaked the previous doc's result.
                    response = {}
                    try:
                        # Index to elasticsearch
                        response = self.elastic_instance.index(
                            index=self.elastic_index,
                            doc_type="yeti_feed",
                            id=observable.get("_id"),
                            body=json_to_index,
                            request_timeout=30,
                        )
                    except TypeError as type_error:
                        logger.warning(type_error)
                    except SSLWantReadError as ssl_error:
                        logger.error(ssl_error)
                    except Exception as e:
                        logger.error(str(e))
                    if response.get("result") == "created":
                        logger.info(
                            "Created {} in index {} - Processed: {}".format(
                                response.get("_id"), self.elastic_index, processed
                            )
                        )
                    elif response.get("result") == "updated":
                        logger.info(
                            "Updated {} in index {} - Processed: {}".format(
                                response.get("_id"), self.elastic_index, processed
                            )
                        )
                    else:
                        logger.warning(
                            "Failed to index {} in index {} - Processed: {}".format(
                                response.get("_id"), self.elastic_index, processed
                            )
                        )
                logger.info("Finished processing all events. Sleeping for 30 seconds.")
                time.sleep(30)
            except CursorNotFound:
                logger.warning("Lost cursor. Retry with skip")
            except AutoReconnect as e:
                logger.error("Connection Error: " + str(e))
            except Exception as e:
                logger.error("Unknown Error: {}".format(str(e)))
def main():
    """Parse CLI arguments and run the YETI -> Elasticsearch feed sender."""
    import argparse

    set_logging()
    parser = argparse.ArgumentParser(
        prog="YetiToElastic",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent(
            """\
            Example:
            sender = YetiFeedSender("yeti-feeds",
                           elastic_hostname="<elastic_hostname>"
                           excluded_feeds=("AsproxTracker", "UrlHaus"),
                           elastic_user="ChenErlich",
                           elastic_pass="YETI",
                           elastic_use_ssl=True)
            sender.extract_and_send()
            """
        ),
    )
    parser.add_argument(
        "--elastic_index",
        type=str,
        default="yeti-feeds",
        help="Elastic Stack index name",
    )
    parser.add_argument(
        "--excluded_feeds",
        # Bug fix: type=set turned the argument string into a set of
        # characters; parse a comma-separated list of feed names instead.
        type=lambda value: {feed for feed in value.split(",") if feed},
        default=set(),
        help="Comma-separated feeds to exclude from indexing",
    )
    parser.add_argument("--mongo_hostname", type=str, help="Mongodb hostname")
    parser.add_argument("elastic_hostname", type=str, help="Elastic Stack hostname/ip")
    parser.add_argument(
        "--elastic_port", type=int, default=9200, help="Elastic Stack index name"
    )
    parser.add_argument("--elastic_user", type=str, help="Elastic Stack user")
    parser.add_argument("--elastic_pass", type=str, help="Elastic Stack password")
    parser.add_argument(
        "--elastic_use_ssl",
        # Bug fix: type=bool is True for ANY non-empty string (even
        # "False"); a presence flag is the correct argparse idiom.
        action="store_true",
        help="Flag to determine if the connection to Elastic Stack should use SSL",
    )
    parser.add_argument(
        "--elastic_verify_certs",
        action="store_true",
        help="Flag to determine if the connection to Elastic Stack should verify the certificate",
    )
    try:
        args = parser.parse_args()
    except SystemExit:
        parser.print_help()
        exit()
    # Note: There are elastic_instance and mongo_client arguments that can be delivered which are not
    # present. They are relevant if the YetiFeedSender will be called from a 3rd party and not directly from main.
    sender = YetiFeedSender(
        args.elastic_index,
        excluded_feeds=args.excluded_feeds,
        mongo_hostname=args.mongo_hostname,
        elastic_hostname=args.elastic_hostname,
        elastic_port=args.elastic_port,
        elastic_user=args.elastic_user,
        elastic_pass=args.elastic_pass,
        elastic_use_ssl=args.elastic_use_ssl,
        elastic_verify_certs=args.elastic_verify_certs,
    )
    sender.extract_and_send()
if __name__ == "__main__":
main()
| {
"repo_name": "yeti-platform/yeti",
"path": "extras/yeti_to_elasticsearch.py",
"copies": "1",
"size": "14495",
"license": "apache-2.0",
"hash": 6410166187659785000,
"line_mean": 35.1471321696,
"line_max": 129,
"alpha_frac": 0.5373577096,
"autogenerated": false,
"ratio": 4.771231073074391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5808588782674392,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cheng'
from django.conf.urls import patterns, url
import views
# URL routes for the pledges app.
# NOTE(review): uses the legacy Django `patterns('', ...)` helper and the
# implicit-relative `import views` above — both are Python-2/Django<=1.9
# era constructs; migrating requires a plain list of url()/path() entries.
urlpatterns = patterns('',
                       # /pledges/
                       url(r'^$', views.index, name='index'),
                       url(r'^reward/list/$', views.list_rewards, name='list_rewards'),
                       url(r'^reward/collect/$', views.collect_reward, name='collect_reward'),
                       # /pledges/23
                       url(r'^(?P<pledge_id>\d+)/$', views.detail, name='detail'),
                       url(r'^follow/(?P<pledge_id>\d+)/$', views.follow, name='follow'),
                       url(r'^finish/(?P<pledge_id>\d+)/$', views.finish, name='finish'),
                       url(r'^success/(?P<pledge_id>\d+)/$', views.congrats, name='congrats'),
                       url(r'^already/(?P<pledge_id>\d+)/$', views.already, name='already'),
                       url(r'^share/(?P<pledge_id>\d+)/$', views.share, name='share'),
                       # /pledges/23/
                       url(r'^(?P<pledge_id>\d+)/results/$', views.results, name='results'),
                       # AJAX pledge creation and PayPal payment endpoints
                       url(r'^create_ajax/$', views.create_ajax, name='create_ajax'),
                       url(r'^paypal/$', views.get_paypal, name='paypal'),
                       )
| {
"repo_name": "TejasM/wisely",
"path": "wisely_project/pledges/urls.py",
"copies": "1",
"size": "1256",
"license": "mit",
"hash": 7544688192623138000,
"line_mean": 53.6086956522,
"line_max": 94,
"alpha_frac": 0.4673566879,
"autogenerated": false,
"ratio": 3.817629179331307,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.47849858672313067,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chengxue'
from taskflow import task
from utils.db_handlers import tenants as db_handler
from utils.helper import *
from keystoneclient import exceptions as keystone_exceptions
LOG = logging.getLogger(__name__)
class UpdateProjectsQuotasTask(task.Task):
"""
Task to update quotas for all migrated projects
"""
def __init__(self, *args, **kwargs):
    """Cache the cloud names and API clients this task needs."""
    super(UpdateProjectsQuotasTask, self).__init__(*args, **kwargs)
    conf = cfg.CONF
    self.s_cloud_name = conf.SOURCE.os_cloud_name
    self.t_cloud_name = conf.TARGET.os_cloud_name
    # Keystone/nova handles for the source cloud, nova for the target.
    self.ks_source = get_keystone_source()
    self.nv_source = get_nova_source()
    self.nv_target = get_nova_target()
def update_quota(self, tenant_name=None, quota=None, t_data=None):
if tenant_name is None:
LOG.error("Tenant name cannot be null, skip Updating.")
return
if quota is None:
LOG.info("Nothing to be updated for tenant {0}."
.format(tenant_name))
return
ks = get_keystone_target()
try:
tenant = ks.tenants.find(name=tenant_name)
except keystone_exceptions.NotFound:
LOG.error("Tenant {0} cannot be found in cloud {1}"
.format(tenant_name, self.s_cloud_name))
return
if tenant is not None:
self.nv_target.quotas.update(tenant.id,
metadata_items=quota.metadata_items,
injected_file_content_bytes=
quota.injected_file_content_bytes,
injected_file_path_bytes=None,
ram=quota.ram,
floating_ips=quota.floating_ips,
instances=quota.instances,
injected_files=quota.injected_files,
cores=quota.cores,
key_pairs=None,
security_groups=None,
security_group_rules=None)
t_data.update({'quota_updated': '1'})
db_handler.update_migration_record(**t_data)
LOG.info("The quota for tenant {0} has been updated successfully."
.format(tenant_name))
def execute(self):
LOG.info("Start Project quota updating ...")
tenants = self.ks_source.tenants.list()
for tenant in tenants:
tenant_name = tenant.name
# get the tenant data that has been migrated from src to dst
values = [tenant_name, self.s_cloud_name, self.t_cloud_name]
tenant_data = db_handler.get_migrated_tenant(values)
# only update quotas for project that has been completed migrated
if tenant_data is not None:
if tenant_data['state'] == "proxy_created":
if tenant_data['quota_updated'] == '1':
LOG.info("The quota of project {0} has been updated."
.format(tenant_data['project_name']))
else:
new_name_dst = tenant_data['new_project_name']
# get source project quota
src_quota = self.nv_source.quotas.get(tenant.id)
# update destination project quota
self.update_quota(new_name_dst,
src_quota,
tenant_data)
else:
LOG.info("The corresponding project {0} has not been "
"migrated.".format(tenant_data['project_name']))
else:
LOG.info("Tenant {} in could {} has not been migrated."
.format(tenant.name, self.s_cloud_name)) | {
"repo_name": "Phoenix1708/OpenAcademy_OpenStack_Flyway",
"path": "flyway/flow/update_projects_quotas_task.py",
"copies": "1",
"size": "4048",
"license": "apache-2.0",
"hash": -6279432761373879000,
"line_mean": 39.898989899,
"line_max": 78,
"alpha_frac": 0.5007411067,
"autogenerated": false,
"ratio": 4.773584905660377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5774326012360378,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chengxue'
from utils.db_base import *
from collections import OrderedDict
def initialise_keypairs_mapping():
    """Create the 'keypairs' table that records migrated keypairs.

    Does nothing when the table already exists.
    """
    table_name = "keypairs"
    columns = '''id INT NOT NULL AUTO_INCREMENT,
                 name VARCHAR(64) NOT NULL,
                 public_key LONGTEXT NOT NULL,
                 fingerprint VARCHAR(128) NOT NULL,
                 user_name VARCHAR(64) NOT NULL,
                 src_cloud VARCHAR(128) NOT NULL,
                 dst_cloud VARCHAR(128) NOT NULL,
                 state VARCHAR(128) NOT NULL,
                 user_id_updated INT NOT NULL,
                 new_name VARCHAR(64),
                 PRIMARY KEY(id, fingerprint)
                 '''
    # early out when the table is already in place
    if check_table_exist(table_name):
        return
    create_table(table_name, columns, True)
def record_keypairs(keypair_details):
    """Insert rows describing migrated keypairs into the 'keypairs' table.

    :param keypair_details: iterable of per-keypair value rows
    """
    insert_record("keypairs", list(keypair_details), True)
def update_keypairs(**keypair_details):
    """Update one migrated-keypair row in the database.

    :param keypair_details: column/value pairs; must contain 'fingerprint',
        'src_cloud' and 'dst_cloud', which select the row being updated.
    """
    filter_keys = ('fingerprint', 'src_cloud', 'dst_cloud')
    w_dict = OrderedDict((key, keypair_details[key]) for key in filter_keys)
    update_table("keypairs", keypair_details, w_dict, True)
def delete_keypairs(values):
    """Delete one migrated-keypair row from the database.

    :param values: [fingerprint, src_cloud, dst_cloud] identifying the row
    """
    filter_keys = ('fingerprint', 'src_cloud', 'dst_cloud')
    delete_record("keypairs", OrderedDict(zip(filter_keys, values)))
def get_keypairs(values):
    """Return the migration detail of one keypair.

    :param values: [fingerprint, src_cloud, dst_cloud] used to filter data
    :return: dict describing the single matching row, or None when the
        lookup is empty or ambiguous
    """
    filters = {"fingerprint": values[0],
               "src_cloud": values[1],
               "dst_cloud": values[2]}
    data = read_record("keypairs", ["*"], filters, True)
    # exactly one row is expected; anything else counts as "not found"
    if not data or len(data) != 1:
        return None
    row = data[0]
    # row[0] is the auto-increment id; the named columns follow it
    fields = ('name', 'public_key', 'fingerprint', 'user_name', 'src_cloud',
              'dst_cloud', 'state', 'user_id_updated', 'new_name')
    return dict(zip(fields, row[1:10]))
def get_info_from_openstack_db(host, db_name, table_name, columns, filters):
    """Read rows straight out of an OpenStack service database."""
    return read_openstack_record(host, db_name, table_name, columns,
                                 filters, True)
def insert_info_to_openstack_db(host, db_name, table_name, values):
    """Insert rows straight into an OpenStack service database."""
    insert_openstack_record(host, db_name, table_name, list(values), True)
def delete_info_from_openstack_db(host, db_name, table_name, where_dict):
    """Delete rows from an OpenStack service database.

    :param where_dict: [fingerprint, deleted] values used as the filter
    """
    value = OrderedDict([('fingerprint', where_dict[0]),
                         ('deleted', where_dict[1])])
    delete_openstack_record(host, db_name, table_name, value, True) | {
"repo_name": "Phoenix1708/OpenAcademy_OpenStack_Flyway",
"path": "flyway/utils/db_handlers/keypairs.py",
"copies": "1",
"size": "3972",
"license": "apache-2.0",
"hash": -1268473912583028700,
"line_mean": 31.0403225806,
"line_max": 78,
"alpha_frac": 0.5813192346,
"autogenerated": false,
"ratio": 4.094845360824742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5176164595424741,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chengyu'
from user_manage.models import User
from user_manage.models import Friendship
from colock.Error import *
from colock import utils, settings
import os
from colock.key_generator import phone_hash_gen
import base64
import message.igt_wrappers as igt
@utils.hook()
def get_friend_list(meta, data, img):
    """Return the uids of every friend of the requesting user."""
    friend_uids = [link.dest_uid for link in
                   Friendship.objects.filter(src_uid=meta['uid'])]
    return '', {'status': 'done'}, {'query': friend_uids}
# this is used when you switch to a new phone: we return all of your friends
# adding a friend requires first searching for the uid and its information,
# then using that information to add the friend
@utils.hook()
def hash2uid(meta, data, img):
    """Resolve phone-number hashes to uids, befriending every match.

    For each hash in data['phone_hash_list'], find matching users and try
    to add each one as a friend; matches rejected with FriendshipError are
    skipped. Returns the flat [hash, uid, hash, uid, ...] query list.
    """
    matches = []
    for phone_hash in data['phone_hash_list']:
        for user in User.objects.filter(phone_hash=phone_hash):
            try:
                # one add_friend call per match -- low efficiency
                add_friend(meta, {'dest_uid': user.id})
            except FriendshipError:
                continue
            matches.append(unicode(phone_hash))
            matches.append(user.id)
    return '', {'status': 'done'}, {'query': matches}
# @utils.hook()
# def nickname2uid(meta, data, img):
# # returns query list
# input_nickname = data['nickname']
# query = User.objects.filter(nickname=input_nickname)
# if len(query) == 0:
# raise UserNotExistError
# return query
def is_friend_of(meta, data, img):
    """Server-internal: True when src may message dest.

    A user always counts as their own friend; otherwise the DEST user's
    outgoing friendship record towards src must exist with a positive type.
    """
    src_uid = meta['uid']
    dest_uid = data['dest_uid']
    if src_uid == dest_uid:
        return True
    links = Friendship.objects.filter(src_uid=dest_uid, dest_uid=src_uid)
    # missing link, or non-positive friendship_type, means "not a friend"
    return len(links) != 0 and links[0].friendship_type > 0
def can_send(sender_uid, receiver_uid):
    """Server-internal: True unless the receiver has blacklisted the sender.

    Sending to oneself is always allowed; otherwise the receiver's outgoing
    record towards the sender must exist and not be type 3 (blacklist).
    """
    if sender_uid == receiver_uid:
        return True
    links = Friendship.objects.filter(src_uid=receiver_uid,
                                      dest_uid=sender_uid)
    if len(links) == 0:
        return False
    return links[0].friendship_type != 3
@utils.hook()
def add_friend(meta, data, img):
    """Create (or re-confirm) a one-way friendship caller -> dest_uid.

    Raises when the target has blocked the caller or the link already
    exists in a non-pending state; otherwise saves the new link and pushes
    a 'Friend_Accepted' notification to the target user.
    """
    src_uid = meta['uid']
    dest_uid = data['dest_uid']
    friendship1 = Friendship.objects.filter(src_uid=dest_uid, dest_uid=src_uid)
    if len(friendship1) != 0:
        if friendship1[0].friendship_type == 0:
            # the target user has blocked the caller
            raise BlockedfriendError
    friendship = Friendship.objects.filter(src_uid=src_uid, dest_uid=dest_uid)
    dest = User.objects.get(id=dest_uid)
    src = User.objects.get(id=src_uid)
    if len(friendship) == 0:
        Friendship(src_uid=src_uid, dest_uid=dest_uid, friendship_type=1).save()
        # NOTE(review): dest was already fetched above -- this re-fetch looks
        # redundant; confirm before removing.
        dest = User.objects.get(id=dest_uid)
        # give the other person a notification
        igt.pushMsgToSingle_dispatch(receiver=dest, action='', meta={'status': 'Friend_Accepted'},
                                     data={'reg_num': src.region_num, 'phone_num': src.phone_num, 'uid':src.id})
        return '', {'status': 'done'}, {'reg_num': dest.region_num, 'phone_num': dest.phone_num}
    else:
        if friendship[0].friendship_type == 0:
            # NOTE(review): BlockfriendError here vs BlockedfriendError above --
            # confirm both names exist in colock.Error (imported via *).
            raise BlockfriendError
        if friendship[0].friendship_type != 1:
            raise FriendExistError
        # below is when type==1
        return '', {'status': 'done'}, {'reg_num': dest.region_num, 'phone_num': dest.phone_num}
@utils.hook()
def del_friend(meta, data, img):
    """Remove the caller's outgoing friendship towards data['dest_uid']."""
    links = Friendship.objects.filter(src_uid=meta['uid'],
                                      dest_uid=data['dest_uid'])
    if len(links) == 0:
        raise FriendNotExistError
    links[0].delete()
    return '', {'status': 'done'}, {}
@utils.hook()
def block_friend(meta, data, img):
    """Mark dest as blocked (friendship_type 0), creating the link if absent."""
    src_uid = meta['uid']
    dest_uid = data['dest_uid']
    links = Friendship.objects.filter(src_uid=src_uid, dest_uid=dest_uid)
    if len(links) != 0:
        link = links[0]
        link.friendship_type = 0
    else:
        link = Friendship(src_uid=src_uid, dest_uid=dest_uid, friendship_type=0)
    link.save()
    return '', {'status': 'done'}, {}
@utils.hook()
def unblock_friend(meta, data, img):
    """Lift a block: reset friendship_type from 0 (blocked) back to 1.

    Raises FriendNotExistError when no link exists and NoNeedError when the
    link is not currently blocked.
    """
    src_uid = meta['uid']
    dest_uid = data['dest_uid']
    friendship = Friendship.objects.filter(src_uid=src_uid, dest_uid=dest_uid)
    if len(friendship) == 0:
        raise FriendNotExistError
    if friendship[0].friendship_type != 0:
        raise NoNeedError
    friendship[0].friendship_type = 1
    # BUG FIX: the changed type was never persisted -- Django model field
    # changes require an explicit save() (compare block_friend above).
    friendship[0].save()
    # Action, Meta, Data
    return '', {'status': 'done'}, {}
@utils.hook()
def search_username(meta, data, img):
    """Look a user up by exact user_name.

    Returns the user's id and nickname; when the user has a logo on disk it
    is attached base64-encoded, best effort (failures leave it out).
    Raises FriendNotExistError when no user matches.
    """
    username = str(data['username'])
    query = User.objects.filter(user_name=username)
    query2 = User.objects.filter(user_name=username, user_logo__isnull=True)
    if len(query) == 0:
        raise FriendNotExistError
    data = {'id': query[0].id, 'nickname': query[0].nickname}
    user_logo_prefix = settings.BASE_DIR + '/upload/'
    # query2 counts the matches WITHOUT a logo; unequal lengths mean at
    # least one match has a logo to attach
    if len(query2) != len(query):
        try:
            path = user_logo_prefix + query[0].user_logo.url
            _, ext = os.path.splitext(path)
            # BUG FIX: file handle was opened without close on the error
            # path; use a with-block. Bare `except:` narrowed to Exception
            # so KeyboardInterrupt/SystemExit are no longer swallowed.
            with open(path) as f:
                data['user_logo'] = f.read().encode("base64")
            data['filetype'] = ext
        except Exception:
            pass
    return '', {'status': 'done'}, data
@utils.hook()
def update_user_info(meta, data, img):
    """Apply data['info_dict'] onto the calling user's profile.

    The 'user_logo'/'filetype' keys are skipped (the logo comes from *img*);
    a changed region or phone number refreshes the stored phone hash.
    """
    info_dict = data['info_dict']
    user = User.objects.get(id=int(meta['uid']))
    for key, val in info_dict.iteritems():
        if key not in ('user_logo', 'filetype'):
            setattr(user, key, val)
    if ('region_num' in info_dict) or ('phone_num' in info_dict):
        user.phone_hash = phone_hash_gen(user.region_num, user.phone_num)
    user.user_logo = img
    user.save()
    return '', {'status': 'done'}, {}
@utils.hook()
def blacklist_friend(meta, data, img):
    """Blacklist dest (friendship_type 3) and push them a notification."""
    src_uid = meta['uid']
    dest_uid = data['dest_uid']
    links = Friendship.objects.filter(src_uid=src_uid, dest_uid=dest_uid)
    if len(links) != 0:
        link = links[0]
        link.friendship_type = 3
    else:
        link = Friendship(src_uid=src_uid, dest_uid=dest_uid, friendship_type=3)
    link.save()
    dest = User.objects.get(id=dest_uid)
    igt.pushMsgToSingle_dispatch(dest, '', meta={'status': 'Friend_Blacklisted'}, data={'uid': src_uid})
    return '', {'status': 'done'}, {}
@utils.hook()
def unblacklist_friend(meta, data, img):
    """Remove dest from the caller's blacklist and notify them."""
    src_uid = meta['uid']
    dest_uid = data['dest_uid']
    friendship = Friendship.objects.filter(src_uid=src_uid, dest_uid=dest_uid)
    if len(friendship) == 0:
        raise FriendNotExistError
    # BUG FIX: blacklisting sets friendship_type = 3 (see blacklist_friend),
    # so the "nothing to do" guard must test for 3 -- the original tested 0
    # (copy-paste from unblock_friend), which made unblacklisting impossible.
    if friendship[0].friendship_type != 3:
        raise NoNeedError
    friendship[0].friendship_type = 1
    # BUG FIX: persist the change; the original never called save().
    friendship[0].save()
    # Action, Meta, Data
    dest = User.objects.get(id=dest_uid)
    igt.pushMsgToSingle_dispatch(dest, '', meta={'status': 'unBlacklist_friend'}, data={'uid': src_uid})
    return '', {'status': 'done'}, {}
| {
"repo_name": "FXuZ/colock-server",
"path": "user_manage/friendship.py",
"copies": "1",
"size": "7825",
"license": "apache-2.0",
"hash": -5922503241595774000,
"line_mean": 31.6041666667,
"line_max": 112,
"alpha_frac": 0.6014057508,
"autogenerated": false,
"ratio": 3.334043459735833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44354492105358323,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenkovsky'
from ctypes import *
import os
from . import arpa
# Load the compiled ngram shared library that sits next to this package and
# declare the pointer-returning entry points for ctypes.
libngram = cdll.LoadLibrary(os.path.dirname(os.path.realpath(__file__)) + '/../libngram.so')
libngram.NgramBuilder_init.restype = POINTER(c_byte)
libngram.Ngram_init_from_bin.restype = POINTER(c_byte)
libngram.Ngram_init_from_bin.argtypes = [POINTER(c_byte)]
def arpa_to_bin(dst_path, arpa_fp):
    """Convert an ARPA-format language model to libngram's binary format.

    :param dst_path: path the binary model is written to
    :param arpa_fp: readable file object positioned at the ARPA text
    """
    # one-element list so the nested callbacks can rebind the builder handle
    builder = [None]

    def init_ngram_builder(lm_info):
        # lm_info maps order -> ngram count; hand counts over sorted by order
        gram_nums = [y[1] for y in sorted(list(lm_info.items()), key=lambda x: x[0])]
        arr = (c_longlong * len(gram_nums))(*gram_nums)
        builder[0] = libngram.NgramBuilder_init(arr,len(gram_nums))

    def gram(lm_info, section, words, prob, bow):
        # log-probabilities are stored as negated, 1e6-scaled integers
        prob = int(prob*-1000000)
        bow = int(bow*-1000000)
        if len(words) > 1:
            arr = (c_char_p * len(words))(*[c_char_p(w.encode("utf8")) for w in words])
            libngram.NgramBuilder_add_ngram2(builder[0], arr, section, prob, bow)
        else:
            libngram.NgramBuilder_add_word(builder[0], c_char_p(words[0].encode("utf8")), prob, bow)

    arpa(arpa_fp, header_end=init_ngram_builder, gram=gram)
    libngram.NgramBuilder_save(builder[0], c_char_p(dst_path.encode("utf8")))
    # release the native builder once the model has been written
    if builder[0] is not None:
        arr = (POINTER(c_byte) * 1)(builder[0])
        libngram.NgramBuilder_free(arr)
class Ngram:
    """ctypes wrapper around a binary libngram model file."""

    def __init__(self, path):
        """Read the binary model at *path* into memory and hand it to libngram."""
        import array
        # NOTE(review): the local name deliberately shadows the array module
        array = array.array('b')
        fd = os.open(path, os.O_RDONLY)
        sz = os.fstat(fd).st_size
        fp = os.fdopen(fd, 'rb')
        array.fromfile(fp, sz)
        # NOTE(review): os.close(fd) also invalidates fp (same descriptor) and
        # fp is never closed explicitly -- confirm this is intended.
        os.close(fd)
        addr, count = array.buffer_info()
        self.lm = libngram.Ngram_init_from_bin(cast(addr, POINTER(c_byte)))

    def prob(self, words):
        """Return the scaled integer log-probability of the word sequence."""
        arr = (c_char_p * len(words))(*[c_char_p(w.encode("utf8")) for w in words])
        return libngram.Ngram_prob2(self.lm, arr, len(words))

    def bow(self, words):
        """Return the scaled integer back-off weight of the word sequence."""
        arr = (c_char_p * len(words))(*[c_char_p(w.encode("utf8")) for w in words])
        return libngram.Ngram_bow2(self.lm, arr, len(words))

    def __del__(self):
        # free the native model when the wrapper is garbage-collected
        arr = (POINTER(c_byte) * 1)(self.lm)
        libngram.Ngram_free(arr)
if __name__ == '__main__':
    import gzip
    from docopt import docopt
    # CLI: convert a gzipped ARPA model <src> into binary form at <dst>
    args = docopt("""
    Usage:
        pyngram.py <dst> <src>
    """)
    with gzip.open(args["<src>"],"rt") as fi:
        arpa_to_bin(args["<dst>"], fi)
| {
"repo_name": "chenkovsky/pyngram",
"path": "pyngram/__init__.py",
"copies": "1",
"size": "2379",
"license": "mit",
"hash": -53508232823783760,
"line_mean": 33.9852941176,
"line_max": 100,
"alpha_frac": 0.5968894493,
"autogenerated": false,
"ratio": 2.8187203791469195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39156098284469193,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenkovsky'
from rex import rex
import sys
def arpa(fp, gram=None, header_start = None, header_end = None, section_start = None, section_end = None, file_end = None):
    """Stream-parse an ARPA language-model file, firing callbacks.

    Any callback explicitly returning False aborts the parse early.

    :param fp: iterable of lines of the ARPA file
    :param gram: called as gram(lm_info, order, words, prob, bow) per ngram
    :param header_start: called when the \\data\\ header begins
    :param header_end: called with lm_info when the header section ends
    :param section_start: called with (lm_info, order) at each k-grams marker
    :param section_end: called with (lm_info, order) when a section ends
    :param file_end: called with lm_info when \\end\\ is reached
    """
    section = None   # None = outside, 0 = header, k = inside k-gram section
    lm_info = {}     # order -> declared ngram count from the header
    max_gram = 0
    for l in fp:
        #print(l)
        if l.startswith("\\"):
            if l == "\\data\\\n":
                section = 0
                print("loading header", file=sys.stderr)
                if header_start and header_start() == False:
                    break
            elif l=="\\end\\\n":
                if file_end:
                    file_end(lm_info)
                break
            else:
                # "\k-grams:" marker opens the k-gram section
                res = (l == rex("/^\\\\(\\d+)-grams/"))
                if res is not None:
                    section = int(res[1])
                    print("loading %d-grams" % section, file=sys.stderr)
                    if section_start and section_start(lm_info,section) == False:
                        break
                    continue
        if l == "\n":
            # a blank line closes the current section
            if section == 0 and header_end and header_end(lm_info)== False:
                break
            elif section is not None and section > 0 and section_end and section_end(lm_info,section) == False:
                break
            section = None
            continue
        if section == 0:
            # header line of the form "ngram k=count"
            res = (l == rex("/^ngram (\d+)=(\d+)/"))
            lm_info[int(res[1])] = int(res[2])
            print("ngram %d=%d"%(int(res[1]), int(res[2])), file=sys.stderr)
            max_gram = max(max_gram, int(res[1]))
        else:
            # data line: "prob\twords[\tbow]"
            larr = l.strip("\n").split("\t")
            bow = None
            if len(larr) == 3:
                bow = float(larr[-1])
            elif len(larr) < 2:
                continue
            if bow is None:
                bow = 0
            prob = float(larr[0])
            words = larr[1].split(" ")
            if gram and gram(lm_info, section, words, prob, bow) == False:
                break | {
"repo_name": "chenkovsky/pyngram",
"path": "pyngram/arpa.py",
"copies": "1",
"size": "1969",
"license": "mit",
"hash": -835003712792065400,
"line_mean": 36.8846153846,
"line_max": 123,
"alpha_frac": 0.4393092941,
"autogenerated": false,
"ratio": 3.868369351669941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4807678645769941,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenkovsky'
import pandas as pd
import numpy as np
from sklearn.neighbors import KDTree
import time
class UserBasedKNNRecommender:
"""
class for user based knn recommender.
when doing recommendation, it first select neighbors,
and calculate the similarity between neighbors and current user.
then $$E(X[user][item]) = \sum_{u in neighbors} X[u][item]*similarity[user][u]$$.
return the highest ranked items.
"""
def __init__(self, X, lazy = False, neighbors_size = 10, kdt = None):
"""
item based knn recommender
:param X: np.matrix, the element in matrix should >= 0. X[user][item] == 0 means the user hasn't voted the item.
:param lazy: whether calculate similarity at recommend time.
:param neighbors_size: use how many neighbors' voting to decide the user's vote
:param kdt: KDTree used to calculate neighbors
"""
self.X_ori = X
#print("-----ori_X-----")
#print(X)
#print("-----end-----")
self.X = X/np.linalg.norm(X, axis = 1)[:,None]
#print("-----Xnorm-----")
#print(self.X)
#print("-----end-----")
self.neighbors_size = neighbors_size if len(self.X) > neighbors_size else len(self.X)
self.lazy = lazy
self.use_kdtree = not kdt is None
if not lazy and self.use_kdtree:
raise Exception("kdtree is used for lazily calculate similarity between users. Cannot use kdtree in non lazy mode.")
if lazy and self.use_kdtree:
#print("-----ini kdtree-----")
self.kdt = kdt
#self.kdt = KDTree(self.X_ori, leaf_size=30, metric= 'euclidean')
#print("-----end-----")
if not lazy:
self.similarity = self.X * self.X.transpose()
def recommend(self, id, items_num = 10, except_voted = True, ret_expect_vote = False):
"""
recommend item for user
:param id: user id, row num of the matrix.
:param items_num: return at most how many items.
:param except_voted: whether remove the items that user already has.
:return: the list of the highest ranked items.
"""
#print(time.strftime("start recommend:%Y-%m-%d %X",time.localtime()))
record = np.squeeze(np.asarray(self.X_ori[id]))
voted = np.argwhere(record > 0).flatten()
#print("-----voted-----")
#print(voted)
#print("-----end-----")
if not self.lazy:
neighbor_ids = np.squeeze(np.asarray(self.similarity[id])).argsort()[::-1][0:self.neighbors_size+1]
if self.use_kdtree:
neighbor_ids = self.kdt.query([record], k=self.neighbors_size+1,return_distance=False).flatten()
else:
neighbor_ids = np.nonzero(np.asarray(np.max(self.X[:,voted], axis = 1)).flatten())[0]
neighbor_ids = neighbor_ids[neighbor_ids != id]
#print("-----neighbor ids-----")
#print(neighbor_ids)
#neighbor_ids = np.array([0, 1, 2, 3, 5, 6])
#print("-----end-----")
#maybe we can skip select rest_items_ids
#rest_item_ids = np.array([])
#neighbor_matrix = []
#for idx in range(0, len(distances)):
# np.union1d(rest_item_ids, np.nonzero(self.X[neighbor_ids[idx]]))
# neighbor_matrix.append(np.squeeze(np.asarray(self.X[neighbor_ids[idx]])))
#rest_item_ids = np.setdiff1d(rest_item_ids, np.nonzero(record))
#calculate item priority
#1.calculate similarity
#print(time.strftime("get neighbor matrix:%Y-%m-%d %X",time.localtime()))
#m = self.X[neighbor_ids]
#print("-----neighbors' matrix-----")
#print(m)
#print("-----end-----")
#m = np.matrix(neighbor_matrix)
#def cosine_simi(u,v):
# print(u)
# #return None
# print(v)
# u = np.array(u)
# return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
#print(time.strftime("calc similarity:%Y-%m-%d %X",time.localtime()))
#similarity = np.ma.apply_along_axis( cosine_simi, 1, m, record)
if self.lazy:
similarity = self.X[id]*self.X[neighbor_ids].transpose()
else:
similarity = self.similarity[id][:, neighbor_ids]
#print("-----similarity-----")
#print(similarity)
#print("-----end-----")
#print(time.strftime("calc sum of similarity:%Y-%m-%d %X",time.localtime()))
total_similarity = np.sum(similarity)
#print("-----total similarity-----")
#print(total_similarity)
#print("-----end-----")
#print("-----sim*neighbor_matrix-----")
#print(np.dot(similarity, self.X_ori[neighbor_ids]))
#print("-----end-----")
#print(time.strftime("expect_votes:%Y-%m-%d %X",time.localtime()))
expect_votes = np.squeeze(np.asarray(np.dot(similarity, self.X_ori[neighbor_ids])/total_similarity))
#print("-----expect_votes-----")
#print(expect_votes)
#print("-----end-----")
#print(time.strftime("sort votes:%Y-%m-%d %X",time.localtime()))
candidate_item_ids = np.lexsort((-expect_votes,))
#print("-----id_idx_sorted-----")
#print(id_idx)
#print("-----end-----")
#print("-----candidate_item_ids_before_except-----")
#print(candidate_item_ids)
#print("-----end-----")
if (except_voted):
candidate_item_ids = [a for a in candidate_item_ids if not a in voted]
#print("-----candidate_item_ids_except-----")
#print(candidate_item_ids)
#print("-----end-----")
#print(time.strftime("end:%Y-%m-%d %X",time.localtime()))
return (candidate_item_ids[0:items_num], list(expect_votes[candidate_item_ids]) if ret_expect_vote else None)
class ItemBasedKNNRecommender:
"""
class for item based KNN recommender.
when doing recommendation, it calculates the similarity between items and user voted items.
$$similarity[i][j] = X[:][i]*X[:][j]/|X[:][i]|/|X[:][j]|$$.
Then use current user's votes to estimate votes for rest items.
$$X[u][i] = (\sum_{j\in voted_items} similarity[i][j]*X[u][j])/(\sum_{j \in voted_items} similarity[i][j])$$.
"""
def __init__(self, X, lazy = False): #, neighbors_size = 10
"""
item based knn recommender
:param X: np.matrix, the element in matrix should >= 0. X[user][item] == 0 means the user hasn't voted the item.
:param lazy: whether calculate similarity at recommend time.
"""
self.X_ori = X
#print("-----ori matrix-----")
#print(self.X_ori)
#print("-----end-----")
self.X = X/np.linalg.norm(X, axis = 0)
#print("-----normed-----")
#print(self.X)
#print("-----end-----")
self.lazy = lazy
if not lazy:
self.similarity = self.X.transpose()*self.X
#print("-----similarity-----")
#print(self.similarity)
#print("-----end-----")
#self.neighbors_size = neighbors_size
def recommend(self, id, items_num = 10, except_voted = True, ret_expect_vote = False):
"""
recommend item for user
:param id: user id, row num of the matrix.
:param items_num: return at most how many items.
:param except_voted: whether remove the items that user already has.
:return: the list of the highest ranked items.
"""
#print(time.strftime("start recommend: %Y-%m-%d %X",time.localtime()))
record = np.squeeze(np.asarray(self.X[id]))
#print(time.strftime("get record:%Y-%m-%d %X",time.localtime()))
#print("-----record-----")
#print(record)
#print("-----end-----")
voted = np.argwhere(record > 0).flatten()
#print(time.strftime("voted: %Y-%m-%d %X",time.localtime()))
#print("-----voted-----")
#print(voted)
#print("-----end-----")
if self.lazy:
other_user = np.nonzero(np.asarray(np.max(self.X[:,voted], axis = 1)).flatten())[0]
#print(time.strftime("other user:%Y-%m-%d %X",time.localtime()))
#print("-----other user-----")
#print(other_user)
#print("-----end-----")
candidate_item_ids = np.argwhere(np.asarray(np.max(self.X[other_user,:], axis = 0)).flatten() > 0).flatten()
#np.argwhere(np.asarray(np.max(matrix[[1,2],:], axis = 0)).flatten() > 0)[0]
#print(time.strftime("candidate item ids:%Y-%m-%d %X",time.localtime()))
#print("-----candidate item ids-----")
#print(candidate_item_ids)
#print("-----end-----")
m = self.X[:, candidate_item_ids]
#m = self.X[:, candidate_item_ids]/ np.linalg.norm(self.X[:, candidate_item_ids], axis=0)[:,None]
#print(time.strftime("m:%Y-%m-%d %X",time.localtime()))
#print("-----m-----")
#print(m)
#print("-----end-----")
voted_m = self.X[:,voted]
#voted_m = self.X[:,voted]/ np.linalg.norm(self.X[:,voted],axis=0)[:,None]
#print(time.strftime("voted_m:%Y-%m-%d %X",time.localtime()))
#print("-----voted_m-----")
#print(voted_m)
#print("-----end-----")
sim = voted_m.transpose()*m
else:
sim = self.similarity[voted, :]
#print("-----sim-----")
#print(sim)
#print("-----end-----")
#total_sim = np.sum(sim, axis =0)
#print("-----total_sim-----")
#print(total_sim)
#print("-----end-----")
#print("-----exp-----")
#print(self.X_ori[id, voted] * sim)
#print("-----end-----")
#print("-----sim/total_sim-----")
#print(sim/total_sim)
#print("-----end-----")
# it seems that divided by total_sim is not a good choice.
# for example.
# if user A only voted item X.
# similarity between other items and X is [s_1,s_2...s_n]
# so every items's expect vote is [s_1*vote[X],s_2*vote[X],...s_n*vote[X]]
# and divided by total_sim
# expect_votes is [vote[X], vote[X], ...]
expect_votes = np.squeeze(np.asarray(self.X_ori[id, voted] * sim))#/total_sim
#print(time.strftime("expect_votes:%Y-%m-%d %X",time.localtime()))
#print("-----expect_votes-----")
#print(expect_votes)
#print("-----end-----")
id_idxes = expect_votes.argsort()[::-1]
#print(time.strftime("id_idxes:%Y-%m-%d %X",time.localtime()))
#print("-----id_idxes-----")
#print(id_idxes)
#print("-----end-----")
if self.lazy:
candidate_item_ids = candidate_item_ids[id_idxes]
else:
candidate_item_ids = np.array(range(0,np.size(self.X,1)))[id_idxes]
expect_votes = sorted(expect_votes, reverse = True)
#print(time.strftime("candidate_item_ids:%Y-%m-%d %X",time.localtime()))
if (except_voted):
expect_votes = [expect_votes[idx] for idx, a in enumerate(candidate_item_ids) if not a in voted]
candidate_item_ids = [a for a in candidate_item_ids if not a in voted]
return (candidate_item_ids[0:items_num], list(expect_votes[0:items_num])if ret_expect_vote else None)
| {
"repo_name": "chenkovsky/recpy",
"path": "knn.py",
"copies": "1",
"size": "11367",
"license": "mit",
"hash": -6707025606095105000,
"line_mean": 44.6506024096,
"line_max": 128,
"alpha_frac": 0.5398961907,
"autogenerated": false,
"ratio": 3.464492532764401,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4504388723464401,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenshuai'
# --- list basics (Python 2 scratch/demo script) ---
cast = ["Cleese", "Plain", "Jones", "Idle"]
print cast
print len(cast)
print cast[1]
cast.append("Gilliam")
print cast
cast.pop()
print cast
cast.extend(["Gilliam", "Chapman"])
print cast
cast.remove("Chapman")
print cast
cast.insert(0, "Chapman")
print cast
movies = ["The Holy Grail", "The Life of Brian", "The Meaning of Life"]
movies.insert(1, 1975)
movies.insert(3, 1979)
movies.append(1983)
fav_movies = ["The Holy Grail", "The Life of Brain"]
for each_flick in fav_movies:
    print(each_flick)
# movies = [[],[],[]]
# NOTE(review): movies[4][1][3] indexes into a nested list that this flat
# `movies` does not contain -- presumably copied from the book's nested
# example; this line raises TypeError as written. Confirm.
print movies[4][1][3]
print isinstance(movies, list) # True
print isinstance(len(movies), list) # False
# nester.py
# default value is not necessary
def print_lol(the_list, indent=False, level=0, fh=sys.stdou):
for each_item in the_list:
if isinstance(each_item, list):
print_lol(each_item, indent, level+1, fh)
else:
if indent:
for tab_stop in range(level):
print "\t"
print each_item
# print(each_item, end='', file=fh) python3
# python PyPI
# perl CPAN
"""This is the standard way to include a multiple-line comment in your code."""
"""import sys; print(sys.path); the location of python lib"""
"""pyc file === java class file"""
# list()
# range()
# enumerate()
# int()
# id()
# next()
import os
# --- working directory and simple file reading ---
print os.chdir("../")
print os.getcwd()
data = open("sketch.txt")
print data.readline()
data.seek(0)
# Man: Is this the right room for an argument?
# Other Man: I've told you once.
# Man: No you haven't!
# Other Man: Yes I have.
# Man: When?
# Other Man: Just now.
# Man: No you didn't
# split each "Role: line" entry, skipping malformed lines
if os.path.exists("readme.txt"):
    data = open("readme.txt")
    for each_line in data:
        if not each_line.find(":") == -1:
            try:
                # split: Immutable parameters
                (role, line_spoken) = each_line.split(":", 1)
                print role
                print line_spoken
            # focus your job's content
            except ValueError:
                pass
    data.close()
else:
    print "The data file is missing !"
# try/except/finally
# NOTE(review): in this demo the `except IOError` clause appears AFTER
# `finally`, which is a SyntaxError in Python -- the order must be
# try / except / finally. Kept as written; confirm intent.
man = []
other = []
try:
    data = open("sketch.txt")
    for each_line in data:
        try:
            (role, line_spoken) = each_line.split(":", 1)
            line_spoken = line_spoken.strip()
            if role == "Man":
                man.append(line_spoken)
            elif role == 'Other Man':
                other.append(line_spoken)
        except ValueError as err:
            print "File error: " + str(err)
            pass
    # call locals() before call close()
    # locals() BIF
finally:
    if 'data' in locals():
        data.close()
except IOError:
    print "The datafile is missing !"
print man
print other
"""with is equals try/except/finally, with use a kind of context management protocol python tech"""
try:
    with open("its.txt", "w") as data:
        data.write("It's...")
except IOError as err:
    print "File error: " + str(err)
"""||"""
"""||"""
"""||"""
"""with is equals try/except/finally, with use a kind of context management protocol python tech"""
try:
    data = open("its.txt", "w")
    data.write("It's...")
except IOError as err:
    print "File error: " + str(err)
finally:
    if "data" in locals():
        data.close()
# NOTE(review): both files are opened "w" and then read -- readlines() on a
# write-mode handle raises IOError in py2; confirm intent.
with open("man_data.txt", "w") as man_file, open("other_data.txt", "w") as other_file:
    # data in memory
    print man_file.readlines()
    # data in memory
    print other_file.readlines()
# dump load; must use binary
import pickle
try:
with open("mydata.pickle", "wb") as mysavedata:
pickle.dump([1, 2, 'three'], mysavedata)
with open("mydata.pickle", "rb") as myrestoredata:
a_list = pickle.load(myrestoredata)
print a_list
except IOError as err:
print "File error: " + str(err)
except pickle.PickleError as pickle_err:
print "Pickling error: " + str(pickle_perr)
# In-place sorting
print data.sort()
# Copied sorting
print sorted(data)
def sanitize(time_string):
    """Normalise a time string so "-" or ":" separators become ".".

    Strings already using "." (or with no separator at all) are returned
    unchanged.
    """
    for separator in ("-", ":"):
        if separator in time_string:
            mins, secs = time_string.split(separator)
            return mins + "." + secs
    return time_string
# create convert iterate append
# NOTE(review): `mikey` is not defined in this file -- presumably the list of
# raw time strings from the book's example data; confirm before running.
clean_mikey = [sanitize(each_t) for each_t in mikey]
print sorted(set([sanitize(each_t) for each_t in mikey]), reverse=True)[0:3]
# pop
def get_coach_data(filename):
try:
with open(filename) as f:
data = f.readline()
templ = data.strip().split(",")
return {
"name": templ.pop(0),
"dob": templ.pop(0),
"times": str(sorted(set(sanitize(t) for t in sarah_data["times"]))[0:3])
}
except IOError as ioerr:
print "File error: " + str(ioerr)
return None
# class
class Athlete:
    """A single athlete with a name, date of birth and recorded times."""

    def __init__(self, a_name, a_dob=None, a_times=None):
        # BUG FIX: the original default `a_times=[]` is a mutable default
        # argument shared by every instance created without explicit times;
        # use a None sentinel and build a fresh list per instance.
        self.name = a_name
        self.dob = a_dob
        self.times = [] if a_times is None else a_times

    def top3(self):
        """Return the three fastest distinct times, normalised via sanitize()."""
        return sorted(set([sanitize(t) for t in self.times]))[0:3]

    def add_time(self, time_value):
        """Record one additional time."""
        self.times.append(time_value)

    def add_times(self, list_of_times):
        """Record several additional times."""
        self.times.extend(list_of_times)
sarah = Athlete("Sarah Sweeney", "2002-6-17", ["2:58", "2.58", "1.56"])
james = Athlete("James Jones")
print str(sarah.top3())
print str(james.top3())
| {
"repo_name": "feng345fengcool/hellojava",
"path": "src/main/resources/basic.py",
"copies": "1",
"size": "5434",
"license": "apache-2.0",
"hash": 7160944304162383000,
"line_mean": 23.4774774775,
"line_max": 99,
"alpha_frac": 0.5894368789,
"autogenerated": false,
"ratio": 3.24031007751938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43297469564193797,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenzhao'
from base import *
from flask.ext.security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from gmission.config import APP_SECRET_KEY
# Association table for the many-to-many User <-> Role relationship.
roles_users = db.Table('roles_users',
                       db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
                       db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model, RoleMixin, BasicModelMixin):
    """Flask-Security role (e.g. admin), linked to users via roles_users."""
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))
    def __unicode__(self):
        return self.name
class User(db.Model, UserMixin, BasicModelMixin):
    """Account model for Flask-Security; also issues and verifies signed auth tokens."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255), unique=True, nullable=False)
    email = db.Column(db.String(255), unique=True, nullable=False)
    password = db.Column(db.String(255)) # encrypt later
    credit = db.Column(db.Integer, default=0)
    active = db.Column(db.Boolean(), default=False)
    created_on = db.Column(db.DateTime, default=datetime.datetime.now)
    # 'iat' (issued-at) of the most recently generated token; verify_auth_token
    # matches it against the presented token, so older tokens are rejected.
    iat = db.Column(db.Integer, default=0)
    roles = db.relationship('Role', secondary=roles_users, backref=db.backref('users', lazy='dynamic'))
    # roles = db.relationship('Role', secondary=roles_users, backref=db.backref('users', lazy='dynamic'))
    def get_roles(self):
        """Return this user's role names as a list of strings."""
        return [role.name for role in self.roles]
    def hash_password(self, password):
        """Store the passlib hash of *password* in self.password."""
        self.password = pwd_context.encrypt(password)
    def verify_password(self, password):
        """Check *password* against the stored passlib hash."""
        return pwd_context.verify(password, self.password)
    def generate_auth_token(self, expiration=3600):
        """Return a signed token for this user and record its issued-at time."""
        s = Serializer(APP_SECRET_KEY, expires_in=expiration)
        ret = s.dumps({'id': self.id, 'username': self.username})
        try:
            # Decode the token we just minted to learn its 'iat' header, then
            # persist it so verify_auth_token can invalidate older tokens.
            payload, header = s.loads(ret, return_header=True)
            self.iat = header.get('iat', 0)
            db.session.commit()
        except Exception:
            # Best-effort: if recording iat fails the token is still returned.
            return ret
        return ret
    def get_json(self, password=False):
        """Serialize public fields (plus roles); password only when asked.

        NOTE(review): password=True includes the stored password value --
        confirm no caller exposes this to clients.
        """
        fields_in_json = ['id', 'username', 'email', 'credit', 'active', 'created_on']
        json_dict = {}
        for field in fields_in_json:
            json_dict[field] = getattr(self, field)
        json_dict['roles'] = self.get_roles()
        if password:
            json_dict['password'] = self.password
        return json_dict
    @staticmethod
    def verify_auth_token(token):
        """Return the User for a valid *token*, or None when expired/invalid."""
        s = Serializer(APP_SECRET_KEY)
        try:
            data, header = s.loads(token, return_header=True)
        except SignatureExpired:
            return None # valid token, but expired
        except BadSignature:
            return None # invalid token
        # Matching iat ensures only the most recently issued token is accepted.
        user = User.query.filter_by(id=data['id'], username=data['username'], iat=header['iat']).first()
        return user
    def __unicode__(self):
        return '<User id=%s email=%s>' % (self.id, self.email)
# Flask-Security datastore wiring the User and Role models into the extension.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/gmission/models/user.py",
"copies": "1",
"size": "3166",
"license": "mit",
"hash": -7097410278279076000,
"line_mean": 37.6097560976,
"line_max": 105,
"alpha_frac": 0.6430827543,
"autogenerated": false,
"ratio": 3.702923976608187,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48460067309081867,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CHEN Zhao'
import json_encoder
import log
import sys
import socket
import os.path
# SECURITY(review): secret key is hard-coded in source; consider loading it
# from the environment and rotating it.
APP_SECRET_KEY = 'gMissionForHKUSTSecretKey'
APP_AUTH_HEADER_PREFIX = 'gMission'
def stdout(*lst):
    """Print *lst* prefixed with this process's argv, flushing immediately."""
    print '[' + ' '.join(sys.argv) + ']' + ' '.join(map(str, lst))
    sys.stdout.flush()
def config(app, root):
    """Apply common settings to *app*, then ensure configured dirs exist."""
    config_common(app, root)
    # if is_production():
    #     config_production(app)
    # else:
    #     config_developing(app)
    check_dir_config(app)
def is_production():
    """Always True: deployment moved to docker, hostname check disabled."""
    stdout('docker now, all is production')
    return True
    # return 'xjimi.com' in socket.gethostname() or 'gmission' in socket.gethostname()
def makedir(dir_path):
    """Create *dir_path* (including parents) if it does not already exist.

    BUG FIX: the original exists()/makedirs() pair had a race -- a concurrent
    creator between the check and the call crashed makedirs. Use EAFP instead:
    attempt the creation and tolerate OSError only when the directory exists.
    """
    try:
        os.makedirs(dir_path)
    except OSError:
        if not os.path.isdir(dir_path):
            raise
def check_dir_config(app):
    """Make sure every GMISSION*DIR directory in app.config exists on disk."""
    dir_keys = [k for k in app.config
                if k.startswith('GMISSION') and k.endswith('DIR')]
    for k in dir_keys:
        makedir(app.config[k])
def config_common(app, root_path):
    """Set all application config shared by every environment.

    Configures JSON encoding, security keys, upload/thumbnail directories
    (rooted at *root_path*), the database URI, fingerprint-app download
    paths, logging, and the email settings module.
    """
    app.json_encoder = json_encoder.CustomJSONEncoder
    app.secret_key = 'blabla'
    app.config['SECRET_KEY'] = APP_SECRET_KEY
    app.config['APP_AUTH_HEADER_PREFIX'] = APP_AUTH_HEADER_PREFIX
    app.config['WTF_CSRF_ENABLED'] = False
    app.config['SECURITY_TOKEN_AUTHENTICATION_KEY'] = 'UserAuthToken'
    app.config['SECURITY_TOKEN_AUTHENTICATION_HEADER'] = 'X-Xuewen-User-Auth-Token'
    app.config['GMISSION_IMAGE_UPLOAD_DIR'] = os.path.join(root_path, 'static', 'image', 'original')
    app.config['GMISSION_IMAGE_THUMB_DIR'] = os.path.join(root_path, 'static', 'image', 'thumb')
    app.config['GMISSION_VIDEO_UPLOAD_DIR'] = os.path.join(root_path, 'static', 'video', 'original')
    app.config['GMISSION_VIDEO_THUMB_DIR'] = os.path.join(root_path, 'static', 'video', 'thumb')
    app.config['GMISSION_AUDIO_UPLOAD_DIR'] = os.path.join(root_path, 'static', 'audio', 'original')
    app.config['GMISSION_LOGS_DIR'] = os.path.join(root_path, 'logs')
    # app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://csp_team:csp2014hkust@127.0.0.1:3306/gmission_hkust'
    # SECURITY(review): database credentials are committed in source; move to
    # environment configuration and rotate them.
    app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://csp_team:csp2014hkust@docker-mysql/gmission_hkust'
    FP_PATH = os.path.join(root_path, 'static', 'fp_collection')
    app.config['APK_PATH'] = os.path.join(FP_PATH, 'app-debug-unaligned.apk')
    app.config['DATA_PATH'] = os.path.join(FP_PATH, 'wherami.zip')
    app.config['WIFIPAD_PATH'] = os.path.join(FP_PATH,'wififorpad.apk')
    app.config['LOCALIZATION_PATH'] = os.path.join(FP_PATH, 'wifilocalization.apk')
    log.set_logger(app)
    # Loads mail settings from the sibling 'email' module (shadows stdlib name).
    app.config.from_object('email')
#
# def config_developing(app):
# print 'NOT production server'
# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://csp_team:csp2014hkust@docker-mysql/gmission_hkust'
# pass
#
#
# def config_production(app):
# print 'production server'
# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://csp_team:csp2014hkust@docker-mysql/gmission_hkust'
# pass
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/gmission/config/__init__.py",
"copies": "1",
"size": "2911",
"license": "mit",
"hash": -5404358308857559000,
"line_mean": 31.7078651685,
"line_max": 107,
"alpha_frac": 0.6698728959,
"autogenerated": false,
"ratio": 2.9256281407035174,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9084584854411143,
"avg_score": 0.0021832364384748804,
"num_lines": 89
} |
__author__ = 'chenzhao'
import logging
import os.path
import os
from logging.handlers import RotatingFileHandler
def set_logger(app):
    """Create the log directory and attach every application logger to *app*."""
    logs_path = app.config['GMISSION_LOGS_DIR']
    if not os.path.exists(logs_path):
        os.mkdir(logs_path)
    set_flask_logger(app, logs_path)
    set_profiling_logger(app, logs_path)
    set_admin_logger(app, logs_path)
    set_push_msg_logger(app, logs_path)
def set_flask_logger(app, logs_path):
    """Attach a rotating file handler (INFO and above) to Flask's app.logger."""
    handler = RotatingFileHandler(os.path.join(logs_path, 'GMission.log'),
                                  maxBytes=10000000, backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
def set_profiling_logger(app, logs_path):
    """Expose a rotating 'GMissionProfiling' logger as app.profiling_logger."""
    handler = RotatingFileHandler(
        os.path.join(logs_path, 'GMissionProfiling.log'),
        maxBytes=10000000, backupCount=1)
    handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    logger = logging.getLogger('GMissionProfiling')
    logger.setLevel(logging.DEBUG)
    # Avoid stacking duplicate handlers when called more than once.
    if not logger.handlers:
        logger.addHandler(handler)
    app.profiling_logger = logger
def set_admin_logger(app, logs_path):
    """Expose a rotating 'GMissionAdmin' logger as app.admin_logger."""
    handler = RotatingFileHandler(
        os.path.join(logs_path, 'GMissionAdmin.log'),
        maxBytes=10000000, backupCount=1)
    handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    logger = logging.getLogger('GMissionAdmin')
    logger.setLevel(logging.DEBUG)
    # Avoid stacking duplicate handlers when called more than once.
    if not logger.handlers:
        logger.addHandler(handler)
    app.admin_logger = logger
def set_push_msg_logger(app, logs_path):
    """Expose a rotating 'GMissionAsyncJobs' logger as app.push_msg_logger.

    FIX: the original's local variables were copy-pasted from
    set_profiling_logger and still named 'profiling_*', which was misleading;
    behavior is unchanged.
    """
    formatter = logging.Formatter('%(asctime)s %(message)s')
    log_file = os.path.join(logs_path, 'GMissionAsyncJobs.log')
    handler = RotatingFileHandler(log_file, maxBytes=10000000, backupCount=1)
    handler.setFormatter(formatter)
    logger = logging.getLogger('GMissionAsyncJobs')
    logger.setLevel(logging.DEBUG)
    # Avoid stacking duplicate handlers when called more than once.
    if not logger.handlers:
        logger.addHandler(handler)
    app.push_msg_logger = logger
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/gmission/config/log.py",
"copies": "1",
"size": "2226",
"license": "mit",
"hash": 4957775791757541000,
"line_mean": 30.3521126761,
"line_max": 97,
"alpha_frac": 0.725965858,
"autogenerated": false,
"ratio": 3.4565217391304346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46824875971304347,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenzhao'
from base import *
# type = text / image / selection
class HIT(db.Model, BasicModelMixin):
    """A crowdsourcing task (Human Intelligence Task) posted by a requester."""
    __tablename__ = 'hit'
    id = db.Column(db.Integer, primary_key=True)
    type = db.Column(db.String(20))
    title = db.Column(db.String(500))
    description = db.Column(db.TEXT)
    attachment_id = db.Column(db.Integer, db.ForeignKey('attachment.id'))
    attachment = db.relationship('Attachment', foreign_keys=attachment_id)
    campaign_id = db.Column(db.Integer, db.ForeignKey('campaign.id'))
    campaign = db.relationship('Campaign', lazy='select')
    credit = db.Column(db.Integer, default=10)
    status = db.Column(db.String(20), default='open') # or closed
    required_answer_count = db.Column(db.Integer, default=3)
    min_selection_count = db.Column(db.Integer, default=1)
    max_selection_count = db.Column(db.Integer, default=1)
    begin_time = db.Column(db.DateTime, default=datetime.datetime.now)
    # Default deadline: one day after creation.
    end_time = db.Column(db.DateTime, default=lambda: datetime.datetime.now() + datetime.timedelta(days=1))
    created_on = db.Column(db.DateTime, default=datetime.datetime.now)
    location_id = db.Column(db.Integer, db.ForeignKey('location.id'), nullable=True)
    location = db.relationship('Location', foreign_keys=location_id)
    requester = db.relationship('User')
    requester_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    selections = db.relationship('Selection', lazy='select')
    answers = db.relationship('Answer', lazy='select')
    def __unicode__(self):
        # BUG FIX: referenced the nonexistent attribute 'self.task'
        # (guaranteed AttributeError); show the HIT's title instead.
        return '<%s,%s>' % (repr(self.id), self.title)
class Answer(db.Model, BasicModelMixin):
    """A worker's answer to a HIT, optionally with an attachment and location."""
    id = db.Column(db.Integer, primary_key=True)
    hit = db.relationship('HIT', lazy='select')
    hit_id = db.Column(db.Integer, db.ForeignKey('hit.id'))
    brief = db.Column(db.String(100))
    attachment_id = db.Column(db.Integer, db.ForeignKey('attachment.id'))
    attachment = db.relationship('Attachment', lazy='immediate', foreign_keys=attachment_id)
    type = db.Column(db.String(20))
    accepted = db.Column(db.Boolean, default=False)
    created_on = db.Column(db.DateTime, default=datetime.datetime.now)
    location = db.relationship('Location', lazy='select')
    location_id = db.Column(db.Integer, db.ForeignKey('location.id'))
    worker = db.relationship('User', lazy='select')
    worker_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    def __unicode__(self):
        # BUG FIX: 'self.task' and 'self.option' do not exist (AttributeError)
        # and '%d' breaks for an unsaved row with id=None; show the id, the
        # owning hit and the brief instead.
        return '<%s,%s,%s>' % (repr(self.id), self.hit_id, self.brief)
class Selection(db.Model, BasicModelMixin):
    """A selectable option belonging to a HIT."""
    id = db.Column(db.Integer, primary_key=True)
    hit = db.relationship('HIT', lazy='select')
    hit_id = db.Column(db.Integer, db.ForeignKey('hit.id'))
    brief = db.Column(db.String(100))
    created_on = db.Column(db.DateTime, default=datetime.datetime.now)
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/gmission/models/crowdsourcing.py",
"copies": "1",
"size": "2810",
"license": "mit",
"hash": -6033324008222323000,
"line_mean": 36.4666666667,
"line_max": 107,
"alpha_frac": 0.6797153025,
"autogenerated": false,
"ratio": 3.361244019138756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4540959321638756,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenzhao'
import datetime
import re
import hashlib
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.orm import backref
db = SQLAlchemy()
# NOTE(review): name suggests this REAL column type is used for geo
# coordinate columns elsewhere -- confirm against the models that use it.
GEO_NUMBER_TYPE = db.REAL()
# python's builtin any() collapses to a bool; this keeps the element itself.
def good_any(l):
    """Return the first truthy item of iterable *l*, or False when none is."""
    return next((item for item in l if item), False)
def get_or_create(model, **kwargs):
    """Return the first *model* row matching **kwargs, creating one if absent.

    A newly created instance is flushed (so it gets its primary key) but NOT
    committed; committing is left to the caller.
    """
    instance = db.session.query(model).filter_by(**kwargs).first()
    if instance:
        return instance
    else:
        instance = model(**kwargs)
        db.session.add(instance)
        db.session.flush()
        return instance
def remove_and_commit(model, **kwargs):
    """Delete all *model* rows matching **kwargs and commit immediately."""
    db.session.query(model).filter_by(**kwargs).delete()
    db.session.commit()
class BasicModelMixin(object):
    """Shared model helpers: URL-style collection name and readable repr."""
    @staticmethod
    def model_base(cls):
        # Recursively walk the bases to find the class that directly
        # subclasses db.Model (good_any returns the first truthy result).
        if db.Model not in cls.__bases__:
            return good_any(BasicModelMixin.model_base(base) for base in cls.__bases__)
        return cls
    @classmethod
    def urlname(cls):
        """Return the model's CamelCase name as snake_case (e.g. 'position_trace')."""
        model_cls = BasicModelMixin.model_base(cls)
        if not model_cls:
            raise Exception('urlname must be called from a db.Model class')
        # print 'urlname', model_cls
        # Two-pass CamelCase -> snake_case conversion.
        s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model_cls.__name__)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower()
    def __unicode__(self):
        return "%s id:%d" % (self.__class__.__name__, self.id)
    def __repr__(self):
        return self.__unicode__()
    def __str__(self):
        return self.__unicode__()
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/gmission/models/base.py",
"copies": "1",
"size": "1545",
"license": "mit",
"hash": -3849444209082241000,
"line_mean": 23.5238095238,
"line_max": 87,
"alpha_frac": 0.6006472492,
"autogenerated": false,
"ratio": 3.6098130841121496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9701282419722471,
"avg_score": 0.0018355827179356592,
"num_lines": 63
} |
__author__ = 'chenzhao'
import inspect
from flask.ext import restless
from gmission.models import *
from .base import ReSTBase
from gmission.flask_app import db, app
REST_PREFIX = '/rest'
class ReSTManager(object):
    """Builds Flask-Restless CRUD endpoints under /rest for every db.Model."""
    rest_models = []
    @classmethod
    def rest_url_get_single(cls, model_obj):
        """Return the single-object REST URL for *model_obj*."""
        return '%s/%s/%d' % (REST_PREFIX, model_obj.urlname(), model_obj.id)
    @classmethod
    def init_rest(cls, app):
        """Create an API for each model, synthesizing a ReSTBase subclass
        (via type()) for models that do not already have one."""
        for model_cls in globals().values():
            if inspect.isclass(model_cls) and issubclass(model_cls, db.Model):
                # print model_cls
                if model_cls not in ReSTBase.__subclasses__():
                    ReSTManager.rest_models.append(type('ReST' + model_cls.__name__, (model_cls, ReSTBase), {}))
        cls.manager = restless.APIManager(app, flask_sqlalchemy_db=db,
                                          preprocessors=ReSTBase.universal_preprocessors(),
                                          postprocessors=ReSTBase.universal_postprocessors())
        for rest_class in ReSTBase.__subclasses__():
            cls.manager.create_api(rest_class, methods=['GET', 'POST', 'PUT', 'DELETE'],
                                   url_prefix=REST_PREFIX,
                                   results_per_page=None,
                                   allow_functions=True,
                                   exclude_columns=rest_class.rest_exclude_columns(),
                                   collection_name=rest_class.urlname(),
                                   preprocessors=rest_class.rest_preprocessors(),
                                   postprocessors=rest_class.rest_postprocessors(), )
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/gmission/rest/manager.py",
"copies": "1",
"size": "1746",
"license": "mit",
"hash": 5364546028743731000,
"line_mean": 44.9473684211,
"line_max": 112,
"alpha_frac": 0.5171821306,
"autogenerated": false,
"ratio": 4.523316062176166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5540498192776167,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CHEN Zhao'
import os
import random
from gmission.flask_app import app
from flask import Blueprint, jsonify, request, redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
from PIL import Image
# Blueprint serving image upload/download; directories come from app config.
image_blueprint = Blueprint('image', __name__, template_folder='templates')
ALLOWED_EXTENSIONS = ['pdf', 'png', 'jpg', 'jpeg', 'gif']
UPLOAD_DIR = app.config['GMISSION_IMAGE_UPLOAD_DIR']
THUMB_DIR = app.config['GMISSION_IMAGE_THUMB_DIR']
def allowed_file(filename):
    # NOTE(review): the extension check below is unreachable -- this function
    # always returns True, i.e. upload validation is currently disabled.
    # Confirm this is intentional before shipping.
    return True
    return '.' in filename and \
           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def unique_filename(file_obj):
    """Prefix the sanitized upload name with a random 9-digit id.

    NOTE(review): uses random.randint, so collisions are unlikely but possible.
    """
    rid = random.randint(100000000, 999999999)
    return '%d-%s' % (rid, secure_filename(file_obj.filename))
def gen_image_thumb(original_path, thumb_path):
    """Write a thumbnail of at most 250x250 px, preserving aspect ratio."""
    thumb_max_size = 250, 250
    im = Image.open(original_path)
    im.thumbnail(thumb_max_size, Image.ANTIALIAS)
    im.save(thumb_path)
@image_blueprint.route('/upload', methods=['GET', 'POST'])
def upload_file():
    """POST: store the image, generate its thumbnail, return filename/size
    as JSON. GET: render a minimal manual-upload form."""
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            filename = unique_filename(file)
            original_path = os.path.join(UPLOAD_DIR, filename)
            file.save(original_path)
            thumb_path = os.path.join(THUMB_DIR, filename)
            gen_image_thumb(original_path, thumb_path)
            return jsonify(filename=filename, size=os.stat(original_path).st_size)
    return '''
    <!doctype html>
    <title>Upload new File</title>
    <h1>Upload new File</h1>
    <form action="" method=post enctype=multipart/form-data>
    <p><input type=file name=file>
    <input type=submit value=Upload>
    </form>
    '''
@image_blueprint.route('/original/<filename>')
def uploaded_file(filename):
    """Serve an original uploaded image by filename."""
    return send_from_directory(UPLOAD_DIR, filename)
@image_blueprint.route('/thumb/<filename>')
def thumb_file(filename):
    """Serve the generated thumbnail for *filename*."""
    return send_from_directory(THUMB_DIR, filename)
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/gmission/blueprints/image.py",
"copies": "1",
"size": "2005",
"license": "mit",
"hash": -9137040077026091000,
"line_mean": 29.3787878788,
"line_max": 86,
"alpha_frac": 0.6698254364,
"autogenerated": false,
"ratio": 3.4391080617495713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4608933498149571,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CHEN Zhao'
import os
import subprocess
import random
from gmission.flask_app import app
from flask import Blueprint, jsonify, request, redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
# Blueprint serving audio upload/download; directory comes from app config.
audio_blueprint = Blueprint('audio', __name__, template_folder='templates')
UPLOAD_DIR = app.config['GMISSION_AUDIO_UPLOAD_DIR']
def allowed_file(filename):
    # NOTE(review): validation disabled -- every filename is accepted.
    return True
def unique_filename(file_obj):
    """Prefix the sanitized upload name with a random 9-digit id.

    NOTE(review): duplicated from the image blueprint; could be shared.
    """
    rid = random.randint(100000000, 999999999)
    return '%d-%s' % (rid, secure_filename(file_obj.filename))
@audio_blueprint.route('/upload', methods=['GET', 'POST'])
def upload_file():
    """Accept an uploaded audio file, store it under UPLOAD_DIR, and return
    its stored filename and size as JSON.

    BUG FIX: the original fell off the end (returning None, which Flask turns
    into a 500) both for GET requests and for rejected files; respond
    explicitly in those cases instead.
    """
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            filename = unique_filename(file)
            original_path = os.path.join(UPLOAD_DIR, filename)
            file.save(original_path)
            return jsonify(filename=filename, size=os.stat(original_path).st_size)
        return jsonify(error='file not allowed'), 400
    return 'POST an audio file to this endpoint', 405
@audio_blueprint.route('/original/<filename>')
def uploaded_file(filename):
    """Serve an original uploaded audio file by filename."""
    return send_from_directory(UPLOAD_DIR, filename)
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/gmission/blueprints/audio.py",
"copies": "1",
"size": "1113",
"license": "mit",
"hash": -2031719833654991400,
"line_mean": 27.5384615385,
"line_max": 86,
"alpha_frac": 0.6945193172,
"autogenerated": false,
"ratio": 3.6254071661237783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9685975324171301,
"avg_score": 0.026790231830495597,
"num_lines": 39
} |
__author__ = 'CHEN Zhao'
import time
import admin
import blueprints
from flask_app import app, cache
import rest
from flask import render_template, request, redirect, jsonify, g
from models import *
import json
# Mount each feature blueprint under its URL prefix, then build the REST API.
app.register_blueprint(blueprints.user_bp, url_prefix='/user')
app.register_blueprint(blueprints.image_bp, url_prefix='/image')
app.register_blueprint(blueprints.video_bp, url_prefix='/video')
app.register_blueprint(blueprints.audio_bp, url_prefix='/audio')
# flask_app.register_blueprint(blueprints._bp, url_prefix='/portal')
app.register_blueprint(blueprints.shortcut_bp, url_prefix='/shortcut')
app.register_blueprint(blueprints.shortcut_sd, url_prefix='/definitions')
rest.ReSTManager.init_rest(app)
# admin.init_admin()
def profile_log(*l):
    """Write *l* to the profiling logger configured in config/log.py."""
    app.profiling_logger.debug(l)
@app.route('/')
def index():
    """Render the landing page, passing the app config to the template."""
    return render_template('index.html', config=app.config)
@app.route('/test')
def test():
    """Print available functions."""
    # Map every non-static route to its view function's docstring.
    # (The docstring above is itself part of the output for '/test',
    # so it is kept verbatim.)
    func_list = {rule.rule: app.view_functions[rule.endpoint].__doc__
                 for rule in app.url_map.iter_rules()
                 if rule.endpoint != 'static'}
    return jsonify(func_list)
def is_cached_url(url):
    """Return True when *url* targets the cached /rest/location collection."""
    suffix = '/rest/location'
    return url[-len(suffix):] == suffix
# @app.before_request
# def before_request():
# g.request_start_time = time.time() # time.time is precise enough
# profile_log(request.path, 'crab', time.time() - g.request_start_time)
#
#
@app.after_request
def after_request(response):
    """For HEAD requests on restless collections, surface the result count
    from the JSON body as a GMISSION-Count response header."""
    try:
        if request.method == 'HEAD' and 'num_results' in response.data:
            count = json.loads(response.data)['num_results']
            response.headers.add('GMISSION-Count', str(count))
    except Exception:
        # Best-effort header only. BUG FIX: the original bare 'except:' also
        # trapped SystemExit/KeyboardInterrupt; narrow it to Exception.
        pass
    return response
# # resp_brief = response.data[:200] if 'json' in response.mimetype else ''
# # print "[After request:%s %s, %d, %s, %s]" % \
# # (request.method, request.url, response.status_code, response.mimetype, resp_brief)
# # if not getattr(response, 'simple_url_cached', False):
# # cache.set(request.url, response)
# return response
# @app.teardown_request
# def teardown_request(l):
# profile_log(request.path, time.time() - g.request_start_time)
# 409 Conflict: the best HTTP code I can find
@app.errorhandler(409)
def conflict(e):
    """Serialize the conflicting row (attached to the error as e.conflict_obj)
    back to the client as JSON."""
    print 'conflict!'
    obj = e.conflict_obj
    obj_dict = {c.name: getattr(obj, c.name) for c in obj.__table__.columns}
    return jsonify(**obj_dict)
# print e.get_single_url
# return redirect(e.get_single_url, code=303) # something wrong with redirect
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/gmission/views.py",
"copies": "1",
"size": "2574",
"license": "mit",
"hash": 2358753885647399400,
"line_mean": 28.5862068966,
"line_max": 96,
"alpha_frac": 0.6767676768,
"autogenerated": false,
"ratio": 3.266497461928934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9434454227690199,
"avg_score": 0.0017621822077469053,
"num_lines": 87
} |
__author__ = 'chenzhao'
######################################
# DO NOT RENAME THIS FILE TO email.py
######################################
# Import smtplib for the actual sending function
import smtplib
# Import the email modules we'll need
from email.mime.text import MIMEText
# SECURITY(review): Gmail credentials are hard-coded and committed to source;
# move them to environment/config and rotate the password.
gmail_user, gmail_password = 'gmission.from.hkust@gmail.com','csp2014hkust'
def invalid_receiver(receiver):
    """True when *receiver* is a test/placeholder address that must not get mail."""
    # BUG FIX: removed the unreachable 'pass' that followed the return.
    return 'test.com' in receiver or 'xxx.com' in receiver
def send_many(subject, body, receivers):
    """Send the same message individually to every address in *receivers*."""
    for address in receivers:
        send(subject, body, address)
def send(subject, body, receiver):
    """Send one plain-text email via Gmail SMTP (STARTTLS).

    Returns False without sending for blocked receivers, True otherwise.
    NOTE(review): no error handling -- SMTP failures propagate to the caller.
    """
    if invalid_receiver(receiver):
        return False
    msg = MIMEText(body)
    msg['Subject'] = subject
    msg['From'] = gmail_user
    msg['To'] = receiver
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.starttls()
    server.login(gmail_user, gmail_password)
    server.sendmail(gmail_user, [receiver, ], msg.as_string())
    server.quit()
    return True
if __name__ == '__main__':
send('test', 'body', 'zchenah@ust.hk') | {
"repo_name": "gmission/gmission",
"path": "services/cron_jobs/gmail.py",
"copies": "1",
"size": "1063",
"license": "mit",
"hash": -6336540074524997000,
"line_mean": 21.1666666667,
"line_max": 75,
"alpha_frac": 0.619943556,
"autogenerated": false,
"ratio": 3.6655172413793102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.478546079737931,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenzhao'
from flask import request, render_template, redirect, flash, url_for, g, jsonify, send_from_directory
from werkzeug.utils import secure_filename
import random
from models import *
from barrage.flask_app import app
UPLOAD_DIR = app.config['BB_IMAGE_UPLOAD_DIR']
URL_PREFIX = '/bb'
@app.before_first_request
def init_db():
    """Create all tables lazily on the first request."""
    db.create_all()
@app.route('/')
def index():
    """Render the manual-testing page."""
    return render_template('testing.html')
def BB_from_similar_images(img_id):
    """Return BBs attached to images near the given image, newest first.

    'Near' is defined by Image.near_condition (a +/-0.1 degree lon/lat box).
    """
    img = Image.query.get(img_id)
    bbs = BB.query.join(Image).filter(Image.near_condition(img.lon, img.lat)).order_by(BB.created_on.desc()).all()
    for bb in bbs:
        print bb.content, bb.created_on
    return bbs
@app.route('/api/images/<int:img_id>/BBs/', methods=['GET', 'POST'])
def BB_of_image(img_id):
    """POST: attach a new BB (from the JSON body) to the image.
    Both methods return the BB contents from nearby images as JSON."""
    if request.method=='POST':
        print request.form
        j = request.json
        print j
        bb = BB(content=j['content'], image_id=img_id)
        db.session.add(bb)
        db.session.commit()
    return jsonify(BB=[b.content for b in BB_from_similar_images(img_id)])
def allowed_file(filename):
    # NOTE(review): validation disabled -- every filename is accepted.
    return True
def unique_filename(file_obj):
    """Prefix the sanitized upload name with a random 9-digit id."""
    rid = random.randint(100000000, 999999999)
    return '%d-%s' % (rid, secure_filename(file_obj.filename))
def create_image(img_path, lon, lat):
    """Persist an Image row at (lon, lat), attached to the default Thing(id=1)."""
    thing = get_or_create(Thing, id=1)
    img = Image(path=img_path, thing_id=thing.id, lon=lon, lat=lat)
    db.session.add(img)
    db.session.commit()
    return img
@app.route('/api/images/', methods=['POST'])
def upload_image():
    """Store an uploaded image with its lon/lat and return its metadata as JSON.

    NOTE(review): if allowed_file() ever rejected (it currently always returns
    True) the view would return None -> Flask 500; add an explicit error
    response if validation is re-enabled. 'os' arrives via 'from models import *'.
    """
    file = request.files['file']
    lon = request.form['lon']
    lat = request.form['lat']
    if file and allowed_file(file.filename):
        filename = unique_filename(file)
        img_path = os.path.join(UPLOAD_DIR, filename)
        file.save(img_path)
        img = create_image(filename, lon, lat)
        return jsonify(filename=filename, size=os.stat(img_path).st_size, image_id=img.id)
@app.route('/api/images/<int:img_id>', methods=['GET'])
def download_image(img_id):
    """Serve the stored file for image *img_id* from UPLOAD_DIR."""
    print img_id
    img = Image.query.get(img_id)
    print img
    print img.path
    return send_from_directory(UPLOAD_DIR, img.path)
| {
"repo_name": "chenzhao/barrage-server",
"path": "barrage/views.py",
"copies": "1",
"size": "2178",
"license": "mit",
"hash": 4439304593953986000,
"line_mean": 24.6235294118,
"line_max": 114,
"alpha_frac": 0.6533516988,
"autogenerated": false,
"ratio": 3.1069900142653353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4260341713065335,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenzhao'
import datetime
import re
import os
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import and_
db = SQLAlchemy()
def get_or_create(model, commit=False, **kwargs):
    """Return the first *model* row matching **kwargs, creating one if absent.

    Sets instance._existed so callers can tell a hit from a fresh row; a new
    instance is flushed, and committed only when commit=True.
    """
    instance = db.session.query(model).filter_by(**kwargs).first()
    if instance:
        instance._existed = True
        return instance
    else:
        instance = model(**kwargs)
        db.session.add(instance)
        db.session.flush()
        if commit:
            db.session.commit()
        instance._existed = False
        return instance
class BB(db.Model):
    """A short 'barrage' comment (max 140 chars) attached to an Image."""
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.String(140))
    image_id = db.Column(db.Integer, db.ForeignKey('image.id'))
    image = db.relationship('Image', backref='BBs', lazy='joined')
    created_on = db.Column(db.DateTime, default=datetime.datetime.now)
class Image(db.Model):
    """An uploaded picture of a Thing, geotagged with lon/lat."""
    id = db.Column(db.Integer, primary_key=True)
    path = db.Column(db.String(200))
    thing_id = db.Column(db.Integer, db.ForeignKey('thing.id'))
    thing = db.relationship('Thing', backref='images', lazy='joined')
    lon = db.Column(db.Float)
    lat = db.Column(db.Float)
    created_on = db.Column(db.DateTime, default=datetime.datetime.now)
    @staticmethod
    def near_condition(lon, lat):
        """SQL filter: images inside a +/-0.1 degree box around (lon, lat)."""
        return and_(Image.lon>(lon-0.1), Image.lon<(lon+0.1) ,Image.lat>(lat-0.1) ,Image.lat<(lat+0.1) )
class Thing(db.Model):
    """A named subject that images can be attached to."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(140))
    created_on = db.Column(db.DateTime, default=datetime.datetime.now)
| {
"repo_name": "chenzhao/barrage-server",
"path": "barrage/models.py",
"copies": "1",
"size": "1590",
"license": "mit",
"hash": 2479510869770718000,
"line_mean": 28.4444444444,
"line_max": 104,
"alpha_frac": 0.6553459119,
"autogenerated": false,
"ratio": 3.2919254658385095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4447271377738509,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenzhao'
import inspect
import os
import os.path
import shutil
from gmission.models import *
def columns(cls):
    """Yield each mapped Column of the SQLAlchemy model *cls*."""
    mapped = cls.__mapper__.columns
    for column in mapped:
        yield column
def all_models():
    """Yield every db.Model subclass visible in this module's globals."""
    for cls in globals().values():
        if inspect.isclass(cls) and issubclass(cls, db.Model):
            yield cls
def to_java_type(sql_type):
    """Map an SQL column type to the matching Java field type name.

    Unknown types (including DATETIME) serialize as String.
    """
    key = str(sql_type)
    if key in ('REAL', 'FLOAT'):
        return 'double'
    if key == 'INTEGER':
        return 'int'
    if key == 'BOOLEAN':
        return 'boolean'
    return 'String'
def generate_java_class(model):
    """Render *model* as Java class source: one field per column plus urlname."""
    lines = ['public class %s {' % model.__name__]
    for col in columns(model):
        lines.append('\t%s %s;' % (to_java_type(col.type), col.name))
    lines.append('\tString urlname = "%s";' % model.urlname())
    lines.append('}')
    return '\n'.join(lines) + '\n'
def generate_java_code():
    """Regenerate Java model classes under java_models/, replacing old output."""
    java_dir = 'java_models'
    # BUG FIX: rmtree raises OSError when the directory does not exist yet,
    # so the very first run crashed; only remove it when present.
    if os.path.exists(java_dir):
        shutil.rmtree(java_dir)
    os.makedirs(java_dir)
    for model in all_models():
        jclass = generate_java_class(model)
        jfname = os.path.join(java_dir, '%s.java' % model.__name__)
        # 'open' replaces the Python-2-only 'file()' builtin; 'with' still
        # guarantees the handle is closed.
        with open(jfname, 'w') as jf:
            jf.write(jclass)
def main():
    """Entry point: regenerate all Java model files."""
    generate_java_code()
    pass
if __name__=='__main__':
    main()
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/model_translate.py",
"copies": "1",
"size": "1322",
"license": "mit",
"hash": 5567613565314658000,
"line_mean": 20.6721311475,
"line_max": 67,
"alpha_frac": 0.5491679274,
"autogenerated": false,
"ratio": 3.3638676844783717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44130356118783715,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenzhao'
import time
import random
import datetime
from unit_test import *
#
# SIM_USER_IDS = range(1000000)
# BULK_SIZE = 10000
#
#
# def create_sim_users():
# existing_sim_user_ids = set([int(u[0][3:]) for u in db.session.query(User.name).filter(User.name.like('sim%')).all()])
#
# l = SIM_USER_IDS
# for ids in [l[i:i+BULK_SIZE] for i in range(0, len(l), BULK_SIZE)]:
# print ids
# db.engine.execute(User.__table__.insert(), [{'name': 'sim%d'%i, 'email': 'sim%d@sim.com'%i, 'password': '111111'}
# for i in ids if i not in existing_sim_user_ids])
# db.session.commit()
# return
#
#
# def sim_trace():
# # users = User.query.filter(User.name.like('sim%'))
# sim_user_ids = (u[0] for u in db.session.query(User.id).filter(User.name.like('sim%')))
#
# traces = []
# for user_id in sim_user_ids:
# lo = random.uniform(1, 100)
# la = random.uniform(1, 100)
# traces.append({'longitude':lo, 'latitude':la, 'radius':0, 'user_id':user_id})
# if len(traces) == BULK_SIZE:
# db.engine.execute(UserLastPosition.__table__.insert(), traces)
# db.session.commit()
# del traces[:]
# print BULK_SIZE, 'inserted'
# if traces:
# db.engine.execute(UserLastPosition.__table__.insert(), traces)
# db.session.commit()
def create_sim_user(count):
    """Register *count* simulated workers through the REST API.

    Returns the list of user ids obtained by logging each one in.
    """
    uids = []
    for i in range(count):
        id_str = 'sim_work_%d'%i
        user = dict(email='%s@sim.com'%(id_str,), password='1234567', name=id_str)
        rest_post('user', user)
        u = post('user/login', **user).json()
        uids.append(u['id'])
    return uids
def sim_trace():
    """Create 10 simulated workers and post a random walk of position traces
    for each, starting from a fixed point on 2015-01-01.

    NOTE(review): the inline 'meters/second' comments are approximate -- the
    speeds are in degrees of lon/lat per second.
    """
    fire_bird_pos = (114.274277, 22.340725)
    total_workers = 10
    for uid in create_sim_user(total_workers):
        print uid
        steps = random.randint(10, 100)
        lon, lat = fire_bird_pos
        created_on = datetime.datetime(2015, 1, 1)
        for step in range(steps):
            lon_speed = random.random()/100000*random.choice([-1,1]) # meters/second , hopefully
            lat_speed = random.random()/100000*random.choice([-1,1]) # meters/second , hopefully
            duration = random.randint(5, 600) # seconds
            lon += lon_speed*duration
            lat += lat_speed*duration
            created_on += datetime.timedelta(seconds=duration)
            trace = dict(longitude=lon, latitude=lat, z=0, user_id=uid, created_on=created_on.isoformat().split('.')[0])
            trace_j = rest_post('position_trace', trace).json()
    pass
def main():
    """Script entry point: generate simulated users and their position traces."""
    sim_trace()
# Run the simulation when executed as a script.
if __name__=='__main__':
    main()
| {
"repo_name": "gmission/gmission",
"path": "hkust-gmission/test/sim.py",
"copies": "1",
"size": "2735",
"license": "mit",
"hash": 6982675749097573000,
"line_mean": 30.0795454545,
"line_max": 124,
"alpha_frac": 0.5616087751,
"autogenerated": false,
"ratio": 3.1436781609195403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.420528693601954,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenzheng'

import os
import subprocess
import argparse

# Shorthand for os.path.join.
pjoin = os.path.join
# NWChem input deck for an N2 molecule: geometry optimization and frequency
# analysis with a first functional/basis set, then a single-point energy with
# a second functional/basis set.
N2TEMPLATE = """
memory total 1000 mb
geometry units angstroms
N 0 0 0
N 0 0 1.1
end
title "N2 dft optimize"
charge 0
basis
N library "{Polarfunc_1st}"
end
dft
mult 1
xc {functional_1st}
end
task dft optimize
title "N2 dft freq"
charge 0
basis
N library "{Polarfunc_1st}"
end
dft
mult 1
xc {functional_1st}
end
task dft freq
title "N2 dft energy"
charge 0
basis
N library "{Polarfunc_2nd}"
end
dft
mult 1
xc {functional_2nd}
end
task dft energy
"""


def N2_temp_build(functional_1st, functional_2nd, Polarfunc_1st, Polarfunc_2nd):
    """Generate the N2 NWChem input file and run nwchem on it.

    Args:
        functional_1st: exchange-correlation functional for the optimize/freq tasks.
        functional_2nd: functional for the final single-point energy task.
        Polarfunc_1st: basis-set name for the optimize/freq tasks.
        Polarfunc_2nd: basis-set name for the energy task.

    Writes 'N2.nw' in the current directory, then invokes NWChem with its
    output redirected to 'N2.nwout'.
    """
    params = {
        'functional_1st': functional_1st,
        'functional_2nd': functional_2nd,
        'Polarfunc_1st': Polarfunc_1st,
        'Polarfunc_2nd': Polarfunc_2nd,
    }
    with open('N2.nw', 'w') as f:
        f.write(N2TEMPLATE.format(**params))
    # Use the already-imported subprocess module instead of os.system; the
    # commented-out subprocess variant in the original showed this intent.
    subprocess.call('nwchem N2.nw > N2.nwout', shell=True)
| {
"repo_name": "czhengsci/nano266",
"path": "Chen_Script/N2_Temp_Gen.py",
"copies": "1",
"size": "1182",
"license": "bsd-3-clause",
"hash": 4243657201282108000,
"line_mean": 15.6478873239,
"line_max": 80,
"alpha_frac": 0.6759729272,
"autogenerated": false,
"ratio": 2.7746478873239435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3950620814523943,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chenzheng'

import os
import subprocess
import argparse

# Shorthand for os.path.join.
pjoin = os.path.join
# NWChem input deck for an H2 molecule: geometry optimization and frequency
# analysis with a first functional/basis set, then a single-point energy with
# a second functional/basis set.
H2TEMPLATE = """
memory total 1000 mb
geometry units angstroms
H 0 0 0
H 0 0 0.7414
end
title "H2 dft optimize"
charge 0
basis
H library "{Polarfunc_1st}"
end
dft
mult 1
xc {functional_1st}
end
task dft optimize
title "H2 dft freq"
charge 0
basis
H library "{Polarfunc_1st}"
end
dft
mult 1
xc {functional_1st}
end
task dft freq
title "H2 dft energy"
charge 0
basis
H library "{Polarfunc_2nd}"
end
dft
mult 1
xc {functional_2nd}
end
task dft energy
"""


def H2_temp_build(functional_1st, functional_2nd, Polarfunc_1st, Polarfunc_2nd):
    """Generate the H2 NWChem input file and run nwchem on it.

    Args:
        functional_1st: exchange-correlation functional for the optimize/freq tasks.
        functional_2nd: functional for the final single-point energy task.
        Polarfunc_1st: basis-set name for the optimize/freq tasks.
        Polarfunc_2nd: basis-set name for the energy task.

    Writes 'H2.nw' in the current directory, then invokes NWChem with its
    output redirected to 'H2.nwout'.
    """
    params = {
        'functional_1st': functional_1st,
        'functional_2nd': functional_2nd,
        'Polarfunc_1st': Polarfunc_1st,
        'Polarfunc_2nd': Polarfunc_2nd,
    }
    with open('H2.nw', 'w') as f:
        f.write(H2TEMPLATE.format(**params))
    # Use the already-imported subprocess module instead of os.system; the
    # commented-out subprocess variant in the original showed this intent.
    subprocess.call('nwchem H2.nw > H2.nwout', shell=True)
# subprocess.call(command,shell=True) | {
"repo_name": "czhengsci/nano266",
"path": "Chen_Script/H2_Temp_Gen.py",
"copies": "1",
"size": "1157",
"license": "bsd-3-clause",
"hash": 997985094538085600,
"line_mean": 15.7826086957,
"line_max": 80,
"alpha_frac": 0.680207433,
"autogenerated": false,
"ratio": 2.78125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3961457433,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chick Markley'
from PySide import QtGui, QtCore
from ast_tool_box.models.code_models.code_model import AstTreeItem, CodeItem, FileItem, GeneratedCodeItem
from ast_tool_box.views.code_views.ast_tree_widget import AstTreePane, AstTreeWidget
from ast_tool_box.views.editor_widget import EditorPane
class CodePane(QtGui.QGroupBox):
    """
    A pane that can show one or more code_items
    A code item can be source_text ast

    Layout: a toolbar row (panel-count buttons, expand/collapse, reload), a
    QTabBar, and a horizontal QSplitter that holds one widget per code item.
    The tab bar and splitter track the same items by index.
    """
    def __init__(self, code_presenter=None, panel_count=2, default_pane_size=800):
        """Build the toolbar, tab bar and splitter that host code panels."""
        super(CodePane, self).__init__("Code && Trees")
        self.code_presenter = code_presenter
        self.panel_count = panel_count
        self.all_expanded = True
        self.default_pane_size = default_pane_size
        layout = QtGui.QVBoxLayout()

        # Toolbar with the panel-layout, expand/collapse and reload buttons.
        button_box = QtGui.QGroupBox()
        button_box.setMaximumHeight(40)
        toolbar_layout = QtGui.QHBoxLayout()
        # toolbar_layout.setContentsMargins(0, 0, 0, 0)
        toolbar_layout.addSpacing(0)
        one_button = QtGui.QPushButton(u"\u20DE")
        one_button.setToolTip("Show one window below")
        one_button.clicked.connect(self.set_to_one_panel)
        toolbar_layout.addWidget(one_button)
        two_button = QtGui.QPushButton(u"\u20DE\u20DE")
        two_button.setToolTip("Show two windows below")
        two_button.clicked.connect(self.set_to_two_panel)
        toolbar_layout.addWidget(two_button)
        self.three_button = QtGui.QPushButton(u"\u20DE\u20DE\u20DE")
        self.three_button.setToolTip("Show three window below")
        self.three_button.clicked.connect(self.set_to_three_panel)
        toolbar_layout.addWidget(self.three_button)
        # Three-panel mode only makes sense once more than two panels exist;
        # re-enabled in add_code_item.
        self.three_button.setEnabled(False)
        self.expand_all_button = QtGui.QPushButton(u"\u27F9\u27F8")
        self.expand_all_button.setToolTip("Expand all AST trees")
        self.expand_all_button.clicked.connect(self.expand_all_asts)
        toolbar_layout.addWidget(self.expand_all_button)
        reload_button = QtGui.QPushButton(u"\u27F2")
        reload_button.setToolTip("Reload file")
        reload_button.clicked.connect(self.reload_panel)
        toolbar_layout.addWidget(reload_button)
        toolbar_layout.addStretch(1)
        button_box.setLayout(toolbar_layout)
        layout.addWidget(button_box)

        self.code_splitter = QtGui.QSplitter(self, orientation=QtCore.Qt.Horizontal)
        self.tab_bar = QtGui.QTabBar()
        # style = QtGui.QStyleFactory.create(u"Plastique")
        # self.tab_bar.setStyle(style)
        self.tab_bar.setTabsClosable(True)
        self.tab_bar.setUsesScrollButtons(True)
        self.tab_bar.tabCloseRequested.connect(self.delete_at)
        self.tab_bar.currentChanged.connect(self.tab_selected)
        layout.addWidget(self.tab_bar)
        # delete_signal = QtCore.Signal(int)
        # delete_signal.connect(self.delete_tab_happened)
        # Qt signals must be declared as class attributes of a QObject
        # subclass, so a small throwaway class carries the tab-deleted signal.
        class XXX(QtCore.QObject):
            delete_signal = QtCore.Signal(int)
        self.xxx = XXX(self)
        self.xxx.delete_signal.connect(self.delete_tab_happened)
        layout.addWidget(self.code_splitter)
        self.setLayout(layout)

    def expand_all_asts(self):
        """Toggle between expanding and collapsing every AST tree panel.

        Also flips AstTreeWidget.expand_all_at_create so newly created trees
        match the current mode, and updates the button glyph/tooltip.
        """
        if self.all_expanded:
            self.all_expanded = False
            AstTreeWidget.expand_all_at_create = False
            self.expand_all_button.setText(u"\u27FA")
            self.expand_all_button.setToolTip("Expand all AST trees")
            for index in range(self.code_splitter.count()):
                try:
                    self.code_splitter.widget(index).collapse_all()
                except AttributeError:
                    # Not every panel is a tree pane (some are plain editors).
                    pass
        else:
            self.all_expanded = True
            AstTreeWidget.expand_all_at_create = True
            self.expand_all_button.setText(u"\u27F9\u27F8")
            self.expand_all_button.setToolTip("Collapse all ast trees")
            for index in range(self.code_splitter.count()):
                try:
                    self.code_splitter.widget(index).expand_all()
                except AttributeError:
                    pass

    def reload_panel(self):
        """Remove every panel/tab after the first two.

        NOTE(review): despite the name, nothing is re-read from disk here;
        this only drops splitter children from the right end down to index 2
        -- confirm this matches the "Reload file" button's intent.
        """
        for index in range(self.code_splitter.count()-1, 1, -1):
            self.tab_bar.removeTab(index)
            self.code_splitter.widget(index).deleteLater()

    def clear(self):
        """Remove every panel and its tab."""
        for index in range(self.code_splitter.count()-1, -1, -1):
            self.tab_bar.removeTab(index)
            self.code_splitter.widget(index).deleteLater()

    @QtCore.Slot(int)
    def tab_selected(self, index):
        """Give the newly selected tab's panel the largest share of space."""
        if index < self.tab_bar.count():
            print("Tab selected %d" % index)
            self.set_panel_sizes(emphasize_index=index)

    @QtCore.Slot(int)
    def delete_at(self, index):
        """Handle the tab close button: drop the panel, tab and model item."""
        item = self.code_splitter.widget(index)
        item.deleteLater()
        self.code_presenter.delete_last_item()
        # item.destroy(destroyWindow=True, destroySubWindows=True)
        self.tab_bar.removeTab(index)
        #
        # TODO the following call does not work as expected due to the deleteLater above
        #
        # self.set_panel_sizes()
        # NOTE(review): the emit is meant to run the resize after the widget
        # deletion; with a default (direct) connection it is still delivered
        # synchronously -- confirm a queued connection is not needed.
        self.xxx.delete_signal.emit(index)

    @QtCore.Slot(int)
    def delete_tab_happened(self, index):
        """Resize the remaining panels after a tab was deleted."""
        print("deleted_tab_happened %d %s" % (index, self.code_splitter.sizes()))
        self.set_panel_sizes()

    def set_to_one_panel(self):
        # Show a single emphasized panel.
        self.panel_count = 1
        self.set_panel_sizes()

    def set_to_two_panel(self):
        # Show two panels (60/40 split).
        self.panel_count = 2
        self.set_panel_sizes()

    def set_to_three_panel(self):
        # Show three panels (50/30/20 split).
        self.panel_count = 3
        self.set_panel_sizes()

    def set_panel_sizes(self, emphasize_index=None):
        """
        resize the panel based on current panel pattern

        The emphasized panel (selected tab, or the right-most by default)
        gets the largest share; the previously widest panels get the
        second/third shares. All other panels are collapsed to zero width.
        """
        sizes = self.code_splitter.sizes()
        # print("In set panel sizes splitter %s self.panel_count %d sizes %s" %
        #       (
        #           [self.code_splitter.size(),self.code_splitter.baseSize(), self.code_splitter.frameSize()],
        #           self.panel_count,
        #           sizes
        #       )
        # )
        total = sum(sizes)
        if total == 0:
            # Splitter not laid out yet; fall back to a nominal width.
            total = self.default_pane_size
        # NOTE: relies on Python 2 map() returning a list -- the result is
        # assigned to by index below, which a Python 3 map iterator would not
        # support.
        new_sizes = map(lambda x: 0, sizes)
        panel_count = self.panel_count
        if panel_count > len(sizes):
            panel_count = len(sizes)
        if emphasize_index is None or emphasize_index >= len(sizes):
            # Default: emphasize the right-most panels.
            main_emphasis_index = -1
            second_emphasis_index = -2
            third_emphasis_index = -3
        elif isinstance(emphasize_index, int):
            main_emphasis_index = emphasize_index
            # Second/third emphasis go to the currently widest panels.
            current_max_tab = max(sizes)
            second_emphasis_index = sizes.index(current_max_tab)
            sizes[second_emphasis_index] = 0
            current_max_tab = max(sizes)
            third_emphasis_index = sizes.index(current_max_tab)
        else:
            main_emphasis_index = -1
            second_emphasis_index = -2
            third_emphasis_index = -3
        # Distribute space as 100, 60/40 or 50/30/20 depending on the mode.
        if panel_count == 1:
            new_sizes[main_emphasis_index] = total
        elif panel_count == 2:
            new_sizes[second_emphasis_index] = int(total * 0.4)
            new_sizes[main_emphasis_index] = int(total * 0.6)
        elif panel_count > 2:
            new_sizes[third_emphasis_index] = int(total * 0.2)
            new_sizes[second_emphasis_index] = int(total * 0.3)
            new_sizes[main_emphasis_index] = int(total * 0.5)
        self.code_splitter.setSizes(new_sizes)

    def current_item(self):
        """Return the presenter's notion of the current code item."""
        return self.code_presenter.current_item()

    def add_code_item(self, code_item):
        """
        add a new code item widget to the right hand side of the
        splitter, reduce size of left hand members

        The widget type depends on the item kind: plain editors for source
        text and generated code, an AstTreePane for ASTs.
        """
        assert isinstance(code_item, CodeItem)
        if isinstance(code_item, FileItem):
            widget = EditorPane()
            widget.setPlainText(code_item.code)
        elif isinstance(code_item, AstTreeItem):
            widget = AstTreePane(self.code_presenter, code_item.code, tab_name=code_item.code_name)
        elif isinstance(code_item, GeneratedCodeItem):
            widget = EditorPane()
            widget.setPlainText(code_item.code)
        else:
            CodePane.show_error("add_code_item got %s %s" % (type(code_item), code_item))
            return
        self.tab_bar.addTab(code_item.code_name)
        self.tab_bar.setCurrentIndex(self.tab_bar.count()-1)
        self.code_splitter.addWidget(widget)
        if self.code_splitter.count() > 2:
            # Enough panels now exist for the three-panel layout.
            self.three_button.setEnabled(True)
        self.code_splitter.setCollapsible(self.code_splitter.count()-1, True)
        self.set_panel_sizes()

    def resolve_transform_arguments(self, transform_thing):
        """Prompt the user for the transform's positional arguments.

        Previously entered values are persisted per transform package via
        QSettings. Returns the list of entered strings, or None if the
        dialog was cancelled.
        """
        settings = QtCore.QSettings()
        group_name = "transforms/%s/parameters" % transform_thing.package_name
        dialog = QtGui.QDialog()
        dialog.setSizeGripEnabled(True)
        class ThreeLineEditor(QtGui.QPlainTextEdit):
            def sizeHint(self):
                return QtCore.QSize(200, 25)
        form_text_boxes = []
        form_layout = QtGui.QFormLayout()
        form_layout.addRow(QtGui.QLabel("Parameters required for this transform"))
        settings.beginGroup(group_name)
        for positional_arg in transform_thing.positional_args:
            # Prefer the last value the user entered for this argument,
            # falling back to the default from the transform's source.
            default_text = positional_arg.default_source if positional_arg.default_source else ""
            default_text = settings.value(positional_arg.name, default_text)
            # text_editor = ThreeLineEditor()
            # text_editor.document().setPlainText(default_text)
            # form_text_boxes.append(text_editor)
            text_editor = QtGui.QLineEdit()
            text_editor.setText(default_text)
            form_text_boxes.append(text_editor)
            label_text = positional_arg.name
            # label_text = "%s\n%s" % (
            #     positional_arg.name,
            #     positional_arg.default_source if positional_arg.default_source else "No default value"
            # )
            form_layout.addRow(
                QtGui.QLabel(label_text),
                text_editor,
            )
        settings.endGroup()
        form_layout.addRow(
            QtGui.QLabel("""
Check source in editor for default values, if any. The text in each box
will be processed by the python eval() function so if you want a string
be sure to put quotes around it
""")
        )
        cancel_button = QtGui.QPushButton("Cancel")
        cancel_button.clicked.connect(dialog.reject)
        accept_button = QtGui.QPushButton("Go")
        accept_button.clicked.connect(dialog.accept)
        form_layout.addRow(
            cancel_button,
            accept_button,
        )
        dialog.setLayout(form_layout)
        accept_button.setFocus()
        result = dialog.exec_()
        print("result %s" % result)
        if not result:
            return None
        # result = [x.document().toPlainText().strip() for x in form_text_boxes]
        result = [x.text().strip() for x in form_text_boxes]
        # Persist what was typed so it becomes the default next time.
        settings = QtCore.QSettings()
        settings.beginGroup(group_name)
        for index, text in enumerate(result):
            settings.setValue(transform_thing.positional_args[index].name, result[index])
            print("saving group %s param %s value %s" % (
                group_name,
                transform_thing.positional_args[index],
                result[index]
            ))
        settings.endGroup()
        return result

    @staticmethod
    def show_error(message):
        """Show `message` in a modal error dialog."""
        # QtGui.QErrorMessage().showMessage(message)
        message_box = QtGui.QMessageBox()
        message_box.setText("Error:\n%s" % message)
        message_box.exec_()

    def search_box_changed(self):
        """Select the first tree item matching the search text.

        NOTE(review): this references self.search_box and self.ast_tree_tabs,
        neither of which is created anywhere in this class -- it looks like a
        copy of AstTreePane.search_box_changed and is probably dead code;
        confirm before relying on it.
        """
        if not self.search_box.text():
            return
        current_tree = self.ast_tree_tabs.currentWidget()
        # print("current tree %s" % current_tree)
        #
        # for widget_index in range(self.ast_tree_tabs.count()):
        #     widget = self.ast_tree_tabs.widget(widget_index)
        #     print("widget %s ast_tree %s" % (widget, widget.ast_root))
        items = current_tree.findItems(
            self.search_box.text(),
            QtCore.Qt.MatchContains | QtCore.Qt.MatchRecursive,
            column=0
        )
        # print("Found %d items" % len(items))
        if len(items) > 0:
            # print(items[0])
            current_tree.setCurrentItem(items[0])
            current_tree.expandItem(items[0])
| {
"repo_name": "ucb-sejits/ast_tool_box",
"path": "ast_tool_box/views/code_views/code_pane.py",
"copies": "1",
"size": "12606",
"license": "mit",
"hash": -1039237745624882400,
"line_mean": 36.4065281899,
"line_max": 110,
"alpha_frac": 0.6025702047,
"autogenerated": false,
"ratio": 3.8153753026634383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4917945507363438,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chick Markley'
from PySide import QtGui, QtCore
from ast_tool_box.views.highlighter import Highlighter
class EditorPane(QtGui.QPlainTextEdit):
    """Plain-text code editor: fixed-pitch font, no wrap, syntax highlighting."""

    def __init__(self, parent_panel=None):
        super(EditorPane, self).__init__()
        self.file_name = ''
        self.parent_panel = parent_panel
        mono_font = QtGui.QFont()
        mono_font.setFamily('Courier')
        mono_font.setFixedPitch(True)
        mono_font.setPointSize(13)
        self.setFont(mono_font)
        self.setWordWrapMode(QtGui.QTextOption.NoWrap)
        self.setStyleSheet("selection-color: black; selection-background-color: yellow;")
        if self.parent_panel:
            # Keep the surrounding panel's save/undo/redo buttons in sync.
            self.textChanged.connect(self.parent_panel.text_changed)
            self.undoAvailable.connect(self.parent_panel.enable_undo)
            self.redoAvailable.connect(self.parent_panel.enable_redo)
        self.highlighter = Highlighter(self.document())

    def set_file_name(self, file_name):
        """Remember the path used when this editor's content is saved."""
        self.file_name = file_name

    def set_read_only(self, value):
        """Set read-only state, also updating the parent panel's buttons."""
        if self.parent_panel:
            self.parent_panel.set_read_only(value)
        self.setReadOnly(value)

    def setPlainText(self, text):
        """Load text, enabling save/undo on the parent panel when present."""
        if self.parent_panel:
            self.parent_panel.save_button.setEnabled(True)
            self.parent_panel.undo_button.setEnabled(True)
        super(EditorPane, self).setPlainText(text)
class EditorPanel(QtGui.QGroupBox):
    """Editor group box with Save/Undo/Redo buttons wired to an EditorPane."""

    def __init__(self, transform_pane=None):
        super(EditorPanel, self).__init__("Editor")
        self.transform_pane = transform_pane
        self.transform_collection = None
        layout = QtGui.QVBoxLayout()
        button_panel = QtGui.QDialogButtonBox(QtCore.Qt.Horizontal)
        # The buttons are created by the button box; the original code also
        # built three stand-alone QPushButtons (one mislabeled "Undo" for the
        # redo button) that were immediately overwritten -- removed as dead code.
        self.save_button = button_panel.addButton(u"Save", QtGui.QDialogButtonBox.AcceptRole)
        self.undo_button = button_panel.addButton(u"Undo", QtGui.QDialogButtonBox.ActionRole)
        self.redo_button = button_panel.addButton(u"Redo", QtGui.QDialogButtonBox.ActionRole)
        self.editor = EditorPane(parent_panel=self)
        self.save_button.clicked.connect(self.save)
        self.undo_button.clicked.connect(self.undo)
        self.redo_button.clicked.connect(self.redo)
        layout.addWidget(button_panel)
        layout.addWidget(self.editor)
        self.setLayout(layout)

    def text_changed(self):
        """Editor content changed: allow saving and undoing."""
        self.save_button.setEnabled(True)
        self.undo_button.setEnabled(True)

    def undo(self):
        """Undo the last edit; redo becomes available."""
        self.editor.undo()
        # if not self.editor.undoAvailable():
        #     self.undo_button.setEnabled(False)
        self.redo_button.setEnabled(True)

    @QtCore.Slot(bool)
    def enable_undo(self, value):
        """Track the editor's undoAvailable signal."""
        self.undo_button.setEnabled(value)

    def redo(self):
        """Redo the last undone edit; undo becomes available."""
        self.editor.redo()
        # if not self.editor.redoAvailable():
        #     self.redo_button.setEnabled(False)
        self.undo_button.setEnabled(True)

    @QtCore.Slot(bool)
    def enable_redo(self, value):
        """Track the editor's redoAvailable signal."""
        self.redo_button.setEnabled(value)

    def save(self):
        """Write the editor text to its file and notify the transform pane."""
        print("Got save file for %s" % self.editor.file_name)
        file_text = self.editor.toPlainText()
        with open(self.editor.file_name, "w+") as f:
            f.write(file_text)
        self.editor.setPlainText(file_text)
        self.save_button.setEnabled(False)
        if self.transform_pane:
            if self.transform_collection:
                print("calling update file")
                self.transform_pane.transform_presenter.update_file(self.transform_collection)
            else:
                print("calling reload transforms")
                self.transform_pane.transform_presenter.reload_transforms()

    def set_read_only(self, value):
        """Read-only editors get all their buttons disabled."""
        if value:
            self.disable_buttons()

    def disable_buttons(self):
        """Disable save, undo and redo."""
        self.save_button.setEnabled(False)
        self.undo_button.setEnabled(False)
        self.redo_button.setEnabled(False)
| {
"repo_name": "ucb-sejits/ast_tool_box",
"path": "ast_tool_box/views/editor_widget.py",
"copies": "1",
"size": "4113",
"license": "mit",
"hash": -1578324404918985000,
"line_mean": 33.5630252101,
"line_max": 94,
"alpha_frac": 0.6442985655,
"autogenerated": false,
"ratio": 3.780330882352941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4924629447852941,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chick Markley'
from PySide import QtGui, QtCore
class SearchLineEdit(QtGui.QLineEdit):
    """Line edit with an embedded search button (left) and clear button (right).

    Parameters:
        parent: standard Qt parent widget.
        on_changed: callable connected to textChanged.
        on_next: callable connected to the search button and to returnPressed
            (advance to the next match).
    """
    def __init__(self, parent=None, on_changed=None, on_next=None):
        QtGui.QLineEdit.__init__(self, parent)
        self.clear_button = QtGui.QToolButton(self)
        self.clear_button.setIcon(
            QtGui.QIcon.fromTheme(
                "system-close",
                fallback=QtGui.QIcon(QtGui.QPixmap(r"images/close_icon.png"))
            )
        )
        #self.clear_button.setIconSize(clear_pixmap.size())
        self.clear_button.setCursor(QtCore.Qt.ArrowCursor)
        self.clear_button.setStyleSheet("QToolButton { border: none; padding: 0px;}")
        # Hidden until there is text to clear.
        self.clear_button.hide()
        self.clear_button.clicked.connect(self.clear)
        self.textChanged.connect(self.updateCloseButton)
        if on_changed:
            self.textChanged.connect(on_changed)
        self.search_button = QtGui.QToolButton(self)
        self.search_button.setIcon(
            QtGui.QIcon.fromTheme(
                "system-search",
                fallback=QtGui.QIcon(QtGui.QPixmap(r"images/search_icon.png"))
            )
        )
        self.search_button.setToolTip("Clicking this will advance search to next item")
        self.search_button.setStyleSheet("QToolButton { border: none; padding: 0px;}")
        if on_next:
            self.search_button.clicked.connect(on_next)
            self.returnPressed.connect(on_next)
        # Pad the text area so it does not overlap the embedded buttons.
        # (Fixed: the original stylesheet read "padding - right: % spx" --
        # a malformed CSS property that Qt ignored.)
        frame_width = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
        self.setStyleSheet(
            "QLineEdit { padding-left: %spx; padding-right: %spx; } " %
            (self.search_button.sizeHint().width() + frame_width + 1, self.clear_button.sizeHint().width() + frame_width + 1)
        )
        msz = self.minimumSizeHint()
        self.setMinimumSize(max(msz.width(),
                                self.search_button.sizeHint().width() +
                                self.clear_button.sizeHint().width() + frame_width * 2 + 2),
                            max(msz.height(),
                                self.clear_button.sizeHint().height() + frame_width * 2 + 2))
        # self.searchMenu = QtGui.QMenu(self.search_button)
        # self.search_button.setMenu(self.searchMenu)
        # self.searchMenu.addAction("Google")
        # self.search_button.setPopupMode(QtGui.QToolButton.InstantPopup)

    def resizeEvent(self, event):
        """Keep the embedded buttons pinned to the left/right edges."""
        sz = self.clear_button.sizeHint()
        frame_width = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
        self.clear_button.move(self.rect().right() - frame_width -
                               sz.width(),
                               (self.rect().bottom() + 1 - sz.height()) / 2)
        self.search_button.move(self.rect().left() + 1,
                                (self.rect().bottom() + 1 - sz.height()) / 2)

    def updateCloseButton(self, text):
        """Show the clear button only while there is text."""
        if text:
            self.clear_button.setVisible(True)
        else:
            self.clear_button.setVisible(False)
if __name__ == '__main__':
    # Minimal manual test: show the widget in its own application.
    # (Fixed: the original passed two QPixmaps positionally, which landed in
    # the `parent` and `on_changed` parameters -- the icons are loaded inside
    # __init__, so no arguments are needed.)
    app = QtGui.QApplication([])
    w = SearchLineEdit()
    w.show()
    app.exec_()
| {
"repo_name": "ucb-sejits/ast_tool_box",
"path": "ast_tool_box/views/search_widget.py",
"copies": "1",
"size": "3170",
"license": "mit",
"hash": -2911495842420046300,
"line_mean": 38.625,
"line_max": 125,
"alpha_frac": 0.5977917981,
"autogenerated": false,
"ratio": 3.6774941995359627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4775285997635963,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chick Markley'
import os
import sys
import imp
import inspect
from pprint import pprint
class Util(object):
    """Static helpers for mapping file paths to dotted package names and for
    unloading/reloading previously imported packages."""

    @staticmethod
    def is_package(directory):
        """Return True if `directory` contains an __init__.py (is a package)."""
        # print "is_package testing %s" % os.path.join(directory, "__init__.py")
        return os.path.isfile(os.path.join(directory, "__init__.py"))

    @staticmethod
    def path_to_path_and_package(file_path, package_name=None):
        """
        converts a file name into a path and a package name

        Splits the trailing component off `file_path` (stripping a .py
        suffix) and walks up the directory tree, prefixing parent directory
        names into a dotted package name for as long as the components look
        like packages. Returns (directory, dotted_package_name).
        """
        # print "path_to_path_and_package got %s and %s" % (file_path, package_name)
        if package_name is None:
            file_path, package_name = os.path.split(file_path)
            if package_name.endswith(".py"):
                package_name = ".".join(package_name.split(".")[:-1])
        # NOTE(review): this tests `package_name` (a dotted name) as if it
        # were a directory relative to the CWD; it may have been intended to
        # test `file_path` instead. Behavior preserved -- confirm with callers.
        if Util.is_package(package_name):
            file_path2, file_name2 = os.path.split(file_path)
            return Util.path_to_path_and_package(file_path2, ".".join([file_name2, package_name]))
        else:
            if file_path == "":
                file_path = '.'
            return file_path, package_name

    @staticmethod
    def get_module(package_name):
        """Return the already-imported module named `package_name`, or None."""
        if package_name in sys.modules:
            return sys.modules[package_name]
        return None

    @staticmethod
    def clear_classes_and_reload_package(name):
        """Delete every class defined in module `name`, then re-import it."""
        loaded_module = sys.modules[name]
        # list(...) so we can delete from __dict__ while iterating; this also
        # works on Python 3, where keys() is a view and `[:]` would fail.
        keys = list(loaded_module.__dict__.keys())
        for key in keys:
            if inspect.isclass(loaded_module.__dict__[key]):
                # print("deleting %s" % key)
                del loaded_module.__dict__[key]
        del sys.modules[name]
        __import__(name)
        # imp.reload(loaded_module)

    @staticmethod
    def clear_classes_in_package(name):
        """If module `name` is loaded, delete its classes and unload it."""
        if name in sys.modules:
            loaded_module = sys.modules[name]
            keys = list(loaded_module.__dict__.keys())
            for key in keys:
                if inspect.isclass(loaded_module.__dict__[key]):
                    # print("deleting %s" % key)
                    del loaded_module.__dict__[key]
            del sys.modules[name]
"repo_name": "ucb-sejits/ast_tool_box",
"path": "ast_tool_box/util.py",
"copies": "1",
"size": "2118",
"license": "mit",
"hash": 1536131648322578000,
"line_mean": 32.109375,
"line_max": 98,
"alpha_frac": 0.5627950897,
"autogenerated": false,
"ratio": 3.8933823529411766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49561774426411764,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chick Markley'
import types
import ast
from ast_tool_box.views.editor_widget import EditorPane
from ast_tool_box.views.search_widget import SearchLineEdit
from ast_tool_box.models.transform_models.transform_file import AstTransformItem, CodeGeneratorItem
from PySide import QtGui, QtCore
DEBUGGING = False
class TransformTreeWidgetItem(QtGui.QTreeWidgetItem):
    """
    Tree row that keeps a reference back to the transform object
    (file, transform, or code generator) it represents.
    """

    def __init__(self, parent, name=None, source=None):
        super(TransformTreeWidgetItem, self).__init__(parent)
        self.name = name
        self.source = source

    def picked(self):
        """Debug hook: report which row was selected."""
        print("got selected %s" % self.name)
class TransformTreeWidget(QtGui.QTreeWidget):
    """
    displays an ast as a tree widget

    One top-level row per transform file, with child groups for its
    NodeTransformer and CodeGenVisitor entries.
    """
    # Column indices; only COL_NODE is actually used by this widget.
    COL_NODE = 0
    COL_FIELD = 1
    COL_CLASS = 2
    COL_VALUE = 3
    COL_POS = 4
    COL_HIGHLIGHT = 5
    expand_all_at_create = True

    def __init__(self, transform_presenter=None, transform_pane=None):
        super(TransformTreeWidget, self).__init__()
        self.transform_presenter = transform_presenter
        self.transform_pane = transform_pane
        self.setColumnCount(2)
        self.setHeaderLabels(["Transforms"])
        self.header().resizeSection(TransformTreeWidget.COL_NODE, 800)
        self.header().setStretchLastSection(True)
        # NOTE(review): PySide signals only work when declared as class
        # attributes of a QObject subclass; this instance assignment does not
        # create a connectable signal and appears to be unused.
        self.transform_signal = QtCore.Signal(int)
        self.expand_descendants_action = QtGui.QAction(
            "&Expand all children",
            self,
            statusTip="Expand all descendant nodes",
            triggered=self.expand_descendants
        )
        self.itemClicked.connect(self.clicked)
        self.itemDoubleClicked.connect(self.double_clicked)

    @QtCore.Slot(TransformTreeWidgetItem)
    def clicked(self, item):
        """Single click: show the clicked item's source in the editor."""
        print("click %s" % item)
        self.transform_pane.load_editor_from(item)

    @QtCore.Slot(TransformTreeWidgetItem)
    def double_clicked(self, info):
        """Double click: apply the row's transform/code generator, if it is one."""
        print("doubleclick on %s" % info)
        print("doubleclick on %s" % self.currentItem())
        print("comparing to %s" % AstTransformItem)
        print("comparing to %s" % AstTransformItem)
        if isinstance(self.currentItem().source, AstTransformItem) or\
                isinstance(self.currentItem().source, CodeGeneratorItem):
            self.transform_presenter.apply_current_transform()
        else:
            self.transform_pane.show_error("Only works for Ast Transforms and Code Generators")

    def contextMenuEvent(self, event):
        """Right-click menu: expand-children plus one entry per known transform."""
        menu = QtGui.QMenu(self)
        menu.addAction(self.expand_descendants_action)
        sub_menu = QtGui.QMenu(self)
        sub_menu.setTitle("Available transformers")
        for transform_item in self.transform_presenter.transform_items():
            sub_menu_action = TransformerAction(transform_item=transform_item, ast_tree_widget=self)
            sub_menu.addAction(sub_menu_action)
        menu.addMenu(sub_menu)
        menu.exec_(event.globalPos())

    def transform_current_ast(self, name):
        # NOTE(review): self.ast_transformers and self.main_window are never
        # set in this class; this looks like dead code carried over from
        # another widget -- confirm before relying on it.
        transformer = self.ast_transformers.get_instance_by_name(name)
        self.main_window.add_tree_tab(transformer=transformer)

    def expand_descendants(self, item=None):
        """Expand all descendants of the current item"""
        if item is None:
            print("item is none")
            item = self.currentItem()
            print("item is %s" % item)
        item.setExpanded(True)
        for child_index in range(item.childCount()):
            self.expand_descendants(item.child(child_index))

    def collapse_descendants(self, item=None):
        """Collapse all descendants of the current item"""
        if item is None:
            item = self.currentItem()
        item.setExpanded(False)
        for child_index in range(item.childCount()):
            self.collapse_descendants(item.child(child_index))

    def rebuild(self, transform_file):
        """Rebuild the subtree for `transform_file` in place.

        NOTE(review): if the file's row is not found, execution falls through
        with file_node None after the message and remove_children would raise.
        """
        file_node = None
        for index in range(self.topLevelItemCount()):
            wi = self.topLevelItem(index)
            if wi.source is transform_file:
                file_node = wi
                break
        if not file_node:
            print("Could not find %s" % transform_file)

        def remove_children(node):
            # Walk children right-to-left so indices stay valid during removal.
            for child_index in xrange(node.childCount()-1, -1, -1):
                print("removing child %d from node %s" % (child_index, node))
                remove_children(node.child(child_index))
                node.takeChild(child_index)
        remove_children(file_node)
        self.build_children(transform_file, file_node)
        self.expandToDepth(100)

    def build_children(self, transform_file, file_node):
        """Add transform and code-generator child rows under `file_node`.

        Returns the first child created (used as the initial selection), or
        `file_node` itself when the file failed to load, or None.
        """
        first_node = None
        if len(transform_file.node_transforms) > 0:
            transforms_node = TransformTreeWidgetItem(file_node)
            transforms_node.setText(
                TransformTreeWidget.COL_NODE,
                "ast.NodeTransformer : (%d)" % len(transform_file.node_transforms)
            )
            for transform in transform_file.node_transforms:
                # NOTE(review): `name=transform.name` stores the attribute
                # itself while `transform.name()` below is called -- confirm
                # which form is intended.
                transform_node = TransformTreeWidgetItem(transforms_node, name=transform.name, source=transform)
                if not first_node:
                    first_node = transform_node
                transform_node.setText(TransformTreeWidget.COL_NODE, transform.name())
                # print("loaded transform to tree %s" % transform.name)
                transform_node.setToolTip(TransformTreeWidget.COL_NODE, transform.doc)
        else:
            if transform_file.load_error_info:
                first_node = file_node
        if len(transform_file.code_generators) > 0:
            code_generators_node = TransformTreeWidgetItem(file_node)
            code_generators_node.setText(
                TransformTreeWidget.COL_NODE,
                "ctree.CodeGenVisitor : (%d)" % len(transform_file.code_generators)
            )
            print("%d code_generators" % len(transform_file.code_generators))
            for code_generator in transform_file.code_generators:
                code_generator_node = TransformTreeWidgetItem(
                    code_generators_node,
                    name=code_generator.name,
                    source=code_generator
                )
                if not first_node:
                    first_node = code_generator_node
                code_generator_node.setText(TransformTreeWidget.COL_NODE, code_generator.name())
                code_generator_node.setToolTip(TransformTreeWidget.COL_NODE, code_generator.doc)
        return first_node

    def build(self, transform_files):
        """Rebuild the entire tree and select the first transform found."""
        self.clear()
        first_node = None
        for transform_file in transform_files:
            file_node = TransformTreeWidgetItem(self, name=transform_file.base_name, source=transform_file)
            file_node.setText(
                TransformTreeWidget.COL_NODE,
                "%s (%s)" % (transform_file.base_name, transform_file.package_name)
            )
            file_node.setToolTip(TransformTreeWidget.COL_NODE, transform_file.path)
            node = self.build_children(transform_file, file_node)
            if not first_node:
                first_node = node
        self.expandToDepth(100)
        if first_node:
            self.setCurrentItem(first_node)
            self.transform_pane.load_editor_from(self.currentItem())
class TransformerAction(QtGui.QAction):
    """Context-menu action that applies one specific transform to the
    tree widget's current node."""

    def __init__(self, transform_item, ast_tree_widget, **kwargs):
        super(TransformerAction, self).__init__(transform_item.name(), ast_tree_widget, **kwargs)
        self.ast_tree_widget = ast_tree_widget
        self.transform_item = transform_item
        self.text = transform_item.name()
        self.triggered.connect(self.do_transform)

    def do_transform(self):
        """Apply this action's transform to the currently selected AST node."""
        print("Triggered with string %s" % self.text)
        presenter = self.ast_tree_widget.transform_presenter
        presenter.apply_transform(
            code_item=self.ast_tree_widget.currentItem().ast_node,
            transform_item=self.transform_item
        )
| {
"repo_name": "ucb-sejits/ast_tool_box",
"path": "ast_tool_box/views/transform_views/transform_tree_widget.py",
"copies": "1",
"size": "8081",
"license": "mit",
"hash": -8120472517666699000,
"line_mean": 36.412037037,
"line_max": 112,
"alpha_frac": 0.6262838758,
"autogenerated": false,
"ratio": 4.148357289527721,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.527464116532772,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chick Markley'
import types
import ast
import tempfile
import os
from ast_tool_box.views.search_widget import SearchLineEdit
from PySide import QtGui, QtCore
DEBUGGING = False
class AstTreePane(QtGui.QGroupBox):
    """Panel combining a search box with an AstTreeWidget for one AST."""

    def __init__(self, code_presenter=None, ast_root=None, tab_name=None):
        super(AstTreePane, self).__init__()
        self.code_presenter = code_presenter
        # Rotating match index so repeated "next" clicks cycle through hits.
        self.search_next_index = 0
        self.last_search = ''
        layout = QtGui.QVBoxLayout()
        self.search_box = SearchLineEdit(on_changed=self.search_box_changed, on_next=self.search_next)
        layout.addWidget(self.search_box)
        self.ast_tree_widget = AstTreeWidget(code_presenter=self.code_presenter, ast_root=ast_root, tab_name=tab_name)
        layout.addWidget(self.ast_tree_widget)
        self.setLayout(layout)

    def expand_all(self):
        """Expand every node of the contained tree."""
        print("got to %s" % self)
        self.ast_tree_widget.expand_descendants()

    def collapse_all(self):
        """Collapse every node of the contained tree."""
        print("got to %s" % self)
        self.ast_tree_widget.collapse_descendants()

    def make_tree_from(self, syntax_tree, file_name="", display_depth=1):
        """Delegate tree construction to the contained AstTreeWidget."""
        self.ast_tree_widget.make_tree_from(syntax_tree, file_name=file_name, display_depth=display_depth)

    def search_next(self):
        """Advance selection to the next tree item matching the search text."""
        if self.search_box.text() != self.last_search:
            # New query: restart from the first match.
            self.search_next_index = 0
        else:
            self.search_next_index += 1
        self.last_search = self.search_box.text()
        current_tree = self.ast_tree_widget
        items = current_tree.findItems(
            self.search_box.text(),
            QtCore.Qt.MatchContains | QtCore.Qt.MatchRecursive,
            column=AstTreeWidget.COL_NODE
        )
        # print("Found %d items" % len(items))
        if len(items) > 0:
            if self.search_next_index >= len(items):
                # Wrap around after the last match.
                self.search_next_index = 0
            # print(items[0])
            current_tree.setCurrentItem(items[self.search_next_index])
            current_tree.expandItem(items[self.search_next_index])

    def search_box_changed(self):
        """Select and reveal the first tree item matching the search text."""
        if not self.search_box.text():
            return
        current_tree = self.ast_tree_widget
        # print("current tree %s" % current_tree)
        #
        # for widget_index in range(self.ast_tree_tabs.count()):
        #     widget = self.ast_tree_tabs.widget(widget_index)
        #     print("widget %s ast_tree %s" % (widget, widget.ast_root))
        items = current_tree.findItems(
            self.search_box.text(),
            QtCore.Qt.MatchContains | QtCore.Qt.MatchRecursive,
            column=AstTreeWidget.COL_NODE
        )
        # print("Found %d items" % len(items))
        if len(items) > 0:
            # print(items[0])
            current_tree.setCurrentItem(items[0])
            current_tree.expandItem(items[0])
class AstTreeWidget(QtGui.QTreeWidget):
    """
    displays an ast as a tree widget

    Each row represents one AST node or one primitive field value; the
    COL_* constants name the item columns.
    """
    COL_NODE = 0        # "field = Class" / "field: value" label
    COL_FIELD = 1
    COL_CLASS = 2
    COL_VALUE = 3
    COL_POS = 4
    COL_HIGHLIGHT = 5   # "from : to" source-span annotation

    # when True, freshly built trees are expanded (effectively) completely
    expand_all_at_create = True

    def __init__(self, code_presenter=None, ast_root=None, tab_name='tab'):
        """
        :param code_presenter: controller used for transforms / error display
        :param ast_root: optional AST to populate the tree from immediately
        :param tab_name: used to name the dot-rendered output file
        """
        super(AstTreeWidget, self).__init__()
        self.code_presenter = code_presenter
        self.tab_name = tab_name
        self.ast_root = ast_root
        self.setColumnCount(2)
        self.setHeaderLabels(["Node"])
        self.header().resizeSection(AstTreeWidget.COL_NODE, 800)
        self.header().setStretchLastSection(True)
        # NOTE(review): Qt signals normally must be declared as *class*
        # attributes; an instance-assigned Signal is unbound and cannot be
        # emitted.  Left in place so the attribute set is unchanged --
        # confirm whether anything actually uses it.
        self.transform_signal = QtCore.Signal(int)
        self.show_with_dot_action = QtGui.QAction(
            "&show tree using dot",
            self,
            statusTip="Create a *.png file using dot",
            triggered=self.show_with_dot
        )
        self.make_root_action = QtGui.QAction(
            "&Make new panel with this node as root",
            self,
            statusTip="This node will be made the current root in this window",
            triggered=self.make_root
        )
        self.expand_descendants_action = QtGui.QAction(
            "&Expand all children",
            self,
            statusTip="Expand all descendant nodes",
            triggered=self.expand_descendants
        )
        if ast_root:
            self.make_tree_from(self.ast_root)

    def contextMenuEvent(self, event):
        """Show the right-click menu: dot rendering, expansion, transforms."""
        menu = QtGui.QMenu(self)
        menu.addAction(self.show_with_dot_action)
        menu.addAction(self.expand_descendants_action)
        sub_menu = QtGui.QMenu(self)
        sub_menu.setTitle("Available transformers")
        # one sub-menu entry per transformer known to the presenter
        for transform_item in self.code_presenter.transform_presenter.transform_items():
            sub_menu_action = TransformerAction(transform_item=transform_item, ast_tree_widget=self)
            sub_menu.addAction(sub_menu_action)
        menu.addMenu(sub_menu)
        menu.addAction(self.make_root_action)
        menu.exec_(event.globalPos())

    def transform_current_ast(self, name):
        # NOTE(review): neither self.ast_transformers nor self.main_window is
        # ever assigned in this class -- this looks like dead/legacy code;
        # confirm before relying on it.
        transformer = self.ast_transformers.get_instance_by_name(name)
        self.main_window.add_tree_tab(transformer=transformer)

    def show_with_dot(self):
        """Render the AST at the current selection to a PNG via dot and
        open it in a browser."""
        from ctree.visual.dot_manager import DotManager
        start_item = self.currentItem()

        def find_appropriate_node(item):
            # climb from the clicked tree item to the nearest real ast.AST
            if not item:
                return None
            if hasattr(item, 'ast_node') and isinstance(item.ast_node, ast.AST):
                return item.ast_node
            if hasattr(item, 'parent'):
                return find_appropriate_node(item.parent())
            print("bad node %s" % item)
            import pprint
            # fix: pprint is a module, not a callable -- the original
            # pprint(dir(item)) raised TypeError on this debug path
            pprint.pprint(dir(item))
            return None

        start_node = find_appropriate_node(start_item)
        if not start_node:
            self.code_presenter.show_error("Sorry, cannot find an ast node to begin graph")
            return
        file_name = os.path.join(tempfile.gettempdir(), 'tree_%s.png' % self.tab_name)
        DotManager.dot_ast_to_browser(start_node, file_name)

    def make_root(self):
        """make the current item the displayed root of the tree"""
        self.code_presenter.apply_transform(code_item=self.currentItem().ast_node, transform_item=None)

    def expand_descendants(self, item=None):
        """Expand all descendants of the current item"""
        if item is None:
            print("item is none")
            item = self.currentItem()
            print("item is %s" % item)
        item.setExpanded(True)
        for child_index in range(item.childCount()):
            self.expand_descendants(item.child(child_index))

    def collapse_descendants(self, item=None):
        """Collapse all descendants of the current item"""
        if item is None:
            item = self.currentItem()
        item.setExpanded(False)
        for child_index in range(item.childCount()):
            self.collapse_descendants(item.child(child_index))

    def make_tree_from(self, syntax_tree, file_name="", display_depth=1):
        """
        Populates the tree widget.

        :param syntax_tree: ast.AST root to display
        :param file_name: label attached to the root row
        :param display_depth: expansion depth used when expand_all_at_create
            is False
        """
        self.clear()
        # State we keep during the recursion.
        # Is needed to populate the selection column.
        to_be_updated = []
        state = {'from': '? : ?', 'to': '1 : 0'}

        def add_node(ast_node, parent_item, field_label):
            """
            Helper function that recursively adds nodes.
            :param parent_item: The parent QTreeWidgetItem to which this node will be added
            :param field_label: Labels how this node is known to the parent
            """
            if isinstance(ast_node, (list, tuple)):
                # a sequence field: add each element directly under the same
                # parent, labelled field_label[i]
                for idx, node in enumerate(ast_node):
                    add_node(node, parent_item, "{}[{:d}]".format(field_label, idx))
                return
            node_item = AstTreeWidgetItem(parent_item)
            if parent_item is self:
                self.setCurrentItem(node_item)
            node_item.ast_node = ast_node
            if hasattr(ast_node, 'lineno'):
                position_str = " ({:d}:{:d})".format(ast_node.lineno, ast_node.col_offset)
                # If we find a new position string we set the items found since the last time
                # to 'old_line : old_col : new_line : new_col' and reset the list
                # of to-be-updated nodes
                if position_str != state['to']:
                    state['from'] = state['to']
                    state['to'] = position_str
                    for node in to_be_updated:
                        node.setText(AstTreeWidget.COL_HIGHLIGHT, "{} : {}".format(state['from'], state['to']))
                    to_be_updated[:] = [node_item]
                else:
                    to_be_updated.append(node_item)
            else:
                to_be_updated.append(node_item)
                position_str = ""
            # Recursively descend the AST
            if isinstance(ast_node, ast.AST):
                value_str = ''
                node_str = "{} = {}".format(field_label, class_name(ast_node))
                # NOTE(review): falsy field values (0, "", empty lists) are
                # skipped here -- presumably to keep the display compact;
                # confirm that hiding them is intended.
                for key, val in ast.iter_fields(ast_node):
                    if val:
                        add_node(val, node_item, key)
            elif isinstance(ast_node, (list, tuple)):
                raise Exception("%s list should have been handled earlier in function" % ast_node)
            else:
                value_str = repr(ast_node)
                node_str = "{}: {}".format(field_label, value_str)
            if position_str:
                node_str += position_str
            node_item.setText(AstTreeWidget.COL_NODE, node_str)
        # End of helper function

        add_node(syntax_tree, self, '"{}"'.format(file_name))
        if AstTreeWidget.expand_all_at_create:
            self.expandToDepth(100)
        else:
            self.expandToDepth(display_depth)
        self.ast_root = syntax_tree
def class_name(obj):
    """Return the name of *obj*'s class."""
    cls = obj.__class__
    return cls.__name__
class AstTreeWidgetItem(QtGui.QTreeWidgetItem):
    """
    connects a gui tree item with the corresponding node in the actual ast tree
    """
    def __init__(self, parent, source_node=None):
        """Create a tree item under *parent*, remembering *source_node*."""
        super(AstTreeWidgetItem, self).__init__(parent)
        # the ast.AST node (or primitive value) this row represents
        self.ast_node = source_node
class TransformerAction(QtGui.QAction):
    """Context-menu action that applies one AST transformer to the
    currently selected node of an AstTreeWidget."""
    def __init__(self, transform_item, ast_tree_widget, **kwargs):
        """
        :param transform_item: transformer descriptor; name() supplies the label
        :param ast_tree_widget: the tree whose current item is transformed
        """
        super(TransformerAction, self).__init__(transform_item.name(), ast_tree_widget, **kwargs)
        self.ast_tree_widget = ast_tree_widget
        self.transform_item = transform_item
        # fix: keep the display name in a private attribute -- assigning it
        # to self.text shadowed the QAction.text() *method* on this instance,
        # breaking any Python caller of action.text()
        self._label = transform_item.name()
        self.triggered.connect(self.do_transform)

    def do_transform(self):
        """Apply this action's transformer to the currently selected node."""
        print("Triggered with string %s" % self._label)
        self.ast_tree_widget.code_presenter.apply_transform(
            code_item=self.ast_tree_widget.currentItem().ast_node,
            transform_item=self.transform_item
        )
| {
"repo_name": "ucb-sejits/ast_tool_box",
"path": "ast_tool_box/views/code_views/ast_tree_widget.py",
"copies": "1",
"size": "11795",
"license": "mit",
"hash": -6473404286601594000,
"line_mean": 34.7424242424,
"line_max": 118,
"alpha_frac": 0.5830436626,
"autogenerated": false,
"ratio": 3.8901715039577835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9953614592308091,
"avg_score": 0.003920114849938425,
"num_lines": 330
} |
__author__ = 'chick'
import os
import stat
import inspect
import shutil
from mako.template import Template
class Builder:
    """
    Class that creates a directory and file hierarchy based on a template directory
    ordinary files are copied as is
    *.mako files are rendered with mako into files with the .mako removed
    if the template directory name is code then it is changed to the target_base
    """
    # keyword-argument name that turns on chatty progress output
    verbose_key = 'verbose'

    def __init__(self, template_family, target_base, **kwargs):
        """
        :param template_family: name of the template sub-directory to copy from
        :param target_base: base name/directory the generated tree is written to
        :param kwargs: optional flags; only Builder.verbose_key is recognized
        """
        self.target_base = target_base
        self.template_family = template_family
        # fix: use .get() so a missing 'verbose' kwarg no longer raises KeyError
        self.verbose = kwargs.get(Builder.verbose_key, False)

    def build(self, template_dir, target_dir, depth=0):
        """
        walks the template_dir
        each directory founds is created in associated target_dir
        each *.mako file is processed as a template and created without .mako
        other files are copied as is
        """
        make_executable = False

        def indent_print(s):
            # progress output indented by recursion depth
            if self.verbose:
                print((' ' * depth) + s)

        if template_dir is None:
            # first call: derive the template dir from this module's location
            module_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
            template_dir = os.path.join(module_dir, 'templates', self.template_family)
            target_dir = self.target_base
        indent_print("template dir is %s" % template_dir)
        indent_print("target dir is %s" % target_dir)
        try:
            os.makedirs(target_dir)
        except OSError as exception:
            print("Unable to create %s error (%d) %s" %
                  (target_dir, exception.errno, exception.strerror))
            exit(1)
        # NOTE(review): make_executable is computed here but never consulted
        # below -- the unconditional chmod may have been meant to be guarded
        # by it; confirm the intended behavior.
        if target_dir[-4:] == '/bin':
            make_executable = True
        files = os.listdir(template_dir)
        indent_print("files " + ",".join(files))
        for file in files:
            source_file = os.path.join(template_dir, file)
            target_file = os.path.join(target_dir, file)
            if os.path.isfile(source_file):
                if source_file.endswith('.mako'):
                    # render the template into target minus the '.mako' suffix
                    template = Template(filename=source_file)
                    file_name = target_file[:-5]
                    f1 = open(file_name, 'w+')
                    indent_print("Rendering %s" % file_name)
                    # write() + "\n" replaces the Python-2-only `print >>f1`
                    # with identical output on both Python 2 and 3
                    f1.write(template.render(specializer_name=self.target_base) + "\n")
                    f1.close()
                else:
                    indent_print("processing ordinary file %s" % source_file)
                    if file != '.gitignore':
                        shutil.copyfile(source_file, target_file)
                        st = os.stat(target_file)
                        os.chmod(target_file, st.st_mode | stat.S_IEXEC | stat.S_IXOTH | stat.S_IXGRP)
            elif os.path.isdir(source_file):
                indent_print("processing directory %s" % file)
                # the special directory 'specializer_package' is renamed to
                # the target base name in the generated tree
                destination = target_file if file != 'specializer_package' else os.path.join(target_dir, self.target_base)
                self.build(source_file, destination, depth + 1)
| {
"repo_name": "mbdriscoll/ctree",
"path": "ctree/tools/generators/builder.py",
"copies": "3",
"size": "3321",
"license": "bsd-2-clause",
"hash": -8335997471395615000,
"line_mean": 35.097826087,
"line_max": 122,
"alpha_frac": 0.5609756098,
"autogenerated": false,
"ratio": 4.172110552763819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6233086162563818,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chitrabhanu'
import csv
import os, stat
import sys
import datetime
import time
import json
# Prefixes stamped onto error messages so usage failures and runtime
# failures are distinguishable when printed by main().
USAGE_ERROR_PREFIX = "USAGE ERROR: "
RUNTIME_ERROR_PREFIX = "RUNTIME ERROR: "
class UsageError(Exception):
    """Error in command-line arguments; main() prints usage and exits 1."""
    def __init__(self, msg):
        # str() so exception objects (callers do `raise UsageError(err)`)
        # no longer TypeError on concatenation with the prefix
        self.msg = USAGE_ERROR_PREFIX + str(msg)
class RuntimeError(Exception):
    """Non-usage failure; main() prints the message and exits 2.

    NOTE(review): this shadows the builtin RuntimeError inside this module;
    renaming would break existing raisers/handlers, so it is left as-is.
    """
    def __init__(self, msg):
        # str() guards against being handed an exception object directly
        self.msg = RUNTIME_ERROR_PREFIX + str(msg)
def Usage(valid_args_list):
    """Print a usage summary.

    :param valid_args_list: sequence of (operation-name, [arg-description, ...], ...)
        tuples; one indented line is printed per operation.
    """
    # single-argument print(...) behaves identically under Python 2
    # (parenthesized expression) and Python 3, replacing the py2-only
    # print statements
    print("USAGE:\n\tpython %s followed by one out of:" % (sys.argv[0]))
    for valid_args in valid_args_list:
        args_string = "".join(" " + arg_desc for arg_desc in valid_args[1])
        print("\t\t" + valid_args[0] + args_string)
def convert_palm_csv(args):
    """Convert a Palm-exported contacts CSV to jCard JSON.

    args: [in-file, out-file].  Existing output files are renamed aside
    first.  Only the formatted name ("fn") is emitted so far.

    NOTE(review): json.dump is called once per CSV row with no separator,
    so the output file contains concatenated JSON documents rather than a
    single JSON array -- confirm the consumer expects that.
    :raises UsageError: when either file fails validation or opening
    :raises RuntimeError: on I/O errors while converting
    """
    # column names matching the fixed Palm CSV export layout
    palm_csv_default_headings = [
        "Prefix", "First Name", "Middle Name", "Last Name",
        "Suffix","Nickname", "Anniversary", "Birthday",
        "Profession", "Company", "Job Title", "Assistant Name",
        "Assistant Phone", "Work Street", "Work City", "Work State",
        "Work Zip", "Work Country", "Home Street", "Home City",
        'Home State', "Home Zip", "Home Country", "Other Street",
        "Other City", 'Other State', "Other Zip", "Other Country",
        "Work", "Home", "Fax", "Other",
        "Email", "Mobile", "Main", "Chat 1",
        "Chat 2", "Website", "Custom 1", "Custom 2",
        "Custom 3", "Custom 4", "Custom 5", "Custom 6",
        "Custom 7", "Custom 8", "Custom 9", "Note",
        "Private", "Category"
    ]
    in_file_name = args[0]
    out_file_name = args[1]
    try:
        validate_file_or_dir(in_file_name, is_file=True, is_write=False)
        # move an existing output file aside instead of overwriting it
        rename_msg = rename_file_on_overwrite(out_file_name)
        if rename_msg is not None:
            print rename_msg
        validate_file_or_dir(out_file_name, is_file=True, is_write=True)
        in_file = open(in_file_name)
        reader = csv.DictReader(in_file, palm_csv_default_headings)
        out_file = open(out_file_name, "w")
    except IOError as err:
        raise UsageError(err)
    try:
        for row in reader:
            # minimal jCard skeleton per RFC 7095: ["vcard", [properties]]
            jcard = [
                "vcard",
                [
                    ["version", {}, "text", "4.0"]
                ]
            ]
            name_fields = ["Prefix", "First Name", "Middle Name", "Last Name",
                           "Suffix"]
            fn = ""
            # NOTE(review): each field contributes a trailing space, so fn is
            # non-empty (all spaces) even when every name field is blank; the
            # strip only happens after the != "" test.  Also, rows narrower
            # than the heading list yield None values, which would TypeError
            # here -- confirm inputs are always full-width.
            for name_field in name_fields:
                fn += row[name_field] + " "
            if fn != "":
                fn = fn.strip()
                jcard[1].append(["fn", {}, "text", fn])
            json.dump(jcard, out_file)
        in_file.close()
        out_file.close()
    except IOError as err:
        raise RuntimeError(err)
def rename_file_on_overwrite(file_name):
    """If *file_name* exists, move it aside to a timestamped backup name.

    Returns a human-readable message describing the rename, or None when
    the file does not exist.
    """
    if not os.path.isfile(file_name):
        return None
    backup_name = file_name + datetime.datetime.now().strftime("-before-%Y-%m-%d-%H-%M-%S")
    os.rename(file_name, backup_name)
    return "Renaming " + file_name + " to " + backup_name
def rename_dir_on_overwrite(dir_name):
    """If *dir_name* exists, move it aside to a timestamped backup name.

    Returns a human-readable message describing the rename, or None when
    the directory does not exist.
    """
    if not os.path.isdir(dir_name):
        return None
    backup_name = dir_name + datetime.datetime.now().strftime("-before-%Y-%m-%d-%H-%M-%S")
    os.rename(dir_name, backup_name)
    return "Renaming " + dir_name + " to " + backup_name
def validate_file_or_dir(file_or_dir_name, is_file, is_write, no_over_write = True):
    """Validate that a path is usable as an input or output file/directory.

    :param file_or_dir_name: path to check
    :param is_file: expect a regular file when True, a directory otherwise
    :param is_write: check write permission when True, read permission otherwise
    :param no_over_write: when writing, refuse paths that already exist
    :raises UsageError: on any failed check
    """
    # fix: os.path.exists replaces the private helper os._exists
    if is_write and no_over_write and os.path.exists(file_or_dir_name):
        raise UsageError(file_or_dir_name + " cannot be overwritten")
    # existence/type check (previously duplicated across both branches)
    if is_file:
        if not os.path.isfile(file_or_dir_name):
            raise UsageError(file_or_dir_name + " not found or is not a file")
    else:
        if not os.path.isdir(file_or_dir_name):
            raise UsageError(file_or_dir_name + " not found or is not a dir")
    # permission check, matching the requested access mode
    if is_write:
        if not os.access(file_or_dir_name, os.W_OK):
            raise UsageError("You do not appear to have write permissions in " +
                             file_or_dir_name)
    else:
        if not os.access(file_or_dir_name, os.R_OK):
            raise UsageError("You do not appear to have read permissions in " +
                             file_or_dir_name)
    # NOTE(review): for a brand-new output file (is_write=True, path absent)
    # the isfile check above still raises "not found", exactly as the
    # original did -- confirm the intended contract with the callers, which
    # invoke this right after renaming the old output away.
def main():
    """Dispatch the operation named in sys.argv[1] to its handler.

    valid_args_list holds (operation-name, [arg descriptions], handler)
    triples; the matched handler receives sys.argv[2:].  Usage problems
    print the usage text and exit 1; runtime failures exit 2.
    """
    valid_args_list = [
        ("convertpalmcsv", ["in-file", "out-file"], convert_palm_csv)
    ]
    try:
        if len(sys.argv) == 1:
            raise UsageError("Missing args")
        valid_op_flag = False
        for valid_args in valid_args_list:
            if valid_args[0] == sys.argv[1]:
                valid_op_flag = True
                # +2 accounts for the script name and the operation name
                if len(sys.argv) != len(valid_args[1]) + 2:
                    raise UsageError("Wrong number of arguments for operation ' " + sys.argv[1] + " ' ")
                valid_args[2](sys.argv[2:])
                break
        if valid_op_flag == False:
            raise UsageError("Unrecognized operation " + sys.argv[1])
    except UsageError as err:
        print err.msg
        Usage(valid_args_list)
        sys.exit(1)
    except RuntimeError as err:
        print err.msg
        sys.exit(2)


if __name__ == "__main__":
    main()
| {
"repo_name": "unchaoss/unchaoss",
"path": "engine/py/contactops/contactops.py",
"copies": "1",
"size": "5436",
"license": "apache-2.0",
"hash": 6575334732027518000,
"line_mean": 32.975,
"line_max": 104,
"alpha_frac": 0.5410228109,
"autogenerated": false,
"ratio": 3.473482428115016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45145052390150164,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chitrabhanu'
import os, stat
import sys
import datetime
import hashlib
import time
from filecmp import dircmp
# Prefixes stamped onto error messages so usage failures and runtime
# failures are distinguishable when printed by main().
USAGE_ERROR_PREFIX = "USAGE ERROR: "
RUNTIME_ERROR_PREFIX = "RUNTIME ERROR: "
class UsageError(Exception):
    """Error in command-line arguments; main() prints usage and exits 1."""
    def __init__(self, msg):
        # str() so exception objects (get_dir_list does `raise UsageError(err)`)
        # no longer TypeError on concatenation with the prefix
        self.msg = USAGE_ERROR_PREFIX + str(msg)
class RuntimeError(Exception):
    """Non-usage failure; main() prints the message and exits 2.

    NOTE(review): this shadows the builtin RuntimeError inside this module;
    renaming would break existing raisers/handlers, so it is left as-is.
    """
    def __init__(self, msg):
        # str() guards against being handed an exception object directly
        self.msg = RUNTIME_ERROR_PREFIX + str(msg)
def Usage(valid_args_list):
    """Print a usage summary.

    :param valid_args_list: sequence of (operation-name, [arg-description, ...], ...)
        tuples; one indented line is printed per operation.
    """
    # single-argument print(...) behaves identically under Python 2
    # (parenthesized expression) and Python 3, replacing the py2-only
    # print statements
    print("USAGE:\n\tpython %s followed by one out of:" % (sys.argv[0]))
    for valid_args in valid_args_list:
        args_string = "".join(" " + arg_desc for arg_desc in valid_args[1])
        print("\t\t" + valid_args[0] + args_string)
def get_dir_list(args):
    """Walk a directory tree and write one listing line per file.

    args: [dir-name, out-file].  Each line is the file path relative to
    dir-name followed by per-file stats appended by add_file_stats
    (defined elsewhere in this module -- presumably size/times/checksum;
    confirm its exact format).
    :raises UsageError: when either path fails validation or opening
    """
    dir_name = args[0]
    out_file_name = args[1]
    try:
        validate_file_or_dir(dir_name, is_file=False, is_write=False)
        # move an existing output file aside instead of overwriting it
        rename_msg = rename_file_on_overwrite(out_file_name)
        if rename_msg is not None:
            print rename_msg
        validate_file_or_dir(out_file_name, is_file=True, is_write=True)
        out_file = open(out_file_name, "w")
    except IOError as err:
        raise UsageError(err)
    # length of the prefix to strip so listed paths are relative;
    # +1 accounts for the separator unless dir_name already ends with one
    dir_name_len = len(dir_name)
    if dir_name.endswith("/") is False and dir_name.endswith("\\") is False:
        dir_name_len += 1
    for root, dirs, files in os.walk(dir_name):
        for file_name in files:
            file_path = ""
            try:
                out_line = ""
                file_path = os.path.join(root, file_name)
                out_line += file_path[dir_name_len:] + " "
                out_line = add_file_stats(out_line, file_path)
                out_file.write(out_line+"\n")
            except IOError as err:
                # unreadable files are recorded in the listing and skipped
                out_file.write(RUNTIME_ERROR_PREFIX + "(non-fatal) " +
                               str(err) + " (file = " + file_path + ")\n")
                continue
    out_file.close()
def match_dir_copies(args):
    """Compare two directory-listing files (golden vs test) produced by
    get_dir_list and split the results into report files.

    args: [golden-list, test-list, golden-errors, test-errors,
           golden-only, test-only, size-or-cksm-mismatches, name-matches]
    :raises UsageError: when any file fails validation or opening
    :raises RuntimeError: on I/O errors while reading the listings
    """
    dir_list_file_golden_name = args[0]
    dir_list_file_test_name = args[1]
    golden_errors_file_name = args[2]
    test_errors_file_name = args[3]
    golden_only_file_name = args[4]
    test_only_file_name = args[5]
    size_or_cksm_mismatches_file_name = args[6]
    name_matches_file_name = args[7]
    try:
        validate_file_or_dir(dir_list_file_golden_name, is_file=True,
                             is_write=False)
        dir_list_file_golden = open(dir_list_file_golden_name)
        validate_file_or_dir(dir_list_file_test_name, is_file=True,
                             is_write=False)
        dir_list_file_test = open(dir_list_file_test_name)
        # each output file: move an existing copy aside, validate, open
        rename_msg = rename_file_on_overwrite(golden_errors_file_name)
        if rename_msg is not None:
            print(rename_msg)
        validate_file_or_dir(golden_errors_file_name, is_file=True,
                             is_write=True)
        golden_errors_file = open(golden_errors_file_name, "w")
        rename_msg = rename_file_on_overwrite(test_errors_file_name)
        if rename_msg is not None:
            print(rename_msg)
        validate_file_or_dir(test_errors_file_name, is_file=True, is_write=True)
        test_errors_file = open(test_errors_file_name, "w")
        rename_msg = rename_file_on_overwrite(golden_only_file_name)
        if rename_msg is not None:
            print(rename_msg)
        validate_file_or_dir(golden_only_file_name, is_file=True, is_write=True)
        golden_only_file = open(golden_only_file_name, "w")
        rename_msg = rename_file_on_overwrite(test_only_file_name)
        if rename_msg is not None:
            print(rename_msg)
        validate_file_or_dir(test_only_file_name, is_file=True, is_write=True)
        test_only_file = open(test_only_file_name, "w")
        rename_msg = rename_file_on_overwrite(size_or_cksm_mismatches_file_name)
        if rename_msg is not None:
            print(rename_msg)
        validate_file_or_dir(size_or_cksm_mismatches_file_name, is_file=True,
                             is_write=True)
        size_or_cksm_mismatches_file = open(size_or_cksm_mismatches_file_name, "w")
        rename_msg = rename_file_on_overwrite(name_matches_file_name)
        if rename_msg is not None:
            print(rename_msg)
        validate_file_or_dir(name_matches_file_name, is_file=True, is_write=True)
        name_matches_file = open(name_matches_file_name, "w")
    except IOError as err:
        raise UsageError(str(err))
    # maps relative path -> (size, atime, mtime, checksum)
    golden_dir_info = {}
    test_dir_info = {}
    try:
        update_dir_info(dir_list_file_golden, golden_dir_info, golden_errors_file)
        golden_errors_file.close()
        update_dir_info(dir_list_file_test, test_dir_info, test_errors_file)
        test_errors_file.close()
    except IOError as err:
        raise RuntimeError(str(err))
    name_matches_count = 0
    golden_only_count = 0
    golden_count = 0
    golden_total = len(golden_dir_info)
    for golden_dir_entry in golden_dir_info:
        golden_count += 1
        if (golden_count % 10000) == 0:
            # progress report; %-formatting replicates the old py2
            # comma-separated print output exactly
            print("Processed  %d out of %d name_matches_count %d golden_only_count %d"
                  % (golden_count, golden_total, name_matches_count, golden_only_count))
        # fix: membership directly on the dict -- `in d.keys()` builds a list
        # on Python 2 and is O(n) per lookup, quadratic over the whole run
        if golden_dir_entry not in test_dir_info:
            golden_only_count += 1
            golden_only_file.write(str(golden_dir_entry) + "\n")
        else:
            name_matches_count += 1
            name_matches_file.write(str(golden_dir_entry) + "\n")
            (golden_size, _, _, golden_cksm) = golden_dir_info[golden_dir_entry]
            (test_size, _, _, test_cksm) = test_dir_info[golden_dir_entry]
            # same name but different content: record both sides' stats
            if golden_size != test_size or golden_cksm != test_cksm:
                size_or_cksm_mismatches_file.write("GOLDEN: " + str(golden_dir_entry) +
                                                   str(golden_dir_info[
                                                       golden_dir_entry]) + "\n")
                size_or_cksm_mismatches_file.write("TEST: " + str(golden_dir_entry) +
                                                   str(test_dir_info[
                                                       golden_dir_entry]) + "\n")
    golden_only_file.close()
    name_matches_file.close()
    size_or_cksm_mismatches_file.close()
    for test_dir_entry in test_dir_info:
        # fix: same O(n) `.keys()` membership replaced with dict lookup
        if test_dir_entry not in golden_dir_info:
            test_only_file.write(str(test_dir_entry) + "\n")
    test_only_file.close()
def update_file_times(args):
    """Restore atime/mtime of files under dir-name from a golden listing.

    args: [dir-name, golden-list, test-list, log-file].  For every entry
    present in both listings whose times differ, the golden times are
    written back with os.utime and the change is logged.
    :raises UsageError: when any path fails validation or opening
    :raises RuntimeError: on I/O errors while reading the listings
    """
    dir_name = args[0]
    dir_list_file_golden_name = args[1]
    dir_list_file_test_name = args[2]
    log_file_name = args[3]
    try:
        validate_file_or_dir(dir_name, is_file=False, is_write=False)
        validate_file_or_dir(dir_list_file_golden_name, is_file=True,
                             is_write=False)
        dir_list_file_golden = open(dir_list_file_golden_name)
        validate_file_or_dir(dir_list_file_test_name, is_file=True,
                             is_write=False)
        dir_list_file_test = open(dir_list_file_test_name)
        # move an existing log aside instead of overwriting it
        rename_msg = rename_file_on_overwrite(log_file_name)
        if rename_msg is not None:
            print rename_msg
        validate_file_or_dir(log_file_name, is_file=True, is_write=True)
        log_file = open(log_file_name, "w")
    except IOError as err:
        raise UsageError(str(err))
    # maps relative path -> (size, atime, mtime, checksum)
    golden_dir_info = {}
    test_dir_info = {}
    try:
        update_dir_info(dir_list_file_golden, golden_dir_info, log_file)
        update_dir_info(dir_list_file_test, test_dir_info, log_file)
        dir_list_file_golden.close()
        dir_list_file_test.close()
    except IOError as err:
        raise RuntimeError(str(err))
    for test_dir_entry in test_dir_info:
        # NOTE(review): entries without a space are skipped -- presumably a
        # guard against malformed listing lines; confirm the intent.
        if " " not in test_dir_entry:
            continue
        # NOTE(review): `in golden_dir_info.keys()` is O(n) per lookup on
        # Python 2; plain `in golden_dir_info` would be O(1)
        if test_dir_entry in golden_dir_info.keys():
            (golden_size, golden_atime, golden_mtime, golden_cksm) =\
                golden_dir_info[test_dir_entry]
            (test_size, test_atime, test_mtime, test_cksm) =\
                test_dir_info[test_dir_entry]
            if golden_atime != test_atime or golden_mtime != test_mtime:
                file_path = os.path.join(dir_name, test_dir_entry)
                if golden_atime != test_atime:
                    log_file.write(file_path + " Replacing Atime " +
                                   time.ctime(test_atime) + " by " +
                                   time.ctime(golden_atime) + " ")
                if golden_mtime != test_mtime:
                    log_file.write(file_path + " Replacing Mtime " +
                                   time.ctime(test_mtime) + " by " +
                                   time.ctime(golden_mtime) + " ")
                if not os.path.exists(file_path):
                    log_file.write(" ERROR (file not found)\n")
                else:
                    log_file.write("\n")
                    # apply the golden timestamps to the actual file
                    os.utime(file_path, (golden_atime, golden_mtime))
    log_file.close()
def list_duplicate_files(args):
    """Find duplicated files and fully/partially duplicated directories.

    args: one or more dir-listing files (from get_dir_list) followed by
    three output names: duplicates-list, full-dup-dirs-list,
    part-dup-dirs-list.  Files are considered duplicates when their md5
    checksums (from the listings) match.
    :raises UsageError: when any file fails validation or opening
    :raises RuntimeError: on I/O errors while reading the listings
    """
    dir_list_file_names = args[0:-3]
    duplicates_list_file_name = args[len(args)-3]
    full_dup_dirs_list_file_name = args[len(args)-2]
    part_dup_dirs_list_file_name = args[len(args)-1]
    try:
        for dir_list_file_name in dir_list_file_names:
            validate_file_or_dir(dir_list_file_name, is_file=True,
                                 is_write=False)
        # each output file: move an existing copy aside, validate, open
        rename_msg = rename_file_on_overwrite(duplicates_list_file_name)
        if rename_msg is not None:
            print rename_msg
        validate_file_or_dir(duplicates_list_file_name, is_file=True, is_write=True)
        duplicates_list_file = open(duplicates_list_file_name, "w")
        rename_msg = rename_file_on_overwrite(full_dup_dirs_list_file_name)
        if rename_msg is not None:
            print rename_msg
        validate_file_or_dir(full_dup_dirs_list_file_name, is_file=True, is_write=True)
        full_dup_dirs_list_file = open(full_dup_dirs_list_file_name, "w")
        rename_msg = rename_file_on_overwrite(part_dup_dirs_list_file_name)
        if rename_msg is not None:
            print rename_msg
        validate_file_or_dir(part_dup_dirs_list_file_name, is_file=True, is_write=True)
        part_dup_dirs_list_file = open(part_dup_dirs_list_file_name, "w")
    except IOError as err:
        raise UsageError(str(err))
    # Key is md5 checksum, value is list of file-paths having that md5 (checksum)
    file_paths_for_md5_cksm = {}
    # Key is md5 checksum, value is size of all files having that checksum.
    file_size_for_md5_cksm = {}
    # Key is dir-nm, value is [ [md5] ]. One entry for each directory where for
    # one or more files in it, at least one copy (same contents, possibly
    # different name) has been found. Another dictionary, 'dirs_with_not_all_files_duped'
    # holds the names of directories in this list for which at least one file is
    # not duplicated elsewhere. Any directory in this list but not in that has
    # all of its files duplicated and is separately reported.
    dirs_with_dup_files = {}
    # Key is dir-nm, value is "". Created from dirs_with_dup_files by locating dirs
    # in that list that are not fully duplicated
    dirs_with_not_all_files_duped = {}
    # List of lists, one entry for each set of directories all of which have the
    # same set of duplicate files and for each set of directories without files
    # whose subdirectories might be clones. Each entry's a list of the directories'
    # names
    clones_list = []
    # List of indexes of clones_list entries that are not be be processed either
    # because they contain subdirectories that are not clones or because their
    # parent directories are fully cloned and hence using only the parents will
    # suffice.
    clone_skip_flags_list = []
    # Key is dir-nm, value is the index of the clones_list index containing dir
    clone_index_for_cloned_dir = {}
    try:
        # first pass: accumulate md5 -> paths from every listing file
        for dir_list_file_name in dir_list_file_names:
            dir_list_file = open(dir_list_file_name)
            print "Reading ", dir_list_file_name
            update_dup_info(dir_list_file, file_paths_for_md5_cksm,
                            file_size_for_md5_cksm, duplicates_list_file)
            dir_list_file.close()
    except IOError as err:
        raise RuntimeError(str(err))
    print "Number of unique files: ", len(file_paths_for_md5_cksm)
    print "Counting directories with duplicated files"
    update_dup_dir_info(file_paths_for_md5_cksm, dirs_with_dup_files)
    print "Number of directories with duplicated files: ", len(dirs_with_dup_files)
    print "Printing duplicates-list-file"
    for dup_entry in file_paths_for_md5_cksm:
        # a checksum with a single path has no duplicates
        if len(file_paths_for_md5_cksm[dup_entry]) == 1:
            continue
        duplicates_list_file.write(dup_entry + ": " +
                                   str(file_paths_for_md5_cksm[dup_entry]) + "\n")
    print "Determining partially duplicated directories," \
          " writing ERRORS to duplicates-list-file"
    try:
        # second pass: classify each directory as fully or partially duped
        for dir_list_file_name in dir_list_file_names:
            dir_list_file = open(dir_list_file_name)
            print "Re-reading ", dir_list_file_name
            get_partial_dups(dir_list_file, dirs_with_dup_files,
                             dirs_with_not_all_files_duped, duplicates_list_file)
            dir_list_file.close()
    except IOError as err:
        raise RuntimeError(str(err))
    duplicates_list_file.close()
    print "Identifying mutually-duplicated dirs (also printing names of duplicated FILES from not-fully-duplicated dirs)"
    print "**************************************************"
    print "Note that the code as currently written counts each set of duplicate files WITHIN any directory as a single"
    print " file for comparison purposes when identifying duplicate sets of directories. This may lead to members of"
    print " such sets containing different numbers of files due to the presence of duplicates."
    print "**************************************************"
    for this_dir in dirs_with_dup_files:
        # collect, for every other directory, the checksums it shares with this one
        other_dirs_with_these_keys = {}
        this_dir_name_has_been_printed = False
        for md5 in dirs_with_dup_files[this_dir]:
            for file_path in file_paths_for_md5_cksm[md5]:
                (file_dir,_) = os.path.split(file_path)
                if file_dir == this_dir:
                    continue
                if file_dir not in other_dirs_with_these_keys:
                    other_dirs_with_these_keys[file_dir] = [md5]
                else:
                    other_dirs_with_these_keys[file_dir].append(md5)
        full_matches = [this_dir]
        for that_dir in other_dirs_with_these_keys:
            # a full match: both dirs are entirely duplicated and share the
            # exact same checksum set
            if this_dir not in dirs_with_not_all_files_duped and \
               that_dir not in dirs_with_not_all_files_duped and\
               set(dirs_with_dup_files[this_dir]) == \
               set(dirs_with_dup_files[that_dir]):
                full_matches.append(that_dir)
            else:
                # Partial match, print file name pairs common to both dirs
                if this_dir_name_has_been_printed == False:
                    part_dup_dirs_list_file.write(this_dir + "\n")
                    this_dir_name_has_been_printed = True
                part_dup_dirs_list_file.write("\t" + that_dir + "\n")
                this_name = None
                that_name = None
                for key in other_dirs_with_these_keys[that_dir]:
                    for path in file_paths_for_md5_cksm[key]:
                        (dir,name) = os.path.split(path)
                        if dir == this_dir:
                            this_name = name
                        elif dir == that_dir:
                            that_name = name
                    # same content under different names is marked with '*'
                    if this_name != that_name:
                        part_dup_dirs_list_file.write("\t\t" + this_name + "*" +
                                                      that_name + "\n")
                    else:
                        part_dup_dirs_list_file.write("\t\t" + this_name +"\n")
        if len(full_matches) > 1:
            # merge this set of mutually-cloned dirs into an existing clones
            # entry if any member already belongs to one
            clone_index = -1
            for full_match in full_matches:
                if full_match in clone_index_for_cloned_dir:
                    if clone_index != -1:
                        assert clone_index == clone_index_for_cloned_dir[full_match]
                    else:
                        clone_index = clone_index_for_cloned_dir[full_match]
            if clone_index == -1:
                clone_index = len(clones_list)
                clones_list.append([])
                clone_skip_flags_list.append(False)
            for full_match in full_matches:
                entry = os.path.abspath(full_match)
                if entry not in clone_index_for_cloned_dir:
                    clones_list[clone_index].append(entry)
                    clone_index_for_cloned_dir[entry] = clone_index
    print "Printing fully-'cloned' dirs"
    prune_clones_list(clones_list, clone_skip_flags_list, clone_index_for_cloned_dir)
    for clone_index in range(len(clones_list)):
        if clone_skip_flags_list[clone_index]:
            continue
        clones_list_entry = clones_list[clone_index]
        if len(clones_list_entry):
            full_dup_dirs_list_file.write(str(clones_list_entry) + "\n")
    full_dup_dirs_list_file.close()
    part_dup_dirs_list_file.close()
def get_dirs_within_path(path):
    """Return the immediate subdirectory names of *path*.

    Returns None when *path* does not exist (os.walk yields nothing).
    """
    walker = os.walk(path)
    for _, dir_names, _ in walker:
        return dir_names
def get_files_within_path(path):
    """Return the names of regular files directly inside *path*.

    Returns None when *path* does not exist (os.walk yields nothing).
    """
    walker = os.walk(path)
    for _, _, file_names in walker:
        return file_names
def prune_clones_list(clones_list, clone_skip_flags_list,
                      clone_index_for_cloned_dir):
    """Mark clone entries that should be skipped in the final report.

    An entry is skipped when it still has unverified descendant paths, or
    when its parents form a larger verified clone set that subsumes it.
    All three arguments are mutated in place; nothing is returned.
    """
    clone_index_for_path = {}
    # One entry per clone index value, holding list of descendant paths which
    # have not yet been verified to be clones of the descendants of the other
    # paths at this clone index
    uncloned_descendant_paths = []
    # One entry per clone index value, holding list of clone indexes of
    # decendant paths verified to be clones of the descendants of the other
    # paths at this clone index
    cloned_descendant_indexes = []
    leaf_path_already_processed = {}
    for clone_index in range(len(clones_list)):
        clones_list_entry = clones_list[clone_index]
        clones_list_entry.sort()
        uncloned_descendant_paths.append([])
        cloned_descendant_indexes.append([])
        for path in clones_list_entry:
            assert path not in clone_index_for_path
            # fix: the original stored clone_index_for_path[clone_index] =
            # clone_index, which both defeats the assert above and makes the
            # clone_index_for_path[leaf_path] lookup below KeyError -- the
            # intended key is the path itself
            clone_index_for_path[path] = clone_index
            dirs = get_dirs_within_path(path)
            if len(dirs):
                leaf_path_already_processed[path] = False
                for dir in dirs:
                    uncloned_descendant_paths[clone_index].append(os.path.join(path, dir))
    for leaf_path in leaf_path_already_processed:
        if leaf_path_already_processed[leaf_path]:
            continue
        all_are_leaves = True
        clone_index = clone_index_for_path[leaf_path]
        # a clone set can only be processed when every member is a leaf
        for clone in clones_list[clone_index]:
            if clone not in leaf_path_already_processed:
                all_are_leaves = False
            else:
                leaf_path_already_processed[clone] = True
        if all_are_leaves == False:
            continue
        process_set_of_cloned_nodes(clone_index, clones_list,
                                    clone_skip_flags_list, clone_index_for_cloned_dir,
                                    clone_index_for_path, uncloned_descendant_paths,
                                    cloned_descendant_indexes)
    for clone_index in range(len(clones_list)):
        if len(uncloned_descendant_paths[clone_index]):
            # unverified descendants remain: do not report this entry
            clone_skip_flags_list[clone_index] = True
        else:
            # fully verified: its descendants' entries become redundant
            for index in cloned_descendant_indexes[clone_index]:
                clone_skip_flags_list[index] = True
def process_set_of_cloned_nodes(clone_index, clones_list, clone_skip_flags_list,
                                clone_index_for_cloned_dir, clone_index_for_path, uncloned_descendant_paths,
                                cloned_descendant_indexes):
    """Propagate clone detection from a verified leaf clone set to the
    members' parent directories, possibly creating a new clones_list entry.

    NOTE(review): this function looks unfinished (see the TBD below) and
    contains several suspect statements flagged inline -- confirm intent
    before relying on it.
    """
    parent_paths = []
    parent_clone_index = None
    parents_have_same_clone_index = True
    # Set when parents contain no files and are hence potential clones
    parents_are_potential_clones = True
    for index in range(len(clones_list[clone_index])):
        current_path = clones_list[clone_index][index]
        parent_path = os.path.abspath(os.path.join(current_path, ".."))
        parent_paths.append(parent_path)
        if parent_path in clone_index_for_path:
            parents_are_potential_clones = False
            if parent_clone_index is not None:
                if clone_index_for_path[parent_path] != parent_clone_index:
                    parents_have_same_clone_index = False
                    break
            else:
                parent_clone_index = clone_index_for_path[parent_path]
        else:
            # a parent holding its own files cannot be a pure clone container
            if len(get_files_within_path(parent_path)) != 0:
                parents_are_potential_clones = False
    if parents_are_potential_clones:
        # All parents contain no files and are hence potential clones. Enter
        # them as a new clone entry
        parent_clone_index = len(clones_list)
        clones_list.append([])
        clone_skip_flags_list.append(False)
        # NOTE(review): parent_clone_index equals the *old* len(clones_list),
        # but uncloned_descendant_paths / cloned_descendant_indexes were not
        # grown alongside clones_list -- these two indexed appends look like
        # they raise IndexError (and probably meant .append([]) on the outer
        # lists); confirm.
        uncloned_descendant_paths[parent_clone_index].append([])
        cloned_descendant_indexes[parent_clone_index].append([])
        for parent_path in parent_paths:
            clones_list[parent_clone_index].append(parent_path)
            clone_index_for_cloned_dir[parent_path] = parent_clone_index
            dirs = get_dirs_within_path(parent_path)
            for dir in dirs:
                uncloned_descendant_paths[parent_clone_index].append(os.path.join(parent_path, dir))
            # NOTE(review): list.extend() requires an argument -- this line
            # raises TypeError as written; the intended argument is unclear.
            uncloned_descendant_paths[parent_clone_index].extend()
    if parents_have_same_clone_index:
        if len(set(parent_paths)) == len(parent_paths):
            return
        # NOTE(review): this replaces the list at parent_clone_index with a
        # bare int, unlike every other use of cloned_descendant_indexes --
        # possibly meant to be .append(clone_index); confirm.
        cloned_descendant_indexes[parent_clone_index] = clone_index
    # TBD: Remove this clone_index's paths from the uncloned dirs list. If list thus becomes empty, then recurse back into this function
def compare_dirs(args):
dir1 = args[0]
dir2 = args[1]
output_file = args[2]
try:
validate_file_or_dir(dir1, is_file=False, is_write=False)
validate_file_or_dir(dir2, is_file=False, is_write=False)
rename_msg = rename_file_on_overwrite(output_file)
if rename_msg is not None:
print rename_msg
validate_file_or_dir(output_file, is_file=True, is_write=True)
output_file = open(output_file, "w")
except IOError as err:
raise UsageError(str(err))
try:
path1 = os.path.realpath(dir1)
path2 = os.path.realpath(dir2)
dcmp = dircmp(path1, path2)
list_diff_files(dcmp, output_file)
except IOError as err:
raise RuntimeError(str(err))
output_file.close()
def list_diff_files(dcmp, output_file):
    """Recursively write the names unique to (or differing between) the two
    sides of a dircmp, after dropping checksum-identical pairs."""
    left_list = list(dcmp.left_only) + list(dcmp.diff_files)
    right_list = list(dcmp.right_only) + list(dcmp.diff_files)
    (left_list, right_list) = remove_dups_based_on_cksms(
        dcmp.left, left_list, dcmp.right, right_list)
    # Report the left side first, then the right, matching the original order.
    for side_path, side_list in ((dcmp.left, left_list), (dcmp.right, right_list)):
        if side_list:
            output_file.write(side_path + "\n")
            output_file.write("\t" + str(side_list) + "\n")
    for common_dir in dcmp.common_dirs:
        list_diff_files(dcmp.subdirs[common_dir], output_file)
def remove_dups_based_on_cksms(left_path, left_list, right_path, right_list):
    """Remove entries with matching content checksums from both lists.

    Builds a checksum index of the left-side files, then drops every
    right-side file whose checksum matches, together with the matching
    left-side file. Both lists are mutated in place and also returned.
    """
    left_md5 = {}
    for left_file in left_list:
        path = os.path.join(left_path, left_file)
        if os.path.isfile(path):
            # Note: if two left files share a checksum, the later one wins
            # the index slot (original behavior, kept).
            left_md5[get_file_md5(path)] = left_file
    # BUG FIX: track matched left files in a set -- previously, two right
    # files matching the same left file caused left_list.remove() to raise
    # ValueError on the second removal attempt.
    dup_left_files = set()
    dup_right_files = []
    for right_file in right_list:
        path = os.path.join(right_path, right_file)
        if os.path.isfile(path):
            md5 = get_file_md5(path)
            if md5 in left_md5:
                dup_left_files.add(left_md5[md5])
                dup_right_files.append(right_file)
    for left_file in dup_left_files:
        left_list.remove(left_file)
    for right_file in dup_right_files:
        right_list.remove(right_file)
    return (left_list, right_list)
def get_partial_dups(dir_file, dirs_with_dup_files, dirs_with_not_all_files_duped,
        err_file):
    """Mark directories that hold duplicated files but also at least one
    file whose checksum is not among that directory's duplicates.

    NOTE(review): processing stops at the first unparseable line (original
    behavior) -- confirm ``continue`` was not intended instead.
    """
    for line_number, line in enumerate(dir_file, 1):
        fields = read_dir_file_line(line, err_file, line_number)
        (file_path, size, atime, mtime, md5_checksum) = fields
        if file_path is None:
            return
        file_dir = os.path.split(file_path)[0]
        duped_cksms = dirs_with_dup_files.get(file_dir)
        if duped_cksms is not None and md5_checksum not in duped_cksms:
            dirs_with_not_all_files_duped[file_dir] = ""
def update_dup_info(dir_file, file_paths_for_md5_cksm, file_size_for_md5_cksm,
        err_file):
    """Group file paths by checksum, recording each checksum's size and
    logging a size mismatch between files that share a checksum."""
    for line_number, line in enumerate(dir_file, 1):
        (file_path, size, _, _, md5_checksum) = read_dir_file_line(line, err_file, line_number)
        if file_path is None:
            # Stop on the first unparseable line (original behavior).
            return
        if md5_checksum not in file_paths_for_md5_cksm:
            # First sighting of this checksum.
            file_paths_for_md5_cksm[md5_checksum] = [file_path]
            file_size_for_md5_cksm[md5_checksum] = size
            continue
        if size != file_size_for_md5_cksm[md5_checksum]:
            err_file.write("ERROR: line " + str(line_number) +
                           ": Sz msmtch (key=" + md5_checksum + ")" +
                           " size-1 " + str(file_size_for_md5_cksm[md5_checksum]) +
                           " file-1 " + file_paths_for_md5_cksm[md5_checksum][0] +
                           " size-2 " + str(size) +
                           " file-2 " + file_path + "\n")
        file_paths_for_md5_cksm[md5_checksum].append(file_path)
def update_dup_dir_info(file_paths_for_md5_cksm, dirs_with_dup_files):
    """Record, per directory, the checksums that occur in more than one file."""
    for md5_checksum, file_paths in file_paths_for_md5_cksm.items():
        # A checksum seen in exactly one file is not a duplicate.
        if len(file_paths) == 1:
            continue
        for file_path in file_paths:
            file_dir = os.path.split(file_path)[0]
            dirs_with_dup_files.setdefault(file_dir, []).append(md5_checksum)
def update_dir_info(dir_file, dir_info, err_file):
    """Load every parseable listing line into dir_info, keyed by file path
    with a (size, atime, mtime, checksum) tuple as the value."""
    for line_number, line in enumerate(dir_file, 1):
        fields = read_dir_file_line(line, err_file, line_number)
        (file_path, size, atime, mtime, md5_checksum) = fields
        if file_path is None:
            # Stop on the first unparseable line (original behavior).
            return
        dir_info[file_path] = (size, atime, mtime, md5_checksum)
def read_dir_file_line(line, err_file, line_number):
    """Parse one line of a directory listing file.

    Expected layout: file-path size atime mtime md5. The path may itself
    contain spaces, in which case the leading extra words are re-joined
    into the path.

    Returns (file_path, size, atime, mtime, md5_checksum). On any parse
    error a message is written to err_file and a 5-tuple of Nones is
    returned so callers can unpack it safely.
    """
    BAD_LINE = (None, None, None, None, None)
    if line.startswith(RUNTIME_ERROR_PREFIX):
        err_file.write(str(line_number) + ": " + line.strip() + "\n")
        # BUG FIX: was a bare ``return`` (None); all callers unpack five values.
        return BAD_LINE
    words = line.split()
    # BUG FIX: check the word count before touching words[0] -- a blank
    # line previously raised IndexError here.
    if len(words) < 5:
        err_file.write(str(line_number) + ": (Bad line, not five words) " +
                       line.strip() + "\n")
        return BAD_LINE
    file_path = words[0]
    addl_words = 0
    if len(words) > 5:
        # The path contains embedded spaces: walk the extra words through
        # the raw line to recover the full path text.
        addl_words = len(words) - 5
        file_path_len = line.find(words[0])
        file_path_len += len(words[0])
        for index in range(addl_words):
            file_path_len = line.find(words[index+1], file_path_len)
            file_path_len += len(words[index+1])
        file_path = line[:file_path_len].strip()
    errs = ""
    size = None
    atime = None
    mtime = None
    try:
        size = int(words[addl_words + 1])
    except ValueError:
        errs += " Size value " + words[addl_words + 1] + " not int."
    try:
        atime = int(words[addl_words + 2])
    except ValueError:
        errs += " Atime value " + words[addl_words + 2] + " not int."
    try:
        mtime = int(words[addl_words + 3])
    except ValueError:
        errs += " Mtime value " + words[addl_words + 3] + " not int."
    if errs != "":
        err_file.write(str(line_number) + ": (" + errs + ") " + line.strip() +
                       "\n")
        return BAD_LINE
    return (file_path, size, atime, mtime, words[addl_words + 4])
def rename_file_on_overwrite(file_name):
    """If file_name exists, rename it aside with a timestamp suffix and
    return a human-readable message; return None when nothing exists."""
    if not os.path.isfile(file_name):
        return None
    backup_name = file_name + datetime.datetime.now().strftime(
        "-before-%Y-%m-%d-%H-%M-%S")
    os.rename(file_name, backup_name)
    return "Renaming " + file_name + " to " + backup_name
def rename_dir_on_overwrite(dir_name):
    """If dir_name exists, rename it aside with a timestamp suffix and
    return a human-readable message; return None when nothing exists."""
    if not os.path.isdir(dir_name):
        return None
    backup_name = dir_name + datetime.datetime.now().strftime(
        "-before-%Y-%m-%d-%H-%M-%S")
    os.rename(dir_name, backup_name)
    return "Renaming " + dir_name + " to " + backup_name
def validate_file_or_dir(file_or_dir_name, is_file, is_write,
        no_over_write = True):
    """Validate existence, type and access rights of a path.

    is_file  -- expect a regular file (else a directory).
    is_write -- require write access (else read access); when combined with
                no_over_write, an already-existing path is rejected.
    Raises UsageError on any failed check; returns None on success.
    """
    if is_write and no_over_write and os.path.exists(file_or_dir_name):
        raise UsageError(file_or_dir_name + " cannot be overwritten")
    # Type/existence check is identical for both read and write modes.
    if is_file:
        if not os.path.isfile(file_or_dir_name):
            raise UsageError(file_or_dir_name +
                             " not found or is not a file")
    else:
        if not os.path.isdir(file_or_dir_name):
            raise UsageError(file_or_dir_name +
                             " not found or is not a dir")
    if is_write:
        if not os.access(file_or_dir_name, os.W_OK):
            raise UsageError(
                "You do not appear to have write permissions in " +
                file_or_dir_name)
    else:
        if not os.access(file_or_dir_name, os.R_OK):
            raise UsageError("You do not appear to have read permissions in " +
                             file_or_dir_name)
def add_file_stats(out_line, file_path):
    """Append size, atime, mtime and content checksum of file_path to
    out_line, each followed by a single space, and return the result."""
    stat_info = os.stat(file_path)
    fields = (stat_info[stat.ST_SIZE],
              stat_info[stat.ST_ATIME],
              stat_info[stat.ST_MTIME],
              get_file_md5(file_path))
    for field in fields:
        out_line += str(field) + " "
    return out_line
def get_file_md5(file_path):
    """Return a hex digest of the file's contents, read in 1 MiB chunks.

    NOTE(review): despite the name this computes SHA-256, not MD5; the name
    is kept because callers use the result only as an opaque checksum key.
    """
    hasher = hashlib.sha256()
    blocksize = 1048576
    # BUG FIX: open in binary mode and close the handle -- it previously
    # leaked, and text mode can corrupt the digest on some platforms.
    with open(file_path, "rb") as fd:
        buf = fd.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            buf = fd.read(blocksize)
    return hasher.hexdigest()
def update_file_time(file_path, atime, mtime):
    """Set the access and modification times (seconds since epoch) of file_path."""
    os.utime(file_path, (atime, mtime))
def main():
valid_args_list = [
("getdirlist", ["directory-to-scan", "out-file"], get_dir_list),
("matchdircopies",
["directory-list-file-golden", "directory-list-file-test",
"golden-errors-file", "test-errors-file",
"golden-only-file", "test-only-file",
"size-or-cksm_mismatches-file", "name-matches-file"],
match_dir_copies),
("updatefiletimes",
["directory-to-update", "directory-list-file-golden",
"directory-list-file-test", "log-file"],
update_file_times),
("listduplicatefiles",
["directory-list-file", "...", "duplicates-list-file",
"full-dup-dirs-list-file", "part-dup-dirs-list-file"],
list_duplicate_files),
("comparedirs",
["1st-dir", "2nd-dir", "output-file",],
compare_dirs)
]
try:
if len(sys.argv) == 1:
raise UsageError("Missing args")
valid_op_flag = False
for valid_args in valid_args_list:
if valid_args[0] == sys.argv[1]:
valid_op_flag = True
valid_arg_count_flag = True
if "..." not in valid_args:
if len(sys.argv) != len(valid_args[1]) + 2:
valid_arg_count_flag = False
else:
if len(sys.argv) < len(valid_args[1]) + 1:
valid_arg_count_flag = False
if valid_arg_count_flag == False:
raise UsageError(
"Wrong number of arguments for operation ' " +
sys.argv[1] + " ' ")
valid_args[2](sys.argv[2:])
break
if valid_op_flag == False:
raise UsageError("Unrecognized operation " + sys.argv[1])
except UsageError as err:
print err.msg
Usage(valid_args_list)
sys.exit(1)
except RuntimeError as err:
print err.msg
sys.exit(2)
if __name__ == "__main__":
main() | {
"repo_name": "unchaoss/unchaoss",
"path": "engine/py/dirops/dirops.py",
"copies": "1",
"size": "33264",
"license": "apache-2.0",
"hash": 4834666172884900000,
"line_mean": 40.8427672956,
"line_max": 140,
"alpha_frac": 0.5712782588,
"autogenerated": false,
"ratio": 3.5226093402520386,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4593887599052039,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chitrabhanu'
import os
from slackclient import SlackClient
import requests
import sys
import datetime
import time
import json
from json2html import json2html
# This value is used as a delay between successive page requests to avoid getting rate limited by Slack
SECONDS_BETWEEN_SUCCESSIVE_PAGE_REQUESTS = 2
RUNTIME_ERROR_PREFIX = "RUNTIME ERROR: "
class RuntimeError(Exception):
    """Project error type carrying a prefixed message in .msg.

    NOTE(review): this intentionally-or-not shadows the builtin
    RuntimeError for the rest of the module; renaming would break callers,
    so the name is kept.
    """
    def __init__(self, msg):
        self.msg = RUNTIME_ERROR_PREFIX + msg
        # BUG FIX: also initialize Exception so str(exc) and exc.args carry
        # the message instead of being empty.
        Exception.__init__(self, self.msg)
class Iima79SlackBack:
    """Backs up and culls a Slack workspace: fetches channels, groups,
    users, messages and files via the Slack Web API, saves them as JSON,
    and renders browsable HTML summaries."""
    def __init__(self, slack_client, slack_token):
        """slack_client -- a SlackClient instance used for all API calls;
        slack_token -- API token (also used directly as a bearer token for
        authenticated file downloads in save_file)."""
        self.slack_client = slack_client
        self.slack_token=slack_token
        # Epoch reference used to convert Slack "ts" values to datetimes.
        self.epoch = datetime.datetime(1970,1,1)
        self.run_datetime = datetime.datetime.today().strftime("%Y-%m-%d-%H-%M")
        self.run_label = "cull_de_slack-"+ self.run_datetime
        # Caches keyed by Slack object id, plus rendered-HTML counterparts.
        self.channels = {}
        self.channels_html = ""
        self.channel_id_by_name = {}
        self.groups = {}
        self.groups_html = ""
        self.users = {}
        self.users_html = ""
        self.files = {}
        self.files_html = ""
        self.current_channel_messages = []
        # Per-channel list of "YYYY-MM" strings for which messages were saved.
        self.channel_yr_mth_lists = {}
        self.output_dir = os.path.join(".", self.run_label)
        self.merge_input_dirs = None
        # Output file names (relative to the summaries dir).
        self.channels_json = "channels.json"
        self.groups_json = "groups.json"
        self.users_json = "users.json"
        self.files_json = "files.json"
        self.messages_json_suffix = "_messages.json"
        self.html_prefix = "file:///nobackup/websaves/"
        self.valid_ops = ["CLEAR", "GET", "SAVE","MERGE","RENDER"]
        self.html_prolog = "<!DOCTYPE html>\n<html>\n<head>\n<title></title>\n</head>\n<body>"
        self.html_epilog = "</body>\n</html>"
        # Sentinel wrapped around links by get_href; expanded into real
        # <a href> tags later by create_html_page.
        self.link_indicator = "___"
    # ------------------------------------------------------------------
    # Trivial get/set accessor pairs for run and output configuration.
    # Kept as explicit methods to preserve the class's external API.
    # ------------------------------------------------------------------
    def set_run_label(self, run_label):
        self.run_label = run_label
    def get_run_label(self):
        return self.run_label
    def set_output_dir(self, output_dir):
        self.output_dir = output_dir
    def get_output_dir(self):
        return self.output_dir
    def set_merge_input_dirs(self, merge_input_dirs):
        self.merge_input_dirs = merge_input_dirs
    def get_merge_input_dirs(self):
        return self.merge_input_dirs
    def set_channels_json(self, channels_json):
        self.channels_json = channels_json
    def get_channels_json(self):
        return self.channels_json
    def set_channels_html(self, channels_html):
        self.channels_html = channels_html
    def get_channels_html(self):
        return self.channels_html
    def set_groups_json(self, groups_json):
        self.groups_json = groups_json
    def get_groups_json(self):
        return self.groups_json
    def set_groups_html(self, groups_html):
        self.groups_html = groups_html
    def get_groups_html(self):
        return self.groups_html
    def set_users_json(self, users_json):
        self.users_json = users_json
    def get_users_json(self):
        return self.users_json
    def set_users_html(self, users_html):
        self.users_html = users_html
    def get_users_html(self):
        return self.users_html
    def set_files_json(self, files_json):
        self.files_json = files_json
    def get_files_json(self):
        return self.files_json
    def set_files_html(self, files_html):
        self.files_html = files_html
    def get_files_html(self):
        return self.files_html
    def set_messages_json_suffix(self, messages_json_suffix):
        self.messages_json_suffix = messages_json_suffix
    def get_messages_json_suffix(self):
        return self.messages_json_suffix
    def set_html_prefix(self, html_prefix):
        self.html_prefix = html_prefix
    def get_html_prefix(self):
        return self.html_prefix
def process_channels(self, ops ):
for op in ops:
if op not in self.valid_ops:
raise RuntimeError(__name__+"::"+" invalid op " + str(op) + " valid ops " + str(self.valid_ops))
ops = set(ops)
if "CLEAR" in ops:
self.clear_channels()
ops.remove("CLEAR")
if "GET" in ops:
self.get_channels() # We do not load messages to save memory
ops.remove("GET")
if "SAVE" in ops:
self.save_channels()
ops.remove("SAVE")
if "RENDER" in ops:
self.render_channels()
ops.remove("RENDER")
if "MERGE" in ops:
self.merge_channels()
ops.remove("MERGE")
def clear_channels(self):
self.current_channel_messages = {}
self.channels={}
    def get_channels(self):
        """Fetch all channels (cursor-paginated via channels.list) and cache
        full info for each; sleeps between pages to avoid rate limits."""
        limit = 20
        cursor = None
        while cursor is None or cursor != '':
            resp = self.slack_client.api_call("channels.list", limit=limit, cursor=cursor)
            if resp.get('ok'):
                for channel in resp['channels']:
                    self.get_full_channel(channel['id'])
                # NOTE(review): assumes response_metadata is always present
                # on an ok response -- confirm against the Slack API docs.
                cursor = resp['response_metadata']['next_cursor']
            else:
                raise RuntimeError(__name__+"::"+str(resp))
            time.sleep(SECONDS_BETWEEN_SUCCESSIVE_PAGE_REQUESTS)
    def get_full_channel(self, channel_id):
        """Fetch one channel's full record and index it by id and by name."""
        resp = self.slack_client.api_call("channels.info", channel=channel_id)
        if resp.get('ok'):
            self.channels[resp['channel']['id']] = resp['channel']
            self.channel_id_by_name[resp['channel']['name']] = resp['channel']['id']
        else:
            raise RuntimeError(__name__+"::"+str(resp))
    def save_channels(self):
        """Dump the cached channel records to channels.json, then save each
        channel's message history to per-month JSON files."""
        summaries_dir = self.get_summaries_dir()
        output_file_name = os.path.join(summaries_dir, self.channels_json)
        with self.create_file(output_file_name, "w") as save_fd:
            json.dump(self.channels, save_fd)
        for channel_id in self.channels:
            self.save_messages(channel_id)
    def render_channels(self):
        """Build the channels summary HTML table, rendering each channel's
        saved per-month message files and linking newest/oldest months."""
        output_json = {"channels" : []}
        for channel_id in self.channels:
            info = {}
            info ["Name"] = self.channels[channel_id]["name"]
            info ["Topic"] = self.channels[channel_id]["topic"]["value"]
            info ["Purpose"] = self.channels[channel_id] ["purpose"]["value"]
            list_index = 0
            later_messages_json_path = None
            # channel_yr_mth_lists holds months newest-first, so entry
            # list_index + 1 is the chronologically earlier month.
            for year_mth in self.channel_yr_mth_lists[channel_id]:
                messages_json_path = self.get_messages_json_path(channel_id, year_mth)
                if list_index == len(self.channel_yr_mth_lists[channel_id]) - 1:
                    earlier_messages_json_path = None
                else:
                    earlier_messages_year_mth = self.channel_yr_mth_lists[channel_id][list_index + 1]
                    earlier_messages_json_path =\
                        self.get_messages_json_path(channel_id, earlier_messages_year_mth)
                result = self.create_rendered_messages(self.channels[channel_id]["name"], year_mth,
                                messages_json_path, later_messages_json_path,\
                                earlier_messages_json_path)
                if result:
                    link = self.get_href(messages_json_path.replace(".json", ".html"), year_mth)
                    if "Latest Messages" not in info:
                        info ["Latest Messages"] = link
                        # NOTE(review): "Earliest Messages" is only assigned in
                        # this first-time branch, so it never advances to the
                        # truly earliest month -- confirm whether it should be
                        # set on every iteration instead.
                        info ["Earliest Messages"] = link
                    later_messages_json_path = self.get_messages_json_path(channel_id, year_mth)
                list_index += 1
            output_json ["channels"].append(info)
        if output_json["channels"] != []:
            self.channels_html = json2html.convert(json = output_json)
        else:
            self.channels_html = ""
    def merge_channels(self):
        """Placeholder: merging channel data from earlier runs is not implemented."""
        pass
    def clear_messages(self, channel_id):
        """Forget cached messages for one channel.

        NOTE(review): treats current_channel_messages as a dict keyed by
        channel id, but save_messages uses it as a flat list -- confirm
        which representation is intended.
        """
        if channel_id in self.current_channel_messages:
            del self.current_channel_messages[channel_id]
    def save_messages(self, channel_id):
        """Page backwards through a channel's history and save messages to
        one JSON file per calendar month.

        Slack returns messages newest-first, so a change of the "YYYY-MM"
        key marks a completed month, which is flushed to disk and recorded
        in channel_yr_mth_lists (newest month first).
        """
        latest = None
        has_more = True
        self.current_channel_messages = []
        year_mth = ""
        while has_more:
            resp = self.slack_client.api_call("channels.history", channel=channel_id, latest=latest)
            if resp.get('ok'):
                has_more = resp["has_more"]
                for message in resp['messages']:
                    new_year_mth=self.get_yr_mth_for_ts(message["ts"])
                    if new_year_mth != year_mth:
                        if year_mth != "":
                            # Month boundary: flush the just-finished month.
                            output_file_name = os.path.join(self.output_dir,\
                                self.get_messages_json_path(channel_id, year_mth))
                            with self.create_file(output_file_name, "w") as save_fd:
                                json.dump(self.current_channel_messages, save_fd)
                            if channel_id not in self.channel_yr_mth_lists:
                                self.channel_yr_mth_lists[channel_id] = []
                            self.channel_yr_mth_lists[channel_id].append(year_mth)
                        year_mth = new_year_mth
                        self.current_channel_messages = []
                    self.current_channel_messages.append(message)
                    # "latest" positions the next history page before this message.
                    latest = message["ts"]
            else:
                raise RuntimeError(__name__+"::"+str(resp))
            time.sleep(SECONDS_BETWEEN_SUCCESSIVE_PAGE_REQUESTS)
        if year_mth != "":
            # Flush the final (oldest) partially-collected month.
            output_file_name = os.path.join(self.output_dir, self.get_messages_json_path(channel_id, year_mth))
            with self.create_file(output_file_name, "w") as save_fd:
                json.dump(self.current_channel_messages, save_fd)
            if channel_id not in self.channel_yr_mth_lists:
                self.channel_yr_mth_lists[channel_id] = []
            self.channel_yr_mth_lists[channel_id].append(year_mth)
def get_messages_json_path(self, channel_id, year_mth):
output_path = os.path.join(self.output_dir, year_mth)
self.create_dir(output_path)
output_file_name = self.channels[channel_id]["name"] + self.messages_json_suffix
return os.path.join(year_mth, output_file_name)
    def create_rendered_messages(self, channel_name, year_mth, messages_json_path, later_messages_json_path,\
            earlier_messages_json_path):
        """Render one saved month of channel messages as an HTML page with
        later/earlier month navigation links.

        Returns True if any message was rendered (and a page written),
        False otherwise. Only plain messages and file_share/file_mention
        subtypes are rendered; other subtypes are skipped.
        """
        with open(os.path.join(self.output_dir, messages_json_path)) as fd:
            input_json = json.load(fd)
        output_json = {}
        if later_messages_json_path is not None:
            link = later_messages_json_path.replace(".json", ".html")
            text = link.replace(".html", "")
            output_json["Later messages"] = self.get_href(link, text)
        if earlier_messages_json_path is not None:
            link = earlier_messages_json_path.replace(".json", ".html")
            text = link.replace(".html", "")
            output_json["Earlier messages"] = self.get_href(link, text)
        output_json["messages"] = []
        for message in input_json:
            output_message_object = {}
            # Plain user messages have no "subtype" key.
            if "subtype" not in message:
                output_message_object["From"] = self.users[message["user"]]["name"]
                output_message_object["Time"] = self.get_ca_time_str_for_ts(message["ts"]) + "(ts = " + message["ts"] + ")"
                output_message_object["Text"] = message ["text"]
            elif message["subtype"] == "file_share" or message["subtype"] == "file_mention":
                output_message_object["From"] = self.users[message["user"]]["name"]
                output_message_object["Time"] = self.get_ca_time_str_for_ts(message["ts"]) + "(ts = " + message["ts"] + ")"
                output_message_object["Text"] = message["subtype"]
                output_message_object["File"] = self.get_file_link_for_file(message["file"])
            if output_message_object != {}:
                output_json["messages"].append(output_message_object)
        messages_html_path = messages_json_path.replace(".json", ".html")
        if output_json["messages"] != []:
            messages_html = "<h3>" + channel_name + " Messages " + year_mth + "</h3>"
            messages_html += "<br />" + self.get_href(messages_json_path, "Click here for more Messages info") + ". "
            messages_html += "(To locate a particular message use your browser to search for its ts (in parentheses after the time) from the table below)"
            messages_html = messages_html + json2html.convert(json = output_json)
            self.create_html_page(os.path.join(self.output_dir, messages_html_path), messages_html)
            return True
        else:
            return False
    def merge_messages(self):
        """Placeholder: merging saved messages from earlier runs is not implemented."""
        pass
def post_message_to_channel(self, channel_id, text):
resp = self.slack_client.api_call("chat.postMessage", channel=channel_id, text=text)
if resp.get('ok'):
pass
else:
raise RuntimeError(__name__+"::"+str(resp))
    def delete_messages(self, year_mth):
        """Delete from Slack every message that was previously saved for the
        given "YYYY-MM" month, using the saved channels.json and per-month
        message files as the source of ts values to delete."""
        summaries_dir = self.get_summaries_dir()
        channels_load_file_name = os.path.join(summaries_dir, self.channels_json)
        with open(channels_load_file_name) as load_fd:
            channels = json.load(load_fd)
        total = 0
        for channel_id in channels:
            load_dir = os.path.join(self.output_dir, year_mth)
            messages_file_name = channels[channel_id]["name"] + self.messages_json_suffix
            messages_load_file_name = os.path.join(load_dir, messages_file_name)
            if os.path.isfile(messages_load_file_name):
                with open(messages_load_file_name) as load_fd:
                    messages = json.load(load_fd)
                count = 0
                for message in messages:
                    resp = self.slack_client.api_call("chat.delete", channel=channel_id, ts=message["ts"])
                    # Deliberately best-effort: failures are printed, not raised.
                    if resp.get('ok'):
                        print(message["ts"])
                    else:
                        print(message["ts"] + " " + str(resp))
                        #Raise RuntimeError(__name__+"::"+str(resp))
                    count += 1
                    time.sleep(SECONDS_BETWEEN_SUCCESSIVE_PAGE_REQUESTS)
                print("CHANNEL: " + channels[channel_id]["name"] + ": " + str(count) + " messages deleted from " + year_mth)
                total += count
        print("TOTAL " + str(total) + " messages deleted from " + year_mth)
    def delete_files(self, year_mth):
        """Delete from Slack every file whose saved creation timestamp falls
        in the given "YYYY-MM" month, using the saved files.json records."""
        summaries_dir = self.get_summaries_dir()
        files_load_file_name = os.path.join(summaries_dir, self.files_json)
        with open(files_load_file_name) as load_fd:
            files = json.load(load_fd)
        count = 0
        for file_id in files:
            file_year_mth = self.get_yr_mth_for_ts(files[file_id]["created"])
            if file_year_mth != year_mth:
                continue
            resp = self.slack_client.api_call("files.delete", file=file_id)
            if resp.get('ok'):
                pass
            else:
                raise RuntimeError(__name__+"::"+str(resp))
            count += 1
            time.sleep(SECONDS_BETWEEN_SUCCESSIVE_PAGE_REQUESTS)
        print(str(count) + " files deleted from " + year_mth)
def process_groups(self, ops, ):
for op in ops:
if op not in self.valid_ops:
raise RuntimeError(__name__+"::"+" invalid op " + str(op) + " valid ops " + str(self.valid_ops))
ops = set(ops)
if "CLEAR" in ops:
self.clear_groups()
ops.remove("CLEAR")
if "GET" in ops:
self.get_groups()
ops.remove("GET")
if "SAVE" in ops:
self.save_groups()
ops.remove("SAVE")
if "RENDER" in ops:
self.render_groups()
ops.remove("RENDER")
if "MERGE" in ops:
self.merge_groups()
ops.remove("MERGE")
    def clear_groups(self):
        """Drop the cached group records."""
        self.groups={}
    def get_groups(self):
        """Fetch all private groups via groups.list and cache them by id."""
        resp = self.slack_client.api_call("groups.list")
        if resp.get('ok'):
            for group in resp['groups']:
                self.groups[group['id']] = group
        else:
            raise RuntimeError(__name__+"::"+str(resp))
    def save_groups(self):
        """Dump the cached group records to groups.json in the summaries dir."""
        summaries_dir = self.get_summaries_dir()
        output_file_name = os.path.join(summaries_dir, self.groups_json)
        with self.create_file(output_file_name, "w") as save_fd:
            json.dump(self.groups, save_fd)
def render_groups(self):
output_json = {"groups" : []}
for group_id in self.groups:
info = {}
info ["Name"] = self.groups[group_id]["name"]
info ["Topic"] = self.groups[group_id]["topic"]["value"]
info ["Purpose"] = self.groups[group_id]["purpose"]["value"]
output_json ["groups"].append(info)
if output_json["groups"] != []:
self.groups_html = json2html.convert(json = output_json)
else:
self.groups_html = ""
def merge_groups(self):
pass
def process_users(self, ops, ):
for op in ops:
if op not in self.valid_ops:
raise RuntimeError(__name__+"::"+" invalid op " + str(op) + " valid ops " + str(self.valid_ops))
ops = set(ops)
if "CLEAR" in ops:
self.clear_users()
ops.remove("CLEAR")
if "GET" in ops:
self.get_users()
ops.remove("GET")
if "SAVE" in ops:
self.save_users_and_images()
ops.remove("SAVE")
if "RENDER" in ops:
self.render_users()
ops.remove("RENDER")
if "MERGE" in ops:
self.merge_users()
ops.remove("MERGE")
    def clear_users(self):
        """Drop the cached user records."""
        self.users={}
    def get_users(self):
        """Fetch all workspace members (cursor-paginated users.list) and
        cache them by id; sleeps between pages to avoid rate limits."""
        limit = 200
        cursor = None
        while cursor is None or cursor != '':
            resp = self.slack_client.api_call("users.list", limit=limit, cursor=cursor)
            if resp.get('ok'):
                for user in resp['members']:
                    self.users[user['id']] = user
                cursor = resp['response_metadata']['next_cursor']
            else:
                raise RuntimeError(__name__+"::"+str(resp))
            time.sleep(SECONDS_BETWEEN_SUCCESSIVE_PAGE_REQUESTS)
    def save_users_and_images(self):
        """Dump cached user records to users.json (avatar image download is
        currently disabled -- see the commented-out block below)."""
        summaries_dir = self.get_summaries_dir()
        output_file_name = os.path.join(summaries_dir, self.users_json)
        with self.create_file(output_file_name, "w") as save_fd:
            json.dump(self.users, save_fd)
        # Uncomment this code after doing the self-service version of this program because
        # Slack provides image dir access only to specific users
        #for user_id in self.users:
        #    for key in self.users[user_id]["profile"]:
        #        if isinstance(key, unicode) and key.startswith("image"):
        #            url = self.users[user_id]["profile"][key]
        #            images_dir = os.path.join(summaries_dir, (self.users[user_id]["name"] + "_" + "images"))
        #            self.create_dir(images_dir)
        #            file_name = os.path.split(url)[-1:][0]
        #            self.save_file(file_name, url, images_dir)
        #            os.rename(os.path.join(images_dir, file_name), os.path.join(images_dir, (key + ".jpg")))
def render_users(self):
output_json = {"users" : []}
for user_id in self.users:
info = {}
info ["Name"] = self.users[user_id]["name"]
if "real_name" in self.users[user_id]["profile"]:
info ["Real Name"] = self.users[user_id]["profile"]["real_name"]
else:
info["Real Name"] = "?"
if "email" in self.users[user_id]["profile"]:
info ["Email"] = self.users[user_id]["profile"]["email"]
else:
info["Email"] = "?"
# Uncomment this code after doing the self-service version of this program because
# Slack provides image dir access only to specific users
#images_dir = os.path.join(summaries_dir, (user_id + "_" + "images"))
#if os.path.isdir(images_dir):
# files = os.listdir(images_dir)
# if files != []:
# for file in files:
# file_path = os.path.join(images_dir, file)
# info["file"] = self.get_href(file_path, file)
output_json ["users"].append(info)
if output_json["users"] != []:
self.users_html = json2html.convert(json = output_json)
else:
self.users_html = ""
def merge_users(self):
pass
def process_files(self, ops, output_dir=None):
for op in ops:
if op not in self.valid_ops:
raise RuntimeError(__name__+"::"+" invalid op " + str(op) + " valid ops " + str(self.valid_ops))
ops = set(ops)
if "CLEAR" in ops:
self.clear_files()
ops.remove("CLEAR")
if "GET" in ops:
self.get_files()
ops.remove("GET")
if "SAVE" in ops:
self.save_files()
ops.remove("SAVE")
if "RENDER" in ops:
self.render_files()
ops.remove("RENDER")
if "MERGE" in ops:
self.merge_files()
ops.remove("MERGE")
def clear_files(self):
self.files={}
    def save_file_and_thumbs(self, file_id):
        """Download one cached Slack file plus any thumbnail variants into
        the year-month directory matching its creation timestamp."""
        file_name = self.files[file_id]["name"]
        url = self.files[file_id]["url_private_download"]
        output_dir = self.get_output_dir_for_ts(self.files[file_id]["created"])
        self.save_file(file_name, url, output_dir)
        for key in self.files[file_id]:
            # The third term below reflects the fact that some self.files[file_id]["thumb_xxx"]
            # entries are ints (eg 1024) and not unicode strings
            # (``unicode`` makes this module Python 2 only).
            if isinstance(key, unicode) and\
               key.startswith("thumb") and \
               isinstance(self.files[file_id][key], unicode) and\
               self.files[file_id][key].startswith("http"):
                url = self.files[file_id][key]
                images_dir = os.path.join(output_dir, (file_id + "_" + "images"))
                self.create_dir(images_dir)
                # Use the last path component of the URL as the local name.
                file_name = os.path.split(url)[-1:][0]
                self.save_file(file_name, url, images_dir)
    def save_file(self, file_name, url, output_dir):
        """Stream url into output_dir/file_name, authenticating with the
        Slack token as a bearer token."""
        resp=requests.get(url, headers={'Authorization': 'Bearer %s' % self.slack_token}, stream=True)
        with self.create_file(os.path.join(output_dir, file_name), 'wb') as fd:
            for chunk in resp.iter_content(chunk_size=1024):
                fd.write(chunk)
    def get_files(self):
        """Fetch metadata for all files (page-number-paginated files.list)
        and cache it by file id; sleeps between pages to avoid rate limits."""
        page = 1
        pages = 1 # Starting default, actual value will be fetched
        while (page - 1) < pages:
            resp = self.slack_client.api_call("files.list", page = page)
            if resp.get('ok'):
                for file in resp['files']:
                    self.files[file['id']] = file
                # Total page count comes back with every response.
                pages = resp['paging']['pages']
            else:
                raise RuntimeError(__name__+"::"+str(resp))
            page += 1
            time.sleep(SECONDS_BETWEEN_SUCCESSIVE_PAGE_REQUESTS)
    def save_files(self):
        """Dump cached file metadata to files.json, then download every
        file (and its thumbnails) to the per-month directories."""
        summaries_dir = self.get_summaries_dir()
        output_file_name = os.path.join(summaries_dir, self.files_json)
        with self.create_file(output_file_name, "w") as save_fd:
            json.dump(self.files, save_fd)
        for file_id in self.files:
            self.save_file_and_thumbs(file_id)
    def render_files(self):
        """Render cached files as an HTML table of title + link; files_html
        becomes the empty string when there are no files."""
        output_json = {"files" : []}
        for file_id in self.files:
            info = {}
            info ["Title"] = self.files[file_id]["title"]
            info ["Link"] = self.get_file_link_for_file(self.files[file_id])
            output_json ["files"].append(info)
        if output_json["files"] != []:
            self.files_html = json2html.convert(json = output_json)
        else:
            self.files_html = ""
    def merge_files(self):
        """Placeholder: merging file data from earlier runs is not implemented."""
        pass
    def create_index_file(self):
        """Assemble the top-level index.html from the previously rendered
        channels/users/files HTML fragments, with intra-page anchors and a
        developer-notes footer, and write it to the summaries dir."""
        html = ""
        html+= "<h2>" + "CULL-DE-SLACK generated output: " + self.run_label + ": " + self.run_datetime + "</h2>"
        html += "<ol>"
        html += "<li>CULL-DE-SLACK is an acronym for C-B U-ncle\'s L-ovely L-ittle D-oorway E-ntering S-lack!!</li>"
        html += "<li>It deletes files and messages from our Slack group and generates them here to keep us within our Free Tier limits</li>"
        html += "<li>This website will be replaced by a snazzier, jazzier version so do not fret if you find it somewhat stilted/mechanized! This is temporary</li>"
        html += "<li>Should you hit problems, email CBD at: <a href=\"mailto:uncostservices@uncostservices.com\">CBD</a></li>"
        html += "</ol>"
        html += "<a name=\"top\"></a>\n"
        # Table of contents: only link to sections that have content.
        if self.channels_html != "":
            html += "<h3><a href=\"#channels\">Channels and Messages </a></h3>\n"
        if self.users_html != "":
            html += "<h3><a href=\"#users\">Users </a></h3>\n"
        if self.files_html != "":
            html += "<h3><a href=\"#files\">Files </a></h3\n>"
        html += "<h3><a href=\"#fordevelopers\">For Developers (placeholder till we can get our public CULL-DE-SLACK website up)</a></h3\n>"
        if self.channels_html != "":
            html += "<a name=\"channels\"></a>\n"
            html += "<h4><a href=\"#top\">Back to top</a></h4\n>"
            html += "<h3>CHANNELS and MESSAGES</h3>\n"
            channels_json_path = os.path.join(self.get_summaries_dir(), self.get_channels_json())
            html += "<br /><a href = \"" + channels_json_path + "\">Click here for (excruciatingly detailed!) Channels info</a>. (To locate a particular channel use your browser to search for its name from the table below)"
            html += self.channels_html
        if self.users_html != "":
            html += "<a name=\"users\"></a>\n"
            html += "<h4><a href=\"#top\">Back to top</a></h4\n>"
            html += "<h3>USERS</h3>\n"
            users_json_path = os.path.join(self.get_summaries_dir(), self.get_users_json())
            html += "<br /><a href = \"" + users_json_path + "\">Click here for (excruciatingly detailed!) Users info</a>. (To locate a particular channel use your browser to search for the (user) name from the table below)"
            html += self.users_html
        if self.files_html != "":
            html += "<a name=\"files\"></a>\n"
            html += "<h4><a href=\"#top\">Back to top</a></h4\n>"
            html += "<h3>FILES</h3>\n"
            files_json_path = os.path.join(self.get_summaries_dir(), self.get_files_json())
            html += "<br /><a href = \"" + files_json_path + "\">Click here for (excruciatingly detailed!) Files info</a>. (To locate a particular file use your browser to search for its name from the table below)"
            html += self.files_html
        html += "<a name=\"fordevelopers\"></a>\n"
        html += "<h3>FOR DEVELOPERS</h3>\n"
        html += "<h5>If you are a developer interested in the code that generated this report:</h5>\n"
        html += "<ul>"
        html += "<li>It will be open sourced and will ultimately be available at <a href=\"https://github.com/unchaoss\">The Unchaoss repository at Github</a></li>"
        html += "<li>It is in pure Python and uses packages only from PyPi</li>"
        html += "<li>The intent is to grow it into a full service Slack interface for developers (along the lines of - but more full-featured than - <a href=\"https://pypi.python.org/pypi/slacker/\">Slacker</a>)</li>"
        html += "<li>Developer Email: <a href=\"mailto:uncostservices@uncostservices.com\">Uncost Services</a></li>"
        html += "</ul>"
        html += "<p>Note that for a full solution to Slack backup you will need to allow individual users to run this code since even the admin cannot access private messages for deletion. (Depending on your solution) this may require creation of a Web portal, and/or the use of non-Pythonic environments.</p>"
        self.create_html_page(os.path.join(self.get_summaries_dir(), "index.html"), html)
def get_file_link_for_file(self, file):
file_name = file["name"]
year_mth = self.get_yr_mth_for_ts(file["created"])
file_path = os.path.join(year_mth, file_name)
return self.get_href(file_path, file_name)
def get_href(self, target, text = ""):
return self.link_indicator + self.html_prefix + target + self.link_indicator + text + self.link_indicator
def get_ca_time_str_for_ts(self, gmt_ts):
microseconds_since_epoch = float(gmt_ts) * 1000000
gmt_offset_microseconds = 8 * 3600 * 1000000
if self.is_dst(gmt_ts):
gmt_offset_microseconds -= (3600 * 1000000)
microseconds_since_epoch -= gmt_offset_microseconds
return str(self.epoch + datetime.timedelta(microseconds=microseconds_since_epoch))
def is_dst(self, gmt_ts):
    """Return whether *gmt_ts* falls in daylight-saving time.

    Stubbed to always True for now (TODO: derive from the timestamp).
    """
    return True
def get_yr_mth_for_ts(self, gmt_ts):
    """Return the "YYYY-MM" prefix of the local-time string for *gmt_ts*."""
    return self.get_ca_time_str_for_ts(gmt_ts)[:7]
def get_output_dir_for_ts(self, gmt_ts):
    """Return (creating it if needed) the per-month output dir for *gmt_ts*."""
    target = os.path.join(self.output_dir, self.get_yr_mth_for_ts(gmt_ts))
    self.create_dir(target)
    return target
def get_summaries_dir(self):
    """Return (creating it if needed) the "slackback<run_label>" summary dir."""
    summaries = os.path.join(self.output_dir, "slackback" + self.run_label)
    self.create_dir(summaries)
    return summaries
def create_file(self, file_name, mode):
    """Open *file_name* with *mode*, backing up any existing file first.

    An existing file is renamed to "<file_name>.<run_datetime>" before the
    new one is opened.  Raises RuntimeError (wrapping the OS error) if the
    rename or the open fails.
    """
    if os.path.isfile(file_name):
        try:
            os.rename(file_name, (file_name + "." + self.run_datetime))
        # BUG FIX: os.rename raises OSError, which "except IOError" did not
        # catch on Python 2; EnvironmentError covers IOError and OSError
        # there, and is an alias of OSError on Python 3.
        except EnvironmentError as err:
            raise RuntimeError(str(err) + " renaming file " + file_name +
                               " to " + (file_name + "." + self.run_datetime))
    try:
        return open(file_name, mode)
    except EnvironmentError as err:
        raise RuntimeError(str(err) + " creating " + file_name)
def create_html_page(self, file_name, html_text):
    """Write *html_text* to *file_name* as a complete HTML page.

    Link triples embedded by get_href -- <ind>target<ind>text<ind>, where
    <ind> is self.link_indicator -- are expanded into <a href> tags, then
    every non-ASCII character is escaped as an HTML numeric entity, and the
    result is wrapped in self.html_prolog / self.html_epilog.

    Raises RuntimeError when a link triple is missing its middle or
    trailing indicator.
    """
    html_after_handling_link_indicators = ""
    # Look for leading (link) indicator
    index = html_text.find(self.link_indicator)
    while index != -1:
        # Keep the plain text preceding the triple.
        html_after_handling_link_indicators += html_text[0:index]
        # Remove leading (link) indicator
        html_text = html_text[index + len(self.link_indicator):]
        # Look for middle (link) indicator
        index2 = html_text.find(self.link_indicator)
        if index2 == -1:
            raise RuntimeError("Missing link indicator after target, remaining html is: " + html_text)
        target = html_text[0:index2]
        # Remove middle (link) indicator
        html_text = html_text[index2 + len(self.link_indicator):]
        # Look for trailing (link) indicator
        index3 = html_text.find(self.link_indicator)
        if index3 == -1:
            raise RuntimeError("Missing link indicator after text, remaining html is: " + html_text)
        text = html_text[0:index3]
        # Remove trailing (link) indicator
        html_text = html_text[index3 + len(self.link_indicator):]
        # Add link href to output
        html_after_handling_link_indicators += ("<a href=\"" + target + "\">" + text + "</a>")
        # Look for leading (link) indicator
        index = html_text.find(self.link_indicator)
    # Append whatever follows the last link.
    html_after_handling_link_indicators += html_text
    output_html = ""
    # Escape all non-ASCII characters as numeric entities ("&#NNN;").
    for hc in html_after_handling_link_indicators:
        if ord(hc) >= 128:
            output_html += "&#" + str(ord(hc)) + ";"
        else:
            output_html += hc
    with self.create_file(file_name, "w") as fd:
        fd.write(self.html_prolog + output_html + self.html_epilog)
def create_dir(self, dir_name):
    """Ensure *dir_name* exists as a writable directory.

    Raises RuntimeError if the path exists but is not a directory, is not
    writable, or if the directory cannot be created.
    """
    if os.path.exists(dir_name):
        if os.path.isdir(dir_name):
            if not os.access(dir_name, os.W_OK):
                raise RuntimeError("You do not have write access to dir " + dir_name)
        else:
            raise RuntimeError(dir_name + " is not a directory")
    else:
        try:
            # BUG FIX: a run of stray "+++..." characters after this call
            # was a syntax error; also os.mkdir raises OSError, not IOError,
            # so the except clause never fired.
            os.mkdir(dir_name)
        except OSError as err:
            raise RuntimeError(str(err) + " creating " + dir_name)
def main():
    """Run a full backup pass (fetch + render) and then merge a prior run."""
    try:
        SLACK_TOKEN = os.environ.get('SLACK_TOKEN')
        slack_client = SlackClient(SLACK_TOKEN)
        slack_obj = Iima79SlackBack(slack_client, SLACK_TOKEN)
        run_datetime = datetime.datetime.today().strftime("%Y-%m-%d-%H-%M")
        run_label = "cull_de_slack-" + run_datetime
        slack_obj.set_output_dir(os.path.join("/nobackup", run_label))
        slack_obj.set_run_label("")
        slack_obj.set_html_prefix("http://cbdasgupta.org/slackback/websaves/")
        print("Users...")
        slack_obj.process_users(["CLEAR", "GET", "SAVE", "RENDER"])
        print("Channels...")
        slack_obj.process_channels(["CLEAR", "GET", "SAVE", "RENDER"])
        print("Files...")
        slack_obj.process_files(["CLEAR", "GET", "SAVE", "RENDER"])
        slack_obj.create_index_file()
        slack_obj.set_merge_input_dirs(["/nobackup/cds-12-06-17"])
        # BUG FIX: set_output_dir is called with a path string everywhere
        # else in this function; it was passed a one-element list here.
        slack_obj.set_output_dir("/nobackup/websaves")
        print("Merging Users...")
        slack_obj.process_users(["MERGE", "RENDER"])
        print("Merging Channels...")
        slack_obj.process_channels(["MERGE", "RENDER"])
        print("Merging Files...")
        slack_obj.process_files(["MERGE", "RENDER"])
        slack_obj.create_index_file()
        print("Done.")
        # Optional destructive clean-up steps, deliberately left disabled:
        #print ("Deleting messages 2017-03)")
        #slack_obj.delete_messages('2017-03')
        #print ("Deleting messages 2017-04)")
        #slack_obj.delete_messages('2017-04')
        #slack_obj.delete_files('2017-03')
    except RuntimeError as err:
        # BUG FIX: "print err.msg" was Python 2 statement syntax in an
        # otherwise print()-style file, and RuntimeError has no .msg
        # attribute -- str(err) carries the message either way.
        print(str(err))
        sys.exit(1)
# Script entry point: run the backup/merge pipeline when executed directly.
if __name__ == "__main__":
    main()
| {
"repo_name": "unchaoss/unchaoss",
"path": "self-contained-apps/py/slackbak/slackback.py",
"copies": "1",
"size": "34669",
"license": "apache-2.0",
"hash": -3842203280328850000,
"line_mean": 44.0246753247,
"line_max": 310,
"alpha_frac": 0.5523378234,
"autogenerated": false,
"ratio": 3.746784826542743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9760652932779231,
"avg_score": 0.007693943432702314,
"num_lines": 770
} |
__author__ = 'chmod'
from rHLDS import const
from io import BytesIO
import socket
import sys
class Console:
    """Rcon console for an HLDS game server, speaking its UDP protocol.

    Flow: fetch a challenge token ("getchallenge"), then send
    "rcon <challenge> <password> <cmd>" and read the reply.
    """

    def __init__(self, *, host, port=27015, password):
        self.host = host
        self.port = port
        self.password = password
        # BUG FIX: the socket used to be a class attribute, so every
        # Console instance shared one UDP socket -- one instance calling
        # disconnect() broke all others.  It is now created per instance.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def connect(self):
        """Connect the UDP socket and verify the rcon password."""
        self.sock.settimeout(4)
        self.sock.connect((self.host, int(self.port)))
        if self.execute('stats') == 'Bad rcon_password.':
            print('Bad password!')
            self.disconnect()
            sys.exit(1)

    def disconnect(self):
        """Close the underlying socket."""
        self.sock.close()

    def getChallenge(self):
        """Request and return the server's challenge token (as a string)."""
        try:
            #Format message to server
            msg = BytesIO()
            msg.write(const.startBytes)
            msg.write(b'getchallenge')
            msg.write(const.endBytes)
            self.sock.send(msg.getvalue())
            response = BytesIO(self.sock.recv(const.packetSize))
            # NOTE(review): str() over bytes includes the b'...' repr; this
            # relies on the token being the second space-separated field.
            return str(response.getvalue()).split(" ")[1]
        except Exception as e:
            print(e)
            self.disconnect()
            sys.exit(1)

    def execute(self, cmd):
        """Send rcon command *cmd* and return the server's decoded reply."""
        try:
            challenge = self.getChallenge()
            #Format message to server
            msg = BytesIO()
            msg.write(const.startBytes)
            msg.write('rcon '.encode())
            msg.write(challenge.encode())
            msg.write(b' ')
            msg.write(self.password.encode())
            msg.write(b' ')
            msg.write(cmd.encode())
            msg.write(const.endBytes)
            self.sock.send(msg.getvalue())
            response = BytesIO(self.sock.recv(const.packetSize))
            # Strip the 5-byte header and the trailing 3 bytes of the reply.
            return response.getvalue()[5:-3].decode()
        except Exception as e:
            print(e)
            self.disconnect()
            sys.exit(1)
| {
"repo_name": "chmod1/rHLDS",
"path": "rHLDS/console.py",
"copies": "1",
"size": "1886",
"license": "mit",
"hash": -2210590902798207700,
"line_mean": 26.3333333333,
"line_max": 64,
"alpha_frac": 0.5349946978,
"autogenerated": false,
"ratio": 4.108932461873638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5143927159673638,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chong-U Lim, culim@mit.edu'
class Analyzer(object):
    """Summary statistics over a parsed PuzzleScript script.

    *puzzlescript* is a dict-like Script object mapping section names to
    parsed section objects (see puzzlescript.py).
    """

    def __init__(self, puzzlescript):
        self.puzzlescript = puzzlescript

    def get_number_of_levels(self):
        return len(self.puzzlescript['levels'].levels)

    def get_number_of_rules(self):
        # A script without a RULES section simply has zero rules.
        if 'rules' not in self.puzzlescript:
            return 0
        return len(self.puzzlescript['rules'].rules)

    def get_number_of_winconditions(self):
        return len(self.puzzlescript['winconditions'].winconditions)

    def get_number_of_collisionlayers(self):
        return len(self.puzzlescript['collisionlayers'].collisionlayers)

    def get_number_of_legends(self):
        return len(self.puzzlescript['legend'].legends)

    def get_number_of_objects(self):
        return len(self.puzzlescript['objects'].objects)

    def get_number_of_rule_loops(self):
        return len(self.puzzlescript['rules'].loops)

    def get_number_of_messages(self):
        return len(self.puzzlescript['levels'].messages)

    def get_average_level_width(self):
        widths = [lvl.width for lvl in self.puzzlescript['levels'].levels]
        return sum(widths) / float(len(widths))

    def get_average_level_height(self):
        heights = [lvl.height for lvl in self.puzzlescript['levels'].levels]
        return sum(heights) / float(len(heights))

    def printStats(self, scriptName):
        """Print one LaTeX table row of the headline statistics."""
        row = "%% %% %% ROW %% %% %%\n%s &\n%s &\n%s &\n%s &\n%s &\n%s &\n%s \n\\\\ \\hline" % (
            scriptName,
            self.get_number_of_objects(),
            self.get_number_of_rules(),
            self.get_number_of_levels(),
            self.get_average_level_width(),
            self.get_average_level_height(),
            self.get_number_of_winconditions(),
        )
        print(row)
class LevelAnalyzer(object):
    """Per-level metric calculators.

    NOTE(review): every scoring method below is an unimplemented stub --
    each body is only a docstring, so all calls currently return None.
    """
    def __init__(self, level):
        self.level = level
    def get_fragmentation_score(self, level, character):
        '''
        Returns the number of islands in this level that
        contain the specified character.
        (Unimplemented stub: currently returns None.)
        '''
    def get_domination_score(self, level, character):
        '''
        Unimplemented stub: currently returns None.
        '''
    def get_sparseness_score(self, level, character):
        '''
        Unimplemented stub: currently returns None.
        '''
    def get_volume_score(self, level, character):
        '''
        Unimplemented stub: currently returns None.
        '''
| {
"repo_name": "chongdashu/puzzlescript-analyze",
"path": "python/analyzer.py",
"copies": "1",
"size": "2109",
"license": "mit",
"hash": 8661229490364737000,
"line_mean": 27.5,
"line_max": 155,
"alpha_frac": 0.7031768611,
"autogenerated": false,
"ratio": 2.925104022191401,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4128280883291401,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chong-U Lim, culim@mit.edu'
import re,sys,os,copy
class Script(object):
    """A parsed PuzzleScript source file.

    Splits the text into lines and dispatches each line to the Section
    object for the most recently seen section header; everything before
    the first header goes into a PRELUDE section.
    """
    def __init__(self, txt):
        self.sections = {}  # section-type name -> Section instance
        self.txt = txt
        if (self.txt):
            self.parse(txt)
    def parse(self, txt):
        """Parse *txt* line by line into Section objects."""
        # Lines before any explicit header belong to the prelude.
        section = Section.create(Section.TYPE_PRELUDE)
        self.add_section(section)
        lines = txt.split("\n")
        for line in lines:
            if Section.is_section(line):
                # Check if the line has a keyword for the start
                # of a new section.
                print 'Parsing section: %s' %(line)
                section = self.create_section(line)
            if section:
                section.parse_line(line)
    def add_section(self, section):
        self.sections[section.type] = section
    def section(self, name):
        # Returns None for unknown section names.
        return self.sections.get(name)
    def create_section(self, name):
        """Create, register and return the Section for header *name*."""
        section = Section.create(name)
        if section:
            self.add_section(section)
        return section
    def __getitem__(self, index):
        # Allows script['LEVELS'] etc., case-insensitively.
        return self.sections[index.lower()]
    def __repr__(self):
        return "Script(%s)" %(self.sections.keys())
# ------- Elements ------- #
class PSMessage(object):
    """A MESSAGE entry, remembering how many levels preceded it."""

    def __init__(self, text, last_level_index_=0):
        # Index of the last level parsed before this message appeared.
        self.last_level_index = last_level_index_
        self.text = text

    def __repr__(self):
        return "PSMessage(%s,%s)" % (self.last_level_index, self.text)
class PSLevel(object):
    """A level grid: a list of rows, each row a list of single characters."""

    def __init__(self):
        self.definition = []  # rows of characters
        self.width = 0        # length of the most recently parsed row
        self.height = 0       # number of rows parsed so far

    def __repr__(self):
        return "PSLevel[%sx%s](%s)" % (self.width, self.height, self.definition)

    def parse_line(self, line):
        """Append one row of the level grid."""
        self.definition.append(list(line))
        self.width = len(line)
        self.height += 1

    def serialize(self):
        '''
        Returns the level flattened as a single string.
        '''
        return PSLevel.level_to_string(self)

    def get_symbols(self):
        '''
        Returns a list of all the symbols (legend characters) used in this level.
        '''
        # BUG FIX: rows are lists of characters, so they must be joined
        # row-by-row first -- "".join(self.definition) raised TypeError.
        return list(set("".join("".join(row) for row in self.definition)))

    def fill(self, row, col, oldchar, newchar):
        """Flood-fill *newchar* over the 4-connected region of *oldchar*.

        Pass oldchar=None to use the character currently at (row, col).
        """
        if oldchar is None:
            oldchar = self.definition[row][col]
        if not self.definition[row][col] == oldchar:
            return
        self.definition[row][col] = newchar
        if col > 0:
            self.fill(row, col - 1, oldchar, newchar)
        if col < self.width - 1:
            self.fill(row, col + 1, oldchar, newchar)
        if row > 0:
            self.fill(row - 1, col, oldchar, newchar)
        if row < self.height - 1:
            self.fill(row + 1, col, oldchar, newchar)

    def output(self):
        """Print the level grid to stdout."""
        for r in range(self.height):
            for c in range(self.width):
                sys.stdout.write(self.definition[r][c])
            sys.stdout.write(os.linesep)

    @staticmethod
    def level_to_string(level):
        '''
        Returns the given level flattened as a single string.
        @param level a PSLevel object.
        '''
        definition = level.definition[:]
        return "".join(["".join(col) for col in definition])

    @staticmethod
    def string_to_level(string, width, height):
        '''
        Builds a PSLevel from a flattened string (inverse of level_to_string).
        @param string the flattened level; @param width/height its dimensions.
        '''
        level = PSLevel()
        for r in range(height):
            startIndex = r * width
            endIndex = startIndex + width
            row = string[startIndex:endIndex]
            level.parse_line(row)
        return level
class PSObject(object):
    """A single OBJECTS-section entry: a declaration line plus sprite rows."""

    def __init__(self, line):
        """Parse an object declaration line ("Name [legend] ...")."""
        self.definition = []
        self.declaration = line
        tokens = line.split(" ")
        self.declaration_tokens = tokens
        self.name = tokens[0]
        if len(tokens) > 1:
            # Optional legend symbol follows the object name.
            self.legend = tokens[1]

    def __repr__(self):
        return "PSOBject(%s)" % (self.declaration,)

    def parse_line(self, line):
        """Append one row of the object's sprite definition."""
        self.definition.append(line)
class PSRule(object):
    """A RULES-section rule of the form "<lhs> -> <rhs>"."""

    def __init__(self, line):
        # BUG FIX: default both sides so __str__/__repr__ work even when
        # the line does not look like a rule (previously the attributes
        # were never assigned and __str__ raised AttributeError).
        self.lhs = None
        self.rhs = None
        match = re.match(r"((.)+)\s*->\s*((.)+)", line)
        if match:
            # NOTE: group(1) is greedy, so lhs may keep trailing spaces.
            self.lhs = match.group(1)
            self.rhs = match.group(3)

    def __repr__(self):
        return "PSRule(%s)" % str(self)

    def __str__(self):
        return "%s -> %s" % (self.lhs, self.rhs)
class PSWinCondition(object):
    """A WINCONDITIONS entry: "<op> <object>", optionally "... on <object>"."""

    OPERATOR_NO = "no"
    OPERATOR_ALL = "all"
    OPERATOR_SOME = "some"

    def __init__(self, line):
        self.text = line.strip()
        self.args = []
        self.condition = ""
        on_form = re.match("((.)+) ((.)+)\son\s((.)+)", self.text)
        if on_form:
            # "<cond> <arg0> on <arg1>"
            self.condition = on_form.group(1)
            self.args.append(on_form.group(3))
            self.args.append(on_form.group(5))
            return
        plain_form = re.match("((.)+)\s((.)+)", self.text)
        if plain_form:
            # "<cond> <arg0>"
            self.condition = plain_form.group(1)
            self.args.append(plain_form.group(3))

    def __repr__(self):
        return "PSWinCondition(%s)" % str(self)

    def __str__(self):
        rendered = "[%s] [%s]" % (self.condition, self.args[0])
        if len(self.args) > 1:
            rendered += " on [%s]" % (self.args[1])
        return rendered
class PSCollisionLayer(object):
    """One COLLISIONLAYERS row: a comma-separated list of object names."""

    def __init__(self, line):
        # Split on commas only; spaces around names are preserved.
        self.objects = line.strip().split(',')

    def __str__(self):
        return str(self.objects)

    def __repr__(self):
        return "PSCollisionLayer(%s)" % str(self)
# ------- Sections------- #
class Section(object):
    """Base class for one section of a PuzzleScript file.

    Holds a generic key->value token store; subclasses override parse_line
    to interpret their own line formats.  The base parse_line only tracks
    and skips (...) comments, returning True for lines it consumed.
    """
    TYPE_PRELUDE = "prelude"
    TYPE_OBJECTS = "objects"
    TYPE_LEGEND = "legend"
    TYPE_SOUNDS = "sounds"
    TYPE_COLLISIONLAYERS = "collisionlayers"
    TYPE_RULES = "rules"
    TYPE_WINCONDITIONS = "winconditions"
    TYPE_LEVELS = "levels"
    # All recognized section-header keywords.
    TYPES = [
        TYPE_PRELUDE,
        TYPE_OBJECTS,
        TYPE_LEGEND,
        TYPE_SOUNDS,
        TYPE_COLLISIONLAYERS,
        TYPE_RULES,
        TYPE_WINCONDITIONS,
        TYPE_LEVELS
    ]
    def __init__(self, type_):
        self.tokens = {}
        self.type = type_
        self.is_parsing_comment = False  # inside a multi-line (...) comment
    def set(self, key, value):
        self.tokens[key] = value
    def get(self, key):
        return self.tokens.get(key)
    def type(self):
        # NOTE(review): dead code -- this method is shadowed by the
        # instance attribute self.type assigned in __init__.
        return self.type
    def __repr__(self):
        return "%s{%s}" %(self.type.upper(), self.tokens.keys())
    def parse_line(self,line):
        """Consume comment lines; return True if *line* was handled here."""
        parsed_line = False
        line = line.strip()
        if self.is_parsing_comment:
            # Inside a multi-line comment: look for its closing ")".
            match = re.match("(.)*\)", line)
            if match:
                self.is_parsing_comment = False
                print "\tEnding comment: %s" %line
            return True
        match = re.match("\((.)+", line)
        if match:
            if not match.group(1) == ")":
                # "(" without an immediate ")": a multi-line comment starts.
                self.is_parsing_comment = True
                print "\tStarting comment: %s" %line
            else:
                print "\tSkipping comment: %s" %line
            parsed_line = True
        return parsed_line
    # Static methods
    ################
    @staticmethod
    def create(type_):
        """Factory: return the Section subclass instance for *type_*."""
        sectionType = type_.lower()
        if sectionType == Section.TYPE_PRELUDE:
            return PreludeSection(sectionType)
        elif sectionType == Section.TYPE_OBJECTS:
            return ObjectsSection(sectionType)
        elif sectionType == Section.TYPE_LEGEND:
            return LegendSection(sectionType)
        elif sectionType == Section.TYPE_LEVELS:
            return LevelsSection(sectionType)
        elif sectionType == Section.TYPE_RULES:
            return RulesSection(sectionType)
        elif sectionType == Section.TYPE_WINCONDITIONS:
            return WinConditionsSection(sectionType)
        elif sectionType == Section.TYPE_COLLISIONLAYERS:
            return CollisionLayersSection(sectionType)
        elif sectionType == Section.TYPE_SOUNDS:
            return SoundsSection(sectionType)
        else:
            return Section(sectionType)
    @staticmethod
    def is_section(name):
        return name.lower() in Section.TYPES
    @staticmethod
    def is_comment(line):
        # "=====" divider lines are treated as comments.
        return re.match("(=)+",line) is not None
    @staticmethod
    def is_keyline(line):
        """A key line is a section header or an "=====" divider."""
        return Section.is_section(line) or Section.is_comment(line)
class SoundsSection(Section):
    """SOUNDS section: collects each non-empty, non-keyline line verbatim."""

    def __init__(self, type_):
        Section.__init__(self, type_)
        self.sounds = []

    def parse_line(self, line):
        # Let the base class swallow comment lines first.
        if Section.parse_line(self, line):
            return
        entry = line.strip()
        if entry and not Section.is_keyline(line):
            self.sounds.append(entry)
class CollisionLayersSection(Section):
    """COLLISIONLAYERS section: one PSCollisionLayer per non-comment line."""

    def __init__(self, type_):
        Section.__init__(self, type_)
        self.collisionlayers = []

    def parse_line(self, line):
        # Let the base class swallow comment lines first.
        if Section.parse_line(self, line):
            return
        if line.strip() and not Section.is_keyline(line):
            self.collisionlayers.append(PSCollisionLayer(line))
class WinConditionsSection(Section):
    """WINCONDITIONS section: one PSWinCondition per non-comment line."""
    def __init__(self, type_):
        Section.__init__(self,type_)
        self.winconditions = []
    def parse_line(self, line):
        # Let the base class swallow comment lines first.
        parsed_line = Section.parse_line(self,line)
        if parsed_line :
            return
        if line.strip() and not Section.is_keyline(line):
            print '\tParsing Win Condition: %s' %(line.strip())
            wincondition = PSWinCondition(line.strip())
            self.winconditions.append(wincondition)
class RulesSection(Section):
    """RULES section: parses rules plus startloop/endloop rule groups."""
    def __init__(self, type_):
        Section.__init__(self,type_)
        self.rules = []
        self.loops = []           # completed startloop/endloop groups
        self.current_loop = None  # loop currently being collected, if any
    def parse_line(self, line):
        # Let the base class swallow comment lines first.
        parsed_line = Section.parse_line(self,line)
        if parsed_line:
            return
        if line.strip() and line.strip().lower() == "startloop":
            self.current_loop = [];
        elif line.strip() and line.strip().lower() == "endloop":
            self.loops.append(self.current_loop)
            self.current_loop = None
        elif line.strip() and not Section.is_keyline(line):
            print '\tParsing Rule: %s' %(line.strip())
            rule = PSRule(line.strip())
            self.rules.append(rule)
            # Rules are also keyed by their left-hand side.
            self.tokens[rule.lhs] = rule
            if self.current_loop is not None:
                self.current_loop.append(rule)
class LevelsSection(Section):
    """LEVELS section: parses level grids (PSLevel) and MESSAGE lines.

    A level runs from its first non-empty line to the next blank line.
    """
    def __init__(self, type_):
        Section.__init__(self,type_)
        # Indicates if already parsed an object declaration
        # and that we are now currently parsing the definition
        # of the object.
        self.isParsingLevel = False
        self.levels = []
        self.messages = []
        self.current_level = None
    def parse_line(self, line):
        if self.isParsingLevel:
            if line.strip():
                # Case (1): Non-empty line means that we are still parsing the
                # level definition.
                self.current_level.parse_line(line)
            else:
                # Case (2): Empty line means that we are done.
                self.isParsingLevel = False
                self.tokens[len(self.levels)] = self.current_level
                self.levels.append(self.current_level)
                print '\tCreated new level: %s' %(self.current_level)
        else:
            if line.strip() and not Section.is_keyline(line):
                # Case (3): Non-empty line. Need to check if start of
                # a level, or a message.
                match = re.match("MESSAGE ((.)+)", line.strip(), re.IGNORECASE)
                if match:
                    # Case (3a): A message.
                    print '\tCreating new message: %s' %(line)
                    text = match.group(1)
                    message = PSMessage(text, len(self.levels))
                    self.messages.append(message)
                else:
                    # Case (3b): A level definition start.
                    # print '\tCreating new level: %s' %(line)
                    self.current_level = PSLevel()
                    self.current_level.parse_line(line)
                    self.isParsingLevel = True
class LegendSection(Section):
    """LEGEND section: maps a symbol to a tuple of object names.

    "X = A and B" and "X = A or B" both become ("A", "B"); the and/or
    distinction is not preserved here.
    """
    def __init__(self, type_):
        Section.__init__(self,type_)
        self.legends = self.tokens  # alias: legends share the token store
    def parse_line(self, line):
        if line.strip() and not Section.is_keyline(line):
            print '\tParsing Legend: %s' %(line)
            line_tokens = line.split("=")
            key = line_tokens[0].strip()
            if re.match("[a-zA-Z]+ and [a-zA-Z]+", line_tokens[1].strip()):
                val = tuple(x.strip() for x in line_tokens[1].strip().split("and"))
            else:
                val = tuple(x.strip() for x in line_tokens[1].strip().split("or"))
            self.tokens[key] = val
class ObjectsSection(Section):
    """OBJECTS section: parses object declarations and their sprite rows.

    A declaration line opens a PSObject; subsequent non-empty lines are its
    definition, terminated by a blank line.
    """
    def __init__(self, type_):
        Section.__init__(self,type_)
        # Indicates if already parsed an object declaration
        # and that we are now currently parsing the definition
        # of the object.
        self.isParsingDefinition = False
        self.objects = self.tokens  # alias: objects share the token store
    def parse_line(self, line):
        if self.isParsingDefinition:
            if line.strip():
                # Case (1): Non-empty line means that we are still parsing the
                # objects definition.
                self.current_object.parse_line(line)
            else:
                # Case (2): Empty line means that the definition is ending.
                self.isParsingDefinition = False
                self.tokens[self.current_object.name] = self.current_object
        else:
            if line.strip() and not Section.is_keyline(line):
                print '\tCreating new object: %s' %(line)
                self.current_object = PSObject(line)
                self.isParsingDefinition = True
class PreludeSection(Section):
    """PRELUDE section: "key value" header lines (title, author, ...)."""
    TOKEN_AUTHOR = "author"
    TOKEN_COLOR_PALETTE = "color_palette"
    TOKEN_AGAIN_INTERVAL = "again_interval"
    # NOTE(review): value "debug" looks like a copy/paste slip -- probably
    # meant "background_color"; confirm before relying on this constant.
    TOKEN_BACKGROUND_COLOR = "debug"
    TOKEN_FLICKSCREEN = "flickscreen"
    TOKEN_HOMEPAGE = "homepage"
    TOKEN_KEY_REPEAT_INTERVAL = "key_repeat_interval"
    TOKEN_NOACTION = "noaction"
    TOKEN_NORESTART = "norestart"
    # NOTE(review): "moment" looks like a typo for "movement" -- confirm
    # against PuzzleScript's prelude flag names before using.
    TOKEN_REQUIRE_PLAYER_MOMENT = "require_player_moment"
    TOKEN_RUN_RULES_ON_LEVEL_START = "run_rules_on_level_start"
    TOKEN_SCANLINE = "scanline"
    TOKEN_TEXT_COLOR = "text_color"
    TOKEN_TITLE = "title"
    TOKEN_VERBOSE_LOGGING = "verbose_logging"
    TOKEN_YOUTUBE = "youtube"
    TOKEN_ZOOMSCREEN = "zoomscreen"
    def __init__(self, type_):
        Section.__init__(self, type_)
        self.title = None
        self.author = None
        self.homepage = None
    def parse_line(self,line):
        """Store any "key value" pair; keys are lowercase words."""
        match = None
        # Title
        match = re.match("([a-z]+) +(.+)", line)
        if match:
            token = match.group(1)
            value = match.group(2)
            self.set(token, value)
| {
"repo_name": "chongdashu/puzzlescript-analyze",
"path": "python/puzzlescript.py",
"copies": "1",
"size": "12603",
"license": "mit",
"hash": -1530629649629180000,
"line_mean": 22.6011235955,
"line_max": 75,
"alpha_frac": 0.6592874712,
"autogenerated": false,
"ratio": 2.9724056603773583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41316931315773586,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chong-U Lim, culim@mit.edu'
__version__ = '2014.02.23'
import datetime
class Arff(object):
    """Builder for a Weka ARFF file: title, sources, attributes, data rows."""
    def __init__(self):
        self.title = "Untitled"
        self.sources = []
        self.relation = "default"
        self.attributes = []
        self.instances = 0  # number of data rows; set from the first attribute
    def setTitle(self, title):
        self.title = title
    def addSource(self, source):
        self.sources.append(source)
    def setRelation(self, relation):
        self.relation = relation
    def addAttribute(self, attribute):
        """Add a column; all attributes must have the same number of values."""
        self.attributes.append(attribute)
        if self.instances == 0:
            self.instances = len(attribute.values)
        elif self.instances != len(attribute.values):
            print "Warning! Mismatch in number of instances for attribute %s" %(attribute.name)
    def write(self, filename):
        """Write the collected data to *filename* in ARFF format."""
        f = open(filename, "w")
        ## TITLE ##
        ###########
        # "%%" renders a literal "%" (ARFF's comment marker).
        f.write('%% 1. Title: %s\n' %(self.title))
        ## SPACING ##
        #############
        f.write('%\n')
        ## SOURCES ##
        ###########
        f.write('% 2. Sources:\n')
        # Sources are lettered (a), (b), ... with the date appended last.
        for index,source in enumerate(self.sources):
            f.write('%%\t(%s) %s\n' %(chr(ord('a')+index), source))
        f.write('%%\t(%s) Date: %s\n' %(chr(ord('a')+len(self.sources)), str(datetime.datetime.today())))
        ## SPACING ##
        #############
        f.write('%\n')
        ## RELATION ##
        #############
        f.write('@RELATION %s\n' %self.relation)
        ## EMPTY-SPACING ##
        f.write('\n')
        ## ATTRIBUTE DECLARATION ##
        ###########################
        for index,attribute in enumerate(self.attributes):
            if attribute.type == Attribute.TYPE_CLASS:
                # Class attributes list their nominal labels as {a,b,...}.
                f.write('@ATTRIBUTE %s %s\n' %(attribute.name, str(attribute.types).replace('[','{').replace(']','}').replace("'","")))
            else:
                f.write('@ATTRIBUTE %s %s\n' %(attribute.name, attribute.type))
        ## EMPTY-SPACING ##
        f.write('\n')
        ## DATA ##
        ##########
        f.write('@DATA\n')
        ## EMPTY-SPACING ##
        # f.write('\n')
        # One comma-separated row per instance; string values are quoted.
        for instance in range(self.instances):
            for index, attribute in enumerate(self.attributes):
                #print "instance=%s, index=%s, attribute=%s" %(instance, index,attribute.name)
                if attribute.type == Attribute.TYPE_STRING:
                    f.write('"%s"' %(attribute.values[instance]))
                else:
                    f.write("%s" %(attribute.values[instance]))
                if index < len(self.attributes)-1:
                    f.write(",")
                else:
                    f.write("\n")
        f.close()
class Attribute(object):
    """A single ARFF attribute: a name, a type tag and its column values."""

    TYPE_NUMERIC = "NUMERIC"
    TYPE_CLASS = "CLASS"
    TYPE_STRING = "STRING"

    def __init__(self, name, type_, values=None):
        # BUG FIX: `values=[]` was a shared mutable default -- every
        # Attribute created without explicit values aliased the *same*
        # list, so appending to one mutated all of them.
        self.name = name
        self.type = type_
        self.values = [] if values is None else values
class ClassAttribute(Attribute):
    """An ARFF nominal (class) attribute with a closed set of labels."""

    def __init__(self, name, types_, values=None):
        # BUG FIX: `values=[]` was a shared mutable default list.
        self.name = name
        self.type = Attribute.TYPE_CLASS
        self.values = [] if values is None else values
        # De-duplicate the labels; NOTE set() makes the order arbitrary.
        self.types = list(set(types_))
| {
"repo_name": "chongdashu/puzzlescript-analyze",
"path": "python/weka.py",
"copies": "1",
"size": "2635",
"license": "mit",
"hash": 8366106543745340000,
"line_mean": 22.9545454545,
"line_max": 123,
"alpha_frac": 0.5984819734,
"autogenerated": false,
"ratio": 2.957351290684624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4055833264084624,
"avg_score": null,
"num_lines": null
} |
__author__ = "Chris Barnett"
__version__ = "0.3"
__license__ = "MIT"
def post_rings_kcf_to_image(inputstream):
    """
    posts kcf to the image converter at RINGS
    'http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/KCFtoIMAGE/KCF_to_IMAGE.pl'
    :param inputstream: read and then passed to the textarea in web form
    :return: the response HTML (minus its first 13 lines) as one string, or
        [] when *inputstream* is missing.  Uses the Python 2 urllib API.
    """
    import urllib
    if inputstream is None or inputstream == []:
        return []
    # URL to post to
    # changed url to action url found in the form source of the linearcodetokcf page
    url = 'http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/KCFtoIMAGE/KCF_to_IMAGE.pl'
    kcfdata = inputstream.read()
    file = ""
    # values contains all the names of the items in the form and the appropriate data
    values = dict(KCF=kcfdata, KCFfile=file, submit='SUBMIT')
    html = urllib.urlopen(url, urllib.urlencode(values)).readlines()
    # The first 13 lines are page boilerplate; join the rest into a string.
    return ''.join(html[13:])
    # note in this example the images are embedded in the html
# note in this example the images are embedded in the html
def get_first_image_from_html(html):
    """Extract the first glycan image from RINGS HTML as decoded PNG bytes.

    Images are base64-embedded in the page; index 0 is the RINGS logo, so
    the glycan is the second <img>.  Raises IOError when no glycan image is
    present.  (Python 2 only: BeautifulSoup 3 and str.decode("base64").)
    """
    from BeautifulSoup import BeautifulSoup
    soup = BeautifulSoup(html)
    tags = soup.findAll(name='img') # have to use explicit name= , as source html is damaged *by me..
    imgsrc = (list(tag['src'] for tag in tags))
    if len(imgsrc) > 1: # rings logo image and at least one glycan image (thus greater than 1 i.e. 2 or more)
        _, base64img = imgsrc[1].split(",") # get the first glycan image
    else:
        raise IOError("Server did not return an image. Error could be remote or your file.")
    # unfortunately only the first img is saved to png
    # use pillow or just view the html to see everything
    # cool example http://stackoverflow.com/questions/10647311/how-to-merge-images-using-python-pil-library
    return base64img.decode("base64")
# Command-line entry point: read a KCF file, post it to RINGS, and save
# both the returned HTML and the first embedded image as a PNG.
if __name__ == "__main__":
    import sys
    try:
        inputname = sys.argv[1]
        pngoutputname = sys.argv[2]
        htmloutputname = sys.argv[3]
    except Exception as e:
        raise Exception(e, "Please pass an input, pngoutput and htmloutput filename as arguments")
    # NOTE: file() is the Python 2 spelling of open().
    instream = file(inputname, 'r')
    pngoutstream = file(pngoutputname, "wb")
    htmloutstream = file(htmloutputname, "w")
    try:
        html = post_rings_kcf_to_image(instream)
        img = get_first_image_from_html(html)
        htmloutstream.write(html)
        pngoutstream.write(img)
    except Exception as e:
        raise
    finally:
        # Always close all three streams, even on failure.
        instream.close()
        pngoutstream.close()
        htmloutstream.close()
| {
"repo_name": "chrisbarnettster/cfg-analysis-on-heroku-jupyter",
"path": "notebooks/scripts/post_kcf_to_image.py",
"copies": "1",
"size": "2501",
"license": "mit",
"hash": -6321263924505291000,
"line_mean": 35.7794117647,
"line_max": 109,
"alpha_frac": 0.6617353059,
"autogenerated": false,
"ratio": 3.46879334257975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46305286484797503,
"avg_score": null,
"num_lines": null
} |
__author__ = "Chris Barnett"
__version__ = "0.5.2"
__license__ = "MIT"
from BeautifulSoup import BeautifulSoup
import mechanize
class PrettifyHandler(mechanize.BaseHandler):
    """mechanize response handler that prettifies HTML responses in place.

    Non-HTML responses pass through untouched.  (Python 2: relies on
    BeautifulSoup 3 and mechanize's headers `.dict` attribute.)
    """
    def http_response(self, request, response):
        if not hasattr(response, "seek"):
            # Wrap so the body can be re-read after we replace it.
            response = mechanize.response_seek_wrapper(response)
        # only use BeautifulSoup if response is html
        if response.info().dict.has_key('content-type') and ('html' in response.info().dict['content-type']):
            soup = BeautifulSoup(response.get_data())
            response.set_data(soup.prettify())
        return response
def mechanise_glycan_convert(inputstream, format, textformat="json", debug=False):
    """
    Use mechanise to submit input glycan and formats to the new converter tool at RINGS
    :param inputstream: input glycan file stream that is read and then passed to the textarea in web form
    :param format: format to convert to. Options change dependent on input type. ['Glycoct', 'Linearcode', 'Mol', 'Wurcs'])
    :param textformat: output returned in text, json or html. default is text.
    :param debug: print debug info from mechanise
    :raises IOError: on empty input, an unsupported target format, an
        unknown textformat, or an empty server response.
    Can convert to WURCS, mol, Glycoct, Glycoct{condensed}, LinearCode, KCF.
    Does not yet support Linucs as input format. Cannot convert to GLYDE2, IUPAC, Linucs,
    Converts to image when kcf html is returned.
    http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/index.pl
    http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/convert_index2.pl
    which then directs to http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/convert_index2.pl
    http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/convert.pl
    """
    import mechanize
    import cookielib
    if inputstream is None or inputstream == []:
        raise IOError("empty input stream")
        #return None
    if format is None or format == "":
        return inputstream #
    # create a Browser
    br = mechanize.Browser()
    br.add_handler(PrettifyHandler())
    # handle cookies - Cookie Jar
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    # Browser options
    br.set_handle_equiv(True)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    # Follows refresh 0 but not hangs on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    # Want debugging messages?
    br.set_debug_http(debug)
    br.set_debug_redirects(debug)
    br.set_debug_responses(debug)
    br.addheaders = [('User-agent',
                      'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    # Open site
    page = 'http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/index.pl'
    br.open(page)
    # Show the response headers
    response_info = br.response().info()
    # select the input form
    br.select_form(nr=0)
    # read user glycan and submit
    glycandata = inputstream.read() # read into a variable as I need it later
    br.form["in_data"] = glycandata
    br.submit()
    # the submit redirects to another page which is generated based on the input glycan
    # now submit second form
    br.select_form(nr=0)
    #. bugfix - reinsert data as 7 spaces get prepended to it during the post ?!
    br.form["in_data"] = glycandata
    # look at the convert_to control. These are the options allowed for the particular input as determined by rings
    control = br.form.find_control("convert_to", type="select")
    available_formats = [item.attrs['value'] for item in control.items] # should match user selected format .
    # check that the user entered format is in the available formats, if not raise error
    if format not in available_formats:
        raise IOError ("Requested ", format," but input glycan can only be converted to the following formats: ",available_formats )
    # set the format
    br.form["convert_to"]=[format,]
    # check and set textformat
    if textformat not in ["json","html","text"]:
        raise IOError ("Requested ", textformat, " but can only output json, html or text" )
    br.form["type"]=[textformat,]
    #submit conversion and get response
    br.submit()
    #import time
    #time.sleep(5)
    response = br.response().read() # the converted file!
    if response is None or str(response.strip()) == '':
        raise IOError("empty response, I recommend using the json format")
        #return None
    return response
def clean_json_response(response):
    """
    # look at json status has failure status, submitData has original data and result has output with format
    :param response: json from RINGS convert
    :return: a list of glycan structures
    :raises IOError: if any structure in the response reports status "false"
    """
    import json
    import re
    import StringIO
    # RINGS bug, additional data returned with JSON format. Clean this up
    jsonoutputasfilehandle = StringIO.StringIO(''.join(response))
    keeplines=[] # only keep lines that look like JSON
    for line in jsonoutputasfilehandle.readlines():
        if line[0]=="[":
            keeplines.append(line)
    response2=''.join(keeplines)
    # RINGs bug. Now remove junk data appended to the JSON lines for example "}}]GLC "
    p = re.compile( '(}}].*)')
    jsontobeparsed = p.subn( '}}]', response2)
    # load json
    loaded_as_json = json.loads(jsontobeparsed[0])
    structures=[]
    # there could be multiple structures so iterate over, structures are numbered using the No tag.
    # .. matches for linearcode and eol fixes
    linearcodefix = re.compile( '(;$)')
    eolfix = re.compile( '(\n$)')
    for glycan in loaded_as_json:
        if str(glycan["status"]) == "false":
            raise IOError(glycan["result"]["message"]) # raise error even though not all structures need be broken. It is no use letting through a broken structure. important to let the user know.
        else:
            # bugfix remove ";" from end of LinearCode
            lcfixed = linearcodefix.subn( '\n', str(glycan["result"]["structure"]))[0]
            # now remove all \n at end of each sequence. Some have, some don't, so remove all and add later
            eolfixed = eolfix.subn( '', lcfixed)[0]
            structures.append( eolfixed)
    return structures
def defunct_post_rings_convert(inputstream):
    """Post a glycan stream to the RINGS convert tool (defunct).

    Defunct because the page resulting from the POST is dynamic and requires
    a second POST, which urllib2 cannot easily follow; kept for reference.

    Form endpoint chain:
    http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/index.pl
    -> http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/convert_index2.pl
    -> http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/convert.pl

    :param inputstream: file-like object; its content is sent as the form textarea
    :return: (response object, page body, final URL), or [] for empty input
    """
    import urllib2, urllib
    if inputstream is None or inputstream == []:
        return []
    # action URL found in the form source of the linearcodetokcf page
    url = 'http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/convert_index2.pl'
    in_data = inputstream.read()
    # removed dead local `file = ""` which shadowed the builtin and was unused
    # values contains all the names of the items in the form and the data
    values = dict(in_data=in_data, datasetname="default1", submit='SUBMIT')
    response = urllib2.urlopen(url, urllib.urlencode(values))
    the_page = response.read()
    the_url = response.geturl()
    return response, the_page, the_url
# . page resulting from post is dynamic and I then have to post to this. urllib2 cannot do this easily.
def defunct_automated_glycan_convert(inputstream):
    """
    Drive the RINGS convert web form with Selenium (defunct).

    Seems cool, but it starts a real browser: there is no strictly headless
    solution with selenium here.  Posts a hard-coded KCF example to the RINGS
    converter:
    http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/index.pl
    http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/convert_index2.pl
    which then directs to http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/convert_index2.pl
    http://rings.t.soka.ac.jp/cgi-bin/tools/utilities/convert/convert.pl

    :param inputstream: unused — the KCF data sent below is hard-coded
    """
    # NOTE(review): By/Keys/exception imports and `time, re` are unused here;
    # they look like leftovers from a Selenium IDE export.
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.common.keys import Keys
    from selenium.webdriver.support.ui import Select
    from selenium.common.exceptions import NoSuchElementException
    from selenium.common.exceptions import NoAlertPresentException
    import time, re
    driver = webdriver.Firefox()
    # wait up to 30s for elements to appear before failing lookups
    driver.implicitly_wait(30)
    base_url = "http://rings.t.soka.ac.jp/"
    verificationErrors = []
    accept_next_alert = True
    driver.get(base_url + "/cgi-bin/tools/utilities/convert/index.pl")
    # fill the dataset name and glycan textarea, then submit the first form
    driver.find_element_by_name("datasetname").clear()
    driver.find_element_by_name("datasetname").send_keys("default1")
    driver.find_element_by_name("in_data").clear()
    driver.find_element_by_name("in_data").send_keys(
        "ENTRY 12345 Glycan\nNODE 2\n 1 galnac 0 0\n 2 gal -8 0\nEDGE 1\n 1 2:1 1\n///")
    driver.find_element_by_css_selector("input[type=\"submit\"]").click()
    # pick the output format on the second page and submit again
    Select(driver.find_element_by_name("convert_to")).select_by_visible_text("WURCS")
    # ERROR: Caught exception [Error: Dom locators are not implemented yet!]
    driver.find_element_by_css_selector("input[type=\"submit\"]").click()
def defunct_driver_glycan_convert(inputstream):
    """
    Drive the RINGS convert form with Selenium + Firefox (defunct).

    Seems cool, but it starts a real browser: there is no strictly headless
    solution with selenium.  Prints the resulting page source instead of
    returning a value.

    :param inputstream: unused — the KCF data sent below is hard-coded
    """
    from contextlib import closing
    from selenium.webdriver import Firefox # pip install selenium
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support.ui import Select
    base_url = "http://rings.t.soka.ac.jp/"
    url = base_url + "/cgi-bin/tools/utilities/convert/index.pl"
    # use firefox to get page with javascript generated content;
    # closing() guarantees the browser is torn down afterwards
    with closing(Firefox()) as browser:
        browser.get(url)
        # fill the dataset name and glycan textarea, then submit the first form
        browser.find_element_by_name("datasetname").clear()
        browser.find_element_by_name("datasetname").send_keys("default1")
        browser.find_element_by_name("in_data").clear()
        browser.find_element_by_name("in_data").send_keys(
            "ENTRY 12345 Glycan\nNODE 2\n 1 galnac 0 0\n 2 gal -8 0\nEDGE 1\n 1 2:1 1\n///")
        browser.find_element_by_css_selector("input[type=\"submit\"]").click()
        # wait for the second (format-selection) page to load
        WebDriverWait(browser, timeout=10).until(
            lambda x: x.find_element_by_id('convert_to'))
        Select(browser.find_element_by_name("convert_to")).select_by_visible_text("WURCS")
        browser.find_element_by_css_selector("input[type=\"submit\"]").click()
        # store it to string variable and dump it for inspection
        page_source = browser.page_source
        print(page_source)
if __name__ == "__main__":
    # Command-line driver: convert a glycan file via RINGS and write the result.
    from optparse import OptionParser
    usage = "usage: python %prog [options]\n"
    parser = OptionParser(usage=usage)
    parser.add_option("-i", action="store", type="string", dest="i", default="input",
                      help="input any glycan file (input)")
    parser.add_option("-f", action="store", type="string", dest="f", default="Kcf",
                      help="format to convert to (Kcf)")
    parser.add_option("-t", action="store", type="string", dest="t", default="text",
                      help="format style, text, html or json")
    parser.add_option("-o", action="store", type="string", dest="o", default="output",
                      help="output glycan file (output)")
    parser.add_option("-j", action="store", type="string", dest="j", default="jsonoutput",
                      help="output json output, only if json format is selected (output.json)")
    (options, args) = parser.parse_args()
    try:
        # open() instead of the deprecated Python-2-only file() builtin
        instream = open(options.i, 'r')
    except Exception as e:
        raise IOError(e, "the input file specified does not exist. Use -h flag for help")
    m = mechanise_glycan_convert(instream, options.f, options.t)
    instream.close()
    if options.t == "text" or options.t == "html":
        with open(options.o, 'w') as f:
            f.write(m)
    elif options.t == "json":
        # keep the raw JSON (-j) as well as the cleaned structure list (-o)
        with open(options.j, 'w') as f1:
            f1.write(m)
        with open(options.o, 'w') as f:
            f.write("\n".join(clean_json_response(m)))
| {
"repo_name": "chrisbarnettster/cfg-analysis-on-heroku-jupyter",
"path": "notebooks/scripts/post_glycan_convert.py",
"copies": "1",
"size": "12539",
"license": "mit",
"hash": -7614142839339036000,
"line_mean": 43.1514084507,
"line_max": 196,
"alpha_frac": 0.6596219794,
"autogenerated": false,
"ratio": 3.6083453237410072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9741054988266584,
"avg_score": 0.005382462974884595,
"num_lines": 284
} |
from __future__ import print_function, division
import numpy as np
import pandas as pd
from subprocess import call
from os import path
from sys import argv
# Results table from the jamierod runs; one row per exposure id.
jamierod_results_path = '/nfs/slac/g/ki/ki18/des/cpd/jamierod_results.csv'
jamierod_results = pd.read_csv(jamierod_results_path)
#out_dir = '/nfs/slac/g/ki/ki18/des/cpd/DeconvOutput'
out_dir = '/nfs/slac/g/ki/ki18/des/swmclau2/DeconvOutput'
#code_path = '/nfs/slac/g/ki/ki18/cpd/Projects/WavefrontPSF/code/DeconvolvePSF/afterburner.py'
code_path = '/u/ki/swmclau2/Git/DeconvolvePSF/DeconvolvePSF/afterburner.py'
if len(argv) == 1: #do a random collection of 40
    # choose a random 40 expids from jamierod results
    indx_choice = np.random.choice(len(jamierod_results), 40)
    expids = jamierod_results.iloc[indx_choice]['expid']
    expids = list(expids)
else:
    # BUGFIX: was `sys[1:]`, a NameError — only `argv` is imported from sys
    expids = [int(expid) for expid in argv[1:]]
req_time = 240  # minutes of wall-clock time requested from the batch queue
for expid in expids:
    print(expid)
    # skip exposures whose zero-padded output directory already exists
    if path.exists(out_dir + '/{0:08d}'.format(expid)):
        print('{0} exists!'.format(expid))
        continue
    # submit one LSF job per exposure, logging to <out_dir>/<expid>.log
    call(['bsub',
          '-J', '{0}PSF'.format(expid),
          '-o', out_dir + '/{0}.log'.format(expid),
          '-W', str(req_time),
          'python', code_path,
          str(expid), out_dir])
| {
"repo_name": "aaronroodman/DeconvolvePSF",
"path": "src/do_call.py",
"copies": "1",
"size": "1556",
"license": "mit",
"hash": 7853368597436541000,
"line_mean": 34.3636363636,
"line_max": 94,
"alpha_frac": 0.6503856041,
"autogenerated": false,
"ratio": 2.897579143389199,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4047964747489199,
"avg_score": null,
"num_lines": null
} |
AUTHOR = 'Chris Dent'
AUTHOR_EMAIL = 'cdent@peermore.com'
NAME = 'tiddlywebplugins.twimport'
# typo fixed: "TiddyWeb" -> "TiddlyWeb"
DESCRIPTION = 'TiddlyWiki and tiddler import tools for TiddlyWeb'
VERSION = '1.1.1'
import os
from setuptools import setup, find_packages
CLASSIFIERS = """
Environment :: Web Environment
License :: OSI Approved :: BSD License
Operating System :: OS Independent
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.3
Topic :: Internet :: WWW/HTTP :: WSGI
""".strip().splitlines()
# Read the README with an explicit close; the previous bare open() call
# leaked the file handle.
with open(os.path.join(os.path.dirname(__file__), 'README')) as readme:
    LONG_DESCRIPTION = readme.read()
setup(
    namespace_packages = ['tiddlywebplugins'],
    name = NAME,
    version = VERSION,
    description = DESCRIPTION,
    long_description = LONG_DESCRIPTION,
    author = AUTHOR,
    url = 'http://pypi.python.org/pypi/%s' % NAME,
    packages = find_packages(exclude=['test']),
    author_email = AUTHOR_EMAIL,
    classifiers = CLASSIFIERS,
    platforms = 'Posix; MacOS X; Windows',
    install_requires = ['setuptools',
                        'tiddlyweb>=2.0.0',
                        'tiddlywebplugins.utils',
                        'html5lib'],
    zip_safe = False
    )
| {
"repo_name": "tiddlyweb/tiddlywebplugins.twimport",
"path": "setup.py",
"copies": "1",
"size": "1086",
"license": "bsd-3-clause",
"hash": 4211076501607947000,
"line_mean": 26.15,
"line_max": 86,
"alpha_frac": 0.664825046,
"autogenerated": false,
"ratio": 3.503225806451613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4668050852451613,
"avg_score": null,
"num_lines": null
} |
from configparser import ConfigParser
from os import path
from subprocess import Popen, CalledProcessError, PIPE, STDOUT
import shlex
def main():
    """Rotate rsync-style snapshot directories as configured in settings.ini.

    For each configured directory: delete the oldest snapshot, shift every
    snapshot index up by one, then hard-link snapshot 01 to a fresh 00.
    The rm -> mv -> cp -al ordering is load-bearing.
    """
    config_file = 'settings.ini'
    # Check if the ini file exists, create if not
    if not path.isfile(config_file):
        create_ini(config_file)
        print(config_file, ' not found, a default one has been created. Set it up and then re-run.')
        quit()
    # Read in all the settings
    config = ConfigParser()
    config.read(config_file)
    backup_root = config.get('DEFAULT', 'backup_root')
    directories = config.get('DEFAULT', 'directories').split()
    dir_prefix = config.get('DEFAULT', 'dir_prefix')
    max_backups = config.getint('DEFAULT', 'max_backups')
    debug = config.getboolean('DEFAULT', 'debug')
    # Drop out one from the # of backups: max_backups becomes the highest
    # zero-based snapshot index.
    max_backups -= 1
    for directory in directories:
        print('Working folder: ' + directory)
        directory = backup_root + directory + '/'
        last_backup = directory + dir_prefix + str('%02d' % max_backups)
        print('Removing last backup')
        try:
            run_cmd('rm -R ' + last_backup, debug)
        except CalledProcessError as err:
            # NOTE(review): err.output is the bytes captured by run_cmd;
            # str() on bytes still contains the message text, so the
            # substring test below works — confirm if run_cmd ever decodes.
            if "No such file or directory" not in str(err.output):
                raise
            print("Last backup doesn't exist. Skipping.")
        # Loop through the snapshots and move each backup by one in the chain
        print('Moving target folders')
        for x in range(max_backups, 0, -1):
            src_index = x - 1
            dest = directory + dir_prefix + ('%02d' % x)
            src = directory + dir_prefix + ('%02d' % src_index)
            try:
                run_cmd('mv ' + src + ' ' + dest, debug)
            except CalledProcessError as err:
                if "No such file or directory" not in str(err.output):
                    raise
                print("Cannot move directories... source doesn't exist.")
        # cp -al: hard-link copy, so unchanged files share disk space
        print('Hard linking first snapshot to current snapshot')
        run_cmd('cp -al ' + directory + dir_prefix + '01 ' + directory + dir_prefix + '00', debug)
def run_cmd(cmd, debug):
    """Run a shell command, raising CalledProcessError on non-zero exit.

    :param cmd: command line as a single string; split with shlex
    :param debug: if True, print the command instead of executing it
    :return: None
    :raises CalledProcessError: carries the combined stdout/stderr in .output
    """
    if debug:
        print(cmd)
        return
    # merge stderr into stdout so the error text is available on failure
    process = Popen(shlex.split(cmd), stdout=PIPE, stderr=STDOUT)
    dump_output = process.communicate()[0]
    # communicate() has already waited for the process, so the extra
    # process.wait() call was redundant; returncode is set at this point
    exit_code = process.returncode
    if exit_code != 0:
        print(dump_output)
        raise CalledProcessError(exit_code, cmd, dump_output)
def create_ini(config_file):
    """Write a template settings file with placeholder defaults for the user to edit."""
    defaults = {
        'backup_root': '/path/to/backups/',
        'directories': 'dir1 dir2 dir3',
        'dir_prefix': 'daily.',
        'max_backups': 15,
        'debug': True,
    }
    parser = ConfigParser()
    parser['DEFAULT'] = defaults
    with open(config_file, 'w') as handle:
        parser.write(handle)
if __name__ == '__main__':
main() | {
"repo_name": "ChrisEby/SnapshotCycle",
"path": "snapshot_cycle.py",
"copies": "1",
"size": "2847",
"license": "mit",
"hash": 5450643881733936000,
"line_mean": 31.7356321839,
"line_max": 101,
"alpha_frac": 0.5904460836,
"autogenerated": false,
"ratio": 3.9707112970711296,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.506115738067113,
"avg_score": null,
"num_lines": null
} |
__author__ = "Chris Greene"
import pdb
from dolfin import *
import time
import montecarlo_mockup as mc
import move_particles_c as c_interface
import numpy as np
import dolfin_util as du
import mcoptions,sys,os
import re
import photocurrent as pc
import density_funcs
import materials
import meshes
class Problem:
    """Attribute bag populated by init_problem() with the FEM spaces, BCs and densities."""
    pass
class DolfinFiles:
    """Attribute bag populated by init_dolfin_files() with dolfin File output handles."""
    pass
class ResultsFile:
    """Attribute bag populated by init_files() with open result files (current, density, trajectory)."""
    pass
options = mcoptions.get_options()
def custom_func(mesh, V):
    """Return a fresh dolfin Function on the space V (mesh is currently unused)."""
    return Function(V)
def init_problem(mesh,V,V2,options):
    """Build the Problem container: boundary conditions, density functions, averages.

    mesh    -- simulation mesh exposing InnerBoundary/OuterBoundary
    V, V2   -- scalar and vector function spaces
    options -- run options; options.V is the applied bias voltage
    Returns a populated Problem instance.
    """
    print "Initializing Probleming"
    problem = Problem()
    problem.space = V
    # Define boundary condition
    #for reasons I don't know, pBoundary needs to be
    #kept globally
    pBoundary = Constant(0.0)
    nBoundary = Constant(options.V)
    bc0 = DirichletBC(V, pBoundary, mesh.InnerBoundary)
    bc1 = DirichletBC(V, nBoundary, mesh.OuterBoundary)
    mesh.V = options.V
    problem.bcs = [bc0,bc1]#prevent bad garbage?
    problem.boundaryFuncs = [pBoundary,nBoundary]
    problem.V = V
    problem.V2 = V2
    #init particles
    #electrons, holes
    print "adding electrons to regions"
    # mc.init_electrons(options.gen_num,mesh.n_region.keys(),
    #                   charge=-10,mesh=mesh)
    # mc.init_electrons(options.gen_num,mesh.p_region.keys(),
    #                   charge=10,mesh=mesh)
    print "Creating density functions"
    problem.density_funcs = density_funcs.DensityFuncs()
    problem.density_funcs.holes = Function(V)
    problem.density_funcs.electrons = Function(V)
    problem.density_funcs.combined_density = Function(V)
    problem.density_funcs.poisson_density = Function(V)
    problem.density_funcs.scaled_density = Function(V)
    # debug output left in: lists the vector API of the density function
    print dir(problem.density_funcs.poisson_density.vector())
    # seed poisson_density from combined_density (both start at zero)
    temp = problem.density_funcs.poisson_density.vector()
    temp[:] = problem.density_funcs.combined_density.vector().array()
    print "Creating Average Densities"
    problem.avg_dens = mc.AverageFunc(problem.density_funcs.combined_density.vector().array())
    problem.avg_holes = mc.AverageFunc(problem.density_funcs.combined_density.vector().array())
    problem.avg_electrons = mc.AverageFunc(problem.density_funcs.combined_density.vector().array())
    return problem
#this will be replaced with
#database initialization and connection
def init_dolfin_files():
    """Create the data directory and the dolfin .pvd output files.

    Returns a DolfinFiles bag with one File handle per plotted quantity.
    """
    #init Files
    print "Initializing Files"
    df = DolfinFiles()
    print "Creating Files"
    df.datadir = options.datadir
    if not os.path.exists(df.datadir):
        os.mkdir(df.datadir)
    df.file = File(df.datadir+"/poisson_attract.pvd")
    df.dfile = File(df.datadir+"/density_attract.pvd")
    df.adfile = File(df.datadir+"/avg_density.pvd")
    df.avfile = File(df.datadir+"/avg_voltage.pvd")
    df.gradfile = File(df.datadir+"/grad_force.pvd")
    df.avggradfile = File(df.datadir+"/avg_force.pvd")
    return df
#other files
def new_file(name):
print "Creating results File"
files = os.listdir("results")
num=max([0]+map(int,re.findall("([0-9]+)"+name," ".join(files))))
num += 1
filename = ("results/"+str(num)+name+
"_".join(map(str,time.gmtime())))
print "Creating:",filename
results_file = open(filename,"w")
results_file.write(str(options.V)+"\n")
results_file.write("c:"+str(options.num))
results_file.write(" scale:"+str(options.scale))
results_file.write(" particles:"+str(options.gen_num))
results_file.write(" size:"+str(options.size))
results_file.write(" tag:"+str(options.tag))
results_file.write("\n")
return results_file
def init_database():
    """Open the local sqlite run database.

    :return: an open sqlite3 connection to database/runs.db
    :raises IOError: if the database file is missing (sqlite3.connect would
        silently create an empty one) or cannot be opened
    """
    import sqlite3  # not imported at module level; keep the dependency local
    # BUGFIX: the original raised when the file *did* exist — the check must
    # fail when it is absent, since sqlite3.connect creates missing files.
    if not os.path.exists("database/runs.db"):
        raise IOError("database/runs.db does not exist. You'll need to initialize it")
    runs = sqlite3.connect("database/runs.db")
    if runs is None:
        raise IOError("Failure to open database/runs.db")
    return runs
def PoissonSolve(mesh,density,bcs,V):
    """Solve the Poisson equation for the given charge density.

    mesh    -- mesh exposing length_scale (used to scale both sides)
    density -- source-term function (charge density)
    bcs     -- Dirichlet boundary conditions
    V       -- scalar function space to solve in
    Returns the solution Function.
    """
    print "Solving Poisson Equation"
    # NOTE(review): lengthr scales the stiffness form and length scales the
    # load; presumably a unit-conversion pair — confirm against the mesh setup.
    lengthr = Constant(1./mesh.length_scale)#Constant(mesh,1./mesh.length_scale)
    length = Constant(mesh.length_scale)
    u = TrialFunction(V)
    v = TestFunction(V)
    a = dot(grad(v), grad(u))*lengthr*dx
    L = v*(density)*length*dx
    # Compute solution (VariationalProblem is the legacy dolfin API)
    problem = VariationalProblem(a, L, bcs)
    sol = problem.solve()
    return sol
def write_results(df, rf, problem, sol, electric_field, current_values):
    """Dump the current potential solution and append the latest current value."""
    df.file << sol
    # density / average-density / gradient dumps are intentionally disabled
    # (the gradient write was moved elsewhere; re-enabling it may have
    # destabilised the run)
    latest = current_values[-1]
    rf.current.write(str(latest) + "\n")
    rf.current.flush()
def final_record_files(df,rf,sol,problem,mesh):
    """Write the end-of-run outputs: final solution, densities, averages and field.

    Overwrites combined_density with the running average before dumping it,
    so call this only after the simulation loop has finished.
    """
    df.file << sol
    df.dfile << problem.density_funcs.combined_density
    #dump average
    problem.density_funcs.combined_density.vector()[:]=problem.avg_dens.func
    for x in problem.avg_dens.func:
        rf.density.write(str(x)+" ")
    df.adfile << problem.density_funcs.combined_density
    # average field: negative gradient of the potential solved from the
    # averaged density
    avgE=mc.negGradient(mesh,PoissonSolve(mesh,
                        problem.density_funcs.combined_density,
                        problem.bcs,problem.V),
                        problem.V2)
    df.avggradfile << avgE
def mainloop(mesh,system,problem,df,rf,scale):
    """Run the coupled Poisson / Monte Carlo simulation for options.num steps.

    Each step: solve Poisson from the scaled density, derive the electric
    field, advance the Monte Carlo particles, and write per-step results.
    Afterwards compute the photocurrent and dump the final records.
    """
    print "Beginning Simulation"
    current_values = []
    for x in range(options.num):
        start1 = time.time()
        #Solve equation using avg_dens
        print "combined_density",problem.density_funcs.combined_density
        sol = PoissonSolve(mesh,
                           problem.density_funcs.scaled_density,
                           problem.bcs,problem.space)
        #handle Monte Carlo
        print "Starting Step ",x
        electric_field = (mc.negGradient(mesh,sol,problem.V2))
        start2 = time.time()
        # MonteCarlo mutates the density functions, averages and
        # current_values in place
        mc.MonteCarlo(mesh,system,sol,electric_field,
                      problem.density_funcs,problem.avg_dens,
                      problem.avg_electrons,problem.avg_holes,
                      current_values)
        end2 = time.time()
        print problem.density_funcs.combined_density
        #Report
        write_results(df,rf,problem,sol,electric_field,current_values)
        end = time.time()
        print "Monte Took: ",end2-start2
        print "Loop Took:",end-start1
    #photocurrent — uses the field from the last loop iteration
    current= pc.generate_photo_current(mesh,electric_field,problem)
    rf.current.write("pc: "+str(current)+"\n")
    final_record_files(df,rf,sol,problem,mesh)
    # NOTE(review): avg_length accumulates the total trajectory length; the
    # division that would turn it into an average is commented out below,
    # so the printed value is actually a sum.
    avg_length = 0
    for particle in mesh.trajectories:
        avg_length += len(mesh.trajectories[particle])
        #rf.trajectory.write(str(mesh.trajectories[particle]))
        #rf.trajectory.write("\n")
    print current_values
#    avg_length /= 1.*len(mesh.trajectories)
    print "Average trajectory length:",avg_length
def init_files():
    """Create the dolfin output files plus the numbered text result files."""
    dolfin_handles = init_dolfin_files()
    rf = ResultsFile()
    for tag in ("current", "density", "trajectory"):
        setattr(rf, tag, new_file(tag))
    return (dolfin_handles, rf)
#main
def main():
    """Assemble mesh, function spaces, problem and files, then run the simulation."""
    #init mesh — options.mesh is a factory taking the two material definitions
    mesh = options.mesh(options,
                        options.materials[0],
                        options.materials[1])
    #these seem to need to be global
    V = FunctionSpace(mesh, "CG", 1)
    V2 = VectorFunctionSpace(mesh,"CG",1,2)
    problem = init_problem(mesh,V,V2,options)
    # hand the particle system the raw density array via the C interface
    system = c_interface.init_system(mesh,
             problem.density_funcs.poisson_density.vector().array(),
             options.gen_num, options.length)
    #init Files
    (dolfinFiles,rf)=init_files()
    #mainloop
    mainloop(mesh,system,problem,dolfinFiles,rf,options.scale)
# Script entry point: run the full Monte Carlo simulation.
if __name__=="__main__":
    main()
| {
"repo_name": "cwgreene/Nanostructure-Simulator",
"path": "monte.py",
"copies": "1",
"size": "6970",
"license": "mit",
"hash": -8086935310102842000,
"line_mean": 28.7863247863,
"line_max": 96,
"alpha_frac": 0.7305595409,
"autogenerated": false,
"ratio": 2.888520513883133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8828150478435438,
"avg_score": 0.05818591526953895,
"num_lines": 234
} |
__author__ = 'Chris Krycho'
__copyright__ = '2013 Chris Krycho'
from logging import error, warning
from sys import exit
try:
from jinja2 import Environment, FileSystemLoader, TemplateNotFound
except ImportError as import_error:
error(import_error)
exit()
class Renderer():
    """Render pages through Jinja2 templates, caching templates by name."""
    # name of the fallback template; also its key in the template cache
    DEFAULT_NAME = 'default'
    def __init__(self, site_info):
        """
        :param site_info: site configuration exposing template.directory
            (template search path) and template.default (default template file)
        """
        self.site_info = site_info
        self.template_path = self.site_info.template.directory
        self.environment = Environment(loader=FileSystemLoader(searchpath=self.template_path))
        # seed the cache with the default template under the canonical key
        self.templates = {self.DEFAULT_NAME: self.environment.get_template(self.site_info.template.default)}
    def render_page(self, page):
        """Render a single page, honouring an optional per-page 'template' meta key."""
        template_name = page.meta['template'] if 'template' in page.meta else self.DEFAULT_NAME
        template = self.__get_template(template_name)
        return template.render(site=self.site_info, pages=[page])
    def render_page_set(self, pages, template_name=DEFAULT_NAME):
        """Render a list of pages with a shared template."""
        template = self.__get_template(template_name)
        return template.render(site=self.site_info, pages=pages)
    def __get_template(self, template_name):
        '''
        Retrieve the template for rendering using Environment::get_template.
        Start by checking templates already stored by previous calls to this
        method (minimizing calls to the file system); fall back to the default
        template, with a warning, when the requested one does not exist.
        '''
        if template_name in self.templates:
            template = self.templates[template_name]
        else:
            try:
                template = self.environment.get_template(template_name)
                self.templates[template_name] = template
            except TemplateNotFound:
                template = self.templates[self.DEFAULT_NAME]
                # BUGFIX: the message previously ended with a stray quote
                warning_msg = "Specified template {} not found.".format(template_name)
                warning(warning_msg)
        return template
| {
"repo_name": "chriskrycho/step-stool",
"path": "step_stool/render.py",
"copies": "1",
"size": "1860",
"license": "mit",
"hash": -980047906801529300,
"line_mean": 36.2,
"line_max": 100,
"alpha_frac": 0.6569892473,
"autogenerated": false,
"ratio": 4.2465753424657535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5403564589765754,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chris Lewis'
__version__ = '0.1.0'
__email__ = 'clewis1@c.ringling.edu'
import sys
import json
import maya.cmds as mc
from maya.OpenMaya import *
from maya.OpenMayaMPx import *
kPluginTranslatorTypeName = 'Three.js'
kOptionScript = 'ThreeJsExportScript'
kDefaultOptionsString = '0'
FLOAT_PRECISION = 8
# adds decimal precision to JSON encoding
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that caps floats at FLOAT_PRECISION decimal places."""
    def _iterencode(self, o, markers=None):
        if isinstance(o, float):
            s = str(o)
            if '.' in s and len(s[s.index('.'):]) > FLOAT_PRECISION - 1:
                s = '%.{0}f'.format(FLOAT_PRECISION) % o
                # BUGFIX: was `s = s[-2]`, which replaced the whole string
                # with its second-to-last character; drop one trailing zero
                # per iteration instead (same fix as the corrected copy of
                # this exporter).
                while '.' in s and s[-1] == '0':
                    s = s[:-1]
                if s[-1] == '.':
                    # "2." is not valid JSON; keep a single trailing zero
                    s += '0'
            return (s for s in [s])
        return super(DecimalEncoder, self)._iterencode(o, markers)
class ThreeJsError(Exception):
    """Raised for exporter-specific failures (e.g. an unsupported access mode)."""
    pass
class ThreeJsWriter(object):
    """Collect Maya mesh data and write it as a Three.js JSON (formatVersion 3) model."""
    def __init__(self):
        # also the serialization order of the component buffers
        self.componentKeys = ['vertices', 'normals', 'colors', 'uvs', 'materials', 'faces']
    def _parseOptions(self, optionsString):
        """Turn the space-separated export-options string into a bool dict."""
        self.options = dict([(x, False) for x in self.componentKeys])
        optionsString = optionsString[2:] # trim off the "0;" that Maya adds to the options string
        for option in optionsString.split(' '):
            self.options[option] = True
    def _updateOffsets(self):
        """Record current buffer lengths so per-mesh indices can be rebased into the shared buffers."""
        for key in self.componentKeys:
            if key == 'uvs':
                continue
            self.offsets[key] = len(getattr(self, key))
        # uvs are tracked per layer, hence a list of per-layer offsets
        for i in range(len(self.uvs)):
            self.offsets['uvs'][i] = len(self.uvs[i])
    def _getTypeBitmask(self, options):
        """Build the Three.js face-type bitmask for the enabled components."""
        bitmask = 0
        if options['materials']:
            bitmask |= 2
        if options['uvs']:
            bitmask |= 8
        if options['normals']:
            bitmask |= 32
        if options['colors']:
            bitmask |= 128
        return bitmask
    def _exportMesh(self, dagPath, component):
        """Append one mesh's vertex/material/uv/normal/color/face data to the buffers.

        NOTE(review): each section disables its component on *any* exception
        (bare except) — this silently degrades the export rather than failing.
        """
        mesh = MFnMesh(dagPath)
        options = self.options.copy()
        self._updateOffsets()
        # export vertex data
        if options['vertices']:
            try:
                iterVerts = MItMeshVertex(dagPath, component)
                while not iterVerts.isDone():
                    point = iterVerts.position(MSpace.kWorld)
                    self.vertices += [point.x, point.y, point.z]
                    iterVerts.next()
            except:
                options['vertices'] = False
        # export material data
        # TODO: actually parse material data
        materialIndices = MIntArray()
        if options['materials']:
            try:
                shaders = MObjectArray()
                mesh.getConnectedShaders(0, shaders, materialIndices)
                while len(self.materials) < shaders.length():
                    self.materials.append({}) # placeholder material definition
            except:
                self.materials = [{}]
        # export uv data
        if options['uvs']:
            try:
                uvLayers = []
                mesh.getUVSetNames(uvLayers)
                while len(uvLayers) > len(self.uvs):
                    self.uvs.append([])
                    self.offsets['uvs'].append(0)
                for i, layer in enumerate(uvLayers):
                    uList = MFloatArray()
                    vList = MFloatArray()
                    mesh.getUVs(uList, vList, layer)
                    for j in xrange(uList.length()):
                        self.uvs[i] += [uList[j], vList[j]]
            except:
                options['uvs'] = False
        # export normal data
        if options['normals']:
            try:
                normals = MFloatVectorArray()
                mesh.getNormals(normals, MSpace.kWorld)
                for i in xrange(normals.length()):
                    point = normals[i]
                    self.normals += [point.x, point.y, point.z]
            except:
                options['normals'] = False
        # export color data
        if options['colors']:
            try:
                colors = MColorArray()
                mesh.getColors(colors)
                for i in xrange(colors.length()):
                    color = colors[i]
                    # uncolored vertices are set to (-1, -1, -1). Clamps colors to (0, 0, 0).
                    self.colors += [max(color.r, 0), max(color.g, 0), max(color.b, 0)]
            except:
                options['colors'] = False
        # export face data — meaningless without vertices
        if not options['vertices']:
            return
        bitmask = self._getTypeBitmask(options)
        iterPolys = MItMeshPolygon(dagPath, component)
        while not iterPolys.isDone():
            # each face record starts with the type bitmask
            self.faces.append(bitmask)
            # export face vertices (rebased into the shared vertex buffer)
            verts = MIntArray()
            iterPolys.getVertices(verts)
            for i in xrange(verts.length()):
                self.faces.append(verts[i] + self.offsets['vertices'])
            # export face vertex materials
            if options['materials']:
                if materialIndices.length():
                    self.faces.append(materialIndices[iterPolys.index()])
            # export face vertex uvs
            if options['uvs']:
                util = MScriptUtil()
                uvPtr = util.asIntPtr()
                for i, layer in enumerate(uvLayers):
                    for j in xrange(verts.length()):
                        iterPolys.getUVIndex(j, uvPtr, layer)
                        uvIndex = util.getInt(uvPtr)
                        self.faces.append(uvIndex + self.offsets['uvs'][i])
            # export face vertex normals (3 per face: mesh was triangulated)
            if options['normals']:
                for i in xrange(3):
                    normalIndex = iterPolys.normalIndex(i)
                    self.faces.append(normalIndex + self.offsets['normals'])
            # export face vertex colors
            if options['colors']:
                colors = MIntArray()
                iterPolys.getColorIndices(colors)
                for i in xrange(colors.length()):
                    self.faces.append(colors[i] + self.offsets['colors'])
            iterPolys.next()
    def _getMeshes(self, nodes):
        """Resolve a node list to mesh shapes (direct meshes plus mesh children)."""
        meshes = []
        for node in nodes:
            if mc.nodeType(node) == 'mesh':
                meshes.append(node)
            else:
                for child in mc.listRelatives(node, s=1):
                    if mc.nodeType(child) == 'mesh':
                        meshes.append(child)
        return meshes
    def _exportMeshes(self):
        """Duplicate, unite and triangulate the selected meshes, then export the result.

        The duplicates are merged so the output is a single mesh; the
        temporary combined mesh is deleted afterwards.
        """
        # export all
        if self.accessMode == MPxFileTranslator.kExportAccessMode:
            mc.select(self._getMeshes(mc.ls(typ='mesh')))
        # export selection
        elif self.accessMode == MPxFileTranslator.kExportActiveAccessMode:
            mc.select(self._getMeshes(mc.ls(sl=1)))
        else:
            raise ThreeJsError('Unsupported access mode: {0}'.format(self.accessMode))
        dups = [mc.duplicate(mesh)[0] for mesh in mc.ls(sl=1)]
        combined = mc.polyUnite(dups, mergeUVSets=1, ch=0) if len(dups) > 1 else dups[0]
        mc.polyTriangulate(combined)
        mc.select(combined)
        sel = MSelectionList()
        MGlobal.getActiveSelectionList(sel)
        mDag = MDagPath()
        mComp = MObject()
        sel.getDagPath(0, mDag, mComp)
        self._exportMesh(mDag, mComp)
        mc.delete(combined)
    def write(self, path, optionString, accessMode):
        """Export the scene (or selection) to a Three.js JSON file at `path`."""
        self.path = path
        self._parseOptions(optionString)
        self.accessMode = accessMode
        self.root = dict(metadata=dict(formatVersion=3))
        # reset all component buffers and their offsets
        self.offsets = dict()
        for key in self.componentKeys:
            setattr(self, key, [])
            self.offsets[key] = 0
        self.offsets['uvs'] = []
        self.uvs = []
        self._exportMeshes()
        # add the component buffers to the root JSON object
        for key in self.componentKeys:
            buffer_ = getattr(self, key)
            if buffer_:
                self.root[key] = buffer_
        # materials are required for parsing
        if not self.root.has_key('materials'):
            self.root['materials'] = [{}]
        # write the file (file() is the Python 2 builtin alias for open())
        with file(self.path, 'w') as f:
            f.write(json.dumps(self.root, separators=(',',':'), cls=DecimalEncoder))
class ThreeJsTranslator(MPxFileTranslator):
    """Maya file-translator plugin wrapper that delegates export to ThreeJsWriter."""
    def __init__(self):
        MPxFileTranslator.__init__(self)
    def haveWriteMethod(self):
        # tell Maya this translator supports export via writer()
        return True
    def filter(self):
        # file-browser filter shown in the export dialog
        return '*.js'
    def defaultExtension(self):
        return 'js'
    def writer(self, fileObject, optionString, accessMode):
        """Entry point Maya calls on export; runs the ThreeJsWriter on the target path."""
        path = fileObject.fullName()
        writer = ThreeJsWriter()
        writer.write(path, optionString, accessMode)
def translatorCreator():
    """Factory handed to Maya's plugin registry; returns a wrapped translator instance."""
    return asMPxPtr(ThreeJsTranslator())
def initializePlugin(mobject):
    """Register the Three.js file translator when Maya loads this plugin."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator, kOptionScript, kDefaultOptionsString)
    except:
        # surface the failure on stderr, then re-raise so Maya reports it
        sys.stderr.write('Failed to register translator: %s' % kPluginTranslatorTypeName)
        raise
def uninitializePlugin(mobject):
mplugin = MFnPlugin(mobject)
try:
mplugin.deregisterFileTranslator(kPluginTranslatorTypeName)
except:
sys.stderr.write('Failed to deregister translator: %s' % kPluginTranslatorTypeName)
raise | {
"repo_name": "mind0n/hive",
"path": "History/Website/3js/utils/exporters/maya/plug-ins/threeJsFileTranlator.py",
"copies": "2",
"size": "9427",
"license": "mit",
"hash": -7738290745982726000,
"line_mean": 33.7896678967,
"line_max": 128,
"alpha_frac": 0.5470457197,
"autogenerated": false,
"ratio": 4.169394073418841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5716439793118842,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chris Lewis'
__version__ = '0.1.0'
__email__ = 'clewis1@c.ringling.edu'
import sys
import json
import maya.cmds as mc
from maya.OpenMaya import *
from maya.OpenMayaMPx import *
kPluginTranslatorTypeName = 'Three.js'
kOptionScript = 'ThreeJsExportScript'
kDefaultOptionsString = '0'
FLOAT_PRECISION = 8
# adds decimal precision to JSON encoding
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that limits floats to FLOAT_PRECISION decimal places."""
    def _iterencode(self, o, markers=None):
        if not isinstance(o, float):
            # anything that is not a float keeps the stock encoding
            return super(DecimalEncoder, self)._iterencode(o, markers)
        text = str(o)
        fraction = text[text.index('.'):] if '.' in text else ''
        if len(fraction) > FLOAT_PRECISION - 1:
            # reformat at fixed precision, then drop the trailing zeros
            text = '%.{0}f'.format(FLOAT_PRECISION) % o
            while '.' in text and text.endswith('0'):
                text = text[:-1]
            if text.endswith('.'):
                # "2." is not valid JSON; restore a single trailing zero
                text += '0'
        return (value for value in [text])
class ThreeJsError(Exception):
    """Raised for exporter-specific failures (e.g. an unsupported access mode)."""
    pass
class ThreeJsWriter(object):
def __init__(self):
self.componentKeys = ['vertices', 'normals', 'colors', 'uvs', 'materials', 'faces']
def _parseOptions(self, optionsString):
self.options = dict([(x, False) for x in self.componentKeys])
optionsString = optionsString[2:] # trim off the "0;" that Maya adds to the options string
for option in optionsString.split(' '):
self.options[option] = True
def _updateOffsets(self):
for key in self.componentKeys:
if key == 'uvs':
continue
self.offsets[key] = len(getattr(self, key))
for i in range(len(self.uvs)):
self.offsets['uvs'][i] = len(self.uvs[i])
def _getTypeBitmask(self, options):
bitmask = 0
if options['materials']:
bitmask |= 2
if options['uvs']:
bitmask |= 8
if options['normals']:
bitmask |= 32
if options['colors']:
bitmask |= 128
return bitmask
def _exportMesh(self, dagPath, component):
mesh = MFnMesh(dagPath)
options = self.options.copy()
self._updateOffsets()
# export vertex data
if options['vertices']:
try:
iterVerts = MItMeshVertex(dagPath, component)
while not iterVerts.isDone():
point = iterVerts.position(MSpace.kWorld)
self.vertices += [point.x, point.y, point.z]
iterVerts.next()
except:
options['vertices'] = False
# export material data
# TODO: actually parse material data
materialIndices = MIntArray()
if options['materials']:
try:
shaders = MObjectArray()
mesh.getConnectedShaders(0, shaders, materialIndices)
while len(self.materials) < shaders.length():
self.materials.append({}) # placeholder material definition
except:
self.materials = [{}]
# export uv data
if options['uvs']:
try:
uvLayers = []
mesh.getUVSetNames(uvLayers)
while len(uvLayers) > len(self.uvs):
self.uvs.append([])
self.offsets['uvs'].append(0)
for i, layer in enumerate(uvLayers):
uList = MFloatArray()
vList = MFloatArray()
mesh.getUVs(uList, vList, layer)
for j in xrange(uList.length()):
self.uvs[i] += [uList[j], vList[j]]
except:
options['uvs'] = False
# export normal data
if options['normals']:
try:
normals = MFloatVectorArray()
mesh.getNormals(normals, MSpace.kWorld)
for i in xrange(normals.length()):
point = normals[i]
self.normals += [point.x, point.y, point.z]
except:
options['normals'] = False
# export color data
if options['colors']:
try:
colors = MColorArray()
mesh.getColors(colors)
for i in xrange(colors.length()):
color = colors[i]
# uncolored vertices are set to (-1, -1, -1). Clamps colors to (0, 0, 0).
self.colors += [max(color.r, 0), max(color.g, 0), max(color.b, 0)]
except:
options['colors'] = False
# export face data
if not options['vertices']:
return
bitmask = self._getTypeBitmask(options)
iterPolys = MItMeshPolygon(dagPath, component)
while not iterPolys.isDone():
self.faces.append(bitmask)
# export face vertices
verts = MIntArray()
iterPolys.getVertices(verts)
for i in xrange(verts.length()):
self.faces.append(verts[i] + self.offsets['vertices'])
# export face vertex materials
if options['materials']:
if materialIndices.length():
self.faces.append(materialIndices[iterPolys.index()])
# export face vertex uvs
if options['uvs']:
util = MScriptUtil()
uvPtr = util.asIntPtr()
for i, layer in enumerate(uvLayers):
for j in xrange(verts.length()):
iterPolys.getUVIndex(j, uvPtr, layer)
uvIndex = util.getInt(uvPtr)
self.faces.append(uvIndex + self.offsets['uvs'][i])
# export face vertex normals
if options['normals']:
for i in xrange(3):
normalIndex = iterPolys.normalIndex(i)
self.faces.append(normalIndex + self.offsets['normals'])
# export face vertex colors
if options['colors']:
colors = MIntArray()
iterPolys.getColorIndices(colors)
for i in xrange(colors.length()):
self.faces.append(colors[i] + self.offsets['colors'])
iterPolys.next()
def _getMeshes(self, nodes):
    """Collect mesh shape nodes from *nodes*.

    A node that is itself a mesh is kept as-is; for any other node its
    shape children are inspected and the mesh shapes collected.

    Returns a list of mesh node names (possibly empty).
    """
    meshes = []
    for node in nodes:
        if mc.nodeType(node) == 'mesh':
            meshes.append(node)
        else:
            # listRelatives returns None (not []) when the node has no
            # shape children; guard so such transforms don't raise a
            # TypeError when iterated.
            for child in mc.listRelatives(node, s=1) or []:
                if mc.nodeType(child) == 'mesh':
                    meshes.append(child)
    return meshes
def _exportMeshes(self):
    """Select the meshes to export, merge them into one triangulated
    duplicate, export that mesh, then delete the temporary geometry.

    Raises ThreeJsError for access modes other than export-all /
    export-selection.
    """
    # export all
    if self.accessMode == MPxFileTranslator.kExportAccessMode:
        mc.select(self._getMeshes(mc.ls(typ='mesh')))
    # export selection
    elif self.accessMode == MPxFileTranslator.kExportActiveAccessMode:
        mc.select(self._getMeshes(mc.ls(sl=1)))
    else:
        raise ThreeJsError('Unsupported access mode: {0}'.format(self.accessMode))
    # work on duplicates so the user's original scene geometry is untouched
    dups = [mc.duplicate(mesh)[0] for mesh in mc.ls(sl=1)]
    combined = mc.polyUnite(dups, mergeUVSets=1, ch=0) if len(dups) > 1 else dups[0]
    mc.polyTriangulate(combined)
    mc.select(combined)
    # fetch the DAG path / component of the combined mesh via the API
    sel = MSelectionList()
    MGlobal.getActiveSelectionList(sel)
    mDag = MDagPath()
    mComp = MObject()
    sel.getDagPath(0, mDag, mComp)
    self._exportMesh(mDag, mComp)
    # remove the temporary combined mesh
    mc.delete(combined)
def write(self, path, optionString, accessMode):
    """Entry point of the exporter: parse options, export the meshes,
    and write the three.js JSON (format version 3) to *path*.

    accessMode is the Maya translator access mode (export all vs.
    export selection); it is checked later in _exportMeshes.
    """
    self.path = path
    self._parseOptions(optionString)
    self.accessMode = accessMode
    self.root = dict(metadata=dict(formatVersion=3))
    self.offsets = dict()
    for key in self.componentKeys:
        setattr(self, key, [])
        self.offsets[key] = 0
    # uvs are tracked per layer, so they get list-valued buffers/offsets
    self.offsets['uvs'] = []
    self.uvs = []
    self._exportMeshes()
    # add the component buffers to the root JSON object
    for key in self.componentKeys:
        buffer_ = getattr(self, key)
        if buffer_:
            self.root[key] = buffer_
    # materials are required for parsing
    # ('in' replaces the deprecated dict.has_key)
    if 'materials' not in self.root:
        self.root['materials'] = [{}]
    # write the file ('open' replaces the deprecated 'file' builtin)
    with open(self.path, 'w') as f:
        f.write(json.dumps(self.root, separators=(',', ':'), cls=DecimalEncoder))
class ThreeJsTranslator(MPxFileTranslator):
    """Maya file translator exposing the three.js JSON exporter."""

    def __init__(self):
        MPxFileTranslator.__init__(self)

    def haveWriteMethod(self):
        # Export-only translator: writing is supported, reading is not.
        return True

    def filter(self):
        # File-browser filter string for this format.
        return '*.js'

    def defaultExtension(self):
        return 'js'

    def writer(self, fileObject, optionString, accessMode):
        # Delegate the actual export work to ThreeJsWriter.
        ThreeJsWriter().write(fileObject.fullName(), optionString, accessMode)
def translatorCreator():
    # Factory handed to Maya's plugin registry: returns a fresh
    # translator instance wrapped as an MPx pointer.
    return asMPxPtr(ThreeJsTranslator())
def initializePlugin(mobject):
    """Maya plugin entry point: register the three.js file translator."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator, kOptionScript, kDefaultOptionsString)
    except:
        # Report the failure, then re-raise so Maya marks the load as failed.
        sys.stderr.write('Failed to register translator: %s' % kPluginTranslatorTypeName)
        raise
def uninitializePlugin(mobject):
    """Maya plugin exit point: deregister the three.js file translator."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.deregisterFileTranslator(kPluginTranslatorTypeName)
    except:
        # Report the failure, then re-raise so Maya surfaces the error.
        sys.stderr.write('Failed to deregister translator: %s' % kPluginTranslatorTypeName)
        raise
"repo_name": "stanwmusic/three.js",
"path": "utils/exporters/maya/plug-ins/threeJsFileTranslator.py",
"copies": "25",
"size": "9928",
"license": "mit",
"hash": 2361810420530528000,
"line_mean": 34.3736263736,
"line_max": 128,
"alpha_frac": 0.5321313457,
"autogenerated": false,
"ratio": 4.271944922547332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
S5/HTML Slideshow Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import docutils
from docutils import frontend, nodes, utils
from docutils.writers import html4css1
from docutils.parsers.rst import directives
# Path to the bundled S5 themes directory, expressed relative to the
# current working directory ('dummy' is just a filename placeholder for
# relative_path's source argument).
themes_dir_path = utils.relative_path(
    os.path.join(os.getcwd(), 'dummy'),
    os.path.join(os.path.dirname(__file__), 'themes'))
def find_theme(name):
    """Return the directory path of the installed theme *name*.

    Raises docutils.ApplicationError when no such theme directory
    exists under the bundled themes directory.
    """
    # Candidate locations beyond the bundled themes dir (working dir,
    # destination dir, config dir, plugins dir?) are not searched yet.
    candidate = os.path.join(themes_dir_path, name)
    if os.path.isdir(candidate):
        return candidate
    raise docutils.ApplicationError(
        'Theme directory not found: %r (path: %r)' % (name, candidate))
class Writer(html4css1.Writer):
    """Writer for S5/HTML slideshows, extending the html4css1 writer
    with theme-selection and slide-display options."""

    settings_spec = html4css1.Writer.settings_spec + (
        'S5 Slideshow Specific Options',
        'For the S5/HTML writer, the --no-toc-backlinks option '
        '(defined in General Docutils Options above) is the default, '
        'and should not be changed.',
        (('Specify an installed S5 theme by name. Overrides --theme-url. '
          'The default theme name is "default". The theme files will be '
          'copied into a "ui/<theme>" directory, in the same directory as the '
          'destination file (output HTML). Note that existing theme files '
          'will not be overwritten (unless --overwrite-theme-files is used).',
          ['--theme'],
          {'default': 'default', 'metavar': '<name>',
           'overrides': 'theme_url'}),
         ('Specify an S5 theme URL. The destination file (output HTML) will '
          'link to this theme; nothing will be copied. Overrides --theme.',
          ['--theme-url'],
          {'metavar': '<URL>', 'overrides': 'theme'}),
         ('Allow existing theme files in the ``ui/<theme>`` directory to be '
          'overwritten. The default is not to overwrite theme files.',
          ['--overwrite-theme-files'],
          {'action': 'store_true'}),
         ('Keep existing theme files in the ``ui/<theme>`` directory; do not '
          'overwrite any. This is the default.',
          ['--keep-theme-files'],
          {'dest': 'overwrite_theme_files', 'action': 'store_false'}),
         ('Enable the current slide indicator ("1 / 15"). '
          'The default is to disable it.',
          ['--current-slide'],
          {'action': 'store_true'}),
         ('Disable the current slide indicator. This is the default.',
          ['--no-current-slide'],
          {'dest': 'current_slide', 'action': 'store_false'}),))

    # TOC backlinks interfere with the slide layout, so they are off.
    settings_default_overrides = {'toc_backlinks': 0}

    config_section = 's5_html writer'
    config_section_dependencies = ('writers', 'html4css1 writer')

    def __init__(self):
        html4css1.Writer.__init__(self)
        # Swap in the S5-specific translator.
        self.translator_class = S5HTMLTranslator
class S5HTMLTranslator(html4css1.HTMLTranslator):
    """Translate the document tree to S5 slideshow HTML.

    Extends the html4css1 translator: copies or links the selected S5
    theme, wraps each top-level section in a slide <div>, and collects
    header/footer markup into the S5 layout template.
    """

    # S5 requires the XHTML 1.0 Transitional doctype.
    doctype = (
        '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
        ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')

    # %(path)s is interpolated with the theme directory/URL.
    s5_stylesheet_template = """\
<!-- configuration parameters -->
<meta name="defaultView" content="slideshow" />
<meta name="controlVis" content="hidden" />
<!-- style sheet links -->
<script src="%(path)s/slides.js" type="text/javascript"></script>
<link rel="stylesheet" href="%(path)s/slides.css"
type="text/css" media="projection" id="slideProj" />
<link rel="stylesheet" href="%(path)s/outline.css"
type="text/css" media="screen" id="outlineStyle" />
<link rel="stylesheet" href="%(path)s/print.css"
type="text/css" media="print" id="slidePrint" />
<link rel="stylesheet" href="%(path)s/opera.css"
type="text/css" media="projection" id="operaFix" />\n"""
    # The script element must go in front of the link elements to
    # avoid a flash of unstyled content (FOUC), reproducible with
    # Firefox.

    disable_current_slide = """
<style type="text/css">
#currentSlide {display: none;}
</style>\n"""

    layout_template = """\
<div class="layout">
<div id="controls"></div>
<div id="currentSlide"></div>
<div id="header">
%(header)s
</div>
<div id="footer">
%(title)s%(footer)s
</div>
</div>\n"""
    # <div class="topleft"></div>
    # <div class="topright"></div>
    # <div class="bottomleft"></div>
    # <div class="bottomright"></div>

    default_theme = 'default'
    """Name of the default theme."""

    base_theme_file = '__base__'
    """Name of the file containing the name of the base theme."""

    direct_theme_files = (
        'slides.css', 'outline.css', 'print.css', 'opera.css', 'slides.js')
    """Names of theme files directly linked to in the output HTML"""

    indirect_theme_files = (
        's5-core.css', 'framing.css', 'pretty.css', 'blank.gif', 'iepngfix.htc')
    """Names of files used indirectly; imported or used by files in
    `direct_theme_files`."""

    required_theme_files = indirect_theme_files + direct_theme_files
    """Names of mandatory theme files."""

    def __init__(self, *args):
        html4css1.HTMLTranslator.__init__(self, *args)
        #insert S5-specific stylesheet and script stuff:
        self.theme_file_path = None
        self.setup_theme()
        self.stylesheet.append(self.s5_stylesheet_template
                               % {'path': self.theme_file_path})
        if not self.document.settings.current_slide:
            self.stylesheet.append(self.disable_current_slide)
        self.add_meta('<meta name="version" content="S5 1.1" />\n')
        # Collected header/footer markup, filled by depart_header/footer.
        self.s5_footer = []
        self.s5_header = []
        self.section_count = 0
        self.theme_files_copied = None

    def setup_theme(self):
        """Resolve the theme: copy an installed theme, or link a URL."""
        if self.document.settings.theme:
            self.copy_theme()
        elif self.document.settings.theme_url:
            self.theme_file_path = self.document.settings.theme_url
        else:
            raise docutils.ApplicationError(
                'No theme specified for S5/HTML writer.')

    def copy_theme(self):
        """
        Locate & copy theme files.

        A theme may be explicitly based on another theme via a '__base__'
        file. The default base theme is 'default'. Files are accumulated
        from the specified theme, any base themes, and 'default'.
        """
        settings = self.document.settings
        path = find_theme(settings.theme)
        theme_paths = [path]
        self.theme_files_copied = {}
        required_files_copied = {}
        # This is a link (URL) in HTML, so we use "/", not os.sep:
        self.theme_file_path = '%s/%s' % ('ui', settings.theme)
        if settings._destination:
            dest = os.path.join(
                os.path.dirname(settings._destination), 'ui', settings.theme)
            if not os.path.isdir(dest):
                os.makedirs(dest)
        else:
            # no destination, so we can't copy the theme
            return
        default = 0
        # Walk the theme inheritance chain, copying each theme's files.
        while path:
            for f in os.listdir(path):  # copy all files from each theme
                if f == self.base_theme_file:
                    continue  # ... except the "__base__" file
                if (self.copy_file(f, path, dest)
                    and f in self.required_theme_files):
                    required_files_copied[f] = 1
            if default:
                break  # "default" theme has no base theme
            # Find the "__base__" file in theme directory:
            base_theme_file = os.path.join(path, self.base_theme_file)
            # If it exists, read it and record the theme path:
            if os.path.isfile(base_theme_file):
                lines = open(base_theme_file).readlines()
                for line in lines:
                    line = line.strip()
                    if line and not line.startswith('#'):
                        path = find_theme(line)
                        if path in theme_paths:  # check for duplicates (cycles)
                            path = None  # if found, use default base
                        else:
                            theme_paths.append(path)
                        break
                else:  # no theme name found
                    path = None  # use default base
            else:  # no base theme file found
                path = None  # use default base
            if not path:
                path = find_theme(self.default_theme)
                theme_paths.append(path)
                default = 1
        if len(required_files_copied) != len(self.required_theme_files):
            # Some required files weren't found & couldn't be copied.
            required = list(self.required_theme_files)
            for f in required_files_copied.keys():
                required.remove(f)
            raise docutils.ApplicationError(
                'Theme files not found: %s'
                % ', '.join(['%r' % f for f in required]))

    # Editor backups and VCS metadata are never copied.
    files_to_skip_pattern = re.compile(r'~$|\.bak$|#$|\.cvsignore$')

    def copy_file(self, name, source_dir, dest_dir):
        """
        Copy file `name` from `source_dir` to `dest_dir`.
        Return 1 if the file exists in either `source_dir` or `dest_dir`.
        """
        source = os.path.join(source_dir, name)
        dest = os.path.join(dest_dir, name)
        if self.theme_files_copied.has_key(dest):
            return 1
        else:
            self.theme_files_copied[dest] = 1
        if os.path.isfile(source):
            if self.files_to_skip_pattern.search(source):
                return None
            settings = self.document.settings
            if os.path.exists(dest) and not settings.overwrite_theme_files:
                settings.record_dependencies.add(dest)
            else:
                src_file = open(source, 'rb')
                src_data = src_file.read()
                src_file.close()
                dest_file = open(dest, 'wb')
                # Rewrite 'ui/default' references so a copied theme points
                # at its own ui/<theme> directory:
                dest_dir = dest_dir.replace(os.sep, '/')
                dest_file.write(src_data.replace(
                    'ui/default', dest_dir[dest_dir.rfind('ui/'):]))
                dest_file.close()
                settings.record_dependencies.add(source)
            return 1
        if os.path.isfile(dest):
            return 1

    def depart_document(self, node):
        """Assemble the final body: S5 layout divs plus collected slides."""
        header = ''.join(self.s5_header)
        footer = ''.join(self.s5_footer)
        title = ''.join(self.html_title).replace('<h1 class="title">', '<h1>')
        layout = self.layout_template % {'header': header,
                                         'title': title,
                                         'footer': footer}
        self.fragment.extend(self.body)
        self.body_prefix.extend(layout)
        self.body_prefix.append('<div class="presentation">\n')
        self.body_prefix.append(
            self.starttag({'classes': ['slide'], 'ids': ['slide0']}, 'div'))
        if not self.section_count:
            self.body.append('</div>\n')
        self.body_suffix.insert(0, '</div>\n')
        # skip content-type meta tag with interpolated charset value:
        self.html_head.extend(self.head[1:])
        self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
                              + self.docinfo + self.body
                              + self.body_suffix[:-1])

    def depart_footer(self, node):
        # Move footer markup out of the body into the S5 footer slot.
        start = self.context.pop()
        self.s5_footer.append('<h2>')
        self.s5_footer.extend(self.body[start:])
        self.s5_footer.append('</h2>')
        del self.body[start:]

    def depart_header(self, node):
        # Move header markup out of the body into the S5 header slot.
        start = self.context.pop()
        header = ['<div id="header">\n']
        header.extend(self.body[start:])
        header.append('\n</div>\n')
        del self.body[start:]
        self.s5_header.extend(header)

    def visit_section(self, node):
        """Open a 'slide' div for top-level sections (plain 'section'
        divs for nested ones)."""
        if not self.section_count:
            self.body.append('\n</div>\n')
        self.section_count += 1
        self.section_level += 1
        if self.section_level > 1:
            # dummy for matching div's
            self.body.append(self.starttag(node, 'div', CLASS='section'))
        else:
            self.body.append(self.starttag(node, 'div', CLASS='slide'))

    def visit_subtitle(self, node):
        # Section subtitles are rendered as a heading one level below
        # the section title; document subtitles use the base behavior.
        if isinstance(node.parent, nodes.section):
            level = self.section_level + self.initial_header_level - 1
            if level == 1:
                level = 2
            tag = 'h%s' % level
            self.body.append(self.starttag(node, tag, ''))
            self.context.append('</%s>\n' % tag)
        else:
            html4css1.HTMLTranslator.visit_subtitle(self, node)

    def visit_title(self, node, move_ids=0):
        html4css1.HTMLTranslator.visit_title(self, node, move_ids=move_ids)
| {
"repo_name": "santisiri/popego",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/docutils-0.4-py2.5.egg/docutils/writers/s5_html/__init__.py",
"copies": "6",
"size": "13039",
"license": "bsd-3-clause",
"hash": 5015931710226948000,
"line_mean": 38.9969325153,
"line_max": 80,
"alpha_frac": 0.5653807807,
"autogenerated": false,
"ratio": 3.8771929824561404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003909568320652418,
"num_lines": 326
} |
"""
S5/HTML Slideshow Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import docutils
from docutils import frontend, nodes, utils
from docutils.writers import html4css1
from docutils.parsers.rst import directives
# Path to the bundled S5 themes directory, expressed relative to the
# current working directory ('dummy' is just a filename placeholder for
# relative_path's source argument).
themes_dir_path = utils.relative_path(
    os.path.join(os.getcwd(), 'dummy'),
    os.path.join(os.path.dirname(__file__), 'themes'))
def find_theme(name):
    """Look up the installed theme *name* and return its directory.

    A missing theme directory raises docutils.ApplicationError.
    """
    # Other plausible search locations (working dir, destination dir,
    # config dir, plugins dir) are not consulted yet.
    theme_dir = os.path.join(themes_dir_path, name)
    if not os.path.isdir(theme_dir):
        raise docutils.ApplicationError(
            'Theme directory not found: %r (path: %r)' % (name, theme_dir))
    return theme_dir
class Writer(html4css1.Writer):
    """Writer for S5/HTML slideshows, extending the html4css1 writer
    with theme-selection and slide-display options."""

    settings_spec = html4css1.Writer.settings_spec + (
        'S5 Slideshow Specific Options',
        'For the S5/HTML writer, the --no-toc-backlinks option '
        '(defined in General Docutils Options above) is the default, '
        'and should not be changed.',
        (('Specify an installed S5 theme by name. Overrides --theme-url. '
          'The default theme name is "default". The theme files will be '
          'copied into a "ui/<theme>" directory, in the same directory as the '
          'destination file (output HTML). Note that existing theme files '
          'will not be overwritten (unless --overwrite-theme-files is used).',
          ['--theme'],
          {'default': 'default', 'metavar': '<name>',
           'overrides': 'theme_url'}),
         ('Specify an S5 theme URL. The destination file (output HTML) will '
          'link to this theme; nothing will be copied. Overrides --theme.',
          ['--theme-url'],
          {'metavar': '<URL>', 'overrides': 'theme'}),
         ('Allow existing theme files in the ``ui/<theme>`` directory to be '
          'overwritten. The default is not to overwrite theme files.',
          ['--overwrite-theme-files'],
          {'action': 'store_true'}),
         ('Keep existing theme files in the ``ui/<theme>`` directory; do not '
          'overwrite any. This is the default.',
          ['--keep-theme-files'],
          {'dest': 'overwrite_theme_files', 'action': 'store_false'}),
         ('Set the initial view mode to "slideshow" [default] or "outline".',
          ['--view-mode'],
          {'choices': ['slideshow', 'outline'], 'default': 'slideshow',
           'metavar': '<mode>'}),
         ('Normally hide the presentation controls in slideshow mode. '
          'This is the default.',
          ['--hidden-controls'],
          {'action': 'store_true', 'default': True}),
         ('Always show the presentation controls in slideshow mode. '
          'The default is to hide the controls.',
          ['--visible-controls'],
          {'dest': 'hidden_controls', 'action': 'store_false'}),
         ('Enable the current slide indicator ("1 / 15"). '
          'The default is to disable it.',
          ['--current-slide'],
          {'action': 'store_true'}),
         ('Disable the current slide indicator. This is the default.',
          ['--no-current-slide'],
          {'dest': 'current_slide', 'action': 'store_false'}),))

    # TOC backlinks interfere with the slide layout, so they are off.
    settings_default_overrides = {'toc_backlinks': 0}

    config_section = 's5_html writer'
    config_section_dependencies = ('writers', 'html4css1 writer')

    def __init__(self):
        html4css1.Writer.__init__(self)
        # Swap in the S5-specific translator.
        self.translator_class = S5HTMLTranslator
class S5HTMLTranslator(html4css1.HTMLTranslator):
    """Translate the document tree to S5 slideshow HTML.

    Extends the html4css1 translator: copies or links the selected S5
    theme, wraps each top-level section in a slide <div>, and collects
    header/footer markup into the S5 layout template.
    """

    # %(path)s, %(view_mode)s and %(control_visibility)s are filled in
    # from the theme path and the --view-mode/--hidden-controls settings.
    s5_stylesheet_template = """\
<!-- configuration parameters -->
<meta name="defaultView" content="%(view_mode)s" />
<meta name="controlVis" content="%(control_visibility)s" />
<!-- style sheet links -->
<script src="%(path)s/slides.js" type="text/javascript"></script>
<link rel="stylesheet" href="%(path)s/slides.css"
type="text/css" media="projection" id="slideProj" />
<link rel="stylesheet" href="%(path)s/outline.css"
type="text/css" media="screen" id="outlineStyle" />
<link rel="stylesheet" href="%(path)s/print.css"
type="text/css" media="print" id="slidePrint" />
<link rel="stylesheet" href="%(path)s/opera.css"
type="text/css" media="projection" id="operaFix" />\n"""
    # The script element must go in front of the link elements to
    # avoid a flash of unstyled content (FOUC), reproducible with
    # Firefox.

    disable_current_slide = """
<style type="text/css">
#currentSlide {display: none;}
</style>\n"""

    layout_template = """\
<div class="layout">
<div id="controls"></div>
<div id="currentSlide"></div>
<div id="header">
%(header)s
</div>
<div id="footer">
%(title)s%(footer)s
</div>
</div>\n"""
    # <div class="topleft"></div>
    # <div class="topright"></div>
    # <div class="bottomleft"></div>
    # <div class="bottomright"></div>

    default_theme = 'default'
    """Name of the default theme."""

    base_theme_file = '__base__'
    """Name of the file containing the name of the base theme."""

    direct_theme_files = (
        'slides.css', 'outline.css', 'print.css', 'opera.css', 'slides.js')
    """Names of theme files directly linked to in the output HTML"""

    indirect_theme_files = (
        's5-core.css', 'framing.css', 'pretty.css', 'blank.gif', 'iepngfix.htc')
    """Names of files used indirectly; imported or used by files in
    `direct_theme_files`."""

    required_theme_files = indirect_theme_files + direct_theme_files
    """Names of mandatory theme files."""

    def __init__(self, *args):
        html4css1.HTMLTranslator.__init__(self, *args)
        #insert S5-specific stylesheet and script stuff:
        self.theme_file_path = None
        self.setup_theme()
        view_mode = self.document.settings.view_mode
        # hidden_controls True selects 'hidden', False selects 'visible'.
        control_visibility = ('visible', 'hidden')[self.document.settings
                                                   .hidden_controls]
        self.stylesheet.append(self.s5_stylesheet_template
                               % {'path': self.theme_file_path,
                                  'view_mode': view_mode,
                                  'control_visibility': control_visibility})
        if not self.document.settings.current_slide:
            self.stylesheet.append(self.disable_current_slide)
        self.add_meta('<meta name="version" content="S5 1.1" />\n')
        # Collected header/footer markup, filled by depart_header/footer.
        self.s5_footer = []
        self.s5_header = []
        self.section_count = 0
        self.theme_files_copied = None

    def setup_theme(self):
        """Resolve the theme: copy an installed theme, or link a URL."""
        if self.document.settings.theme:
            self.copy_theme()
        elif self.document.settings.theme_url:
            self.theme_file_path = self.document.settings.theme_url
        else:
            raise docutils.ApplicationError(
                'No theme specified for S5/HTML writer.')

    def copy_theme(self):
        """
        Locate & copy theme files.

        A theme may be explicitly based on another theme via a '__base__'
        file. The default base theme is 'default'. Files are accumulated
        from the specified theme, any base themes, and 'default'.
        """
        settings = self.document.settings
        path = find_theme(settings.theme)
        theme_paths = [path]
        self.theme_files_copied = {}
        required_files_copied = {}
        # This is a link (URL) in HTML, so we use "/", not os.sep:
        self.theme_file_path = '%s/%s' % ('ui', settings.theme)
        if settings._destination:
            dest = os.path.join(
                os.path.dirname(settings._destination), 'ui', settings.theme)
            if not os.path.isdir(dest):
                os.makedirs(dest)
        else:
            # no destination, so we can't copy the theme
            return
        default = 0
        # Walk the theme inheritance chain, copying each theme's files.
        while path:
            for f in os.listdir(path):  # copy all files from each theme
                if f == self.base_theme_file:
                    continue  # ... except the "__base__" file
                if (self.copy_file(f, path, dest)
                    and f in self.required_theme_files):
                    required_files_copied[f] = 1
            if default:
                break  # "default" theme has no base theme
            # Find the "__base__" file in theme directory:
            base_theme_file = os.path.join(path, self.base_theme_file)
            # If it exists, read it and record the theme path:
            if os.path.isfile(base_theme_file):
                lines = open(base_theme_file).readlines()
                for line in lines:
                    line = line.strip()
                    if line and not line.startswith('#'):
                        path = find_theme(line)
                        if path in theme_paths:  # check for duplicates (cycles)
                            path = None  # if found, use default base
                        else:
                            theme_paths.append(path)
                        break
                else:  # no theme name found
                    path = None  # use default base
            else:  # no base theme file found
                path = None  # use default base
            if not path:
                path = find_theme(self.default_theme)
                theme_paths.append(path)
                default = 1
        if len(required_files_copied) != len(self.required_theme_files):
            # Some required files weren't found & couldn't be copied.
            required = list(self.required_theme_files)
            for f in required_files_copied.keys():
                required.remove(f)
            raise docutils.ApplicationError(
                'Theme files not found: %s'
                % ', '.join(['%r' % f for f in required]))

    # Editor backups and VCS metadata are never copied.
    files_to_skip_pattern = re.compile(r'~$|\.bak$|#$|\.cvsignore$')

    def copy_file(self, name, source_dir, dest_dir):
        """
        Copy file `name` from `source_dir` to `dest_dir`.
        Return 1 if the file exists in either `source_dir` or `dest_dir`.
        """
        source = os.path.join(source_dir, name)
        dest = os.path.join(dest_dir, name)
        if self.theme_files_copied.has_key(dest):
            return 1
        else:
            self.theme_files_copied[dest] = 1
        if os.path.isfile(source):
            if self.files_to_skip_pattern.search(source):
                return None
            settings = self.document.settings
            if os.path.exists(dest) and not settings.overwrite_theme_files:
                settings.record_dependencies.add(dest)
            else:
                src_file = open(source, 'rb')
                src_data = src_file.read()
                src_file.close()
                dest_file = open(dest, 'wb')
                # Rewrite 'ui/default' references so a copied theme points
                # at its own ui/<theme> directory:
                dest_dir = dest_dir.replace(os.sep, '/')
                dest_file.write(src_data.replace(
                    'ui/default', dest_dir[dest_dir.rfind('ui/'):]))
                dest_file.close()
                settings.record_dependencies.add(source)
            return 1
        if os.path.isfile(dest):
            return 1

    def depart_document(self, node):
        """Assemble the final body: S5 layout divs plus collected slides."""
        header = ''.join(self.s5_header)
        footer = ''.join(self.s5_footer)
        title = ''.join(self.html_title).replace('<h1 class="title">', '<h1>')
        layout = self.layout_template % {'header': header,
                                         'title': title,
                                         'footer': footer}
        self.fragment.extend(self.body)
        self.body_prefix.extend(layout)
        self.body_prefix.append('<div class="presentation">\n')
        self.body_prefix.append(
            self.starttag({'classes': ['slide'], 'ids': ['slide0']}, 'div'))
        if not self.section_count:
            self.body.append('</div>\n')
        self.body_suffix.insert(0, '</div>\n')
        # skip content-type meta tag with interpolated charset value:
        self.html_head.extend(self.head[1:])
        self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
                              + self.docinfo + self.body
                              + self.body_suffix[:-1])

    def depart_footer(self, node):
        # Move footer markup out of the body into the S5 footer slot.
        start = self.context.pop()
        self.s5_footer.append('<h2>')
        self.s5_footer.extend(self.body[start:])
        self.s5_footer.append('</h2>')
        del self.body[start:]

    def depart_header(self, node):
        # Move header markup out of the body into the S5 header slot.
        start = self.context.pop()
        header = ['<div id="header">\n']
        header.extend(self.body[start:])
        header.append('\n</div>\n')
        del self.body[start:]
        self.s5_header.extend(header)

    def visit_section(self, node):
        """Open a 'slide' div for top-level sections (plain 'section'
        divs for nested ones)."""
        if not self.section_count:
            self.body.append('\n</div>\n')
        self.section_count += 1
        self.section_level += 1
        if self.section_level > 1:
            # dummy for matching div's
            self.body.append(self.starttag(node, 'div', CLASS='section'))
        else:
            self.body.append(self.starttag(node, 'div', CLASS='slide'))

    def visit_subtitle(self, node):
        # Section subtitles are rendered as a heading one level below
        # the section title; document subtitles use the base behavior.
        if isinstance(node.parent, nodes.section):
            level = self.section_level + self.initial_header_level - 1
            if level == 1:
                level = 2
            tag = 'h%s' % level
            self.body.append(self.starttag(node, tag, ''))
            self.context.append('</%s>\n' % tag)
        else:
            html4css1.HTMLTranslator.visit_subtitle(self, node)

    def visit_title(self, node, move_ids=0):
        html4css1.HTMLTranslator.visit_title(self, node, move_ids=move_ids)
| {
"repo_name": "hugs/selenium",
"path": "selenium/src/py/lib/docutils/writers/s5_html/__init__.py",
"copies": "5",
"size": "14189",
"license": "apache-2.0",
"hash": -539343065299782660,
"line_mean": 39.8554572271,
"line_max": 80,
"alpha_frac": 0.5480301642,
"autogenerated": false,
"ratio": 3.982318271119843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005557072796377984,
"num_lines": 339
} |
# This example loads a large 800MB Hacker News comments dataset
# and preprocesses it. This can take a few hours, and a lot of
# memory, so please be patient!
from lda2vec import preprocess, Corpus
import numpy as np
import pandas as pd
import logging
import cPickle as pickle
import os.path
logging.basicConfig()
max_length = 250 # Limit of 250 words per comment
min_author_comments = 50 # Exclude authors with fewer comments
nrows = None # Number of rows of file to read; None reads in full file
fn = "hacker_news_comments.csv"
url = "https://zenodo.org/record/45901/files/hacker_news_comments.csv"
if not os.path.exists(fn):
import requests
response = requests.get(url, stream=True, timeout=2400)
with open(fn, 'w') as fh:
# Iterate over 1MB chunks
for data in response.iter_content(1024**2):
fh.write(data)
features = []
# Convert to unicode (spaCy only works with unicode)
features = pd.read_csv(fn, encoding='utf8', nrows=nrows)
# Convert all integer arrays to int32
for col, dtype in zip(features.columns, features.dtypes):
if dtype is np.dtype('int64'):
features[col] = features[col].astype('int32')
# Tokenize the texts
# If this fails it's likely spacy. Install a recent spacy version.
# Only the most recent versions have tokenization of noun phrases
# I'm using SHA dfd1a1d3a24b4ef5904975268c1bbb13ae1a32ff
# Also try running python -m spacy.en.download all --force
texts = features.pop('comment_text').values
tokens, vocab = preprocess.tokenize(texts, max_length, n_threads=4,
merge=True)
del texts
# Make a ranked list of rare vs frequent words
corpus = Corpus()
corpus.update_word_count(tokens)
corpus.finalize()
# The tokenization uses spaCy indices, and so may have gaps
# between indices for words that aren't present in our dataset.
# This builds a new compact index
compact = corpus.to_compact(tokens)
# Remove extremely rare words
pruned = corpus.filter_count(compact, min_count=10)
# Words tend to have power law frequency, so selectively
# downsample the most prevalent words
clean = corpus.subsample_frequent(pruned)
print "n_words", np.unique(clean).max()
# Extract numpy arrays over the fields we want covered by topics
# Convert to categorical variables
author_counts = features['comment_author'].value_counts()
to_remove = author_counts[author_counts < min_author_comments].index
mask = features['comment_author'].isin(to_remove).values
author_name = features['comment_author'].values.copy()
author_name[mask] = 'infrequent_author'
features['comment_author'] = author_name
authors = pd.Categorical(features['comment_author'])
author_id = authors.codes
author_name = authors.categories
story_id = pd.Categorical(features['story_id']).codes
# Chop timestamps into days
story_time = pd.to_datetime(features['story_time'], unit='s')
days_since = (story_time - story_time.min()) / pd.Timedelta('1 day')
time_id = days_since.astype('int32')
features['story_id_codes'] = story_id
features['author_id_codes'] = story_id
features['time_id_codes'] = time_id
print "n_authors", author_id.max()
print "n_stories", story_id.max()
print "n_times", time_id.max()
# Extract outcome supervised features
ranking = features['comment_ranking'].values
score = features['story_comment_count'].values
# Now flatten a 2D array of document per row and word position
# per column to a 1D array of words. This will also remove skips
# and OoV words
feature_arrs = (story_id, author_id, time_id, ranking, score)
flattened, features_flat = corpus.compact_to_flat(pruned, *feature_arrs)
# Flattened feature arrays
(story_id_f, author_id_f, time_id_f, ranking_f, score_f) = features_flat
# Save the data
pickle.dump(corpus, open('corpus', 'w'), protocol=2)
pickle.dump(vocab, open('vocab', 'w'), protocol=2)
features.to_pickle('features.pd')
data = dict(flattened=flattened, story_id=story_id_f, author_id=author_id_f,
time_id=time_id_f, ranking=ranking_f, score=score_f,
author_name=author_name, author_index=author_id)
np.savez('data', **data)
np.save(open('tokens', 'w'), tokens)
| {
"repo_name": "cemoody/lda2vec",
"path": "examples/hacker_news/data/preprocess.py",
"copies": "1",
"size": "4172",
"license": "mit",
"hash": 6543860040082700000,
"line_mean": 36.9272727273,
"line_max": 76,
"alpha_frac": 0.7293863854,
"autogenerated": false,
"ratio": 3.3084853291038856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9537871714503885,
"avg_score": 0,
"num_lines": 110
} |
# This simple example loads the newsgroups data from sklearn
# and trains an LDA-like model on it
import logging
import pickle
from sklearn.datasets import fetch_20newsgroups
import numpy as np
from lda2vec import preprocess, Corpus

logging.basicConfig()

# Fetch data, stripping quoted/boilerplate sections that would leak labels
remove = ('headers', 'footers', 'quotes')
texts = fetch_20newsgroups(subset='train', remove=remove).data
# Remove tokens with these substrings (ASCII-art / junk markers)
bad = set(["ax>", '`@("', '---', '===', '^^^'])
def clean(line, bad_tokens=None):
    """Return *line* with every whitespace-separated word dropped that
    contains any of the junk substrings.

    bad_tokens: iterable of substrings to filter on; defaults to the
    module-level ``bad`` set (backward compatible with the old
    zero-argument-plus-global behavior).
    """
    if bad_tokens is None:
        bad_tokens = bad
    return ' '.join(w for w in line.split()
                    if not any(t in w for t in bad_tokens))
# Preprocess data
max_length = 10000   # Limit of 10k words per document
# Convert to unicode (spaCy only works with unicode)
texts = [unicode(clean(d)) for d in texts]
tokens, vocab = preprocess.tokenize(texts, max_length, merge=False,
                                    n_threads=4)
corpus = Corpus()
# Make a ranked list of rare vs frequent words
corpus.update_word_count(tokens)
corpus.finalize()
# The tokenization uses spaCy indices, and so may have gaps
# between indices for words that aren't present in our dataset.
# This builds a new compact index
compact = corpus.to_compact(tokens)
# Remove extremely rare words
pruned = corpus.filter_count(compact, min_count=30)
# Convert the compactified arrays into bag of words arrays
bow = corpus.compact_to_bow(pruned)
# Words tend to have power law frequency, so selectively
# downsample the most prevalent words
clean = corpus.subsample_frequent(pruned)
# Now flatten a 2D array of document per row and word position
# per column to a 1D array of words. This will also remove skips
# and OoV words
doc_ids = np.arange(pruned.shape[0])
flattened, (doc_ids,) = corpus.compact_to_flat(pruned, doc_ids)
assert flattened.min() >= 0
# Fill in the pretrained word vectors (300-d GoogleNews word2vec)
n_dim = 300
fn_wordvc = 'GoogleNews-vectors-negative300.bin'
vectors, s, f = corpus.compact_word_vectors(vocab, filename=fn_wordvc)
# Save all of the preprocessed files for the training script
pickle.dump(vocab, open('vocab.pkl', 'w'))
pickle.dump(corpus, open('corpus.pkl', 'w'))
np.save("flattened", flattened)
np.save("doc_ids", doc_ids)
np.save("pruned", pruned)
np.save("bow", bow)
np.save("vectors", vectors)
| {
"repo_name": "cemoody/lda2vec",
"path": "examples/twenty_newsgroups/data/preprocess.py",
"copies": "1",
"size": "2233",
"license": "mit",
"hash": -2383055556073981000,
"line_mean": 33.890625,
"line_max": 76,
"alpha_frac": 0.7254814151,
"autogenerated": false,
"ratio": 3.3130563798219583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9537117340376504,
"avg_score": 0.0002840909090909091,
"num_lines": 64
} |
# This simple example loads the newsgroups data from sklearn
# and train an LDA-like model on it
import os
import os.path
import pickle
import time
import shelve

import chainer
from chainer import cuda
from chainer import serializers
import chainer.optimizers as O
import numpy as np

from lda2vec import utils
from lda2vec import prepare_topics, print_top_words_per_topic, topic_coherence
from lda2vec_model import LDA2Vec

# Select the GPU from the CUDA_GPU env var (defaults to device 0).
gpu_id = int(os.getenv('CUDA_GPU', 0))
cuda.get_device(gpu_id).use()
print "Using GPU " + str(gpu_id)

# Load the arrays produced by the preprocessing script.
data_dir = os.getenv('data_dir', '../data/')
fn_vocab = '{data_dir:s}/vocab.pkl'.format(data_dir=data_dir)
fn_corpus = '{data_dir:s}/corpus.pkl'.format(data_dir=data_dir)
fn_flatnd = '{data_dir:s}/flattened.npy'.format(data_dir=data_dir)
fn_docids = '{data_dir:s}/doc_ids.npy'.format(data_dir=data_dir)
fn_vectors = '{data_dir:s}/vectors.npy'.format(data_dir=data_dir)
vocab = pickle.load(open(fn_vocab, 'r'))
corpus = pickle.load(open(fn_corpus, 'r'))
flattened = np.load(fn_flatnd)
doc_ids = np.load(fn_docids)
vectors = np.load(fn_vectors)

# Model Parameters
# Number of documents
n_docs = doc_ids.max() + 1
# Number of unique words in the vocabulary
n_vocab = flattened.max() + 1
# 'Strength' of the dircihlet prior; 200.0 seems to work well
clambda = 200.0
# Number of topics to fit
n_topics = int(os.getenv('n_topics', 20))
batchsize = 4096
# Power for neg sampling
power = float(os.getenv('power', 0.75))
# Intialize with pretrained word vectors
pretrained = bool(int(os.getenv('pretrained', True)))
# Sampling temperature
temperature = float(os.getenv('temperature', 1.0))
# Number of dimensions in a single word vector
n_units = int(os.getenv('n_units', 300))
# Get the string representation for every compact key
words = corpus.word_list(vocab)[:n_vocab]
# How many tokens are in each document
doc_idx, lengths = np.unique(doc_ids, return_counts=True)
doc_lengths = np.zeros(doc_ids.max() + 1, dtype='int32')
doc_lengths[doc_idx] = lengths
# Count all token frequencies
tok_idx, freq = np.unique(flattened, return_counts=True)
term_frequency = np.zeros(n_vocab, dtype='int32')
term_frequency[tok_idx] = freq
for key in sorted(locals().keys()):
val = locals()[key]
if len(str(val)) < 100 and '<' not in str(val):
print key, val
model = LDA2Vec(n_documents=n_docs, n_document_topics=n_topics,
n_units=n_units, n_vocab=n_vocab, counts=term_frequency,
n_samples=15, power=power, temperature=temperature)
if os.path.exists('lda2vec.hdf5'):
print "Reloading from saved"
serializers.load_hdf5("lda2vec.hdf5", model)
if pretrained:
model.sampler.W.data[:, :] = vectors[:n_vocab, :]
model.to_gpu()
optimizer = O.Adam()
optimizer.setup(model)
clip = chainer.optimizer.GradientClipping(5.0)
optimizer.add_hook(clip)
j = 0
epoch = 0
fraction = batchsize * 1.0 / flattened.shape[0]
progress = shelve.open('progress.shelve')
for epoch in range(200):
data = prepare_topics(cuda.to_cpu(model.mixture.weights.W.data).copy(),
cuda.to_cpu(model.mixture.factors.W.data).copy(),
cuda.to_cpu(model.sampler.W.data).copy(),
words)
top_words = print_top_words_per_topic(data)
if j % 100 == 0 and j > 100:
coherence = topic_coherence(top_words)
for j in range(n_topics):
print j, coherence[(j, 'cv')]
kw = dict(top_words=top_words, coherence=coherence, epoch=epoch)
progress[str(epoch)] = pickle.dumps(kw)
data['doc_lengths'] = doc_lengths
data['term_frequency'] = term_frequency
np.savez('topics.pyldavis', **data)
for d, f in utils.chunks(batchsize, doc_ids, flattened):
t0 = time.time()
optimizer.zero_grads()
l = model.fit_partial(d.copy(), f.copy())
prior = model.prior()
loss = prior * fraction
loss.backward()
optimizer.update()
msg = ("J:{j:05d} E:{epoch:05d} L:{loss:1.3e} "
"P:{prior:1.3e} R:{rate:1.3e}")
prior.to_cpu()
loss.to_cpu()
t1 = time.time()
dt = t1 - t0
rate = batchsize / dt
logs = dict(loss=float(l), epoch=epoch, j=j,
prior=float(prior.data), rate=rate)
print msg.format(**logs)
j += 1
serializers.save_hdf5("lda2vec.hdf5", model)
| {
"repo_name": "cemoody/lda2vec",
"path": "examples/twenty_newsgroups/lda2vec/lda2vec_run.py",
"copies": "1",
"size": "4411",
"license": "mit",
"hash": -5107678301188548000,
"line_mean": 34.5725806452,
"line_max": 78,
"alpha_frac": 0.6585808207,
"autogenerated": false,
"ratio": 3.069589422407794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4228170243107794,
"avg_score": null,
"num_lines": null
} |
# This simple example loads the newsgroups data from sklearn
# and train an LDA-like model on it
import os.path
import pickle
import time
from chainer import serializers
from chainer import cuda
import chainer.optimizers as O
import numpy as np
from lda2vec import prepare_topics, print_top_words_per_topic
from lda2vec import utils
from lda import LDA
# Select the CUDA device from the CUDA_GPU env var (default: device 0).
gpu_id = int(os.getenv('CUDA_GPU', 0))
cuda.get_device(gpu_id).use()
print "Using GPU " + str(gpu_id)
# Load the vocabulary/corpus/bag-of-words artifacts written by preprocess.py.
vocab = pickle.load(open('vocab.pkl', 'r'))
corpus = pickle.load(open('corpus.pkl', 'r'))
bow = np.load("bow.npy").astype('float32')
# Remove bow counts on the first two tokens, which are <SKIP> and <EOS>
bow[:, :2] = 0
# Normalize bag of words to be a probability
# bow = bow / bow.sum(axis=1)[:, None]
# Number of docs
n_docs = bow.shape[0]
# Number of unique words in the vocabulary
n_vocab = bow.shape[1]
# Number of dimensions in a single word vector
n_units = 256
# number of topics
n_topics = 20
batchsize = 128
# NOTE(review): `counts` is computed but never used below -- confirm intent.
counts = corpus.keys_counts[:n_vocab]
# Get the string representation for every compact key
words = corpus.word_list(vocab)[:n_vocab]
model = LDA(n_docs, n_topics, n_units, n_vocab)
# Resume from a previous checkpoint if one exists in the working directory.
if os.path.exists('lda.hdf5'):
    print "Reloading from saved"
    serializers.load_hdf5("lda.hdf5", model)
model.to_gpu()
optimizer = O.Adam()
optimizer.setup(model)
j = 0
# Fraction of the corpus per minibatch (currently unused below).
fraction = batchsize * 1.0 / bow.shape[0]
for epoch in range(50000000):
    # Every 100 epochs, pull the weights back to the CPU and print topics.
    if epoch % 100 == 0:
        p = cuda.to_cpu(model.proportions.W.data).copy()
        f = cuda.to_cpu(model.factors.W.data).copy()
        w = cuda.to_cpu(model.embedding.W.data).copy()
        d = prepare_topics(p, f, w, words)
        print_top_words_per_topic(d)
    for (ids, batch) in utils.chunks(batchsize, np.arange(bow.shape[0]), bow):
        t0 = time.time()
        optimizer.zero_grads()
        # rec / ld: the two loss terms returned by the model -- presumably
        # reconstruction and the LDA prior penalty (confirm in lda.py).
        rec, ld = model.forward(ids, batch)
        l = rec + ld
        l.backward()
        optimizer.update()
        msg = ("J:{j:05d} E:{epoch:05d} L:{rec:1.3e} "
               "P:{ld:1.3e} R:{rate:1.3e}")
        l.to_cpu()
        rec.to_cpu()
        ld.to_cpu()
        t1 = time.time()
        dt = t1 - t0
        # examples processed per second
        rate = batchsize / dt
        logs = dict(rec=float(rec.data), epoch=epoch, j=j,
                    ld=float(ld.data), rate=rate)
        print msg.format(**logs)
        j += 1
    # Checkpoint on the same cadence as the topic printout.
    if epoch % 100 == 0:
        serializers.save_hdf5("lda.hdf5", model)
| {
"repo_name": "cemoody/lda2vec",
"path": "examples/twenty_newsgroups/lda/lda_run.py",
"copies": "1",
"size": "2451",
"license": "mit",
"hash": -6793085741310999000,
"line_mean": 29.2592592593,
"line_max": 78,
"alpha_frac": 0.6344349245,
"autogenerated": false,
"ratio": 2.9817518248175183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41161867493175186,
"avg_score": null,
"num_lines": null
} |
# This simple example loads the newsgroups data from sklearn
# and train an LDA-like model on it
import os.path
import pickle
import time
from chainer import serializers
import chainer.optimizers as O
import numpy as np
from lda2vec import utils
from nvdm import NVDM
# Load the vocabulary/corpus/bag-of-words artifacts written by preprocess.py.
vocab = pickle.load(open('vocab.pkl', 'r'))
corpus = pickle.load(open('corpus.pkl', 'r'))
bow = np.load("bow.npy").astype('float32')
# Remove bow counts on the first two tokens, which are <SKIP> and <EOS>
bow[:, :2] = 0
# Normalize bag of words to be a probability
# NOTE(review): a row that is all-zero after the masking above would divide
# by zero here -- confirm that every document has at least one real token.
bow = bow / bow.sum(axis=1)[:, None]
# Number of unique words in the vocabulary
n_vocab = bow.shape[1]
# Number of dimensions in a single word vector
n_units = 256
batchsize = 128
# NOTE(review): `counts` is computed but never used below -- confirm intent.
counts = corpus.keys_counts[:n_vocab]
# Get the string representation for every compact key
words = corpus.word_list(vocab)[:n_vocab]
model = NVDM(n_vocab, n_units)
# Resume from a previous checkpoint if one exists in the working directory.
if os.path.exists('nvdm.hdf5'):
    print "Reloading from saved"
    serializers.load_hdf5("nvdm.hdf5", model)
# model.to_gpu()
optimizer = O.Adam()
optimizer.setup(model)
j = 0
# Fraction of the corpus per minibatch (currently unused below).
fraction = batchsize * 1.0 / bow.shape[0]
for epoch in range(500):
    for (batch,) in utils.chunks(batchsize, bow):
        t0 = time.time()
        # rec / kl: the two loss terms returned by the NVDM -- presumably
        # reconstruction and KL divergence (confirm in nvdm.py).
        rec, kl = model.observe(batch)
        optimizer.zero_grads()
        l = rec + kl
        l.backward()
        optimizer.update()
        msg = ("J:{j:05d} E:{epoch:05d} L:{rec:1.3e} "
               "P:{kl:1.3e} R:{rate:1.3e}")
        l.to_cpu()
        rec.to_cpu()
        kl.to_cpu()
        t1 = time.time()
        dt = t1 - t0
        # examples processed per second
        rate = batchsize / dt
        logs = dict(rec=float(rec.data), epoch=epoch, j=j,
                    kl=float(kl.data), rate=rate)
        print msg.format(**logs)
        j += 1
    # Checkpoint once per epoch.
    serializers.save_hdf5("nvdm.hdf5", model)
| {
"repo_name": "cemoody/lda2vec",
"path": "examples/twenty_newsgroups/nvdm/nvdm_run.py",
"copies": "1",
"size": "1839",
"license": "mit",
"hash": -5795243133006661000,
"line_mean": 27.734375,
"line_max": 67,
"alpha_frac": 0.6356715606,
"autogenerated": false,
"ratio": 3.059900166389351,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4195571726989351,
"avg_score": null,
"num_lines": null
} |
# This simple example loads the newsgroups data from sklearn
# and train an LDA-like model on it
import os.path
import pickle
import time
import chainer
from chainer import cuda
from chainer import serializers
import chainer.optimizers as O
import numpy as np
from lda2vec import utils
from lda2vec import prepare_topics, print_top_words_per_topic
from lda2vec_model import LDA2Vec
# Select the CUDA device (note: `os` is bound here via `import os.path` above).
gpu_id = int(os.getenv('CUDA_GPU', 0))
cuda.get_device(gpu_id).use()
print "Using GPU " + str(gpu_id)
# You must run preprocess.py before this data becomes available
vocab = pickle.load(open('../data/vocab', 'r'))
corpus = pickle.load(open('../data/corpus', 'r'))
data = np.load(open('../data/data.npz', 'r'))
flattened = data['flattened']
story_id = data['story_id']
author_id = data['author_id']
# NOTE(review): time_id, ranking and score are loaded but never used below.
time_id = data['time_id']
ranking = data['ranking'].astype('float32')
score = data['score'].astype('float32')
# Model Parameters
# Number of documents
n_stories = story_id.max() + 1
# Number of users
n_authors = author_id.max() + 1
# Number of unique words in the vocabulary
n_vocab = flattened.max() + 1
# Number of dimensions in a single word vector
n_units = 256
# Number of topics to fit
n_story_topics = 40
n_author_topics = 20
batchsize = 4096
# Get the string representation for every compact key
words = corpus.word_list(vocab)[:n_vocab]
# How many tokens are in each story
sty_idx, lengths = np.unique(story_id, return_counts=True)
sty_len = np.zeros(sty_idx.max() + 1, dtype='int32')
sty_len[sty_idx] = lengths
# How many tokens are in each author
aut_idx, lengths = np.unique(author_id, return_counts=True)
aut_len = np.zeros(aut_idx.max() + 1, dtype='int32')
aut_len[aut_idx] = lengths
# Count all token frequencies
tok_idx, freq = np.unique(flattened, return_counts=True)
term_frequency = np.zeros(n_vocab, dtype='int32')
term_frequency[tok_idx] = freq
model = LDA2Vec(n_stories=n_stories, n_story_topics=n_story_topics,
                n_authors=n_authors, n_author_topics=n_author_topics,
                n_units=n_units, n_vocab=n_vocab, counts=term_frequency,
                n_samples=15)
# Resume from a previous checkpoint if one exists in the working directory.
if os.path.exists('lda2vec.hdf5'):
    print "Reloading from saved"
    serializers.load_hdf5("lda2vec.hdf5", model)
model.to_gpu()
optimizer = O.Adam()
optimizer.setup(model)
clip = chainer.optimizer.GradientClipping(5.0)
optimizer.add_hook(clip)
j = 0
epoch = 0
# Fraction of the corpus per minibatch; scales the prior loss below.
fraction = batchsize * 1.0 / flattened.shape[0]
for epoch in range(5000):
    # Dump story-topic snapshots (pyLDAvis format) at the top of every epoch...
    ts = prepare_topics(cuda.to_cpu(model.mixture_sty.weights.W.data).copy(),
                        cuda.to_cpu(model.mixture_sty.factors.W.data).copy(),
                        cuda.to_cpu(model.sampler.W.data).copy(),
                        words)
    print_top_words_per_topic(ts)
    ts['doc_lengths'] = sty_len
    ts['term_frequency'] = term_frequency
    np.savez('topics.story.pyldavis', **ts)
    # ...and author-topic snapshots.
    ta = prepare_topics(cuda.to_cpu(model.mixture_aut.weights.W.data).copy(),
                        cuda.to_cpu(model.mixture_aut.factors.W.data).copy(),
                        cuda.to_cpu(model.sampler.W.data).copy(),
                        words)
    print_top_words_per_topic(ta)
    ta['doc_lengths'] = aut_len
    ta['term_frequency'] = term_frequency
    np.savez('topics.author.pyldavis', **ta)
    for s, a, f in utils.chunks(batchsize, story_id, author_id, flattened):
        t0 = time.time()
        optimizer.zero_grads()
        l = model.fit_partial(s.copy(), a.copy(), f.copy())
        prior = model.prior()
        # Scale the prior term by the minibatch fraction of the corpus.
        loss = prior * fraction
        loss.backward()
        optimizer.update()
        msg = ("J:{j:05d} E:{epoch:05d} L:{loss:1.3e} "
               "P:{prior:1.3e} R:{rate:1.3e}")
        prior.to_cpu()
        loss.to_cpu()
        t1 = time.time()
        dt = t1 - t0
        # examples processed per second
        rate = batchsize / dt
        logs = dict(loss=float(l), epoch=epoch, j=j,
                    prior=float(prior.data), rate=rate)
        print msg.format(**logs)
        j += 1
    # Checkpoint once per epoch.
    serializers.save_hdf5("lda2vec.hdf5", model)
| {
"repo_name": "cemoody/lda2vec",
"path": "examples/hacker_news/lda2vec/lda2vec_run.py",
"copies": "1",
"size": "4044",
"license": "mit",
"hash": 4763528635687371000,
"line_mean": 32.9831932773,
"line_max": 77,
"alpha_frac": 0.6456478734,
"autogenerated": false,
"ratio": 3.117964533538936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.926155674596487,
"avg_score": 0.0004111321948134092,
"num_lines": 119
} |
def _print_zone_info(zoneinfo):
print "="*80
print "| ID: %s" % zoneinfo['Id'].split("/")[-1]
print "| Name: %s" % zoneinfo['Name']
print "| Ref: %s" % zoneinfo['CallerReference']
print "="*80
print zoneinfo['Config']
print
def create(conn, hostname, caller_reference=None, comment=''):
"""Create a hosted zone, returning the nameservers"""
response = conn.create_hosted_zone(hostname, caller_reference, comment)
print "Pending, please add the following Name Servers:"
for ns in response.NameServers:
print "\t", ns
def delete_zone(conn, hosted_zone_id):
"""Delete a hosted zone by ID"""
response = conn.delete_hosted_zone(hosted_zone_id)
print response
def ls(conn):
    """Print a summary banner for every hosted zone on this connection."""
    listing = conn.get_all_hosted_zones()
    for info in listing['ListHostedZonesResponse']['HostedZones']:
        _print_zone_info(info)
def get(conn, hosted_zone_id, type=None, name=None, maxitems=None):
"""Get all the records for a single zone"""
response = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=maxitems)
# If a maximum number of items was set, we limit to that number
# by turning the response into an actual list (copying it)
# instead of allowing it to page
if maxitems:
response = response[:]
print '%-40s %-5s %-20s %s' % ("Name", "Type", "TTL", "Value(s)")
for record in response:
print '%-40s %-5s %-20s %s' % (record.name, record.type, record.ttl, record.to_print())
def _add_del(conn, hosted_zone_id, change, name, type, identifier, weight, values, ttl, comment):
from boto.route53.record import ResourceRecordSets
changes = ResourceRecordSets(conn, hosted_zone_id, comment)
change = changes.add_change(change, name, type, ttl,
identifier=identifier, weight=weight)
for value in values.split(','):
change.add_value(value)
print changes.commit()
def _add_del_alias(conn, hosted_zone_id, change, name, type, identifier, weight, alias_hosted_zone_id, alias_dns_name, comment):
from boto.route53.record import ResourceRecordSets
changes = ResourceRecordSets(conn, hosted_zone_id, comment)
change = changes.add_change(change, name, type,
identifier=identifier, weight=weight)
change.set_alias(alias_hosted_zone_id, alias_dns_name)
print changes.commit()
def add_record(conn, hosted_zone_id, name, type, values, ttl=600,
               identifier=None, weight=None, comment=""):
    """Add a new record to a zone. identifier and weight are optional."""
    _add_del(conn, hosted_zone_id, "CREATE", name, type,
             identifier, weight, values, ttl, comment)
def del_record(conn, hosted_zone_id, name, type, values, ttl=600,
               identifier=None, weight=None, comment=""):
    """Delete a record from a zone: name, type, ttl, identifier, and weight must match."""
    _add_del(conn, hosted_zone_id, "DELETE", name, type,
             identifier, weight, values, ttl, comment)
def add_alias(conn, hosted_zone_id, name, type, alias_hosted_zone_id,
              alias_dns_name, identifier=None, weight=None, comment=""):
    """Add a new alias to a zone. identifier and weight are optional."""
    _add_del_alias(conn, hosted_zone_id, "CREATE", name, type,
                   identifier, weight, alias_hosted_zone_id,
                   alias_dns_name, comment)
def del_alias(conn, hosted_zone_id, name, type, alias_hosted_zone_id,
              alias_dns_name, identifier=None, weight=None, comment=""):
    """Delete an alias from a zone: name, type, alias_hosted_zone_id, alias_dns_name, weight and identifier must match."""
    _add_del_alias(conn, hosted_zone_id, "DELETE", name, type,
                   identifier, weight, alias_hosted_zone_id,
                   alias_dns_name, comment)
def change_record(conn, hosted_zone_id, name, type, newvalues, ttl=600,
                  identifier=None, weight=None, comment=""):
    """Delete and then add a record to a zone. identifier and weight are optional.

    Both operations go into a single ResourceRecordSets batch, so they are
    committed together.  NOTE(review): if no existing record matches, nothing
    is committed at all -- the CREATE only happens inside the match loop.
    """
    from boto.route53.record import ResourceRecordSets
    changes = ResourceRecordSets(conn, hosted_zone_id, comment)
    # Assume there are not more than 10 WRRs for a given (name, type)
    responses = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=10)
    for response in responses:
        # Skip record sets that don't exactly match the requested identity.
        if response.name != name or response.type != type:
            continue
        if response.identifier != identifier or response.weight != weight:
            continue
        # DELETE must carry the old TTL and all old values to be accepted.
        change1 = changes.add_change("DELETE", name, type, response.ttl,
                                     identifier=response.identifier,
                                     weight=response.weight)
        for old_value in response.resource_records:
            change1.add_value(old_value)
        # CREATE the replacement with the new TTL and comma-separated values.
        change2 = changes.add_change("CREATE", name, type, ttl,
                                     identifier=identifier, weight=weight)
        for new_value in newvalues.split(','):
            change2.add_value(new_value)
    print changes.commit()
def change_alias(conn, hosted_zone_id, name, type, new_alias_hosted_zone_id, new_alias_dns_name, identifier=None, weight=None, comment=""):
    """Delete and then add an alias to a zone. identifier and weight are optional.

    Mirrors change_record() but for alias record sets; both operations are
    committed in one batch.  NOTE(review): as with change_record, no commit
    payload is built unless an existing alias matches.
    """
    from boto.route53.record import ResourceRecordSets
    changes = ResourceRecordSets(conn, hosted_zone_id, comment)
    # Assume there are not more than 10 WRRs for a given (name, type)
    responses = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=10)
    for response in responses:
        # Skip record sets that don't exactly match the requested identity.
        if response.name != name or response.type != type:
            continue
        if response.identifier != identifier or response.weight != weight:
            continue
        # DELETE the old alias target...
        change1 = changes.add_change("DELETE", name, type,
                                     identifier=response.identifier,
                                     weight=response.weight)
        change1.set_alias(response.alias_hosted_zone_id, response.alias_dns_name)
        # ...and CREATE the new one in the same batch.
        change2 = changes.add_change("CREATE", name, type, identifier=identifier, weight=weight)
        change2.set_alias(new_alias_hosted_zone_id, new_alias_dns_name)
    print changes.commit()
def help(conn, fnc=None):
"""Prints this help message"""
import inspect
self = sys.modules['__main__']
if fnc:
try:
cmd = getattr(self, fnc)
except:
cmd = None
if not inspect.isfunction(cmd):
print "No function named: %s found" % fnc
sys.exit(2)
(args, varargs, varkw, defaults) = inspect.getargspec(cmd)
print cmd.__doc__
print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args[1:]]))
else:
print "Usage: route53 [command]"
for cname in dir(self):
if not cname.startswith("_"):
cmd = getattr(self, cname)
if inspect.isfunction(cmd):
doc = cmd.__doc__
print "\t%-20s %s" % (cname, doc)
sys.exit(1)
if __name__ == "__main__":
import boto
import sys
conn = boto.connect_route53()
self = sys.modules['__main__']
if len(sys.argv) >= 2:
try:
cmd = getattr(self, sys.argv[1])
except:
cmd = None
args = sys.argv[2:]
else:
cmd = help
args = []
if not cmd:
cmd = help
try:
cmd(conn, *args)
except TypeError, e:
print e
help(conn, cmd.__name__)
| {
"repo_name": "milannic/expCPython",
"path": "concoord-1.0.2/build/lib.linux-x86_64-2.7/concoord/route53.py",
"copies": "3",
"size": "9010",
"license": "mit",
"hash": 1712553647002299100,
"line_mean": 43.603960396,
"line_max": 139,
"alpha_frac": 0.6445061043,
"autogenerated": false,
"ratio": 3.47473968376398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0031192755262520245,
"num_lines": 202
} |
__author__ = 'chris'
from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol
class WSProtocol(WebSocketServerProtocol):
    """One websocket client connection.

    Registers itself with its factory while open so the factory's push()
    can broadcast to every live client.
    """
    def onOpen(self):
        # Join the factory's broadcast list as soon as the socket is up.
        self.factory.register(self)
    def onMessage(self, payload, isBinary):
        """
        handle outgoing messages and notifications here
        """
    def connectionLost(self, reason):
        # Let the base class do its bookkeeping, then drop this connection
        # from the factory's broadcast list.
        WebSocketServerProtocol.connectionLost(self, reason)
        self.factory.unregister(self)
class WSFactory(WebSocketServerFactory):
    """Broadcast factory: tracks the open client connections and pushes
    any message it is handed to all of them."""
    def __init__(self, url, mserver, debug=False, debugCodePaths=False):
        WebSocketServerFactory.__init__(self, url, debug=debug, debugCodePaths=debugCodePaths)
        self.mserver = mserver
        self.clients = []
    def register(self, client):
        """Track a newly opened client connection (idempotent)."""
        if client in self.clients:
            return
        self.clients.append(client)
    def unregister(self, client):
        """Forget a closed client connection (no-op if unknown)."""
        if client in self.clients:
            self.clients.remove(client)
    def push(self, msg):
        """Send msg to every currently connected client."""
        for peer in self.clients:
            peer.sendMessage(msg)
| {
"repo_name": "bankonme/OpenBazaar-Server",
"path": "ws.py",
"copies": "2",
"size": "1175",
"license": "mit",
"hash": 3861046384917417000,
"line_mean": 27.6585365854,
"line_max": 94,
"alpha_frac": 0.6689361702,
"autogenerated": false,
"ratio": 4.3357933579335795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6004729528133579,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chris'
from binascii import unhexlify
import dht.constants
import mock
import nacl.signing
import nacl.hash
from txrudp import packet, connection, rudp, constants
from twisted.internet import udp, address, task
from twisted.trial import unittest
from dht.crawling import RPCFindResponse, NodeSpiderCrawl, ValueSpiderCrawl
from dht.node import Node, NodeHeap
from dht.utils import digest
from dht.storage import ForgetfulStorage
from dht.protocol import KademliaProtocol
from protos.objects import Value
from wireprotocol import OpenBazaarProtocol
class ValueSpiderCrawlTest(unittest.TestCase):
    """Tests for ValueSpiderCrawl.

    A real KademliaProtocol is wired to a mocked txrudp multiplexer, and
    twisted's task.Clock replaces the reactor timer so packet timeouts can
    be advanced deterministically without sockets or real sleeps.
    """
    def setUp(self):
        # Our own identity plus three fake remote peers.
        self.public_ip = '123.45.67.89'
        self.port = 12345
        self.own_addr = (self.public_ip, self.port)
        self.addr1 = ('132.54.76.98', 54321)
        self.addr2 = ('231.76.45.89', 15243)
        self.addr3 = ("193.193.111.00", 99999)
        # Patch REACTOR.callLater so the tests control time directly.
        self.clock = task.Clock()
        connection.REACTOR.callLater = self.clock.callLater
        self.proto_mock = mock.Mock(spec_set=rudp.ConnectionMultiplexer)
        self.handler_mock = mock.Mock(spec_set=connection.Handler)
        self.con = connection.Connection(
            self.proto_mock,
            self.handler_mock,
            self.own_addr,
            self.addr1
        )
        # Node identity: guid is built from the first 40 hex chars of
        # sha512(signed public key).
        valid_key = "1a5c8e67edb8d279d1ae32fa2da97e236b95e95c837dc8c3c7c2ff7a7cc29855"
        self.signing_key = nacl.signing.SigningKey(valid_key, encoder=nacl.encoding.HexEncoder)
        verify_key = self.signing_key.verify_key
        signed_pubkey = self.signing_key.sign(str(verify_key))
        h = nacl.hash.sha512(signed_pubkey)
        self.storage = ForgetfulStorage()
        self.node = Node(unhexlify(h[:40]), self.public_ip, self.port, signed_pubkey, True)
        self.protocol = KademliaProtocol(self.node, self.storage, 20)
        self.wire_protocol = OpenBazaarProtocol(self.own_addr)
        self.wire_protocol.register_processor(self.protocol)
        self.protocol.connect_multiplexer(self.wire_protocol)
        self.handler = self.wire_protocol.ConnHandler([self.protocol])
        # Fake UDP transport so makeConnection() succeeds without a socket.
        transport = mock.Mock(spec_set=udp.Port)
        ret_val = address.IPv4Address('UDP', self.public_ip, self.port)
        transport.attach_mock(mock.Mock(return_value=ret_val), 'getHost')
        self.wire_protocol.makeConnection(transport)
        self.node1 = Node(digest("id1"), self.addr1[0], self.addr1[1], digest("key1"), True)
        self.node2 = Node(digest("id2"), self.addr2[0], self.addr2[1], digest("key2"), True)
        self.node3 = Node(digest("id3"), self.addr3[0], self.addr3[1], digest("key3"), True)
    def tearDown(self):
        self.con.shutdown()
        self.wire_protocol.shutdown()
    def test_find(self):
        # With three contacts in the routing table, find() should result in
        # exactly 4 datagrams on the mocked multiplexer.
        self._connecting_to_connected()
        self.wire_protocol[self.addr1] = self.con
        self.wire_protocol[self.addr2] = self.con
        self.wire_protocol[self.addr3] = self.con
        self.protocol.router.addContact(self.node1)
        self.protocol.router.addContact(self.node2)
        self.protocol.router.addContact(self.node3)
        node = Node(digest("s"))
        nearest = self.protocol.router.findNeighbors(node)
        spider = ValueSpiderCrawl(self.protocol, node, nearest,
                                  dht.constants.KSIZE, dht.constants.ALPHA)
        spider.find()
        self.clock.advance(100 * constants.PACKET_TIMEOUT)
        connection.REACTOR.runUntilCurrent()
        self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)
    def test_nodesFound(self):
        self._connecting_to_connected()
        self.wire_protocol[self.addr1] = self.con
        self.wire_protocol[self.addr2] = self.con
        self.wire_protocol[self.addr3] = self.con
        self.protocol.router.addContact(self.node1)
        self.protocol.router.addContact(self.node2)
        self.protocol.router.addContact(self.node3)
        # test response with uncontacted nodes
        node = Node(digest("s"))
        nearest = self.protocol.router.findNeighbors(node)
        spider = ValueSpiderCrawl(self.protocol, node, nearest, dht.constants.KSIZE, dht.constants.ALPHA)
        response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
                           self.node3.getProto().SerializeToString()))
        responses = {self.node1.id: response}
        spider._nodesFound(responses)
        self.clock.advance(100 * constants.PACKET_TIMEOUT)
        connection.REACTOR.runUntilCurrent()
        self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)
        # test all been contacted: with no uncontacted peers left the crawl
        # returns None
        spider = ValueSpiderCrawl(self.protocol, node, nearest, dht.constants.KSIZE, dht.constants.ALPHA)
        for peer in spider.nearest.getUncontacted():
            spider.nearest.markContacted(peer)
        response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
                           self.node3.getProto().SerializeToString()))
        responses = {self.node2.id: response}
        resp = spider._nodesFound(responses)
        self.assertTrue(resp is None)
        # test didn't happen: a failed RPC removes that peer from `nearest`
        spider = ValueSpiderCrawl(self.protocol, node, nearest, dht.constants.KSIZE, dht.constants.ALPHA)
        response = (False, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
                            self.node3.getProto().SerializeToString()))
        responses = {self.node1.id: response}
        spider._nodesFound(responses)
        self.assertTrue(len(spider.nearest) == 2)
        # test got value: a "value" payload short-circuits the crawl
        val = Value()
        val.valueKey = digest("contractID")
        val.serializedData = self.protocol.sourceNode.getProto().SerializeToString()
        response = (True, ("value", val.SerializeToString()))
        responses = {self.node3.id: response}
        spider.nearestWithoutValue = NodeHeap(node, 1)
        value = spider._nodesFound(responses)
        self.assertEqual(value[0], val.SerializeToString())
    def test_handleFoundValues(self):
        self._connecting_to_connected()
        self.wire_protocol[self.addr1] = self.con
        self.protocol.router.addContact(self.node1)
        self.protocol.router.addContact(self.node2)
        self.protocol.router.addContact(self.node3)
        node = Node(digest("s"))
        nearest = self.protocol.router.findNeighbors(node)
        spider = ValueSpiderCrawl(self.protocol, node, nearest, dht.constants.KSIZE, dht.constants.ALPHA)
        val = Value()
        val.valueKey = digest("contractID")
        val.serializedData = self.node1.getProto().SerializeToString()
        val1 = val.SerializeToString()
        value = spider._handleFoundValues([(val1,)])
        self.assertEqual(value[0], val.SerializeToString())
        # test handle multiple values: the most common serialization wins
        val.serializedData = self.node2.getProto().SerializeToString()
        val2 = val.SerializeToString()
        found_values = [(val1,), (val1,), (val2,)]
        self.assertEqual(spider._handleFoundValues(found_values), (val1,))
        # test store value at nearest without value: triggers an outbound store
        spider.nearestWithoutValue.push(self.node1)
        spider._handleFoundValues(found_values)
        self.clock.advance(100 * constants.PACKET_TIMEOUT)
        connection.REACTOR.runUntilCurrent()
        self.assertTrue(len(self.proto_mock.send_datagram.call_args_list) > 1)
        self.proto_mock.send_datagram.call_args_list = []
    def _connecting_to_connected(self):
        # Drive self.con through the rudp handshake: feed it a remote
        # SYN/ACK, run the clock, then record sequence numbers so later
        # packets line up, and reset the mocks' call history.
        remote_synack_packet = packet.Packet.from_data(
            42,
            self.con.own_addr,
            self.con.dest_addr,
            ack=0,
            syn=True
        )
        self.con.receive_packet(remote_synack_packet)
        self.clock.advance(0)
        connection.REACTOR.runUntilCurrent()
        self.next_remote_seqnum = 43
        m_calls = self.proto_mock.send_datagram.call_args_list
        sent_syn_packet = packet.Packet.from_bytes(m_calls[0][0][0])
        seqnum = sent_syn_packet.sequence_number
        self.handler_mock.reset_mock()
        self.proto_mock.reset_mock()
        self.next_seqnum = seqnum + 1
class NodeSpiderCrawlTest(unittest.TestCase):
def setUp(self):
self.public_ip = '123.45.67.89'
self.port = 12345
self.own_addr = (self.public_ip, self.port)
self.addr1 = ('132.54.76.98', 54321)
self.addr2 = ('231.76.45.89', 15243)
self.addr3 = ("193.193.111.00", 99999)
self.clock = task.Clock()
connection.REACTOR.callLater = self.clock.callLater
self.proto_mock = mock.Mock(spec_set=rudp.ConnectionMultiplexer)
self.handler_mock = mock.Mock(spec_set=connection.Handler)
self.con = connection.Connection(
self.proto_mock,
self.handler_mock,
self.own_addr,
self.addr1
)
valid_key = "1a5c8e67edb8d279d1ae32fa2da97e236b95e95c837dc8c3c7c2ff7a7cc29855"
self.signing_key = nacl.signing.SigningKey(valid_key, encoder=nacl.encoding.HexEncoder)
verify_key = self.signing_key.verify_key
signed_pubkey = self.signing_key.sign(str(verify_key))
h = nacl.hash.sha512(signed_pubkey)
self.storage = ForgetfulStorage()
self.node = Node(unhexlify(h[:40]), self.public_ip, self.port, signed_pubkey, True)
self.protocol = KademliaProtocol(self.node, self.storage, 20)
self.wire_protocol = OpenBazaarProtocol(self.own_addr)
self.wire_protocol.register_processor(self.protocol)
self.protocol.connect_multiplexer(self.wire_protocol)
self.handler = self.wire_protocol.ConnHandler([self.protocol])
transport = mock.Mock(spec_set=udp.Port)
ret_val = address.IPv4Address('UDP', self.public_ip, self.port)
transport.attach_mock(mock.Mock(return_value=ret_val), 'getHost')
self.wire_protocol.makeConnection(transport)
self.node1 = Node(digest("id1"), self.addr1[0], self.addr1[1], digest("key1"), True)
self.node2 = Node(digest("id2"), self.addr2[0], self.addr2[1], digest("key2"), True)
self.node3 = Node(digest("id3"), self.addr3[0], self.addr3[1], digest("key3"), True)
def test_find(self):
self._connecting_to_connected()
self.wire_protocol[self.addr1] = self.con
self.wire_protocol[self.addr2] = self.con
self.wire_protocol[self.addr3] = self.con
self.protocol.router.addContact(self.node1)
self.protocol.router.addContact(self.node2)
self.protocol.router.addContact(self.node3)
node = Node(digest("s"))
nearest = self.protocol.router.findNeighbors(node)
spider = NodeSpiderCrawl(self.protocol, node, nearest, 20, 3)
spider.find()
self.clock.advance(100 * constants.PACKET_TIMEOUT)
connection.REACTOR.runUntilCurrent()
self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)
def test_nodesFound(self):
self._connecting_to_connected()
self.wire_protocol[self.addr1] = self.con
self.wire_protocol[self.addr2] = self.con
self.wire_protocol[self.addr3] = self.con
self.protocol.router.addContact(self.node1)
self.protocol.router.addContact(self.node2)
self.protocol.router.addContact(self.node3)
node = Node(digest("s"))
nearest = self.protocol.router.findNeighbors(node)
spider = NodeSpiderCrawl(self.protocol, node, nearest, 20, 3)
response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
self.node3.getProto().SerializeToString()))
responses = {self.node1.id: response}
spider._nodesFound(responses)
self.clock.advance(100 * constants.PACKET_TIMEOUT)
connection.REACTOR.runUntilCurrent()
self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)
response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
self.node3.getProto().SerializeToString()))
responses = {self.node1.id: response}
nodes = spider._nodesFound(responses)
node_protos = []
for n in nodes:
node_protos.append(n.getProto())
self.assertTrue(self.node1.getProto() in node_protos)
self.assertTrue(self.node2.getProto() in node_protos)
self.assertTrue(self.node3.getProto() in node_protos)
response = (False, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
self.node3.getProto().SerializeToString()))
responses = {self.node1.id: response}
nodes = spider._nodesFound(responses)
node_protos = []
for n in nodes:
node_protos.append(n.getProto())
self.assertTrue(self.node2.getProto() in node_protos)
self.assertTrue(self.node3.getProto() in node_protos)
    def _connecting_to_connected(self):
        """Complete the rudp handshake on self.con (inject a remote syn-ack,
        record sequence numbers) and reset the mocks for the test body."""
        remote_synack_packet = packet.Packet.from_data(
            42,
            self.con.own_addr,
            self.con.dest_addr,
            ack=0,
            syn=True
        )
        self.con.receive_packet(remote_synack_packet)
        self.clock.advance(0)
        connection.REACTOR.runUntilCurrent()
        self.next_remote_seqnum = 43

        # Capture the sequence number of the syn packet we sent so follow-up
        # packets can continue the sequence.
        m_calls = self.proto_mock.send_datagram.call_args_list
        sent_syn_packet = packet.Packet.from_bytes(m_calls[0][0][0])
        seqnum = sent_syn_packet.sequence_number
        self.handler_mock.reset_mock()
        self.proto_mock.reset_mock()
        self.next_seqnum = seqnum + 1
class RPCFindResponseTest(unittest.TestCase):
    """Unit tests for the RPCFindResponse wrapper around raw RPC tuples."""

    def test_happened(self):
        # The boolean at index 0 reports whether the RPC received a reply.
        response = (True, ("value", "some_value"))
        r = RPCFindResponse(response)
        self.assertTrue(r.happened())
        response = (False, ("value", "some_value"))
        r = RPCFindResponse(response)
        self.assertFalse(r.happened())

    def test_hasValue(self):
        # A payload tuple starting with "value" marks a value response.
        response = (True, ("value", "some_value"))
        r = RPCFindResponse(response)
        self.assertTrue(r.hasValue())
        response = (False, "a node")
        r = RPCFindResponse(response)
        self.assertFalse(r.hasValue())

    def test_getValue(self):
        response = (True, ("value", "some_value"))
        r = RPCFindResponse(response)
        self.assertEqual(r.getValue(), ("some_value",))

    def test_getNodeList(self):
        node1 = Node(digest("id1"), "127.0.0.1", 12345, signed_pubkey=digest("key1"), vendor=True)
        node2 = Node(digest("id2"), "127.0.0.1", 22222, signed_pubkey=digest("key2"), vendor=True)
        node3 = Node(digest("id3"), "127.0.0.1", 77777, signed_pubkey=digest("key3"))
        # The payload deliberately ends with a malformed entry; the test only
        # asserts the three valid nodes come back in order.
        response = (True, (node1.getProto().SerializeToString(), node2.getProto().SerializeToString(),
                           node3.getProto().SerializeToString(),
                           "sdfasdfsd"))
        r = RPCFindResponse(response)
        nodes = r.getNodeList()
        self.assertEqual(nodes[0].getProto(), node1.getProto())
        self.assertEqual(nodes[1].getProto(), node2.getProto())
        self.assertEqual(nodes[2].getProto(), node3.getProto())
| {
"repo_name": "eXcomm/OpenBazaar-Server",
"path": "dht/tests/test_crawling.py",
"copies": "3",
"size": "15291",
"license": "mit",
"hash": -806074362060223000,
"line_mean": 41.1239669421,
"line_max": 113,
"alpha_frac": 0.6449545484,
"autogenerated": false,
"ratio": 3.58943661971831,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.573439116811831,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chris'
from binascii import unhexlify
import mock
import nacl.signing
import nacl.hash
from txrudp import packet, connection, rudp, constants
from twisted.internet import udp, address, task
from twisted.trial import unittest
from dht.crawling import RPCFindResponse, NodeSpiderCrawl, ValueSpiderCrawl
from dht.node import Node, NodeHeap
from dht.utils import digest
from dht.storage import ForgetfulStorage
from dht.protocol import KademliaProtocol
from protos.objects import Value
from wireprotocol import OpenBazaarProtocol
from db.datastore import Database
from constants import ALPHA, KSIZE
class ValueSpiderCrawlTest(unittest.TestCase):
    """Exercises ValueSpiderCrawl against a KademliaProtocol wired to a
    mocked txrudp transport and a deterministic clock (no real network)."""

    def setUp(self):
        # Fake addresses for this node and three remote peers.
        self.public_ip = '123.45.67.89'
        self.port = 12345
        self.own_addr = (self.public_ip, self.port)
        self.addr1 = ('132.54.76.98', 54321)
        self.addr2 = ('231.76.45.89', 15243)
        self.addr3 = ("193.193.111.00", 99999)

        # Deterministic clock so packet timeouts can be advanced manually.
        self.clock = task.Clock()
        connection.REACTOR.callLater = self.clock.callLater

        self.proto_mock = mock.Mock(spec_set=rudp.ConnectionMultiplexer)
        self.handler_mock = mock.Mock(spec_set=connection.Handler)
        self.con = connection.Connection(
            self.proto_mock,
            self.handler_mock,
            self.own_addr,
            self.addr1
        )

        # Build this node's identity from a fixed signing key so runs are
        # reproducible.
        valid_key = "1a5c8e67edb8d279d1ae32fa2da97e236b95e95c837dc8c3c7c2ff7a7cc29855"
        self.signing_key = nacl.signing.SigningKey(valid_key, encoder=nacl.encoding.HexEncoder)
        verify_key = self.signing_key.verify_key
        signed_pubkey = self.signing_key.sign(str(verify_key))
        h = nacl.hash.sha512(signed_pubkey)
        self.storage = ForgetfulStorage()
        self.node = Node(unhexlify(h[:40]), self.public_ip, self.port, signed_pubkey, True)
        self.db = Database(filepath=":memory:")
        self.protocol = KademliaProtocol(self.node, self.storage, 20, self.db)
        self.wire_protocol = OpenBazaarProtocol(self.own_addr)
        self.wire_protocol.register_processor(self.protocol)
        self.protocol.connect_multiplexer(self.wire_protocol)
        self.handler = self.wire_protocol.ConnHandler([self.protocol], self.wire_protocol)

        # Mock UDP transport so makeConnection succeeds without a socket.
        transport = mock.Mock(spec_set=udp.Port)
        ret_val = address.IPv4Address('UDP', self.public_ip, self.port)
        transport.attach_mock(mock.Mock(return_value=ret_val), 'getHost')
        self.wire_protocol.makeConnection(transport)

        # Three remote peers used throughout the tests.
        self.node1 = Node(digest("id1"), self.addr1[0], self.addr1[1], digest("key1"), True)
        self.node2 = Node(digest("id2"), self.addr2[0], self.addr2[1], digest("key2"), True)
        self.node3 = Node(digest("id3"), self.addr3[0], self.addr3[1], digest("key3"), True)

    def tearDown(self):
        self.con.shutdown()
        self.wire_protocol.shutdown()

    def test_find(self):
        """find() should send lookup RPCs for the nearest known peers."""
        self._connecting_to_connected()
        self.wire_protocol[self.addr1] = self.con
        self.wire_protocol[self.addr2] = self.con
        self.wire_protocol[self.addr3] = self.con
        self.protocol.router.addContact(self.node1)
        self.protocol.router.addContact(self.node2)
        self.protocol.router.addContact(self.node3)
        node = Node(digest("s"))
        nearest = self.protocol.router.findNeighbors(node)
        spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
        spider.find()
        self.clock.advance(100 * constants.PACKET_TIMEOUT)
        connection.REACTOR.runUntilCurrent()
        self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)

    def test_nodesFound(self):
        """_nodesFound across all branches: uncontacted peers, everyone
        contacted, a failed RPC, and a value response."""
        self._connecting_to_connected()
        self.wire_protocol[self.addr1] = self.con
        self.wire_protocol[self.addr2] = self.con
        self.wire_protocol[self.addr3] = self.con
        self.protocol.router.addContact(self.node1)
        self.protocol.router.addContact(self.node2)
        self.protocol.router.addContact(self.node3)
        # test response with uncontacted nodes
        node = Node(digest("s"))
        nearest = self.protocol.router.findNeighbors(node)
        spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
        response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
                           self.node3.getProto().SerializeToString()))
        responses = {self.node1.id: response}
        spider._nodesFound(responses)
        self.clock.advance(100 * constants.PACKET_TIMEOUT)
        connection.REACTOR.runUntilCurrent()
        self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)
        # test all been contacted
        spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
        for peer in spider.nearest.getUncontacted():
            spider.nearest.markContacted(peer)
        response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
                           self.node3.getProto().SerializeToString()))
        responses = {self.node2.id: response}
        resp = spider._nodesFound(responses)
        self.assertTrue(resp is None)
        # test didn't happen
        spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
        response = (False, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
                            self.node3.getProto().SerializeToString()))
        responses = {self.node1.id: response}
        spider._nodesFound(responses)
        self.assertTrue(len(spider.nearest) == 2)
        # test got value
        val = Value()
        val.valueKey = digest("contractID")
        val.serializedData = self.protocol.sourceNode.getProto().SerializeToString()
        response = (True, ("value", val.SerializeToString()))
        responses = {self.node3.id: response}
        spider.nearestWithoutValue = NodeHeap(node, 1)
        value = spider._nodesFound(responses)
        self.assertEqual(value[0], val.SerializeToString())

    def test_handleFoundValues(self):
        """_handleFoundValues picks the majority value and re-stores it at
        the nearest node that lacked it."""
        self._connecting_to_connected()
        self.wire_protocol[self.addr1] = self.con
        self.protocol.router.addContact(self.node1)
        self.protocol.router.addContact(self.node2)
        self.protocol.router.addContact(self.node3)
        node = Node(digest("s"))
        nearest = self.protocol.router.findNeighbors(node)
        spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
        val = Value()
        val.valueKey = digest("contractID")
        val.serializedData = self.node1.getProto().SerializeToString()
        val1 = val.SerializeToString()
        value = spider._handleFoundValues([(val1,)])
        self.assertEqual(value[0], val.SerializeToString())
        # test handle multiple values
        val.serializedData = self.node2.getProto().SerializeToString()
        val2 = val.SerializeToString()
        found_values = [(val1,), (val1,), (val2,)]
        self.assertEqual(spider._handleFoundValues(found_values), (val1,))
        # test store value at nearest without value
        spider.nearestWithoutValue.push(self.node1)
        spider._handleFoundValues(found_values)
        self.clock.advance(100 * constants.PACKET_TIMEOUT)
        connection.REACTOR.runUntilCurrent()
        self.assertTrue(len(self.proto_mock.send_datagram.call_args_list) > 1)
        self.proto_mock.send_datagram.call_args_list = []

    def _connecting_to_connected(self):
        """Complete the rudp handshake on self.con and reset the mocks."""
        remote_synack_packet = packet.Packet.from_data(
            42,
            self.con.own_addr,
            self.con.dest_addr,
            ack=0,
            syn=True
        )
        self.con.receive_packet(remote_synack_packet)
        self.clock.advance(0)
        connection.REACTOR.runUntilCurrent()
        self.next_remote_seqnum = 43

        # Record our own syn's sequence number for subsequent packets.
        m_calls = self.proto_mock.send_datagram.call_args_list
        sent_syn_packet = packet.Packet.from_bytes(m_calls[0][0][0])
        seqnum = sent_syn_packet.sequence_number
        self.handler_mock.reset_mock()
        self.proto_mock.reset_mock()
        self.next_seqnum = seqnum + 1
class NodeSpiderCrawlTest(unittest.TestCase):
    """Exercises NodeSpiderCrawl with the same mocked-transport harness as
    ValueSpiderCrawlTest (deterministic clock, no real network)."""

    def setUp(self):
        # Fake addresses for this node and three remote peers.
        self.public_ip = '123.45.67.89'
        self.port = 12345
        self.own_addr = (self.public_ip, self.port)
        self.addr1 = ('132.54.76.98', 54321)
        self.addr2 = ('231.76.45.89', 15243)
        self.addr3 = ("193.193.111.00", 99999)

        # Deterministic clock so packet timeouts can be advanced manually.
        self.clock = task.Clock()
        connection.REACTOR.callLater = self.clock.callLater

        self.proto_mock = mock.Mock(spec_set=rudp.ConnectionMultiplexer)
        self.handler_mock = mock.Mock(spec_set=connection.Handler)
        self.con = connection.Connection(
            self.proto_mock,
            self.handler_mock,
            self.own_addr,
            self.addr1
        )

        # Build this node's identity from a fixed signing key.
        valid_key = "1a5c8e67edb8d279d1ae32fa2da97e236b95e95c837dc8c3c7c2ff7a7cc29855"
        self.signing_key = nacl.signing.SigningKey(valid_key, encoder=nacl.encoding.HexEncoder)
        verify_key = self.signing_key.verify_key
        signed_pubkey = self.signing_key.sign(str(verify_key))
        h = nacl.hash.sha512(signed_pubkey)
        self.storage = ForgetfulStorage()
        self.node = Node(unhexlify(h[:40]), self.public_ip, self.port, signed_pubkey, True)
        self.db = Database(filepath=":memory:")
        self.protocol = KademliaProtocol(self.node, self.storage, 20, self.db)
        self.wire_protocol = OpenBazaarProtocol(self.own_addr)
        self.wire_protocol.register_processor(self.protocol)
        self.protocol.connect_multiplexer(self.wire_protocol)
        self.handler = self.wire_protocol.ConnHandler([self.protocol], self.wire_protocol)

        # Mock UDP transport so makeConnection succeeds without a socket.
        transport = mock.Mock(spec_set=udp.Port)
        ret_val = address.IPv4Address('UDP', self.public_ip, self.port)
        transport.attach_mock(mock.Mock(return_value=ret_val), 'getHost')
        self.wire_protocol.makeConnection(transport)

        # Three remote peers used throughout the tests.
        self.node1 = Node(digest("id1"), self.addr1[0], self.addr1[1], digest("key1"), True)
        self.node2 = Node(digest("id2"), self.addr2[0], self.addr2[1], digest("key2"), True)
        self.node3 = Node(digest("id3"), self.addr3[0], self.addr3[1], digest("key3"), True)

    def test_find(self):
        """find() should send lookup RPCs for the nearest known peers."""
        self._connecting_to_connected()
        self.wire_protocol[self.addr1] = self.con
        self.wire_protocol[self.addr2] = self.con
        self.wire_protocol[self.addr3] = self.con
        self.protocol.router.addContact(self.node1)
        self.protocol.router.addContact(self.node2)
        self.protocol.router.addContact(self.node3)
        node = Node(digest("s"))
        nearest = self.protocol.router.findNeighbors(node)
        spider = NodeSpiderCrawl(self.protocol, node, nearest, 20, 3)
        spider.find()
        self.clock.advance(100 * constants.PACKET_TIMEOUT)
        connection.REACTOR.runUntilCurrent()
        self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)

    def test_nodesFound(self):
        """_nodesFound: query uncontacted peers, return found nodes, and
        drop a peer whose RPC did not happen."""
        self._connecting_to_connected()
        self.wire_protocol[self.addr1] = self.con
        self.wire_protocol[self.addr2] = self.con
        self.wire_protocol[self.addr3] = self.con
        self.protocol.router.addContact(self.node1)
        self.protocol.router.addContact(self.node2)
        self.protocol.router.addContact(self.node3)
        node = Node(digest("s"))
        nearest = self.protocol.router.findNeighbors(node)
        spider = NodeSpiderCrawl(self.protocol, node, nearest, 20, 3)

        # Successful response: spider contacts the remaining peers.
        response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
                           self.node3.getProto().SerializeToString()))
        responses = {self.node1.id: response}
        spider._nodesFound(responses)
        self.clock.advance(100 * constants.PACKET_TIMEOUT)
        connection.REACTOR.runUntilCurrent()
        self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)

        # Everyone contacted: _nodesFound returns the accumulated nodes.
        response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
                           self.node3.getProto().SerializeToString()))
        responses = {self.node1.id: response}
        nodes = spider._nodesFound(responses)
        node_protos = []
        for n in nodes:
            node_protos.append(n.getProto())
        self.assertTrue(self.node1.getProto() in node_protos)
        self.assertTrue(self.node2.getProto() in node_protos)
        self.assertTrue(self.node3.getProto() in node_protos)

        # Failed RPC: node1 is removed, only node2 and node3 remain.
        response = (False, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
                            self.node3.getProto().SerializeToString()))
        responses = {self.node1.id: response}
        nodes = spider._nodesFound(responses)
        node_protos = []
        for n in nodes:
            node_protos.append(n.getProto())
        self.assertTrue(self.node2.getProto() in node_protos)
        self.assertTrue(self.node3.getProto() in node_protos)

    def _connecting_to_connected(self):
        """Complete the rudp handshake on self.con and reset the mocks."""
        remote_synack_packet = packet.Packet.from_data(
            42,
            self.con.own_addr,
            self.con.dest_addr,
            ack=0,
            syn=True
        )
        self.con.receive_packet(remote_synack_packet)
        self.clock.advance(0)
        connection.REACTOR.runUntilCurrent()
        self.next_remote_seqnum = 43

        # Record our own syn's sequence number for subsequent packets.
        m_calls = self.proto_mock.send_datagram.call_args_list
        sent_syn_packet = packet.Packet.from_bytes(m_calls[0][0][0])
        seqnum = sent_syn_packet.sequence_number
        self.handler_mock.reset_mock()
        self.proto_mock.reset_mock()
        self.next_seqnum = seqnum + 1
class RPCFindResponseTest(unittest.TestCase):
    """Unit tests for the RPCFindResponse wrapper around raw RPC tuples."""

    def test_happened(self):
        # Index 0 of the tuple signals whether the RPC received a reply.
        self.assertTrue(RPCFindResponse((True, ("value", "some_value"))).happened())
        self.assertFalse(RPCFindResponse((False, ("value", "some_value"))).happened())

    def test_hasValue(self):
        # A payload starting with "value" marks a value response.
        self.assertTrue(RPCFindResponse((True, ("value", "some_value"))).hasValue())
        self.assertFalse(RPCFindResponse((False, "a node")).hasValue())

    def test_getValue(self):
        wrapper = RPCFindResponse((True, ("value", "some_value")))
        self.assertEqual(wrapper.getValue(), ("some_value",))

    def test_getNodeList(self):
        node1 = Node(digest("id1"), "127.0.0.1", 12345, signed_pubkey=digest("key1"), vendor=True)
        node2 = Node(digest("id2"), "127.0.0.1", 22222, signed_pubkey=digest("key2"), vendor=True)
        node3 = Node(digest("id3"), "127.0.0.1", 77777, signed_pubkey=digest("key3"))
        # Payload deliberately ends with a malformed entry; the three valid
        # nodes must still come back in order.
        payload = (node1.getProto().SerializeToString(),
                   node2.getProto().SerializeToString(),
                   node3.getProto().SerializeToString(),
                   "sdfasdfsd")
        nodes = RPCFindResponse((True, payload)).getNodeList()
        for expected, found in zip((node1, node2, node3), nodes):
            self.assertEqual(found.getProto(), expected.getProto())
| {
"repo_name": "Joaz/OpenBazaar-Server",
"path": "dht/tests/test_crawling.py",
"copies": "2",
"size": "15319",
"license": "mit",
"hash": 825506423789790700,
"line_mean": 40.9698630137,
"line_max": 113,
"alpha_frac": 0.6450812716,
"autogenerated": false,
"ratio": 3.5943219145940875,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5239403186194087,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chris'
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
def get(key, default):
    """Look up *key* on the Django settings object, falling back to *default*."""
    value = getattr(settings, key, default)
    return value
# Wooey configuration values. Each one is read from the Django settings
# module via get() above, with the fallback shown here.

# AUTH based settings
WOOEY_ALLOW_ANONYMOUS = get('WOOEY_ALLOW_ANONYMOUS', True)
WOOEY_AUTH = get('WOOEY_AUTH', True)
WOOEY_LOGIN_URL = get('WOOEY_LOGIN_URL', settings.LOGIN_URL)
WOOEY_REGISTER_URL = get('WOOEY_REGISTER_URL', '/accounts/register/')

# Celery and job queue settings
WOOEY_CELERY = get('WOOEY_CELERY', True)
WOOEY_CELERY_TASKS = get('WOOEY_CELERY_TASKS', 'wooey.tasks')
# Only an amqp broker supports stopping a job mid-run.
WOOEY_CELERY_STOPPABLE_JOBS = 'amqp' in get('CELERY_BROKER_URL', '')

# Site setup settings
WOOEY_DEFAULT_SCRIPT_GROUP = get('WOOEY_DEFAULT_SCRIPT_GROUP', _('Scripts'))
WOOEY_EPHEMERAL_FILES = get('WOOEY_EPHEMERAL_FILES', False)
WOOEY_FILE_DIR = get('WOOEY_FILE_DIR', 'wooey_files')
# None means job output is kept forever for that user class.
WOOEY_JOB_EXPIRATION = get('WOOEY_JOB_EXPIRATION', {'anonymous': None, 'users': None})
WOOEY_REALTIME_CACHE = get('WOOEY_REALTIME_CACHE', None)
WOOEY_SCRIPT_DIR = get('WOOEY_SCRIPT_DIR', 'wooey_scripts')

# User interface settings
WOOEY_SHOW_LOCKED_SCRIPTS = get('WOOEY_SHOW_LOCKED_SCRIPTS', True)
WOOEY_SITE_NAME = get('WOOEY_SITE_NAME', _('Wooey!'))
WOOEY_SITE_TAG = get('WOOEY_SITE_TAG', _('A web UI for Python scripts'))
| {
"repo_name": "wooey/Wooey",
"path": "wooey/settings.py",
"copies": "1",
"size": "1273",
"license": "bsd-3-clause",
"hash": 2882682810500737000,
"line_mean": 38.78125,
"line_max": 86,
"alpha_frac": 0.7227022781,
"autogenerated": false,
"ratio": 2.641078838174274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8703086834178034,
"avg_score": 0.032138856419248005,
"num_lines": 32
} |
{
"repo_name": "hottwaj/Wooey",
"path": "wooey/views/mixins.py",
"copies": "4",
"size": "1736",
"license": "bsd-3-clause",
"hash": 8870189604861213000,
"line_mean": 39.3720930233,
"line_max": 152,
"alpha_frac": 0.6630184332,
"autogenerated": false,
"ratio": 3.116696588868941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001103586310318801,
"num_lines": 43
} | |
__author__ = 'chris'
from OpenSSL import SSL
from twisted.internet import ssl
class ChainedOpenSSLContextFactory(ssl.DefaultOpenSSLContextFactory):
    # Context factory that serves a full certificate *chain* file instead of
    # the single-certificate file the Twisted default supports.

    def __init__(self, privateKeyFileName, certificateChainFileName,
                 sslmethod=SSL.SSLv23_METHOD):
        """
        @param privateKeyFileName: Name of a file containing a private key
        @param certificateChainFileName: Name of a file containing a certificate chain
        @param sslmethod: The SSL method to use
        """
        self.privateKeyFileName = privateKeyFileName
        self.certificateChainFileName = certificateChainFileName
        self.sslmethod = sslmethod
        # NOTE(review): the parent __init__ below also calls cacheContext(),
        # so this context is built twice; presumably harmless but confirm
        # against the Twisted DefaultOpenSSLContextFactory implementation
        # before removing either call.
        self.cacheContext()
        ssl.DefaultOpenSSLContextFactory.__init__(self, privateKeyFileName,
                                                  certificateChainFileName, self.sslmethod)

    def cacheContext(self):
        # Override: load the whole chain file rather than a single cert.
        ctx = SSL.Context(self.sslmethod)
        ctx.use_certificate_chain_file(self.certificateChainFileName)
        ctx.use_privatekey_file(self.privateKeyFileName)
        self._context = ctx
| {
"repo_name": "OpenBazaar/OpenBazaar-Server",
"path": "net/sslcontext.py",
"copies": "7",
"size": "1076",
"license": "mit",
"hash": -374273050424855700,
"line_mean": 40.3846153846,
"line_max": 91,
"alpha_frac": 0.6802973978,
"autogenerated": false,
"ratio": 4.846846846846847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9027144244646848,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chris'
from twisted.internet import reactor, task
from protos.message import Command, PING, STUN, STORE, HOLE_PUNCH, INV, VALUES
from log import Logger
from constants import SEED_NODE, SEED_NODE_TESTNET
class BanScore(object):
    """Per-peer denial-of-service guard.

    Accumulates a score for each message type a peer sends; when a score
    crosses its threshold the peer's IP is banned via the multiplexer for
    `ban_time` seconds. Scores decay every 30 seconds in adjust_scores().
    """

    def __init__(self, peer_ip, multiplexer, ban_time=86400):
        """
        Args:
            peer_ip: (ip, port) tuple identifying the remote peer.
            multiplexer: the connection multiplexer used to enforce bans.
            ban_time: seconds a banned IP stays banned (default 24 hours).
        """
        self.peer_ip = peer_ip
        self.multiplexer = multiplexer
        self.ban_time = ban_time
        # Running score per scored command type.
        # BUG FIX: VALUES was missing from this dict, so the first VALUES
        # message raised KeyError in process_message.
        self.scores = {
            PING: 0,
            STUN: 0,
            STORE: 0,
            INV: 0,
            VALUES: 0,
        }
        # Decay the scores every 30 seconds.
        self.scoring_loop = task.LoopingCall(self.adjust_scores)
        self.scoring_loop.start(30)
        self.log = Logger(system=self)

    def process_message(self, message):
        """Score an incoming message; ban the peer if a threshold is crossed."""
        if message.command == PING:
            self.scores[PING] += 1
            if self.scores[PING] > 4:
                self.ban(PING)
        elif message.command == STUN:
            self.scores[STUN] += 1
            if self.scores[STUN] > 1:
                self.ban(STUN)
        elif message.command == HOLE_PUNCH:
            # Only the seed nodes may send hole-punch requests.
            # BUG FIX: the original used `or`, which is true for every peer
            # (including the seeds themselves) whenever the two seed IPs
            # differ — i.e. it banned everyone. The peer must match neither
            # seed to be banned.
            if self.peer_ip[0] != SEED_NODE[0] and self.peer_ip[0] != SEED_NODE_TESTNET[0]:
                self.ban(HOLE_PUNCH)
        elif message.command == STORE:
            # Score STORE by payload size so bulk writes are rate-limited.
            args = tuple(message.arguments)
            for arg in args:
                self.scores[STORE] += len(arg)
                if self.scores[STORE] > 1000000:
                    self.ban(STORE)
        elif message.command == INV:
            self.scores[INV] += 30
            if self.scores[INV] > 120:
                self.ban(INV)
        elif message.command == VALUES:
            self.scores[VALUES] += 30
            if self.scores[VALUES] > 120:
                self.ban(VALUES)

    def ban(self, message_type):
        """Ban this peer's IP, shut its connection, and schedule the unban."""
        self.log.warning("Banned %s. Reason: too many %s messages." %
                         (self.peer_ip[0], Command.Name(message_type)))
        self.multiplexer.ban_ip(self.peer_ip[0])
        self.multiplexer[self.peer_ip].shutdown()
        reactor.callLater(self.ban_time, self.multiplexer.remove_ip_ban, self.peer_ip[0])

    def adjust_scores(self):
        """Periodic decay: STORE drops by 350 (it is byte-scored), others by 1."""
        for k, v in self.scores.items():
            if v > 0:
                if k == STORE:
                    self.scores[k] = v - 350
                else:
                    self.scores[k] = v - 1
| {
"repo_name": "hauxir/OpenBazaar-Server",
"path": "net/dos.py",
"copies": "1",
"size": "2291",
"license": "mit",
"hash": -7546997388569179000,
"line_mean": 34.796875,
"line_max": 90,
"alpha_frac": 0.5351374945,
"autogenerated": false,
"ratio": 3.5464396284829722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45815771229829727,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chris'
from unittest import TestCase
import subprocess
import os
import shutil
import sys
# Paths for the bootstrap test; the throwaway project is generated inside
# this test module's directory and removed between runs.
BASE_DIR = os.path.split(__file__)[0]
WOOEY_SCRIPT_PATH = os.path.join(BASE_DIR, '..', 'scripts', 'wooify')
WOOEY_TEST_PROJECT_NAME = 'wooey_project'
WOOEY_TEST_PROJECT_PATH = os.path.join(BASE_DIR, WOOEY_TEST_PROJECT_NAME)
WOOEY_TEST_PROJECT_MANAGE = os.path.join(WOOEY_TEST_PROJECT_PATH, 'manage.py')
# Fall back to bare 'python' when sys.executable is empty (embedded interpreters).
PYTHON_INTERPRETTER = sys.executable if sys.executable else 'python'

# env aliases os.environ, so these settings are inherited by subprocesses.
env = os.environ
env['DJANGO_SETTINGS_MODULE'] = '{}.settings'.format(WOOEY_TEST_PROJECT_NAME)
env['TESTING'] = 'True'
class TestProject(TestCase):
    """Checks that the wooify bootstrap creates a project, and that running
    it again over an existing project exits with status 1."""

    def _remove_generated_project(self):
        # Work from the test directory and drop any leftover generated project.
        os.chdir(BASE_DIR)
        if os.path.exists(WOOEY_TEST_PROJECT_PATH):
            shutil.rmtree(WOOEY_TEST_PROJECT_PATH)

    def setUp(self):
        self._remove_generated_project()

    def tearDown(self):
        self._remove_generated_project()

    def test_bootstrap(self):
        from wooey.backend import command_line
        sys.argv = [WOOEY_SCRIPT_PATH, '-p', WOOEY_TEST_PROJECT_NAME]
        ret = command_line.bootstrap(env=env, cwd=BASE_DIR)
        self.assertIsNone(ret)
        # The console script must also be runnable directly; since the project
        # now exists, a second invocation is expected to fail with code 1.
        proc = subprocess.Popen([PYTHON_INTERPRETTER, WOOEY_SCRIPT_PATH, '-p', WOOEY_TEST_PROJECT_NAME])
        stdout, stderr = proc.communicate()
        self.assertEqual(proc.returncode, 1, stderr)
| {
"repo_name": "hottwaj/Wooey",
"path": "tests/test_project.py",
"copies": "4",
"size": "1558",
"license": "bsd-3-clause",
"hash": -4403777348932068000,
"line_mean": 37,
"line_max": 104,
"alpha_frac": 0.6810012837,
"autogenerated": false,
"ratio": 3.2256728778467907,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001008815033205277,
"num_lines": 41
} |
__author__ = 'chris'
from zope.interface.verify import verifyObject
from txrudp.rudp import ConnectionMultiplexer
from txrudp.connection import HandlerFactory, Handler
from txrudp.crypto_connection import CryptoConnectionFactory
from interfaces import MessageProcessor
from protos.message import Message, FIND_VALUE
from log import Logger
from dht.node import Node
class OpenBazaarProtocol(ConnectionMultiplexer):
    """
    A protocol extending the txrudp datagram protocol. This is the main protocol
    which gets passed into the twisted UDPServer. It handles the setup and tear down
    of all connections, parses messages coming off the wire and passes them off to
    the appropriate classes for processing.
    """

    def __init__(self, ip_address, testnet=False):
        """
        Initialize the new protocol with the connection handler factory.

        Args:
            ip_address: a `tuple` of the (ip address, port) of ths node.
            testnet: run against the test network when True.
        """
        self.ip_address = ip_address
        self.testnet = testnet
        self.ws = None
        self.blockchain = None
        # Registered MessageProcessor implementations; shared (by reference)
        # with every ConnHandler the factory creates.
        self.processors = []
        self.factory = self.ConnHandlerFactory(self.processors, self)
        ConnectionMultiplexer.__init__(self, CryptoConnectionFactory(self.factory), self.ip_address[0])

    class ConnHandler(Handler):
        """Per-connection handler: parses datagrams and dispatches them to
        whichever registered processor handles the message's command."""

        def __init__(self, processors, active_connections, *args, **kwargs):
            super(OpenBazaarProtocol.ConnHandler, self).__init__(*args, **kwargs)
            self.log = Logger(system=self)
            self.processors = processors
            self.active_connections = active_connections
            self.connection = None
            # Remote peer's Node, learned from the first parsed message.
            self.node = None

        def receive_message(self, datagram):
            # Anything below 166 bytes cannot be a valid serialized Message.
            if len(datagram) < 166:
                self.log.warning("received datagram too small from %s, ignoring" % str(self.connection.dest_addr))
                return False
            m = Message()
            try:
                m.ParseFromString(datagram)
                self.node = Node(m.sender.guid, m.sender.ip, m.sender.port,
                                 m.sender.signedPublicKey, m.sender.vendor)
                for processor in self.processors:
                    if m.command in processor:
                        processor.receive_message(datagram, self.connection)
            except Exception:
                # If message isn't formatted properly then ignore
                self.log.warning("Received unknown message from %s, ignoring" % str(self.connection.dest_addr))
                return False

        def handle_shutdown(self):
            # Drop the connection from the multiplexer's table and remove the
            # peer from any DHT routing table that knows it.
            del self.active_connections[self.connection.dest_addr]
            if self.node is not None:
                for processor in self.processors:
                    if FIND_VALUE in processor:
                        processor.router.removeContact(self.node)
            # BUG FIX: this statement originally ended with a stray '\'
            # line continuation that fused it with the following class
            # definition, producing a SyntaxError.
            self.log.info(
                "Connection with (%s, %s) terminated" % (self.connection.dest_addr[0],
                                                         self.connection.dest_addr[1]))

    class ConnHandlerFactory(HandlerFactory):

        def __init__(self, processors, active_connections):
            super(OpenBazaarProtocol.ConnHandlerFactory, self).__init__()
            self.processors = processors
            # (fixed internal typo: was `active_connecitons`)
            self.active_connections = active_connections

        def make_new_handler(self, *args, **kwargs):
            return OpenBazaarProtocol.ConnHandler(self.processors, self.active_connections)

    def register_processor(self, processor):
        """Add a new class which implements the `MessageProcessor` interface."""
        if verifyObject(MessageProcessor, processor):
            self.processors.append(processor)

    def unregister_processor(self, processor):
        """Unregister the given processor."""
        if processor in self.processors:
            self.processors.remove(processor)

    def set_servers(self, ws, blockchain):
        self.ws = ws
        self.blockchain = blockchain

    def send_message(self, datagram, address):
        """
        Sends a datagram over the wire to the given address. It will create a new rudp connection if one
        does not already exist for this peer.

        Args:
            datagram: the raw data to send over the wire
            address: a `tuple` of (ip address, port) of the recipient.
        """
        if address not in self:
            con = self.make_new_connection((self.ip_address[0], self.ip_address[1]), address)
        else:
            con = self[address]
        con.send_message(datagram)
| {
"repo_name": "melpomene/OpenBazaar-Server",
"path": "wireprotocol.py",
"copies": "1",
"size": "4583",
"license": "mit",
"hash": -6045484732270255000,
"line_mean": 40.6636363636,
"line_max": 114,
"alpha_frac": 0.6233907921,
"autogenerated": false,
"ratio": 4.537623762376238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010564627551416432,
"num_lines": 110
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.