| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
utils.py |
import jsonpickle
import json as serializer
from pkg_resources import Requirement, resource_filename
import os
import csv
from Crypto.Cipher import ARC4
import base64
import socket
import getpass
from solidfire.factory import ElementFactory
from filelock import FileLock
import sys
def kv_string_to_dict(kv_string):
    new_dict = {}
    items = kv_string.split(',')
    for item in items:
        kvs = item.split('=')
        new_dict[kvs[0]] = kvs[1]
    return new_dict
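# A quick sanity check of the parser above (hypothetical input, shown as a
# doctest-style sketch; assumes the `return new_dict` added above):
#
#     >>> kv_string_to_dict('name=vol1,access=readWrite')
#     {'name': 'vol1', 'access': 'readWrite'}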
def print_result(objs, log, as_json=False, as_pickle=False, depth=None, filter_tree=None):
# There are 3 acceptable parameter sets to provide:
# 1. json=True, depth=None, filter_tree=None
# 2. json=False, depth=#, filter_tree=None
# 3. json=False, depth=#, filter_tree=acceptable string
# Error case
if as_json and (depth is not None or filter_tree is not None):
log.error("If you choose to print it as json, do not provide a depth or filter. Those are for printing it as a tree.")
exit()
"""
SDK1.6 Note:
Since print_tree is not supported in 1.6, when both the available output formats
json and pickle formats are set to False, change the default output format (pickle) to True.
"""
if as_json == False and as_pickle == False:
as_pickle = True
# If json is true, we print it as json and return:
if as_json == True or as_pickle == True:
print_result_as_json(objs, as_pickle)
return
"""
SDK1.6 Note:
Commenting out these lines as print_tree is not supported in 1.6.
"""
"""
# If we have a filter, apply it.
if filter_tree is not None:
try:
objs_to_print = filter_objects_from_simple_keypaths(objs, filter_tree.split(','))
except Exception as e:
log.error(e.args[0])
exit(1)
else:
objs_to_print = objs
# Set up a default depth
if depth is None:
depth = 10
# Next, print the tree to the appropriate depth
print_result_as_tree(objs_to_print, depth)
"""
def print_result_as_json(objs, pickle=False):
    nestedDict = serializer.loads(jsonpickle.encode(objs))
    filteredDict = type(nestedDict)()
    if not pickle:
        filteredDict = remove_pickling(nestedDict, filteredDict)
    else:
        filteredDict = nestedDict
    print(serializer.dumps(filteredDict, indent=4))
def remove_pickling(nestedDict, filteredDict):
if type(nestedDict) is dict:
        # For each key: recurse into lists, dicts, and scalars, skipping the
        # "py/object" marker that jsonpickle adds.
for key in nestedDict:
if key == "py/object":
continue
else:
filteredDict[key] = type(nestedDict[key])()
filteredDict[key] = remove_pickling(nestedDict[key], filteredDict[key])
return filteredDict
if type(nestedDict) is list:
# foreach item
for i in range(len(nestedDict)):
filteredDict.append(type(nestedDict[i])())
filteredDict[i] = remove_pickling(nestedDict[i], filteredDict[i])
return filteredDict
return nestedDict
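# Illustration of what remove_pickling strips (a sketch; the class path is
# hypothetical). jsonpickle encodes an object as
#     {"py/object": "solidfire.models.Account", "username": "a", "status": "active"}
# and remove_pickling copies every key except the "py/object" marker, yielding
#     {"username": "a", "status": "active"}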
"""
SDK1.6 Note:
Commenting this as print_tree is not supported in SDK 1.6.
"""
def get_result_as_tree(objs, depth=1, currentDepth=0, lastKey=""):
    # Return (rather than print) the message so print_result_as_tree does not
    # also print a stray None.
    return "print_tree is not supported in SDK1.6"
"""stringToReturn = ""
if(currentDepth > depth):
return "<to see more details, increase depth>\n"
if(type(objs) is str or type(objs) is bool or type(objs) is int or type(objs) is type(u'') or objs is None or type(objs) is float):# or (sys.version_info[0]<3 and type(objs) is long)):
return str(objs) + "\n"
if(type(objs) is list):
stringToReturn += "\n"
for i in range(len(objs)):
obj = objs[i]
stringToReturn += currentDepth*" "+get_result_as_tree(obj, depth, currentDepth+1, lastKey)
return stringToReturn
if(isinstance(objs, dict)):
stringToReturn += "\n"
for key in objs:
stringToReturn += currentDepth*" "+key+": "+get_result_as_tree(objs[key], depth, currentDepth+1, key)
return stringToReturn
if (isinstance(objs, tuple)):
return str(objs[0]) + "\n"
if(objs is None):
return stringToReturn
mydict = objs.__dict__
stringToReturn += "\n"
for key in mydict:
stringToReturn += currentDepth*" "
stringToReturn += key+": "+get_result_as_tree(mydict[key], depth, currentDepth+1, key)
return stringToReturn
"""
def filter_objects_from_simple_keypaths(objs, simpleKeyPaths):
    # First, we assemble the key paths.
    # They start out like this:
    #   [accounts.username, accounts.initiator_secret.secret, accounts.status]
    # and become like this:
    #   {"accounts": {"username": True, "initiator_secret": {"secret": True}, "status": True}}
keyPaths = dict()
for simpleKeyPath in simpleKeyPaths:
currentLevel = keyPaths
keyPathArray = simpleKeyPath.split('.')
for i in range(len(keyPathArray)):
if(i<(len(keyPathArray) - 1)):
if currentLevel.get(keyPathArray[i]) is None:
currentLevel[keyPathArray[i]] = dict()
else:
currentLevel[keyPathArray[i]] = True
currentLevel = currentLevel[keyPathArray[i]]
# Then we pass it in to filter objects.
return filter_objects(objs, keyPaths)
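# For example (hypothetical fields), the keypaths
#     ["accounts.username", "accounts.status"]
# assemble into
#     {"accounts": {"username": True, "status": True}}
# before being handed to filter_objects below.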
# keyPaths is a nested dict that mirrors the structure of the objects:
# intermediate keys map to nested dicts, and a leaf value of True marks a key
# to keep.
def filter_objects(objs, keyPaths):
    # If keyPaths looks like this:
    #   {"username": True, "volumes": {"Id": True}}
    # the keys at this level are username and volumes; when we recurse into
    # volumes, the key will be Id.
    finalFilteredObjects = dict()
    # Base case: a leaf of True on a non-list means we keep the value as-is.
    if keyPaths is True and type(objs) is not list:
        return objs
# If we've found a list, we recurse deeper to pull out the objs.
# We do not advance our keyPath recursion because this is just a list.
if type(objs) is list:
# If we have a list of objects, we will need to assemble and return a list of stuff.
filteredObjsDict = [None]*len(objs)
for i in range(len(objs)):
# Each element could be a string, dict, or list.
filteredObjsDict[i] = filter_objects(objs[i], keyPaths)
return filteredObjsDict
dictionaryOfInterest = None
if type(objs) is dict:
dictionaryOfInterest = objs
else:
dictionaryOfInterest = objs.__dict__
    for key in keyPaths:
        # We've found a dict (or an object's __dict__), so we advance the
        # keyPaths recursion as we pull out each requested key.
        if key not in dictionaryOfInterest:
            raise ValueError("'" + key + "' is not a valid key for this level. Valid keys are: " + ','.join(dictionaryOfInterest.keys()))
finalFilteredObjects[key] = filter_objects(dictionaryOfInterest[key], keyPaths[key])
return finalFilteredObjects
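# A worked example of filter_objects with hypothetical data:
#     filter_objects(
#         {"username": "a", "status": "active", "volumes": [{"Id": 1, "size": 5}]},
#         {"username": True, "volumes": {"Id": True}},
#     )
# returns
#     {"username": "a", "volumes": [{"Id": 1}]}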
def print_result_as_table(objs, keyPaths):
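    # NOTE: the filtered result below is computed but never rendered; table
    # output appears to be unimplemented in this version.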
filteredDictionary = filter_objects(objs, keyPaths)
def print_result_as_tree(objs, depth=1):
print(get_result_as_tree(objs, depth))
def establish_connection(ctx):
# Verify that the mvip does not contain the port number:
if ctx.mvip and ":" in ctx.mvip:
ctx.logger.error('Please provide the port using the port parameter.')
exit(1)
cfg = None
# Arguments take precedence regardless of env settings
if ctx.mvip:
if ctx.username is None:
ctx.username = getpass.getpass("Username:")
if ctx.password is None:
ctx.password = getpass.getpass("Password:")
cfg = {'mvip': ctx.mvip,
'username': "b'"+encrypt(ctx.username).decode('utf-8')+"'",
'password': "b'"+encrypt(ctx.password).decode('utf-8')+"'",
'port': ctx.port,
'url': 'https://%s:%s' % (ctx.mvip, ctx.port),
'version': ctx.version,
'verifyssl': ctx.verifyssl,
'timeout': ctx.timeout}
try:
ctx.element = ElementFactory.create(cfg["mvip"],decrypt(cfg["username"]),decrypt(cfg["password"]),port=cfg["port"],version=cfg["version"],verify_ssl=cfg["verifyssl"],timeout=cfg["timeout"])
ctx.version = ctx.element._api_version
cfg["version"] = ctx.element._api_version
except Exception as e:
ctx.logger.error(e.__str__())
exit(1)
# If someone accidentally passed in an argument, but didn't specify everything, throw an error.
elif ctx.username or ctx.password:
ctx.logger.error("In order to manually connect, please provide an mvip, a username, AND a password")
# If someone asked for a given connection or we need to default to using the connection at index 0 if it exists:
else:
if ctx.connectionindex is None and ctx.name is None:
cfg = get_default_connection(ctx)
elif ctx.connectionindex is not None:
connections = get_connections(ctx)
if int(ctx.connectionindex) > (len(connections)-1) or int(ctx.connectionindex) < (-len(connections)):
                ctx.logger.error("Connection index " + str(ctx.connectionindex) + " is out of range. Please provide an index between " + str(-len(connections)) + " and " + str(len(connections)-1))
exit(1)
cfg = connections[ctx.connectionindex]
elif ctx.name is not None:
connections = get_connections(ctx)
filteredCfg = [connection for connection in connections if connection["name"] == ctx.name]
if(len(filteredCfg) > 1):
                ctx.logger.error("Your connections.csv file has become corrupted. There are two or more connections with the same name.")
exit()
if(len(filteredCfg) < 1):
ctx.logger.error("Could not find a connection named "+ctx.name)
exit()
cfg = filteredCfg[0]
# If we managed to find the connection we were looking for, we must try to establish the connection.
if cfg is not None:
# Finally, we need to establish our connection via elementfactory:
try:
if int(cfg["port"]) != 443:
address = cfg["mvip"] + ":" + cfg["port"]
else:
address = cfg["mvip"]
ctx.element = ElementFactory.create(address, decrypt(cfg["username"]), decrypt(cfg["password"]), cfg["version"], verify_ssl=cfg["verifyssl"])
if int(cfg["timeout"]) != 30:
ctx.element.timeout(cfg["timeout"])
except Exception as e:
ctx.logger.error(e.__str__())
ctx.logger.error("The connection is corrupt. Run 'sfcli connection prune' to try and remove all broken connections or use 'sfcli connection remove -n name'")
ctx.logger.error(cfg)
exit(1)
# If we want the json output directly from the source, we'll have to override the send request method in the sdk:
# This is so that we can circumvent the python objects and get exactly what the json-rpc returns.
if ctx.json and ctx.element:
def new_send_request(*args, **kwargs):
return ctx.element.__class__.__bases__[0].send_request(ctx.element, return_response_raw=True, *args, **kwargs)
ctx.element.send_request = new_send_request
    # cfg is None only when we're asking for help or trying to store a
    # connection. If that's not what we're doing, we catch it later.
if cfg is not None:
cfg["port"] = int(cfg["port"])
ctx.cfg = cfg
cfg["name"] = cfg.get("name", "default")
if not ctx.nocache:
write_default_connection(ctx, cfg)
if ctx.element is None:
ctx.logger.error("You must establish at least one connection and specify which you intend to use.")
exit()
# Reads and writes of connections.csv must be atomic, hence the FileLock used
# below.
def get_connections(ctx):
connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv")
connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock")
if os.path.exists(connectionsCsvLocation):
try:
with FileLock(connectionsLock):
with open(connectionsCsvLocation, 'r') as connectionFile:
connections = list(csv.DictReader(connectionFile, delimiter=','))
except Exception as e:
ctx.logger.error("Problem reading "+connectionsCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file.")
exit(1)
else:
connections = []
for connection in connections:
connection["version"] = float(connection["version"])
if connection.get("verifyssl") == "True":
connection["verifyssl"] = True
else:
connection["verifyssl"] = False
return connections
def write_connections(ctx, connections):
try:
connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv")
connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock")
        with FileLock(connectionsLock):
            with open(connectionsCsvLocation, 'w') as f:
w = csv.DictWriter(f, ["name","mvip","port","username","password","version","url","verifyssl","timeout"], lineterminator='\n')
w.writeheader()
for connection in connections:
if connection is not None:
w.writerow(connection)
except Exception as e:
ctx.logger.error("Problem writing "+ connectionsCsvLocation + " " + str(e.args)+" Try changing the permissions of that file.")
exit(1)
def get_default_connection(ctx):
connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv")
if os.path.exists(connectionCsvLocation):
defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock")
try:
with FileLock(defaultLockLocation):
with open(connectionCsvLocation) as connectionFile:
connection = list(csv.DictReader(connectionFile, delimiter=','))
except Exception as e:
ctx.logger.error("Problem reading "+connectionCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file or specifying credentials.")
exit(1)
if len(connection)>0:
connection[0]["version"] = float(connection[0]["version"])
if(connection[0]["verifyssl"] == "True"):
connection[0]["verifyssl"] = True
else:
connection[0]["verifyssl"] = False
return connection[0]
else:
os.remove(defaultLockLocation)
ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.")
exit(1)
else:
ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.")
exit(1)
def write_default_connection(ctx, connection):
connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv")
try:
defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock")
with FileLock(defaultLockLocation):
with open(connectionCsvLocation, 'w') as f:
w = csv.DictWriter(f, ["name", "mvip", "port", "username", "password", "version", "url", "verifyssl", "timeout"],
lineterminator='\n')
w.writeheader()
w.writerow(connection)
except Exception as e:
        ctx.logger.warning("Problem writing " + connectionCsvLocation + " " + str(e.args) + " Try changing the permissions of that file or using the --nocache flag.")
# WARNING! This doesn't actually give us total security. It only gives us obscurity.
def encrypt(sensitive_data):
cipher = ARC4.new(socket.gethostname().encode('utf-8') + "SOLIDFIRE".encode('utf-8'))
encoded = base64.b64encode(cipher.encrypt(sensitive_data.encode('utf-8')))
return encoded
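# Round-trip sketch: ARC4 is a symmetric stream cipher keyed identically in
# encrypt() and the function below (the masked `decrypt`), and the ciphertext
# is stored as the repr of a bytes object ("b'...'"), which is why the decrypt
# side strips the leading two and trailing one characters before
# base64-decoding:
#
#     stored = "b'" + encrypt('secret').decode('utf-8') + "'"
#     assert decrypt(stored) == 'secret'   # holds on the same host only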
def | (encoded_sensitive_data):
cipher = ARC4.new(socket.gethostname().encode('utf-8') + "SOLIDFIRE".encode('utf-8'))
decoded = cipher.decrypt(base64.b64decode(encoded_sensitive_data[2:-1]))
    return decoded.decode('utf-8')

middle: `decrypt` | fim_type: `identifier_name`
utils.py | (prefix duplicates the row above, ending at the masked span inside get_connections) | (suffix duplicates the row above, resuming after the masked span) | `connection["verifyssl"] = False` | conditional_block
utils.py | (prefix duplicates the first row, ending just before the closing `"""` of the commented-out get_result_as_tree body) | (suffix duplicates the first row, resuming after it) | `"""` | random_line_split
assets.py |
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from functools import partial
from numbers import Integral
from operator import itemgetter, attrgetter
import struct
import numpy as np
import pandas as pd
from pandas import isnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from toolz import (
compose,
concat,
concatv,
curry,
groupby,
merge,
partition_all,
sliding_window,
valmap,
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
SidsNotFound,
SymbolNotFound,
)
from ._assets import (
Asset, Equity, Future,
)
from .continuous_futures import (
ADJUSTMENT_STYLES,
ContinuousFuture,
OrderedContracts,
)
from .asset_writer import (
check_version_info,
split_delimited_symbol,
asset_db_table_names,
symbol_columns,
SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
ASSET_DB_VERSION
)
from .exchange_info import ExchangeInfo
from zipline.utils.functional import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_eng
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
'rollover_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def merge_ownership_periods(mappings):
"""
Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
    end date of the last period is pushed forward to the max Timestamp.
"""
return valmap(
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
)],
),
)
),
mappings,
)
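# A small worked example (hypothetical sids and dates): given one key whose
# periods are
#     [OwnershipPeriod(2010-01-01, 2010-06-30, sid=1, value='A'),
#      OwnershipPeriod(2011-01-01, 2011-12-31, sid=2, value='A')]
# merge_ownership_periods returns
#     [OwnershipPeriod(2010-01-01, 2011-01-01, sid=1, value='A'),
#      OwnershipPeriod(2011-01-01, Timestamp.max, sid=2, value='A')]
# i.e. each end date becomes the next period's start, and the final end date
# becomes pd.Timestamp.max.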
def _build_ownership_map_from_rows(rows, key_from_row, value_from_row):
mappings = {}
for row in rows:
mappings.setdefault(
key_from_row(row),
[],
).append(
OwnershipPeriod(
pd.Timestamp(row.start_date, unit='ns', tz='utc'),
pd.Timestamp(row.end_date, unit='ns', tz='utc'),
row.sid,
value_from_row(row),
),
)
return merge_ownership_periods(mappings)
def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping to lists of OwnershipPeriods, from a db table.
"""
return _build_ownership_map_from_rows(
sa.select(table.c).execute().fetchall(),
key_from_row,
value_from_row,
)
@curry
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None}
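# Usage sketch: because _filter_kwargs is curried, partially applying the name
# set yields a reusable filter, e.g. (hypothetical names)
#
#     only_ab = _filter_kwargs({'a', 'b'})
#     only_ab({'a': 1, 'b': None, 'c': 3})  # -> {'a': 1}; None values and
#                                           #    unknown keys are dropped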
_filter_future_kwargs = _filter_kwargs(Future._kwargnames)
_filter_equity_kwargs = _filter_kwargs(Equity._kwargnames)
def _convert_asset_timestamp_fields(dict_):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key in _asset_timestamp_fields & viewkeys(dict_):
value = pd.Timestamp(dict_[key], tz='UTC')
dict_[key] = None if isnull(value) else value
return dict_
SID_TYPE_IDS = {
# Asset would be 0,
ContinuousFuture: 1,
}
CONTINUOUS_FUTURE_ROLL_STYLE_IDS = {
'calendar': 0,
'volume': 1,
}
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = {
None: 0,
'div': 1,
'add': 2,
}
def _encode_continuous_future_sid(root_symbol,
offset,
roll_style,
adjustment_style):
s = struct.Struct("B 2B B B B 2B")
# B - sid type
# 2B - root symbol
    # B - offset (could be packed smaller, since offsets greater than 12 are
    #     probably unneeded.)
# B - roll type
# B - adjustment
# 2B - empty space left for parameterized roll types
# The root symbol currently supports 2 characters. If 3 char root symbols
# are needed, the size of the root symbol does not need to change, however
# writing the string directly will need to change to a scheme of writing
# the A-Z values in 5-bit chunks.
a = array.array('B', [0] * s.size)
rs = bytearray(root_symbol, 'ascii')
values = (SID_TYPE_IDS[ContinuousFuture],
rs[0],
rs[1],
offset,
CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style],
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style],
0, 0)
s.pack_into(a, 0, *values)
return int(binascii.hexlify(a), 16)
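# Layout sketch (illustrative values): for root symbol 'FO', offset 0, a
# 'calendar' roll, and no adjustment, the packed 8-byte buffer is
#     [1, ord('F'), ord('O'), 0, 0, 0, 0, 0]
# and the sid is the base-16 integer of binascii.hexlify of that buffer.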
Lifetimes = namedtuple('Lifetimes', 'sid start end')
class AssetFinder(object):
"""
An AssetFinder is an interface to a database of Asset metadata written by
an ``AssetDBWriter``.
This class provides methods for looking up assets by unique integer id or
by symbol. For historical reasons, we refer to these unique ids as 'sids'.
Parameters
----------
engine : str or SQLAlchemy.engine
An engine with a connection to the asset database to use, or a string
that can be parsed by SQLAlchemy as a URI.
future_chain_predicates : dict
A dict mapping future root symbol to a predicate function which accepts
a contract as a parameter and returns whether or not the contract should be
included in the chain.
See Also
--------
:class:`zipline.assets.AssetDBWriter`
"""
@preprocess(engine=coerce_string_to_eng(require_exists=True))
def __init__(self, engine, future_chain_predicates=None):
self.engine = engine
metadata = sa.MetaData(bind=engine)
metadata.reflect(only=asset_db_table_names)
for table_name in asset_db_table_names:
setattr(self, table_name, metadata.tables[table_name])
# Check the version info of the db for compatibility
check_version_info(engine, self.version_info, ASSET_DB_VERSION)
# Cache for lookup of assets by sid, the objects in the asset lookup
# may be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset will populate the cache on first retrieval.
self._asset_cache = {}
self._asset_type_cache = {}
self._caches = (self._asset_cache, self._asset_type_cache)
self._future_chain_predicates = future_chain_predicates \
if future_chain_predicates is not None else {}
self._ordered_contracts = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = {}
# Stores the max end_date of the bundle
self._bundle_end_date = None
self._sids_to_real_sids = {}
self._real_sids_to_sids = {}
@lazyval
def exchange_info(self):
es = sa.select(self.exchanges.c).execute().fetchall()
return {
name: ExchangeInfo(name, canonical_name, country_code)
for name, canonical_name, country_code in es
}
@lazyval
def symbol_ownership_map(self):
return build_ownership_map(
table=self.equity_symbol_mappings,
key_from_row=(
lambda row: (row.company_symbol, row.share_class_symbol)
),
value_from_row=lambda row: row.symbol,
)
@lazyval
def country_codes(self):
        return tuple([c for (c,) in sa.select((
            sa.distinct(self.exchanges.c.country_code),
        )).execute().fetchall()])
def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found
def group_by_type(self, sids):
"""
Group a list of sids by asset type.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[str or None -> list[int]]
A dict mapping unique asset types to lists of sids drawn from sids.
If we fail to look up an asset, we assign it a key of None.
"""
return invert(self.lookup_asset_types(sids))
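    # For example (hypothetical sids), if sid 1 is an equity, sid 2 a future,
    # and sid 99 unknown, group_by_type([1, 2, 99]) groups the sids by type,
    # roughly {'equity': [1], 'future': [2], None: [99]} (the exact container
    # type of each group depends on the invert helper).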
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset for a given sid.
"""
try:
asset = self._asset_cache[sid]
if asset is None and not default_none:
raise SidsNotFound(sids=[sid])
return asset
except KeyError:
return self.retrieve_all((sid,), default_none=default_none)[0]
def retrieve_all(self, sids, default_none=False):
"""
Retrieve all assets in `sids`.
Parameters
----------
sids : iterable of int
Assets to retrieve.
default_none : bool
If True, return None for failed lookups.
If False, raise `SidsNotFound`.
Returns
-------
assets : list[Asset or None]
A list of the same length as `sids` containing Assets (or Nones)
corresponding to the requested sids.
Raises
------
SidsNotFound
When a requested sid is not found and default_none=False.
"""
sids = list(sids)
hits, missing, failures = {}, set(), []
for sid in sids:
try:
asset = self._asset_cache[sid]
if not default_none and asset is None:
# Bail early if we've already cached that we don't know
# about an asset.
raise SidsNotFound(sids=[sid])
hits[sid] = asset
except KeyError:
missing.add(sid)
# All requests were cache hits. Return requested sids in order.
if not missing:
return [hits[sid] for sid in sids]
update_hits = hits.update
# Look up cache misses by type.
type_to_assets = self.group_by_type(missing)
# Handle failures
failures = {failure: None for failure in type_to_assets.pop(None, ())}
update_hits(failures)
self._asset_cache.update(failures)
if failures and not default_none:
raise SidsNotFound(sids=list(failures))
# We don't update the asset cache here because it should already be
# updated by `self.retrieve_equities`.
update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
update_hits(
self.retrieve_futures_contracts(type_to_assets.pop('future', ()))
)
# We shouldn't know about any other asset types.
if type_to_assets:
raise AssertionError(
"Found asset types: %s" % list(type_to_assets.keys())
)
return [hits[sid] for sid in sids]
def retrieve_equities(self, sids):
"""
Retrieve Equity objects for a list of sids.
        Users generally shouldn't need to call this method (instead, they should
prefer the more general/friendly `retrieve_assets`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
equities : dict[int -> Equity]
Raises
------
EquitiesNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.equities, Equity)
def _retrieve_equity(self, sid):
return self.retrieve_equities((sid,))[sid]
def retrieve_futures_contracts(self, sids):
"""
Retrieve Future objects for an iterable of sids.
        Users generally shouldn't need to call this method (instead, they should
prefer the more general/friendly `retrieve_assets`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
        futures : dict[int -> Future]
Raises
------
        FutureContractsNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.futures_contracts, Future)
@staticmethod
def _select_assets_by_sid(asset_tbl, sids):
return sa.select([asset_tbl]).where(
asset_tbl.c.sid.in_(map(int, sids))
)
@staticmethod
def _select_asset_by_symbol(asset_tbl, symbol):
return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
cols = self.equity_symbol_mappings.c
# These are the columns we actually want.
data_cols = (cols.sid,) + tuple(cols[name] for name in symbol_columns)
# Also select the max of end_date so that all non-grouped fields take
# on the value associated with the max end_date. The SQLite docs say
# this:
#
# When the min() or max() aggregate functions are used in an aggregate
# query, all bare columns in the result set take values from the input
# row which also contains the minimum or maximum. Only the built-in
# min() and max() functions work this way.
#
# See https://www.sqlite.org/lang_select.html#resultset, for more info.
to_select = data_cols + (sa.func.max(cols.end_date),)
return sa.select(
to_select,
).where(
cols.sid.in_(map(int, sid_group))
).group_by(
cols.sid,
)
def _lookup_most_recent_symbols(self, sids):
return {
row.sid: {c: row[c] for c in symbol_columns}
for row in concat(
self.engine.execute(
self._select_most_recent_symbols_chunk(sid_group),
).fetchall()
for sid_group in partition_all(
SQLITE_MAX_VARIABLE_NUMBER,
sids
)
)
}
def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities):
if not sids:
return
if querying_equities:
def mkdict(row,
exchanges=self.exchange_info,
symbols=self._lookup_most_recent_symbols(sids)):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
# we are not required to have a symbol for every asset, if
# we don't have any symbols we will just use the empty string
return merge(d, symbols.get(row['sid'], {}))
else:
def mkdict(row, exchanges=self.exchange_info):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
return d
for assets in group_into_chunks(sids):
# Load misses from the db.
query = self._select_assets_by_sid(asset_tbl, assets)
for row in query.execute().fetchall():
yield _convert_asset_timestamp_fields(mkdict(row))
def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
----------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
raise EquitiesNotFound(sids=misses)
else:
raise FutureContractsNotFound(sids=misses)
return hits
def lookup_symbol(self, symbol):
"""Lookup an equity by symbol. This method can only resolve the equity
if exactly one equity has ever owned the ticker.
Parameters
----------
symbol : str
The ticker symbol to resolve.
Returns
-------
equity : Equity
The equity identified by ``symbol``.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when more than one equity has held ``symbol`` or when
the symbol is ambiguous across multiple countries.
Notes
-----
The resolution algorithm is as follows:
- Split the symbol into the company and share class component.
- Do a dictionary lookup of the
``(company_symbol, share_class_symbol)`` in the provided ownership
map.
- If there is no entry in the dictionary, we don't know about this
symbol so raise a ``SymbolNotFound`` error.
- If there is more than one owner, raise ``MultipleSymbolsFound``.
- Otherwise, because the list mapped to a symbol cannot be empty,
return the single asset.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None")
mapping = self.symbol_ownership_map
# split the symbol into the components, if there are no
# company/share class parts then share_class_symbol will be empty
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = mapping[company_symbol, share_class_symbol]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held this symbol
raise SymbolNotFound(symbol=symbol)
# exactly one equity has ever held this symbol
if len(owners) == 1:
return self.retrieve_asset(owners[0].sid)
options = {self.retrieve_asset(owner.sid) for owner in owners}
# more than one equity has held this ticker, this
# is ambiguous
raise MultipleSymbolsFound(symbol=symbol, options=options)
def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid'])
def _get_contract_sids(self, root_symbol):
fc_cols = self.futures_contracts.c
return [r.sid for r in
list(sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
(pd.notnull(fc_cols.start_date))).order_by(
fc_cols.auto_close_date).execute().fetchall())]
def _get_root_symbol_exchange(self, root_symbol):
fc_cols = self.futures_root_symbols.c
fields = (fc_cols.exchange,)
exchange = sa.select(fields).where(
fc_cols.root_symbol == root_symbol).execute().scalar()
if exchange is not None:
return exchange
else:
raise SymbolNotFound(symbol=root_symbol)
def get_ordered_contracts(self, root_symbol):
try:
return self._ordered_contracts[root_symbol]
except KeyError:
contract_sids = self._get_contract_sids(root_symbol)
contracts = deque(self.retrieve_all(contract_sids))
chain_predicate = self._future_chain_predicates.get(root_symbol,
None)
oc = OrderedContracts(root_symbol, contracts, chain_predicate)
self._ordered_contracts[root_symbol] = oc
return oc
def create_continuous_future(self,
root_symbol,
offset,
roll_style,
adjustment):
|
def _make_sids(tblattr):
def _(self):
return tuple(map(
itemgetter('sid'),
sa.select((
getattr(self, tblattr).c.sid,
)).execute().fetchall(),
))
return _
sids = property(
_make_sids('asset_router'),
doc='All the sids in the asset finder.',
)
equities_sids = property(
_make_sids('equities'),
doc='All of the sids for equities in the asset finder.',
)
futures_sids = property(
_make_sids('futures_contracts'),
doc='All of the sids for futures contracts in the asset finder.',
)
del _make_sids
@property
def sids_to_real_sids(self):
"""
Returns a dict mapping sids to real sids.
"""
if not self._sids_to_real_sids:
sql = """
SELECT
sid,
real_sid
FROM
equities
UNION
SELECT
sid,
real_sid
FROM
futures_contracts
"""
result = self.engine.execute(sql)
self._sids_to_real_sids = {row[0]: row[1] for row in result.fetchall()}
return self._sids_to_real_sids
@property
def real_sids_to_sids(self):
"""
Returns a dict mapping real sids to sids.
"""
if not self._real_sids_to_sids:
self._real_sids_to_sids = {v: k for k, v in self.sids_to_real_sids.items()}
return self._real_sids_to_sids
def get_bundle_end_date(self):
"""
Returns the max end_date of the bundle, which can be considered the date
through which the bundle has been updated.
"""
if not self._bundle_end_date:
max_date = self.engine.execute(
"""
SELECT
MAX(end_date)
FROM (
SELECT
end_date
FROM
equities
UNION
SELECT
end_date
FROM
futures_contracts
)
"""
).scalar()
self._bundle_end_date = pd.Timestamp(max_date, tz="UTC")
return self._bundle_end_date
def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
sids = starts = ends = []
equities_cols = self.equities.c
futures_cols = self.futures_contracts.c
if country_codes:
equities_query = sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.auto_close_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
)
futures_query = sa.select((
futures_cols.sid,
futures_cols.start_date,
futures_cols.auto_close_date,
)).where(
(self.exchanges.c.exchange == futures_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
)
results = equities_query.union(futures_query).execute().fetchall()
if results:
sids, starts, ends = zip(*results)
sid = np.array(sids, dtype='i8')
start = np.array(starts, dtype='f8')
end = np.array(ends, dtype='f8')
start[np.isnan(start)] = 0 # convert missing starts to 0
end[end == np.datetime64('NaT').view('i8')] = np.iinfo(int).max  # convert missing ends to INTMAX
return Lifetimes(sid, start, end)
def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
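Examples
--------
A hypothetical sketch (assumes ``finder`` is an AssetFinder and
``dates`` is a pd.DatetimeIndex):
>>> alive = finder.lifetimes(dates, include_start_date=False,
...                          country_codes=['US'])
>>> alive.loc[dates[0]]  # boolean Series indexed by sid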
"""
if isinstance(country_codes, string_types):
raise TypeError(
"Got string {!r} instead of an iterable of strings in "
"AssetFinder.lifetimes.".format(country_codes),
)
# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = (
self._compute_asset_lifetimes(country_codes)
)
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
def equities_sids_for_country_code(self, country_code):
"""Return all of the sids for a given country.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code.
Returns
-------
tuple[int]
The sids whose exchanges are in this country.
"""
sids = self._compute_asset_lifetimes([country_code]).sid
return tuple(sids.tolist())
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Asset)
class NotAssetConvertible(ValueError):
pass
class PricingDataAssociable(with_metaclass(ABCMeta)):
"""
ABC for types that can be associated with pricing data.
Includes Asset, Future, ContinuousFuture
"""
pass
PricingDataAssociable.register(Asset)
PricingDataAssociable.register(Future)
PricingDataAssociable.register(ContinuousFuture)
| if adjustment not in ADJUSTMENT_STYLES:
raise ValueError(
'Invalid adjustment style {!r}. Allowed adjustment styles are '
'{}.'.format(adjustment, list(ADJUSTMENT_STYLES))
)
oc = self.get_ordered_contracts(root_symbol)
exchange = self._get_root_symbol_exchange(root_symbol)
sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
None)
mul_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'div')
add_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'add')
cf_template = partial(
ContinuousFuture,
root_symbol=root_symbol,
offset=offset,
roll_style=roll_style,
start_date=oc.start_date,
end_date=oc.end_date,
exchange_info=self.exchange_info[exchange],
)
cf = cf_template(sid=sid)
mul_cf = cf_template(sid=mul_sid, adjustment='mul')
add_cf = cf_template(sid=add_sid, adjustment='add')
self._asset_cache[cf.sid] = cf
self._asset_cache[mul_cf.sid] = mul_cf
self._asset_cache[add_cf.sid] = add_cf
return {None: cf, 'mul': mul_cf, 'add': add_cf}[adjustment] | identifier_body |
assets.py | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from functools import partial
from numbers import Integral
from operator import itemgetter, attrgetter
import struct
import numpy as np
import pandas as pd
from pandas import isnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from toolz import (
compose,
concat,
concatv,
curry,
groupby,
merge,
partition_all,
sliding_window,
valmap,
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
SidsNotFound,
SymbolNotFound,
)
from ._assets import (
Asset, Equity, Future,
)
from .continuous_futures import (
ADJUSTMENT_STYLES,
ContinuousFuture,
OrderedContracts,
)
from .asset_writer import (
check_version_info,
split_delimited_symbol,
asset_db_table_names,
symbol_columns,
SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
ASSET_DB_VERSION
)
from .exchange_info import ExchangeInfo
from zipline.utils.functional import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_eng
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
'rollover_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def merge_ownership_periods(mappings):
"""
Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
end date of the last period is pushed forward to the max Timestamp.
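As an illustrative example, two periods starting at ``t0`` and ``t1``
(with ``t0 < t1``) come back as ``[t0, t1)`` and
``[t1, pd.Timestamp.max)``, regardless of their original end dates.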
"""
return valmap(
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
)],
),
)
),
mappings,
)
def _build_ownership_map_from_rows(rows, key_from_row, value_from_row):
mappings = {}
for row in rows:
mappings.setdefault(
key_from_row(row),
[],
).append(
OwnershipPeriod(
pd.Timestamp(row.start_date, unit='ns', tz='utc'),
pd.Timestamp(row.end_date, unit='ns', tz='utc'),
row.sid,
value_from_row(row),
),
)
return merge_ownership_periods(mappings)
def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping keys (as computed by ``key_from_row``) to lists
of OwnershipPeriods from a db table.
"""
return _build_ownership_map_from_rows(
sa.select(table.c).execute().fetchall(),
key_from_row,
value_from_row,
)
@curry
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None}
_filter_future_kwargs = _filter_kwargs(Future._kwargnames)
_filter_equity_kwargs = _filter_kwargs(Equity._kwargnames)
def _convert_asset_timestamp_fields(dict_):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key in _asset_timestamp_fields & viewkeys(dict_):
value = pd.Timestamp(dict_[key], tz='UTC')
dict_[key] = None if isnull(value) else value
return dict_
SID_TYPE_IDS = {
# Asset would be 0,
ContinuousFuture: 1,
}
CONTINUOUS_FUTURE_ROLL_STYLE_IDS = {
'calendar': 0,
'volume': 1,
}
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = {
None: 0,
'div': 1,
'add': 2,
}
def _encode_continuous_future_sid(root_symbol,
offset,
roll_style,
adjustment_style):
s = struct.Struct("B 2B B B B 2B")
# B - sid type
# 2B - root symbol
# B - offset (could be packed smaller since offsets of greater than 12 are
# probably unneeded.)
# B - roll type
# B - adjustment
# 2B - empty space left for parameterized roll types
# The root symbol currently supports 2 characters. If 3 char root symbols
# are needed, the size of the root symbol does not need to change, however
# writing the string directly will need to change to a scheme of writing
# the A-Z values in 5-bit chunks.
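# Illustrative worked example (hypothetical inputs): for root_symbol='CL',
# offset=0, roll_style='calendar', adjustment_style=None, the packed bytes
# are [1, ord('C'), ord('L'), 0, 0, 0, 0, 0] = 01 43 4c 00 00 00 00 00,
# so the returned sid is int('01434c0000000000', 16).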
a = array.array('B', [0] * s.size)
rs = bytearray(root_symbol, 'ascii')
values = (SID_TYPE_IDS[ContinuousFuture],
rs[0],
rs[1],
offset,
CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style],
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style],
0, 0)
s.pack_into(a, 0, *values)
return int(binascii.hexlify(a), 16)
Lifetimes = namedtuple('Lifetimes', 'sid start end')
class AssetFinder(object):
"""
An AssetFinder is an interface to a database of Asset metadata written by
an ``AssetDBWriter``.
This class provides methods for looking up assets by unique integer id or
by symbol. For historical reasons, we refer to these unique ids as 'sids'.
Parameters
----------
engine : str or SQLAlchemy.engine
An engine with a connection to the asset database to use, or a string
that can be parsed by SQLAlchemy as a URI.
future_chain_predicates : dict
A dict mapping future root symbol to a predicate function which accepts
a contract as a parameter and returns whether or not the contract should be
included in the chain.
See Also
--------
:class:`zipline.assets.AssetDBWriter`
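Examples
--------
A minimal sketch (the database path is hypothetical):
>>> finder = AssetFinder('sqlite:////tmp/assets.db')
>>> asset = finder.retrieve_asset(1)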
"""
@preprocess(engine=coerce_string_to_eng(require_exists=True))
def __init__(self, engine, future_chain_predicates=None):
self.engine = engine
metadata = sa.MetaData(bind=engine)
metadata.reflect(only=asset_db_table_names)
for table_name in asset_db_table_names:
setattr(self, table_name, metadata.tables[table_name])
# Check the version info of the db for compatibility
check_version_info(engine, self.version_info, ASSET_DB_VERSION)
# Cache for lookup of assets by sid, the objects in the asset lookup
# may be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset will populate the cache on first retrieval.
self._asset_cache = {}
self._asset_type_cache = {}
self._caches = (self._asset_cache, self._asset_type_cache)
self._future_chain_predicates = future_chain_predicates \
if future_chain_predicates is not None else {}
self._ordered_contracts = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = {}
# Stores the max end_date of the bundle
self._bundle_end_date = None
self._sids_to_real_sids = {}
self._real_sids_to_sids = {}
@lazyval
def | (self):
es = sa.select(self.exchanges.c).execute().fetchall()
return {
name: ExchangeInfo(name, canonical_name, country_code)
for name, canonical_name, country_code in es
}
@lazyval
def symbol_ownership_map(self):
return build_ownership_map(
table=self.equity_symbol_mappings,
key_from_row=(
lambda row: (row.company_symbol, row.share_class_symbol)
),
value_from_row=lambda row: row.symbol,
)
@lazyval
def country_codes(self):
return tuple([c for (c,) in sa.select(
(sa.distinct(self.exchanges.c.country_code),)
).execute().fetchall()])
def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found
def group_by_type(self, sids):
"""
Group a list of sids by asset type.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[str or None -> list[int]]
A dict mapping unique asset types to lists of sids drawn from sids.
If we fail to look up an asset, we assign it a key of None.
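For example (illustrative sids), ``group_by_type([1, 2, 3])`` might
return ``{'equity': [1, 3], 'future': [2]}``.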
"""
return invert(self.lookup_asset_types(sids))
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset for a given sid.
"""
try:
asset = self._asset_cache[sid]
if asset is None and not default_none:
raise SidsNotFound(sids=[sid])
return asset
except KeyError:
return self.retrieve_all((sid,), default_none=default_none)[0]
def retrieve_all(self, sids, default_none=False):
"""
Retrieve all assets in `sids`.
Parameters
----------
sids : iterable of int
Assets to retrieve.
default_none : bool
If True, return None for failed lookups.
If False, raise `SidsNotFound`.
Returns
-------
assets : list[Asset or None]
A list of the same length as `sids` containing Assets (or Nones)
corresponding to the requested sids.
Raises
------
SidsNotFound
When a requested sid is not found and default_none=False.
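Examples
--------
A hypothetical sketch (assumes sids 1 and 2 exist in the asset db):
>>> first, second = finder.retrieve_all([1, 2])
>>> maybe = finder.retrieve_all([1, 999], default_none=True)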
"""
sids = list(sids)
hits, missing, failures = {}, set(), []
for sid in sids:
try:
asset = self._asset_cache[sid]
if not default_none and asset is None:
# Bail early if we've already cached that we don't know
# about an asset.
raise SidsNotFound(sids=[sid])
hits[sid] = asset
except KeyError:
missing.add(sid)
# All requests were cache hits. Return requested sids in order.
if not missing:
return [hits[sid] for sid in sids]
update_hits = hits.update
# Look up cache misses by type.
type_to_assets = self.group_by_type(missing)
# Handle failures
failures = {failure: None for failure in type_to_assets.pop(None, ())}
update_hits(failures)
self._asset_cache.update(failures)
if failures and not default_none:
raise SidsNotFound(sids=list(failures))
# We don't update the asset cache here because it should already be
# updated by `self.retrieve_equities`.
update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
update_hits(
self.retrieve_futures_contracts(type_to_assets.pop('future', ()))
)
# We shouldn't know about any other asset types.
if type_to_assets:
raise AssertionError(
"Found asset types: %s" % list(type_to_assets.keys())
)
return [hits[sid] for sid in sids]
def retrieve_equities(self, sids):
"""
Retrieve Equity objects for a list of sids.
Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_all`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
equities : dict[int -> Equity]
Raises
------
EquitiesNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.equities, Equity)
def _retrieve_equity(self, sid):
return self.retrieve_equities((sid,))[sid]
def retrieve_futures_contracts(self, sids):
"""
Retrieve Future objects for an iterable of sids.
Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_all`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
futures : dict[int -> Future]
Raises
------
FutureContractsNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.futures_contracts, Future)
@staticmethod
def _select_assets_by_sid(asset_tbl, sids):
return sa.select([asset_tbl]).where(
asset_tbl.c.sid.in_(map(int, sids))
)
@staticmethod
def _select_asset_by_symbol(asset_tbl, symbol):
return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
cols = self.equity_symbol_mappings.c
# These are the columns we actually want.
data_cols = (cols.sid,) + tuple(cols[name] for name in symbol_columns)
# Also select the max of end_date so that all non-grouped fields take
# on the value associated with the max end_date. The SQLite docs say
# this:
#
# When the min() or max() aggregate functions are used in an aggregate
# query, all bare columns in the result set take values from the input
# row which also contains the minimum or maximum. Only the built-in
# min() and max() functions work this way.
#
# See https://www.sqlite.org/lang_select.html#resultset, for more info.
to_select = data_cols + (sa.func.max(cols.end_date),)
return sa.select(
to_select,
).where(
cols.sid.in_(map(int, sid_group))
).group_by(
cols.sid,
)
def _lookup_most_recent_symbols(self, sids):
return {
row.sid: {c: row[c] for c in symbol_columns}
for row in concat(
self.engine.execute(
self._select_most_recent_symbols_chunk(sid_group),
).fetchall()
for sid_group in partition_all(
SQLITE_MAX_VARIABLE_NUMBER,
sids
)
)
}
def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities):
if not sids:
return
if querying_equities:
def mkdict(row,
exchanges=self.exchange_info,
symbols=self._lookup_most_recent_symbols(sids)):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
# we are not required to have a symbol for every asset, if
# we don't have any symbols we will just use the empty string
return merge(d, symbols.get(row['sid'], {}))
else:
def mkdict(row, exchanges=self.exchange_info):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
return d
for assets in group_into_chunks(sids):
# Load misses from the db.
query = self._select_assets_by_sid(asset_tbl, assets)
for row in query.execute().fetchall():
yield _convert_asset_timestamp_fields(mkdict(row))
def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
----------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
raise EquitiesNotFound(sids=misses)
else:
raise FutureContractsNotFound(sids=misses)
return hits
def lookup_symbol(self, symbol):
"""Lookup an equity by symbol. This method can only resolve the equity
if exactly one equity has ever owned the ticker.
Parameters
----------
symbol : str
The ticker symbol to resolve.
Returns
-------
equity : Equity
The equity identified by ``symbol``.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when more than one equity has held ``symbol`` or when
the symbol is ambiguous across multiple countries.
Notes
-----
The resolution algorithm is as follows:
- Split the symbol into the company and share class component.
- Do a dictionary lookup of the
``(company_symbol, share_class_symbol)`` in the provided ownership
map.
- If there is no entry in the dictionary, we don't know about this
symbol so raise a ``SymbolNotFound`` error.
- If there is more than one owner, raise ``MultipleSymbolsFound``.
- Otherwise, because the list mapped to a symbol cannot be empty,
return the single asset.
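Examples
--------
An illustrative sketch (the tickers are hypothetical):
>>> equity = finder.lookup_symbol('AAPL')  # unique owner -> Equity
>>> finder.lookup_symbol('ZZZZ')  # never held -> raises SymbolNotFound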
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None")
mapping = self.symbol_ownership_map
# split the symbol into the components, if there are no
# company/share class parts then share_class_symbol will be empty
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = mapping[company_symbol, share_class_symbol]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held this symbol
raise SymbolNotFound(symbol=symbol)
# exactly one equity has ever held this symbol
if len(owners) == 1:
return self.retrieve_asset(owners[0].sid)
options = {self.retrieve_asset(owner.sid) for owner in owners}
# more than one equity has held this ticker, this
# is ambiguous
raise MultipleSymbolsFound(symbol=symbol, options=options)
def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid'])
def _get_contract_sids(self, root_symbol):
fc_cols = self.futures_contracts.c
return [r.sid for r in
list(sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
(pd.notnull(fc_cols.start_date))).order_by(
fc_cols.auto_close_date).execute().fetchall())]
def _get_root_symbol_exchange(self, root_symbol):
fc_cols = self.futures_root_symbols.c
fields = (fc_cols.exchange,)
exchange = sa.select(fields).where(
fc_cols.root_symbol == root_symbol).execute().scalar()
if exchange is not None:
return exchange
else:
raise SymbolNotFound(symbol=root_symbol)
def get_ordered_contracts(self, root_symbol):
try:
return self._ordered_contracts[root_symbol]
except KeyError:
contract_sids = self._get_contract_sids(root_symbol)
contracts = deque(self.retrieve_all(contract_sids))
chain_predicate = self._future_chain_predicates.get(root_symbol,
None)
oc = OrderedContracts(root_symbol, contracts, chain_predicate)
self._ordered_contracts[root_symbol] = oc
return oc
def create_continuous_future(self,
root_symbol,
offset,
roll_style,
adjustment):
if adjustment not in ADJUSTMENT_STYLES:
raise ValueError(
'Invalid adjustment style {!r}. Allowed adjustment styles are '
'{}.'.format(adjustment, list(ADJUSTMENT_STYLES))
)
oc = self.get_ordered_contracts(root_symbol)
exchange = self._get_root_symbol_exchange(root_symbol)
sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
None)
mul_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'div')
add_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'add')
cf_template = partial(
ContinuousFuture,
root_symbol=root_symbol,
offset=offset,
roll_style=roll_style,
start_date=oc.start_date,
end_date=oc.end_date,
exchange_info=self.exchange_info[exchange],
)
cf = cf_template(sid=sid)
mul_cf = cf_template(sid=mul_sid, adjustment='mul')
add_cf = cf_template(sid=add_sid, adjustment='add')
self._asset_cache[cf.sid] = cf
self._asset_cache[mul_cf.sid] = mul_cf
self._asset_cache[add_cf.sid] = add_cf
return {None: cf, 'mul': mul_cf, 'add': add_cf}[adjustment]
def _make_sids(tblattr):
def _(self):
return tuple(map(
itemgetter('sid'),
sa.select((
getattr(self, tblattr).c.sid,
)).execute().fetchall(),
))
return _
sids = property(
_make_sids('asset_router'),
doc='All the sids in the asset finder.',
)
equities_sids = property(
_make_sids('equities'),
doc='All of the sids for equities in the asset finder.',
)
futures_sids = property(
_make_sids('futures_contracts'),
doc='All of the sids for futures contracts in the asset finder.',
)
del _make_sids
@property
def sids_to_real_sids(self):
"""
Returns a dict mapping sids to real sids.
"""
if not self._sids_to_real_sids:
sql = """
SELECT
sid,
real_sid
FROM
equities
UNION
SELECT
sid,
real_sid
FROM
futures_contracts
"""
result = self.engine.execute(sql)
self._sids_to_real_sids = {row[0]: row[1] for row in result.fetchall()}
return self._sids_to_real_sids
@property
def real_sids_to_sids(self):
"""
Returns a dict mapping real sids to sids.
"""
if not self._real_sids_to_sids:
self._real_sids_to_sids = {v: k for k, v in self.sids_to_real_sids.items()}
return self._real_sids_to_sids
def get_bundle_end_date(self):
"""
Returns the max end_date of the bundle, which can be considered the date
through which the bundle has been updated.
"""
if not self._bundle_end_date:
max_date = self.engine.execute(
"""
SELECT
MAX(end_date)
FROM (
SELECT
end_date
FROM
equities
UNION
SELECT
end_date
FROM
futures_contracts
)
"""
).scalar()
self._bundle_end_date = pd.Timestamp(max_date, tz="UTC")
return self._bundle_end_date
def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
sids = starts = ends = []
equities_cols = self.equities.c
futures_cols = self.futures_contracts.c
if country_codes:
equities_query = sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.auto_close_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
)
futures_query = sa.select((
futures_cols.sid,
futures_cols.start_date,
futures_cols.auto_close_date,
)).where(
(self.exchanges.c.exchange == futures_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
)
results = equities_query.union(futures_query).execute().fetchall()
if results:
sids, starts, ends = zip(*results)
sid = np.array(sids, dtype='i8')
start = np.array(starts, dtype='f8')
end = np.array(ends, dtype='f8')
start[np.isnan(start)] = 0 # convert missing starts to 0
end[end == np.datetime64('NaT').view('i8')] = np.iinfo(int).max  # convert missing ends to INTMAX
return Lifetimes(sid, start, end)
def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
if isinstance(country_codes, string_types):
raise TypeError(
"Got string {!r} instead of an iterable of strings in "
"AssetFinder.lifetimes.".format(country_codes),
)
# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = (
self._compute_asset_lifetimes(country_codes)
)
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
def equities_sids_for_country_code(self, country_code):
"""Return all of the sids for a given country.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code.
Returns
-------
tuple[int]
The sids whose exchanges are in this country.
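For example (illustrative):
>>> us_sids = finder.equities_sids_for_country_code('US')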
"""
sids = self._compute_asset_lifetimes([country_code]).sid
return tuple(sids.tolist())
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Asset)
class NotAssetConvertible(ValueError):
pass
class PricingDataAssociable(with_metaclass(ABCMeta)):
"""
ABC for types that can be associated with pricing data.
Includes Asset, Future, ContinuousFuture
"""
pass
PricingDataAssociable.register(Asset)
PricingDataAssociable.register(Future)
PricingDataAssociable.register(ContinuousFuture)
| exchange_info | identifier_name |
assets.py | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from functools import partial
from numbers import Integral
from operator import itemgetter, attrgetter
import struct
import numpy as np
import pandas as pd
from pandas import isnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from toolz import (
compose,
concat,
concatv,
curry,
groupby,
merge,
partition_all,
sliding_window,
valmap,
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
SidsNotFound,
SymbolNotFound,
)
from ._assets import (
Asset, Equity, Future,
)
from .continuous_futures import (
ADJUSTMENT_STYLES,
ContinuousFuture,
OrderedContracts,
)
from .asset_writer import (
check_version_info,
split_delimited_symbol,
asset_db_table_names,
symbol_columns,
SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
ASSET_DB_VERSION
)
from .exchange_info import ExchangeInfo
from zipline.utils.functional import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_eng
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
'rollover_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def merge_ownership_periods(mappings):
"""
Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
end date of the last period is pushed forward to the max Timestamp.
"""
return valmap(
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
)],
),
)
),
mappings,
)
def _build_ownership_map_from_rows(rows, key_from_row, value_from_row):
mappings = {}
for row in rows:
mappings.setdefault(
key_from_row(row),
[],
).append(
OwnershipPeriod(
pd.Timestamp(row.start_date, unit='ns', tz='utc'),
pd.Timestamp(row.end_date, unit='ns', tz='utc'),
row.sid,
value_from_row(row),
),
)
return merge_ownership_periods(mappings)
def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping keys (as computed by ``key_from_row``) to lists
of OwnershipPeriods from a db table.
"""
return _build_ownership_map_from_rows(
sa.select(table.c).execute().fetchall(),
key_from_row,
value_from_row,
)
@curry
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
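For example, ``_filter_kwargs({'sid', 'symbol'},
{'sid': 1, 'symbol': None, 'extra': 2})`` returns ``{'sid': 1}``.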
"""
return {k: v for k, v in dict_.items() if k in names and v is not None}
_filter_future_kwargs = _filter_kwargs(Future._kwargnames)
_filter_equity_kwargs = _filter_kwargs(Equity._kwargnames)
def _convert_asset_timestamp_fields(dict_):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
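For example (illustrative), a nanosecond-epoch ``start_date`` becomes a
tz-aware UTC ``pd.Timestamp``, and a null value becomes ``None``.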
"""
for key in _asset_timestamp_fields & viewkeys(dict_):
value = pd.Timestamp(dict_[key], tz='UTC')
dict_[key] = None if isnull(value) else value
return dict_
SID_TYPE_IDS = {
# Asset would be 0,
ContinuousFuture: 1,
}
CONTINUOUS_FUTURE_ROLL_STYLE_IDS = {
'calendar': 0,
'volume': 1,
}
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = {
None: 0,
'div': 1,
'add': 2,
}
def _encode_continuous_future_sid(root_symbol,
offset,
roll_style,
adjustment_style):
s = struct.Struct("B 2B B B B 2B")
# B - sid type
# 2B - root symbol
# B - offset (could be packed smaller since offsets of greater than 12 are
# probably unneeded.)
# B - roll type
# B - adjustment
# 2B - empty space left for parameterized roll types
# The root symbol currently supports 2 characters. If 3 char root symbols
# are needed, the size of the root symbol does not need to change, however
# writing the string directly will need to change to a scheme of writing
# the A-Z values in 5-bit chunks.
a = array.array('B', [0] * s.size)
rs = bytearray(root_symbol, 'ascii')
values = (SID_TYPE_IDS[ContinuousFuture],
rs[0],
rs[1],
offset,
CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style],
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style],
0, 0)
s.pack_into(a, 0, *values)
return int(binascii.hexlify(a), 16)
Lifetimes = namedtuple('Lifetimes', 'sid start end')
class AssetFinder(object):
"""
An AssetFinder is an interface to a database of Asset metadata written by
an ``AssetDBWriter``.
This class provides methods for looking up assets by unique integer id or
by symbol. For historical reasons, we refer to these unique ids as 'sids'.
Parameters
----------
engine : str or SQLAlchemy.engine
An engine with a connection to the asset database to use, or a string
that can be parsed by SQLAlchemy as a URI.
future_chain_predicates : dict
A dict mapping future root symbol to a predicate function which accepts
a contract as a parameter and returns whether or not the contract should be
included in the chain.
See Also
--------
:class:`zipline.assets.AssetDBWriter`
"""
@preprocess(engine=coerce_string_to_eng(require_exists=True))
def __init__(self, engine, future_chain_predicates=None):
self.engine = engine
metadata = sa.MetaData(bind=engine)
metadata.reflect(only=asset_db_table_names)
for table_name in asset_db_table_names:
setattr(self, table_name, metadata.tables[table_name])
# Check the version info of the db for compatibility
check_version_info(engine, self.version_info, ASSET_DB_VERSION)
# Cache for lookup of assets by sid, the objects in the asset lookup
# may be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset will populate the cache on first retrieval.
self._asset_cache = {}
self._asset_type_cache = {}
self._caches = (self._asset_cache, self._asset_type_cache)
self._future_chain_predicates = future_chain_predicates \
if future_chain_predicates is not None else {}
self._ordered_contracts = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = {}
# Stores the max end_date of the bundle
self._bundle_end_date = None
self._sids_to_real_sids = {}
self._real_sids_to_sids = {}
@lazyval
def exchange_info(self):
es = sa.select(self.exchanges.c).execute().fetchall()
return {
name: ExchangeInfo(name, canonical_name, country_code)
for name, canonical_name, country_code in es
}
@lazyval
def symbol_ownership_map(self):
return build_ownership_map(
table=self.equity_symbol_mappings,
key_from_row=(
lambda row: (row.company_symbol, row.share_class_symbol)
),
value_from_row=lambda row: row.symbol,
)
@lazyval
def country_codes(self):
return tuple([c for (c,) in sa.select(
(sa.distinct(self.exchanges.c.country_code),)
).execute().fetchall()])
def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found
def group_by_type(self, sids):
"""
Group a list of sids by asset type.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[str or None -> list[int]]
A dict mapping unique asset types to lists of sids drawn from sids.
If we fail to look up an asset, we assign it a key of None.
"""
return invert(self.lookup_asset_types(sids))
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset for a given sid.
"""
try:
asset = self._asset_cache[sid]
if asset is None and not default_none:
raise SidsNotFound(sids=[sid])
return asset
except KeyError:
return self.retrieve_all((sid,), default_none=default_none)[0]
def retrieve_all(self, sids, default_none=False):
"""
Retrieve all assets in `sids`.
Parameters
----------
sids : iterable of int
Assets to retrieve.
default_none : bool
If True, return None for failed lookups.
If False, raise `SidsNotFound`.
Returns
-------
assets : list[Asset or None]
A list of the same length as `sids` containing Assets (or Nones)
corresponding to the requested sids.
Raises
------
SidsNotFound
When a requested sid is not found and default_none=False.
"""
sids = list(sids)
hits, missing, failures = {}, set(), []
for sid in sids:
try:
asset = self._asset_cache[sid]
if not default_none and asset is None:
# Bail early if we've already cached that we don't know
# about an asset.
raise SidsNotFound(sids=[sid])
hits[sid] = asset
except KeyError:
missing.add(sid)
# All requests were cache hits. Return requested sids in order.
if not missing:
return [hits[sid] for sid in sids]
update_hits = hits.update
# Look up cache misses by type.
type_to_assets = self.group_by_type(missing)
# Handle failures
failures = {failure: None for failure in type_to_assets.pop(None, ())}
update_hits(failures)
self._asset_cache.update(failures)
if failures and not default_none:
raise SidsNotFound(sids=list(failures))
# We don't update the asset cache here because it should already be
# updated by `self.retrieve_equities`.
update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
update_hits(
self.retrieve_futures_contracts(type_to_assets.pop('future', ()))
)
# We shouldn't know about any other asset types.
if type_to_assets:
raise AssertionError(
"Found asset types: %s" % list(type_to_assets.keys())
)
return [hits[sid] for sid in sids]
def retrieve_equities(self, sids):
"""
Retrieve Equity objects for a list of sids.
Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_all`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
equities : dict[int -> Equity]
Raises
------
EquitiesNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.equities, Equity)
def _retrieve_equity(self, sid):
return self.retrieve_equities((sid,))[sid]
def retrieve_futures_contracts(self, sids):
"""
Retrieve Future objects for an iterable of sids.
Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_all`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
futures : dict[int -> Future]
Raises
------
FutureContractsNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.futures_contracts, Future)
@staticmethod
def _select_assets_by_sid(asset_tbl, sids):
return sa.select([asset_tbl]).where(
asset_tbl.c.sid.in_(map(int, sids))
)
@staticmethod
def _select_asset_by_symbol(asset_tbl, symbol):
return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
cols = self.equity_symbol_mappings.c
# These are the columns we actually want.
data_cols = (cols.sid,) + tuple(cols[name] for name in symbol_columns)
# Also select the max of end_date so that all non-grouped fields take
# on the value associated with the max end_date. The SQLite docs say
# this:
#
# When the min() or max() aggregate functions are used in an aggregate
# query, all bare columns in the result set take values from the input
# row which also contains the minimum or maximum. Only the built-in
# min() and max() functions work this way.
#
# See https://www.sqlite.org/lang_select.html#resultset, for more info.
to_select = data_cols + (sa.func.max(cols.end_date),)
return sa.select(
to_select,
).where(
cols.sid.in_(map(int, sid_group))
).group_by(
cols.sid,
)
def _lookup_most_recent_symbols(self, sids):
return {
row.sid: {c: row[c] for c in symbol_columns}
for row in concat(
self.engine.execute(
self._select_most_recent_symbols_chunk(sid_group),
).fetchall()
for sid_group in partition_all(
SQLITE_MAX_VARIABLE_NUMBER,
sids
)
)
}
def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities):
if not sids:
return
if querying_equities:
def mkdict(row,
exchanges=self.exchange_info,
symbols=self._lookup_most_recent_symbols(sids)):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
# we are not required to have a symbol for every asset, if
# we don't have any symbols we will just use the empty string
return merge(d, symbols.get(row['sid'], {}))
else:
def mkdict(row, exchanges=self.exchange_info):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
return d
for assets in group_into_chunks(sids):
# Load misses from the db.
query = self._select_assets_by_sid(asset_tbl, assets)
for row in query.execute().fetchall():
yield _convert_asset_timestamp_fields(mkdict(row))
def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
----------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
|
else:
raise FutureContractsNotFound(sids=misses)
return hits
def lookup_symbol(self, symbol):
"""Lookup an equity by symbol. This method can only resolve the equity
if exactly one equity has ever owned the ticker.
Parameters
----------
symbol : str
The ticker symbol to resolve.
Returns
-------
equity : Equity
The equity identified by ``symbol``.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when more than one equity has held ``symbol`` or when
the symbol is ambiguous across multiple countries.
Notes
-----
The resolution algorithm is as follows:
- Split the symbol into the company and share class component.
- Do a dictionary lookup of the
``(company_symbol, share_class_symbol)`` in the provided ownership
map.
- If there is no entry in the dictionary, we don't know about this
symbol so raise a ``SymbolNotFound`` error.
- If there is more than one owner, raise ``MultipleSymbolsFound``.
- Otherwise, because the list mapped to a symbol cannot be empty,
return the single asset.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None")
mapping = self.symbol_ownership_map
# split the symbol into the components, if there are no
# company/share class parts then share_class_symbol will be empty
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = mapping[company_symbol, share_class_symbol]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held this symbol
raise SymbolNotFound(symbol=symbol)
# exactly one equity has ever held this symbol
if len(owners) == 1:
return self.retrieve_asset(owners[0].sid)
options = {self.retrieve_asset(owner.sid) for owner in owners}
# more than one equity has held this ticker, this
# is ambiguous
raise MultipleSymbolsFound(symbol=symbol, options=options)
def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
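Examples
--------
An illustrative sketch (the contract symbol is hypothetical):
>>> contract = finder.lookup_future_symbol('CLF17')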
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid'])
def _get_contract_sids(self, root_symbol):
fc_cols = self.futures_contracts.c
return [r.sid for r in
list(sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
(pd.notnull(fc_cols.start_date))).order_by(
fc_cols.auto_close_date).execute().fetchall())]
def _get_root_symbol_exchange(self, root_symbol):
fc_cols = self.futures_root_symbols.c
fields = (fc_cols.exchange,)
exchange = sa.select(fields).where(
fc_cols.root_symbol == root_symbol).execute().scalar()
if exchange is not None:
return exchange
else:
raise SymbolNotFound(symbol=root_symbol)
def get_ordered_contracts(self, root_symbol):
try:
return self._ordered_contracts[root_symbol]
except KeyError:
contract_sids = self._get_contract_sids(root_symbol)
contracts = deque(self.retrieve_all(contract_sids))
chain_predicate = self._future_chain_predicates.get(root_symbol,
None)
oc = OrderedContracts(root_symbol, contracts, chain_predicate)
self._ordered_contracts[root_symbol] = oc
return oc
def create_continuous_future(self,
root_symbol,
offset,
roll_style,
adjustment):
if adjustment not in ADJUSTMENT_STYLES:
raise ValueError(
'Invalid adjustment style {!r}. Allowed adjustment styles are '
'{}.'.format(adjustment, list(ADJUSTMENT_STYLES))
)
oc = self.get_ordered_contracts(root_symbol)
exchange = self._get_root_symbol_exchange(root_symbol)
sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
None)
mul_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'div')
add_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'add')
cf_template = partial(
ContinuousFuture,
root_symbol=root_symbol,
offset=offset,
roll_style=roll_style,
start_date=oc.start_date,
end_date=oc.end_date,
exchange_info=self.exchange_info[exchange],
)
cf = cf_template(sid=sid)
mul_cf = cf_template(sid=mul_sid, adjustment='mul')
add_cf = cf_template(sid=add_sid, adjustment='add')
self._asset_cache[cf.sid] = cf
self._asset_cache[mul_cf.sid] = mul_cf
self._asset_cache[add_cf.sid] = add_cf
return {None: cf, 'mul': mul_cf, 'add': add_cf}[adjustment]
def _make_sids(tblattr):
def _(self):
return tuple(map(
itemgetter('sid'),
sa.select((
getattr(self, tblattr).c.sid,
)).execute().fetchall(),
))
return _
sids = property(
_make_sids('asset_router'),
doc='All the sids in the asset finder.',
)
equities_sids = property(
_make_sids('equities'),
doc='All of the sids for equities in the asset finder.',
)
futures_sids = property(
_make_sids('futures_contracts'),
doc='All of the sids for futures contracts in the asset finder.',
)
del _make_sids
@property
def sids_to_real_sids(self):
"""
Returns a dict mapping sids to real sids.
"""
if not self._sids_to_real_sids:
sql = """
SELECT
sid,
real_sid
FROM
equities
UNION
SELECT
sid,
real_sid
FROM
futures_contracts
"""
result = self.engine.execute(sql)
self._sids_to_real_sids = {row[0]: row[1] for row in result.fetchall()}
return self._sids_to_real_sids
@property
def real_sids_to_sids(self):
"""
Returns a dict mapping real sids to sids.
"""
if not self._real_sids_to_sids:
self._real_sids_to_sids = {v: k for k, v in self.sids_to_real_sids.items()}
return self._real_sids_to_sids
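# Round-trip sketch for the two mappings above (illustrative): because
# ``real_sids_to_sids`` is the dict inversion of ``sids_to_real_sids``,
#
#     real_sid = finder.sids_to_real_sids[sid]
#     assert finder.real_sids_to_sids[real_sid] == sid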
def get_bundle_end_date(self):
"""
Returns the max end_date of the bundle, which can be considered the date
through which the bundle has been updated.
"""
if not self._bundle_end_date:
max_date = self.engine.execute(
"""
SELECT
MAX(end_date)
FROM (
SELECT
end_date
FROM
equities
UNION
SELECT
end_date
FROM
futures_contracts
)
"""
).scalar()
self._bundle_end_date = pd.Timestamp(max_date, tz="UTC")
return self._bundle_end_date
def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
sids = starts = ends = []
equities_cols = self.equities.c
futures_cols = self.futures_contracts.c
if country_codes:
equities_query = sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.auto_close_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
)
futures_query = sa.select((
futures_cols.sid,
futures_cols.start_date,
futures_cols.auto_close_date,
)).where(
(self.exchanges.c.exchange == futures_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
)
results = equities_query.union(futures_query).execute().fetchall()
if results:
sids, starts, ends = zip(*results)
sid = np.array(sids, dtype='i8')
start = np.array(starts, dtype='f8')
end = np.array(ends, dtype='f8')
start[np.isnan(start)] = 0 # convert missing starts to 0
end[end == np.datetime64('NaT').view('i8')] = np.iinfo(int).max  # convert missing end to INTMAX
return Lifetimes(sid, start, end)
def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
if isinstance(country_codes, string_types):
raise TypeError(
"Got string {!r} instead of an iterable of strings in "
"AssetFinder.lifetimes.".format(country_codes),
)
# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = (
self._compute_asset_lifetimes(country_codes)
)
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
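# Worked example (illustrative dates): for an asset whose start_date is
# 2014-01-06, the 2014-01-06 row of the returned frame is True when
# include_start_date=True and False when include_start_date=False; dates
# strictly between start and end are True in both modes.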
def equities_sids_for_country_code(self, country_code):
"""Return all of the sids for a given country.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code.
Returns
-------
tuple[int]
The sids whose exchanges are in this country.
"""
sids = self._compute_asset_lifetimes([country_code]).sid
return tuple(sids.tolist())
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Asset)
class NotAssetConvertible(ValueError):
pass
class PricingDataAssociable(with_metaclass(ABCMeta)):
"""
ABC for types that can be associated with pricing data.
Includes Asset, Future, ContinuousFuture
"""
pass
PricingDataAssociable.register(Asset)
PricingDataAssociable.register(Future)
PricingDataAssociable.register(ContinuousFuture)
| raise EquitiesNotFound(sids=misses) | conditional_block |
assets.py | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from functools import partial
from numbers import Integral
from operator import itemgetter, attrgetter
import struct
import numpy as np
import pandas as pd
from pandas import isnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from toolz import (
compose,
concat,
concatv,
curry,
groupby,
merge,
partition_all,
sliding_window,
valmap,
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
SidsNotFound,
SymbolNotFound,
)
from ._assets import (
Asset, Equity, Future,
)
from .continuous_futures import (
ADJUSTMENT_STYLES,
ContinuousFuture,
OrderedContracts,
)
from .asset_writer import (
check_version_info,
split_delimited_symbol,
asset_db_table_names,
symbol_columns,
SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
ASSET_DB_VERSION
)
from .exchange_info import ExchangeInfo
from zipline.utils.functional import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_eng
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
'rollover_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def merge_ownership_periods(mappings):
"""
Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
end date of the last period is pushed forward to the max Timestamp.
"""
return valmap(
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
)],
),
)
),
mappings,
)
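# Worked example (illustrative): two sorted periods for the same key
# starting 2010 and 2014 come out as (start=2010, end=2014) and
# (start=2014, end=pd.Timestamp.max): each end is replaced by the next
# period's start, and the final end becomes the max timestamp.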
def _build_ownership_map_from_rows(rows, key_from_row, value_from_row):
mappings = {}
for row in rows:
mappings.setdefault(
key_from_row(row),
[],
).append(
OwnershipPeriod(
pd.Timestamp(row.start_date, unit='ns', tz='utc'),
pd.Timestamp(row.end_date, unit='ns', tz='utc'),
row.sid,
value_from_row(row),
),
)
return merge_ownership_periods(mappings)
def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping to lists of OwnershipPeriods, from a db table.
"""
return _build_ownership_map_from_rows(
sa.select(table.c).execute().fetchall(),
key_from_row,
value_from_row,
)
@curry
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None}
_filter_future_kwargs = _filter_kwargs(Future._kwargnames)
_filter_equity_kwargs = _filter_kwargs(Equity._kwargnames)
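# Worked example of the curried filter (illustrative values):
#
#     _filter_kwargs({'sid', 'symbol'}, {'sid': 1, 'symbol': None, 'x': 2})
#     # -> {'sid': 1}   ('symbol' dropped as None, 'x' dropped as unknown)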
def _convert_asset_timestamp_fields(dict_):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key in _asset_timestamp_fields & viewkeys(dict_):
value = pd.Timestamp(dict_[key], tz='UTC')
dict_[key] = None if isnull(value) else value
return dict_
SID_TYPE_IDS = {
# Asset would be 0,
ContinuousFuture: 1,
}
CONTINUOUS_FUTURE_ROLL_STYLE_IDS = {
'calendar': 0,
'volume': 1,
}
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = {
None: 0,
'div': 1,
'add': 2,
}
def _encode_continuous_future_sid(root_symbol,
offset,
roll_style,
adjustment_style):
s = struct.Struct("B 2B B B B 2B")
# B - sid type
# 2B - root symbol
# B - offset (could be packed smaller since offsets of greater than 12 are
# probably unneeded.)
# B - roll type
# B - adjustment
# 2B - empty space left for parameterized roll types
# The root symbol currently supports 2 characters. If 3 char root symbols
# are needed, the size of the root symbol does not need to change, however
# writing the string directly will need to change to a scheme of writing
# the A-Z values in 5-bit chunks.
a = array.array('B', [0] * s.size)
rs = bytearray(root_symbol, 'ascii')
values = (SID_TYPE_IDS[ContinuousFuture],
rs[0],
rs[1],
offset,
CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style],
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style],
0, 0)
s.pack_into(a, 0, *values)
return int(binascii.hexlify(a), 16)
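# Worked example (illustrative): for root_symbol='CL', offset=0,
# roll_style='calendar', adjustment_style=None, the packed bytes are
# [1, ord('C'), ord('L'), 0, 0, 0, 0, 0], so the resulting sid is
# 0x01434c0000000000.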
Lifetimes = namedtuple('Lifetimes', 'sid start end')
class AssetFinder(object):
"""
An AssetFinder is an interface to a database of Asset metadata written by
an ``AssetDBWriter``.
This class provides methods for looking up assets by unique integer id or
by symbol. For historical reasons, we refer to these unique ids as 'sids'.
Parameters
----------
engine : str or SQLAlchemy.engine
An engine with a connection to the asset database to use, or a string
that can be parsed by SQLAlchemy as a URI.
future_chain_predicates : dict
A dict mapping future root symbol to a predicate function which accepts
a contract as a parameter and returns whether or not the contract should be
included in the chain.
See Also
--------
:class:`zipline.assets.AssetDBWriter`
"""
@preprocess(engine=coerce_string_to_eng(require_exists=True))
def __init__(self, engine, future_chain_predicates=None):
self.engine = engine
metadata = sa.MetaData(bind=engine)
metadata.reflect(only=asset_db_table_names)
for table_name in asset_db_table_names:
setattr(self, table_name, metadata.tables[table_name])
# Check the version info of the db for compatibility
check_version_info(engine, self.version_info, ASSET_DB_VERSION)
# Cache for lookup of assets by sid, the objects in the asset lookup
# may be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset will populate the cache on first retrieval.
self._asset_cache = {}
self._asset_type_cache = {}
self._caches = (self._asset_cache, self._asset_type_cache)
self._future_chain_predicates = future_chain_predicates \
if future_chain_predicates is not None else {}
self._ordered_contracts = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = {}
# Stores the max end_date of the bundle
self._bundle_end_date = None
self._sids_to_real_sids = {}
self._real_sids_to_sids = {}
@lazyval
def exchange_info(self):
es = sa.select(self.exchanges.c).execute().fetchall()
return {
name: ExchangeInfo(name, canonical_name, country_code)
for name, canonical_name, country_code in es
}
@lazyval
def symbol_ownership_map(self):
return build_ownership_map(
table=self.equity_symbol_mappings,
key_from_row=(
lambda row: (row.company_symbol, row.share_class_symbol)
),
value_from_row=lambda row: row.symbol,
)
@lazyval
def country_codes(self):
return tuple([c for (c,) in sa.select(
sa.distinct(self.exchanges.c.country_code,
)).execute().fetchall()])
def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found
def group_by_type(self, sids):
"""
Group a list of sids by asset type.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[str or None -> list[int]]
A dict mapping unique asset types to lists of sids drawn from sids.
If we fail to look up an asset, we assign it a key of None.
"""
return invert(self.lookup_asset_types(sids))
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset for a given sid.
"""
try:
asset = self._asset_cache[sid]
if asset is None and not default_none:
raise SidsNotFound(sids=[sid])
return asset
except KeyError:
return self.retrieve_all((sid,), default_none=default_none)[0]
def retrieve_all(self, sids, default_none=False):
"""
Retrieve all assets in `sids`.
Parameters
----------
sids : iterable of int
Assets to retrieve.
default_none : bool
If True, return None for failed lookups.
If False, raise `SidsNotFound`.
Returns
-------
assets : list[Asset or None]
A list of the same length as `sids` containing Assets (or Nones)
corresponding to the requested sids.
Raises
------
SidsNotFound
When a requested sid is not found and default_none=False.
"""
sids = list(sids)
hits, missing, failures = {}, set(), []
for sid in sids:
try:
asset = self._asset_cache[sid]
if not default_none and asset is None:
# Bail early if we've already cached that we don't know | missing.add(sid)
# All requests were cache hits. Return requested sids in order.
if not missing:
return [hits[sid] for sid in sids]
update_hits = hits.update
# Look up cache misses by type.
type_to_assets = self.group_by_type(missing)
# Handle failures
failures = {failure: None for failure in type_to_assets.pop(None, ())}
update_hits(failures)
self._asset_cache.update(failures)
if failures and not default_none:
raise SidsNotFound(sids=list(failures))
# We don't update the asset cache here because it should already be
# updated by `self.retrieve_equities`.
update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
update_hits(
self.retrieve_futures_contracts(type_to_assets.pop('future', ()))
)
# We shouldn't know about any other asset types.
if type_to_assets:
raise AssertionError(
"Found asset types: %s" % list(type_to_assets.keys())
)
return [hits[sid] for sid in sids]
def retrieve_equities(self, sids):
"""
Retrieve Equity objects for a list of sids.
Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_assets`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
equities : dict[int -> Equity]
Raises
------
EquitiesNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.equities, Equity)
def _retrieve_equity(self, sid):
return self.retrieve_equities((sid,))[sid]
def retrieve_futures_contracts(self, sids):
"""
Retrieve Future objects for an iterable of sids.
Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_assets`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
futures : dict[int -> Future]
Raises
------
FutureContractsNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.futures_contracts, Future)
@staticmethod
def _select_assets_by_sid(asset_tbl, sids):
return sa.select([asset_tbl]).where(
asset_tbl.c.sid.in_(map(int, sids))
)
@staticmethod
def _select_asset_by_symbol(asset_tbl, symbol):
return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
cols = self.equity_symbol_mappings.c
# These are the columns we actually want.
data_cols = (cols.sid,) + tuple(cols[name] for name in symbol_columns)
# Also select the max of end_date so that all non-grouped fields take
# on the value associated with the max end_date. The SQLite docs say
# this:
#
# When the min() or max() aggregate functions are used in an aggregate
# query, all bare columns in the result set take values from the input
# row which also contains the minimum or maximum. Only the built-in
# min() and max() functions work this way.
#
# See https://www.sqlite.org/lang_select.html#resultset, for more info.
to_select = data_cols + (sa.func.max(cols.end_date),)
return sa.select(
to_select,
).where(
cols.sid.in_(map(int, sid_group))
).group_by(
cols.sid,
)
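# The generated query has roughly this shape (illustrative; not the exact
# SQL emitted by SQLAlchemy):
#
#     SELECT sid, <symbol columns>, MAX(end_date)
#     FROM equity_symbol_mappings
#     WHERE sid IN (...)
#     GROUP BY sid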
def _lookup_most_recent_symbols(self, sids):
return {
row.sid: {c: row[c] for c in symbol_columns}
for row in concat(
self.engine.execute(
self._select_most_recent_symbols_chunk(sid_group),
).fetchall()
for sid_group in partition_all(
SQLITE_MAX_VARIABLE_NUMBER,
sids
)
)
}
def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities):
if not sids:
return
if querying_equities:
def mkdict(row,
exchanges=self.exchange_info,
symbols=self._lookup_most_recent_symbols(sids)):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
# we are not required to have a symbol for every asset, if
# we don't have any symbols we will just use the empty string
return merge(d, symbols.get(row['sid'], {}))
else:
def mkdict(row, exchanges=self.exchange_info):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
return d
for assets in group_into_chunks(sids):
# Load misses from the db.
query = self._select_assets_by_sid(asset_tbl, assets)
for row in query.execute().fetchall():
yield _convert_asset_timestamp_fields(mkdict(row))
def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
----------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
raise EquitiesNotFound(sids=misses)
else:
raise FutureContractsNotFound(sids=misses)
return hits
def lookup_symbol(self, symbol):
"""Lookup an equity by symbol. This method can only resolve the equity
if exactly one equity has ever owned the ticker.
Parameters
----------
symbol : str
The ticker symbol to resolve.
Returns
-------
equity : Equity
The equity identified by ``symbol``.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when more than one equity has held ``symbol`` or when
the symbol is ambiguous across multiple countries.
Notes
-----
The resolution algorithm is as follows:
- Split the symbol into the company and share class component.
- Do a dictionary lookup of the
``(company_symbol, share_class_symbol)`` in the provided ownership
map.
- If there is no entry in the dictionary, we don't know about this
symbol so raise a ``SymbolNotFound`` error.
- If there is more than one owner, raise ``MultipleSymbolsFound``.
- Otherwise, because the list mapped to a symbol cannot be empty,
return the single asset.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None")
mapping = self.symbol_ownership_map
# split the symbol into the components, if there are no
# company/share class parts then share_class_symbol will be empty
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = mapping[company_symbol, share_class_symbol]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held this symbol
raise SymbolNotFound(symbol=symbol)
# exactly one equity has ever held this symbol
if len(owners) == 1:
return self.retrieve_asset(owners[0].sid)
options = {self.retrieve_asset(owner.sid) for owner in owners}
# more than one equity has held this ticker, this
# is ambiguous
raise MultipleSymbolsFound(symbol=symbol, options=options)
def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid'])
def _get_contract_sids(self, root_symbol):
fc_cols = self.futures_contracts.c
return [r.sid for r in
list(sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
(pd.notnull(fc_cols.start_date))).order_by(
fc_cols.auto_close_date).execute().fetchall())]
def _get_root_symbol_exchange(self, root_symbol):
fc_cols = self.futures_root_symbols.c
fields = (fc_cols.exchange,)
exchange = sa.select(fields).where(
fc_cols.root_symbol == root_symbol).execute().scalar()
if exchange is not None:
return exchange
else:
raise SymbolNotFound(symbol=root_symbol)
def get_ordered_contracts(self, root_symbol):
try:
return self._ordered_contracts[root_symbol]
except KeyError:
contract_sids = self._get_contract_sids(root_symbol)
contracts = deque(self.retrieve_all(contract_sids))
chain_predicate = self._future_chain_predicates.get(root_symbol,
None)
oc = OrderedContracts(root_symbol, contracts, chain_predicate)
self._ordered_contracts[root_symbol] = oc
return oc
def create_continuous_future(self,
root_symbol,
offset,
roll_style,
adjustment):
if adjustment not in ADJUSTMENT_STYLES:
raise ValueError(
'Invalid adjustment style {!r}. Allowed adjustment styles are '
'{}.'.format(adjustment, list(ADJUSTMENT_STYLES))
)
oc = self.get_ordered_contracts(root_symbol)
exchange = self._get_root_symbol_exchange(root_symbol)
sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
None)
mul_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'div')
add_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'add')
cf_template = partial(
ContinuousFuture,
root_symbol=root_symbol,
offset=offset,
roll_style=roll_style,
start_date=oc.start_date,
end_date=oc.end_date,
exchange_info=self.exchange_info[exchange],
)
cf = cf_template(sid=sid)
mul_cf = cf_template(sid=mul_sid, adjustment='mul')
add_cf = cf_template(sid=add_sid, adjustment='add')
self._asset_cache[cf.sid] = cf
self._asset_cache[mul_cf.sid] = mul_cf
self._asset_cache[add_cf.sid] = add_cf
return {None: cf, 'mul': mul_cf, 'add': add_cf}[adjustment]
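# Usage sketch (illustrative): each call builds and caches all three
# adjustment variants in self._asset_cache and returns the requested one:
#
#     cf = finder.create_continuous_future('CL', 0, 'calendar', None)
#     mul_cf = finder.create_continuous_future('CL', 0, 'calendar', 'mul')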
def _make_sids(tblattr):
def _(self):
return tuple(map(
itemgetter('sid'),
sa.select((
getattr(self, tblattr).c.sid,
)).execute().fetchall(),
))
return _
sids = property(
_make_sids('asset_router'),
doc='All the sids in the asset finder.',
)
equities_sids = property(
_make_sids('equities'),
doc='All of the sids for equities in the asset finder.',
)
futures_sids = property(
_make_sids('futures_contracts'),
doc='All of the sids for futures contracts in the asset finder.',
)
del _make_sids
@property
def sids_to_real_sids(self):
"""
Returns a dict mapping sids to real sids.
"""
if not self._sids_to_real_sids:
sql = """
SELECT
sid,
real_sid
FROM
equities
UNION
SELECT
sid,
real_sid
FROM
futures_contracts
"""
result = self.engine.execute(sql)
self._sids_to_real_sids = {row[0]: row[1] for row in result.fetchall()}
return self._sids_to_real_sids
@property
def real_sids_to_sids(self):
"""
Returns a dict mapping real sids to sids.
"""
if not self._real_sids_to_sids:
self._real_sids_to_sids = {v: k for k, v in self.sids_to_real_sids.items()}
return self._real_sids_to_sids
def get_bundle_end_date(self):
"""
Returns the max end_date of the bundle, which can be considered the date
through which the bundle has been updated.
"""
if not self._bundle_end_date:
max_date = self.engine.execute(
"""
SELECT
MAX(end_date)
FROM (
SELECT
end_date
FROM
equities
UNION
SELECT
end_date
FROM
futures_contracts
)
"""
).scalar()
self._bundle_end_date = pd.Timestamp(max_date, tz="UTC")
return self._bundle_end_date
def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
sids = starts = ends = []
equities_cols = self.equities.c
futures_cols = self.futures_contracts.c
if country_codes:
equities_query = sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.auto_close_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
)
futures_query = sa.select((
futures_cols.sid,
futures_cols.start_date,
futures_cols.auto_close_date,
)).where(
(self.exchanges.c.exchange == futures_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
)
results = equities_query.union(futures_query).execute().fetchall()
if results:
sids, starts, ends = zip(*results)
sid = np.array(sids, dtype='i8')
start = np.array(starts, dtype='f8')
end = np.array(ends, dtype='f8')
start[np.isnan(start)] = 0 # convert missing starts to 0
end[end == np.datetime64('NaT').view('i8')] = np.iinfo(int).max  # convert missing end to INTMAX
return Lifetimes(sid, start, end)
def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
if isinstance(country_codes, string_types):
raise TypeError(
"Got string {!r} instead of an iterable of strings in "
"AssetFinder.lifetimes.".format(country_codes),
)
# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = (
self._compute_asset_lifetimes(country_codes)
)
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
def equities_sids_for_country_code(self, country_code):
"""Return all of the sids for a given country.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code.
Returns
-------
tuple[int]
The sids whose exchanges are in this country.
"""
sids = self._compute_asset_lifetimes([country_code]).sid
return tuple(sids.tolist())
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Asset)
class NotAssetConvertible(ValueError):
pass
class PricingDataAssociable(with_metaclass(ABCMeta)):
"""
ABC for types that can be associated with pricing data.
Includes Asset, Future, ContinuousFuture
"""
pass
PricingDataAssociable.register(Asset)
PricingDataAssociable.register(Future)
PricingDataAssociable.register(ContinuousFuture) | # about an asset.
raise SidsNotFound(sids=[sid])
hits[sid] = asset
except KeyError: | random_line_split |
game.rs | use std::collections::BTreeMap;
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
use tokio::sync::{
mpsc::{self, error::TrySendError},
oneshot,
};
use tokio::time;
use logic::components::{Movement, WorldInteraction};
use logic::legion::prelude::{Entity, World};
use logic::resources::DeadEntities;
use logic::snapshot::SnapshotEncoder;
use protocol::{
Action, ActionKind, EntityId, Event, EventKind, GameOver, PlayerId, Request, RequestKind,
Response, ResponseKind, Snapshot,
};
/// How many times per second to update the game world.
const TICK_RATE: u32 = 60;
/// The maximum number of events to buffer per player.
const EVENT_BUFFER_SIZE: usize = 1024;
pub struct Game {
players: BTreeMap<PlayerId, PlayerData>,
receiver: mpsc::Receiver<Command>,
world: World,
executor: logic::Executor,
snapshots: SnapshotEncoder,
time: u32,
}
#[derive(Debug, Clone)]
struct PlayerData {
entity: Entity,
network_id: EntityId,
events: mpsc::Sender<Event>,
}
#[derive(Debug)]
pub struct PlayerHandle {
player: PlayerId,
events: mpsc::Receiver<Event>,
}
#[derive(Debug, Clone)]
pub struct GameHandle {
sender: mpsc::Sender<Command>,
}
#[derive(Debug)]
enum Command {
Request {
request: Request,
callback: Callback<Response>,
},
RegisterPlayer {
callback: Callback<PlayerHandle>,
},
DisconnectPlayer(PlayerId),
Snapshot {
callback: Callback<Snapshot>,
},
PerformAction {
action: Action,
player: PlayerId,
},
}
struct Callback<T> {
sender: oneshot::Sender<T>,
}
// We don't care what the callback contains; simply print the expected return type.
impl<T> Debug for Callback<T> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "Callback<{}>", std::any::type_name::<T>())
}
}
impl Game {
/// Create a new game alongside a handle to the game.
pub fn new() -> (Game, GameHandle) {
let (sender, receiver) = mpsc::channel(1024);
let world = logic::create_world(logic::WorldKind::WithObjects);
let schedule = logic::add_systems(Default::default(), logic::SystemSet::Everything);
let executor = logic::Executor::new(schedule);
let game = Game {
players: BTreeMap::new(),
receiver,
world,
executor,
snapshots: SnapshotEncoder::new(),
time: 0,
};
let handle = GameHandle { sender };
(game, handle)
}
/// Run the game to completion (either the handle is dropped or a fatal error occurs).
pub async fn run(&mut self) {
let mut timer = time::interval(time::Duration::from_secs(1) / TICK_RATE);
loop {
tokio::select! {
_ = timer.tick() => {
self.tick();
}
command = self.receiver.recv() => match command {
None => {
log::info!("game handle dropped");
break;
},
Some(command) => {
log::debug!("got command: {:?}", command);
self.execute_command(command);
}
}
};
}
}
fn tick(&mut self) {
self.executor.tick(&mut self.world);
self.snapshots.update_mapping(&self.world);
self.check_win_condition();
let mut events = Vec::<EventKind>::new();
let snapshot = Arc::new(self.snapshot());
events.push(snapshot.into());
for event in events {
self.broadcast(event);
}
self.time = self.time.wrapping_add(1);
}
fn broadcast<T>(&mut self, kind: T)
where
T: Into<EventKind>,
{
let event = Event {
time: self.time,
kind: kind.into(),
};
let mut dead = Vec::new();
for (&id, player) in &mut self.players {
match player.events.try_send(event.clone()) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
log::warn!("player {}'s event buffer is full", id);
dead.push(id);
// TODO: request full client resync
}
Err(TrySendError::Closed(_)) => {
log::info!("player {} stopped listening for events", id);
dead.push(id);
// TODO: stop attempting to send events to this player, and potentially
// disconnect them.
}
}
}
for player in dead {
self.remove_player(player);
}
}
fn remove_player(&mut self, player: PlayerId) -> Option<PlayerData> {
let data = self.players.remove(&player)?;
self.world.delete(data.entity);
self.world
.resources
.get_mut::<DeadEntities>()
.unwrap()
.entities
.push(data.network_id);
Some(data)
}
/// Check if any player has won or lost.
fn check_win_condition(&mut self) {
let dead = self.world.resources.get::<DeadEntities>().unwrap();
let mut losers = Vec::new();
for (&player, data) in &self.players {
if dead.entities.contains(&data.network_id) {
losers.push(player);
}
}
drop(dead);
for loser in losers {
let mut player = self.players.remove(&loser).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Loser),
};
tokio::spawn(async move { player.events.send(event).await });
if self.players.len() == 1 {
let winner = *self.players.keys().next().unwrap();
let mut player = self.remove_player(winner).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Winner),
};
tokio::spawn(async move { player.events.send(event).await });
}
}
}
/// Execute a command.
fn execute_command(&mut self, command: Command) {
match command {
Command::RegisterPlayer { callback } => {
callback.send(self.register_player());
}
Command::DisconnectPlayer(player) => {
self.remove_player(player);
}
Command::Request { callback, request } => {
let message = self.handle_request(request);
callback.send(message);
}
Command::Snapshot { callback } => {
let snapshot = self.snapshot();
callback.send(snapshot);
}
Command::PerformAction { action, player } => self.perform_action(action, player),
}
}
/// Create and register a new player
fn register_player(&mut self) -> PlayerHandle {
let player = self.next_player_id();
let entity = logic::add_player(&mut self.world, player);
let (sender, receiver) = mpsc::channel(EVENT_BUFFER_SIZE);
let network_id = *self.world.get_component::<EntityId>(entity).unwrap();
let data = PlayerData {
network_id,
entity,
events: sender,
};
self.players.insert(player, data);
PlayerHandle {
player,
events: receiver,
}
}
/// Find the next available player id
fn next_player_id(&self) -> PlayerId {
let mut id = 1;
for player in self.players.keys() {
if player.0 == id {
id += 1;
} else {
break;
}
}
PlayerId(id)
}
/// Perform the request and return the result in a message
fn handle_request(&mut self, request: Request) -> Response {
let kind = match request.kind {
RequestKind::Ping => protocol::Pong.into(),
RequestKind::Init => {
let error = "Requested 'Init' on already initialized player";
ResponseKind::Error(error.into())
}
};
Response {
channel: request.channel,
kind,
}
}
/// Get a snapshot of the current game state.
fn snapshot(&self) -> Snapshot {
self.snapshots.make_snapshot(&self.world)
}
/// Perform an action for a player.
fn perform_action(&mut self, action: Action, player: PlayerId) {
match action.kind {
ActionKind::Move(new) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let mut movement = self.world.get_component_mut::<Movement>(data.entity)?;
movement.direction = new.direction;
Some(())
}();
}
ActionKind::Break(breaking) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let breaking = breaking
.entity
.and_then(|breaking| self.snapshots.lookup(breaking));
self.world
.get_component_mut::<WorldInteraction>(data.entity)?
.breaking = breaking;
Some(())
}();
}
ActionKind::Throw(throwing) => {
if let Some(data) = self.players.get(&player) {
logic::events::throw(&mut self.world, data.entity, throwing.target);
}
}
}
}
}
impl GameHandle {
/// Register a new client and return its id.
pub async fn register_player(&mut self) -> crate::Result<PlayerHandle> {
self.send_with(|callback| Command::RegisterPlayer { callback })
.await
}
/// Remove a player from the game.
pub async fn disconnect_player(&mut self, player: PlayerId) -> crate::Result<()> {
self.sender.send(Command::DisconnectPlayer(player)).await?;
Ok(())
}
/// Handle a request made by a player.
pub async fn handle_request(&mut self, request: Request) -> crate::Result<Response> {
self.send_with(move |callback| Command::Request { request, callback })
.await
}
/// Get a snapshot of the current game state.
pub async fn snapshot(&mut self) -> crate::Result<Snapshot> {
self.send_with(|callback| Command::Snapshot { callback })
.await
}
/// Handle an action performed by a player
pub async fn handle_action(&mut self, action: Action, player: PlayerId) -> crate::Result<()> {
self.sender
.send(Command::PerformAction { action, player })
.await?;
Ok(())
}
/// Send a command to the game with the specified callback and then return the value passed into
/// the callback.
async fn send_with<F, O>(&mut self, to_command: F) -> crate::Result<O>
where
F: FnOnce(Callback<O>) -> Command,
{
let (callback, value) = Callback::new();
let command = to_command(callback);
self.sender.send(command).await?;
value.await.map_err(Into::into)
}
}
impl PlayerHandle {
/// Get the id of this player
pub fn id(&self) -> PlayerId {
self.player
}
pub async fn poll_event(&mut self) -> Option<Event> {
self.events.recv().await
}
}
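// A minimal end-to-end sketch of the handle pattern above (illustrative;
// assumes a tokio runtime, error handling elided):
//
//     let (mut game, mut handle) = Game::new();
//     tokio::spawn(async move { game.run().await });
//     let mut player = handle.register_player().await?;
//     while let Some(event) = player.poll_event().await {
//         // react to snapshots and game-over events here
//     }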
impl<T> Callback<T> {
/// Create a new callback
pub fn new() -> (Callback<T>, oneshot::Receiver<T>) |
/// Attempt to send the value, returning false if the receiver was closed.
pub fn send(self, value: T) -> bool {
match self.sender.send(value) {
Ok(()) => true,
Err(_) => false,
}
}
}
| {
let (sender, receiver) = oneshot::channel();
(Callback { sender }, receiver)
} | identifier_body |
game.rs | use std::collections::BTreeMap;
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
use tokio::sync::{
mpsc::{self, error::TrySendError},
oneshot,
};
use tokio::time;
use logic::components::{Movement, WorldInteraction};
use logic::legion::prelude::{Entity, World};
use logic::resources::DeadEntities;
use logic::snapshot::SnapshotEncoder;
use protocol::{
Action, ActionKind, EntityId, Event, EventKind, GameOver, PlayerId, Request, RequestKind,
Response, ResponseKind, Snapshot,
};
/// How many times per second to update the game world.
const TICK_RATE: u32 = 60;
/// The maximum number of events to buffer per player.
const EVENT_BUFFER_SIZE: usize = 1024;
pub struct Game {
players: BTreeMap<PlayerId, PlayerData>,
receiver: mpsc::Receiver<Command>,
world: World,
executor: logic::Executor,
snapshots: SnapshotEncoder,
time: u32,
}
#[derive(Debug, Clone)]
struct PlayerData {
entity: Entity,
network_id: EntityId,
events: mpsc::Sender<Event>,
}
#[derive(Debug)]
pub struct PlayerHandle {
player: PlayerId,
events: mpsc::Receiver<Event>,
}
#[derive(Debug, Clone)]
pub struct GameHandle {
sender: mpsc::Sender<Command>,
}
#[derive(Debug)]
enum Command {
Request {
request: Request,
callback: Callback<Response>,
},
RegisterPlayer {
callback: Callback<PlayerHandle>,
},
DisconnectPlayer(PlayerId),
Snapshot {
callback: Callback<Snapshot>,
},
PerformAction {
action: Action,
player: PlayerId,
},
}
struct Callback<T> {
sender: oneshot::Sender<T>,
}
// We don't care what the callback contains; simply print the expected return type.
impl<T> Debug for Callback<T> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "Callback<{}>", std::any::type_name::<T>())
}
}
impl Game {
/// Create a new game alongside a handle to the game.
pub fn new() -> (Game, GameHandle) {
let (sender, receiver) = mpsc::channel(1024);
let world = logic::create_world(logic::WorldKind::WithObjects);
let schedule = logic::add_systems(Default::default(), logic::SystemSet::Everything);
let executor = logic::Executor::new(schedule);
let game = Game {
players: BTreeMap::new(),
receiver,
world,
executor,
snapshots: SnapshotEncoder::new(),
time: 0,
};
let handle = GameHandle { sender };
(game, handle)
}
/// Run the game to completion (either the handle is dropped or a fatal error occurs).
pub async fn run(&mut self) {
let mut timer = time::interval(time::Duration::from_secs(1) / TICK_RATE);
loop {
tokio::select! {
_ = timer.tick() => {
self.tick();
}
command = self.receiver.recv() => match command {
None => {
log::info!("game handle dropped");
break;
},
Some(command) => {
log::debug!("got command: {:?}", command);
self.execute_command(command);
}
}
};
}
}
fn tick(&mut self) {
self.executor.tick(&mut self.world);
self.snapshots.update_mapping(&self.world);
self.check_win_condition();
let mut events = Vec::<EventKind>::new();
let snapshot = Arc::new(self.snapshot());
events.push(snapshot.into());
for event in events {
self.broadcast(event);
}
self.time = self.time.wrapping_add(1);
}
fn broadcast<T>(&mut self, kind: T)
where
T: Into<EventKind>,
{
let event = Event {
time: self.time,
kind: kind.into(),
};
let mut dead = Vec::new();
for (&id, player) in &mut self.players {
match player.events.try_send(event.clone()) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
log::warn!("player {}'s event buffer is full", id);
dead.push(id);
// TODO: request full client resync
}
Err(TrySendError::Closed(_)) => {
log::info!("player {} stopped listening for events", id);
dead.push(id);
// TODO: stop attempting to send events to this player, and potentially
// disconnect them.
}
}
}
for player in dead {
self.remove_player(player);
}
}
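// Backpressure sketch: each tick broadcasts one snapshot event, so with
// EVENT_BUFFER_SIZE = 1024 and TICK_RATE = 60 a client that stops draining
// its channel fills the buffer in roughly 1024 / 60 ≈ 17 seconds, after
// which it is removed via the `Full` branch above.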
fn remove_player(&mut self, player: PlayerId) -> Option<PlayerData> {
let data = self.players.remove(&player)?;
self.world.delete(data.entity);
self.world
.resources
.get_mut::<DeadEntities>()
.unwrap()
.entities
.push(data.network_id);
Some(data)
}
/// Check if any player has won or lost.
fn check_win_condition(&mut self) {
let dead = self.world.resources.get::<DeadEntities>().unwrap();
let mut losers = Vec::new();
for (&player, data) in &self.players {
if dead.entities.contains(&data.network_id) {
losers.push(player);
}
}
drop(dead);
for loser in losers {
let mut player = self.players.remove(&loser).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Loser),
};
tokio::spawn(async move { player.events.send(event).await });
if self.players.len() == 1 {
let winner = *self.players.keys().next().unwrap();
let mut player = self.remove_player(winner).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Winner),
};
tokio::spawn(async move { player.events.send(event).await });
}
}
}
/// Execute a command.
fn execute_command(&mut self, command: Command) {
match command {
Command::RegisterPlayer { callback } => {
callback.send(self.register_player());
}
Command::DisconnectPlayer(player) => {
self.remove_player(player);
}
Command::Request { callback, request } => {
let message = self.handle_request(request);
callback.send(message);
}
Command::Snapshot { callback } => {
let snapshot = self.snapshot();
callback.send(snapshot);
}
Command::PerformAction { action, player } => self.perform_action(action, player),
}
}
/// Create and register a new player
fn register_player(&mut self) -> PlayerHandle {
let player = self.next_player_id();
let entity = logic::add_player(&mut self.world, player);
let (sender, receiver) = mpsc::channel(EVENT_BUFFER_SIZE);
let network_id = *self.world.get_component::<EntityId>(entity).unwrap();
let data = PlayerData {
network_id,
entity,
events: sender,
};
self.players.insert(player, data);
PlayerHandle {
player,
events: receiver,
}
}
/// Find the next available player id
fn next_player_id(&self) -> PlayerId {
let mut id = 1;
for player in self.players.keys() {
if player.0 == id {
id += 1;
} else {
break;
}
}
PlayerId(id)
}
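// Worked example (illustrative): with players {1, 2, 4} the scan stops at
// the first gap and returns PlayerId(3); with {2, 3} it returns PlayerId(1).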
/// Perform the request and return the result in a message
fn handle_request(&mut self, request: Request) -> Response {
let kind = match request.kind {
RequestKind::Ping => protocol::Pong.into(),
RequestKind::Init => {
let error = "Requested 'Init' on already initialized player";
ResponseKind::Error(error.into())
}
};
Response {
channel: request.channel,
kind,
}
}
/// Get a snapshot of the current game state.
fn snapshot(&self) -> Snapshot {
self.snapshots.make_snapshot(&self.world)
}
/// Perform an action for a player.
fn perform_action(&mut self, action: Action, player: PlayerId) {
match action.kind {
ActionKind::Move(new) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let mut movement = self.world.get_component_mut::<Movement>(data.entity)?;
movement.direction = new.direction;
Some(())
}();
}
ActionKind::Break(breaking) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let breaking = breaking
.entity
.and_then(|breaking| self.snapshots.lookup(breaking));
self.world
.get_component_mut::<WorldInteraction>(data.entity)?
.breaking = breaking;
Some(())
}();
}
ActionKind::Throw(throwing) => {
if let Some(data) = self.players.get(&player) {
logic::events::throw(&mut self.world, data.entity, throwing.target);
}
}
}
}
}
impl GameHandle {
/// Register a new client and return its id.
pub async fn register_player(&mut self) -> crate::Result<PlayerHandle> {
self.send_with(|callback| Command::RegisterPlayer { callback })
.await
}
/// Remove a player from the game.
pub async fn disconnect_player(&mut self, player: PlayerId) -> crate::Result<()> {
self.sender.send(Command::DisconnectPlayer(player)).await?;
Ok(())
}
/// Handle a request made by a player.
pub async fn handle_request(&mut self, request: Request) -> crate::Result<Response> {
self.send_with(move |callback| Command::Request { request, callback })
.await
}
/// Get a snapshot of the current game state.
pub async fn | (&mut self) -> crate::Result<Snapshot> {
self.send_with(|callback| Command::Snapshot { callback })
.await
}
/// Handle an action performed by a player
pub async fn handle_action(&mut self, action: Action, player: PlayerId) -> crate::Result<()> {
self.sender
.send(Command::PerformAction { action, player })
.await?;
Ok(())
}
/// Send a command to the game with the specified callback and then return the value passed into
/// the callback.
async fn send_with<F, O>(&mut self, to_command: F) -> crate::Result<O>
where
F: FnOnce(Callback<O>) -> Command,
{
let (callback, value) = Callback::new();
let command = to_command(callback);
self.sender.send(command).await?;
value.await.map_err(Into::into)
}
}
impl PlayerHandle {
/// Get the id of this player
pub fn id(&self) -> PlayerId {
self.player
}
pub async fn poll_event(&mut self) -> Option<Event> {
self.events.recv().await
}
}
impl<T> Callback<T> {
/// Create a new callback
pub fn new() -> (Callback<T>, oneshot::Receiver<T>) {
let (sender, receiver) = oneshot::channel();
(Callback { sender }, receiver)
}
/// Attempt to send the value, returning false if the receiver was closed.
pub fn send(self, value: T) -> bool {
match self.sender.send(value) {
Ok(()) => true,
Err(_) => false,
}
}
}
| snapshot | identifier_name |
game.rs | use std::collections::BTreeMap;
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
use tokio::sync::{
mpsc::{self, error::TrySendError},
oneshot,
};
use tokio::time;
use logic::components::{Movement, WorldInteraction};
use logic::legion::prelude::{Entity, World};
use logic::resources::DeadEntities;
use logic::snapshot::SnapshotEncoder;
use protocol::{
Action, ActionKind, EntityId, Event, EventKind, GameOver, PlayerId, Request, RequestKind,
Response, ResponseKind, Snapshot,
};
/// How many times per second to update the game world.
const TICK_RATE: u32 = 60;
/// The maximum number of events to buffer per player.
const EVENT_BUFFER_SIZE: usize = 1024;
pub struct Game {
players: BTreeMap<PlayerId, PlayerData>,
receiver: mpsc::Receiver<Command>,
world: World,
executor: logic::Executor,
snapshots: SnapshotEncoder,
time: u32,
}
#[derive(Debug, Clone)]
struct PlayerData {
entity: Entity,
network_id: EntityId,
events: mpsc::Sender<Event>,
}
#[derive(Debug)]
pub struct PlayerHandle {
player: PlayerId,
events: mpsc::Receiver<Event>,
}
#[derive(Debug, Clone)] | #[derive(Debug)]
enum Command {
Request {
request: Request,
callback: Callback<Response>,
},
RegisterPlayer {
callback: Callback<PlayerHandle>,
},
DisconnectPlayer(PlayerId),
Snapshot {
callback: Callback<Snapshot>,
},
PerformAction {
action: Action,
player: PlayerId,
},
}
struct Callback<T> {
sender: oneshot::Sender<T>,
}
// We don't care what the callback contains; simply print the expected return type.
impl<T> Debug for Callback<T> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "Callback<{}>", std::any::type_name::<T>())
}
}
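// Example output (illustrative): formatting a `Callback<Response>` prints
// something like `Callback<protocol::Response>`; `type_name` output is
// typically path-qualified, but its exact form is not guaranteed.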
impl Game {
/// Create a new game alongside a handle to the game.
pub fn new() -> (Game, GameHandle) {
let (sender, receiver) = mpsc::channel(1024);
let world = logic::create_world(logic::WorldKind::WithObjects);
let schedule = logic::add_systems(Default::default(), logic::SystemSet::Everything);
let executor = logic::Executor::new(schedule);
let game = Game {
players: BTreeMap::new(),
receiver,
world,
executor,
snapshots: SnapshotEncoder::new(),
time: 0,
};
let handle = GameHandle { sender };
(game, handle)
}
/// Run the game to completion (either the handle is dropped or a fatal error occurs).
pub async fn run(&mut self) {
let mut timer = time::interval(time::Duration::from_secs(1) / TICK_RATE);
loop {
tokio::select! {
_ = timer.tick() => {
self.tick();
}
command = self.receiver.recv() => match command {
None => {
log::info!("game handle dropped");
break;
},
Some(command) => {
log::debug!("got command: {:?}", command);
self.execute_command(command);
}
}
};
}
}
fn tick(&mut self) {
self.executor.tick(&mut self.world);
self.snapshots.update_mapping(&self.world);
self.check_win_condition();
let mut events = Vec::<EventKind>::new();
let snapshot = Arc::new(self.snapshot());
events.push(snapshot.into());
for event in events {
self.broadcast(event);
}
self.time = self.time.wrapping_add(1);
}
fn broadcast<T>(&mut self, kind: T)
where
T: Into<EventKind>,
{
let event = Event {
time: self.time,
kind: kind.into(),
};
let mut dead = Vec::new();
for (&id, player) in &mut self.players {
match player.events.try_send(event.clone()) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
log::warn!("player {}'s event buffer is full", id);
dead.push(id);
// TODO: request full client resync
}
Err(TrySendError::Closed(_)) => {
log::info!("player {} stopped listening for events", id);
dead.push(id);
// TODO: stop attempting to send events to this player, and potentially
// disconnect them.
}
}
}
for player in dead {
self.remove_player(player);
}
}
fn remove_player(&mut self, player: PlayerId) -> Option<PlayerData> {
let data = self.players.remove(&player)?;
self.world.delete(data.entity);
self.world
.resources
.get_mut::<DeadEntities>()
.unwrap()
.entities
.push(data.network_id);
Some(data)
}
/// Check if any player has won or lost.
fn check_win_condition(&mut self) {
let dead = self.world.resources.get::<DeadEntities>().unwrap();
let mut losers = Vec::new();
for (&player, data) in &self.players {
if dead.entities.contains(&data.network_id) {
losers.push(player);
}
}
drop(dead);
for loser in losers {
let mut player = self.players.remove(&loser).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Loser),
};
tokio::spawn(async move { player.events.send(event).await });
if self.players.len() == 1 {
let winner = *self.players.keys().next().unwrap();
let mut player = self.remove_player(winner).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Winner),
};
tokio::spawn(async move { player.events.send(event).await });
}
}
}
/// Execute a command.
fn execute_command(&mut self, command: Command) {
match command {
Command::RegisterPlayer { callback } => {
callback.send(self.register_player());
}
Command::DisconnectPlayer(player) => {
self.remove_player(player);
}
Command::Request { callback, request } => {
let message = self.handle_request(request);
callback.send(message);
}
Command::Snapshot { callback } => {
let snapshot = self.snapshot();
callback.send(snapshot);
}
Command::PerformAction { action, player } => self.perform_action(action, player),
}
}
/// Create and register a new player
fn register_player(&mut self) -> PlayerHandle {
let player = self.next_player_id();
let entity = logic::add_player(&mut self.world, player);
let (sender, receiver) = mpsc::channel(EVENT_BUFFER_SIZE);
let network_id = *self.world.get_component::<EntityId>(entity).unwrap();
let data = PlayerData {
network_id,
entity,
events: sender,
};
self.players.insert(player, data);
PlayerHandle {
player,
events: receiver,
}
}
/// Find the next available player id
fn next_player_id(&self) -> PlayerId {
let mut id = 1;
for player in self.players.keys() {
if player.0 == id {
id += 1;
} else {
break;
}
}
PlayerId(id)
}
/// Perform the request and return the result in a message
fn handle_request(&mut self, request: Request) -> Response {
let kind = match request.kind {
RequestKind::Ping => protocol::Pong.into(),
RequestKind::Init => {
let error = "Requested 'Init' on already initialized player";
ResponseKind::Error(error.into())
}
};
Response {
channel: request.channel,
kind,
}
}
/// Get a snapshot of the current game state.
fn snapshot(&self) -> Snapshot {
self.snapshots.make_snapshot(&self.world)
}
/// Perform an action for a player.
fn perform_action(&mut self, action: Action, player: PlayerId) {
match action.kind {
ActionKind::Move(new) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let mut movement = self.world.get_component_mut::<Movement>(data.entity)?;
movement.direction = new.direction;
Some(())
}();
}
ActionKind::Break(breaking) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let breaking = breaking
.entity
.and_then(|breaking| self.snapshots.lookup(breaking));
self.world
.get_component_mut::<WorldInteraction>(data.entity)?
.breaking = breaking;
Some(())
}();
}
ActionKind::Throw(throwing) => {
if let Some(data) = self.players.get(&player) {
logic::events::throw(&mut self.world, data.entity, throwing.target);
}
}
}
}
}
impl GameHandle {
/// Register a new player and return its handle.
pub async fn register_player(&mut self) -> crate::Result<PlayerHandle> {
self.send_with(|callback| Command::RegisterPlayer { callback })
.await
}
/// Remove a player from the game.
pub async fn disconnect_player(&mut self, player: PlayerId) -> crate::Result<()> {
self.sender.send(Command::DisconnectPlayer(player)).await?;
Ok(())
}
/// Handle a request made by a player.
pub async fn handle_request(&mut self, request: Request) -> crate::Result<Response> {
self.send_with(move |callback| Command::Request { request, callback })
.await
}
/// Get a snapshot of the current game state.
pub async fn snapshot(&mut self) -> crate::Result<Snapshot> {
self.send_with(|callback| Command::Snapshot { callback })
.await
}
/// Handle an action performed by a player
pub async fn handle_action(&mut self, action: Action, player: PlayerId) -> crate::Result<()> {
self.sender
.send(Command::PerformAction { action, player })
.await?;
Ok(())
}
/// Send a command to the game with the specified callback and then return the value passed into
/// the callback.
async fn send_with<F, O>(&mut self, to_command: F) -> crate::Result<O>
where
F: FnOnce(Callback<O>) -> Command,
{
let (callback, value) = Callback::new();
let command = to_command(callback);
self.sender.send(command).await?;
value.await.map_err(Into::into)
}
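// Illustrative flow (drawn from the methods above): `snapshot` calls
// `self.send_with(|callback| Command::Snapshot { callback })`, which ships the
// oneshot sender into the game loop; `execute_command` fulfills it via
// `Callback::send`, and awaiting `value` here yields the snapshot.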
}
impl PlayerHandle {
/// Get the id of this player
pub fn id(&self) -> PlayerId {
self.player
}
pub async fn poll_event(&mut self) -> Option<Event> {
self.events.recv().await
}
}
impl<T> Callback<T> {
/// Create a new callback
pub fn new() -> (Callback<T>, oneshot::Receiver<T>) {
let (sender, receiver) = oneshot::channel();
(Callback { sender }, receiver)
}
/// Attempt to send the value, returning false if the receiver was closed.
pub fn send(self, value: T) -> bool {
match self.sender.send(value) {
Ok(()) => true,
Err(_) => false,
}
}
}
pub struct GameHandle {
sender: mpsc::Sender<Command>,
}
tmsExport_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type tmsExportOptions struct {
TmsServiceKey string `json:"tmsServiceKey,omitempty"`
CustomDescription string `json:"customDescription,omitempty"`
NamedUser string `json:"namedUser,omitempty"`
NodeName string `json:"nodeName,omitempty"`
MtaPath string `json:"mtaPath,omitempty"`
MtaVersion string `json:"mtaVersion,omitempty"`
NodeExtDescriptorMapping map[string]interface{} `json:"nodeExtDescriptorMapping,omitempty"`
Proxy string `json:"proxy,omitempty"`
}
type tmsExportInflux struct {
step_data struct {
fields struct {
tms bool
}
tags struct {
}
}
}
func (i *tmsExportInflux) persist(path, resourceName string) {
measurementContent := []struct {
measurement string
valType string
name string
value interface{}
}{
{valType: config.InfluxField, measurement: "step_data", name: "tms", value: i.step_data.fields.tms},
}
errCount := 0
for _, metric := range measurementContent {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(metric.measurement, fmt.Sprintf("%vs", metric.valType), metric.name), metric.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting influx environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Error("failed to persist Influx environment")
}
}
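// Illustrative on-disk result (an assumption about piperenv's layout, not part
// of the generated code): the single metric above is written beneath
// <path>/<resourceName> at the relative key "step_data/<valType>s/tms" and
// holds the boolean value of step_data.fields.tms.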
// TmsExportCommand This step allows you to export an MTA file (multi-target application archive) and multiple MTA extension descriptors into a TMS (SAP Cloud Transport Management service) landscape for further TMS-controlled distribution through a TMS-configured landscape.
func TmsExportCommand() *cobra.Command {
const STEP_NAME = "tmsExport"
metadata := tmsExportMetadata()
var stepConfig tmsExportOptions
var startTime time.Time
var influx tmsExportInflux
var logCollector *log.CollectorHook
var splunkClient *splunk.Splunk
telemetryClient := &telemetry.Telemetry{}
var createTmsExportCmd = &cobra.Command{
Use: STEP_NAME,
Short: "This step allows you to export an MTA file (multi-target application archive) and multiple MTA extension descriptors into a TMS (SAP Cloud Transport Management service) landscape for further TMS-controlled distribution through a TMS-configured landscape.",
Long: `This step allows you to export an MTA file (multi-target application archive) and multiple MTA extension descriptors into a TMS (SAP Cloud Transport Management service) landscape for further TMS-controlled distribution through a TMS-configured landscape. The MTA file is attached to a new transport request which is added to the import queues of the follow-on transport nodes of the specified export node.
TMS lets you manage transports between SAP Business Technology Platform accounts in Neo and Cloud Foundry, such as from DEV to TEST and PROD accounts.
For more information, see [official documentation of SAP Cloud Transport Management service](https://help.sap.com/viewer/p/TRANSPORT_MANAGEMENT_SERVICE)
!!! note "Prerequisites"
* You have subscribed to and set up TMS, as described in [Initial Setup](https://help.sap.com/viewer/7f7160ec0d8546c6b3eab72fb5ad6fd8/Cloud/en-US/66fd7283c62f48adb23c56fb48c84a60.html), which includes the configuration of your transport landscape.
* A corresponding service key has been created, as described in [Set Up the Environment to Transport Content Archives directly in an Application](https://help.sap.com/viewer/7f7160ec0d8546c6b3eab72fb5ad6fd8/Cloud/en-US/8d9490792ed14f1bbf8a6ac08a6bca64.html). This service key (JSON) must be stored as a secret text within the Jenkins secure store or provided as value of tmsServiceKey parameter.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.TmsServiceKey)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient = &splunk.Splunk{}
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
if err = log.RegisterANSHookIfConfigured(GeneralConfig.CorrelationID); err != nil {
log.Entry().WithError(err).Warn("failed to set up SAP Alert Notification Service log hook")
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
stepTelemetryData := telemetry.CustomData{}
stepTelemetryData.ErrorCode = "1"
handler := func() {
influx.persist(GeneralConfig.EnvRootPath, "influx")
config.RemoveVaultSecretFiles()
stepTelemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
stepTelemetryData.ErrorCategory = log.GetErrorCategory().String()
stepTelemetryData.PiperCommitHash = GitCommit
telemetryClient.SetData(&stepTelemetryData)
telemetryClient.Send()
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
if len(GeneralConfig.HookConfig.SplunkConfig.ProdCriblEndpoint) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.ProdCriblEndpoint,
GeneralConfig.HookConfig.SplunkConfig.ProdCriblToken,
GeneralConfig.HookConfig.SplunkConfig.ProdCriblIndex,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetryClient.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
tmsExport(stepConfig, &stepTelemetryData, &influx)
stepTelemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addTmsExportFlags(createTmsExportCmd, &stepConfig)
return createTmsExportCmd
}
func addTmsExportFlags(cmd *cobra.Command, stepConfig *tmsExportOptions) {
cmd.Flags().StringVar(&stepConfig.TmsServiceKey, "tmsServiceKey", os.Getenv("PIPER_tmsServiceKey"), "Service key JSON string to access the SAP Cloud Transport Management service instance APIs. If not specified and the pipeline is running on Jenkins, the service key stored under the ID provided with the credentialsId parameter is used.")
cmd.Flags().StringVar(&stepConfig.CustomDescription, "customDescription", os.Getenv("PIPER_customDescription"), "Can be used as the description of a transport request. Will overwrite the default, which is the corresponding Git commit ID.")
cmd.Flags().StringVar(&stepConfig.NamedUser, "namedUser", `Piper-Pipeline`, "Defines the named user to execute the transport request with. The default value is 'Piper-Pipeline'. If the pipeline is running on Jenkins, the name of the user who started the job is used first, if available.")
cmd.Flags().StringVar(&stepConfig.NodeName, "nodeName", os.Getenv("PIPER_nodeName"), "Defines the name of the export node - starting node in TMS landscape. The transport request is added to the queues of the follow-on nodes of export node.")
cmd.Flags().StringVar(&stepConfig.MtaPath, "mtaPath", os.Getenv("PIPER_mtaPath"), "Defines the relative path to *.mtar file for the export to the SAP Cloud Transport Management service. If not specified, it will use the *.mtar file created in mtaBuild.")
cmd.Flags().StringVar(&stepConfig.MtaVersion, "mtaVersion", `*`, "Defines the version of the MTA for which the MTA extension descriptor will be used. You can use an asterisk (*) to accept any MTA version, or use a specific version compliant with SemVer 2.0, e.g. 1.0.0 (see semver.org). If the parameter is not configured, an asterisk is used.")
cmd.Flags().StringVar(&stepConfig.Proxy, "proxy", os.Getenv("PIPER_proxy"), "Proxy URL which should be used for communication with the SAP Cloud Transport Management service backend.")
cmd.MarkFlagRequired("tmsServiceKey")
cmd.MarkFlagRequired("nodeName")
}
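// Illustrative invocation of the generated step (assuming the piper binary
// built from this repository; the service key value is a placeholder):
//
//	piper tmsExport --tmsServiceKey '<service-key-json>' --nodeName QA_NODE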
// retrieve step metadata
func tmsExportMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "tmsExport",
Aliases: []config.Alias{},
Description: "This step allows you to export an MTA file (multi-target application archive) and multiple MTA extension descriptors into a TMS (SAP Cloud Transport Management service) landscape for further TMS-controlled distribution through a TMS-configured landscape.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "credentialsId", Description: "Jenkins 'Secret text' credentials ID containing service key for SAP Cloud Transport Management service.", Type: "jenkins"},
},
Resources: []config.StepResources{
{Name: "buildResult", Type: "stash"},
},
Parameters: []config.StepParameters{
{
Name: "tmsServiceKey",
ResourceRef: []config.ResourceReference{
{
Name: "credentialsId",
Param: "tmsServiceKey",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STEPS", "STAGES"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_tmsServiceKey"),
},
{
Name: "customDescription",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "git/commitId",
},
},
Scope: []string{"PARAMETERS", "STEPS", "STAGES"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_customDescription"),
},
{
Name: "namedUser",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS", "STAGES"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `Piper-Pipeline`,
},
{
Name: "nodeName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS", "STAGES"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_nodeName"),
},
{
Name: "mtaPath",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "mtarFilePath",
},
},
Scope: []string{"PARAMETERS", "STEPS", "STAGES"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_mtaPath"),
},
{
Name: "mtaVersion",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS", "STAGES"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `*`,
},
{
Name: "nodeExtDescriptorMapping",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS", "STAGES"},
Type: "map[string]interface{}",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "proxy",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS", "STAGES"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_proxy"),
},
},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "influx",
Type: "influx",
Parameters: []map[string]interface{}{
{"name": "step_data", "fields": []map[string]string{{"name": "tms"}}},
},
},
},
},
},
}
return theMetaData
}
par_granges.rs | //! # ParGranges
//!
//! Iterates over chunked genomic regions in parallel.
use anyhow::Result;
use bio::io::bed;
use crossbeam::channel::{bounded, Receiver};
use lazy_static::lazy_static;
use log::*;
use num_cpus;
use rayon::prelude::*;
use rust_htslib::{
bam::{HeaderView, IndexedReader, Read},
bcf::{Read as bcfRead, Reader},
};
use rust_lapper::{Interval, Lapper};
use serde::Serialize;
use std::{convert::TryInto, path::PathBuf, thread};
const BYTES_INA_GIGABYTE: usize = 1024 * 1024 * 1024;
/// A modifier to apply to the channel size formula, which is (BYTES_INA_GIGABYTE * channel_size_modifier) * threads / size_of(R::P)
/// 0.15 roughly corresponds to 1_000_000 PileupPosition objects per thread with some wiggle room.
pub const CHANNEL_SIZE_MODIFIER: f64 = 0.15;
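// Worked example of the formula above (illustrative numbers): with a 120-byte
// `R::P` and 4 worker threads, floor(1_073_741_824 * 0.15) = 161_061_273 bytes;
// 161_061_273 / 120 = 1_342_177 items per thread (the ~1_000_000 noted above,
// with wiggle room), and 1_342_177 * 4 = 5_368_708 total channel capacity.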
/// The ideal number of basepairs each worker will receive. Total bp in memory at one time = `threads` * `chunksize`
pub const CHUNKSIZE: u32 = 1_000_000;
lazy_static! {
/// CHANNEL_SIZE_MODIFIER as a str
pub static ref CHANNEL_SIZE_MODIFIER_STR: String = CHANNEL_SIZE_MODIFIER.to_string();
/// CHUNKSIZE as a str
pub static ref CHUNKSIZE_STR: String = CHUNKSIZE.to_string();
}
/// RegionProcessor defines the methods that must be implemented to process a region
pub trait RegionProcessor {
/// A vector of P make up the output of [`process_region`] and
/// are values associated with each position.
///
/// [`process_region`]: #method.process_region
type P: 'static + Send + Sync + Serialize;
/// A function that takes the tid, start, and stop and returns something serializable.
/// Note, a common use of this function will be a `fetch` -> `pileup`. The pileup must
/// be bounds checked.
fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P>;
}
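// Illustrative implementation (not part of the original module; the names
// `RegionSummary` and `EchoProcessor` are hypothetical, and `derive(Serialize)`
// assumes serde's derive feature is enabled): a minimal processor that emits
// one serializable record per region it is handed.
#[derive(Serialize)]
struct RegionSummary {
tid: u32,
start: u32,
stop: u32,
}
struct EchoProcessor;
impl RegionProcessor for EchoProcessor {
type P = RegionSummary;
fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P> {
// A real processor would fetch reads overlapping (tid, start, stop) and pile up;
// here we only echo the interval bounds.
vec![RegionSummary { tid, start, stop }]
}
}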
/// ParGranges holds all the information and configuration needed to launch the
/// [`ParGranges::process`].
///
/// [`ParGranges::process`]: #method.process
#[derive(Debug)]
pub struct ParGranges<R: 'static + RegionProcessor + Send + Sync> {
/// Path to an indexed BAM / CRAM file
reads: PathBuf,
/// Optional reference file for CRAM
ref_fasta: Option<PathBuf>,
/// Optional path to a BED file to restrict the regions iterated over
regions_bed: Option<PathBuf>,
/// Optional path to a BCF/VCF file to restrict the regions iterated over
regions_bcf: Option<PathBuf>,
/// Number of threads this is allowed to use, uses all if None
threads: usize,
/// The ideal number of basepairs each worker will receive. Total bp in memory at one time = `threads` * `chunksize`
chunksize: u32,
/// A modifier to apply to the channel size formula, which is (BYTES_INA_GIGABYTE * channel_size_modifier) * threads / size_of(R::P)
channel_size_modifier: f64,
/// The rayon threadpool to operate in
pool: rayon::ThreadPool,
/// The implementation of [RegionProcessor] that will be used to process regions
processor: R,
}
impl<R: RegionProcessor + Send + Sync> ParGranges<R> {
/// Create a ParGranges object
///
/// # Arguments
///
/// * `reads`- path to an indexed BAM/CRAM
/// * `ref_fasta`- path to an indexed reference file for CRAM
/// * `regions_bed`- Optional BED file path restricting the regions to be examined
/// * `regions_bcf`- Optional BCF/VCF file path restricting the regions to be examined
/// * `threads`- Optional threads to restrict the number of threads this process will use, defaults to all
/// * `chunksize`- optional argument to change the default chunksize of 1_000_000. `chunksize` determines the number of bases
/// each worker will get to work on at one time.
/// * `channel_size_modifier`- Optional argument to modify the default size ratio of the channel that `R::P` is sent on.
/// formula is: ((BYTES_INA_GIGABYTE * channel_size_modifier) * threads) / size_of(R::P)
/// * `processor`- Something that implements [`RegionProcessor`](RegionProcessor)
pub fn new(
reads: PathBuf,
ref_fasta: Option<PathBuf>,
regions_bed: Option<PathBuf>,
regions_bcf: Option<PathBuf>,
threads: Option<usize>,
chunksize: Option<u32>,
channel_size_modifier: Option<f64>,
processor: R,
) -> Self {
let threads = if let Some(threads) = threads {
threads
} else {
num_cpus::get()
};
// Keep two threads free: one for the main thread and one for the thread running the pool
let threads = std::cmp::max(threads.checked_sub(2).unwrap_or(0), 1);
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(threads)
.build()
.unwrap();
info!("Using {} worker threads.", threads);
Self {
reads,
ref_fasta,
regions_bed,
regions_bcf,
threads,
chunksize: chunksize.unwrap_or(CHUNKSIZE),
channel_size_modifier: channel_size_modifier.unwrap_or(CHANNEL_SIZE_MODIFIER),
pool,
processor,
}
}
/// Process each region.
///
/// This method splits the sequences in the BAM/CRAM header into `chunksize` * `self.threads` regions (aka 'super chunks').
/// It then queries that 'super chunk' against the intervals (either the BED file, or the whole genome broken up into `chunksize`
/// regions). The results of that query are then processed by a pool of workers that apply `process_region` to each interval to
/// do per-base analysis on. The collected result for each region is then sent back over the returned `Receiver<R::P>` channel
/// for the caller to use. The results will be returned in order according to the order of the intervals used to drive this method.
///
/// While one 'super chunk' is being worked on by all workers, the results of the previous 'super chunk' are being printed, in
/// order, either to a file or to STDOUT.
///
/// Note, a common use case of this will be to fetch a region and do a pileup. The bounds of bases being looked at should still be
/// checked since a fetch will pull all reads that overlap the region in question.
pub fn process(self) -> Result<Receiver<R::P>> {
let channel_size: usize = ((BYTES_INA_GIGABYTE as f64 * self.channel_size_modifier).floor()
as usize
/ std::mem::size_of::<R::P>())
* self.threads;
info!(
"Creating channel of length {:?} (* 120 bytes to get mem)",
channel_size
);
let (snd, rxv) = bounded(channel_size);
thread::spawn(move || {
self.pool.install(|| {
info!("Reading from {:?}", self.reads);
let mut reader = IndexedReader::from_path(&self.reads).expect("Indexed BAM/CRAM");
// If passed add ref_fasta
if let Some(ref_fasta) = &self.ref_fasta {
reader.set_reference(ref_fasta).expect("Set ref");
}
// Get a copy of the header
let header = reader.header().to_owned();
// Work out if we are restricted to a subset of sites
let bed_intervals = if let Some(regions_bed) = &self.regions_bed {
Some(
Self::bed_to_intervals(&header, regions_bed)
.expect("Parsed BED to intervals"),
)
} else {
None
};
let bcf_intervals = if let Some(regions_bcf) = &self.regions_bcf {
Some(
Self::bcf_to_intervals(&header, regions_bcf)
.expect("Parsed BCF/VCF to intervals"),
)
} else {
None
};
let restricted_ivs = match (bed_intervals, bcf_intervals) {
(Some(bed_ivs), Some(bcf_ivs)) => Some(Self::merge_intervals(bed_ivs, bcf_ivs)),
(Some(bed_ivs), None) => Some(bed_ivs),
(None, Some(bcf_ivs)) => Some(bcf_ivs),
(None, None) => None,
};
let intervals = if let Some(restricted) = restricted_ivs {
restricted
} else {
Self::header_to_intervals(&header, self.chunksize)
.expect("Parsed BAM/CRAM header to intervals")
};
// The number of positions to try to process in one batch
let serial_step_size = self
.chunksize
.checked_mul(self.threads as u32)
.unwrap_or(u32::MAX); // aka superchunk
for (tid, intervals) in intervals.into_iter().enumerate() {
let tid: u32 = tid as u32;
let tid_end: u32 = header.target_len(tid).unwrap().try_into().unwrap();
// Result holds the processed positions to be sent to writer
let mut result = vec![];
for chunk_start in (0..tid_end).step_by(serial_step_size as usize) {
let tid_name = std::str::from_utf8(header.tid2name(tid)).unwrap();
let chunk_end =
std::cmp::min(chunk_start as u32 + serial_step_size, tid_end);
info!(
"Batch Processing {}:{}-{}",
tid_name, chunk_start, chunk_end
);
let (r, _) = rayon::join(
|| {
// Must be a vec so that par_iter works and results stay in order
let ivs: Vec<Interval<u32, ()>> =
Lapper::<u32, ()>::find(&intervals, chunk_start, chunk_end)
// Truncate intervals that extend forward or backward of chunk in question
.map(|iv| Interval {
start: std::cmp::max(iv.start, chunk_start),
stop: std::cmp::min(iv.stop, chunk_end),
val: (),
})
.collect();
ivs.into_par_iter()
.flat_map(|iv| {
info!("Processing {}:{}-{}", tid_name, iv.start, iv.stop);
self.processor.process_region(tid, iv.start, iv.stop)
})
.collect()
},
|| {
result.into_iter().for_each(|p| {
snd.send(p).expect("Sent a serializable to writer")
})
},
);
result = r;
}
// Send final set of results
result
.into_iter()
.for_each(|p| snd.send(p).expect("Sent a serializable to writer"));
}
});
});
Ok(rxv)
}
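// Editor's sketch (hypothetical `runner` and `writer`): consuming the
// receiver returned by `process`. Results arrive in interval order, so the
// caller can stream them straight to output:
//
//     let receiver = runner.process()?;
//     for pos in receiver.into_iter() {
//         writer.serialize(pos)?; // e.g. a csv::Writer
//     }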
// Convert the header into intervals of equally sized chunks. The last interval may be short.
fn header_to_intervals(header: &HeaderView, chunksize: u32) -> Result<Vec<Lapper<u32, ()>>> {
let mut intervals = vec![vec![]; header.target_count() as usize];
for tid in 0..(header.target_count()) {
let tid_len: u32 = header.target_len(tid).unwrap().try_into().unwrap();
for start in (0..tid_len).step_by(chunksize as usize) {
let stop = std::cmp::min(start as u32 + chunksize, tid_len);
intervals[tid as usize].push(Interval {
start: start as u32,
stop,
val: (),
});
}
}
Ok(intervals.into_iter().map(|ivs| Lapper::new(ivs)).collect())
}
/// Read a bed file into a vector of lappers with the index representing the TID
// TODO add a proper error message
fn bed_to_intervals(header: &HeaderView, bed_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bed_reader = bed::Reader::from_file(bed_file)?;
let mut intervals = vec![vec![]; header.target_count() as usize];
for record in bed_reader.records() {
let record = record?;
let tid = header
.tid(record.chrom().as_bytes())
.expect("Chromosome not found in BAM/CRAM header");
intervals[tid as usize].push(Interval {
start: record.start().try_into().unwrap(),
stop: record.end().try_into().unwrap(),
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
/// Read a BCF/VCF file into a vector of lappers with index representing the TID
fn bcf_to_intervals(header: &HeaderView, bcf_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bcf_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header = bcf_header_reader.header();
let mut intervals = vec![vec![]; header.target_count() as usize];
// TODO: validate the headers against each other
for record in bcf_reader.records() {
let record = record?;
let record_rid = bcf_header.rid2name(record.rid().unwrap()).unwrap();
let tid = header
.tid(record_rid)
.expect("Chromosome not found in BAM/CRAM header");
let pos: u32 = record
.pos()
.try_into()
.expect("Got a negative value for pos");
intervals[tid as usize].push(Interval {
start: pos,
stop: pos + 1,
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
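// Editor's note: each BCF/VCF record above contributes a single one-base
// interval. htslib positions are 0-based, so a VCF record with POS=100
// becomes Interval { start: 99, stop: 100, val: () }, and duplicate sites
// collapse via `merge_overlaps`.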
/// Merge two sets of restriction intervals together
fn merge_intervals(
a_ivs: Vec<Lapper<u32, ()>>,
b_ivs: Vec<Lapper<u32, ()>>,
) -> Vec<Lapper<u32, ()>> {
let mut intervals = vec![vec![]; a_ivs.len()];
for (i, (a_lapper, b_lapper)) in a_ivs.into_iter().zip(b_ivs.into_iter()).enumerate() {
intervals[i] = a_lapper.into_iter().chain(b_lapper.into_iter()).collect();
}
intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect()
}
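// Editor's sketch of the merge semantics, using rust_lapper directly
// (illustrative values):
//
//     let a = Lapper::new(vec![Interval { start: 0u32, stop: 10, val: () }]);
//     let b = Lapper::new(vec![Interval { start: 5u32, stop: 20, val: () }]);
//     let mut merged: Lapper<u32, ()> =
//         Lapper::new(a.into_iter().chain(b.into_iter()).collect());
//     merged.merge_overlaps();
//     assert_eq!(merged.cov(), 20); // the union covers [0, 20)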
}
#[cfg(test)]
mod test {
use super::*;
use bio::io::bed;
use num_cpus;
use proptest::prelude::*;
use rust_htslib::{bam, bcf};
use rust_lapper::{Interval, Lapper};
use std::collections::{HashMap, HashSet};
use tempfile::tempdir;
// The purpose of these tests is to demonstrate that positions are covered once under a variety of circumstances
prop_compose! {
fn arb_iv_start(max_iv: u64)(start in 0..max_iv/2) -> u64 { start }
}
prop_compose! {
fn arb_iv_size(max_iv: u64)(size in 1..max_iv/2) -> u64 { size }
}
prop_compose! {
// Create an arbitrary interval; start and size are each drawn below max_iv / 2, so stop < max_iv
fn arb_iv(max_iv: u64)(start in arb_iv_start(max_iv), size in arb_iv_size(max_iv)) -> Interval<u64, ()> {
Interval {start, stop: start + size, val: ()}
}
}
// Create an arbitrary number of intervals along with the expected number of positions they cover
fn arb_ivs(
max_iv: u64, // max iv size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = (Vec<Interval<u64, ()>>, u64, u64)> {
prop::collection::vec(arb_iv(max_iv), 0..max_ivs).prop_map(|vec| {
let mut furthest_right = 0;
let lapper = Lapper::new(vec.clone());
let expected = lapper.cov();
for iv in vec.iter() {
if iv.stop > furthest_right {
furthest_right = iv.stop;
}
}
(vec, expected, furthest_right)
})
}
// Create arbitrary number of contigs with arbitrary intervals each
fn arb_chrs(
max_chr: usize, // number of chromosomes to use
max_iv: u64, // max interval size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = Vec<(Vec<Interval<u64, ()>>, u64, u64)>> {
prop::collection::vec(arb_ivs(max_iv, max_ivs), 0..max_chr)
}
// An empty BAM with correct header
// A BED file with the randomly generated intervals (with expected number of positions)
// proptest generate random chunksize, cpus
proptest! {
#[test]
// add random chunksize and random cpus
// NB: using any larger numbers for this tends to blow up the test runtime
fn interval_set(chromosomes in arb_chrs(4, 10_000, 1_000), chunksize in any::<u32>(), cpus in 0..num_cpus::get(), use_bed in any::<bool>(), use_vcf in any::<bool>()) {
let tempdir = tempdir().unwrap();
let bam_path = tempdir.path().join("test.bam");
let bed_path = tempdir.path().join("test.bed");
let vcf_path = tempdir.path().join("test.vcf");
// Build a BAM
let mut header = bam::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
let mut chr_rec = bam::header::HeaderRecord::new(b"SQ");
chr_rec.push_tag(b"SN", &i.to_string());
chr_rec.push_tag(b"LN", &chr.2.to_string()); // set len as max observed
header.push_record(&chr_rec);
}
let writer = bam::Writer::from_path(&bam_path, &header, bam::Format::BAM).expect("Opened test.bam for writing");
drop(writer); // force flush the writer so the header info is written
bam::index::build(&bam_path, None, bam::index::Type::BAI, 1).unwrap();
// Build a bed
let mut writer = bed::Writer::to_file(&bed_path).expect("Opened test.bed for writing");
for (i, chr) in chromosomes.iter().enumerate() {
for iv in chr.0.iter() {
let mut record = bed::Record::new();
record.set_start(iv.start);
record.set_end(iv.stop);
record.set_chrom(&i.to_string());
record.set_score(&0.to_string());
writer.write(&record).expect("Wrote to test.bed");
}
}
drop(writer); // force flush
// Build a VCF file
let mut vcf_truth = HashMap::new();
let mut header = bcf::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
header.push_record(format!("##contig=<ID={},length={}>", &i.to_string(), &chr.2.to_string()).as_bytes());
}
let mut writer = bcf::Writer::from_path(&vcf_path, &header, true, bcf::Format::VCF).expect("Failed to open test.vcf for writing");
let mut record = writer.empty_record();
for (i, chr) in chromosomes.iter().enumerate() {
record.set_rid(Some(i as u32));
let counter = vcf_truth.entry(i).or_insert(0);
let mut seen = HashSet::new();
for iv in chr.0.iter() {
if !seen.contains(&iv.start) {
*counter += 1;
seen.insert(iv.start);
}
record.set_pos(iv.start as i64);
writer.write(&record).expect("Failed to write to test.vcf")
}
}
drop(writer); // force flush
// Create the processor with a dumb impl of processing that just returns positions with no counting
let test_processor = TestProcessor {};
let par_granges_runner = ParGranges::new(
bam_path,
None,
if use_bed { Some(bed_path) } else { None }, // do one with regions
if use_vcf { Some(vcf_path) } else { None }, // do one with vcf regions
Some(cpus),
Some(chunksize),
Some(0.002),
test_processor
);
let receiver = par_granges_runner.process().expect("Launch ParGranges Process");
let mut chrom_counts = HashMap::new();
receiver.into_iter().for_each(|p: PileupPosition| {
let positions = chrom_counts.entry(p.ref_seq.parse::<usize>().expect("parsed chr")).or_insert(0u64);
*positions += 1
});
// Validate that for each chr we get the expected number of bases
for (chrom, positions) in chrom_counts.iter() {
if use_bed && !use_vcf {
// if this was with bed, should be equal to .1
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_bed && use_vcf {
// if this was with bed and vcf, should still be equal to .1; the bed restrictions and vcf restrictions should overlap
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_vcf && !use_bed {
// total positions should be equal to the number of records for that chr in the vcf
prop_assert_eq!(vcf_truth.get(chrom).unwrap(), positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else {
// if this was bam only, should be equal to rightmost position
prop_assert_eq!(chromosomes[*chrom].2, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].2, positions);
}
}
}
}
use crate::position::{pileup_position::PileupPosition, Position};
use smartstring::SmartString;
struct TestProcessor {}
impl RegionProcessor for TestProcessor {
type P = PileupPosition;
fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P> {
let mut results = vec![];
for i in start..stop {
let chr = SmartString::from(&tid.to_string());
let pos = PileupPosition::new(chr, i);
results.push(pos);
}
results
}
}
}
par_granges.rs
//! # ParGranges
//!
//! Iterates over chunked genomic regions in parallel.
use anyhow::Result;
use bio::io::bed;
use crossbeam::channel::{bounded, Receiver};
use lazy_static::lazy_static;
use log::*;
use num_cpus;
use rayon::prelude::*;
use rust_htslib::{
bam::{HeaderView, IndexedReader, Read},
bcf::{Read as bcfRead, Reader},
};
use rust_lapper::{Interval, Lapper};
use serde::Serialize;
use std::{convert::TryInto, path::PathBuf, thread};
const BYTES_INA_GIGABYTE: usize = 1024 * 1024 * 1024;
/// A modifier to apply to the channel size formula, which is (BYTES_INA_GIGABYTE * channel_size_modifier) * threads / size_of(R::P)
/// 0.15 roughly corresponds to 1_000_000 PileupPosition objects per thread with some wiggle room.
pub const CHANNEL_SIZE_MODIFIER: f64 = 0.15;
/// The ideal number of basepairs each worker will receive. Total bp in memory at one time = `threads` * `chunksize`
pub const CHUNKSIZE: u32 = 1_000_000;
lazy_static! {
/// CHANNEL_SIZE_MODIFIER as a str
pub static ref CHANNEL_SIZE_MODIFIER_STR: String = CHANNEL_SIZE_MODIFIER.to_string();
/// CHUNKSIZE as a str
pub static ref CHUNKSIZE_STR: String = CHUNKSIZE.to_string();
}
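// Editor's note: plugging the defaults into the channel size formula, with a
// ~120-byte `P` and 4 worker threads:
//   ((1024_f64.powi(3) * 0.15).floor() as usize / 120) * 4 == 5_368_708
// i.e. roughly 5.4M positions can be buffered before senders block.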
/// RegionProcessor defines the methods that must be implemented to process a region
pub trait RegionProcessor {
/// Values of type `P` make up the output of [`process_region`] and
/// are associated with each position.
///
/// [`process_region`]: #method.process_region
type P: 'static + Send + Sync + Serialize;
/// A function that takes the tid, start, and stop and returns something serializable.
/// Note, a common use of this function will be a `fetch` -> `pileup`. The pileup must
/// be bounds checked.
fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P>;
}
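// Editor's sketch of a minimal `RegionProcessor` impl. `Spanner` and `Pos`
// are hypothetical; `Pos` satisfies the `'static + Send + Sync + Serialize`
// bound on `P`:
//
//     #[derive(Serialize)]
//     struct Pos { tid: u32, pos: u32 }
//
//     struct Spanner;
//     impl RegionProcessor for Spanner {
//         type P = Pos;
//         fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Pos> {
//             (start..stop).map(|pos| Pos { tid, pos }).collect()
//         }
//     }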
/// ParGranges holds all the information and configuration needed to launch the
/// [`ParGranges::process`].
///
/// [`ParGranges::process`]: #method.process
#[derive(Debug)]
pub struct ParGranges<R: 'static + RegionProcessor + Send + Sync> {
/// Path to an indexed BAM / CRAM file
reads: PathBuf,
/// Optional reference file for CRAM
ref_fasta: Option<PathBuf>,
/// Optional path to a BED file to restrict the regions iterated over
regions_bed: Option<PathBuf>,
/// Optional path to a BCF/VCF file to restrict the regions iterated over
regions_bcf: Option<PathBuf>,
/// Number of threads this is allowed to use, uses all if None
threads: usize,
/// The ideal number of basepairs each worker will receive. Total bp in memory at one time = `threads` * `chunksize`
chunksize: u32,
/// A modifier to apply to the channel size formula, which is (BYTES_INA_GIGABYTE * channel_size_modifier) * threads / size_of(R::P)
channel_size_modifier: f64,
/// The rayon threadpool to operate in
pool: rayon::ThreadPool,
/// The implementation of [RegionProcessor] that will be used to process regions
processor: R,
}
impl<R: RegionProcessor + Send + Sync> ParGranges<R> {
/// Create a ParGranges object
///
/// # Arguments
///
/// * `reads`- path to an indexed BAM/CRAM
/// * `ref_fasta`- path to an indexed reference file for CRAM
/// * `regions_bed`- Optional BED file path restricting the regions to be examined
/// * `regions_bcf`- Optional BCF/VCF file path restricting the regions to be examined
/// * `threads`- Optional limit on the number of threads this process will use; defaults to all
/// * `chunksize`- optional argument to change the default chunksize of 1_000_000. `chunksize` determines the number of bases
/// each worker will get to work on at one time.
/// * `channel_size_modifier`- Optional argument to modify the default size ratio of the channel that `R::P` is sent on.
/// formula is: ((BYTES_INA_GIGABYTE * channel_size_modifier) * threads) / size_of(R::P)
/// * `processor`- Something that implements [`RegionProcessor`](RegionProcessor)
pub fn new(
reads: PathBuf,
ref_fasta: Option<PathBuf>,
regions_bed: Option<PathBuf>,
regions_bcf: Option<PathBuf>,
threads: Option<usize>,
chunksize: Option<u32>,
channel_size_modifier: Option<f64>,
processor: R,
) -> Self {
let threads = if let Some(threads) = threads {
threads
} else {
num_cpus::get()
};
// Keep two around for main thread and thread running the pool
let threads = std::cmp::max(threads.checked_sub(2).unwrap_or(0), 1);
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(threads)
.build()
.unwrap();
info!("Using {} worker threads.", threads);
Self {
reads,
ref_fasta,
regions_bed,
regions_bcf,
threads,
chunksize: chunksize.unwrap_or(CHUNKSIZE),
channel_size_modifier: channel_size_modifier.unwrap_or(CHANNEL_SIZE_MODIFIER),
pool,
processor,
}
}
/// Process each region.
///
/// This method splits the sequences in the BAM/CRAM header into `chunksize` * `self.threads` regions (aka 'super chunks').
/// It then queries that 'super chunk' against the intervals (either the BED file, or the whole genome broken up into `chunksize`
/// regions). The results of that query are then processed by a pool of workers that apply `process_region` to each interval to
/// do perbase analysis on. The collected result for each region is then sent back over the returned `Receiver<R::P>` channel
/// for the caller to use. The results will be returned in order according to the order of the intervals used to drive this method.
///
/// While one 'super chunk' is being worked on by all workers, the results of the previous 'super chunk' are printed
/// either to a file or to STDOUT, in order.
///
/// Note, a common use case of this will be to fetch a region and do a pileup. The bounds of bases being looked at should still be
/// checked since a fetch will pull all reads that overlap the region in question.
pub fn process(self) -> Result<Receiver<R::P>> {
let channel_size: usize = ((BYTES_INA_GIGABYTE as f64 * self.channel_size_modifier).floor()
as usize
/ std::mem::size_of::<R::P>())
* self.threads;
info!(
"Creating channel of length {:?} (* 120 bytes to get mem)",
channel_size
);
let (snd, rxv) = bounded(channel_size);
thread::spawn(move || {
self.pool.install(|| {
info!("Reading from {:?}", self.reads);
let mut reader = IndexedReader::from_path(&self.reads).expect("Indexed BAM/CRAM");
// If passed add ref_fasta
if let Some(ref_fasta) = &self.ref_fasta {
reader.set_reference(ref_fasta).expect("Set ref");
}
// Get a copy of the header
let header = reader.header().to_owned();
// Work out if we are restricted to a subset of sites
let bed_intervals = if let Some(regions_bed) = &self.regions_bed {
Some(
Self::bed_to_intervals(&header, regions_bed)
.expect("Parsed BED to intervals"),
)
} else {
None
};
let bcf_intervals = if let Some(regions_bcf) = &self.regions_bcf {
Some(
Self::bcf_to_intervals(&header, regions_bcf)
.expect("Parsed BCF/VCF to intervals"),
)
} else {
None
};
let restricted_ivs = match (bed_intervals, bcf_intervals) {
(Some(bed_ivs), Some(bcf_ivs)) => Some(Self::merge_intervals(bed_ivs, bcf_ivs)),
(Some(bed_ivs), None) => Some(bed_ivs),
(None, Some(bcf_ivs)) => Some(bcf_ivs),
(None, None) => None,
};
let intervals = if let Some(restricted) = restricted_ivs {
restricted
} else {
Self::header_to_intervals(&header, self.chunksize)
.expect("Parsed BAM/CRAM header to intervals")
};
// The number of positions to try to process in one batch
let serial_step_size = self
.chunksize
.checked_mul(self.threads as u32)
.unwrap_or(u32::MAX); // aka superchunk
for (tid, intervals) in intervals.into_iter().enumerate() {
let tid: u32 = tid as u32;
let tid_end: u32 = header.target_len(tid).unwrap().try_into().unwrap();
// Result holds the processed positions to be sent to writer
let mut result = vec![];
for chunk_start in (0..tid_end).step_by(serial_step_size as usize) {
let tid_name = std::str::from_utf8(header.tid2name(tid)).unwrap();
let chunk_end =
std::cmp::min(chunk_start as u32 + serial_step_size, tid_end);
info!(
"Batch Processing {}:{}-{}",
tid_name, chunk_start, chunk_end
);
let (r, _) = rayon::join(
|| {
// Must be a vec so that par_iter works and results stay in order
let ivs: Vec<Interval<u32, ()>> =
Lapper::<u32, ()>::find(&intervals, chunk_start, chunk_end)
// Truncate intervals that extend forward or backward of chunk in question
.map(|iv| Interval {
start: std::cmp::max(iv.start, chunk_start),
stop: std::cmp::min(iv.stop, chunk_end),
val: (),
})
.collect();
ivs.into_par_iter()
.flat_map(|iv| {
info!("Processing {}:{}-{}", tid_name, iv.start, iv.stop);
self.processor.process_region(tid, iv.start, iv.stop)
})
.collect()
},
|| {
result.into_iter().for_each(|p| {
snd.send(p).expect("Sent a serializable to writer")
})
},
);
result = r;
}
// Send final set of results
result
.into_iter()
.for_each(|p| snd.send(p).expect("Sent a serializable to writer"));
}
});
});
Ok(rxv)
}
// Convert the header into intervals of equally sized chunks. The last interval may be short.
fn header_to_intervals(header: &HeaderView, chunksize: u32) -> Result<Vec<Lapper<u32, ()>>> {
let mut intervals = vec![vec![]; header.target_count() as usize];
for tid in 0..(header.target_count()) {
let tid_len: u32 = header.target_len(tid).unwrap().try_into().unwrap();
for start in (0..tid_len).step_by(chunksize as usize) {
let stop = std::cmp::min(start as u32 + chunksize, tid_len);
intervals[tid as usize].push(Interval {
start: start as u32,
stop,
val: (),
});
}
}
Ok(intervals.into_iter().map(|ivs| Lapper::new(ivs)).collect())
}
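// Editor's note: e.g. a 2_500_000 bp target with the default 1_000_000 bp
// chunksize yields [0, 1_000_000), [1_000_000, 2_000_000), and a short final
// [2_000_000, 2_500_000).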
/// Read a bed file into a vector of lappers with the index representing the TID
// TODO add a proper error message
fn bed_to_intervals(header: &HeaderView, bed_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bed_reader = bed::Reader::from_file(bed_file)?;
let mut intervals = vec![vec![]; header.target_count() as usize];
for record in bed_reader.records() {
let record = record?;
let tid = header
.tid(record.chrom().as_bytes())
.expect("Chromosome not found in BAM/CRAM header");
intervals[tid as usize].push(Interval {
start: record.start().try_into().unwrap(),
stop: record.end().try_into().unwrap(),
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
/// Read a BCF/VCF file into a vector of lappers with index representing the TID
fn bcf_to_intervals(header: &HeaderView, bcf_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bcf_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header = bcf_header_reader.header();
let mut intervals = vec![vec![]; header.target_count() as usize];
// TODO: validate the headers against each other
for record in bcf_reader.records() {
let record = record?;
let record_rid = bcf_header.rid2name(record.rid().unwrap()).unwrap();
let tid = header
.tid(record_rid)
.expect("Chromosome not found in BAM/CRAM header");
let pos: u32 = record
.pos()
.try_into()
.expect("Got a negative value for pos");
intervals[tid as usize].push(Interval {
start: pos,
stop: pos + 1,
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
/// Merge two sets of restriction intervals together
fn merge_intervals(
a_ivs: Vec<Lapper<u32, ()>>,
b_ivs: Vec<Lapper<u32, ()>>,
) -> Vec<Lapper<u32, ()>> {
let mut intervals = vec![vec![]; a_ivs.len()];
for (i, (a_lapper, b_lapper)) in a_ivs.into_iter().zip(b_ivs.into_iter()).enumerate() {
intervals[i] = a_lapper.into_iter().chain(b_lapper.into_iter()).collect();
}
intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect()
}
}
#[cfg(test)]
mod test {
use super::*;
use bio::io::bed;
use num_cpus;
use proptest::prelude::*;
use rust_htslib::{bam, bcf};
use rust_lapper::{Interval, Lapper};
use std::collections::{HashMap, HashSet};
use tempfile::tempdir;
// The purpose of these tests is to demonstrate that positions are covered once under a variety of circumstances
prop_compose! {
fn arb_iv_start(max_iv: u64)(start in 0..max_iv/2) -> u64 { start }
}
prop_compose! {
fn arb_iv_size(max_iv: u64)(size in 1..max_iv/2) -> u64 { size }
}
prop_compose! {
// Create an arbitrary interval; start and size are each drawn below max_iv / 2, so stop < max_iv
fn arb_iv(max_iv: u64)(start in arb_iv_start(max_iv), size in arb_iv_size(max_iv)) -> Interval<u64, ()> {
Interval {start, stop: start + size, val: ()}
}
}
// Create an arbitrary number of intervals along with the expected number of positions they cover
fn arb_ivs(
max_iv: u64, // max iv size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = (Vec<Interval<u64, ()>>, u64, u64)> {
prop::collection::vec(arb_iv(max_iv), 0..max_ivs).prop_map(|vec| {
let mut furthest_right = 0;
let lapper = Lapper::new(vec.clone());
let expected = lapper.cov();
for iv in vec.iter() {
if iv.stop > furthest_right {
furthest_right = iv.stop;
}
}
(vec, expected, furthest_right)
})
}
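// Editor's note: the tuple above is (intervals, covered positions, rightmost
// stop); e.g. intervals [0,10) and [5,20) give (ivs, 20, 20), since
// `Lapper::cov` counts each overlapped position once.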
// Create arbitrary number of contigs with arbitrary intervals each
fn arb_chrs(
max_chr: usize, // number of chromosomes to use
max_iv: u64, // max interval size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = Vec<(Vec<Interval<u64, ()>>, u64, u64)>> {
prop::collection::vec(arb_ivs(max_iv, max_ivs), 0..max_chr)
}
// An empty BAM with correct header
// A BED file with the randomly generated intervals (with expected number of positions)
// proptest generate random chunksize, cpus
proptest! {
#[test]
// add random chunksize and random cpus
// NB: using any larger numbers for this tends to blow up the test runtime
fn interval_set(chromosomes in arb_chrs(4, 10_000, 1_000), chunksize in any::<u32>(), cpus in 0..num_cpus::get(), use_bed in any::<bool>(), use_vcf in any::<bool>()) {
let tempdir = tempdir().unwrap();
let bam_path = tempdir.path().join("test.bam");
let bed_path = tempdir.path().join("test.bed");
let vcf_path = tempdir.path().join("test.vcf");
// Build a BAM
let mut header = bam::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
let mut chr_rec = bam::header::HeaderRecord::new(b"SQ");
chr_rec.push_tag(b"SN", &i.to_string());
chr_rec.push_tag(b"LN", &chr.2.to_string()); // set len as max observed
header.push_record(&chr_rec);
}
let writer = bam::Writer::from_path(&bam_path, &header, bam::Format::BAM).expect("Opened test.bam for writing");
drop(writer); // force flush the writer so the header info is written
bam::index::build(&bam_path, None, bam::index::Type::BAI, 1).unwrap();
// Build a bed
let mut writer = bed::Writer::to_file(&bed_path).expect("Opened test.bed for writing");
for (i, chr) in chromosomes.iter().enumerate() {
for iv in chr.0.iter() {
let mut record = bed::Record::new();
record.set_start(iv.start);
record.set_end(iv.stop);
record.set_chrom(&i.to_string());
record.set_score(&0.to_string());
writer.write(&record).expect("Wrote to test.bed");
}
}
drop(writer); // force flush
// Build a VCF file
let mut vcf_truth = HashMap::new();
let mut header = bcf::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
header.push_record(format!("##contig=<ID={},length={}>", &i.to_string(), &chr.2.to_string()).as_bytes());
}
let mut writer = bcf::Writer::from_path(&vcf_path, &header, true, bcf::Format::VCF).expect("Failed to open test.vcf for writing");
let mut record = writer.empty_record();
for (i, chr) in chromosomes.iter().enumerate() {
record.set_rid(Some(i as u32));
let counter = vcf_truth.entry(i).or_insert(0);
let mut seen = HashSet::new();
for iv in chr.0.iter() {
if !seen.contains(&iv.start) {
*counter += 1;
seen.insert(iv.start);
}
record.set_pos(iv.start as i64);
writer.write(&record).expect("Failed to write to test.vcf")
}
}
drop(writer); // force flush
// Create the processor with a dumb impl of processing that just returns positions with no counting
let test_processor = TestProcessor {};
let par_granges_runner = ParGranges::new(
bam_path,
None,
if use_bed { Some(bed_path) } else { None }, // do one with regions
if use_vcf { Some(vcf_path) } else { None }, // do one with vcf regions
Some(cpus),
Some(chunksize),
Some(0.002),
test_processor
);
let receiver = par_granges_runner.process().expect("Launch ParGranges Process");
let mut chrom_counts = HashMap::new();
receiver.into_iter().for_each(|p: PileupPosition| {
let positions = chrom_counts.entry(p.ref_seq.parse::<usize>().expect("parsed chr")).or_insert(0u64);
*positions += 1
});
// Validate that for each chr we get the expected number of bases
for (chrom, positions) in chrom_counts.iter() {
if use_bed && !use_vcf {
// if this was with bed, should be equal to .1
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_bed && use_vcf {
// if this was with bed and vcf, should still be equal to .1; the bed restrictions and vcf restrictions should overlap
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_vcf && !use_bed {
// total positions should be equal to the number of records for that chr in the vcf
prop_assert_eq!(vcf_truth.get(chrom).unwrap(), positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else {
// if this was bam only, should be equal to rightmost position
prop_assert_eq!(chromosomes[*chrom].2, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].2, positions);
}
}
}
}
use crate::position::{pileup_position::PileupPosition, Position};
use smartstring::SmartString;
struct TestProcessor {}
impl RegionProcessor for TestProcessor {
type P = PileupPosition;
fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P> {
let mut results = vec![];
for i in start..stop {
let chr = SmartString::from(&tid.to_string());
let pos = PileupPosition::new(chr, i);
results.push(pos);
}
results
}
}
}
par_granges.rs
//! # ParGranges
//!
//! Iterates over chunked genomic regions in parallel.
use anyhow::Result;
use bio::io::bed;
use crossbeam::channel::{bounded, Receiver};
use lazy_static::lazy_static;
use log::*;
use num_cpus;
use rayon::prelude::*;
use rust_htslib::{
bam::{HeaderView, IndexedReader, Read},
bcf::{Read as bcfRead, Reader},
};
use rust_lapper::{Interval, Lapper};
use serde::Serialize;
use std::{convert::TryInto, path::PathBuf, thread};
const BYTES_INA_GIGABYTE: usize = 1024 * 1024 * 1024;
/// A modifier to apply to the channel size formula, which is (BYTES_INA_GIGABYTE * channel_size_modifier) * threads / size_of(R::P)
/// 0.15 roughly corresponds to 1_000_000 PileupPosition objects per thread with some wiggle room.
pub const CHANNEL_SIZE_MODIFIER: f64 = 0.15;
/// The ideal number of basepairs each worker will receive. Total bp in memory at one time = `threads` * `chunksize`
pub const CHUNKSIZE: u32 = 1_000_000;
lazy_static! {
/// CHANNEL_SIZE_MODIFIER as a str
pub static ref CHANNEL_SIZE_MODIFIER_STR: String = CHANNEL_SIZE_MODIFIER.to_string();
/// CHUNKSIZE as a str
pub static ref CHUNKSIZE_STR: String = CHUNKSIZE.to_string();
}
/// RegionProcessor defines the methods that must be implemented to process a region
pub trait RegionProcessor {
/// Values of type `P` make up the output of [`process_region`] and
/// are associated with each position.
///
/// [`process_region`]: #method.process_region
type P: 'static + Send + Sync + Serialize;
/// A function that takes the tid, start, and stop and returns something serializable.
/// Note, a common use of this function will be a `fetch` -> `pileup`. The pileup must
/// be bounds checked.
fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P>;
}
/// ParGranges holds all the information and configuration needed to launch the
/// [`ParGranges::process`].
///
/// [`ParGranges::process`]: #method.process
#[derive(Debug)]
pub struct ParGranges<R: 'static + RegionProcessor + Send + Sync> {
/// Path to an indexed BAM / CRAM file
reads: PathBuf,
/// Optional reference file for CRAM
ref_fasta: Option<PathBuf>,
/// Optional path to a BED file to restrict the regions iterated over
regions_bed: Option<PathBuf>,
/// Optional path to a BCF/VCF file to restrict the regions iterated over
regions_bcf: Option<PathBuf>,
/// Number of threads this is allowed to use, uses all if None
threads: usize,
/// The ideal number of basepairs each worker will receive. Total bp in memory at one time = `threads` * `chunksize`
chunksize: u32,
/// A modifier to apply to the channel size formula, which is (BYTES_INA_GIGABYTE * channel_size_modifier) * threads / size_of(R::P)
channel_size_modifier: f64,
/// The rayon threadpool to operate in
pool: rayon::ThreadPool,
/// The implementation of [RegionProcessor] that will be used to process regions
processor: R,
}
impl<R: RegionProcessor + Send + Sync> ParGranges<R> {
/// Create a ParGranges object
///
/// # Arguments
///
/// * `reads`- path to an indexed BAM/CRAM
/// * `ref_fasta`- path to an indexed reference file for CRAM
/// * `regions_bed`- Optional BED file path restricting the regions to be examined
/// * `regions_bcf`- Optional BCF/VCF file path restricting the regions to be examined
/// * `threads`- Optional limit on the number of threads this process will use; defaults to all
/// * `chunksize`- optional argument to change the default chunksize of 1_000_000. `chunksize` determines the number of bases
/// each worker will get to work on at one time.
/// * `channel_size_modifier`- Optional argument to modify the default size ratio of the channel that `R::P` is sent on.
/// formula is: ((BYTES_INA_GIGABYTE * channel_size_modifier) * threads) / size_of(R::P)
/// * `processor`- Something that implements [`RegionProcessor`](RegionProcessor)
pub fn new(
reads: PathBuf,
ref_fasta: Option<PathBuf>,
regions_bed: Option<PathBuf>,
regions_bcf: Option<PathBuf>,
threads: Option<usize>,
chunksize: Option<u32>,
channel_size_modifier: Option<f64>,
processor: R,
) -> Self {
let threads = if let Some(threads) = threads {
threads
} else {
num_cpus::get()
};
// Keep two around for main thread and thread running the pool
let threads = std::cmp::max(threads.checked_sub(2).unwrap_or(0), 1);
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(threads)
.build()
.unwrap();
info!("Using {} worker threads.", threads);
Self {
reads,
ref_fasta,
regions_bed,
regions_bcf,
threads,
chunksize: chunksize.unwrap_or(CHUNKSIZE),
channel_size_modifier: channel_size_modifier.unwrap_or(CHANNEL_SIZE_MODIFIER),
pool,
processor,
}
}
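// Editor's note on the thread arithmetic in `new`: `checked_sub(2)` reserves
// the main thread and the spawned coordinator thread, and the `max(_, 1)`
// floor keeps at least one worker. Requesting 1, 2, or 3 threads therefore
// builds a pool of 1; requesting 8 builds a pool of 6.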
/// Process each region.
///
/// This method splits the sequences in the BAM/CRAM header into `chunksize` * `self.threads` regions (aka 'super chunks').
/// It then queries that 'super chunk' against the intervals (either the BED file, or the whole genome broken up into `chunksize`
/// regions). The results of that query are then processed by a pool of workers that apply `process_region` to each interval to
/// do perbase analysis on. The collected result for each region is then sent back over the returned `Receiver<R::P>` channel
/// for the caller to use. The results will be returned in order according to the order of the intervals used to drive this method.
///
/// While one 'super chunk' is being worked on by all workers, the results of the previous 'super chunk' are printed
/// either to a file or to STDOUT, in order.
///
/// Note, a common use case of this will be to fetch a region and do a pileup. The bounds of bases being looked at should still be
/// checked since a fetch will pull all reads that overlap the region in question.
pub fn process(self) -> Result<Receiver<R::P>> {
let channel_size: usize = ((BYTES_INA_GIGABYTE as f64 * self.channel_size_modifier).floor()
as usize
/ std::mem::size_of::<R::P>())
* self.threads;
info!(
"Creating channel of length {:?} (* 120 bytes to get mem)",
channel_size
);
let (snd, rxv) = bounded(channel_size);
thread::spawn(move || {
self.pool.install(|| {
info!("Reading from {:?}", self.reads);
let mut reader = IndexedReader::from_path(&self.reads).expect("Indexed BAM/CRAM");
// If passed add ref_fasta
if let Some(ref_fasta) = &self.ref_fasta {
reader.set_reference(ref_fasta).expect("Set ref");
}
// Get a copy of the header
let header = reader.header().to_owned();
// Work out if we are restricted to a subset of sites
let bed_intervals = if let Some(regions_bed) = &self.regions_bed {
Some(
Self::bed_to_intervals(&header, regions_bed)
.expect("Parsed BED to intervals"),
)
} else {
None
};
let bcf_intervals = if let Some(regions_bcf) = &self.regions_bcf {
Some(
Self::bcf_to_intervals(&header, regions_bcf)
.expect("Parsed BCF/VCF to intervals"),
)
} else {
None
};
let restricted_ivs = match (bed_intervals, bcf_intervals) {
(Some(bed_ivs), Some(bcf_ivs)) => Some(Self::merge_intervals(bed_ivs, bcf_ivs)),
(Some(bed_ivs), None) => Some(bed_ivs),
(None, Some(bcf_ivs)) => Some(bcf_ivs),
(None, None) => None,
};
let intervals = if let Some(restricted) = restricted_ivs {
restricted
} else {
Self::header_to_intervals(&header, self.chunksize)
.expect("Parsed BAM/CRAM header to intervals")
};
// The number of positions to try to process in one batch
let serial_step_size = self
.chunksize
.checked_mul(self.threads as u32)
.unwrap_or(u32::MAX); // aka superchunk
for (tid, intervals) in intervals.into_iter().enumerate() {
let tid: u32 = tid as u32;
let tid_end: u32 = header.target_len(tid).unwrap().try_into().unwrap();
// Result holds the processed positions to be sent to writer
let mut result = vec![];
for chunk_start in (0..tid_end).step_by(serial_step_size as usize) {
let tid_name = std::str::from_utf8(header.tid2name(tid)).unwrap();
let chunk_end =
std::cmp::min(chunk_start as u32 + serial_step_size, tid_end);
info!(
"Batch Processing {}:{}-{}",
tid_name, chunk_start, chunk_end
);
let (r, _) = rayon::join(
|| {
// Must be a vec so that par_iter works and results stay in order
let ivs: Vec<Interval<u32, ()>> =
Lapper::<u32, ()>::find(&intervals, chunk_start, chunk_end)
// Truncate intervals that extend forward or backward of chunk in question
.map(|iv| Interval {
start: std::cmp::max(iv.start, chunk_start),
stop: std::cmp::min(iv.stop, chunk_end),
val: (),
})
.collect();
ivs.into_par_iter()
.flat_map(|iv| {
info!("Processing {}:{}-{}", tid_name, iv.start, iv.stop);
self.processor.process_region(tid, iv.start, iv.stop)
})
.collect()
},
|| {
result.into_iter().for_each(|p| {
snd.send(p).expect("Sent a serializable to writer")
})
},
);
result = r;
}
// Send final set of results
result
.into_iter()
.for_each(|p| snd.send(p).expect("Sent a serializable to writer"));
}
});
});
Ok(rxv)
}
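// Editor's note: the `rayon::join` inside `process` pipelines compute and
// I/O; while one closure processes super-chunk N on the pool, the other
// drains super-chunk N-1 into the bounded channel. Schematically (helper
// names are hypothetical):
//
//     let (r, _) = rayon::join(
//         || process_superchunk(n),       // CPU-bound, runs on the pool
//         || send_results(prev_results),  // blocks when the channel is full
//     );
//     prev_results = r;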
// Convert the header into intervals of equally sized chunks. The last interval may be short.
fn header_to_intervals(header: &HeaderView, chunksize: u32) -> Result<Vec<Lapper<u32, ()>>> {
let mut intervals = vec![vec![]; header.target_count() as usize];
for tid in 0..(header.target_count()) {
let tid_len: u32 = header.target_len(tid).unwrap().try_into().unwrap();
for start in (0..tid_len).step_by(chunksize as usize) {
let stop = std::cmp::min(start as u32 + chunksize, tid_len);
intervals[tid as usize].push(Interval {
start: start as u32,
stop,
val: (),
});
}
}
Ok(intervals.into_iter().map(|ivs| Lapper::new(ivs)).collect())
}
/// Read a bed file into a vector of lappers with the index representing the TID
// TODO add a proper error message
fn bed_to_intervals(header: &HeaderView, bed_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bed_reader = bed::Reader::from_file(bed_file)?;
let mut intervals = vec![vec![]; header.target_count() as usize];
for record in bed_reader.records() {
let record = record?;
let tid = header
.tid(record.chrom().as_bytes())
.expect("Chromosome not found in BAM/CRAM header");
intervals[tid as usize].push(Interval {
start: record.start().try_into().unwrap(),
stop: record.end().try_into().unwrap(),
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
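// Editor's note: BED coordinates are 0-based half-open, matching `Interval`
// directly; a line `chr1\t100\t200` becomes
// Interval { start: 100, stop: 200, val: () } on the Lapper at tid("chr1").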
/// Read a BCF/VCF file into a vector of lappers with index representing the TID
fn bcf_to_intervals(header: &HeaderView, bcf_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bcf_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header = bcf_header_reader.header();
let mut intervals = vec![vec![]; header.target_count() as usize];
// TODO: validate the headers against each other
for record in bcf_reader.records() {
let record = record?;
let record_rid = bcf_header.rid2name(record.rid().unwrap()).unwrap();
let tid = header
.tid(record_rid)
.expect("Chromosome not found in BAM/CRAM header");
let pos: u32 = record
.pos()
.try_into()
.expect("Got a negative value for pos");
intervals[tid as usize].push(Interval {
start: pos,
stop: pos + 1,
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
/// Merge two sets of restriction intervals together
fn merge_intervals(
a_ivs: Vec<Lapper<u32, ()>>,
b_ivs: Vec<Lapper<u32, ()>>,
) -> Vec<Lapper<u32, ()>> {
let mut intervals = vec![vec![]; a_ivs.len()];
for (i, (a_lapper, b_lapper)) in a_ivs.into_iter().zip(b_ivs.into_iter()).enumerate() {
intervals[i] = a_lapper.into_iter().chain(b_lapper.into_iter()).collect();
}
intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect()
}
}
#[cfg(test)]
mod test {
use super::*;
use bio::io::bed;
use num_cpus;
use proptest::prelude::*;
use rust_htslib::{bam, bcf};
use rust_lapper::{Interval, Lapper};
use std::collections::{HashMap, HashSet};
use tempfile::tempdir;
// The purpose of these tests is to demonstrate that positions are covered once under a variety of circumstances
prop_compose! {
fn arb_iv_start(max_iv: u64)(start in 0..max_iv/2) -> u64 { start }
}
prop_compose! {
fn arb_iv_size(max_iv: u64)(size in 1..max_iv/2) -> u64 { size }
}
prop_compose! {
// Create an arbitrary interval; start and size are each drawn below max_iv / 2, so stop < max_iv
fn arb_iv(max_iv: u64)(start in arb_iv_start(max_iv), size in arb_iv_size(max_iv)) -> Interval<u64, ()> {
Interval {start, stop: start + size, val: ()}
}
}
// Create an arbitrary number of intervals along with the expected number of positions they cover
fn arb_ivs(
max_iv: u64, // max iv size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = (Vec<Interval<u64, ()>>, u64, u64)> {
prop::collection::vec(arb_iv(max_iv), 0..max_ivs).prop_map(|vec| {
let mut furthest_right = 0;
let lapper = Lapper::new(vec.clone());
let expected = lapper.cov();
for iv in vec.iter() {
if iv.stop > furthest_right {
furthest_right = iv.stop;
}
}
(vec, expected, furthest_right)
})
}
// Create arbitrary number of contigs with arbitrary intervals each
fn arb_chrs(
max_chr: usize, // number of chromosomes to use
max_iv: u64, // max interval size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = Vec<(Vec<Interval<u64, ()>>, u64, u64)>> {
prop::collection::vec(arb_ivs(max_iv, max_ivs), 0..max_chr)
}
// An empty BAM with correct header
// A BED file with the randomly generated intervals (with expected number of positions)
// proptest generate random chunksize, cpus
proptest! {
#[test]
// add random chunksize and random cpus
// NB: using any larger numbers for this tends to blow up the test runtime
fn interval_set(chromosomes in arb_chrs(4, 10_000, 1_000), chunksize in any::<u32>(), cpus in 0..num_cpus::get(), use_bed in any::<bool>(), use_vcf in any::<bool>()) {
let tempdir = tempdir().unwrap();
let bam_path = tempdir.path().join("test.bam");
let bed_path = tempdir.path().join("test.bed");
let vcf_path = tempdir.path().join("test.vcf");
// Build a BAM
let mut header = bam::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
let mut chr_rec = bam::header::HeaderRecord::new(b"SQ");
chr_rec.push_tag(b"SN", &i.to_string());
chr_rec.push_tag(b"LN", &chr.2.to_string()); // set len as max observed
header.push_record(&chr_rec);
}
let writer = bam::Writer::from_path(&bam_path, &header, bam::Format::BAM).expect("Opened test.bam for writing");
drop(writer); // force flush the writer so the header info is written
bam::index::build(&bam_path, None, bam::index::Type::BAI, 1).unwrap();
// Build a bed
let mut writer = bed::Writer::to_file(&bed_path).expect("Opened test.bed for writing");
for (i, chr) in chromosomes.iter().enumerate() {
for iv in chr.0.iter() {
let mut record = bed::Record::new();
record.set_start(iv.start);
record.set_end(iv.stop);
record.set_chrom(&i.to_string());
record.set_score(&0.to_string());
writer.write(&record).expect("Wrote to test.bed");
}
}
drop(writer); // force flush
// Build a VCF file
let mut vcf_truth = HashMap::new();
let mut header = bcf::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
header.push_record(format!("##contig=<ID={},length={}>", &i.to_string(), &chr.2.to_string()).as_bytes());
}
let mut writer = bcf::Writer::from_path(&vcf_path, &header, true, bcf::Format::VCF).expect("Failed to open test.vcf for writing");
let mut record = writer.empty_record();
for (i, chr) in chromosomes.iter().enumerate() {
record.set_rid(Some(i as u32));
let counter = vcf_truth.entry(i).or_insert(0);
let mut seen = HashSet::new();
for iv in chr.0.iter() {
if !seen.contains(&iv.start) {
*counter += 1;
seen.insert(iv.start);
}
record.set_pos(iv.start as i64);
writer.write(&record).expect("Failed to write to test.vcf")
}
}
drop(writer); // force flush
// Create the processor with a dumb impl of processing that just returns positions with no counting
let test_processor = TestProcessor {};
let par_granges_runner = ParGranges::new(
bam_path,
None,
if use_bed { Some(bed_path) } else { None }, // do one with regions
if use_vcf { Some(vcf_path) } else { None }, // do one with vcf regions
Some(cpus),
Some(chunksize),
Some(0.002),
test_processor
);
let receiver = par_granges_runner.process().expect("Launch ParGranges Process");
let mut chrom_counts = HashMap::new();
receiver.into_iter().for_each(|p: PileupPosition| {
let positions = chrom_counts.entry(p.ref_seq.parse::<usize>().expect("parsed chr")).or_insert(0u64);
*positions += 1
});
// Validate that for each chr we get the expected number of bases
for (chrom, positions) in chrom_counts.iter() {
if use_bed && !use_vcf {
// if this was with bed, should be equal to .1
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_bed && use_vcf {
// if this was with bed and vcf, should still be equal to .1; the bed restrictions and vcf restrictions should overlap
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_vcf && !use_bed {
// total positions should be equal to the number of records for that chr in the vcf
prop_assert_eq!(vcf_truth.get(chrom).unwrap(), positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else {
// if this was bam only, should be equal to rightmost position
prop_assert_eq!(chromosomes[*chrom].2, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].2, positions);
}
}
}
}
use crate::position::{pileup_position::PileupPosition, Position};
use smartstring::SmartString;
struct TestProcessor {}
impl RegionProcessor for TestProcessor {
type P = PileupPosition;
fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P> {
let mut results = vec![];
for i in start..stop {
let chr = SmartString::from(&tid.to_string());
let pos = PileupPosition::new(chr, i);
results.push(pos);
}
results
}
}
}
par_granges.rs
//! # ParGranges
//!
//! Iterates over chunked genomic regions in parallel.
use anyhow::Result;
use bio::io::bed;
use crossbeam::channel::{bounded, Receiver};
use lazy_static::lazy_static;
use log::*;
use num_cpus;
use rayon::prelude::*;
use rust_htslib::{
bam::{HeaderView, IndexedReader, Read},
bcf::{Read as bcfRead, Reader},
};
use rust_lapper::{Interval, Lapper};
use serde::Serialize;
use std::{convert::TryInto, path::PathBuf, thread};
const BYTES_INA_GIGABYTE: usize = 1024 * 1024 * 1024;
/// A modifier to apply to the channel size formula, which is (BYTES_INA_GIGABYTE * channel_size_modifier) * threads / size_of(R::P)
/// 0.15 roughly corresponds to 1_000_000 PileupPosition objects per thread with some wiggle room.
pub const CHANNEL_SIZE_MODIFIER: f64 = 0.15;
/// The ideal number of basepairs each worker will receive. Total bp in memory at one time = `threads` * `chunksize`
pub const CHUNKSIZE: u32 = 1_000_000;
lazy_static! {
/// CHANNEL_SIZE_MODIFIER as a str
pub static ref CHANNEL_SIZE_MODIFIER_STR: String = CHANNEL_SIZE_MODIFIER.to_string();
/// CHUNKSIZE as a str
pub static ref CHUNKSIZE_STR: String = CHUNKSIZE.to_string();
}
/// RegionProcessor defines the methods that must be implemented to process a region
pub trait RegionProcessor {
/// Values of type `P` make up the output of [`process_region`] and
/// are associated with each position.
///
/// [`process_region`]: #method.process_region
type P: 'static + Send + Sync + Serialize;
/// A function that takes the tid, start, and stop and returns something serializable.
/// Note, a common use of this function will be a `fetch` -> `pileup`. The pileup must
/// be bounds checked.
fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P>;
}
/// ParGranges holds all the information and configuration needed to launch the
/// [`ParGranges::process`].
///
/// [`ParGranges::process`]: #method.process
#[derive(Debug)]
pub struct ParGranges<R: 'static + RegionProcessor + Send + Sync> {
/// Path to an indexed BAM / CRAM file
reads: PathBuf,
/// Optional reference file for CRAM
ref_fasta: Option<PathBuf>,
/// Optional path to a BED file to restrict the regions iterated over
regions_bed: Option<PathBuf>,
/// Optional path to a BCF/VCF file to restrict the regions iterated over
regions_bcf: Option<PathBuf>,
/// Number of threads this is allowed to use, uses all if None
threads: usize,
/// The ideal number of basepairs each worker will receive. Total bp in memory at one time = `threads` * `chunksize`
chunksize: u32,
/// A modifier to apply to the channel size formula, which is (BYTES_INA_GIGABYTE * channel_size_modifier) * threads / size_of(R::P)
channel_size_modifier: f64,
/// The rayon threadpool to operate in
pool: rayon::ThreadPool,
/// The implementation of [RegionProcessor] that will be used to process regions
processor: R,
}
impl<R: RegionProcessor + Send + Sync> ParGranges<R> {
/// Create a ParGranges object
///
/// # Arguments
///
/// * `reads`- path to an indexed BAM/CRAM
/// * `ref_fasta`- path to an indexed reference file for CRAM
/// * `regions_bed`- Optional BED file path restricting the regions to be examined
/// * `regions_bcf`- Optional BCF/VCF file path restricting the regions to be examined
/// * `threads`- Optional limit on the number of threads this process will use; defaults to all
/// * `chunksize`- optional argument to change the default chunksize of 1_000_000. `chunksize` determines the number of bases
/// each worker will get to work on at one time.
/// * `channel_size_modifier`- Optional argument to modify the default size ratio of the channel that `R::P` is sent on.
/// formula is: ((BYTES_INA_GIGABYTE * channel_size_modifier) * threads) / size_of(R::P)
/// * `processor`- Something that implements [`RegionProcessor`](RegionProcessor)
pub fn new(
reads: PathBuf,
ref_fasta: Option<PathBuf>,
regions_bed: Option<PathBuf>,
regions_bcf: Option<PathBuf>,
threads: Option<usize>,
chunksize: Option<u32>,
channel_size_modifier: Option<f64>,
processor: R,
) -> Self {
let threads = if let Some(threads) = threads {
threads
} else {
num_cpus::get()
};
// Keep two around for main thread and thread running the pool
let threads = std::cmp::max(threads.checked_sub(2).unwrap_or(0), 1);
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(threads)
.build()
.unwrap();
info!("Using {} worker threads.", threads);
Self {
reads,
ref_fasta,
regions_bed,
regions_bcf,
threads,
chunksize: chunksize.unwrap_or(CHUNKSIZE),
channel_size_modifier: channel_size_modifier.unwrap_or(CHANNEL_SIZE_MODIFIER),
pool,
processor,
}
}
/// Process each region.
///
/// This method splits the sequences in the BAM/CRAM header into `chunksize` * `self.threads` regions (aka 'super chunks').
/// It then queries that 'super chunk' against the intervals (either the BED file, or the whole genome broken up into `chunksize`
/// regions). The results of that query are then processed by a pool of workers that apply `process_region` to each interval to
/// do perbase analysis on. The collected result for each region is then sent back over the returned `Receiver<R::P>` channel
/// for the caller to use. The results will be returned in order according to the order of the intervals used to drive this method.
///
/// While one 'super chunk' is being worked on by all workers, the results of the previous 'super chunk' are printed
/// either to a file or to STDOUT, in order.
///
/// Note, a common use case of this will be to fetch a region and do a pileup. The bounds of bases being looked at should still be
/// checked since a fetch will pull all reads that overlap the region in question.
pub fn process(self) -> Result<Receiver<R::P>> {
let channel_size: usize = ((BYTES_INA_GIGABYTE as f64 * self.channel_size_modifier).floor()
as usize
/ std::mem::size_of::<R::P>())
* self.threads;
info!(
"Creating channel of length {:?} (* 120 bytes to get mem)",
channel_size
);
let (snd, rxv) = bounded(channel_size);
thread::spawn(move || {
self.pool.install(|| {
info!("Reading from {:?}", self.reads);
let mut reader = IndexedReader::from_path(&self.reads).expect("Indexed BAM/CRAM");
// If passed add ref_fasta
if let Some(ref_fasta) = &self.ref_fasta {
reader.set_reference(ref_fasta).expect("Set ref");
}
// Get a copy of the header
let header = reader.header().to_owned();
// Work out if we are restricted to a subset of sites
let bed_intervals = if let Some(regions_bed) = &self.regions_bed {
Some(
Self::bed_to_intervals(&header, regions_bed)
.expect("Parsed BED to intervals"),
)
} else {
None
};
let bcf_intervals = if let Some(regions_bcf) = &self.regions_bcf {
Some(
Self::bcf_to_intervals(&header, regions_bcf)
.expect("Parsed BCF/VCF to intervals"),
)
} else {
None
};
let restricted_ivs = match (bed_intervals, bcf_intervals) {
(Some(bed_ivs), Some(bcf_ivs)) => Some(Self::merge_intervals(bed_ivs, bcf_ivs)),
(Some(bed_ivs), None) => Some(bed_ivs),
(None, Some(bcf_ivs)) => Some(bcf_ivs),
(None, None) => None,
};
let intervals = if let Some(restricted) = restricted_ivs {
restricted
} else {
Self::header_to_intervals(&header, self.chunksize)
.expect("Parsed BAM/CRAM header to intervals")
};
// The number of positions to try to process in one batch
let serial_step_size = self
.chunksize
.checked_mul(self.threads as u32)
.unwrap_or(u32::MAX); // aka superchunk
for (tid, intervals) in intervals.into_iter().enumerate() {
let tid: u32 = tid as u32;
let tid_end: u32 = header.target_len(tid).unwrap().try_into().unwrap();
// Result holds the processed positions to be sent to writer
let mut result = vec![];
for chunk_start in (0..tid_end).step_by(serial_step_size as usize) {
let tid_name = std::str::from_utf8(header.tid2name(tid)).unwrap();
let chunk_end =
std::cmp::min(chunk_start as u32 + serial_step_size, tid_end);
info!(
"Batch Processing {}:{}-{}",
tid_name, chunk_start, chunk_end
);
let (r, _) = rayon::join(
|| {
// Must be a vec so that par_iter works and results stay in order
let ivs: Vec<Interval<u32, ()>> =
Lapper::<u32, ()>::find(&intervals, chunk_start, chunk_end)
// Truncate intervals that extend forward or backward of chunk in question
.map(|iv| Interval {
start: std::cmp::max(iv.start, chunk_start),
stop: std::cmp::min(iv.stop, chunk_end),
val: (),
})
.collect();
ivs.into_par_iter()
.flat_map(|iv| {
info!("Processing {}:{}-{}", tid_name, iv.start, iv.stop);
self.processor.process_region(tid, iv.start, iv.stop)
})
.collect()
},
|| {
result.into_iter().for_each(|p| {
snd.send(p).expect("Sent a serializable to writer")
})
},
);
result = r;
}
// Send final set of results
result
.into_iter()
.for_each(|p| snd.send(p).expect("Sent a serializable to writer"));
}
});
});
Ok(rxv)
}
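// Editor's sketch of the backpressure the bounded channel in `process`
// provides (crossbeam semantics, illustrative capacity):
//
//     let (snd, rxv) = crossbeam::channel::bounded::<u32>(2);
//     snd.send(1).unwrap();
//     snd.send(2).unwrap();
//     // A third `send` now blocks until the consumer calls `rxv.recv()`;
//     // this is what caps memory when workers outpace the writer.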
// Convert the header into intervals of equally sized chunks. The last interval may be short.
fn header_to_intervals(header: &HeaderView, chunksize: u32) -> Result<Vec<Lapper<u32, ()>>> {
let mut intervals = vec![vec![]; header.target_count() as usize];
for tid in 0..(header.target_count()) {
let tid_len: u32 = header.target_len(tid).unwrap().try_into().unwrap();
for start in (0..tid_len).step_by(chunksize as usize) {
let stop = std::cmp::min(start as u32 + chunksize, tid_len);
intervals[tid as usize].push(Interval {
start: start as u32,
stop,
val: (),
});
}
}
Ok(intervals.into_iter().map(|ivs| Lapper::new(ivs)).collect())
}
/// Read a bed file into a vector of lappers with the index representing the TID
// TODO add a proper error message
fn bed_to_intervals(header: &HeaderView, bed_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bed_reader = bed::Reader::from_file(bed_file)?;
let mut intervals = vec![vec![]; header.target_count() as usize];
for record in bed_reader.records() {
let record = record?;
let tid = header
.tid(record.chrom().as_bytes())
.expect("Chromosome not found in BAM/CRAM header");
intervals[tid as usize].push(Interval {
start: record.start().try_into().unwrap(),
stop: record.end().try_into().unwrap(),
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
/// Read a BCF/VCF file into a vector of lappers with index representing the TID
fn bcf_to_intervals(header: &HeaderView, bcf_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bcf_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header = bcf_header_reader.header();
let mut intervals = vec![vec![]; header.target_count() as usize];
// TODO: validate the headers against each other
for record in bcf_reader.records() {
let record = record?;
let record_rid = bcf_header.rid2name(record.rid().unwrap()).unwrap();
let tid = header
.tid(record_rid)
.expect("Chromosome not found in BAM/CRAM header");
let pos: u32 = record
.pos()
.try_into()
.expect("Got a negative value for pos");
intervals[tid as usize].push(Interval {
start: pos,
stop: pos + 1,
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
/// Merge two sets of restriction intervals together
fn merge_intervals(
a_ivs: Vec<Lapper<u32, ()>>,
b_ivs: Vec<Lapper<u32, ()>>,
) -> Vec<Lapper<u32, ()>> {
let mut intervals = vec![vec![]; a_ivs.len()];
for (i, (a_lapper, b_lapper)) in a_ivs.into_iter().zip(b_ivs.into_iter()).enumerate() {
intervals[i] = a_lapper.into_iter().chain(b_lapper.into_iter()).collect();
}
intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect()
}
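// Worked example (illustrative): if the BED lapper for a contig holds [0, 10) and
// the BCF lapper holds [5, 20), chaining them and calling merge_overlaps() yields
// [0, 20), so a position restricted by both inputs is still processed exactly once.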
}
#[cfg(test)]
mod test {
use super::*;
use bio::io::bed;
use num_cpus;
use proptest::prelude::*;
use rust_htslib::{bam, bcf};
use rust_lapper::{Interval, Lapper};
use std::collections::{HashMap, HashSet};
use tempfile::tempdir;
// The purpose of these tests is to demonstrate that positions are covered once under a variety of circumstances
prop_compose! {
fn arb_iv_start(max_iv: u64)(start in 0..max_iv/2) -> u64 { start }
}
prop_compose! {
fn arb_iv_size(max_iv: u64)(size in 1..max_iv/2) -> u64 { size }
}
prop_compose! {
// Create an arbitrary interval where the min size == max_iv / 2
fn arb_iv(max_iv: u64)(start in arb_iv_start(max_iv), size in arb_iv_size(max_iv)) -> Interval<u64, ()> {
Interval {start, stop: start + size, val: ()}
}
}
// Create an arbitrary number of intervals along with the expected number of positions they cover
fn arb_ivs(
max_iv: u64, // max iv size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = (Vec<Interval<u64, ()>>, u64, u64)> {
prop::collection::vec(arb_iv(max_iv), 0..max_ivs).prop_map(|vec| {
let mut furthest_right = 0;
let lapper = Lapper::new(vec.clone());
let expected = lapper.cov();
for iv in vec.iter() {
if iv.stop > furthest_right {
furthest_right = iv.stop;
}
}
(vec, expected, furthest_right)
})
}
// Create arbitrary number of contigs with arbitrary intervals each
fn arb_chrs(
max_chr: usize, // number of chromosomes to use
max_iv: u64, // max interval size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = Vec<(Vec<Interval<u64, ()>>, u64, u64)>> {
prop::collection::vec(arb_ivs(max_iv, max_ivs), 0..max_chr)
}
// An empty BAM with correct header
// A BED file with the randomly generated intervals (with expected number of positions)
// proptest generates a random chunksize and cpu count
proptest! {
#[test]
// add random chunksize and random cpus
// NB: using any larger numbers for this tends to blow up the test runtime
fn interval_set(chromosomes in arb_chrs(4, 10_000, 1_000), chunksize in any::<u32>(), cpus in 0..num_cpus::get(), use_bed in any::<bool>(), use_vcf in any::<bool>()) {
let tempdir = tempdir().unwrap();
let bam_path = tempdir.path().join("test.bam");
let bed_path = tempdir.path().join("test.bed");
let vcf_path = tempdir.path().join("test.vcf");
// Build a BAM
let mut header = bam::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
let mut chr_rec = bam::header::HeaderRecord::new(b"SQ");
chr_rec.push_tag(b"SN", &i.to_string());
chr_rec.push_tag(b"LN", &chr.2.to_string()); // set len as max observed
header.push_record(&chr_rec);
}
let writer = bam::Writer::from_path(&bam_path, &header, bam::Format::BAM).expect("Opened test.bam for writing");
drop(writer); // force flush the writer so the header info is written
bam::index::build(&bam_path, None, bam::index::Type::BAI, 1).unwrap();
// Build a bed
let mut writer = bed::Writer::to_file(&bed_path).expect("Opened test.bed for writing");
for (i, chr) in chromosomes.iter().enumerate() {
for iv in chr.0.iter() {
let mut record = bed::Record::new();
record.set_start(iv.start);
record.set_end(iv.stop);
record.set_chrom(&i.to_string());
record.set_score(&0.to_string());
writer.write(&record).expect("Wrote to test.bed");
}
}
drop(writer); // force flush
// Build a VCF file
let mut vcf_truth = HashMap::new();
let mut header = bcf::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
header.push_record(format!("##contig=<ID={},length={}>", &i.to_string(), &chr.2.to_string()).as_bytes());
}
let mut writer = bcf::Writer::from_path(&vcf_path, &header, true, bcf::Format::VCF).expect("Failed to open test.vcf for writing");
let mut record = writer.empty_record();
for (i, chr) in chromosomes.iter().enumerate() {
record.set_rid(Some(i as u32));
let counter = vcf_truth.entry(i).or_insert(0);
let mut seen = HashSet::new();
for iv in chr.0.iter() {
if !seen.contains(&iv.start) {
*counter += 1;
seen.insert(iv.start);
}
record.set_pos(iv.start as i64);
writer.write(&record).expect("Failed to write to test.vcf")
}
}
drop(writer); // force flush
// Create the processor with a dumb impl of processing that just returns positions with no counting
let test_processor = TestProcessor {};
let par_granges_runner = ParGranges::new(
bam_path,
None,
if use_bed { Some(bed_path) } else { None }, // do one with regions
if use_vcf { Some(vcf_path) } else { None }, // do one with vcf regions
Some(cpus),
Some(chunksize),
Some(0.002),
test_processor
);
let receiver = par_granges_runner.process().expect("Launch ParGranges Process");
let mut chrom_counts = HashMap::new();
receiver.into_iter().for_each(|p: PileupPosition| {
let positions = chrom_counts.entry(p.ref_seq.parse::<usize>().expect("parsed chr")).or_insert(0u64);
*positions += 1
});
// Validate that for each chr we get the expected number of bases
for (chrom, positions) in chrom_counts.iter() {
if use_bed && !use_vcf {
// if this was with bed, should be equal to .1
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_bed && use_vcf {
// if this was with bed, should be equal to .1; bed restrictions and vcf restrictions should overlap
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_vcf && !use_bed {
// total positions should be equal to the number of records for that chr in the vcf
prop_assert_eq!(vcf_truth.get(chrom).unwrap(), positions, "chr: {}, expected: {}, found: {}", chrom, vcf_truth.get(chrom).unwrap(), positions);
} else {
// if this was bam only, should be equal to the rightmost position
prop_assert_eq!(chromosomes[*chrom].2, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].2, positions);
}
}
}
}
use crate::position::{pileup_position::PileupPosition, Position};
use smartstring::SmartString;
struct TestProcessor {}
impl RegionProcessor for TestProcessor {
type P = PileupPosition;
fn | (&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P> {
let mut results = vec![];
for i in start..stop {
let chr = SmartString::from(&tid.to_string());
let pos = PileupPosition::new(chr, i);
results.push(pos);
}
results
}
}
}
| process_region | identifier_name |
medrs.rs | extern crate config;
extern crate mediawiki;
extern crate papers;
extern crate regex;
extern crate wikibase;
#[macro_use]
extern crate lazy_static;
/*
use papers::crossref2wikidata::Crossref2Wikidata;
use papers::orcid2wikidata::Orcid2Wikidata;
use papers::pubmed2wikidata::Pubmed2Wikidata;
use papers::semanticscholar2wikidata::Semanticscholar2Wikidata;
*/
use docopt::Docopt;
use mediawiki::api::Api;
use papers::wikidata_papers::WikidataPapers;
use papers::*;
use regex::Regex;
use serde::Deserialize;
use std::str;
use std::{
fs::File,
io::{prelude::*, BufReader},
};
use urlencoding;
fn lines_from_file(filename: &str) -> Vec<String> {
if filename.is_empty() {
return vec![];
}
let file = File::open(filename).unwrap_or_else(|_| panic!("no such file: {}", filename));
let buf = BufReader::new(file);
buf.lines()
.map(|l| l.expect("Could not parse line"))
.collect()
}
fn read_file_to_string(filename: &str) -> String {
let mut file = match File::open(filename) {
Ok(file) => file,
Err(_) => panic!("no such file: {}", filename),
};
let mut file_contents = String::new();
file.read_to_string(&mut file_contents)
.expect("failed to read!");
file_contents
}
fn replace_sparql_placeolder(pattern: &str, sparql: &String, lines: &Vec<String>) -> String {
let rep: String = if lines.is_empty() {
"".to_string()
} else {
"wd:".to_string() + &lines.join(" wd:")
};
sparql.replace(pattern, &rep)
}
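// Worked example (illustrative): with lines = ["Q1", "Q2"] the replacement text is
// "wd:Q1 wd:Q2", so a pattern such as "VALUES ?article { %%ARTICLES%% }" expands to
// "VALUES ?article { wd:Q1 wd:Q2 }"; an empty input file simply erases the placeholder.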
fn output_sparql_result_items(sparql: &String) {
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
let result = api.sparql_query(&sparql).expect("SPARQL query failed");
let varname = result["head"]["vars"][0]
.as_str()
.expect("Can't find first variable name in SPARQL result");
let entities = api.entities_from_sparql_result(&result, &varname);
println!("{}", entities.join("\n"));
}
/*
fn get_all_from_stdin() -> String {
let mut payload = Vec::new();
io::stdin().read_to_end(&mut payload).unwrap();
let s = match str::from_utf8(&payload) {
Ok(v) => v,
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
};
s.to_string()
}
*/
fn command_query(args: &Args) {
if args.arg_query.is_empty() {
println!("Requires SPARQL query");
}
let sparql = &args.arg_query;
output_sparql_result_items(&sparql);
}
fn command_run(args: &Args) {
let articles = lines_from_file(&args.flag_articles);
let reviews = lines_from_file(&args.flag_reviews);
let topics = lines_from_file(&args.flag_topics);
let journals = lines_from_file(&args.flag_journals);
let publishers = lines_from_file(&args.flag_publishers);
let mut sparql = read_file_to_string(&args.flag_sparql);
sparql = replace_sparql_placeolder("%%ARTICLES%%", &sparql, &articles);
sparql = replace_sparql_placeolder("%%REVIEWS%%", &sparql, &reviews);
sparql = replace_sparql_placeolder("%%TOPICS%%", &sparql, &topics);
sparql = replace_sparql_placeolder("%%JOURNALS%%", &sparql, &journals);
sparql = replace_sparql_placeolder("%%PUBLISHERS%%", &sparql, &publishers);
output_sparql_result_items(&sparql);
}
fn get_api_url_for_wiki(wiki: &String) -> Option<String> {
// Get site matrix from wikidata
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
let params = api.params_into(&vec![("action", "sitematrix")]);
let site_matrix = api
.get_query_api_json(¶ms)
.expect("Can't load sitematrix from wikidata API");
//println!("{:#?}", &site_matrix);
// Go through the "normal" objects
let mut ret: Option<String> = None;
site_matrix["sitematrix"]
.as_object()
.expect("sitematrix is not an object")
.iter()
.for_each(|(_, data)| {
match data["site"]
.as_array()
.unwrap_or(&vec![])
.iter()
.filter_map(|x| {
if x["dbname"].as_str().unwrap_or("") == wiki {
x["url"].as_str()
} else {
None
}
})
.next()
{
Some(url) => {
ret = Some(url.to_string() + "/w/api.php");
}
None => {}
}
});
// Try the "specials"
site_matrix["sitematrix"]["specials"]
.as_array()
.unwrap_or(&vec![])
.iter()
.for_each(|x| {
if x["dbname"].as_str().unwrap_or("") == wiki {
ret = Some(x["url"].as_str().unwrap_or("").to_string() + "/w/api.php");
}
});
ret
}
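// Worked example (illustrative): get_api_url_for_wiki(&"enwiki".to_string()) walks
// the sitematrix to Some("https://en.wikipedia.org/w/api.php"); wikis like
// commonswiki are only found by the second pass over the "specials" array.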
fn get_external_urls(api_url: &String, title: &String) -> Vec<String> {
let api = Api::new(&api_url).expect(&format!("Can't connect to {}", &api_url));
let params = api.params_into(&vec![ | ("action", "query"),
("prop", "extlinks"),
("ellimit", "500"),
("titles", title.as_str()),
]);
let result = api
.get_query_api_json_all(¶ms)
.expect("query.extlinks failed");
let mut urls: Vec<String> = vec![];
result["query"]["pages"]
.as_object()
.expect("query.pages in result not an object")
.iter()
.for_each(|(_page_id, data)| {
data["extlinks"]
.as_array()
.expect("extlinks not an array")
.iter()
.for_each(|x| urls.push(x["*"].as_str().expect("* not a string").to_string()));
});
urls
}
fn get_paper_q(api: &Api, id: &GenericWorkIdentifier) -> Option<String> {
let wdp = WikidataPapers::new();
match &id.work_type {
GenericWorkType::Property(prop) => {
let result = wdp.search_external_id(&prop, &id.id, api);
result.get(0).map(|s| s.to_owned()) // First one will do
}
_ => None,
}
/*
wdp.add_adapter(Box::new(Pubmed2Wikidata::new()));
wdp.add_adapter(Box::new(Crossref2Wikidata::new()));
wdp.add_adapter(Box::new(Semanticscholar2Wikidata::new()));
wdp.add_adapter(Box::new(Orcid2Wikidata::new()));
let ids = vec![id.to_owned()];
let ids = wdp.update_from_paper_ids(&ids);
let q = ids
.iter()
.filter_map(|x| match x.work_type {
GenericWorkType::Item => Some(x.id.to_owned()),
_ => None,
})
.next();
q*/
}
fn command_refs(args: &Args) {
if args.arg_wiki.is_empty() {
panic!("wiki code (e.g. 'enwiki') is required");
}
if args.arg_title.is_empty() {
panic!("article title is required");
}
let wiki = &args.arg_wiki;
let title = &args.arg_title;
// Get the API URL for the wiki
let api_url = match get_api_url_for_wiki(&wiki) {
Some(url) => url,
None => panic!("Can't find API URL for {}", &wiki),
};
// Get all external URLs from that page, on that wiki
let urls = get_external_urls(&api_url, &title);
//println!("{:#?}", &urls);
lazy_static! {
static ref RE_DOI: Regex = Regex::new(r#"^.*?//doi\.org/(.+)$"#).unwrap();
static ref RE_PMID: Regex =
Regex::new(r#"^.*?//www\.ncbi\.nlm\.nih\.gov/pubmed/(\d+)$"#).unwrap();
static ref RE_PMCID: Regex =
Regex::new(r#"^.*?//www\.ncbi\.nlm\.nih\.gov/pmc/articles/PMC(\d+)$"#).unwrap();
}
let mut ids: Vec<GenericWorkIdentifier> = vec![];
for url in urls {
match RE_DOI.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
match urlencoding::decode(&id) {
Ok(id) => {
ids.push(GenericWorkIdentifier::new_prop(PROP_DOI, &id));
}
_ => {}
}
}
None => {}
}
match RE_PMID.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
ids.push(GenericWorkIdentifier::new_prop(PROP_PMID, id));
}
None => {}
}
match RE_PMCID.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
ids.push(GenericWorkIdentifier::new_prop(PROP_PMCID, id));
}
None => {}
}
}
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
for id in ids {
match get_paper_q(&api, &id) {
Some(q) => {
println!("{}", &q);
}
None => {
/*
/TODO
let prop = match &id.work_type {
GenericWorkType::Property(p) => p,
_ => continue,
};
println!("No item for https://www.wikidata.org/w/index.php?search=&search=haswbstatement%3A{}={}&title=Special%3ASearch&go=Go&ns0=1&ns120=1", &prop,&id.id);
*/
}
}
}
}
const USAGE: &'static str = "
MEDRS
Usage:
medrs run [--articles=<file>] [--reviews=<file>] [--topics=<file>] [--journals=<file>] [--publishers=<file>] [--sparql=<file>]
medrs query <query>
medrs refs <wiki> <title>
medrs (-h | --help)
medrs --version
Options:
-h --help Show this screen.
--version Show version.
--reviews=<file> Deprecated reviews (article blacklist)
--topics=<file> Topical whitelist
--journals=<file> OA exceptions (journal whitelist)
--publishers=<file> Beall's list (publisher blacklist)
--sparql=<file> SPARQL pattern
";
#[derive(Debug, Deserialize)]
struct Args {
flag_articles: String,
flag_reviews: String,
flag_topics: String,
flag_journals: String,
flag_publishers: String,
flag_sparql: String,
arg_query: String,
arg_title: String,
arg_wiki: String,
cmd_run: bool,
cmd_query: bool,
cmd_refs: bool,
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
//println!("{:?}", args);
if args.cmd_query {
command_query(&args);
}
if args.cmd_run {
command_run(&args);
}
if args.cmd_refs {
command_refs(&args);
}
} | random_line_split | |
medrs.rs | extern crate config;
extern crate mediawiki;
extern crate papers;
extern crate regex;
extern crate wikibase;
#[macro_use]
extern crate lazy_static;
/*
use papers::crossref2wikidata::Crossref2Wikidata;
use papers::orcid2wikidata::Orcid2Wikidata;
use papers::pubmed2wikidata::Pubmed2Wikidata;
use papers::semanticscholar2wikidata::Semanticscholar2Wikidata;
*/
use docopt::Docopt;
use mediawiki::api::Api;
use papers::wikidata_papers::WikidataPapers;
use papers::*;
use regex::Regex;
use serde::Deserialize;
use std::str;
use std::{
fs::File,
io::{prelude::*, BufReader},
};
use urlencoding;
fn | (filename: &str) -> Vec<String> {
if filename.is_empty() {
return vec![];
}
let file = File::open(filename).unwrap_or_else(|_| panic!("no such file: {}", filename));
let buf = BufReader::new(file);
buf.lines()
.map(|l| l.expect("Could not parse line"))
.collect()
}
fn read_file_to_string(filename: &str) -> String {
let mut file = match File::open(filename) {
Ok(file) => file,
Err(_) => panic!("no such file: {}", filename),
};
let mut file_contents = String::new();
file.read_to_string(&mut file_contents)
.expect("failed to read!");
file_contents
}
fn replace_sparql_placeolder(pattern: &str, sparql: &String, lines: &Vec<String>) -> String {
let rep: String = if lines.is_empty() {
"".to_string()
} else {
"wd:".to_string() + &lines.join(" wd:")
};
sparql.replace(pattern, &rep)
}
fn output_sparql_result_items(sparql: &String) {
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
let result = api.sparql_query(&sparql).expect("SPARQL query failed");
let varname = result["head"]["vars"][0]
.as_str()
.expect("Can't find first variable name in SPARQL result");
let entities = api.entities_from_sparql_result(&result, &varname);
println!("{}", entities.join("\n"));
}
/*
fn get_all_from_stdin() -> String {
let mut payload = Vec::new();
io::stdin().read_to_end(&mut payload).unwrap();
let s = match str::from_utf8(&payload) {
Ok(v) => v,
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
};
s.to_string()
}
*/
fn command_query(args: &Args) {
if args.arg_query.is_empty() {
println!("Requires SPARQL query");
}
let sparql = &args.arg_query;
output_sparql_result_items(&sparql);
}
fn command_run(args: &Args) {
let articles = lines_from_file(&args.flag_articles);
let reviews = lines_from_file(&args.flag_reviews);
let topics = lines_from_file(&args.flag_topics);
let journals = lines_from_file(&args.flag_journals);
let publishers = lines_from_file(&args.flag_publishers);
let mut sparql = read_file_to_string(&args.flag_sparql);
sparql = replace_sparql_placeolder("%%ARTICLES%%", &sparql, &articles);
sparql = replace_sparql_placeolder("%%REVIEWS%%", &sparql, &reviews);
sparql = replace_sparql_placeolder("%%TOPICS%%", &sparql, &topics);
sparql = replace_sparql_placeolder("%%JOURNALS%%", &sparql, &journals);
sparql = replace_sparql_placeolder("%%PUBLISHERS%%", &sparql, &publishers);
output_sparql_result_items(&sparql);
}
fn get_api_url_for_wiki(wiki: &String) -> Option<String> {
// Get site matrix from wikidata
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
let params = api.params_into(&vec![("action", "sitematrix")]);
let site_matrix = api
.get_query_api_json(¶ms)
.expect("Can't load sitematrix from wikidata API");
//println!("{:#?}", &site_matrix);
// Go through the "normal" objects
let mut ret: Option<String> = None;
site_matrix["sitematrix"]
.as_object()
.expect("sitematrix is not an object")
.iter()
.for_each(|(_, data)| {
match data["site"]
.as_array()
.unwrap_or(&vec![])
.iter()
.filter_map(|x| {
if x["dbname"].as_str().unwrap_or("") == wiki {
x["url"].as_str()
} else {
None
}
})
.next()
{
Some(url) => {
ret = Some(url.to_string() + "/w/api.php");
}
None => {}
}
});
// Try the "specials"
site_matrix["sitematrix"]["specials"]
.as_array()
.unwrap_or(&vec![])
.iter()
.for_each(|x| {
if x["dbname"].as_str().unwrap_or("") == wiki {
ret = Some(x["url"].as_str().unwrap_or("").to_string() + "/w/api.php");
}
});
ret
}
fn get_external_urls(api_url: &String, title: &String) -> Vec<String> {
let api = Api::new(&api_url).expect(&format!("Can't connect to {}", &api_url));
let params = api.params_into(&vec![
("action", "query"),
("prop", "extlinks"),
("ellimit", "500"),
("titles", title.as_str()),
]);
let result = api
.get_query_api_json_all(¶ms)
.expect("query.extlinks failed");
let mut urls: Vec<String> = vec![];
result["query"]["pages"]
.as_object()
.expect("query.pages in result not an object")
.iter()
.for_each(|(_page_id, data)| {
data["extlinks"]
.as_array()
.expect("extlinks not an array")
.iter()
.for_each(|x| urls.push(x["*"].as_str().expect("* not a string").to_string()));
});
urls
}
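// Response shape assumed by the field accesses above (MediaWiki API, formatversion 1;
// sketched, not copied from a live response):
// {"query": {"pages": {"<pageid>": {"extlinks": [{"*": "https://doi.org/..."}]}}}}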
fn get_paper_q(api: &Api, id: &GenericWorkIdentifier) -> Option<String> {
let wdp = WikidataPapers::new();
match &id.work_type {
GenericWorkType::Property(prop) => {
let result = wdp.search_external_id(&prop, &id.id, api);
result.get(0).map(|s| s.to_owned()) // First one will do
}
_ => None,
}
/*
wdp.add_adapter(Box::new(Pubmed2Wikidata::new()));
wdp.add_adapter(Box::new(Crossref2Wikidata::new()));
wdp.add_adapter(Box::new(Semanticscholar2Wikidata::new()));
wdp.add_adapter(Box::new(Orcid2Wikidata::new()));
let ids = vec![id.to_owned()];
let ids = wdp.update_from_paper_ids(&ids);
let q = ids
.iter()
.filter_map(|x| match x.work_type {
GenericWorkType::Item => Some(x.id.to_owned()),
_ => None,
})
.next();
q*/
}
fn command_refs(args: &Args) {
if args.arg_wiki.is_empty() {
panic!("wiki code (e.g. 'enwiki') is required");
}
if args.arg_title.is_empty() {
panic!("article title is required");
}
let wiki = &args.arg_wiki;
let title = &args.arg_title;
// Get the API URL for the wiki
let api_url = match get_api_url_for_wiki(&wiki) {
Some(url) => url,
None => panic!("Can't find API URL for {}", &wiki),
};
// Get all external URLs from that page, on that wiki
let urls = get_external_urls(&api_url, &title);
//println!("{:#?}", &urls);
lazy_static! {
static ref RE_DOI: Regex = Regex::new(r#"^.*?//doi\.org/(.+)$"#).unwrap();
static ref RE_PMID: Regex =
Regex::new(r#"^.*?//www\.ncbi\.nlm\.nih\.gov/pubmed/(\d+)$"#).unwrap();
static ref RE_PMCID: Regex =
Regex::new(r#"^.*?//www\.ncbi\.nlm\.nih\.gov/pmc/articles/PMC(\d+)$"#).unwrap();
}
let mut ids: Vec<GenericWorkIdentifier> = vec![];
for url in urls {
match RE_DOI.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
match urlencoding::decode(&id) {
Ok(id) => {
ids.push(GenericWorkIdentifier::new_prop(PROP_DOI, &id));
}
_ => {}
}
}
None => {}
}
match RE_PMID.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
ids.push(GenericWorkIdentifier::new_prop(PROP_PMID, id));
}
None => {}
}
match RE_PMCID.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
ids.push(GenericWorkIdentifier::new_prop(PROP_PMCID, id));
}
None => {}
}
}
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
for id in ids {
match get_paper_q(&api, &id) {
Some(q) => {
println!("{}", &q);
}
None => {
/*
/TODO
let prop = match &id.work_type {
GenericWorkType::Property(p) => p,
_ => continue,
};
println!("No item for https://www.wikidata.org/w/index.php?search=&search=haswbstatement%3A{}={}&title=Special%3ASearch&go=Go&ns0=1&ns120=1", &prop,&id.id);
*/
}
}
}
}
const USAGE: &'static str = "
MEDRS
Usage:
medrs run [--articles=<file>] [--reviews=<file>] [--topics=<file>] [--journals=<file>] [--publishers=<file>] [--sparql=<file>]
medrs query <query>
medrs refs <wiki> <title>
medrs (-h | --help)
medrs --version
Options:
-h --help Show this screen.
--version Show version.
--reviews=<file> Deprecated reviews (article blacklist)
--topics=<file> Topical whitelist
--journals=<file> OA exceptions (journal whitelist)
--publishers=<file> Beall's list (publisher blacklist)
--sparql=<file> SPARQL pattern
";
#[derive(Debug, Deserialize)]
struct Args {
flag_articles: String,
flag_reviews: String,
flag_topics: String,
flag_journals: String,
flag_publishers: String,
flag_sparql: String,
arg_query: String,
arg_title: String,
arg_wiki: String,
cmd_run: bool,
cmd_query: bool,
cmd_refs: bool,
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
//println!("{:?}", args);
if args.cmd_query {
command_query(&args);
}
if args.cmd_run {
command_run(&args);
}
if args.cmd_refs {
command_refs(&args);
}
}
| lines_from_file | identifier_name |
medrs.rs | extern crate config;
extern crate mediawiki;
extern crate papers;
extern crate regex;
extern crate wikibase;
#[macro_use]
extern crate lazy_static;
/*
use papers::crossref2wikidata::Crossref2Wikidata;
use papers::orcid2wikidata::Orcid2Wikidata;
use papers::pubmed2wikidata::Pubmed2Wikidata;
use papers::semanticscholar2wikidata::Semanticscholar2Wikidata;
*/
use docopt::Docopt;
use mediawiki::api::Api;
use papers::wikidata_papers::WikidataPapers;
use papers::*;
use regex::Regex;
use serde::Deserialize;
use std::str;
use std::{
fs::File,
io::{prelude::*, BufReader},
};
use urlencoding;
fn lines_from_file(filename: &str) -> Vec<String> {
if filename.is_empty() {
return vec![];
}
let file = File::open(filename).unwrap_or_else(|_| panic!("no such file: {}", filename));
let buf = BufReader::new(file);
buf.lines()
.map(|l| l.expect("Could not parse line"))
.collect()
}
fn read_file_to_string(filename: &str) -> String {
let mut file = match File::open(filename) {
Ok(file) => file,
Err(_) => panic!("no such file: {}", filename),
};
let mut file_contents = String::new();
file.read_to_string(&mut file_contents)
.expect("failed to read!");
file_contents
}
fn replace_sparql_placeolder(pattern: &str, sparql: &String, lines: &Vec<String>) -> String |
fn output_sparql_result_items(sparql: &String) {
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
let result = api.sparql_query(&sparql).expect("SPARQL query failed");
let varname = result["head"]["vars"][0]
.as_str()
.expect("Can't find first variable name in SPARQL result");
let entities = api.entities_from_sparql_result(&result, &varname);
println!("{}", entities.join("\n"));
}
/*
fn get_all_from_stdin() -> String {
let mut payload = Vec::new();
io::stdin().read_to_end(&mut payload).unwrap();
let s = match str::from_utf8(&payload) {
Ok(v) => v,
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
};
s.to_string()
}
*/
fn command_query(args: &Args) {
if args.arg_query.is_empty() {
println!("Requires SPARQL query");
}
let sparql = &args.arg_query;
output_sparql_result_items(&sparql);
}
fn command_run(args: &Args) {
let articles = lines_from_file(&args.flag_articles);
let reviews = lines_from_file(&args.flag_reviews);
let topics = lines_from_file(&args.flag_topics);
let journals = lines_from_file(&args.flag_journals);
let publishers = lines_from_file(&args.flag_publishers);
let mut sparql = read_file_to_string(&args.flag_sparql);
sparql = replace_sparql_placeolder("%%ARTICLES%%", &sparql, &articles);
sparql = replace_sparql_placeolder("%%REVIEWS%%", &sparql, &reviews);
sparql = replace_sparql_placeolder("%%TOPICS%%", &sparql, &topics);
sparql = replace_sparql_placeolder("%%JOURNALS%%", &sparql, &journals);
sparql = replace_sparql_placeolder("%%PUBLISHERS%%", &sparql, &publishers);
output_sparql_result_items(&sparql);
}
fn get_api_url_for_wiki(wiki: &String) -> Option<String> {
// Get site matrix from wikidata
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
let params = api.params_into(&vec![("action", "sitematrix")]);
let site_matrix = api
.get_query_api_json(¶ms)
.expect("Can't load sitematrix from wikidata API");
//println!("{:#?}", &site_matrix);
// Go through the "normal" objects
let mut ret: Option<String> = None;
site_matrix["sitematrix"]
.as_object()
.expect("sitematrix is not an object")
.iter()
.for_each(|(_, data)| {
match data["site"]
.as_array()
.unwrap_or(&vec![])
.iter()
.filter_map(|x| {
if x["dbname"].as_str().unwrap_or("") == wiki {
x["url"].as_str()
} else {
None
}
})
.next()
{
Some(url) => {
ret = Some(url.to_string() + "/w/api.php");
}
None => {}
}
});
// Try the "specials"
site_matrix["sitematrix"]["specials"]
.as_array()
.unwrap_or(&vec![])
.iter()
.for_each(|x| {
if x["dbname"].as_str().unwrap_or("") == wiki {
ret = Some(x["url"].as_str().unwrap_or("").to_string() + "/w/api.php");
}
});
ret
}
fn get_external_urls(api_url: &String, title: &String) -> Vec<String> {
let api = Api::new(&api_url).expect(&format!("Can't connect to {}", &api_url));
let params = api.params_into(&vec![
("action", "query"),
("prop", "extlinks"),
("ellimit", "500"),
("titles", title.as_str()),
]);
let result = api
.get_query_api_json_all(¶ms)
.expect("query.extlinks failed");
let mut urls: Vec<String> = vec![];
result["query"]["pages"]
.as_object()
.expect("query.pages in result not an object")
.iter()
.for_each(|(_page_id, data)| {
data["extlinks"]
.as_array()
.expect("extlinks not an array")
.iter()
.for_each(|x| urls.push(x["*"].as_str().expect("* not a string").to_string()));
});
urls
}
fn get_paper_q(api: &Api, id: &GenericWorkIdentifier) -> Option<String> {
let wdp = WikidataPapers::new();
match &id.work_type {
GenericWorkType::Property(prop) => {
let result = wdp.search_external_id(&prop, &id.id, api);
result.get(0).map(|s| s.to_owned()) // First one will do
}
_ => None,
}
/*
wdp.add_adapter(Box::new(Pubmed2Wikidata::new()));
wdp.add_adapter(Box::new(Crossref2Wikidata::new()));
wdp.add_adapter(Box::new(Semanticscholar2Wikidata::new()));
wdp.add_adapter(Box::new(Orcid2Wikidata::new()));
let ids = vec![id.to_owned()];
let ids = wdp.update_from_paper_ids(&ids);
let q = ids
.iter()
.filter_map(|x| match x.work_type {
GenericWorkType::Item => Some(x.id.to_owned()),
_ => None,
})
.next();
q*/
}
fn command_refs(args: &Args) {
if args.arg_wiki.is_empty() {
panic!("wiki code (e.g. 'enwiki') is required");
}
if args.arg_title.is_empty() {
panic!("article title is required");
}
let wiki = &args.arg_wiki;
let title = &args.arg_title;
// Get the API URL for the wiki
let api_url = match get_api_url_for_wiki(&wiki) {
Some(url) => url,
None => panic!("Can't find API URL for {}", &wiki),
};
// Get all external URLs from that page, on that wiki
let urls = get_external_urls(&api_url, &title);
//println!("{:#?}", &urls);
lazy_static! {
static ref RE_DOI: Regex = Regex::new(r#"^.*?//doi\.org/(.+)$"#).unwrap();
static ref RE_PMID: Regex =
Regex::new(r#"^.*?//www\.ncbi\.nlm\.nih\.gov/pubmed/(\d+)$"#).unwrap();
static ref RE_PMCID: Regex =
Regex::new(r#"^.*?//www\.ncbi\.nlm\.nih\.gov/pmc/articles/PMC(\d+)$"#).unwrap();
}
let mut ids: Vec<GenericWorkIdentifier> = vec![];
for url in urls {
match RE_DOI.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
match urlencoding::decode(&id) {
Ok(id) => {
ids.push(GenericWorkIdentifier::new_prop(PROP_DOI, &id));
}
_ => {}
}
}
None => {}
}
match RE_PMID.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
ids.push(GenericWorkIdentifier::new_prop(PROP_PMID, id));
}
None => {}
}
match RE_PMCID.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
ids.push(GenericWorkIdentifier::new_prop(PROP_PMCID, id));
}
None => {}
}
}
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
for id in ids {
match get_paper_q(&api, &id) {
Some(q) => {
println!("{}", &q);
}
None => {
/*
/TODO
let prop = match &id.work_type {
GenericWorkType::Property(p) => p,
_ => continue,
};
println!("No item for https://www.wikidata.org/w/index.php?search=&search=haswbstatement%3A{}={}&title=Special%3ASearch&go=Go&ns0=1&ns120=1", &prop,&id.id);
*/
}
}
}
}
const USAGE: &'static str = "
MEDRS
Usage:
medrs run [--articles=<file>] [--reviews=<file>] [--topics=<file>] [--journals=<file>] [--publishers=<file>] [--sparql=<file>]
medrs query <query>
medrs refs <wiki> <title>
medrs (-h | --help)
medrs --version
Options:
-h --help Show this screen.
--version Show version.
--reviews=<file> Deprecated reviews (article blacklist)
--topics=<file> Topical whitelist
--journals=<file> OA exceptions (journal whitelist)
--publishers=<file> Beall's list (publisher blacklist)
--sparql=<file> SPARQL pattern
";
#[derive(Debug, Deserialize)]
struct Args {
flag_articles: String,
flag_reviews: String,
flag_topics: String,
flag_journals: String,
flag_publishers: String,
flag_sparql: String,
arg_query: String,
arg_title: String,
arg_wiki: String,
cmd_run: bool,
cmd_query: bool,
cmd_refs: bool,
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
//println!("{:?}", args);
if args.cmd_query {
command_query(&args);
}
if args.cmd_run {
command_run(&args);
}
if args.cmd_refs {
command_refs(&args);
}
}
| {
let rep: String = if lines.is_empty() {
"".to_string()
} else {
"wd:".to_string() + &lines.join(" wd:")
};
sparql.replace(pattern, &rep)
} | identifier_body |
views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from guozu.models import *
from django.db.models import Q
import datetime
from django import forms
from django.utils import simplejson as json
import requests
from PIL import Image, ImageOps, ImageDraw
import urllib2,urllib,cStringIO
import random,time,hashlib,string
import base64
from ServerAPI import ServerAPI
from django.core import serializers
import os
from PIL import Image, ImageOps, ImageDraw, ImageFont, ExifTags
import geoip2.database
import textwrap
class Sign:
def __init__(self, appId, appSecret, url):
self.appId = appId
self.appSecret = appSecret
self.ret = {
'nonceStr': self.__create_nonce_str(),
'jsapi_ticket': self.getJsApiTicket(),
'timestamp': self.__create_timestamp(),
'url': url
}
def __create_nonce_str(self):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def __create_timestamp(self):
return int(time.time())
def sign(self):
raw = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
print raw
self.ret['signature'] = hashlib.sha1(raw).hexdigest()
return self.ret
def getJsApiTicket(self):
data = json.loads(open(os.getcwd()+'/jsapi_ticket.json').read())
jsapi_ticket = data['jsapi_ticket']
if data['expire_time'] < time.time():
url = "https://api.weixin.qq.com/cgi-bin/ticket/getticket?type=jsapi&access_token=%s" % (self.getAccessToken())
response = requests.get(url)
jsapi_ticket = json.loads(response.text)['ticket']
data['jsapi_ticket'] = jsapi_ticket
data['expire_time'] = int(time.time()) + 7000
fopen = open('jsapi_ticket.json', 'w')
fopen.write(json.dumps(data))
fopen.close()
return jsapi_ticket
def getAccessToken(self):
data = json.loads(open(os.getcwd()+'/access_token.json').read())
access_token = data['access_token']
if data['expire_time'] < time.time():
url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s" % (self.appId, self.appSecret)
response = requests.get(url)
access_token = json.loads(response.text)['access_token']
data['access_token'] = access_token
data['expire_time'] = int(time.time()) + 7000
fopen = open('access_token.json', 'w')
fopen.write(json.dumps(data))
fopen.close()
return access_token
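# Illustrative sketch (not part of the original app): a view that feeds wx.config()
# on the client with the Sign payload. The appId/appSecret literals are placeholders,
# and Sign() may hit the WeChat API to refresh the cached jsapi_ticket.
def wx_config_demo(request):
    sign = Sign('wxPLACEHOLDERAPPID', 'placeholder-secret',
                request.build_absolute_uri())
    # The returned dict carries nonceStr, timestamp, url and the sha1 signature,
    # which is the payload wx.config() expects on the page.
    return HttpResponse(json.dumps(sign.sign()))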
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def add_corners(im, rad):
circle = Image.new('L', (rad * 2, rad * 2), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)
alpha = Image.new('L', im.size, 100)
w, h = im.size
alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))
alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))
alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))
alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))
im.putalpha(alpha)
return im
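# Illustrative usage (sketch): round an uploaded 100x100 avatar before pasting it
# onto the share card; a radius of 10 keeps the corners subtle at that size.
# avatar = add_corners(Image.open('upload/m_' + openid + '.png'), 10)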
def guozuIndex(request):
return render_to_response('guozu/index.html',{'random': random.randrange(0, 10)})
def guozuSaveImage(request):
# if request.is_ajax():
# img = request.POST.get("img")
# openid = request.POST.get("openid")
# file = cStringIO.StringIO(urllib.urlopen(img).read())
# im = Image.open(file)
# im.thumbnail((100, 100), Image.ANTIALIAS)
# im.save("/home/project/project/template/guozu/upload/wx_"+openid+".png")
return HttpResponse('1')
def guozuImage(request):
ba = Image.open("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/logo.png")
im = Image.open('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/test.png')
bg = Image.open('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/img.png')
ba.paste(im, (199,85))
ba.paste(bg, (199,85), mask=bg)
draw = ImageDraw.Draw(ba)
font = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 20)
font1 = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 34)
font2 = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 30)
draw.text((194, 401),"123213",(255,240,8),font=font)
draw.text((75, 230),unicode('只要血液一天是红色的,','utf-8'),(255,255,255),font=font1) | draw.text((110, 230),unicode('昆明,你能为我们','utf-8'),(255,255,255),font=font1)
draw.text((90, 290),unicode('带来第一场胜利吗?!','utf-8'),(255,255,255),font=font1)
draw.text((130, 230),unicode('莫愁长征无知己,','utf-8'),(255,255,255),font=font1)
draw.text((60, 290),unicode('天涯海角永相随!国足必胜!','utf-8'),(255,255,255),font=font2)
draw.text((55, 230),unicode('国足,我们真的不想再失望!','utf-8'),(255,255,255),font=font2)
draw.text((90, 290),unicode('拿出斗志,拿下一场!','utf-8'),(255,255,255),font=font1)
ba.save("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/new.png")
return HttpResponse(1)
def guozuGetUserInfo(request):
if request.is_ajax():
code = request.POST.get("code")
r = requests.get('https://api.weixin.qq.com/sns/oauth2/access_token?appid=wx7dfb9e650ce540b1&secret=538b5277fe1d37660ed4b218f7057c86&code='+code+'&grant_type=authorization_code')
access_token = json.loads(r.text)['access_token']
openid = json.loads(r.text)['openid']
q = requests.get('https://api.weixin.qq.com/sns/userinfo?access_token='+access_token+'&openid='+openid+'&lang=zh_CN')
q.encoding = 'utf-8'
return HttpResponse(json.dumps({"nickname": json.loads(q.text)['nickname'], "openid": json.loads(q.text)['openid']}))
def guozuSubmit(request):
if request.is_ajax():
ip = get_client_ip(request)
try:
reader = geoip2.database.Reader('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/GeoLite2-City.mmdb')
r = reader.city(ip)
city = r.city.names.get('zh-CN', '')
except:
city = u"北京"
return HttpResponse(city)
# img = request.POST.get('img')
# word = request.POST.get('word')
# name = request.POST.get('name')
# openid = request.POST.get('openid')
# p = GuozuRecord(openid=openid, name=name, city=city, image=img, word=word, date=datetime.datetime.now())
# p.save()
# if GuozuCity.objects.filter(name=city).count():
# count = GuozuCity.objects.get(name=city).count
# GuozuCity.objects.filter(name=city).update(count=count+1)
# else:
# p = GuozuCity(name=city, count=1)
# p.save()
# return HttpResponse(json.dumps({"city": city, "record": serializers.serialize('json', GuozuRecord.objects.filter(openid=openid).order_by('-id'))}))
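# Note on the lookup above (descriptive; the .mmdb path belongs to this deployment):
# geoip2.database.Reader maps the client IP to a city record, and r.city.names is a
# dict keyed by locale, so .get('zh-CN', '') returns '' when no Chinese name exists;
# the bare except falls back to u"北京" for private or unresolvable addresses.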
def guozuGetCode(request):
if request.is_ajax():
phone = request.POST.get('phone')
AppKey = '1a9c8ee74f8ba02ddfe5c934d83a4cb5';
AppSecret = '0f1e0d91b368';
p = ServerAPI(AppKey,AppSecret, UseSSLLib = True);
a = p.sendSmsCode(phone,'')
return HttpResponse(a['obj']);
def guozuGetMsg(request):
if request.is_ajax():
phone = request.POST.get('phone')
count = GuozuCount.objects.get(id=1).count
count = count + 1
GuozuCount.objects.filter(id=1).update(count=count)
number = GuozuList.objects.get(id=count).number
AppKey = '1a9c8ee74f8ba02ddfe5c934d83a4cb5';
AppSecret = '0f1e0d91b368';
p = ServerAPI(AppKey,AppSecret, UseSSLLib = True);
return HttpResponse(number);
def guozuGetData(request):
if request.is_ajax():
openid = request.POST.get('openid')
serialized_obj1 = serializers.serialize('json', GuozuCity.objects.all().order_by('-count'))
if GuozuRecord.objects.filter(openid=openid).count():
record = serializers.serialize('json', GuozuRecord.objects.filter(openid=openid).order_by('-id'))
rtype = 1
else:
record = serializers.serialize('json', GuozuRecord.objects.all().order_by('-id')[0:10])
rtype = 2
data = serializers.serialize('json', GuozuRecord.objects.all().order_by('-id')[0:10])
return HttpResponse(json.dumps({"city": serialized_obj1, "record": record, "data": data, "total": GuozuRecord.objects.all().count(), "type": rtype}))
def guozuUpload(request):
if request.method == 'POST':
imageid = request.POST.get('openid')
file_ext = request.FILES['file'].name.split('.')[-1]
user_upload_folder = '/Users/dongli/Documents/Outsourcing/project/project/template/guozu/upload/'
file_upload = open(os.path.join(user_upload_folder, imageid+'.'+file_ext), 'wb')
file_upload.write(request.FILES['file'].read())
file_upload.close()
im = Image.open(os.path.join(user_upload_folder, imageid+'.'+file_ext))
orientation = 274
try:
exif = im._getexif()
if exif:
exif = dict(exif.items())
if exif[orientation] == 3:
im = im.rotate(180, expand=True)
elif exif[orientation] == 6:
im = im.rotate(270, expand=True)
elif exif[orientation] == 8:
im = im.rotate(90, expand=True)
except:
pass
im.thumbnail((100, 100), Image.ANTIALIAS)
im.save(os.path.join(user_upload_folder, 'm_'+imageid+'.'+file_ext))
return HttpResponse(imageid+'.'+file_ext)
def clipResizeImg(**args):
args_key = {'ori_img':'','dst_img':'','dst_w':'','dst_h':'','save_q':100}
arg = {}
for key in args_key:
if key in args:
arg[key] = args[key]
im = Image.open(arg['ori_img'])
ori_w,ori_h = im.size
dst_scale = float(arg['dst_h']) / arg['dst_w']
ori_scale = float(ori_h) / ori_w
if ori_scale >= dst_scale:
width = ori_w
height = int(width*dst_scale)
x = 0
y = (ori_h - height) / 3
else:
height = ori_h
width = int(height*dst_scale)
x = (ori_w - width) / 2
y = 0
box = (x,y,width+x,height+y)
newIm = im.crop(box)
im = None
ratio = float(arg['dst_w']) / width
newWidth = int(width * ratio)
newHeight = int(height * ratio)
newIm.resize((newWidth,newHeight),Image.ANTIALIAS).save(arg['dst_img'],quality=arg['save_q']) | draw.text((110, 290),unicode('心就一天是国足的!','utf-8'),(255,255,255),font=font1)
draw.text((50, 230),unicode('赢了一起狂,输了一起扛!','utf-8'),(255,255,255),font=font1)
draw.text((180, 290),unicode('国足雄起!','utf-8'),(255,255,255),font=font1) | random_line_split |
views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from guozu.models import *
from django.db.models import Q
import datetime
from django import forms
from django.utils import simplejson as json
import requests
from PIL import Image, ImageOps, ImageDraw
import urllib2,urllib,cStringIO
import random,time,hashlib,string
import base64
from ServerAPI import ServerAPI
from django.core import serializers
import os
from PIL import Image, ImageOps, ImageDraw, ImageFont, ExifTags
import geoip2.database
import textwrap
class Sign:
def __init__(self, appId, appSecret, url):
self.appId = appId
self.appSecret = appSecret
self.ret = {
'nonceStr': self.__create_nonce_str(),
'jsapi_ticket': self.getJsApiTicket(),
'timestamp': self.__create_timestamp(),
'url': url
}
def __create_nonce_str(self):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def __create_timestamp(self):
return int(time.time())
def sign(self):
raw = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
print raw
self.ret['signature'] = hashlib.sha1(raw).hexdigest()
return self.ret
def getJsApiTicket(self):
data = json.loads(open(os.getcwd()+'/jsapi_ticket.json').read())
jsapi_ticket = data['jsapi_ticket']
if data['expire_time'] < time.time():
url = "https://api.weixin.qq.com/cgi-bin/ticket/getticket?type=jsapi&access_token=%s" % (self.getAccessToken())
response = requests.get(url)
jsapi_ticket = json.loads(response.text)['ticket']
data['jsapi_ticket'] = jsapi_ticket
data['expire_time'] = int(time.time()) + 7000
fopen = open('jsapi_ticket.json', 'w')
fopen.write(json.dumps(data))
fopen.close()
return jsapi_ticket
def getAccessToken(self):
data = json.loads(open(os.getcwd()+'/access_token.json').read())
access_token = data['access_token']
if data['expire_time'] < time.time():
url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s" % (self.appId, self.appSecret)
response = requests.get(url)
access_token = json.loads(response.text)['access_token']
data['access_token'] = access_token
data['expire_time'] = int(time.time()) + 7000
fopen = open('access_token.json', 'w')
fopen.write(json.dumps(data))
fopen.close()
return access_token
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def add_corners(im, rad):
circle = Image.new('L', (rad * 2, rad * 2), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)
alpha = Image.new('L', im.size, 100)
w, h = im.size
alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))
alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))
alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))
alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))
im.putalpha(alpha)
return im
def guozuIndex(request):
return render_to_response('guozu/index.html',{'random': random.randrange(0, 10)})
def guozuSaveImage(request):
# if request.is_ajax():
# img = request.POST.get("img")
# openid = request.POST.get("openid")
# file = cStringIO.StringIO(urllib.urlopen(img).read())
# im = Image.open(file)
# im.thumbnail((100, 100), Image.ANTIALIAS)
# im.save("/home/project/project/template/guozu/upload/wx_"+openid+".png")
return HttpResponse('1')
def guozuImage(request):
ba = Image.open("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/logo.png")
im = Image.open('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/test.png')
bg = Image.open('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/img.png')
ba.paste(im, (199,85))
ba.paste(bg, (199,85), mask=bg)
draw = ImageDraw.Draw(ba)
font = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 20)
font1 = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 34)
font2 = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 30)
draw.text((194, 401),"123213",(255,240,8),font=font)
draw.text((75, 230),unicode('只要血液一天是红色的,','utf-8'),(255,255,255),font=font1)
draw.text((110, 290),unicode('心就一天是国足的!','utf-8'),(255,255,255),font=font1)
draw.text((50, 230),unicode('赢了一起狂,输了一起扛!','utf-8'),(255,255,255),font=font1)
draw.text((180, 290),unicode('国足雄起!','utf-8'),(255,255,255),font=font1)
draw.text((110, 230),unicode('昆明,你能为我们','utf-8'),(255,255,255),font=font1)
draw.text((90, 290),unicode('带来第一场胜利吗?!','utf-8'),(255,255,255),font=font1)
draw.text((130, 230),unicode('莫愁长征无知己,','utf-8'),(255,255,255),font=font1)
draw.text((60, 290),unicode('天涯海角永相随!国足必胜!','utf-8'),(255,255,255),font=font2)
draw.text((55, 230),unicode('国足,我们真的不想再失望!','utf-8'),(255,255,255),font=font2)
draw.text((90, 290),unicode('拿出斗志,拿下一场!','utf-8'),(255,255,255),font=font1)
ba.save("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/new.png")
return HttpResponse(1)
def guozuGetUserInfo(request):
if request.is_ajax():
code = request.POST.get("code")
r = requests.get('https://api.weixin.qq.com/sns/oauth2/access_token?appid=wx7dfb9e650ce540b1&secret=538b5277fe1d37660ed4b218f7057c86&code='+code+'&grant_type=authorization_code')
access_token = json.loads(r.text)['access_token']
openid = json.loads(r.text)['openid']
q = requests.get('https://api.weixin.qq.com/sns/userinfo?access_token='+access_token+'&openid='+openid+'&lang=zh_CN')
q.encoding = 'utf-8'
return HttpResponse(json.dumps({"nickname": json.loads(q.text)['nickname'], "openid": json.loads(q.text)['openid']}))
def guozuSubmit(request):
if request.is_ajax():
ip = get_client_ip(request)
try:
reader = geoip2.database.Reader('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/GeoLite2-City.mmdb')
r = reader.city(ip)
city = r.city.names.get('zh-CN', '')
except:
city = u"北京"
return HttpResponse(city)
# img = request.POST.get('img')
# word = request.POST.get('word')
# name = request.POST.get('name')
# openid = request.POST.get('openid')
# p = GuozuRecord(openid=openid, name=name, city=city, image=img, word=word, date=datetime.datetime.now())
# p.save()
# if GuozuCity.objects.filter(name=city).count():
# count = GuozuCity.objects.get(name=city).count
# GuozuCity.objects.filter(name=city).update(count=count+1)
# else:
# p = GuozuCity(name=city, count=1)
# p.save()
# return HttpResponse(json.dumps({"city": city, "record": serializers.serialize('json', GuozuRecord.objects.filter(openid=openid).order_by('-id'))}))
def guozuGetCode(request):
if request.is_ajax():
phone = request.POST.get('phone')
AppKey = '1a9c8ee74f8ba02ddfe5c934d83a4cb5';
AppSecret = '0f1e0d91b368';
p = ServerAPI(AppKey,AppSecret, UseSSLLib = True);
a = p.sendSmsCode(phone,'')
return HttpResponse(a['obj']);
def guozuGetMsg(request):
if request.is_ajax():
phone = request.POST.get('phone')
count = GuozuCount.objects.get(id=1).count
count = count + 1
GuozuCount.objects.filter(id=1).update(count=count)
number = GuozuList.objects.get(id=count).number
AppKey = '1a9c8ee74f8ba02ddfe5c934d83a4cb5';
AppSecret = '0f1e0d91b368';
p = ServerAPI(AppKey,AppSecret, UseSSLLib = True);
return HttpResponse(number);
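# Note (sketch, not original code): the read-increment-write on GuozuCount above can
# race under concurrent requests and hand out the same GuozuList row twice; a single
# atomic UPDATE would avoid that:
# from django.db.models import F
# GuozuCount.objects.filter(id=1).update(count=F('count') + 1)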
def guozuGetData(request):
if request.is_ajax():
openid = request.POST.get('openid')
serialized_obj1 = serializers.serialize('json', GuozuCity.objects.all().order_by('-count'))
if GuozuRecord.objects.filter(openid=openid).count():
record = serializers.serialize('json', GuozuRecord.objects.filter(openid=openid).order_by('-id'))
rtype = 1
else:
record = serializers.serialize('json', GuozuRecord.objects.all().order_by('-id')[0:10])
rtype = 2
data = serializers.serialize('json', GuozuRecord.objects.all().order_by('-id')[0:10])
return HttpResponse(json.dumps({"city": serialized_obj1, "record": record, "data": data, "total": GuozuRecord.objects.all().count(), "type": rtype}))
def guozuUpload(request):
if request.method == 'POST':
imageid = request.POST.get('openid')
file_ext = request.FILES['file'].name.split('.')[-1]
user_upload_folder = '/Users/dongli/Documents/Outsourcing/project/project/template/guozu/upload/'
file_upload = open(os.path.join(user_upload_folder, imageid+'.'+file_ext), 'wb')
file_upload.write(request.FILES['file'].read())
file_upload.close()
im = Image.open(os.path.join(user_upload_folder, imageid+'.'+file_ext))
orientation = 274
try:
exif = im._getexif()
if exif:
exif = dict(exif.items())
if exif[orientation] == 3:
im = im.rotate(180, expand=True)
elif exif[orientation] == 6:
im = im.rotate(270, expand=True)
elif exif[orientation] == 8:
im = im.rotate(90, expand=True)
except:
pass
im.thumb | )
im.save(os.path.join(user_upload_folder, 'm_'+imageid+'.'+file_ext))
return HttpResponse(imageid+'.'+file_ext)
def clipResizeImg(**args):
args_key = {'ori_img':'','dst_img':'','dst_w':'','dst_h':'','save_q':100}
arg = {}
for key in args_key:
if key in args:
arg[key] = args[key]
im = Image.open(arg['ori_img'])
ori_w,ori_h = im.size
dst_scale = float(arg['dst_h']) / arg['dst_w']
ori_scale = float(ori_h) / ori_w
if ori_scale >= dst_scale:
width = ori_w
height = int(width*dst_scale)
x = 0
y = (ori_h - height) / 3
else:
height = ori_h
width = int(height*dst_scale)
x = (ori_w - width) / 2
y = 0
box = (x,y,width+x,height+y)
newIm = im.crop(box)
im = None
ratio = float(arg['dst_w']) / width
newWidth = int(width * ratio)
newHeight = int(height * ratio)
newIm.resize((newWidth,newHeight),Image.ANTIALIAS).save(arg['dst_img'],quality=arg['save_q'])
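# Illustrative usage (sketch): crop an upload (biased toward the top third when the
# image is taller than the target) and resize it to a 320x320 square; save_q defaults to 100.
# clipResizeImg(ori_img='upload/a.png', dst_img='upload/a_320.png', dst_w=320, dst_h=320)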
| nail((100, 100), Image.ANTIALIAS | conditional_block |
views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from guozu.models import *
from django.db.models import Q
import datetime
from django import forms
from django.utils import simplejson as json
import requests
from PIL import Image, ImageOps, ImageDraw
import urllib2,urllib,cStringIO
import random,time,hashlib,string
import base64
from ServerAPI import ServerAPI
from django.core import serializers
import os
from PIL import Image, ImageOps, ImageDraw, ImageFont, ExifTags
import geoip2.database
import textwrap
class Sign:
def __init__(self, appId, appSecret, url):
self.appId = appId
self.appSecret = appSecret
self.ret = {
'nonceStr': self.__create_nonce_str(),
'jsapi_ticket': self.getJsApiTicket(),
'timestamp': self.__create_timestamp(),
'url': url
}
def __create_nonce_str(self):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def __create_timestamp(self):
return int(time.time())
def sign(self):
raw = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
print raw
self.ret['signature'] = hashlib.sha1(raw).hexdigest()
return self.ret
def getJsApiTicket(self):
data = json.loads(open(os.getcwd()+'/jsapi_ticket.json').read())
jsapi_ticket = data['jsapi_ticket']
if data['expire_time'] < time.time():
url = "https://api.weixin.qq.com/cgi-bin/ticket/getticket?type=jsapi&access_token=%s" % (self.getAccessToken())
response = requests.get(url)
jsapi_ticket = json.loads(response.text)['ticket']
data['jsapi_ticket'] = jsapi_ticket
data['expire_time'] = int(time.time()) + 7000
fopen = open('jsapi_ticket.json', 'w')
fopen.write(json.dumps(data))
fopen.close()
return jsapi_ticket
def getAccessToken(self):
data = json.loads(open(os.getcwd()+'/access_token.json').read())
access_token = data['access_token']
if data['expire_time'] < time.time():
url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s" % (self.appId, self.appSecret)
response = requests.get(url)
access_token = json.loads(response.text)['access_token']
data['access_token'] = access_token
data['expire_time'] = int(time.time()) + 7000
fopen = open('access_token.json', 'w')
fopen.write(json.dumps(data))
fopen.close()
return access_token
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def add_corners(im, rad):
circle = Image.new('L', (rad * 2, rad * 2), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)
alpha = Image.new('L', im.size, 100)
w, h = im.size
alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))
alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))
alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))
alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))
im.putalpha(alpha)
return im
def guozuIndex(request):
return render_to_response('guozu/index.html',{'random': random.randrange(0, 10)})
def guozuSaveImage(request):
# if request.is_ajax():
# img = request.POST.get("img")
# openid = request.POST.get("openid")
# file = cStringIO.StringIO(urllib.urlopen(img).read())
# im = Image.open(file)
# im.thumbnail((100, 100), Image.ANTIALIAS)
# im.save("/home/project/project/template/guozu/upload/wx_"+openid+".png")
|
def guozuImage(request):
ba = Image.open("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/logo.png")
im = Image.open('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/test.png')
bg = Image.open('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/img.png')
ba.paste(im, (199,85))
ba.paste(bg, (199,85), mask=bg)
draw = ImageDraw.Draw(ba)
font = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 20)
font1 = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 34)
font2 = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 30)
draw.text((194, 401),"123213",(255,240,8),font=font)
draw.text((75, 230),unicode('只要血液一天是红色的,','utf-8'),(255,255,255),font=font1)
draw.text((110, 290),unicode('心就一天是国足的!','utf-8'),(255,255,255),font=font1)
draw.text((50, 230),unicode('赢了一起狂,输了一起扛!','utf-8'),(255,255,255),font=font1)
draw.text((180, 290),unicode('国足雄起!','utf-8'),(255,255,255),font=font1)
draw.text((110, 230),unicode('昆明,你能为我们','utf-8'),(255,255,255),font=font1)
draw.text((90, 290),unicode('带来第一场胜利吗?!','utf-8'),(255,255,255),font=font1)
draw.text((130, 230),unicode('莫愁长征无知己,','utf-8'),(255,255,255),font=font1)
draw.text((60, 290),unicode('天涯海角永相随!国足必胜!','utf-8'),(255,255,255),font=font2)
draw.text((55, 230),unicode('国足,我们真的不想再失望!','utf-8'),(255,255,255),font=font2)
draw.text((90, 290),unicode('拿出斗志,拿下一场!','utf-8'),(255,255,255),font=font1)
ba.save("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/new.png")
return HttpResponse(1)
def guozuGetUserInfo(request):
if request.is_ajax():
code = request.POST.get("code")
r = requests.get('https://api.weixin.qq.com/sns/oauth2/access_token?appid=wx7dfb9e650ce540b1&secret=538b5277fe1d37660ed4b218f7057c86&code='+code+'&grant_type=authorization_code')
access_token = json.loads(r.text)['access_token']
openid = json.loads(r.text)['openid']
q = requests.get('https://api.weixin.qq.com/sns/userinfo?access_token='+access_token+'&openid='+openid+'&lang=zh_CN')
q.encoding = 'utf-8'
return HttpResponse(json.dumps({"nickname": json.loads(q.text)['nickname'], "openid": json.loads(q.text)['openid']}))
def guozuSubmit(request):
if request.is_ajax():
ip = get_client_ip(request)
try:
reader = geoip2.database.Reader('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/GeoLite2-City.mmdb')
r = reader.city(ip)
city = r.city.names.get('zh-CN', '')
except:
city = u"北京"
return HttpResponse(city)
# img = request.POST.get('img')
# word = request.POST.get('word')
# name = request.POST.get('name')
# openid = request.POST.get('openid')
# p = GuozuRecord(openid=openid, name=name, city=city, image=img, word=word, date=datetime.datetime.now())
# p.save()
# if GuozuCity.objects.filter(name=city).count():
# count = GuozuCity.objects.get(name=city).count
# GuozuCity.objects.filter(name=city).update(count=count+1)
# else:
# p = GuozuCity(name=city, count=1)
# p.save()
# return HttpResponse(json.dumps({"city": city, "record": serializers.serialize('json', GuozuRecord.objects.filter(openid=openid).order_by('-id'))}))
def guozuGetCode(request):
if request.is_ajax():
phone = request.POST.get('phone')
AppKey = '1a9c8ee74f8ba02ddfe5c934d83a4cb5';
AppSecret = '0f1e0d91b368';
p = ServerAPI(AppKey,AppSecret, UseSSLLib = True);
a = p.sendSmsCode(phone,'')
return HttpResponse(a['obj']);
def guozuGetMsg(request):
if request.is_ajax():
phone = request.POST.get('phone')
count = GuozuCount.objects.get(id=1).count
count = count + 1
GuozuCount.objects.filter(id=1).update(count=count)
number = GuozuList.objects.get(id=count).number
AppKey = '1a9c8ee74f8ba02ddfe5c934d83a4cb5';
AppSecret = '0f1e0d91b368';
p = ServerAPI(AppKey,AppSecret, UseSSLLib = True);
return HttpResponse(number);
def guozuGetData(request):
if request.is_ajax():
openid = request.POST.get('openid')
serialized_obj1 = serializers.serialize('json', GuozuCity.objects.all().order_by('-count'))
if GuozuRecord.objects.filter(openid=openid).count():
record = serializers.serialize('json', GuozuRecord.objects.filter(openid=openid).order_by('-id'))
rtype = 1
else:
record = serializers.serialize('json', GuozuRecord.objects.all().order_by('-id')[0:10])
rtype = 2
data = serializers.serialize('json', GuozuRecord.objects.all().order_by('-id')[0:10])
return HttpResponse(json.dumps({"city": serialized_obj1, "record": record, "data": data, "total": GuozuRecord.objects.all().count(), "type": rtype}))
def guozuUpload(request):
if request.method == 'POST':
imageid = request.POST.get('openid')
file_ext = request.FILES['file'].name.split('.')[-1]
user_upload_folder = '/Users/dongli/Documents/Outsourcing/project/project/template/guozu/upload/'
file_upload = open( os.path.join(user_upload_folder, imageid+'.'+file_ext), 'wb')
file_upload.write(request.FILES['file'].read())
file_upload.close()
im = Image.open(os.path.join(user_upload_folder, imageid+'.'+file_ext))
orientation = 274
try:
exif = im._getexif()
if exif:
exif = dict(exif.items())
if exif[orientation] == 3:
im = im.rotate(180, expand=True)
elif exif[orientation] == 6:
im = im.rotate(270, expand=True)
elif exif[orientation] == 8:
im = im.rotate(90, expand=True)
except:
pass
im.thumbnail((100, 100), Image.ANTIALIAS)
im.save(os.path.join(user_upload_folder, 'm_'+imageid+'.'+file_ext))
return HttpResponse(imageid+'.'+file_ext)
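## Hedged note (editor addition): 274 is the standard EXIF Orientation tag;
## the branches above undo the three plain rotations. The same mapping as a
## table, with a hypothetical helper name:
def _rotation_for_orientation(value):
    return {3: 180, 6: 270, 8: 90}.get(value, 0)  # degrees, counter-clockwise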
def clipResizeImg(**args):
args_key = {'ori_img':'','dst_img':'','dst_w':'','dst_h':'','save_q':100}
arg = {}
for key in args_key:
if key in args:
arg[key] = args[key]
im = Image.open(arg['ori_img'])
ori_w,ori_h = im.size
dst_scale = float(arg['dst_h']) / arg['dst_w']
ori_scale = float(ori_h) / ori_w
if ori_scale >= dst_scale:
width = ori_w
height = int(width*dst_scale)
x = 0
y = (ori_h - height) / 3
else:
height = ori_h
width = int(height/dst_scale)  # dst_scale is h/w, so width = height * dst_w / dst_h
x = (ori_w - width) / 2
y = 0
box = (x,y,width+x,height+y)
newIm = im.crop(box)
im = None
ratio = float(arg['dst_w']) / width
newWidth = int(width * ratio)
newHeight = int(height * ratio)
newIm.resize((newWidth,newHeight),Image.ANTIALIAS).save(arg['dst_img'],quality=arg['save_q'])
| return HttpResponse('1') | identifier_body |
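## Hedged usage sketch (editor addition): clipResizeImg() above crops the
## source to the target aspect ratio (biased towards the upper third
## vertically) and then scales it; save_q is the save quality. Paths are made up.
def _demo_clip_resize():
    clipResizeImg(ori_img='in.jpg', dst_img='out.jpg', dst_w=200, dst_h=200, save_q=90)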
views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from guozu.models import *
from django.db.models import Q
import datetime
from django import forms
from django.utils import simplejson as json
import requests
from PIL import Image, ImageOps, ImageDraw
import urllib2,urllib,cStringIO
import random, time, hashlib, string  # 'string' is required by Sign.__create_nonce_str
import base64
from ServerAPI import ServerAPI
from django.core import serializers
import os
from PIL import Image, ImageOps, ImageDraw, ImageFont, ExifTags
import geoip2.database
import textwrap
class Sign:
def __init__(self, appId, appSecret, url):
self.appId = appId
self.appSecret = appSecret
self.ret = {
'nonceStr': self.__create_nonce_str(),
'jsapi_ticket': self.getJsApiTicket(),
'timestamp': self.__create_timestamp(),
'url': url
}
def __create_nonce_str(self):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def __create_timestamp(self):
return int(time.time())
def sign(self):
string = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
print string
self.ret['signature'] = hashlib.sha1(string).hexdigest()
return self.ret
def getJsApiTicket(self):
data = json.loads(open(os.getcwd()+'/jsapi_ticket.json').read())
jsapi_ticket = data['jsapi_ticket']
if data['expire_time'] < time.time():
url = "https://api.weixin.qq.com/cgi-bin/ticket/getticket?type=jsapi&access_token=%s" % (self.getAccessToken())
response = requests.get(url)
jsapi_ticket = json.loads(response.text)['ticket']
data['jsapi_ticket'] = jsapi_ticket
data['expire_time'] = int(time.time()) + 7000
fopen = open('jsapi_ticket.json', 'w')
fopen.write(json.dumps(data))
fopen.close()
return jsapi_ticket
def getAccessToken(self):
data = json.loads(open(os.getcwd()+'/access_token.json').read())
access_token = data['access_token']
if data['expire_time'] < time.time():
url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s" % (self.appId, self.appSecret)
response = requests.get(url)
access_token = json.loads(response.text)['access_token']
data['access_token'] = access_token
data['expire_time'] = int(time.time()) + 7000
fopen = open('access_token.json', 'w')
fopen.write(json.dumps(data))
fopen.close()
return access_token
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def add_corners(im, rad):
circle = Image.new('L', (rad * 2, rad * 2), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)
alpha = Image.new('L', im.size, 100)
w, h = im.size
alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))
alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))
alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))
alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))
im.putalpha(alpha)
return im
def guozuIndex(request):
return render_to_response('guozu/index.html',{'random': random.randrange(0, 10)})
def guozuSaveImage(request):
# if request.is_ajax():
# img = request.POST.get("img")
# openid = request.POST.get("openid")
# file = cStringIO.StringIO(urllib.urlopen(img).read())
# im = Image.open(file)
# im.thumbnail((100, 100), Image.ANTIALIAS)
# im.save("/home/project/project/template/guozu/upload/wx_"+openid+".png")
return HttpResponse('1')
def guozuImage(request):
ba = Image.open("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/logo.png")
im = Image.open('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/test.png')
bg = Image.open('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/img.png')
ba.paste(im, (199,85))
ba.paste(bg, (199,85), mask=bg)
draw = ImageDraw.Draw(ba)
font = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 20)
font1 = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 34)
font2 = ImageFont.truetype("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/font.ttf", 30)
draw.text((194, 401),"123213",(255,240,8),font=font)
draw.text((75, 230),unicode('只要血液一天是红色的,','utf-8'),(255,255,255),font=font1)
draw.text((110, 290),unicode('心就一天是国足的!','utf-8'),(255,255,255),font=font1)
draw.text((50, 230),unicode('赢了一起狂,输了一起扛!','utf-8'),(255,255,255),font=font1)
draw.text((180, 290),unicode('国足雄起!','utf-8'),(255,255,255),font=font1)
draw.text((110, 230),unicode('昆明,你能为我们','utf-8'),(255,255,255),font=font1)
draw.text((90, 290),unicode('带来第一场胜利吗?!','utf-8'),(255,255,255),font=font1)
draw.text((130, 230),unicode('莫愁长征无知己,','utf-8'),(255,255,255),font=font1)
draw.text((60, 290),unicode('天涯海角永相随!国足必胜!','utf-8'),(255,255,255),font=font2)
draw.text((55, 230),unicode('国足,我们真的不想再失望!','utf-8'),(255,255,255),font=font2)
draw.text((90, 290),unicode('拿出斗志,拿下一场!','utf-8'),(255,255,255),font=font1)
ba.save("/Users/dongli/Documents/Outsourcing/project/project/template/guozu/images/new.png")
return HttpResponse(1)
def guozuGetUserInfo(request):
if request.is_ajax():
code = request.POST.get("code")
r = requests.get('https://api.weixin.qq.com/sns/oauth2/access_token?appid=wx7dfb9e650ce540b1&secret=538b5277fe1d37660ed4b218f7057c86&code='+code+'&grant_type=authorization_code')
access_token = json.loads(r.text)['access_token']
openid = json.loads(r.text)['openid']
q = requests.get('https://api.weixin.qq.com/sns/userinfo?access_token='+access_token+'&openid='+openid+'&lang=zh_CN')
q.encoding = 'utf-8'
return HttpResponse(json.dumps({"nickname": json.loads(q.text)['nickname'], "openid": json.loads(q.text)['openid']}))
def guozuSubmit(request):
if request.is_ajax():
ip = get_client_ip(request)
try:
reader = geoip2.database.Reader('/Users/dongli/Documents/Outsourcing/project/project/template/guozu/GeoLite2-City.mmdb')
r = reader.city(ip)
city = r.city.names.get('zh-CN', '')
except:
city = u"北京"
return HttpResponse(city)
# img = request.POST.get('img')
# word = request.POST.get('word')
# name = request.POST.get('name')
# openid = request.POST.get('openid')
# p = GuozuRecord(openid=openid, name=name, city=city, image=img, word=word, date=datetime.datetime.now())
# p.save()
# if GuozuCity.objects.filter(name=city).count():
# count = GuozuCity.objects.get(name=city).count
# GuozuCity.objects.filter(name=city).update(count=count+1)
# else:
# p = GuozuCity(name=city, count=1)
# p.save()
# return HttpResponse(json.dumps({"city": city, "record": serializers.serialize('json', GuozuRecord.objects.filter(openid=openid).order_by('-id'))}))
def guozuGetCode(request):
if request.is_ajax():
phone = request.POST.get('phone')
AppKey = '1a9c8ee74f8ba02ddfe5c934d83a4cb5';
AppSecret = '0f1e0d91b368';
p = ServerAPI(AppKey,AppSecret, UseSSLLib = True);
a = p.sendSmsCode(phone,'')
return HttpResponse(a['obj']);
def guozuGetMsg(request):
if request.is_ajax():
phone = request.POST.get('phone')
count = GuozuCount.objects.get(id=1).count
count = count + 1
GuozuCount.objects.filter(id=1).update(count=count)
number = GuozuList.objects.get(id=count).number
AppKey = '1a9c8ee74f8ba02ddfe5c934d83a4cb5';
AppSecret = '0f1e0d91b368';
p = ServerAPI(AppKey,AppSecret, UseSSLLib = True);
return HttpResponse(number);
def guozuGetData(request):
if request.is_ajax():
openid = request.POST.get('openid')
serialized_obj1 = serializers.serialize('json', GuozuCity.objects.all().order_by('-count'))
if GuozuRecord.objects | id=openid).count():
record = serializers.serialize('json', GuozuRecord.objects.filter(openid=openid).order_by('-id'))
rtype = 1
else:
record = serializers.serialize('json', GuozuRecord.objects.all().order_by('-id')[0:10])
rtype = 2
data = serializers.serialize('json', GuozuRecord.objects.all().order_by('-id')[0:10])
return HttpResponse(json.dumps({"city": serialized_obj1, "record": record, "data": data, "total": GuozuRecord.objects.all().count(), "type": rtype}))
def guozuUpload(request):
if request.method == 'POST':
imageid = request.POST.get('openid')
file_ext = request.FILES['file'].name.split('.')[-1]
user_upload_folder = '/Users/dongli/Documents/Outsourcing/project/project/template/guozu/upload/'
file_upload = open( os.path.join(user_upload_folder, imageid+'.'+file_ext), 'wb')
file_upload.write(request.FILES['file'].read())
file_upload.close()
im = Image.open(os.path.join(user_upload_folder, imageid+'.'+file_ext))
orientation = 274
try:
exif = im._getexif()
if exif:
exif = dict(exif.items())
if exif[orientation] == 3:
im = im.rotate(180, expand=True)
elif exif[orientation] == 6:
im = im.rotate(270, expand=True)
elif exif[orientation] == 8:
im = im.rotate(90, expand=True)
except:
pass
im.thumbnail((100, 100), Image.ANTIALIAS)
im.save(os.path.join(user_upload_folder, 'm_'+imageid+'.'+file_ext))
return HttpResponse(imageid+'.'+file_ext)
def clipResizeImg(**args):
args_key = {'ori_img':'','dst_img':'','dst_w':'','dst_h':'','save_q':100}
arg = {}
for key in args_key:
if key in args:
arg[key] = args[key]
im = Image.open(arg['ori_img'])
ori_w,ori_h = im.size
dst_scale = float(arg['dst_h']) / arg['dst_w']
ori_scale = float(ori_h) / ori_w
if ori_scale >= dst_scale:
width = ori_w
height = int(width*dst_scale)
x = 0
y = (ori_h - height) / 3
else:
height = ori_h
width = int(height/dst_scale)  # dst_scale is h/w, so width = height * dst_w / dst_h
x = (ori_w - width) / 2
y = 0
box = (x,y,width+x,height+y)
newIm = im.crop(box)
im = None
ratio = float(arg['dst_w']) / width
newWidth = int(width * ratio)
newHeight = int(height * ratio)
newIm.resize((newWidth,newHeight),Image.ANTIALIAS).save(arg['dst_img'],quality=arg['save_q'])
| .filter(open | identifier_name |
host_window.py | #!/bin/python
"""
This file creates the window for displaying the output of the venous flow
detected by the analog and digital modules. The input is from a PIC18F2550
libUSB device. Since the maximum frequency of sampling we need is around
10 Hertz, the low sampling rate of the USB device will not pose a problem.
This module is derived from Eli Bendersky's dynamic matplotlib wxPython
example, and is based on wxPython and matplotlib.
Author: Vishwanath
License: Not applicable yet
Name: VenoScope(VS)
"""
import os  # needed by on_save() for os.getcwd()
import time
import usb.core
import wx
from scipy import *
# Matplotlib imports
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import pylab
def _configure_device():
""" Configure and get the USB device running. Returns device class if
success and None if failed"""
vendor_id = 0x04D8 # These ids are microchip's libusb based device
product_id = 0x0204 # ids
dev = usb.core.find(idVendor=vendor_id, idProduct = product_id)
try:
dev.set_configuration()
return dev
except:
return None
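## Hedged usage sketch (editor addition): probe for the analyser once at
## start-up. The VID/PID above are Microchip's stock libusb demo ids, so any
## board flashed with that firmware will match.
def _demo_probe():
    dev = _configure_device()
    print('device ready' if dev is not None else 'device not found')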
class VSDataAquisition(object):
""" A place holder for collecting data from the ADC of the device. This
class will also control sample/hold bit of the device."""
def __init__(self):
""" Configure the device and set class properties"""
self.data0 = [] # This will hold data from ADC0
self.data1 = [] # This will hold data from ADC1
self.dev = _configure_device()
def get_data(self):
""" Get the next data from ADC0. For ADC1, use get_dc_offset()"""
self.dev.write(1, 'A0')
digit1, digit2 = self.dev.read(0x81, 64)[:2]
# Save the data as voltage between 0.0 and 5.0
self.data0.append((digit1 + 256*digit2)*5.0/1024)
def get_dc_offset(self):
""" Get the initial DC offset of the analog output"""
self.dev.write(1, 'A1')
digit1, digit2 = self.dev.read(0x81, 64)[:2]
# Save the data as voltage between 0.0 and 5.0
self.data1.append((digit1 + 256*digit2)*5.0/1024)
def sample(self):
""" Set the sample bit for getting the initial DC offset"""
self.dev.write(1, 'S')
def hold(self):
""" Clear the sample bit for consecutive data acquisition"""
self.dev.write(1, 'H')
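## Hedged note (editor addition): the two bytes read back form a little-endian
## 10-bit ADC code scaled to a 0-5 V range. Worked example: bytes (0xFF, 0x03)
## give code 1023, i.e. 1023*5.0/1024 ~= 4.995 V (full scale).
def _adc_code_to_volts(lo, hi):
    return (lo + 256 * hi) * 5.0 / 1024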
class VSControlBox(wx.Panel):
""" A static box for controlling the start and stop of the device and
displaying the final result of the venous flow measurement."""
def __init__(self, parent, ID, label):
wx.Panel.__init__(self, parent, ID)
# Create two box sizers, one for the button and one for the status
# message and final output reading.
button_box = wx.StaticBox(self, -1, label = 'Device Control')
info_box = wx.StaticBox(self, -1, label = 'Information')
box = wx.StaticBox(self, -1, label)
main_sizer = wx.StaticBoxSizer(box, wx.HORIZONTAL)
button_sizer = wx.StaticBoxSizer(button_box, orient=wx.HORIZONTAL)
info_sizer = wx.StaticBoxSizer(info_box, orient=wx.HORIZONTAL)
# Create a start/stop measurement button.
self.start_button = wx.Button(self, label = 'Start measurement',)
# Create a result and information text box.
self.result_box = wx.StaticText(self)
self.txt_info_box = wx.StaticText(self, size=(200, -1))
self.result_box.SetLabel("0.00")
| info_sizer.Add(self.txt_info_box, 0, wx.ALL, 10)
# Add the sizers to main sizer
main_sizer.Add(button_sizer, flag=wx.ALIGN_CENTER_VERTICAL)
main_sizer.AddSpacer(20)
main_sizer.Add(info_sizer, flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
# Bind events to the button
self.start_button.Bind(wx.EVT_BUTTON, parent.Parent.start_stop)
# Finally, make a fit
self.SetSizer(main_sizer)
main_sizer.Fit(self)
def start_stop(self, event):
""" Bind a rudimentary event now. Will update it later."""
self.start_button.SetLabel('Measuring')
self.start_button.Enable = False
# Do nothing as of now. Will call measuring functions later.
self.txt_info_box.SetLabel('Starting measurement.')
time.sleep(2)
self.start_button.SetLabel('Start measurement')
self.start_button.Enable = True
self.txt_info_box.SetLabel('Completed measurement.')
self.result_box.SetLabel("100.00")
class VSGraphFrame(wx.Frame):
""" Main frame for the measurement application"""
title = "Venous flow calculation"
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.SAMPLING_TIME = 10000.0 # Set the sampling time here.
self.daq = VSDataAquisition()
if self.daq.dev is None:  # _configure_device() returns None when no device is found
help_string = ''' Device is not connected. Please connect the
device and restart the software'''
wx.MessageBox(help_string, 'Device not found',
wx.OK | wx.ICON_INFORMATION)
self.Destroy()
return
self.create_menu() # Create the menu
self.create_status_bar () # Add a status bar. Could use for debugging
self.create_main_panel() # The main panel
# We will use a timer for getting our samples.
self.redraw_timer = wx.Timer(self, wx.ID_ANY)
# Sampling duration itself is another timer. This timer is a
# oneshot timer and runs for SAMPLING_TIME time.
self.sampling_timer = wx.Timer(self, wx.ID_ANY)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.Bind(wx.EVT_TIMER, self.on_sampling_timer, self.sampling_timer)
self.redraw_timer.Start(100)
def create_menu(self):
""" Add menu bar items. One File and one About"""
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
menu_help = wx.Menu()
# Add save and exit to File menu
menu_save = menu_file.Append(-1, '&Save plot\tCtrl-S',
'Save plot to a file')
menu_file.AppendSeparator()
menu_exit = menu_file.Append(-1, '&Exit\tCtrl-X',
'Exit the program')
self.Bind(wx.EVT_MENU, self.on_save, menu_save)
self.Bind(wx.EVT_MENU, self.on_exit, menu_exit)
# Add an about in the Help menu. Will update later.
help_about = menu_help.Append(-1, '&About',
'About the program')
menu_help.AppendSeparator()
self.Bind(wx.EVT_MENU, self.on_about, help_about)
# Add them both to menubar
self.menubar.Append(menu_file, '&File')
self.menubar.Append(menu_help, '&Help')
self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Create the main panel which will show the dynamic plot."""
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.control_box = VSControlBox(self.panel, -1, 'Information board')
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.control_box, 0, wx.ALIGN_LEFT | wx.TOP | wx.EXPAND)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
""" Create a status bar. Will use it in future for debugging"""
self.statusbar = self.CreateStatusBar()
def flash_status_message(self, msg):
""" Minimal stand-in (editor assumption): on_save() calls this, but no
definition appears in this listing; show the message in the status bar."""
self.statusbar.SetStatusText(msg)
def init_plot(self):
""" Initialize the plot canvas"""
self.dpi = 100
self.fig = Figure((5.0, 5.0), dpi = self.dpi)
self.main_plot = self.fig.add_subplot(111)
self.main_plot.set_axis_bgcolor('black')
self.main_plot.set_title('Dynamic venous flow view', size = 12)
pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)
pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)
# Plot the data as a green line
self.plot_data = self.main_plot.plot(
self.daq.data0,
linewidth = 1,
color = (0, 1, 0),
)[0]
self.main_plot.grid(True, color='gray')
def draw_plot(self):
""" Redraw the plot after every data aquisition."""
# X axis is auto follow.
XLEN = 100
xmax = max(len(self.daq.data0), XLEN)
xmin = xmax - XLEN
# The Y value will lie between 0.0 and 5.0 volts
ymax = 5.0
ymin = 0.0
self.main_plot.set_xbound(lower=xmin, upper=xmax)
self.main_plot.set_ybound(lower=ymin, upper=ymax)
# Add the grid. Grid looks cool and is actually very helpful.
self.main_plot.grid(True, color='gray')
pylab.setp(self.main_plot.get_xticklabels(),
visible=True)
self.plot_data.set_xdata(arange(len(self.daq.data0)))
self.plot_data.set_ydata(array(self.daq.data0))
self.canvas.draw()
def on_save(self, event):
""" Method for saving a plot """
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def start_stop(self, event):
""" Restart measurements and complete calculations"""
self.daq.data0= []
self.control_box.txt_info_box.SetLabel('Starting measurement')
self.sampling_timer.Start(self.SAMPLING_TIME, oneShot=True)
def on_redraw_timer(self, event):
""" Update the plot whenever data is obtained """
if self.sampling_timer.IsRunning():
self.daq.get_data()
self.draw_plot()
else:
self.control_box.txt_info_box.SetLabel('Measurement complete')
self.calculate()
return
def on_sampling_timer(self, event):
""" Stop the timer when sampling is complete."""
self.sampling_timer.Stop()
def calculate(self):
"""Calculate the venous flow rate. Dummy now."""
if self.sampling_timer.IsRunning():
return
if self.daq.data0 == []:
average = 0.0
else:
average = mean(self.daq.data0)
res_string = '%.2f' %average
self.control_box.result_box.SetLabel(res_string)
def on_exit(self, event):
""" Quit the window """
self.Destroy()
def on_about(self, event):
""" Display an about message """
pass
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = VSGraphFrame()
app.frame.Show()
app.MainLoop() | # Add the items to sizers
button_sizer.Add(self.start_button, 0, wx.ALL, 10)
info_sizer.Add(self.result_box, 0, wx.ALL, 10) | random_line_split |
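## Hedged note (editor addition): the frame above pairs a periodic redraw
## timer (100 ms) with a one-shot sampling timer (SAMPLING_TIME ms), so one
## run collects roughly SAMPLING_TIME/100 samples -- about 100, matching the
## XLEN window used in draw_plot().
def _expected_sample_count(sampling_ms=10000.0, redraw_ms=100.0):
    return int(sampling_ms / redraw_ms)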
host_window.py | #!/bin/python
"""
This file creates the window for displaying the output of the venous flow
detected by the analog and digital modules. The input is from a PIC18F2550
libUSB device. Since the maximum frequency of sampling we need is around
10 Hertz, the low sampling rate of the USB device will not pose a problem.
This module is derived from Eli Bendersky's dynamic matplotlib wxPython
example, and is based on wxPython and matplotlib.
Author: Vishwanath
License: Not applicable yet
Name: VenoScope(VS)
"""
import os  # needed by on_save() for os.getcwd()
import time
import usb.core
import wx
from scipy import *
# Matplotlib imports
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import pylab
def _configure_device():
""" Configure and get the USB device running. Returns device class if
success and None if failed"""
vendor_id = 0x04D8 # These ids are microchip's libusb based device
product_id = 0x0204 # ids
dev = usb.core.find(idVendor=vendor_id, idProduct = product_id)
try:
dev.set_configuration()
return dev
except:
return None
class VSDataAquisition(object):
""" A place holder for collecting data from the ADC of the device. This
class will also control sample/hold bit of the device."""
def __init__(self):
""" Configure the device and set class properties"""
self.data0 = [] # This will hold data from ADC0
self.data1 = [] # This will hold data from ADC1
self.dev = _configure_device()
def get_data(self):
""" Get the next data from ADC0. For ADC1, use get_dc_offset()"""
self.dev.write(1, 'A0')
digit1, digit2 = self.dev.read(0x81, 64)[:2]
# Save the data as voltage between 0.0 and 5.0
self.data0.append((digit1 + 256*digit2)*5.0/1024)
def get_dc_offset(self):
""" Get the initial DC offset of the analog output"""
self.dev.write(1, 'A1')
digit1, digit2 = self.dev.read(0x81, 64)[:2]
# Save the data as voltage between 0.0 and 5.0
self.data1.append((digit1 + 256*digit2)*5.0/1024)
def sample(self):
""" Set the sample bit for getting the initial DC offset"""
self.dev.write(1, 'S')
def hold(self):
""" Clear the sample bit for consecutive data acquisition"""
self.dev.write(1, 'H')
class VSControlBox(wx.Panel):
""" A static box for controlling the start and stop of the device and
displaying the final result of the venous flow measurement."""
def __init__(self, parent, ID, label):
wx.Panel.__init__(self, parent, ID)
# Create two box sizers, one for the button and one for the status
# message and final output reading.
button_box = wx.StaticBox(self, -1, label = 'Device Control')
info_box = wx.StaticBox(self, -1, label = 'Information')
box = wx.StaticBox(self, -1, label)
main_sizer = wx.StaticBoxSizer(box, wx.HORIZONTAL)
button_sizer = wx.StaticBoxSizer(button_box, orient=wx.HORIZONTAL)
info_sizer = wx.StaticBoxSizer(info_box, orient=wx.HORIZONTAL)
# Create a start/stop measurement button.
self.start_button = wx.Button(self, label = 'Start measurement',)
# Create a result and information text box.
self.result_box = wx.StaticText(self)
self.txt_info_box = wx.StaticText(self, size=(200, -1))
self.result_box.SetLabel("0.00")
# Add the items to sizers
button_sizer.Add(self.start_button, 0, wx.ALL, 10)
info_sizer.Add(self.result_box, 0, wx.ALL, 10)
info_sizer.Add(self.txt_info_box, 0, wx.ALL, 10)
# Add the sizers to main sizer
main_sizer.Add(button_sizer, flag=wx.ALIGN_CENTER_VERTICAL)
main_sizer.AddSpacer(20)
main_sizer.Add(info_sizer, flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
# Bind events to the button
self.start_button.Bind(wx.EVT_BUTTON, parent.Parent.start_stop)
# Finally, make a fit
self.SetSizer(main_sizer)
main_sizer.Fit(self)
def start_stop(self, event):
""" Bind a rudimentary event now. Will update it later."""
self.start_button.SetLabel('Measuring')
self.start_button.Enable = False
# Do nothing as of now. Will call measuring functions later.
self.txt_info_box.SetLabel('Starting measurement.')
time.sleep(2)
self.start_button.SetLabel('Start measurement')
self.start_button.Enable = True
self.txt_info_box.SetLabel('Completed measurement.')
self.result_box.SetLabel("100.00")
class VSGraphFrame(wx.Frame):
""" Main frame for the measurement application"""
title = "Venous flow calculation"
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.SAMPLING_TIME = 10000.0 # Set the sampling time here.
self.daq = VSDataAquisition()
if self.daq.dev is None:  # _configure_device() returns None when no device is found
help_string = ''' Device is not connected. Please connect the
device and restart the software'''
wx.MessageBox(help_string, 'Device not found',
wx.OK | wx.ICON_INFORMATION)
self.Destroy()
return
self.create_menu() # Create the menu
self.create_status_bar () # Add a status bar. Could use for debugging
self.create_main_panel() # The main panel
# We will use a timer for getting our samples.
self.redraw_timer = wx.Timer(self, wx.ID_ANY)
# Sampling duration itself is another timer. This timer is a
# oneshot timer and runs for SAMPLING_TIME time.
self.sampling_timer = wx.Timer(self, wx.ID_ANY)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.Bind(wx.EVT_TIMER, self.on_sampling_timer, self.sampling_timer)
self.redraw_timer.Start(100)
def create_menu(self):
""" Add menu bar items. One File and one About"""
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
menu_help = wx.Menu()
# Add save and exit to File menu
menu_save = menu_file.Append(-1, '&Save plot\tCtrl-S',
'Save plot to a file')
menu_file.AppendSeparator()
menu_exit = menu_file.Append(-1, '&Exit\tCtrl-X',
'Exit the program')
self.Bind(wx.EVT_MENU, self.on_save, menu_save)
self.Bind(wx.EVT_MENU, self.on_exit, menu_exit)
# Add an about in the Help menu. Will update later.
help_about = menu_help.Append(-1, '&About',
'About the program')
menu_help.AppendSeparator()
self.Bind(wx.EVT_MENU, self.on_about, help_about)
# Add them both to menubar
self.menubar.Append(menu_file, '&File')
self.menubar.Append(menu_help, '&Help')
self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Create the main panel which will show the dynamic plot."""
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.control_box = VSControlBox(self.panel, -1, 'Information board')
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.control_box, 0, wx.ALIGN_LEFT | wx.TOP | wx.EXPAND)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
""" Create a status bar. Will use it in future for debugging"""
self.statusbar = self.CreateStatusBar()
def flash_status_message(self, msg):
""" Minimal stand-in (editor assumption): on_save() calls this, but no
definition appears in this listing; show the message in the status bar."""
self.statusbar.SetStatusText(msg)
def init_plot(self):
""" Initialize the plot canvas"""
self.dpi = 100
self.fig = Figure((5.0, 5.0), dpi = self.dpi)
self.main_plot = self.fig.add_subplot(111)
self.main_plot.set_axis_bgcolor('black')
self.main_plot.set_title('Dynamic venous flow view', size = 12)
pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)
pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)
# Plot the data as a green line
self.plot_data = self.main_plot.plot(
self.daq.data0,
linewidth = 1,
color = (0, 1, 0),
)[0]
self.main_plot.grid(True, color='gray')
def draw_plot(self):
""" Redraw the plot after every data aquisition."""
# X axis is auto follow.
XLEN = 100
xmax = max(len(self.daq.data0), XLEN)
xmin = xmax - XLEN
# The Y value will lie between 0.0 and 5.0 volts
ymax = 5.0
ymin = 0.0
self.main_plot.set_xbound(lower=xmin, upper=xmax)
self.main_plot.set_ybound(lower=ymin, upper=ymax)
# Add the grid. Grid looks cool and is actually very helpful.
self.main_plot.grid(True, color='gray')
pylab.setp(self.main_plot.get_xticklabels(),
visible=True)
self.plot_data.set_xdata(arange(len(self.daq.data0)))
self.plot_data.set_ydata(array(self.daq.data0))
self.canvas.draw()
def on_save(self, event):
""" Method for saving a plot """
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def start_stop(self, event):
""" Restart measurements and complete calculations"""
self.daq.data0= []
self.control_box.txt_info_box.SetLabel('Starting measurement')
self.sampling_timer.Start(self.SAMPLING_TIME, oneShot=True)
def on_redraw_timer(self, event):
""" Update the plot whenever data is obtained """
if self.sampling_timer.IsRunning():
self.daq.get_data()
self.draw_plot()
else:
self.control_box.txt_info_box.SetLabel('Measurement complete')
self.calculate()
return
def on_sampling_timer(self, event):
""" Stop the timer when sampling is complete."""
self.sampling_timer.Stop()
def calculate(self):
"""Calculate the venous flow rate. Dummy now."""
if self.sampling_timer.IsRunning():
return
if self.daq.data0 == []:
|
else:
average = mean(self.daq.data0)
res_string = '%.2f' %average
self.control_box.result_box.SetLabel(res_string)
def on_exit(self, event):
""" Quit the window """
self.Destroy()
def on_about(self, event):
""" Display an about message """
pass
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = VSGraphFrame()
app.frame.Show()
app.MainLoop()
| average = 0.0 | conditional_block |
host_window.py | #!/bin/python
"""
This file creates the window for displaying the output of the venous flow
detected by the analog and digital modules. The input is from a PIC18F2550
libUSB device. Since the maximum frequency of sampling we need is around
10 Hertz, the low sampling rate of the USB device will not pose a problem.
This module is derived from Eli Bendersky's dynamic matplotlib wxPython
example, and is based on wxPython and matplotlib.
Author: Vishwanath
License: Not applicable yet
Name: VenoScope(VS)
"""
import os  # needed by on_save() for os.getcwd()
import time
import usb.core
import wx
from scipy import *
# Matplotlib imports
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import pylab
def _configure_device():
""" Configure and get the USB device running. Returns device class if
success and None if failed"""
vendor_id = 0x04D8 # These ids are microchip's libusb based device
product_id = 0x0204 # ids
dev = usb.core.find(idVendor=vendor_id, idProduct = product_id)
try:
dev.set_configuration()
return dev
except:
return None
class VSDataAquisition(object):
""" A place holder for collecting data from the ADC of the device. This
class will also control sample/hold bit of the device."""
def __init__(self):
""" Configure the device and set class properties"""
self.data0 = [] # This will hold data from ADC0
self.data1 = [] # This will hold data from ADC1
self.dev = _configure_device()
def get_data(self):
""" Get the next data from ADC0. For ADC1, use get_dc_offset()"""
self.dev.write(1, 'A0')
digit1, digit2 = self.dev.read(0x81, 64)[:2]
# Save the data as voltage between 0.0 and 5.0
self.data0.append((digit1 + 256*digit2)*5.0/1024)
def get_dc_offset(self):
""" Get the initial DC offset of the analog output"""
self.dev.write(1, 'A1')
digit1, digit2 = self.dev.read(0x81, 64)[:2]
# Save the data as voltage between 0.0 and 5.0
self.data1.append((digit1 + 256*digit2)*5.0/1024)
def sample(self):
""" Set the sample bit for getting the initial DC offset"""
self.dev.write(1, 'S')
def hold(self):
""" Clear the sample bit for consecutive data acquisition"""
self.dev.write(1, 'H')
class VSControlBox(wx.Panel):
|
class VSGraphFrame(wx.Frame):
""" Main frame for the measurement application"""
title = "Venous flow calculation"
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.SAMPLING_TIME = 10000.0 # Set the sampling time here.
self.daq = VSDataAquisition()
if self.daq.dev is None:  # _configure_device() returns None when no device is found
help_string = ''' Device is not connected. Please connect the
device and restart the software'''
wx.MessageBox(help_string, 'Device not found',
wx.OK | wx.ICON_INFORMATION)
self.Destroy()
return
self.create_menu() # Create the menu
self.create_status_bar () # Add a status bar. Could use for debugging
self.create_main_panel() # The main panel
# We will use a timer for getting our samples.
self.redraw_timer = wx.Timer(self, wx.ID_ANY)
# Sampling duration itself is another timer. This timer is a
# oneshot timer and runs for SAMPLING_TIME time.
self.sampling_timer = wx.Timer(self, wx.ID_ANY)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.Bind(wx.EVT_TIMER, self.on_sampling_timer, self.sampling_timer)
self.redraw_timer.Start(100)
def create_menu(self):
""" Add menu bar items. One File and one About"""
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
menu_help = wx.Menu()
# Add save and exit to File menu
menu_save = menu_file.Append(-1, '&Save plot\tCtrl-S',
'Save plot to a file')
menu_file.AppendSeparator()
menu_exit = menu_file.Append(-1, '&Exit\tCtrl-X',
'Exit the program')
self.Bind(wx.EVT_MENU, self.on_save, menu_save)
self.Bind(wx.EVT_MENU, self.on_exit, menu_exit)
# Add an about in the Help menu. Will update later.
help_about = menu_help.Append(-1, '&About',
'About the program')
menu_help.AppendSeparator()
self.Bind(wx.EVT_MENU, self.on_about, help_about)
# Add them both to menubar
self.menubar.Append(menu_file, '&File')
self.menubar.Append(menu_help, '&Help')
self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Create the main panel which will show the dynamic plot."""
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.control_box = VSControlBox(self.panel, -1, 'Information board')
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.control_box, 0, wx.ALIGN_LEFT | wx.TOP | wx.EXPAND)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
""" Create a status bar. Will use it in future for debugging"""
self.statusbar = self.CreateStatusBar()
def flash_status_message(self, msg):
""" Minimal stand-in (editor assumption): on_save() calls this, but no
definition appears in this listing; show the message in the status bar."""
self.statusbar.SetStatusText(msg)
def init_plot(self):
""" Initialize the plot canvas"""
self.dpi = 100
self.fig = Figure((5.0, 5.0), dpi = self.dpi)
self.main_plot = self.fig.add_subplot(111)
self.main_plot.set_axis_bgcolor('black')
self.main_plot.set_title('Dynamic venous flow view', size = 12)
pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)
pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)
# Plot the data as a green line
self.plot_data = self.main_plot.plot(
self.daq.data0,
linewidth = 1,
color = (0, 1, 0),
)[0]
self.main_plot.grid(True, color='gray')
def draw_plot(self):
""" Redraw the plot after every data aquisition."""
# X axis is auto follow.
XLEN = 100
xmax = max(len(self.daq.data0), XLEN)
xmin = xmax - XLEN
# The Y value will lie between 0.0 and 5.0 volts
ymax = 5.0
ymin = 0.0
self.main_plot.set_xbound(lower=xmin, upper=xmax)
self.main_plot.set_ybound(lower=ymin, upper=ymax)
# Add the grid. Grid looks cool and is actually very helpful.
self.main_plot.grid(True, color='gray')
pylab.setp(self.main_plot.get_xticklabels(),
visible=True)
self.plot_data.set_xdata(arange(len(self.daq.data0)))
self.plot_data.set_ydata(array(self.daq.data0))
self.canvas.draw()
def on_save(self, event):
""" Method for saving a plot """
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def start_stop(self, event):
""" Restart measurements and complete calculations"""
self.daq.data0= []
self.control_box.txt_info_box.SetLabel('Starting measurement')
self.sampling_timer.Start(self.SAMPLING_TIME, oneShot=True)
def on_redraw_timer(self, event):
""" Update the plot whenever data is obtained """
if self.sampling_timer.IsRunning():
self.daq.get_data()
self.draw_plot()
else:
self.control_box.txt_info_box.SetLabel('Measurement complete')
self.calculate()
return
def on_sampling_timer(self, event):
""" Stop the timer when sampling is complete."""
self.sampling_timer.Stop()
def calculate(self):
"""Calculate the venous flow rate. Dummy now."""
if self.sampling_timer.IsRunning():
return
if self.daq.data0 == []:
average = 0.0
else:
average = mean(self.daq.data0)
res_string = '%.2f' %average
self.control_box.result_box.SetLabel(res_string)
def on_exit(self, event):
""" Quit the window """
self.Destroy()
def on_about(self, event):
""" Display an about message """
pass
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = VSGraphFrame()
app.frame.Show()
app.MainLoop()
| """ A static box for controlling the start and stop of the device and
displaying the final result of the venous flow measurement."""
def __init__(self, parent, ID, label):
wx.Panel.__init__(self, parent, ID)
# Create two box sizers, one for the button and one for the status
# message and final output reading.
button_box = wx.StaticBox(self, -1, label = 'Device Control')
info_box = wx.StaticBox(self, -1, label = 'Information')
box = wx.StaticBox(self, -1, label)
main_sizer = wx.StaticBoxSizer(box, wx.HORIZONTAL)
button_sizer = wx.StaticBoxSizer(button_box, orient=wx.HORIZONTAL)
info_sizer = wx.StaticBoxSizer(info_box, orient=wx.HORIZONTAL)
# Create a start/stop measurement button.
self.start_button = wx.Button(self, label = 'Start measurement',)
# Create a result and information text box.
self.result_box = wx.StaticText(self)
self.txt_info_box = wx.StaticText(self, size=(200, -1))
self.result_box.SetLabel("0.00")
# Add the items to sizers
button_sizer.Add(self.start_button, 0, wx.ALL, 10)
info_sizer.Add(self.result_box, 0, wx.ALL, 10)
info_sizer.Add(self.txt_info_box, 0, wx.ALL, 10)
# Add the sizers to main sizer
main_sizer.Add(button_sizer, flag=wx.ALIGN_CENTER_VERTICAL)
main_sizer.AddSpacer(20)
main_sizer.Add(info_sizer, flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
# Bind events to the button
self.start_button.Bind(wx.EVT_BUTTON, parent.Parent.start_stop)
# Finally, make a fit
self.SetSizer(main_sizer)
main_sizer.Fit(self)
def start_stop(self, event):
""" Bind a rudimentary event now. Will update it later."""
self.start_button.SetLabel('Measuring')
self.start_button.Enable = False
# Do nothing as of now. Will call measuring functions later.
self.txt_info_box.SetLabel('Starting measurement.')
time.sleep(2)
self.start_button.SetLabel('Start measurement')
self.start_button.Enable = True
self.txt_info_box.SetLabel('Completed measurement.')
self.result_box.SetLabel("100.00") | identifier_body |
host_window.py | #!/bin/python
"""
This file creates the window for displaying the output of the venous flow
detected by the analog and digital modules. The input is from a PIC18F2550
libUSB device. Since the maximum frequency of sampling we need is around
10 Hertz, the low sampling rate of the USB device will not pose a problem.
This module is derived from Eli Bendersky's dynamic matplotlib wxPython
example, and is based on wxPython and matplotlib.
Author: Vishwanath
License: Not applicable yet
Name: VenoScope(VS)
"""
import os  # needed by on_save() for os.getcwd()
import time
import usb.core
import wx
from scipy import *
# Matplotlib imports
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import pylab
def _configure_device():
""" Configure and get the USB device running. Returns device class if
success and None if failed"""
vendor_id = 0x04D8 # These ids are microchip's libusb based device
product_id = 0x0204 # ids
dev = usb.core.find(idVendor=vendor_id, idProduct = product_id)
try:
dev.set_configuration()
return dev
except:
return None
class VSDataAquisition(object):
""" A place holder for collecting data from the ADC of the device. This
class will also control sample/hold bit of the device."""
def __init__(self):
""" Configure the device and set class properties"""
self.data0 = [] # This will hold data from ADC0
self.data1 = [] # This will hold data from ADC1
self.dev = _configure_device()
def get_data(self):
""" Get the next data from ADC0. For ADC1, use get_dc_offset()"""
self.dev.write(1, 'A0')
digit1, digit2 = self.dev.read(0x81, 64)[:2]
# Save the data as voltage between 0.0 and 5.0
self.data0.append((digit1 + 256*digit2)*5.0/1024)
def get_dc_offset(self):
""" Get the initial DC offset of the analog output"""
self.dev.write(1, 'A1')
digit1, digit2 = self.dev.read(0x81, 64)[:2]
# Save the data as voltage between 0.0 and 5.0
self.data1.append((digit1 + 256*digit2)*5.0/1024)
def sample(self):
""" Set the sample bit for getting the initial DC offset"""
self.dev.write(1, 'S')
def hold(self):
""" Clear the sample bit for consecutive data acquisition"""
self.dev.write(1, 'H')
class VSControlBox(wx.Panel):
""" A static box for controlling the start and stop of the device and
displaying the final result of the venous flow measurement."""
def __init__(self, parent, ID, label):
wx.Panel.__init__(self, parent, ID)
# Create two box sizers, one for the button and one for the status
# message and final output reading.
button_box = wx.StaticBox(self, -1, label = 'Device Control')
info_box = wx.StaticBox(self, -1, label = 'Information')
box = wx.StaticBox(self, -1, label)
main_sizer = wx.StaticBoxSizer(box, wx.HORIZONTAL)
button_sizer = wx.StaticBoxSizer(button_box, orient=wx.HORIZONTAL)
info_sizer = wx.StaticBoxSizer(info_box, orient=wx.HORIZONTAL)
# Create a start/stop measurement button.
self.start_button = wx.Button(self, label = 'Start measurement',)
# Create a result and information text box.
self.result_box = wx.StaticText(self)
self.txt_info_box = wx.StaticText(self, size=(200, -1))
self.result_box.SetLabel("0.00")
# Add the items to sizers
button_sizer.Add(self.start_button, 0, wx.ALL, 10)
info_sizer.Add(self.result_box, 0, wx.ALL, 10)
info_sizer.Add(self.txt_info_box, 0, wx.ALL, 10)
# Add the sizers to main sizer
main_sizer.Add(button_sizer, flag=wx.ALIGN_CENTER_VERTICAL)
main_sizer.AddSpacer(20)
main_sizer.Add(info_sizer, flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
# Bind events to the button
self.start_button.Bind(wx.EVT_BUTTON, parent.Parent.start_stop)
# Finally, make a fit
self.SetSizer(main_sizer)
main_sizer.Fit(self)
def start_stop(self, event):
""" Bind a rudimentary event now. Will update it later."""
self.start_button.SetLabel('Measuring')
self.start_button.Enable = False
# Do nothing as of now. Will call measuring functions later.
self.txt_info_box.SetLabel('Starting measurement.')
time.sleep(2)
self.start_button.SetLabel('Start measurement')
self.start_button.Enable = True
self.txt_info_box.SetLabel('Completed measurement.')
self.result_box.SetLabel("100.00")
class VSGraphFrame(wx.Frame):
""" Main frame for the measurement application"""
title = "Venous flow calculation"
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.SAMPLING_TIME = 10000.0 # Set the sampling time here.
self.daq = VSDataAquisition()
if self.daq.dev is None:  # _configure_device() returns None when no device is found
help_string = ''' Device is not connected. Please connect the
device and restart the software'''
wx.MessageBox(help_string, 'Device not found',
wx.OK | wx.ICON_INFORMATION)
self.Destroy()
return
self.create_menu() # Create the menu
self.create_status_bar () # Add a status bar. Could use for debugging
self.create_main_panel() # The main panel
# We will use a timer for getting our samples.
self.redraw_timer = wx.Timer(self, wx.ID_ANY)
# Sampling duration itself is another timer. This timer is a
# oneshot timer and runs for SAMPLING_TIME time.
self.sampling_timer = wx.Timer(self, wx.ID_ANY)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.Bind(wx.EVT_TIMER, self.on_sampling_timer, self.sampling_timer)
self.redraw_timer.Start(100)
def create_menu(self):
""" Add menu bar items. One File and one About"""
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
menu_help = wx.Menu()
# Add save and exit to File menu
menu_save = menu_file.Append(-1, '&Save plot\tCtrl-S',
'Save plot to a file')
menu_file.AppendSeparator()
menu_exit = menu_file.Append(-1, '&Exit\tCtrl-X',
'Exit the program')
self.Bind(wx.EVT_MENU, self.on_save, menu_save)
self.Bind(wx.EVT_MENU, self.on_exit, menu_exit)
# Add an about in the Help menu. Will update later.
help_about = menu_help.Append(-1, '&About',
'About the program')
menu_help.AppendSeparator()
self.Bind(wx.EVT_MENU, self.on_about, help_about)
# Add them both to menubar
self.menubar.Append(menu_file, '&File')
self.menubar.Append(menu_help, '&Help')
self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Create the main panel which will show the dynamic plot."""
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.control_box = VSControlBox(self.panel, -1, 'Information board')
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.control_box, 0, wx.ALIGN_LEFT | wx.TOP | wx.EXPAND)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
""" Create a status bar. Will use it in future for debugging"""
self.statusbar = self.CreateStatusBar()
def flash_status_message(self, msg):
""" Minimal stand-in (editor assumption): on_save() calls this, but no
definition appears in this listing; show the message in the status bar."""
self.statusbar.SetStatusText(msg)
def init_plot(self):
""" Initialize the plot canvas"""
self.dpi = 100
self.fig = Figure((5.0, 5.0), dpi = self.dpi)
self.main_plot = self.fig.add_subplot(111)
self.main_plot.set_axis_bgcolor('black')
self.main_plot.set_title('Dynamic venous flow view', size = 12)
pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)
pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)
# Plot the data as a green line
self.plot_data = self.main_plot.plot(
self.daq.data0,
linewidth = 1,
color = (0, 1, 0),
)[0]
self.main_plot.grid(True, color='gray')
def draw_plot(self):
""" Redraw the plot after every data aquisition."""
# X axis is auto follow.
XLEN = 100
xmax = max(len(self.daq.data0), XLEN)
xmin = xmax - XLEN
# The Y value will lie between 0.0 and 5.0 volts
ymax = 5.0
ymin = 0.0
self.main_plot.set_xbound(lower=xmin, upper=xmax)
self.main_plot.set_ybound(lower=ymin, upper=ymax)
# Add the grid. Grid looks cool and is actually very helpful.
self.main_plot.grid(True, color='gray')
pylab.setp(self.main_plot.get_xticklabels(),
visible=True)
self.plot_data.set_xdata(arange(len(self.daq.data0)))
self.plot_data.set_ydata(array(self.daq.data0))
self.canvas.draw()
def on_save(self, event):
""" Method for saving a plot """
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def start_stop(self, event):
""" Restart measurements and complete calculations"""
self.daq.data0= []
self.control_box.txt_info_box.SetLabel('Starting measurement')
self.sampling_timer.Start(self.SAMPLING_TIME, oneShot=True)
def | (self, event):
""" Update the plot whenever data is obtained """
if self.sampling_timer.IsRunning():
self.daq.get_data()
self.draw_plot()
else:
self.control_box.txt_info_box.SetLabel('Measurement complete')
self.calculate()
return
def on_sampling_timer(self, event):
""" Stop the timer when sampling is complete."""
self.sampling_timer.Stop()
def calculate(self):
"""Calculate the venous flow rate. Dummy now."""
if self.sampling_timer.IsRunning():
return
if self.daq.data0 == []:
average = 0.0
else:
average = mean(self.daq.data0)
res_string = '%.2f' %average
self.control_box.result_box.SetLabel(res_string)
def on_exit(self, event):
""" Quit the window """
self.Destroy()
def on_about(self, event):
""" Display an about message """
pass
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = VSGraphFrame()
app.frame.Show()
app.MainLoop()
| on_redraw_timer | identifier_name |
plot_materials.py | #!/usr/bin/env python
#coding:utf8
import os
import sys
import traceback
import numpy as np
import matplotlib
import matplotlib.cm
import matplotlib.pyplot as plt
import meep_materials
## Interesting materials:
##              was_plotted  has_model       See_also
## Si           OK           (poor)
## SiC          OK           OK
## SiO2         narrow       OK              Huber?
## TiO2         narrow       OK              Baumard77, ..
## InP          OK           (questionable)
## GaAs         OK-lim
## InSb         OK-lim
## Au           OK           OK
## FeS2         N/A
## Si-doped Si  plasmons: http://proj.ncku.edu.tw/research/articles/e/20090828/2.html
## SrTiO3       TODO
## Analytic Lorentz model (copied from meep_utils.py)
def analytic_eps(mat, freq):#{{{
complex_eps = mat.eps
for polariz in mat.pol:
complex_eps += polariz['sigma'] * polariz['omega']**2 / (polariz['omega']**2 - freq**2 - 1j*freq*polariz['gamma'])
return complex_eps # + sum(0)
#}}}
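## Hedged worked example (editor addition): analytic_eps() evaluated for a
## toy single-pole material; the pole parameters are illustrative only and
## do not come from the meep_materials database.
def _demo_lorentz_eps():
    class _ToyMat(object):
        eps = 2.0                                             # eps_infinity
        pol = [{'omega': 1e13, 'gamma': 1e11, 'sigma': 1.5}]  # one Lorentz pole
    return analytic_eps(_ToyMat(), np.array([5e12, 1e13, 2e13]))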
## Functions loading data from different format files
## Note: you must obtain the SOPRA and SCOUT databases from the web, if you want to use them
def load_SCOUT_permittivity(filename):#{{{
""" Reads the permittivity function from a given file with SCOUT binary format
The SCOUT database of materials is supplied with the SCOUT program and may be freely downloaded
from http://www.mtheiss.com/download/scout.zip
Different files use different units for the x-axis (179 files with "eV", 118 "micron", 49 "1/cm"), so
we have to build the frequency axis accordingly. The data were randomly verified against the SOPRA and luxpop data.
"""
## Open the file for binary access
f = open(filename, "rb")
## Load the number of data points, type of x axis and its boundaries | f.seek(160); x_axis_type = np.fromfile(f, dtype=np.uint8, count=1)[0]
print(x_axis_type)
f.seek(166); x_start, x_end = np.fromfile(f, dtype=np.float32, count=2)
print(x_start)
print(x_end)
## Load the n, k data
f.seek(174); raw_eps = np.fromfile(f, dtype=np.float32, count=datalength*2)
f.close
eps = raw_eps[::2] + 1j*raw_eps[1::2]
from scipy.constants import h, c, eV
if x_axis_type == 2: # 'eV'
freq = np.linspace(x_start*eV/h, x_end*eV/h, datalength)
elif x_axis_type == 3: # 'um'
wavelength = np.linspace(x_start*1e-6, x_end*1e-6, datalength)
freq = c/wavelength
elif x_axis_type == 0: # 'cm-1'
freq = np.linspace(x_start*100*c, x_end*100*c, datalength)
return freq, eps
#}}}
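## Hedged self-test sketch (editor addition): a minimal in-memory blob in the
## layout the reader above expects -- x-axis type byte at offset 160, two
## float32 bounds at 166, interleaved float32 (Re eps, Im eps) pairs from 174.
## The record-count field is not shown in this listing, so this only mirrors
## the offsets that are.
def _make_fake_scout_bytes(n_points=4):
    import struct
    buf = bytearray(174 + 8 * n_points)
    buf[160] = 2                                 # x axis unit: eV
    buf[166:174] = struct.pack('<ff', 1.0, 2.0)  # x_start, x_end
    for i in range(n_points):
        struct.pack_into('<ff', buf, 174 + 8 * i, 4.0, 0.1)  # eps = 4 + 0.1j
    return bytes(buf)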
def load_SOPRA_permittivity(filename):#{{{
data = []
with open(filename) as f:
for line in f.readlines():
if line[:5] == 'DATA1': data.append(map(lambda x: float(x), line.split('*')[2:5]))
wl, n, k = np.array(data).T
eps = ((n+1j*k)**2)[::-1]
freq = (2.998e8 / (wl*1e-9))[::-1]
return freq, eps
#}}}
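## Hedged parsing example (editor addition): one SOPRA DATA1 record in the
## shape the loader above expects -- '*'-separated fields with wavelength
## [nm], n and k in columns 2..4. The sample numbers are made up.
def _demo_parse_sopra_line(line='DATA1*1*632.8*3.882*0.019*'):
    wl_nm, n, k = [float(x) for x in line.split('*')[2:5]]
    return 2.998e8 / (wl_nm * 1e-9), (n + 1j * k) ** 2  # (freq [Hz], eps)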
def load_n_k_eps(filename):#{{{
lambda_angstr, n, k = np.loadtxt(filename, usecols=[0,1,2], unpack=True, comments=';')
eps = (n+1j*k)**2
freq = 2.997e8 / (lambda_angstr*1e-10)
return freq, n, k, eps
#}}}
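## Worked example (editor addition) for the conversion above: 6328 Angstrom
## = 632.8e-9 m, so freq = 2.997e8/632.8e-9 ~= 4.74e14 Hz; with n=1.5, k=0
## the permittivity is (1.5 + 0j)**2 = 2.25.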
def load_eps(filename):#{{{
## No loader body for this format was provided in this listing; raise
## explicitly rather than returning names that were never defined.
raise NotImplementedError("load_eps() is not implemented in this listing")
#}}}
## List of sources for given materials
## == TiO2 ==#{{{
TiO2_files = [
meep_materials.material_TiO2(where = None),
meep_materials.material_TiO2(where = None, extraordinary=1.), ## (rutile is anisotropic)
meep_materials.material_TiO2(where = None, extraordinary=0.),
"sopra/TIO2.MAT",
"sopra/TIO2B.MAT",
"luxpop/TiO2.nk",
"luxpop/TiO2-e.nk",
"luxpop/TiO2_llnl_cxro.nk",
"luxpop/TiO2-e_palik.nk",
"scout/TiO2 (amorph).b",
"scout/TiO2 I (Jellison).b",
"scout/TiO2 II (Jellison).b",
"scout/TiO2 II (Jellison).b",
"other/TiO2-Mounaix_polycryst.nk",
]
#}}}
## == SiC == #{{{
SiC_files = [
meep_materials.material_SiC(where = None),
"sopra/SIC.MAT",
"luxpop/SiC_llnl_cxro.nk",
"luxpop/SiC_osantowski.nk",
"luxpop/SiC_palik.nk",
"luxpop/SiC_windt.nk",
"luxpop/SiC_yanagihara.nk",
'scout/SiC (MIR).b',
'scout/SiC (NIR-UV).b',
#'scout_reverse_engineering/scout/SiC (simple infrared model).b'
]#}}}
## == SiO2 == #{{{
SiO2_files = [
'scout/SiO2 (MIR-VUV).b',
'scout/SiO2 [micron].b',
'scout/SiO2 (fused).b',
'sopra/SIO2.MAT',
#'kitamura/SiO2/Koike1989.n',
meep_materials.material_SiO2(where = None) ]
##SiO2_files += ['kitamura/SiO2/'+f for f in os.listdir('kitamura/SiO2/') if (f[-2:]=='.n' or f[-2:]=='.k')]
#}}}
## == Au == #{{{
Au_files = [
meep_materials.material_Au(),
'sopra/AU.MAT',
'scout/Au (J, & C,, L, & H,).b',
'scout/Au (JC).b',
'scout/Au (MQ).b',
'scout/Au [micron].b',
'scout/Au.b',
'scout/Au model.b',
]
#}}}
## == Si == #{{{
Si_files = [
meep_materials.material_Si_NIR(),
meep_materials.material_Si_MIR(),
'other/Si_Dai2003.k', 'other/Si_Dai2003.n',
'sopra/SI100_2.MAT',
'scout/Si (100).b',
'scout/Si (Aspnes).b',
'scout/Si (Vis-UV, Brendel model).b',
'scout/Si (cryst,).b',
'scout/Si (infrared).b',
'scout/Si (crystalline, MIR-VUV).b',
]
#}}}
## == InP == #{{{
InP_files = [
meep_materials.material_InP(),
'sopra/INP.MAT',
'scout/InP.b',
'scout/InP (IR model).b',
'scout/InP (Jellison).b',
]
#}}}
## == GaAs == #{{{
GaAs_files = [
meep_materials.material_GaAs(),
'sopra/GAAS.MAT',
'sopra/GAAS031T.MAT',
'scout/GaAs (100).b',
'scout/GaAs.b',
'scout/GaAs (31 deg C).b',
]
#}}}
## == InSb == #{{{
InSb_files = [
'sopra/INSB.MAT',
'scout/InSb (Jellison).b',
'scout/InSb.b',
]
#}}}
## == STO == #{{{
STO_files = [
'other/N_model_STO_300K.nk',
'other/N_STO_300K.nk',
'other/STO_Neville1972_300K.epsilon',
#'other/STO_Neville1972_090K.epsilon',
#'other/STO_Neville1972_004K.epsilon',
#meep_materials.material_STO_THz(),
meep_materials.material_STO(),
]
#}}}
## == Al2O3 == #{{{
Al2O3_files = [
meep_materials.material_Sapphire(),
meep_materials.material_Sapphire(ordinary=.66),
meep_materials.material_Sapphire(ordinary=0),
'other/N_Al2O3_c-cut.nk',
'other/N_model_Al2O3_c-cut.nk',
'scout/Al2O3.b',
'scout/Al2O3 (Palik).b',
'sopra/AL2O3.MAT',
'sopra/AL2O3P.MAT',
#'other/',
]
#}}}
## == Fe2O3 == #{{{
Fe2O3_files = [
'scout/Fe2O3.b',
]
#}}}
## == Fe3O4 == #{{{
Fe3O4_files = [
'scout/Fe3O4.b',
]
#}}}
## == Cu == #{{{
Cu_files = [
'scout/Cu.b',
]
#}}}
## == Ni == #{{{
Ni_files = [
'scout/Ni.b',
]
#}}}
## == ZnO == #{{{
ZnO_files = [
'scout/ZnO.b',
]
#}}}
## Dictionary of materials
materials = {
'TiO2': TiO2_files,
'SiC': SiC_files,
'SiO2': SiO2_files,
'Au': Au_files,
'Si': Si_files,
'InP': InP_files,
'GaAs': GaAs_files,
'InSb': InSb_files,
'STO': STO_files,
'Al2O3': Al2O3_files,
'Fe2O3': Fe2O3_files,
'Fe3O4': Fe3O4_files,
'Ni': Ni_files,
'ZnO': ZnO_files,
'Cu': Cu_files,
}
##and not (len(sys.argv)>2 and (sys.argv[2] == "--big"))
if len(sys.argv)>1 and (sys.argv[1] in materials.keys()):
data_sources = materials[sys.argv[1]]
output_file = sys.argv[1]
else:
print("Error: the first parameter may be one of the following materials:", materials.keys())
exit(1)
if len(sys.argv)>2 and (sys.argv[2] == "--big"):
output_file+="_big"; plt.figure(figsize=(16,16))
else:
plt.figure(figsize=(8,8))
freq_range=(1e11, 1e16)
for number, data_source in enumerate(data_sources):
try:
if type(data_source)==str:
plotlabel = data_source
## Select the file type to be loaded from
n, eps, k = None, None, None
print("Loading ", data_source)
if data_source.endswith('.b'):
freq, eps = load_SCOUT_permittivity(data_source)
n, k = (eps**.5).real, (eps**.5).imag
print(freq)
print(eps)
print(n)
print(k)
elif data_source.endswith('.MAT'):
freq, eps = load_SOPRA_permittivity(data_source)
n, k = (eps**.5).real, (eps**.5).imag
elif data_source.endswith('.epsilon'):
freq, eps = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
n, k = (eps**.5).real, (eps**.5).imag
print(freq, n)
elif data_source.endswith('.nk'):
freq, n, k, eps = load_n_k_eps(data_source)
elif data_source.endswith('.n'):
lambda_micron, n = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
freq = 2.997e8 / (lambda_micron*1e-6)
k, eps = None, None
elif data_source.endswith('.k'):
lambda_micron, k = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
freq = 2.997e8 / (lambda_micron*1e-6)
print(freq)
n, eps = None, None
## Read MEEP's material model
if hasattr(data_source, 'pol'):
freq = 10**np.arange(np.log10(freq_range[0]), np.log10(freq_range[1]), .003)
eps = analytic_eps(data_source, freq)
n = (analytic_eps(data_source, freq)**.5).real
k = (analytic_eps(data_source, freq)**.5).imag
plotlabel = "model " + getattr(data_source, 'shortname', data_source.name)
## Plot the data
if hasattr(data_source, 'pol'):
color = 'black'
else:
color = matplotlib.cm.hsv(float(number)/len(data_sources))
if eps is not None or (n is not None and k is not None):
if eps is None: eps=(n+1j*k)**2
plt.subplot(3,1,1)
print(" Plotting epsilon for '%s' with %d data points" % (plotlabel, len(eps)))
plt.plot(freq, eps+float(number)/100, color=color, marker='o', markersize=0, label=plotlabel)
plt.plot(freq, eps.imag+float(number)/100, color=color, marker='s', markersize=0, ls='--')
plt.xlim(freq_range);
plt.grid(True)
if n is not None:
plt.subplot(3,1,2)
print(" Plotting N for '%s' with %d data points" % (plotlabel, len(n)))
plt.plot(freq, n, color=color, marker='o', markersize=0, label=plotlabel)
plt.xlim(freq_range);
plt.grid(True)
if k is not None:
plt.subplot(3,1,3)
print(" Plotting k for '%s' with %d data points" % (plotlabel, len(k)))
plt.plot(freq, k, color=color, marker='o', markersize=0, label=plotlabel)
plt.xlim(freq_range);
plt.grid(True)
except:
print("WARNING: data from file '%s' could not be plotted due to error" % (plotlabel))
print(traceback.format_exc())
print(sys.exc_info())
plt.subplot(3,1,1); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("permittivity $\\epsilon_r'$"); plt.title(sys.argv[1])
plt.subplot(3,1,2); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("index of refraction $n$");
plt.subplot(3,1,3); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("index of absorption $k$");
plt.xlabel("Frequency $f$ [Hz]");
plt.savefig(output_file)
plt.show() | f.seek(151); datalength = np.fromfile(f, dtype=np.uint16, count=1)[0]
print(datalength) | random_line_split |
plot_materials.py | #!/usr/bin/env python
#coding:utf8
import os
import sys
import traceback
import numpy as np
import matplotlib
import matplotlib.cm
import matplotlib.pyplot as plt
import meep_materials
## Interesting materials:
## was_plotted has_model See_also
## Si OK (poor)
## SiC OK OK
## SiO2 narrow OK Huber?
## TiO2 narrow OK Baumard77, ..
## InP OK (questionable)
## GaAs OK-lim
## InSb OK-lim
## Au OK OK
## FeS2 N/A
## Si-doped Si plasmons: http://proj.ncku.edu.tw/research/articles/e/20090828/2.html
## SrTiO3 TODO
## Analytic Lorentz model (copied from meep_utils.py)
def analytic_eps(mat, freq):#{{{
complex_eps = mat.eps
for polariz in mat.pol:
complex_eps += polariz['sigma'] * polariz['omega']**2 / (polariz['omega']**2 - freq**2 - 1j*freq*polariz['gamma'])
return complex_eps # + sum(0)
#}}}
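## The function above evaluates a sum of Lorentz oscillators,
## eps(f) = eps_inf + sum_i sigma_i*omega_i**2 / (omega_i**2 - f**2 - 1j*f*gamma_i),
## matching the polarizability records stored in the meep_materials models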
## Functions loading data from different format files
## Note: you must obtain the SOPRA and SCOUT databases from the web, if you want to use them
def load_SCOUT_permittivity(filename):#{{{
""" Reads the permittivity function from a given file with SCOUT binary format
The SCOUT database of materials is supplied with the SCOUT program and may be freely downloaded
from http://www.mtheiss.com/download/scout.zip
Different files use different units for the x-axis (179 files with "eV", 118 "micron", 49 "1/cm"), so
we have to build the frequency axis accordingly. The data were randomly verified against the SOPRA and luxpop data.
"""
## Open the file for binary access
f = open(filename, "rb")
## Load the number of data points, type of x axis and its boundaries
f.seek(151); datalength = np.fromfile(f, dtype=np.uint16, count=1)[0]
print(datalength)
f.seek(160); x_axis_type = np.fromfile(f, dtype=np.uint8, count=1)[0]
print(x_axis_type)
f.seek(166); x_start, x_end = np.fromfile(f, dtype=np.float32, count=2)
print(x_start)
print(x_end)
## Load the n, k data
f.seek(174); raw_eps = np.fromfile(f, dtype=np.float32, count=datalength*2)
f.close()
eps = raw_eps[::2] + 1j*raw_eps[1::2]
from scipy.constants import h, c, eV
if x_axis_type == 2: # 'eV'
freq = np.linspace(x_start*eV/h, x_end*eV/h, datalength)
elif x_axis_type == 3: # 'um'
wavelength = np.linspace(x_start*1e-6, x_end*1e-6, datalength)
freq = c/wavelength
elif x_axis_type == 0: # 'cm-1'
freq = np.linspace(x_start*100*c, x_end*100*c, datalength)
return freq, eps
#}}}
def load_SOPRA_permittivity(filename):#{{{
data = []
with open(filename) as f:
for line in f.readlines():
if line[:5] == 'DATA1': data.append([float(x) for x in line.split('*')[2:5]])
wl, n, k = np.array(data).T
eps = ((n+1j*k)**2)[::-1]
freq = (2.998e8 / (wl*1e-9))[::-1]
return freq, eps
#}}}
def load_n_k_eps(filename):#{{{
lambda_angstr, n, k = np.loadtxt(filename, usecols=[0,1,2], unpack=True, comments=';')
eps = (n+1j*k)**2
freq = 2.997e8 / (lambda_angstr*1e-10)
return freq, n, k, eps
#}}}
def load_eps(filename):#{{{
## Minimal implementation (assumption: two whitespace-separated columns with
## frequency [Hz] and permittivity, mirroring the inline '.epsilon' loader below)
freq, eps = np.loadtxt(filename, usecols=[0,1], unpack=True, comments='#')
return freq, eps
#}}}
## List of sources for given materials
## == TiO2 ==#{{{
TiO2_files = [
meep_materials.material_TiO2(where = None),
meep_materials.material_TiO2(where = None, extraordinary=1.), ## (rutile is anisotropic)
meep_materials.material_TiO2(where = None, extraordinary=0.),
"sopra/TIO2.MAT",
"sopra/TIO2B.MAT",
"luxpop/TiO2.nk",
"luxpop/TiO2-e.nk",
"luxpop/TiO2_llnl_cxro.nk",
"luxpop/TiO2-e_palik.nk",
"scout/TiO2 (amorph).b",
"scout/TiO2 I (Jellison).b",
"scout/TiO2 II (Jellison).b",
"scout/TiO2 II (Jellison).b",
"other/TiO2-Mounaix_polycryst.nk",
]
#}}}
## == SiC == #{{{
SiC_files = [
meep_materials.material_SiC(where = None),
"sopra/SIC.MAT",
"luxpop/SiC_llnl_cxro.nk",
"luxpop/SiC_osantowski.nk",
"luxpop/SiC_palik.nk",
"luxpop/SiC_windt.nk",
"luxpop/SiC_yanagihara.nk",
'scout/SiC (MIR).b',
'scout/SiC (NIR-UV).b',
#'scout_reverse_engineering/scout/SiC (simple infrared model).b'
]#}}}
## == SiO2 == #{{{
SiO2_files = [
'scout/SiO2 (MIR-VUV).b',
'scout/SiO2 [micron].b',
'scout/SiO2 (fused).b',
'sopra/SIO2.MAT',
#'kitamura/SiO2/Koike1989.n',
meep_materials.material_SiO2(where = None) ]
##SiO2_files += ['kitamura/SiO2/'+f for f in os.listdir('kitamura/SiO2/') if (f[-2:]=='.n' or f[-2:]=='.k')]
#}}}
## == Au == #{{{
Au_files = [
meep_materials.material_Au(),
'sopra/AU.MAT',
'scout/Au (J, & C,, L, & H,).b',
'scout/Au (JC).b',
'scout/Au (MQ).b',
'scout/Au [micron].b',
'scout/Au.b',
'scout/Au model.b',
]
#}}}
## == Si == #{{{
Si_files = [
meep_materials.material_Si_NIR(),
meep_materials.material_Si_MIR(),
'other/Si_Dai2003.k', 'other/Si_Dai2003.n',
'sopra/SI100_2.MAT',
'scout/Si (100).b',
'scout/Si (Aspnes).b',
'scout/Si (Vis-UV, Brendel model).b',
'scout/Si (cryst,).b',
'scout/Si (infrared).b',
'scout/Si (crystalline, MIR-VUV).b',
]
#}}}
## == InP == #{{{
InP_files = [
meep_materials.material_InP(),
'sopra/INP.MAT',
'scout/InP.b',
'scout/InP (IR model).b',
'scout/InP (Jellison).b',
]
#}}}
## == GaAs == #{{{
GaAs_files = [
meep_materials.material_GaAs(),
'sopra/GAAS.MAT',
'sopra/GAAS031T.MAT',
'scout/GaAs (100).b',
'scout/GaAs.b',
'scout/GaAs (31 deg C).b',
]
#}}}
## == InSb == #{{{
InSb_files = [
'sopra/INSB.MAT',
'scout/InSb (Jellison).b',
'scout/InSb.b',
]
#}}}
## == STO == #{{{
STO_files = [
'other/N_model_STO_300K.nk',
'other/N_STO_300K.nk',
'other/STO_Neville1972_300K.epsilon',
#'other/STO_Neville1972_090K.epsilon',
#'other/STO_Neville1972_004K.epsilon',
#meep_materials.material_STO_THz(),
meep_materials.material_STO(),
]
#}}}
## == Al2O3 == #{{{
Al2O3_files = [
meep_materials.material_Sapphire(),
meep_materials.material_Sapphire(ordinary=.66),
meep_materials.material_Sapphire(ordinary=0),
'other/N_Al2O3_c-cut.nk',
'other/N_model_Al2O3_c-cut.nk',
'scout/Al2O3.b',
'scout/Al2O3 (Palik).b',
'sopra/AL2O3.MAT',
'sopra/AL2O3P.MAT',
#'other/',
]
#}}}
## == Fe2O3 == #{{{
Fe2O3_files = [
'scout/Fe2O3.b',
]
#}}}
## == Fe3O4 == #{{{
Fe3O4_files = [
'scout/Fe3O4.b',
]
#}}}
## == Cu == #{{{
Cu_files = [
'scout/Cu.b',
]
#}}}
## == Ni == #{{{
Ni_files = [
'scout/Ni.b',
]
#}}}
## == ZnO == #{{{
ZnO_files = [
'scout/ZnO.b',
]
#}}}
## Dictionary of materials
materials = {
'TiO2': TiO2_files,
'SiC': SiC_files,
'SiO2': SiO2_files,
'Au': Au_files,
'Si': Si_files,
'InP': InP_files,
'GaAs': GaAs_files,
'InSb': InSb_files,
'STO': STO_files,
'Al2O3': Al2O3_files,
'Fe2O3': Fe2O3_files,
'Fe3O4': Fe3O4_files,
'Ni': Ni_files,
'ZnO': ZnO_files,
'Cu': Cu_files,
}
##and not (len(sys.argv)>2 and (sys.argv[2] == "--big"))
if len(sys.argv)>1 and (sys.argv[1] in materials.keys()):
data_sources = materials[sys.argv[1]]
output_file = sys.argv[1]
else:
print("Error: the first parameter may be one of the following materials:", materials.keys())
exit(1)
if len(sys.argv)>2 and (sys.argv[2] == "--big"):
output_file+="_big"; plt.figure(figsize=(16,16))
else:
plt.figure(figsize=(8,8))
freq_range=(1e11, 1e16)
for number, data_source in enumerate(data_sources):
try:
if type(data_source)==str:
plotlabel = data_source
## Select the file type to be loaded from
n, eps, k = None, None, None
print("Loading ", data_source)
if data_source.endswith('.b'):
freq, eps = load_SCOUT_permittivity(data_source)
n, k = (eps**.5).real, (eps**.5).imag
print(freq)
print(eps)
print(n)
print(k)
elif data_source.endswith('.MAT'):
freq, eps = load_SOPRA_permittivity(data_source)
n, k = (eps**.5).real, (eps**.5).imag
elif data_source.endswith('.epsilon'):
freq, eps = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
n, k = (eps**.5).real, (eps**.5).imag
print(freq, n)
elif data_source.endswith('.nk'):
freq, n, k, eps = load_n_k_eps(data_source)
elif data_source.endswith('.n'):
lambda_micron, n = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
freq = 2.997e8 / (lambda_micron*1e-6)
k, eps = None, None
elif data_source.endswith('.k'):
lambda_micron, k = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
freq = 2.997e8 / (lambda_micron*1e-6)
print(freq)
n, eps = None, None
## Read MEEP's material model
if hasattr(data_source, 'pol'):
freq = 10**np.arange(np.log10(freq_range[0]), np.log10(freq_range[1]), .003)
eps = analytic_eps(data_source, freq)
n = (analytic_eps(data_source, freq)**.5).real
k = (analytic_eps(data_source, freq)**.5).imag
plotlabel = "model " + getattr(data_source, 'shortname', data_source.name)
## Plot the data
if hasattr(data_source, 'pol'):
color = 'black'
else:
color = matplotlib.cm.hsv(float(number)/len(data_sources))
if eps is not None or (n is not None and k is not None):
if eps is None: eps=(n+1j*k)**2
plt.subplot(3,1,1)
print(" Plotting epsilon for '%s' with %d data points" % (plotlabel, len(eps)))
plt.plot(freq, eps+float(number)/100, color=color, marker='o', markersize=0, label=plotlabel)
plt.plot(freq, eps.imag+float(number)/100, color=color, marker='s', markersize=0, ls='--')
plt.xlim(freq_range);
plt.grid(True)
if n is not None:
plt.subplot(3,1,2)
print(" Plotting N for '%s' with %d data points" % (plotlabel, len(n)))
plt.plot(freq, n, color=color, marker='o', markersize=0, label=plotlabel)
plt.xlim(freq_range);
plt.grid(True)
if k is not None:
|
except:
print("WARNING: data from file '%s' could not be plotted due to error" % (plotlabel))
print(traceback.format_exc())
print(sys.exc_info())
plt.subplot(3,1,1); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("permittivity $\\epsilon_r'$"); plt.title(sys.argv[1])
plt.subplot(3,1,2); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("index of refraction $n$");
plt.subplot(3,1,3); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("index of absorption $k$");
plt.xlabel("Frequency $f$ [Hz]");
plt.savefig(output_file)
plt.show()
| plt.subplot(3,1,3)
print(" Plotting k for '%s' with %d data points" % (plotlabel, len(k)))
plt.plot(freq, k, color=color, marker='o', markersize=0, label=plotlabel)
plt.xlim(freq_range);
plt.grid(True) | conditional_block |
plot_materials.py | #!/usr/bin/env python
#coding:utf8
import os
import sys
import traceback
import numpy as np
import matplotlib
import matplotlib.cm
import matplotlib.pyplot as plt
import meep_materials
## Interesting materials:
## was_plotted has_model See_also
## Si OK (poor)
## SiC OK OK
## SiO2 narrow OK Huber?
## TiO2 narrow OK Baumard77, ..
## InP OK (questionable)
## GaAs OK-lim
## InSb OK-lim
## Au OK OK
## FeS2 N/A
## Si-doped Si plasmons: http://proj.ncku.edu.tw/research/articles/e/20090828/2.html
## SrTiO3 TODO
## Analytic Lorentz model (copied from meep_utils.py)
def analytic_eps(mat, freq):#{{{
complex_eps = mat.eps
for polariz in mat.pol:
complex_eps += polariz['sigma'] * polariz['omega']**2 / (polariz['omega']**2 - freq**2 - 1j*freq*polariz['gamma'])
return complex_eps # + sum(0)
#}}}
## Functions loading data from different format files
## Note: you must obtain the SOPRA and SCOUT databases from the web, if you want to use them
def | (filename):#{{{
""" Reads the permittivity function from a given file with SCOUT binary format
The SCOUT database of materials is supplied with the SCOUT program and may be freely downloaded
from http://www.mtheiss.com/download/scout.zip
Different files use different units for the x-axis (179 files with "eV", 118 "micron", 49 "1/cm"), so
we have to build the frequency axis accordingly. The data were randomly verified against the SOPRA and luxpop data.
"""
## Open the file for binary access
f = open(filename, "rb")
## Load the number of data points, type of x axis and its boundaries
f.seek(151); datalength = np.fromfile(f, dtype=np.uint16, count=1)[0]
print(datalength)
f.seek(160); x_axis_type = np.fromfile(f, dtype=np.uint8, count=1)[0]
print(x_axis_type)
f.seek(166); x_start, x_end = np.fromfile(f, dtype=np.float32, count=2)
print(x_start)
print(x_end)
## Load the n, k data
f.seek(174); raw_eps = np.fromfile(f, dtype=np.float32, count=datalength*2)
f.close()
eps = raw_eps[::2] + 1j*raw_eps[1::2]
from scipy.constants import h, c, eV
if x_axis_type == 2: # 'eV'
freq = np.linspace(x_start*eV/h, x_end*eV/h, datalength)
elif x_axis_type == 3: # 'um'
wavelength = np.linspace(x_start*1e-6, x_end*1e-6, datalength)
freq = c/wavelength
elif x_axis_type == 0: # 'cm-1'
freq = np.linspace(x_start*100*c, x_end*100*c, datalength)
return freq, eps
#}}}
def load_SOPRA_permittivity(filename):#{{{
data = []
with open(filename) as f:
for line in f.readlines():
if line[:5] == 'DATA1': data.append([float(x) for x in line.split('*')[2:5]])
wl, n, k = np.array(data).T
eps = ((n+1j*k)**2)[::-1]
freq = (2.998e8 / (wl*1e-9))[::-1]
return freq, eps
#}}}
def load_n_k_eps(filename):#{{{
lambda_angstr, n, k = np.loadtxt(filename, usecols=[0,1,2], unpack=True, comments=';')
eps = (n+1j*k)**2
freq = 2.997e8 / (lambda_angstr*1e-10)
return freq, n, k, eps
#}}}
def load_eps(filename):#{{{
## Minimal implementation (assumption: two whitespace-separated columns with
## frequency [Hz] and permittivity, mirroring the inline '.epsilon' loader below)
freq, eps = np.loadtxt(filename, usecols=[0,1], unpack=True, comments='#')
return freq, eps
#}}}
## List of sources for given materials
## == TiO2 ==#{{{
TiO2_files = [
meep_materials.material_TiO2(where = None),
meep_materials.material_TiO2(where = None, extraordinary=1.), ## (rutile is anisotropic)
meep_materials.material_TiO2(where = None, extraordinary=0.),
"sopra/TIO2.MAT",
"sopra/TIO2B.MAT",
"luxpop/TiO2.nk",
"luxpop/TiO2-e.nk",
"luxpop/TiO2_llnl_cxro.nk",
"luxpop/TiO2-e_palik.nk",
"scout/TiO2 (amorph).b",
"scout/TiO2 I (Jellison).b",
"scout/TiO2 II (Jellison).b",
"scout/TiO2 II (Jellison).b",
"other/TiO2-Mounaix_polycryst.nk",
]
#}}}
## == SiC == #{{{
SiC_files = [
meep_materials.material_SiC(where = None),
"sopra/SIC.MAT",
"luxpop/SiC_llnl_cxro.nk",
"luxpop/SiC_osantowski.nk",
"luxpop/SiC_palik.nk",
"luxpop/SiC_windt.nk",
"luxpop/SiC_yanagihara.nk",
'scout/SiC (MIR).b',
'scout/SiC (NIR-UV).b',
#'scout_reverse_engineering/scout/SiC (simple infrared model).b'
]#}}}
## == SiO2 == #{{{
SiO2_files = [
'scout/SiO2 (MIR-VUV).b',
'scout/SiO2 [micron].b',
'scout/SiO2 (fused).b',
'sopra/SIO2.MAT',
#'kitamura/SiO2/Koike1989.n',
meep_materials.material_SiO2(where = None) ]
##SiO2_files += ['kitamura/SiO2/'+f for f in os.listdir('kitamura/SiO2/') if (f[-2:]=='.n' or f[-2:]=='.k')]
#}}}
## == Au == #{{{
Au_files = [
meep_materials.material_Au(),
'sopra/AU.MAT',
'scout/Au (J, & C,, L, & H,).b',
'scout/Au (JC).b',
'scout/Au (MQ).b',
'scout/Au [micron].b',
'scout/Au.b',
'scout/Au model.b',
]
#}}}
## == Si == #{{{
Si_files = [
meep_materials.material_Si_NIR(),
meep_materials.material_Si_MIR(),
'other/Si_Dai2003.k', 'other/Si_Dai2003.n',
'sopra/SI100_2.MAT',
'scout/Si (100).b',
'scout/Si (Aspnes).b',
'scout/Si (Vis-UV, Brendel model).b',
'scout/Si (cryst,).b',
'scout/Si (infrared).b',
'scout/Si (crystalline, MIR-VUV).b',
]
#}}}
## == InP == #{{{
InP_files = [
meep_materials.material_InP(),
'sopra/INP.MAT',
'scout/InP.b',
'scout/InP (IR model).b',
'scout/InP (Jellison).b',
]
#}}}
## == GaAs == #{{{
GaAs_files = [
meep_materials.material_GaAs(),
'sopra/GAAS.MAT',
'sopra/GAAS031T.MAT',
'scout/GaAs (100).b',
'scout/GaAs.b',
'scout/GaAs (31 deg C).b',
]
#}}}
## == InSb == #{{{
InSb_files = [
'sopra/INSB.MAT',
'scout/InSb (Jellison).b',
'scout/InSb.b',
]
#}}}
## == STO == #{{{
STO_files = [
'other/N_model_STO_300K.nk',
'other/N_STO_300K.nk',
'other/STO_Neville1972_300K.epsilon',
#'other/STO_Neville1972_090K.epsilon',
#'other/STO_Neville1972_004K.epsilon',
#meep_materials.material_STO_THz(),
meep_materials.material_STO(),
]
#}}}
## == Al2O3 == #{{{
Al2O3_files = [
meep_materials.material_Sapphire(),
meep_materials.material_Sapphire(ordinary=.66),
meep_materials.material_Sapphire(ordinary=0),
'other/N_Al2O3_c-cut.nk',
'other/N_model_Al2O3_c-cut.nk',
'scout/Al2O3.b',
'scout/Al2O3 (Palik).b',
'sopra/AL2O3.MAT',
'sopra/AL2O3P.MAT',
#'other/',
]
#}}}
## == Fe2O3 == #{{{
Fe2O3_files = [
'scout/Fe2O3.b',
]
#}}}
## == Fe3O4 == #{{{
Fe3O4_files = [
'scout/Fe3O4.b',
]
#}}}
## == Cu == #{{{
Cu_files = [
'scout/Cu.b',
]
#}}}
## == Ni == #{{{
Ni_files = [
'scout/Ni.b',
]
#}}}
## == ZnO == #{{{
ZnO_files = [
'scout/ZnO.b',
]
#}}}
## Dictionary of materials
materials = {
'TiO2': TiO2_files,
'SiC': SiC_files,
'SiO2': SiO2_files,
'Au': Au_files,
'Si': Si_files,
'InP': InP_files,
'GaAs': GaAs_files,
'InSb': InSb_files,
'STO': STO_files,
'Al2O3': Al2O3_files,
'Fe2O3': Fe2O3_files,
'Fe3O4': Fe3O4_files,
'Ni': Ni_files,
'ZnO': ZnO_files,
'Cu': Cu_files,
}
##and not (len(sys.argv)>2 and (sys.argv[2] == "--big"))
if len(sys.argv)>1 and (sys.argv[1] in materials.keys()):
data_sources = materials[sys.argv[1]]
output_file = sys.argv[1]
else:
print("Error: the first parameter may be one of the following materials:", materials.keys())
exit(1)
if len(sys.argv)>2 and (sys.argv[2] == "--big"):
output_file+="_big"; plt.figure(figsize=(16,16))
else:
plt.figure(figsize=(8,8))
freq_range=(1e11, 1e16)
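## Analytic material models are sampled on a log-spaced grid over freq_range
## (10**np.arange with a 0.003-decade step); tabulated files keep their own grid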
for number, data_source in enumerate(data_sources):
try:
if type(data_source)==str:
plotlabel = data_source
## Select the file type to be loaded from
n, eps, k = None, None, None
print("Loading ", data_source)
if data_source.endswith('.b'):
freq, eps = load_SCOUT_permittivity(data_source)
n, k = (eps**.5).real, (eps**.5).imag
print(freq)
print(eps)
print(n)
print(k)
elif data_source.endswith('.MAT'):
freq, eps = load_SOPRA_permittivity(data_source)
n, k = (eps**.5).real, (eps**.5).imag
elif data_source.endswith('.epsilon'):
freq, eps = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
n, k = (eps**.5).real, (eps**.5).imag
print(freq, n)
elif data_source.endswith('.nk'):
freq, n, k, eps = load_n_k_eps(data_source)
elif data_source.endswith('.n'):
lambda_micron, n = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
freq = 2.997e8 / (lambda_micron*1e-6)
k, eps = None, None
elif data_source.endswith('.k'):
lambda_micron, k = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
freq = 2.997e8 / (lambda_micron*1e-6)
print(freq)
n, eps = None, None
## Read MEEP's material model
if hasattr(data_source, 'pol'):
freq = 10**np.arange(np.log10(freq_range[0]), np.log10(freq_range[1]), .003)
eps = analytic_eps(data_source, freq)
n = (analytic_eps(data_source, freq)**.5).real
k = (analytic_eps(data_source, freq)**.5).imag
plotlabel = "model " + getattr(data_source, 'shortname', data_source.name)
## Plot the data
if hasattr(data_source, 'pol'):
color = 'black'
else:
color = matplotlib.cm.hsv(float(number)/len(data_sources))
if eps is not None or (n is not None and k is not None):
if eps is None: eps=(n+1j*k)**2
plt.subplot(3,1,1)
print(" Plotting epsilon for '%s' with %d data points" % (plotlabel, len(eps)))
plt.plot(freq, eps+float(number)/100, color=color, marker='o', markersize=0, label=plotlabel)
plt.plot(freq, eps.imag+float(number)/100, color=color, marker='s', markersize=0, ls='--')
plt.xlim(freq_range);
plt.grid(True)
if n is not None:
plt.subplot(3,1,2)
print(" Plotting N for '%s' with %d data points" % (plotlabel, len(n)))
plt.plot(freq, n, color=color, marker='o', markersize=0, label=plotlabel)
plt.xlim(freq_range);
plt.grid(True)
if k is not None:
plt.subplot(3,1,3)
print(" Plotting k for '%s' with %d data points" % (plotlabel, len(k)))
plt.plot(freq, k, color=color, marker='o', markersize=0, label=plotlabel)
plt.xlim(freq_range);
plt.grid(True)
except:
print("WARNING: data from file '%s' could not be plotted due to error" % (plotlabel))
print(traceback.format_exc())
print(sys.exc_info())
plt.subplot(3,1,1); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("permittivity $\\epsilon_r'$"); plt.title(sys.argv[1])
plt.subplot(3,1,2); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("index of refraction $n$");
plt.subplot(3,1,3); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("index of absorption $k$");
plt.xlabel("Frequency $f$ [Hz]");
plt.savefig(output_file)
plt.show()
| load_SCOUT_permittivity | identifier_name |
plot_materials.py | #!/usr/bin/env python
#coding:utf8
import os
import sys
import traceback
import numpy as np
import matplotlib
import matplotlib.cm
import matplotlib.pyplot as plt
import meep_materials
## Interesting materials:
## was_plotted has_model See_also
## Si OK (poor)
## SiC OK OK
## SiO2 narrow OK Huber?
## TiO2 narrow OK Baumard77, ..
## InP OK (questionable)
## GaAs OK-lim
## InSb OK-lim
## Au OK OK
## FeS2 N/A
## Si-doped Si plasmons: http://proj.ncku.edu.tw/research/articles/e/20090828/2.html
## SrTiO3 TODO
## Analytic Lorentz model (copied from meep_utils.py)
def analytic_eps(mat, freq):#{{{
|
#}}}
## Functions loading data from different format files
## Note: you must obtain the SOPRA and SCOUT databases from the web, if you want to use them
def load_SCOUT_permittivity(filename):#{{{
""" Reads the permittivity function from a given file with SCOUT binary format
The SCOUT database of materials is supplied with the SCOUT program and may be freely downloaded
from http://www.mtheiss.com/download/scout.zip
Different files use different units for the x-axis (179 files with "eV", 118 "micron", 49 "1/cm"), so
we have to build the frequency axis accordingly. The data were randomly verified against the SOPRA and luxpop data.
"""
## Open the file for binary access
f = open(filename, "rb")
## Load the number of data points, type of x axis and its boundaries
f.seek(151); datalength = np.fromfile(f, dtype=np.uint16, count=1)[0]
print(datalength)
f.seek(160); x_axis_type = np.fromfile(f, dtype=np.uint8, count=1)[0]
print(x_axis_type)
f.seek(166); x_start, x_end = np.fromfile(f, dtype=np.float32, count=2)
print(x_start)
print(x_end)
## Load the n, k data
f.seek(174); raw_eps = np.fromfile(f, dtype=np.float32, count=datalength*2)
f.close()
eps = raw_eps[::2] + 1j*raw_eps[1::2]
from scipy.constants import h, c, eV
if x_axis_type == 2: # 'eV'
freq = np.linspace(x_start*eV/h, x_end*eV/h, datalength)
elif x_axis_type == 3: # 'um'
wavelength = np.linspace(x_start*1e-6, x_end*1e-6, datalength)
freq = c/wavelength
elif x_axis_type == 0: # 'cm-1'
freq = np.linspace(x_start*100*c, x_end*100*c, datalength)
return freq, eps
#}}}
def load_SOPRA_permittivity(filename):#{{{
data = []
with open(filename) as f:
for line in f.readlines():
if line[:5] == 'DATA1': data.append([float(x) for x in line.split('*')[2:5]])
wl, n, k = np.array(data).T
eps = ((n+1j*k)**2)[::-1]
freq = (2.998e8 / (wl*1e-9))[::-1]
return freq, eps
#}}}
def load_n_k_eps(filename):#{{{
lambda_angstr, n, k = np.loadtxt(filename, usecols=[0,1,2], unpack=True, comments=';')
eps = (n+1j*k)**2
freq = 2.997e8 / (lambda_angstr*1e-10)
return freq, n, k, eps
#}}}
def load_eps(filename):#{{{
## Minimal implementation (assumption: two whitespace-separated columns with
## frequency [Hz] and permittivity, mirroring the inline '.epsilon' loader below)
freq, eps = np.loadtxt(filename, usecols=[0,1], unpack=True, comments='#')
return freq, eps
#}}}
## List of sources for given materials
## == TiO2 ==#{{{
TiO2_files = [
meep_materials.material_TiO2(where = None),
meep_materials.material_TiO2(where = None, extraordinary=1.), ## (rutile is anisotropic)
meep_materials.material_TiO2(where = None, extraordinary=0.),
"sopra/TIO2.MAT",
"sopra/TIO2B.MAT",
"luxpop/TiO2.nk",
"luxpop/TiO2-e.nk",
"luxpop/TiO2_llnl_cxro.nk",
"luxpop/TiO2-e_palik.nk",
"scout/TiO2 (amorph).b",
"scout/TiO2 I (Jellison).b",
"scout/TiO2 II (Jellison).b",
"scout/TiO2 II (Jellison).b",
"other/TiO2-Mounaix_polycryst.nk",
]
#}}}
## == SiC == #{{{
SiC_files = [
meep_materials.material_SiC(where = None),
"sopra/SIC.MAT",
"luxpop/SiC_llnl_cxro.nk",
"luxpop/SiC_osantowski.nk",
"luxpop/SiC_palik.nk",
"luxpop/SiC_windt.nk",
"luxpop/SiC_yanagihara.nk",
'scout/SiC (MIR).b',
'scout/SiC (NIR-UV).b',
#'scout_reverse_engineering/scout/SiC (simple infrared model).b'
]#}}}
## == SiO2 == #{{{
SiO2_files = [
'scout/SiO2 (MIR-VUV).b',
'scout/SiO2 [micron].b',
'scout/SiO2 (fused).b',
'sopra/SIO2.MAT',
#'kitamura/SiO2/Koike1989.n',
meep_materials.material_SiO2(where = None) ]
##SiO2_files += ['kitamura/SiO2/'+f for f in os.listdir('kitamura/SiO2/') if (f[-2:]=='.n' or f[-2:]=='.k')]
#}}}
## == Au == #{{{
Au_files = [
meep_materials.material_Au(),
'sopra/AU.MAT',
'scout/Au (J, & C,, L, & H,).b',
'scout/Au (JC).b',
'scout/Au (MQ).b',
'scout/Au [micron].b',
'scout/Au.b',
'scout/Au model.b',
]
#}}}
## == Si == #{{{
Si_files = [
meep_materials.material_Si_NIR(),
meep_materials.material_Si_MIR(),
'other/Si_Dai2003.k', 'other/Si_Dai2003.n',
'sopra/SI100_2.MAT',
'scout/Si (100).b',
'scout/Si (Aspnes).b',
'scout/Si (Vis-UV, Brendel model).b',
'scout/Si (cryst,).b',
'scout/Si (infrared).b',
'scout/Si (crystalline, MIR-VUV).b',
]
#}}}
## == InP == #{{{
InP_files = [
meep_materials.material_InP(),
'sopra/INP.MAT',
'scout/InP.b',
'scout/InP (IR model).b',
'scout/InP (Jellison).b',
]
#}}}
## == GaAs == #{{{
GaAs_files = [
meep_materials.material_GaAs(),
'sopra/GAAS.MAT',
'sopra/GAAS031T.MAT',
'scout/GaAs (100).b',
'scout/GaAs.b',
'scout/GaAs (31 deg C).b',
]
#}}}
## == InSb == #{{{
InSb_files = [
'sopra/INSB.MAT',
'scout/InSb (Jellison).b',
'scout/InSb.b',
]
#}}}
## == STO == #{{{
STO_files = [
'other/N_model_STO_300K.nk',
'other/N_STO_300K.nk',
'other/STO_Neville1972_300K.epsilon',
#'other/STO_Neville1972_090K.epsilon',
#'other/STO_Neville1972_004K.epsilon',
#meep_materials.material_STO_THz(),
meep_materials.material_STO(),
]
#}}}
## == Al2O3 == #{{{
Al2O3_files = [
meep_materials.material_Sapphire(),
meep_materials.material_Sapphire(ordinary=.66),
meep_materials.material_Sapphire(ordinary=0),
'other/N_Al2O3_c-cut.nk',
'other/N_model_Al2O3_c-cut.nk',
'scout/Al2O3.b',
'scout/Al2O3 (Palik).b',
'sopra/AL2O3.MAT',
'sopra/AL2O3P.MAT',
#'other/',
]
#}}}
## == Fe2O3 == #{{{
Fe2O3_files = [
'scout/Fe2O3.b',
]
#}}}
## == Fe3O4 == #{{{
Fe3O4_files = [
'scout/Fe3O4.b',
]
#}}}
## == Cu == #{{{
Cu_files = [
'scout/Cu.b',
]
#}}}
## == Ni == #{{{
Ni_files = [
'scout/Ni.b',
]
#}}}
## == ZnO == #{{{
ZnO_files = [
'scout/ZnO.b',
]
#}}}
## Dictionary of materials
materials = {
'TiO2': TiO2_files,
'SiC': SiC_files,
'SiO2': SiO2_files,
'Au': Au_files,
'Si': Si_files,
'InP': InP_files,
'GaAs': GaAs_files,
'InSb': InSb_files,
'STO': STO_files,
'Al2O3': Al2O3_files,
'Fe2O3': Fe2O3_files,
'Fe3O4': Fe3O4_files,
'Ni': Ni_files,
'ZnO': ZnO_files,
'Cu': Cu_files,
}
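## Command-line interface: the first argument selects one key from the materials
## dict above; an optional second argument "--big" switches to a 16x16 inch figure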
##and not (len(sys.argv)>2 and (sys.argv[2] == "--big"))
if len(sys.argv)>1 and (sys.argv[1] in materials.keys()):
data_sources = materials[sys.argv[1]]
output_file = sys.argv[1]
else:
print("Error: the first parameter may be one of the following materials:", materials.keys())
exit(1)
if len(sys.argv)>2 and (sys.argv[2] == "--big"):
output_file+="_big"; plt.figure(figsize=(16,16))
else:
plt.figure(figsize=(8,8))
freq_range=(1e11, 1e16)
for number, data_source in enumerate(data_sources):
try:
if type(data_source)==str:
plotlabel = data_source
## Select the file type to be loaded from
n, eps, k = None, None, None
print("Loading ", data_source)
if data_source.endswith('.b'):
freq, eps = load_SCOUT_permittivity(data_source)
n, k = (eps**.5).real, (eps**.5).imag
print(freq)
print(eps)
print(n)
print(k)
elif data_source.endswith('.MAT'):
freq, eps = load_SOPRA_permittivity(data_source)
n, k = (eps**.5).real, (eps**.5).imag
elif data_source.endswith('.epsilon'):
freq, eps = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
n, k = (eps**.5).real, (eps**.5).imag
print(freq, n)
elif data_source.endswith('.nk'):
freq, n, k, eps = load_n_k_eps(data_source)
elif data_source.endswith('.n'):
lambda_micron, n = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
freq = 2.997e8 / (lambda_micron*1e-6)
k, eps = None, None
elif data_source.endswith('.k'):
lambda_micron, k = np.loadtxt(data_source, usecols=[0,1], unpack=True, comments='#')
freq = 2.997e8 / (lambda_micron*1e-6)
print(freq)
n, eps = None, None
## Read MEEP's material model
if hasattr(data_source, 'pol'):
freq = 10**np.arange(np.log10(freq_range[0]), np.log10(freq_range[1]), .003)
eps = analytic_eps(data_source, freq)
n = (analytic_eps(data_source, freq)**.5).real
k = (analytic_eps(data_source, freq)**.5).imag
plotlabel = "model " + getattr(data_source, 'shortname', data_source.name)
## Plot the data
if hasattr(data_source, 'pol'):
color = 'black'
else:
color = matplotlib.cm.hsv(float(number)/len(data_sources))
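## Each dataset is offset vertically by number/100 in the permittivity panel
## so that otherwise overlapping curves remain distinguishable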
if eps is not None or (n is not None and k is not None):
if eps is None: eps=(n+1j*k)**2
plt.subplot(3,1,1)
print(" Plotting epsilon for '%s' with %d data points" % (plotlabel, len(eps)))
plt.plot(freq, eps+float(number)/100, color=color, marker='o', markersize=0, label=plotlabel)
plt.plot(freq, eps.imag+float(number)/100, color=color, marker='s', markersize=0, ls='--')
plt.xlim(freq_range);
plt.grid(True)
if n is not None:
plt.subplot(3,1,2)
print(" Plotting N for '%s' with %d data points" % (plotlabel, len(n)))
plt.plot(freq, n, color=color, marker='o', markersize=0, label=plotlabel)
plt.xlim(freq_range);
plt.grid(True)
if k is not None:
plt.subplot(3,1,3)
print(" Plotting k for '%s' with %d data points" % (plotlabel, len(k)))
plt.plot(freq, k, color=color, marker='o', markersize=0, label=plotlabel)
plt.xlim(freq_range);
plt.grid(True)
except:
print("WARNING: data from file '%s' could not be plotted due to error" % (plotlabel))
print(traceback.format_exc())
print(sys.exc_info())
plt.subplot(3,1,1); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("permittivity $\\epsilon_r'$"); plt.title(sys.argv[1])
plt.subplot(3,1,2); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("index of refraction $n$");
plt.subplot(3,1,3); plt.legend(loc=(0,0),prop={'size':6}); plt.ylabel("index of absorption $k$");
plt.xlabel("Frequency $f$ [Hz]");
plt.savefig(output_file)
plt.show()
| complex_eps = mat.eps
for polariz in mat.pol:
complex_eps += polariz['sigma'] * polariz['omega']**2 / (polariz['omega']**2 - freq**2 - 1j*freq*polariz['gamma'])
return complex_eps # + sum(0) | identifier_body |
generate_concept_dicts.py | #!/usr/bin/env python
# Encoding required to deal with 'micro' character
"""Script for auto-generating DICOM SR context groups from FHIR JSON value set
resources.
"""
from io import BytesIO
import json
import ftplib
import glob
import logging
import os
import re
import sys
import tempfile
from pprint import pprint
from xml.etree import ElementTree as ET
if sys.version_info[0] < 3:
import urllib as urllib_request
else:
import urllib.request as urllib_request
logger = logging.getLogger(__name__)
# Example excerpt fhir JSON for reference
"""
"resourceType":"ValueSet",
"id":"dicom-cid-10-InterventionalDrug",
...
"name":"InterventionalDrug",
...
"compose":{
"include":[
{
"system":"http://snomed.info/sct",
"concept":[
{
"code":"387362001",
"display":"Epinephrine"
},
"""
# The list of scheme designators is not complete.
# For full list see table 8-1 in part 3.16 chapter 8:
# http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_8.html#table_8-1
FHIR_SYSTEM_TO_DICOM_SCHEME_DESIGNATOR = {
"http://snomed.info/sct": "SCT",
"http://dicom.nema.org/resources/ontology/DCM": "DCM",
"http://loinc.org": "LN",
"http://www.radlex.org": "RADLEX",
"http://sig.biostr.washington.edu/projects/fm/AboutFM.html": "FMA",
"http://www.nlm.nih.gov/mesh/meshhome.html": "MSH",
"http://ncit.nci.nih.gov": "NCIt",
"http://unitsofmeasure.org": "UCUM",
"http://hl7.org/fhir/sid/ndc": "NDC",
"urn:iso:std:iso:11073:10101": "MDC",
"doi:10.1016/S0735-1097(99)00126-6": "BARI",
"http://www.nlm.nih.gov/research/umls": "UMLS",
"http://pubchem.ncbi.nlm.nih.gov": "PUBCHEM_CID",
"http://braininfo.rprc.washington.edu/aboutBrainInfo.aspx#NeuroNames": "NEU",
"http://www.itis.gov": "ITIS_TSN",
"http://arxiv.org/abs/1612.07003": "IBSI",
"http://www.nlm.nih.gov/research/umls/rxnorm": "RXNORM",
}
DOC_LINES = [
f"# Auto-generated by {os.path.basename(__file__)}.\n",
"# -*- coding: utf-8 -*-\n",
"\n",
]
def camel_case(s):
leave_alone = (
"mm",
"cm",
"km",
"um",
"ms", # 'us'?-doesn't seem to be there
"ml",
"mg",
"kg",
) # ... probably need others
return "".join(
word.capitalize() if word != word.upper() and word not in leave_alone else word
for word in re.split(r"\W", s, flags=re.UNICODE)
if word.isalnum()
)
def keyword_from_meaning(name):
"""Return a camel case valid python identifier"""
# Try to adhere to keyword scheme in DICOM (CP850)
# singular/plural alternative forms are made plural
# e.g., “Physician(s) of Record” becomes “PhysiciansOfRecord”
name = name.replace("(s)", "s")
# “Patient’s Name” -> “PatientName”
# “Operators’ Name” -> “OperatorsName”
name = name.replace("’s ", " ")
name = name.replace("'s ", " ")
name = name.replace("s’ ", "s ")
name = name.replace("s' ", "s ")
# Mathematical symbols
name = name.replace("%", " Percent ")
name = name.replace(">", " Greater Than ")
name = name.replace("=", " Equals ")
name = name.replace("<", " Lesser Than ")
name = re.sub(r"([0-9]+)\.([0-9]+)", "\\1 Point \\2", name)
name = re.sub(r"\s([0-9.]+)-([0-9.]+)\s", " \\1 To \\2 ", name)
name = re.sub(r"([0-9]+)day", "\\1 Day", name)
name = re.sub(r"([0-9]+)y", "\\1 Years", name)
# Remove category modifiers, such as "(specimen)", "(procedure)",
# "(body structure)", etc.
name = re.sub(r"^(.+) \([a-z ]+\)$", "\\1", name)
name = camel_case(name.strip())
# Python variables must not begin with a number.
if re.match(r"[0-9]", name):
name = "_" + name
return name
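# Hypothetical examples of the mapping implemented above:
# keyword_from_meaning("Physician(s) of Record") -> "PhysiciansOfRecord"
# keyword_from_meaning("Patient's Name") -> "PatientName"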
def download_fhir_value_sets(local_dir):
ftp_host = "medical.nema.org"
if not os.path.exists(local_dir):
os.makedirs(local_dir)
logger.info("storing files in " + local_dir)
logger.info(f'log into FTP server "{ftp_host}"')
ftp = ftplib.FTP(ftp_host, timeout=60)
ftp.login("anonymous")
ftp_path = "medical/dicom/resources/valuesets/fhir/json"
logger.info(f'list files in directory "{ftp_path}"')
fhir_value_set_files = ftp.nlst(ftp_path)
try:
for ftp_filepath in fhir_value_set_files:
ftp_filename = os.path.basename(ftp_filepath)
logger.info(f'retrieve value set file "{ftp_filename}"')
with BytesIO() as fp:
ftp.retrbinary(f"RETR {ftp_filepath}", fp.write)
content = fp.getvalue()
local_filename = os.path.join(local_dir, ftp_filename)
with open(local_filename, "wb") as f_local:
f_local.write(content)
finally:
ftp.quit()
def _parse_html(content):
# from lxml import html
# doc = html.document_fromstring(content)
return ET.fromstring(content, pa | response = urllib_request.urlopen(url)
return response.read()
def _get_text(element):
text = "".join(element.itertext())
return text.strip()
def get_table_o1():
logger.info("process Table O1")
url = "http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_O.html#table_O-1"
root = _parse_html(_download_html(url))
namespaces = {"w3": root.tag.split("}")[0].strip("{")}
body = root.find("w3:body", namespaces=namespaces)
table = body.findall(".//w3:tbody", namespaces=namespaces)[0]
rows = table.findall("./w3:tr", namespaces=namespaces)
data = []
for row in rows:
data.append(
(
_get_text(row[0].findall(".//w3:p", namespaces=namespaces)[-1]),
_get_text(row[1].findall(".//w3:p", namespaces=namespaces)[0]),
_get_text(row[2].findall(".//w3:p", namespaces=namespaces)[0]),
)
)
return data
def get_table_d1():
logger.info("process Table D1")
url = "http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_D.html#table_D-1"
root = _parse_html(_download_html(url))
namespaces = {"w3": root.tag.split("}")[0].strip("{")}
body = root.find("w3:body", namespaces=namespaces)
table = body.findall(".//w3:tbody", namespaces=namespaces)[0]
rows = table.findall("./w3:tr", namespaces=namespaces)
return [
(
_get_text(row[0].findall(".//w3:p", namespaces=namespaces)[0]),
_get_text(row[1].findall(".//w3:p", namespaces=namespaces)[0]),
)
for row in rows
]
def write_concepts(concepts, cid_concepts, cid_lists, name_for_cid):
lines = DOC_LINES + [
"# Dict with scheme designator keys; value format is:\n",
"# {keyword: {code1: (meaning, cid_list), code2: ...}\n",
"#\n",
"# Most keyword identifiers map to a single code, but not all\n",
"\n",
]
with open("_concepts_dict.py", "w", encoding="UTF8") as f_concepts:
f_concepts.writelines(lines)
f_concepts.write("concepts = {}\n") # start with empty dict
for scheme, value in concepts.items():
f_concepts.write(f"\nconcepts['{scheme}'] = \\\n")
pprint(value, f_concepts)
lines = DOC_LINES + [
"# Dict with cid number as keys; value format is:\n",
"# {scheme designator: <list of keywords for current cid>\n",
"# scheme_designator: ...}\n",
"\n",
]
with open("_cid_dict.py", "w", encoding="UTF8") as f_cid:
f_cid.writelines(lines)
f_cid.write("name_for_cid = {}\n")
f_cid.write("cid_concepts = {}\n")
for cid, value in cid_lists.items():
f_cid.write(f"\nname_for_cid[{cid}] = '{name_for_cid[cid]}'\n")
f_cid.write(f"cid_concepts[{cid}] = \\\n")
pprint(value, f_cid)
def write_snomed_mapping(snomed_codes):
with open("_snomed_dict.py", "w", encoding="UTF8") as f_concepts:
lines = DOC_LINES + [
"# Dict with scheme designator keys; value format is:\n",
"# {concept_id1: snomed_id1, concept_id2: ...}\n",
"# or\n",
"# {snomed_id1: concept_id1, snomed_id2: ...}\n",
"\n",
]
f_concepts.writelines(lines)
f_concepts.write("mapping = {}\n") # start with empty dict
f_concepts.write("\nmapping['{}'] = {{\n".format("SCT"))
for sct, srt, meaning in snomed_codes:
f_concepts.write(f" '{sct}': '{srt}',\n")
f_concepts.write("}\n")
f_concepts.write("\nmapping['{}'] = {{\n".format("SRT"))
for sct, srt, meaning in snomed_codes:
f_concepts.write(f" '{srt}': '{sct}',\n")
f_concepts.write("}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
local_dir = tempfile.gettempdir()
fhir_dir = os.path.join(local_dir, "fhir")
if not os.path.exists(fhir_dir) or not os.listdir(fhir_dir):
download_fhir_value_sets(fhir_dir)
else:
msg = "Using locally downloaded files\n"
msg += "from directory " + fhir_dir
logging.info(msg)
fhir_value_set_files = glob.glob(os.path.join(fhir_dir, "*"))
cid_pattern = re.compile("^dicom-cid-([0-9]+)-[a-zA-Z]+")
concepts = dict()
cid_lists = dict()
name_for_cid = dict()
# XXX = 0
try:
for ftp_filepath in fhir_value_set_files:
ftp_filename = os.path.basename(ftp_filepath)
logger.info(f'process file "{ftp_filename}"')
with open(ftp_filepath, "rb") as fp:
content = fp.read()
value_set = json.loads(content)
cid_match = cid_pattern.search(value_set["id"])
cid = int(cid_match.group(1)) # can take int off to store as string
name_for_cid[cid] = value_set["name"]
cid_concepts = {}
for group in value_set["compose"]["include"]:
system = group["system"]
try:
scheme_designator = FHIR_SYSTEM_TO_DICOM_SCHEME_DESIGNATOR[system]
except KeyError:
raise NotImplementedError(
"The DICOM scheme designator for the following FHIR system "
f"has not been specified: {system}"
)
if scheme_designator not in concepts:
concepts[scheme_designator] = dict()
for concept in group["concept"]:
name = keyword_from_meaning(concept["display"])
code = concept["code"].strip()
display = concept["display"].strip()
# If new name under this scheme, start dict of codes/cids that use that code
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (display, [cid])}
else:
prior = concepts[scheme_designator][name]
if code in prior:
prior[code][1].append(cid)
else:
prior[code] = (display, [cid])
if prior[code][0].lower() != display.lower():
# Meanings can only be different by symbols, etc.
# because converted to same keyword.
# Nevertheless, print as info
msg = "'{}': Meaning '{}' in cid_{}, previously '{}' in cids {}"
msg = msg.format(
name, display, cid, prior[code][0], prior[code][1]
)
logger.info(msg)
# Keep track of this cid referencing that name
if scheme_designator not in cid_concepts:
cid_concepts[scheme_designator] = []
if name in cid_concepts[scheme_designator]:
msg = "'{}': Meaning '{}' in cid_{} is duplicated!"
msg = msg.format(name, concept["display"], cid)
logger.warning(msg)
cid_concepts[scheme_designator].append(name)
cid_lists[cid] = cid_concepts
# if XXX > 3:
# break
# XXX += 1
scheme_designator = "SCT"
snomed_codes = get_table_o1()
for code, srt_code, meaning in snomed_codes:
name = keyword_from_meaning(meaning)
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (meaning, [])}
else:
prior = concepts[scheme_designator][name]
if code not in prior:
prior[code] = (meaning, [])
scheme_designator = "DCM"
dicom_codes = get_table_d1()
for code, meaning in dicom_codes:
name = keyword_from_meaning(meaning)
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (meaning, [])}
else:
prior = concepts[scheme_designator][name]
if code not in prior:
prior[code] = (meaning, [])
finally:
# If any error or KeyboardInterrupt, close up and write what we have
write_concepts(concepts, cid_concepts, cid_lists, name_for_cid)
write_snomed_mapping(snomed_codes)
| rser=ET.XMLParser(encoding="utf-8"))
def _download_html(url):
| identifier_body |
generate_concept_dicts.py | #!/usr/bin/env python
# Encoding required to deal with 'micro' character
"""Script for auto-generating DICOM SR context groups from FHIR JSON value set
resources.
"""
from io import BytesIO
import json
import ftplib
import glob
import logging
import os
import re
import sys
import tempfile
from pprint import pprint
from xml.etree import ElementTree as ET
if sys.version_info[0] < 3:
import urllib as urllib_request
else:
import urllib.request as urllib_request
logger = logging.getLogger(__name__)
# Example excerpt fhir JSON for reference
"""
"resourceType":"ValueSet",
"id":"dicom-cid-10-InterventionalDrug",
...
"name":"InterventionalDrug",
...
"compose":{
"include":[
{
"system":"http://snomed.info/sct",
"concept":[
{
"code":"387362001",
"display":"Epinephrine"
},
"""
# The list of scheme designators is not complete.
# For full list see table 8-1 in part 3.16 chapter 8:
# http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_8.html#table_8-1
FHIR_SYSTEM_TO_DICOM_SCHEME_DESIGNATOR = {
"http://snomed.info/sct": "SCT",
"http://dicom.nema.org/resources/ontology/DCM": "DCM",
"http://loinc.org": "LN",
"http://www.radlex.org": "RADLEX",
"http://sig.biostr.washington.edu/projects/fm/AboutFM.html": "FMA",
"http://www.nlm.nih.gov/mesh/meshhome.html": "MSH",
"http://ncit.nci.nih.gov": "NCIt",
"http://unitsofmeasure.org": "UCUM",
"http://hl7.org/fhir/sid/ndc": "NDC",
"urn:iso:std:iso:11073:10101": "MDC",
"doi:10.1016/S0735-1097(99)00126-6": "BARI",
"http://www.nlm.nih.gov/research/umls": "UMLS",
"http://pubchem.ncbi.nlm.nih.gov": "PUBCHEM_CID",
"http://braininfo.rprc.washington.edu/aboutBrainInfo.aspx#NeuroNames": "NEU",
"http://www.itis.gov": "ITIS_TSN",
"http://arxiv.org/abs/1612.07003": "IBSI",
"http://www.nlm.nih.gov/research/umls/rxnorm": "RXNORM",
}
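# Any FHIR system URI missing from this table makes the main loop below raise
# NotImplementedError, so new code systems must first be registered here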
DOC_LINES = [
f"# Auto-generated by {os.path.basename(__file__)}.\n",
"# -*- coding: utf-8 -*-\n",
"\n",
]
def camel_case(s):
leave_alone = (
"mm",
"cm",
"km",
"um",
"ms", # 'us'?-doesn't seem to be there
"ml",
"mg",
"kg",
) # ... probably need others
return "".join(
word.capitalize() if word != word.upper() and word not in leave_alone else word
for word in re.split(r"\W", s, flags=re.UNICODE)
if word.isalnum()
)
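# e.g. camel_case("body structure") -> "BodyStructure"; all-caps words such as
# "DICOM" and the unit tokens in leave_alone ("mm", "kg", ...) pass through unchanged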
def keyword_from_meaning(name):
"""Return a camel case valid python identifier"""
# Try to adhere to keyword scheme in DICOM (CP850)
# singular/plural alternative forms are made plural
# e.g., “Physician(s) of Record” becomes “PhysiciansOfRecord”
name = name.replace("(s)", "s")
# “Patient’s Name” -> “PatientName”
# “Operators’ Name” -> “OperatorsName”
name = name.replace("’s ", " ")
name = name.replace("'s ", " ")
name = name.replace("s’ ", "s ")
name = name.replace("s' ", "s ")
# Mathematical symbols
name = name.replace("%", " Percent ")
name = name.replace(">", " Greater Than ")
name = name.replace("=", " Equals ")
name = name.replace("<", " Lesser Than ")
name = re.sub(r"([0-9]+)\.([0-9]+)", "\\1 Point \\2", name)
name = re.sub(r"\s([0-9.]+)-([0-9.]+)\s", " \\1 To \\2 ", name)
name = re.sub(r"([0-9]+)day", "\\1 Day", name)
name = re.sub(r"([0-9]+)y", "\\1 Years", name)
# Remove category modifiers, such as "(specimen)", "(procedure)",
# "(body structure)", etc.
name = re.sub(r"^(.+) \([a-z ]+\)$", "\\1", name)
name = camel_case(name.strip())
# Python variables must not begin with a number.
if re.match(r"[0-9]", name):
name = "_" + name
return name
def download_fhir_value_sets(local_dir):
ftp_host = "medical.nema.org"
if not os.path.exists(local_dir):
os.makedirs(local_dir)
logger.info("storing files in " + local_dir)
logger.info(f'log into FTP server "{ftp_host}"')
ftp = ftplib.FTP(ftp_host, timeout=60)
ftp.login("anonymous")
ftp_path = "medical/dicom/resources/valuesets/fhir/json"
logger.info(f'list files in directory "{ftp_path}"')
fhir_value_set_files = ftp.nlst(ftp_path)
try:
for ftp_filepath in fhir_value_set_files:
ftp_filename = os.path.basename(ftp_filepath)
logger.info(f'retrieve value set file "{ftp_filename}"')
with BytesIO() as fp:
ftp.retrbinary(f"RETR {ftp_filepath}", fp.write)
content = fp.getvalue()
local_filename = os.path.join(local_dir, ftp_filename)
with open(local_filename, "wb") as f_local:
f_local.write(content)
finally:
ftp.quit()
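# Hypothetical usage: mirror the value sets into a scratch directory.
# download_fhir_value_sets(os.path.join(tempfile.gettempdir(), "fhir"))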
def _parse_html(content):
# from lxml import html
# doc = html.document_fromstring(content)
return ET.fromstring(content, parser=ET.XMLParser(encoding="utf-8"))
def _download_html(url):
response = urllib_request.urlopen(url)
return response.read()
def _get_text(element):
text = "".join(element.itertext())
return text.strip()
def get_table_o1():
logger.info("process Table O1")
url = "http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_O.html#table_O-1"
root = _parse_html(_download_html(url))
namespaces = {"w3": root.tag.split("}")[0].strip("{")}
body = root.find("w3:body", namespaces=namespaces)
table = body.findall(".//w3:tbody", namespaces=namespaces)[0]
rows = table.findall("./w3:tr", namespaces=namespaces)
data = []
for row in rows:
data.append(
(
_get_text(row[0].findall(".//w3:p", namespaces=namespaces)[-1]),
_get_text(row[1].findall(".//w3:p", namespaces=namespaces)[0]),
_get_text(row[2].findall(".//w3:p", namespaces=namespaces)[0]),
)
)
return data
def get_table_d1():
logger.info("process Table D1")
url = "http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_D.html#table_D-1"
root = _parse_html(_download_html(url))
namespaces = {"w3": root.tag.split("}")[0].strip("{")}
body = root.find("w3:body", namespaces=namespaces)
table = body.findall(".//w3:tbody", namespaces=namespaces)[0]
rows = table.findall("./w3:tr", namespaces=namespaces)
return [
(
_get_text(row[0].findall(".//w3:p", namespaces=namespaces)[0]),
_get_text(row[1].findall(".//w3:p", namespaces=namespaces)[0]),
)
for row in rows
]
def write_concepts(concepts, cid_concepts, cid_lists, name_for_cid):
lines = DOC_LINES + [
"# Dict with scheme designator keys; value format is:\n",
"# {keyword: {code1: (meaning, cid_list), code2: ...}\n",
"#\n",
"# Most keyword identifiers map to a single code, but not all\n",
"\n",
]
with open("_concepts_dict.py", "w", encoding="UTF8") as f_concepts:
f_concepts.writelines(lines)
f_concepts.write("concepts = {}\n") # start with empty dict
for scheme, value in concepts.items():
f_concepts.write(f"\nconcepts['{scheme}'] = \\\n")
pprint(value, f_concepts)
lines = DOC_LINES + [
"# Dict with cid number as keys; value format is:\n",
"# {scheme designator: <list of keywords for current cid>\n",
"# scheme_designator: ...}\n",
"\n",
]
with open("_cid_dict.py", "w", encoding="UTF8") as f_cid:
f_cid.writelines(lines)
f_cid.write("name_for_cid = {}\n")
f_cid.write("cid_concepts = {}\n")
for cid, value in cid_lists.items():
f_cid.write(f"\nname_for_cid[{cid}] = '{name_for_cid[cid]}'\n")
f_cid.write(f"cid_concepts[{cid}] = \\\n")
pprint(value, f_cid)
def write_snomed_mapping(snomed_codes):
with open("_snomed_dict.py", "w", encoding="UTF8") as f_concepts:
lines = DOC_LINES + [
"# Dict with scheme designator keys; value format is:\n",
"# {concept_id1: snomed_id1, concept_id2: ...}\n",
"# or\n",
"# {snomed_id1: concept_id1, snomed_id2: ...}\n",
"\n",
]
f_concepts.writelines(lines)
f_concepts.write("mapping = {}\n") # start with empty dict
f_concepts.write("\nmapping['{}'] = {{\n".format("SCT"))
for sct, srt, meaning in snomed_codes:
f_concepts.write(f" '{sct}': '{srt}',\n")
f_concepts.write("}\n")
f_concepts.write("\nmapping['{}'] = {{\n".format("SRT"))
for sct, srt, meaning in snomed_codes:
f_concepts.write(f" '{srt}': '{sct}',\n")
f_concepts.write("}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
local_dir = tempfile.gettempdir()
fhir_dir = os.path.join(local_dir, "fhir")
if not os.path.exists(fhir_dir) or not os.listdir(fhir_dir):
download_fhir_value_sets(fhir_dir)
else:
msg = "Using locally downloaded files\n"
msg += "from directory " + fhir_dir
logging.info(msg)
fhir_value_set_files = glob.glob(os.path.join(fhir_dir, "*"))
cid_pattern = re.compile("^dicom-cid-([0-9]+)-[a-zA-Z]+")
concepts = dict()
cid_lists = dict()
name_for_cid = dict() | # XXX = 0
try:
for ftp_filepath in fhir_value_set_files:
ftp_filename = os.path.basename(ftp_filepath)
logger.info(f'process file "{ftp_filename}"')
with open(ftp_filepath, "rb") as fp:
content = fp.read()
value_set = json.loads(content)
cid_match = cid_pattern.search(value_set["id"])
cid = int(cid_match.group(1)) # can take int off to store as string
name_for_cid[cid] = value_set["name"]
cid_concepts = {}
for group in value_set["compose"]["include"]:
system = group["system"]
try:
scheme_designator = FHIR_SYSTEM_TO_DICOM_SCHEME_DESIGNATOR[system]
except KeyError:
raise NotImplementedError(
"The DICOM scheme designator for the following FHIR system "
f"has not been specified: {system}"
)
if scheme_designator not in concepts:
concepts[scheme_designator] = dict()
for concept in group["concept"]:
name = keyword_from_meaning(concept["display"])
code = concept["code"].strip()
display = concept["display"].strip()
# If new name under this scheme, start dict of codes/cids that use that code
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (display, [cid])}
else:
prior = concepts[scheme_designator][name]
if code in prior:
prior[code][1].append(cid)
else:
prior[code] = (display, [cid])
if prior[code][0].lower() != display.lower():
# Meanings can only be different by symbols, etc.
# because converted to same keyword.
# Nevertheless, print as info
msg = "'{}': Meaning '{}' in cid_{}, previously '{}' in cids {}"
msg = msg.format(
name, display, cid, prior[code][0], prior[code][1]
)
logger.info(msg)
# Keep track of this cid referencing that name
if scheme_designator not in cid_concepts:
cid_concepts[scheme_designator] = []
if name in cid_concepts[scheme_designator]:
msg = "'{}': Meaning '{}' in cid_{} is duplicated!"
msg = msg.format(name, concept["display"], cid)
logger.warning(msg)
cid_concepts[scheme_designator].append(name)
cid_lists[cid] = cid_concepts
# if XXX > 3:
# break
# XXX += 1
scheme_designator = "SCT"
snomed_codes = get_table_o1()
for code, srt_code, meaning in snomed_codes:
name = keyword_from_meaning(meaning)
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (meaning, [])}
else:
prior = concepts[scheme_designator][name]
if code not in prior:
prior[code] = (meaning, [])
scheme_designator = "DCM"
dicom_codes = get_table_d1()
for code, meaning in dicom_codes:
name = keyword_from_meaning(meaning)
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (meaning, [])}
else:
prior = concepts[scheme_designator][name]
if code not in prior:
prior[code] = (meaning, [])
finally:
# If any error or KeyboardInterrupt, close up and write what we have
write_concepts(concepts, cid_concepts, cid_lists, name_for_cid)
write_snomed_mapping(snomed_codes) | random_line_split | |
generate_concept_dicts.py | #!/usr/bin/env python
# Encoding required to deal with 'micro' character
"""Script for auto-generating DICOM SR context groups from FHIR JSON value set
resources.
"""
from io import BytesIO
import json
import ftplib
import glob
import logging
import os
import re
import sys
import tempfile
from pprint import pprint
from xml.etree import ElementTree as ET
if sys.version_info[0] < 3:
import urllib as urllib_request
else:
import urllib.request as urllib_request
logger = logging.getLogger(__name__)
# Example excerpt fhir JSON for reference
"""
"resourceType":"ValueSet",
"id":"dicom-cid-10-InterventionalDrug",
...
"name":"InterventionalDrug",
...
"compose":{
"include":[
{
"system":"http://snomed.info/sct",
"concept":[
{
"code":"387362001",
"display":"Epinephrine"
},
"""
# The list of scheme designators is not complete.
# For full list see table 8-1 in part 3.16 chapter 8:
# http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_8.html#table_8-1
FHIR_SYSTEM_TO_DICOM_SCHEME_DESIGNATOR = {
"http://snomed.info/sct": "SCT",
"http://dicom.nema.org/resources/ontology/DCM": "DCM",
"http://loinc.org": "LN",
"http://www.radlex.org": "RADLEX",
"http://sig.biostr.washington.edu/projects/fm/AboutFM.html": "FMA",
"http://www.nlm.nih.gov/mesh/meshhome.html": "MSH",
"http://ncit.nci.nih.gov": "NCIt",
"http://unitsofmeasure.org": "UCUM",
"http://hl7.org/fhir/sid/ndc": "NDC",
"urn:iso:std:iso:11073:10101": "MDC",
"doi:10.1016/S0735-1097(99)00126-6": "BARI",
"http://www.nlm.nih.gov/research/umls": "UMLS",
"http://pubchem.ncbi.nlm.nih.gov": "PUBCHEM_CID",
"http://braininfo.rprc.washington.edu/aboutBrainInfo.aspx#NeuroNames": "NEU",
"http://www.itis.gov": "ITIS_TSN",
"http://arxiv.org/abs/1612.07003": "IBSI",
"http://www.nlm.nih.gov/research/umls/rxnorm": "RXNORM",
}
DOC_LINES = [
f"# Auto-generated by {os.path.basename(__file__)}.\n",
"# -*- coding: utf-8 -*-\n",
"\n",
]
def camel_case(s):
leave_alone = (
"mm",
"cm",
"km",
"um",
"ms", # 'us'?-doesn't seem to be there
"ml",
"mg",
"kg",
) # ... probably need others
return "".join(
word.capitalize() if word != word.upper() and word not in leave_alone else word
for word in re.split(r"\W", s, flags=re.UNICODE)
if word.isalnum()
)
def keyword_from_meaning(name):
"""Return a camel case valid python identifier"""
# Try to adhere to keyword scheme in DICOM (CP850)
# singular/plural alternative forms are made plural
# e.g., “Physician(s) of Record” becomes “PhysiciansOfRecord”
name = name.replace("(s)", "s")
# “Patient’s Name” -> “PatientName”
# “Operators’ Name” -> “OperatorsName”
name = name.replace("’s ", " ")
name = name.replace("'s ", " ")
name = name.replace("s’ ", "s ")
name = name.replace("s' ", "s ")
# Mathematical symbols
name = name.replace("%", " Percent ")
name = name.replace(">", " Greater Than ")
name = name.replace("=", " Equals ")
name = name.replace("<", " Lesser Than ")
name = re.sub(r"([0-9]+)\.([0-9]+)", "\\1 Point \\2", name)
name = re.sub(r"\s([0-9.]+)-([0-9.]+)\s", " \\1 To \\2 ", name)
name = re.sub(r"([0-9]+)day", "\\1 Day", name)
name = re.sub(r"([0-9]+)y", "\\1 Years", name)
# Remove category modifiers, such as "(specimen)", "(procedure)",
# "(body structure)", etc.
name = re.sub(r"^(.+) \([a-z ]+\)$", "\\1", name)
name = camel_case(name.strip())
# Python variables must not begin with a number.
if re.match(r"[0-9]", name):
name = "_" + name
return na | _fhir_value_sets(local_dir):
ftp_host = "medical.nema.org"
if not os.path.exists(local_dir):
os.makedirs(local_dir)
logger.info("storing files in " + local_dir)
logger.info(f'log into FTP server "{ftp_host}"')
ftp = ftplib.FTP(ftp_host, timeout=60)
ftp.login("anonymous")
ftp_path = "medical/dicom/resources/valuesets/fhir/json"
logger.info(f'list files in directory "{ftp_path}"')
fhir_value_set_files = ftp.nlst(ftp_path)
try:
for ftp_filepath in fhir_value_set_files:
ftp_filename = os.path.basename(ftp_filepath)
logger.info(f'retrieve value set file "{ftp_filename}"')
with BytesIO() as fp:
ftp.retrbinary(f"RETR {ftp_filepath}", fp.write)
content = fp.getvalue()
local_filename = os.path.join(local_dir, ftp_filename)
with open(local_filename, "wb") as f_local:
f_local.write(content)
finally:
ftp.quit()
def _parse_html(content):
# from lxml import html
# doc = html.document_fromstring(content)
return ET.fromstring(content, parser=ET.XMLParser(encoding="utf-8"))
def _download_html(url):
response = urllib_request.urlopen(url)
return response.read()
def _get_text(element):
text = "".join(element.itertext())
return text.strip()
def get_table_o1():
logger.info("process Table O1")
url = "http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_O.html#table_O-1"
root = _parse_html(_download_html(url))
namespaces = {"w3": root.tag.split("}")[0].strip("{")}
body = root.find("w3:body", namespaces=namespaces)
table = body.findall(".//w3:tbody", namespaces=namespaces)[0]
rows = table.findall("./w3:tr", namespaces=namespaces)
data = []
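# Each row is unpacked by callers as (SCT code, SRT code, meaning)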
for row in rows:
data.append(
(
_get_text(row[0].findall(".//w3:p", namespaces=namespaces)[-1]),
_get_text(row[1].findall(".//w3:p", namespaces=namespaces)[0]),
_get_text(row[2].findall(".//w3:p", namespaces=namespaces)[0]),
)
)
return data
def get_table_d1():
logger.info("process Table D1")
url = "http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_D.html#table_D-1"
root = _parse_html(_download_html(url))
namespaces = {"w3": root.tag.split("}")[0].strip("{")}
body = root.find("w3:body", namespaces=namespaces)
table = body.findall(".//w3:tbody", namespaces=namespaces)[0]
rows = table.findall("./w3:tr", namespaces=namespaces)
return [
(
_get_text(row[0].findall(".//w3:p", namespaces=namespaces)[0]),
_get_text(row[1].findall(".//w3:p", namespaces=namespaces)[0]),
)
for row in rows
]
def write_concepts(concepts, cid_concepts, cid_lists, name_for_cid):
lines = DOC_LINES + [
"# Dict with scheme designator keys; value format is:\n",
"# {keyword: {code1: (meaning, cid_list), code2: ...}\n",
"#\n",
"# Most keyword identifiers map to a single code, but not all\n",
"\n",
]
with open("_concepts_dict.py", "w", encoding="UTF8") as f_concepts:
f_concepts.writelines(lines)
f_concepts.write("concepts = {}\n") # start with empty dict
for scheme, value in concepts.items():
f_concepts.write(f"\nconcepts['{scheme}'] = \\\n")
pprint(value, f_concepts)
lines = DOC_LINES + [
"# Dict with cid number as keys; value format is:\n",
"# {scheme designator: <list of keywords for current cid>\n",
"# scheme_designator: ...}\n",
"\n",
]
with open("_cid_dict.py", "w", encoding="UTF8") as f_cid:
f_cid.writelines(lines)
f_cid.write("name_for_cid = {}\n")
f_cid.write("cid_concepts = {}\n")
for cid, value in cid_lists.items():
f_cid.write(f"\nname_for_cid[{cid}] = '{name_for_cid[cid]}'\n")
f_cid.write(f"cid_concepts[{cid}] = \\\n")
pprint(value, f_cid)
def write_snomed_mapping(snomed_codes):
with open("_snomed_dict.py", "w", encoding="UTF8") as f_concepts:
lines = DOC_LINES + [
"# Dict with scheme designator keys; value format is:\n",
"# {concept_id1: snomed_id1, concept_id2: ...}\n",
"# or\n",
"# {snomed_id1: concept_id1, snomed_id2: ...}\n",
"\n",
]
f_concepts.writelines(lines)
f_concepts.write("mapping = {}\n") # start with empty dict
f_concepts.write("\nmapping['{}'] = {{\n".format("SCT"))
for sct, srt, meaning in snomed_codes:
f_concepts.write(f" '{sct}': '{srt}',\n")
f_concepts.write("}\n")
f_concepts.write("\nmapping['{}'] = {{\n".format("SRT"))
for sct, srt, meaning in snomed_codes:
f_concepts.write(f" '{srt}': '{sct}',\n")
f_concepts.write("}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
local_dir = tempfile.gettempdir()
fhir_dir = os.path.join(local_dir, "fhir")
if not os.path.exists(fhir_dir) or not os.listdir(fhir_dir):
download_fhir_value_sets(fhir_dir)
else:
msg = "Using locally downloaded files\n"
msg += "from directory " + fhir_dir
logging.info(msg)
fhir_value_set_files = glob.glob(os.path.join(fhir_dir, "*"))
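# Matches ids like "dicom-cid-10-InterventionalDrug"; group 1 captures the CID number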
cid_pattern = re.compile("^dicom-cid-([0-9]+)-[a-zA-Z]+")
concepts = dict()
cid_lists = dict()
name_for_cid = dict()
# XXX = 0
try:
for ftp_filepath in fhir_value_set_files:
ftp_filename = os.path.basename(ftp_filepath)
logger.info(f'process file "{ftp_filename}"')
with open(ftp_filepath, "rb") as fp:
content = fp.read()
value_set = json.loads(content)
cid_match = cid_pattern.search(value_set["id"])
cid = int(cid_match.group(1))  # drop the int() to store the CID as a string instead
name_for_cid[cid] = value_set["name"]
cid_concepts = {}
for group in value_set["compose"]["include"]:
system = group["system"]
try:
scheme_designator = FHIR_SYSTEM_TO_DICOM_SCHEME_DESIGNATOR[system]
except KeyError:
raise NotImplementedError(
"The DICOM scheme designator for the following FHIR system "
f"has not been specified: {system}"
)
if scheme_designator not in concepts:
concepts[scheme_designator] = dict()
for concept in group["concept"]:
name = keyword_from_meaning(concept["display"])
code = concept["code"].strip()
display = concept["display"].strip()
# If the name is new under this scheme, start a dict of codes/cids that use that keyword
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (display, [cid])}
else:
prior = concepts[scheme_designator][name]
if code in prior:
prior[code][1].append(cid)
else:
prior[code] = (display, [cid])
if prior[code][0].lower() != display.lower():
# Meanings should differ only in symbols, punctuation, etc.,
# since they were converted to the same keyword.
# Nevertheless, log this as info
msg = "'{}': Meaning '{}' in cid_{}, previously '{}' in cids {}"
msg = msg.format(
name, display, cid, prior[code][0], prior[code][1]
)
logger.info(msg)
# Keep track of this cid referencing that name
if scheme_designator not in cid_concepts:
cid_concepts[scheme_designator] = []
if name in cid_concepts[scheme_designator]:
msg = "'{}': Meaning '{}' in cid_{} is duplicated!"
msg = msg.format(name, concept["display"], cid)
logger.warning(msg)
cid_concepts[scheme_designator].append(name)
cid_lists[cid] = cid_concepts
# if XXX > 3:
# break
# XXX += 1
scheme_designator = "SCT"
snomed_codes = get_table_o1()
for code, srt_code, meaning in snomed_codes:
name = keyword_from_meaning(meaning)
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (meaning, [])}
else:
prior = concepts[scheme_designator][name]
if code not in prior:
prior[code] = (meaning, [])
scheme_designator = "DCM"
dicom_codes = get_table_d1()
for code, meaning in dicom_codes:
name = keyword_from_meaning(meaning)
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (meaning, [])}
else:
prior = concepts[scheme_designator][name]
if code not in prior:
prior[code] = (meaning, [])
finally:
# If any error or KeyboardInterrupt, close up and write what we have
write_concepts(concepts, cid_concepts, cid_lists, name_for_cid)
write_snomed_mapping(snomed_codes)
| me
def download | conditional_block |
generate_concept_dicts.py | #!/usr/bin/env python
# Encoding required to deal with 'micro' character
"""Script for auto-generating DICOM SR context groups from FHIR JSON value set
resources.
"""
from io import BytesIO
import json
import ftplib
import glob
import logging
import os
import re
import sys
import tempfile
from pprint import pprint
from xml.etree import ElementTree as ET
if sys.version_info[0] < 3:
import urllib as urllib_request
else:
import urllib.request as urllib_request
logger = logging.getLogger(__name__)
# Example excerpt of FHIR JSON for reference
"""
"resourceType":"ValueSet",
"id":"dicom-cid-10-InterventionalDrug",
...
"name":"InterventionalDrug",
...
"compose":{
"include":[
{
"system":"http://snomed.info/sct",
"concept":[
{
"code":"387362001",
"display":"Epinephrine"
},
"""
# The list of scheme designators is not complete.
# For full list see table 8-1 in part 3.16 chapter 8:
# http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_8.html#table_8-1
FHIR_SYSTEM_TO_DICOM_SCHEME_DESIGNATOR = {
"http://snomed.info/sct": "SCT",
"http://dicom.nema.org/resources/ontology/DCM": "DCM",
"http://loinc.org": "LN",
"http://www.radlex.org": "RADLEX",
"http://sig.biostr.washington.edu/projects/fm/AboutFM.html": "FMA",
"http://www.nlm.nih.gov/mesh/meshhome.html": "MSH",
"http://ncit.nci.nih.gov": "NCIt",
"http://unitsofmeasure.org": "UCUM",
"http://hl7.org/fhir/sid/ndc": "NDC",
"urn:iso:std:iso:11073:10101": "MDC",
"doi:10.1016/S0735-1097(99)00126-6": "BARI",
"http://www.nlm.nih.gov/research/umls": "UMLS",
"http://pubchem.ncbi.nlm.nih.gov": "PUBCHEM_CID",
"http://braininfo.rprc.washington.edu/aboutBrainInfo.aspx#NeuroNames": "NEU",
"http://www.itis.gov": "ITIS_TSN",
"http://arxiv.org/abs/1612.07003": "IBSI",
"http://www.nlm.nih.gov/research/umls/rxnorm": "RXNORM",
}
DOC_LINES = [
f"# Auto-generated by {os.path.basename(__file__)}.\n",
"# -*- coding: utf-8 -*-\n",
"\n",
]
def camel_case(s):
leave_alone = (
"mm",
"cm",
"km",
"um",
"ms", # 'us'?-doesn't seem to be there
"ml",
"mg",
"kg",
) # ... probably need others
return "".join(
word.capitalize() if word != word.upper() and word not in leave_alone else word
for word in re.split(r"\W", s, flags=re.UNICODE)
if word.isalnum()
)
def keyword_from_meaning(name):
"""Return a camel case valid python identifier"""
# Try to adhere to keyword scheme in DICOM (CP850)
# singular/plural alternative forms are made plural
# e.g., “Physician(s) of Record” becomes “PhysiciansOfRecord”
name = name.replace("(s)", "s")
# “Patient’s Name” -> “PatientName”
# “Operators’ Name” -> “OperatorsName”
name = name.replace("’s ", " ")
name = name.replace("'s ", " ")
name = name.replace("s’ ", "s ")
name = name.replace("s' ", "s ")
# Mathematical symbols
name = name.replace("%", " Percent ")
name = name.replace(">", " Greater Than ")
name = name.replace("=", " Equals ")
name = name.replace("<", " Lesser Than ")
name = re.sub(r"([0-9]+)\.([0-9]+)", "\\1 Point \\2", name)
name = re.sub(r"\s([0-9.]+)-([0-9.]+)\s", " \\1 To \\2 ", name)
name = re.sub(r"([0-9]+)day", "\\1 Day", name)
name = re.sub(r"([0-9]+)y", "\\1 Years", name)
# Remove category modifiers, such as "(specimen)", "(procedure)",
# "(body structure)", etc.
name = re.sub(r"^(.+) \([a-z ]+\)$", "\\1", name)
name = camel_case(name.strip())
# Python variables must not begin with a number.
if re.match(r"[0-9]", name):
name = "_" + name
return name
def download_fhir_value_sets(local_dir):
ftp_host = "medical.nema.org"
if not os.path.exists(local_dir):
os.makedirs(local_dir)
logger.info("storing files in " + local_dir)
logger.info(f'log into FTP server "{ftp_host}"')
ftp = ftplib.FTP(ftp_host, timeout=60)
ftp.login("anonymous")
ftp_path = "medical/dicom/resources/valuesets/fhir/json"
logger.info(f'list files in directory "{ftp_path}"')
fhir_value_set_files = ftp.nlst(ftp_path)
try:
for ftp_filepath in fhir_value_set_files:
ftp_filename = os.path.basename(ftp_filepath)
logger.info(f'retrieve value set file "{ftp_filename}"')
with BytesIO() as fp:
ftp.retrbinary(f"RETR {ftp_filepath}", fp.write)
content = fp.getvalue()
local_filename = os.path.join(local_dir, ftp_filename)
with open(local_filename, "wb") as f_local:
f_local.write(content)
finally:
ftp.quit()
def _parse_html(content):
# from lxml import html
# doc = html.document_fromstring(content)
return ET.fromstring(content, parser=ET.XMLParser(encoding="utf-8"))
def _download_html(url):
response = urllib_request.urlopen(url)
return response.read()
def _get_text(element):
text = "".join(element.itertext())
return text.strip()
def get_table_o1():
logger.info("process Table O1")
url = "http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_O.html#table_O-1"
root = _parse_html(_download_html(url))
namespaces = {"w3": root.tag.split("}")[0].strip("{")}
body = root.find("w3:body", namespaces=namespaces)
table = body.findall(".//w3:tbody", namespaces=namespaces)[0]
rows = table.findall("./w3:tr", namespaces=namespaces)
data = []
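# Each row is unpacked by callers as (SCT code, SRT code, meaning)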
for row in rows:
data.append(
(
_get_text(row[0].findall(".//w3:p", namespaces=namespaces)[-1]),
_get_text(row[1].findall(".//w3:p", namespaces=namespaces)[0]),
_get_text(row[2].findall(".//w3:p", namespaces=namespaces)[0]),
)
)
return data
def get_table_d1():
logger.info( | le D1")
url = "http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_D.html#table_D-1"
root = _parse_html(_download_html(url))
namespaces = {"w3": root.tag.split("}")[0].strip("{")}
body = root.find("w3:body", namespaces=namespaces)
table = body.findall(".//w3:tbody", namespaces=namespaces)[0]
rows = table.findall("./w3:tr", namespaces=namespaces)
return [
(
_get_text(row[0].findall(".//w3:p", namespaces=namespaces)[0]),
_get_text(row[1].findall(".//w3:p", namespaces=namespaces)[0]),
)
for row in rows
]
def write_concepts(concepts, cid_concepts, cid_lists, name_for_cid):
lines = DOC_LINES + [
"# Dict with scheme designator keys; value format is:\n",
"# {keyword: {code1: (meaning, cid_list), code2: ...}\n",
"#\n",
"# Most keyword identifiers map to a single code, but not all\n",
"\n",
]
with open("_concepts_dict.py", "w", encoding="UTF8") as f_concepts:
f_concepts.writelines(lines)
f_concepts.write("concepts = {}\n") # start with empty dict
for scheme, value in concepts.items():
f_concepts.write(f"\nconcepts['{scheme}'] = \\\n")
pprint(value, f_concepts)
lines = DOC_LINES + [
"# Dict with cid number as keys; value format is:\n",
"# {scheme designator: <list of keywords for current cid>\n",
"# scheme_designator: ...}\n",
"\n",
]
with open("_cid_dict.py", "w", encoding="UTF8") as f_cid:
f_cid.writelines(lines)
f_cid.write("name_for_cid = {}\n")
f_cid.write("cid_concepts = {}\n")
for cid, value in cid_lists.items():
f_cid.write(f"\nname_for_cid[{cid}] = '{name_for_cid[cid]}'\n")
f_cid.write(f"cid_concepts[{cid}] = \\\n")
pprint(value, f_cid)
def write_snomed_mapping(snomed_codes):
with open("_snomed_dict.py", "w", encoding="UTF8") as f_concepts:
lines = DOC_LINES + [
"# Dict with scheme designator keys; value format is:\n",
"# {concept_id1: snomed_id1, concept_id2: ...}\n",
"# or\n",
"# {snomed_id1: concept_id1, snomed_id2: ...}\n",
"\n",
]
f_concepts.writelines(lines)
f_concepts.write("mapping = {}\n") # start with empty dict
f_concepts.write("\nmapping['{}'] = {{\n".format("SCT"))
for sct, srt, meaning in snomed_codes:
f_concepts.write(f" '{sct}': '{srt}',\n")
f_concepts.write("}\n")
f_concepts.write("\nmapping['{}'] = {{\n".format("SRT"))
for sct, srt, meaning in snomed_codes:
f_concepts.write(f" '{srt}': '{sct}',\n")
f_concepts.write("}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
local_dir = tempfile.gettempdir()
fhir_dir = os.path.join(local_dir, "fhir")
if not os.path.exists(fhir_dir) or not os.listdir(fhir_dir):
download_fhir_value_sets(fhir_dir)
else:
msg = "Using locally downloaded files\n"
msg += "from directory " + fhir_dir
logging.info(msg)
fhir_value_set_files = glob.glob(os.path.join(fhir_dir, "*"))
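# Matches ids like "dicom-cid-10-InterventionalDrug"; group 1 captures the CID number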
cid_pattern = re.compile("^dicom-cid-([0-9]+)-[a-zA-Z]+")
concepts = dict()
cid_lists = dict()
name_for_cid = dict()
# XXX = 0
try:
for ftp_filepath in fhir_value_set_files:
ftp_filename = os.path.basename(ftp_filepath)
logger.info(f'process file "{ftp_filename}"')
with open(ftp_filepath, "rb") as fp:
content = fp.read()
value_set = json.loads(content)
cid_match = cid_pattern.search(value_set["id"])
cid = int(cid_match.group(1))  # drop the int() to store the CID as a string instead
name_for_cid[cid] = value_set["name"]
cid_concepts = {}
for group in value_set["compose"]["include"]:
system = group["system"]
try:
scheme_designator = FHIR_SYSTEM_TO_DICOM_SCHEME_DESIGNATOR[system]
except KeyError:
raise NotImplementedError(
"The DICOM scheme designator for the following FHIR system "
f"has not been specified: {system}"
)
if scheme_designator not in concepts:
concepts[scheme_designator] = dict()
for concept in group["concept"]:
name = keyword_from_meaning(concept["display"])
code = concept["code"].strip()
display = concept["display"].strip()
# If the name is new under this scheme, start a dict of codes/cids that use that keyword
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (display, [cid])}
else:
prior = concepts[scheme_designator][name]
if code in prior:
prior[code][1].append(cid)
else:
prior[code] = (display, [cid])
if prior[code][0].lower() != display.lower():
# Meanings should differ only in symbols, punctuation, etc.,
# since they were converted to the same keyword.
# Nevertheless, log this as info
msg = "'{}': Meaning '{}' in cid_{}, previously '{}' in cids {}"
msg = msg.format(
name, display, cid, prior[code][0], prior[code][1]
)
logger.info(msg)
# Keep track of this cid referencing that name
if scheme_designator not in cid_concepts:
cid_concepts[scheme_designator] = []
if name in cid_concepts[scheme_designator]:
msg = "'{}': Meaning '{}' in cid_{} is duplicated!"
msg = msg.format(name, concept["display"], cid)
logger.warning(msg)
cid_concepts[scheme_designator].append(name)
cid_lists[cid] = cid_concepts
# if XXX > 3:
# break
# XXX += 1
scheme_designator = "SCT"
snomed_codes = get_table_o1()
for code, srt_code, meaning in snomed_codes:
name = keyword_from_meaning(meaning)
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (meaning, [])}
else:
prior = concepts[scheme_designator][name]
if code not in prior:
prior[code] = (meaning, [])
scheme_designator = "DCM"
dicom_codes = get_table_d1()
for code, meaning in dicom_codes:
name = keyword_from_meaning(meaning)
if name not in concepts[scheme_designator]:
concepts[scheme_designator][name] = {code: (meaning, [])}
else:
prior = concepts[scheme_designator][name]
if code not in prior:
prior[code] = (meaning, [])
finally:
# If any error or KeyboardInterrupt, close up and write what we have
write_concepts(concepts, cid_concepts, cid_lists, name_for_cid)
write_snomed_mapping(snomed_codes)
| "process Tab | identifier_name |
lobby.go | package main
import (
"fmt"
"log"
"errors"
"sync"
//"bytes"
"time"
"net"
//"net/http"
"encoding/json"
"github.com/googollee/go-socket.io"
)
/*
Enumerated constants for commands
*/
const (
C_Leave = iota //0
C_Kick = iota //1
C_End = iota //2
C_Start = iota //3
C_Terminate = iota //4
)
type Lobby interface {
SessionId() int
addAnonUser(id string, anon AnonUser) error
removeAnonUser(id, reason string) error
AnonUserById(id string) (*AnonUser, bool)
CurrentUserCount() int
emitAnonUserData() //return data in json format for front end
setAnonUserReady(id int)
sendHandler()
receiveHandler()
dataHandler()
}
type User interface {
SetSession(s *Session)
SetSocket(sock socketio.Socket)
Player() int
SetPlayer(p int)
Setup()
sendHandler()
receiveHandler()
}
type HostUser struct {
userId int
username string
Sess *Session
player int
Send chan interface{}
Receive chan interface{}
socket socketio.Socket
socketId string
}
type AnonUser struct {
//userId int //key to map to this user
Nickname string
Ready bool
Sess *Session
player int
Send chan interface{}
Receive chan interface{}
socket socketio.Socket
socketId string
}
type Session struct {
sync.RWMutex
sessionId int
game Game
LobbyHost *HostUser
userMap map[string]*AnonUser //int map changed
PlayerMap []User
//PlayerMap map[int]*User
Send chan []byte
Receive chan interface{}
Data chan interface{}
Exit chan bool
timeout chan bool
gameTCPConn *net.TCPConn
}
/*
Structs used in websocket message processing
*/
/*
type GameMessage struct {
Recipient int `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
type GameMessageAll struct {
Msg map[string]interface{} `json:"msg"`
}
*/
//WHAT ABOUT INTERFACES (i.e FEEDBACK) WITH STRUCT IMPLEMENTING FOR EVENTS?
//INTERFACE FUNCTIONS COULD BE CALLED CONCURRENTLY
type GameMessage struct {
Event string `json:"event"`
Player int `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
type MsgServer struct {
Msg map[string]interface{}
}
type LobbyUser struct {
Player int `json:"player"`
Nickname string `json:"nickname"`
Ready bool `json:"ready"`
}
type SetReady struct {
Nickname string `json:"nickname"`
Ready bool `json:"ready"`
}
type RemovedUser struct {
Nickname string
Reason string
}
type Joined struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
type GameStart struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
type GameEnd struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
/*
Cmd = Command consts
*/
type Command struct {
Cmd int
Data interface{}
}
func (s Session) closeConnection() {
err := s.gameTCPConn.Close()
if err != nil {
log.Panic(err)
}
}
/*
Game table in DB will need Host and Port stored
*/
func newSession(g Game, sessId int, host *HostUser) (*Session, error) {
session := Session{
sessionId: sessId,
game: g,
LobbyHost: host,
userMap: make(map[string]*AnonUser),
PlayerMap: make([]User, 0, g.MaxUsers() + 1),
//PlayerMap: make(map[int]*User, g.MaxUsers() + 1),
Send: make(chan []byte),
Receive: make(chan interface{}),
Data: make(chan interface{}),
Exit: make(chan bool, 1),
}
host.SetSession(&session)
host.SetPlayer(0)
session.PlayerMap = append(session.PlayerMap, session.LobbyHost) //set index 0 as host
//session.PlayerMap[0] = session.LobbyHost
go session.dataHandler()
return &session, nil
}
func | (uId int, user string) (*HostUser, error) {
hostUser := HostUser{
userId: uId,
username: user,
Send: make(chan interface{}),
Receive: make(chan interface{}),
}
return &hostUser, nil
}
func newAnonUser(nick string) *AnonUser {
anon := AnonUser{
Nickname: nick,
Ready: false,
Send: make(chan interface{}),
Receive: make(chan interface{}),
}
return &anon
}
func (s Session) SessionId() int {
return s.sessionId
}
/*
Adds an anon user to the session, and sets user's pointer to the session
*/
func (s Session) addAnonUser(id string, anon *AnonUser) error {
s.Lock()
defer s.Unlock()
_, ok := s.userMap[id]
if ok {
return errors.New("lobby: user already exists.")
}
if len(s.PlayerMap) >= cap(s.PlayerMap) {
return errors.New("lobby: lobby is full.")
}
s.userMap[id] = anon
s.PlayerMap = append(s.PlayerMap, anon)
anon.SetPlayer(len(s.PlayerMap) - 1)
anon.SetSession(&s)
s.emitAnonUserData()
return nil
}
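/*
Removes an anon user from the session, then signals the user's send handler
with a Leave command (empty reason) or a Kick command (reason supplied)
*/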
func (s Session) removeAnonUser(id, reason string) error {
s.Lock()
defer s.Unlock()
u, ok := s.userMap[id]
if !ok {
return errors.New("lobby: user does not exist.")
}
index := u.Player()
//user, ok := s.PlayerMap[index]
//if !ok {
//return errors.New("removePlayer: no player found at index")
//}
//if u != user {
//return errors.New("removePlayer: player does not match retrieved")
//}
//delete(s.PlayerMap, index)
s.removePlayer(index)
delete(s.userMap, id)
var msg Command
if reason == "" {
msg.Cmd = C_Leave
} else {
msg.Cmd = C_Kick
msg.Data = reason
}
u.Send <- msg
s.emitAnonUserData()
return nil
}
/*
Removes a player from the PlayerMap slice - No Memory Leak:
Example: Removing index 2 from: [0][1][2][3][4]
Append: [0][1] [3][4]
Final memory block still has redundant data: [0][1][3][4][4]
Overwrite with nil: [0][1][3][4][nil]
*/
func (s Session) removePlayer(index int) {
s.PlayerMap, s.PlayerMap[len(s.PlayerMap)-1] = append(s.PlayerMap[:index], s.PlayerMap[index+1:]...), nil
//Update all player numbers greater than deleted index
for i := index; i < len(s.PlayerMap); i++ {
s.PlayerMap[i].SetPlayer(i)
}
}
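/*
Dials the game server over TCP and starts the session's send/receive goroutines
*/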
func (s Session) connectSession(addr string) error {
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return err
}
conn, err := net.DialTCP("tcp", nil, tcpAddr)
if err != nil {
return err
}
s.gameTCPConn = conn
go s.receiveHandler()
go s.sendHandler()
return nil
}
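/*
Connects to the game server and requests a new game instance for this lobby,
then starts the timeout watchers for the server's response
*/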
func (s Session) requestSession() {
addr := s.game.host + ":" + s.game.port
err := s.connectSession(addr)
if err != nil {
//return nil, err
log.Print(err)
}
request := make(map[string]interface{})
request["event"] = "new"
request["players"] = s.CurrentUserCount()
request["maxplayers"] = s.game.MaxUsers()
jsonMsg, err := json.Marshal(request)
if err != nil {
log.Print(err)
}
s.Send <- jsonMsg //MAKE A TIMEOUT
go s.requestTimeout()
go s.sendTimeout()
}
func (s Session) sendTimeout() {
time.Sleep(10 * time.Second)
s.timeout <- true
}
func (s Session) requestTimeout() {
s.timeout = make(chan bool, 1)
var gs GameStart
select {
case t := <- s.timeout:
if t { //server timed out
gs = GameStart{
Response: false,
Feedback: "Server was unable to host.",
}
} else { //game created
gs = GameStart{
Response: true,
Feedback: "Application started succesfully.",
}
}
}
s.LobbyHost.Send <- gs
}
func (s Session) AnonUserById(id string) (*AnonUser, bool) {
a, ok := s.userMap[id]
return a, ok
}
func (s Session) CurrentUserCount() int {
return len(s.userMap)
}
/*
Flip ready bool
*/
func (s Session) setAnonUserReady(n string, r bool) {
s.Lock()
defer s.Unlock()
s.userMap[n].Ready = r
s.emitAnonUserData()
}
func (s Session) emitAnonUserData() {
var list []LobbyUser
players := s.PlayerMap[1:] //slice out host index
/*
for i := 1; i < len(s.PlayerMap); i++ {
p := s.PlayerMap[i]
user := LobbyUser{
Player: i,
Nickname: p.Nickname
Ready: p.Ready
}
list = append(list, user)
}
*/
for i, p := range players {
p := p.(*AnonUser) // PlayerMap stores pointers; a value assertion would panic
user := LobbyUser{
Player: i + 1, // players starts at PlayerMap[1], so restore the offset
Nickname: p.Nickname,
Ready: p.Ready,
}
list = append(list, user)
}
s.LobbyHost.Send <- list
}
/*
Main goroutine for handling messages to the game server
*/
func (s Session) sendHandler() {
for {
select {
case data := <-s.Send:
s.gameTCPConn.Write(data)
case <- s.Exit:
return
}
}
}
/*
Main goroutine for handling messages from the game server
*/
func (s Session) receiveHandler() {
decoder := json.NewDecoder(s.gameTCPConn)
for {
var gMsg GameMessage
err := decoder.Decode(&gMsg)
if err != nil {
log.Print(err)
}
//check event
switch gMsg.Event {
case "created":
s.Data <- gMsg
case "gamestart":
s.LobbyHost.Send <- gMsg
case "gameterminate":
s.Data <- gMsg
case "msgplayer":
if gMsg.Player == 0 {
user := s.PlayerMap[gMsg.Player].(*HostUser)
user.Send <- gMsg
} else {
user := s.PlayerMap[gMsg.Player].(*AnonUser)
user.Send <- gMsg
}
case "msgall":
s.LobbyHost.Send <- gMsg
}
}
}
/*
Main goroutine for processing lobby commands
*/
func (s Session) dataHandler() {
for {
select {
case data := <-s.Data:
switch jsonType := data.(type) {
case SetReady:
s.setAnonUserReady(jsonType.Nickname, jsonType.Ready)
case RemovedUser:
err := s.removeAnonUser(jsonType.Nickname, jsonType.Reason)
if err != nil {
log.Panic(err)
}
case GameMessage:
switch jsonType.Event {
case "created":
s.timeout <- false
log.Printf("Game successfully created: %s", jsonType.Msg)
}
default:
log.Print("Session dataHandler: unknown type received")
}
case <- s.Exit:
return
}
}
}
func (u HostUser) UserId() int {
return u.userId
}
func (u HostUser) SetSession(s *Session) {
u.Sess = s
}
func (u HostUser) SetSocket(sock socketio.Socket) {
u.socket = sock
}
func (u HostUser) Player() int {
return u.player
}
/*
Only to be called while Session is locked
*/
func (u HostUser) SetPlayer(number int) {
u.player = number
}
/*
Joins the user's socket namespace, and the session namespace
*/
func (u HostUser) Setup() {
//u.socket.Join(u.username) //not necessary socket ID namespace
u.socket.Join(fmt.Sprintf("%d", u.Sess.SessionId()))
go u.sendHandler()
u.receiveHandler()
}
/*
Emits socket.io messages to the namespace
*/
func (u HostUser) sendHandler() {
sessionNamespace := fmt.Sprintf("%d", u.Sess.SessionId())
for {
select {
case data := <-u.Send:
switch dataType := data.(type) {
case []LobbyUser:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("send lobby user list: error")
}
*/
u.socket.BroadcastTo(sessionNamespace, "updatelobby", dataType)
u.socket.Emit("updatelobby", dataType)
case GameStart:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.BroadcastTo(sessionNamespace, "gamestart", dataType)
u.socket.Emit("gamestart", dataType)
case GameMessage:
/*msg, err := json.Marshal(dataType.Msg)
if err != nil {
log.Panic("unable to marshal message")
}
*/
switch dataType.Event {
case "msgplayer":
u.socket.Emit("msgplayer", dataType.Msg)
case "msgall":
u.socket.BroadcastTo(sessionNamespace, "msgall", dataType.Msg)
u.socket.Emit("msgall", dataType.Msg)
}
default:
log.Print("HostUser sendHandler: unknown type received")
}
}
}
}
/*
Main goroutine for handling messages for host user
*/
func (u HostUser) receiveHandler() {
//Tell server the applet has loaded and is ready to communicate
//Used to initially ping server and pass any preliminary host information
//u.socket.Of(fmt.Sprintf("/%s", u.socket.Id())).On("kick", func(msg []byte) {
u.socket.On("kick", func(msg map[string]interface{}) {
var data RemovedUser
/*err := json.Unmarshal(msg, &data)
if err != nil {
log.Panic(err)
}
*/
data = RemovedUser{
Nickname: msg["nickname"].(string),
Reason: msg["reason"].(string),
}
u.Sess.Data <- data
})
//launch the game with the current users in lobby, server should respond if successful
u.socket.On("start", func() {
start := Command{
Cmd: C_Start,
}
jsonMsg, err := json.Marshal(start)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
//send to game server, server should give response to be emitted
u.socket.On("terminate", func() {
terminate := Command{
Cmd: C_Terminate,
}
jsonMsg, err := json.Marshal(terminate)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
u.socket.On("msgserver", func(msg map[string]interface{}) {
data := make(map[string]interface{}, 2)
data["player"] = u.Player()
data["msg"] = msg
jsonMsg, err := json.Marshal(data) // renamed to avoid shadowing the json package
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
//Starts the session with all users set ready assigned as players
//u.socket.Of(fmt.Sprintf("/%s", u.socket.Id())).On("start", func(msg interface{}) {
// u.Sess.Send <- msg
//})
//Host user forced disconnection
u.socket.On("disconnection", func() {
//host disconnected - pause application?
})
}
func (u AnonUser) SetSession(s *Session) {
u.Sess = s
}
func (u AnonUser) SetSocket(sock socketio.Socket) {
u.socket = sock
}
func (u AnonUser) Player() int {
return u.player
}
/*
Only to be called while Session is locked
*/
func (u AnonUser) SetPlayer(p int) {
u.player = p
}
func (u AnonUser) Setup() {
u.socket.Join(fmt.Sprintf("%d", u.Sess.SessionId()))
go u.sendHandler()
u.receiveHandler()
u.Send <- Joined{
Response: true,
Feedback: "Used added to Lobby",
}
}
/*
Emits socket.io messages to the anon user's socket
*/
func (u AnonUser) sendHandler() {
//namespace := fmt.Sprintf("/%s", u.socket.Id())
for {
select {
case data := <-u.Send:
switch dataType := data.(type) {
case Joined:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.Emit("joined", dataType)
case Command:
switch dataType.Cmd {
case C_Leave:
u.socket.Emit("disconnect")
return
case C_Kick:
u.socket.Emit("kick", dataType.Data.(string))
u.socket.Emit("disconnect")
return
case C_End:
return
default:
log.Print("AnonUser sendHandler: unknown command")
}
case GameMessage:
/*msg, err := json.Marshal(dataType.Msg)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.Emit("msgplayer", dataType.Msg)
default:
log.Print("AnonUser sendHandler: unknown type received")
}
}
}
}
/*
Sets all socket.io events for receiving emits from the AnonUser's device
*/
func (u AnonUser) receiveHandler() {
//get and format this user's personal socket namespace i.e. "/012345"
//namespace := fmt.Sprintf("/%s", u.socket.Id())
//Toggle the ready bool in the lobby
//u.socket.Of(namespace).On("setready", func(msg interface{}) {
u.socket.On("setready", func(msg map[string]interface{}) {
var data SetReady
data = SetReady{
Nickname: msg["nickname"].(string),
Ready: msg["ready"].(bool),
}
//err := json.Unmarshal(msg, &data)
//if err != nil {
// log.Panic(err)
//}
u.Sess.Data <- data
})
//Leave the session (manual leave)
//u.socket.Of(namespace).On("leavelobby", func() {
u.socket.On("leavelobby", func() {
ru := RemovedUser{
Nickname: u.Nickname,
}
u.Sess.Data <- ru
})
u.socket.On("msgserver", func(msg map[string]interface{}) {
data := make(map[string]interface{}, 2)
data["player"] = u.Player()
data["msg"] = msg
jsonMsg, err := json.Marshal(data)
if err != nil {
log.Panic(err)
}
u.Sess.Send <- jsonMsg
})
/*Tell server the applet has loaded and ready to communicate
u.socket.Of(namespace).On("loaded", func(msg interface{}) {
u.Sess.Send <- msg
})
//Receive game data from player -> forwarded to game server channel
u.socket.Of(namespace).On("in", func(msg interface{}) {
u.Sess.Send <- msg
})
*/
//Forced disconnection event
u.socket.On("disconnection", func() {
var msg Command
msg.Cmd = C_End
u.Send <- msg
})
} | newHostUser | identifier_name |
lobby.go | package main
import (
"fmt"
"log"
"errors"
"sync"
//"bytes"
"time"
"net"
//"net/http"
"encoding/json"
"github.com/googollee/go-socket.io"
)
/*
Enumerated constants for commands
*/
const (
C_Leave = iota //0
C_Kick = iota //1
C_End = iota //2
C_Start = iota //3
C_Terminate = iota //4
)
type Lobby interface {
SessionId() int
addAnonUser(id string, anon AnonUser) error
removeAnonUser(id, reason string) error
AnonUserById(id string) (*AnonUser, bool)
CurrentUserCount() int
emitAnonUserData() //return data in json format for front end
setAnonUserReady(id int)
sendHandler()
receiveHandler()
dataHandler()
}
type User interface {
SetSession(s *Session)
SetSocket(sock socketio.Socket)
Player() int
SetPlayer(p int)
Setup()
sendHandler()
receiveHandler()
}
type HostUser struct {
userId int
username string
Sess *Session
player int
Send chan interface{}
Receive chan interface{}
socket socketio.Socket
socketId string
}
type AnonUser struct {
//userId int //key to map to this user
Nickname string
Ready bool
Sess *Session
player int
Send chan interface{}
Receive chan interface{}
socket socketio.Socket
socketId string
}
type Session struct {
sync.RWMutex
sessionId int
game Game
LobbyHost *HostUser
userMap map[string]*AnonUser //int map changed
PlayerMap []User
//PlayerMap map[int]*User
Send chan []byte
Receive chan interface{}
Data chan interface{}
Exit chan bool
timeout chan bool
gameTCPConn *net.TCPConn
}
/*
Structs used in websocket message processing
*/
/*
type GameMessage struct {
Recipient int `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
type GameMessageAll struct {
Msg map[string]interface{} `json:"msg"`
}
*/
//WHAT ABOUT INTERFACES (i.e FEEDBACK) WITH STRUCT IMPLEMENTING FOR EVENTS?
//INTERFACE FUNCTIONS COULD BE CALLED CONCURRENTLY
type GameMessage struct {
Event string `json:"event"`
Player int `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
type MsgServer struct {
Msg map[string]interface{}
}
type LobbyUser struct {
Player int `json:"player"`
Nickname string `json:"nickname"`
Ready bool `json:"ready"`
}
type SetReady struct {
Nickname string `json:"nickname"`
Ready bool `json:"ready"`
}
type RemovedUser struct {
Nickname string
Reason string
}
type Joined struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
type GameStart struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
type GameEnd struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
/*
Cmd = Command consts
*/
type Command struct {
Cmd int
Data interface{}
}
func (s Session) closeConnection() {
err := s.gameTCPConn.Close()
if err != nil {
log.Panic(err)
}
}
/*
Game table in DB will need Host and Port stored
*/
func newSession(g Game, sessId int, host *HostUser) (*Session, error) {
session := Session{
sessionId: sessId,
game: g,
LobbyHost: host,
userMap: make(map[string]*AnonUser),
PlayerMap: make([]User, 0, g.MaxUsers() + 1),
//PlayerMap: make(map[int]*User, g.MaxUsers() + 1),
Send: make(chan []byte),
Receive: make(chan interface{}),
Data: make(chan interface{}),
Exit: make(chan bool, 1),
}
host.SetSession(&session)
host.SetPlayer(0)
session.PlayerMap = append(session.PlayerMap, session.LobbyHost) //set index 0 as host
//session.PlayerMap[0] = session.LobbyHost
go session.dataHandler()
return &session, nil
}
func newHostUser(uId int, user string) (*HostUser, error) {
hostUser := HostUser{
userId: uId,
username: user,
Send: make(chan interface{}),
Receive: make(chan interface{}),
}
return &hostUser, nil
}
func newAnonUser(nick string) *AnonUser {
anon := AnonUser{
Nickname: nick,
Ready: false,
Send: make(chan interface{}),
Receive: make(chan interface{}),
}
return &anon
}
func (s Session) SessionId() int {
return s.sessionId
}
/*
Adds an anon user to the session, and sets user's pointer to the session
*/
func (s Session) addAnonUser(id string, anon *AnonUser) error {
s.Lock()
defer s.Unlock()
_, ok := s.userMap[id]
if ok {
return errors.New("lobby: user already exists.")
}
if len(s.PlayerMap) >= cap(s.PlayerMap) {
return errors.New("lobby: lobby is full.")
}
s.userMap[id] = anon
s.PlayerMap = append(s.PlayerMap, anon)
anon.SetPlayer(len(s.PlayerMap) - 1)
anon.SetSession(&s)
s.emitAnonUserData()
return nil
}
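/*
Removes an anon user from the session, then signals the user's send handler
with a Leave command (empty reason) or a Kick command (reason supplied)
*/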
func (s Session) removeAnonUser(id, reason string) error {
s.Lock()
defer s.Unlock()
u, ok := s.userMap[id]
if !ok {
return errors.New("lobby: user does not exist.")
}
index := u.Player()
//user, ok := s.PlayerMap[index]
//if !ok {
//return errors.New("removePlayer: no player found at index")
//}
//if u != user {
//return errors.New("removePlayer: player does not match retrieved")
//}
//delete(s.PlayerMap, index)
s.removePlayer(index)
delete(s.userMap, id)
var msg Command
if reason == "" {
msg.Cmd = C_Leave
} else {
msg.Cmd = C_Kick
msg.Data = reason
}
u.Send <- msg
s.emitAnonUserData()
return nil
}
/*
Removes a player from the PlayerMap slice - No Memory Leak:
Example: Removing index 2 from: [0][1][2][3][4]
Append: [0][1] [3][4]
Final memory block still has redundant data: [0][1][3][4][4]
Overwrite with nil: [0][1][3][4][nil]
*/
func (s Session) removePlayer(index int) {
s.PlayerMap, s.PlayerMap[len(s.PlayerMap)-1] = append(s.PlayerMap[:index], s.PlayerMap[index+1:]...), nil
//Update all player numbers greater than deleted index
for i := index; i < len(s.PlayerMap); i++ {
s.PlayerMap[i].SetPlayer(i)
}
}
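/*
Dials the game server over TCP and starts the session's send/receive goroutines
*/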
func (s Session) connectSession(addr string) error {
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return err
}
conn, err := net.DialTCP("tcp", nil, tcpAddr)
if err != nil {
return err
}
s.gameTCPConn = conn
go s.receiveHandler()
go s.sendHandler()
return nil
}
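/*
Connects to the game server and requests a new game instance for this lobby,
then starts the timeout watchers for the server's response
*/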
func (s Session) requestSession() {
addr := s.game.host + ":" + s.game.port
err := s.connectSession(addr)
if err != nil {
//return nil, err
log.Print(err)
}
request := make(map[string]interface{})
request["event"] = "new"
request["players"] = s.CurrentUserCount()
request["maxplayers"] = s.game.MaxUsers()
jsonMsg, err := json.Marshal(request)
if err != nil {
log.Print(err)
}
s.Send <- jsonMsg //MAKE A TIMEOUT
go s.requestTimeout()
go s.sendTimeout()
}
func (s Session) sendTimeout() {
time.Sleep(10 * time.Second)
s.timeout <- true
}
func (s Session) requestTimeout() {
s.timeout = make(chan bool, 1)
var gs GameStart
select {
case t := <- s.timeout:
if t { //server timed out
gs = GameStart{
Response: false,
Feedback: "Server was unable to host.",
}
} else { //game created
gs = GameStart{
Response: true,
Feedback: "Application started succesfully.",
}
}
}
s.LobbyHost.Send <- gs
}
func (s Session) AnonUserById(id string) (*AnonUser, bool) {
a, ok := s.userMap[id]
return a, ok
}
func (s Session) CurrentUserCount() int {
return len(s.userMap)
}
/*
Flip ready bool
*/
func (s Session) setAnonUserReady(n string, r bool) {
s.Lock()
defer s.Unlock()
s.userMap[n].Ready = r
s.emitAnonUserData()
}
func (s Session) emitAnonUserData() {
var list []LobbyUser
players := s.PlayerMap[1:] //slice out host index
/*
for i := 1; i < len(s.PlayerMap); i++ {
p := s.PlayerMap[i]
user := LobbyUser{
Player: i,
Nickname: p.Nickname
Ready: p.Ready
}
list = append(list, user)
}
*/
for i, p := range players {
p := p.(*AnonUser) // PlayerMap stores pointers; a value assertion would panic
user := LobbyUser{
Player: i + 1, // players starts at PlayerMap[1], so restore the offset
Nickname: p.Nickname,
Ready: p.Ready,
}
list = append(list, user)
}
s.LobbyHost.Send <- list
}
/*
Main goroutine for handling messages to the game server
*/
func (s Session) sendHandler() {
for {
select {
case data := <-s.Send:
s.gameTCPConn.Write(data)
case <- s.Exit:
return
}
}
}
/*
Main goroutine for handling messages from the game server
*/
func (s Session) receiveHandler() {
decoder := json.NewDecoder(s.gameTCPConn)
for {
var gMsg GameMessage
err := decoder.Decode(&gMsg)
if err != nil {
log.Print(err)
}
//check event
switch gMsg.Event {
case "created":
s.Data <- gMsg
case "gamestart":
s.LobbyHost.Send <- gMsg
case "gameterminate":
s.Data <- gMsg
case "msgplayer":
if gMsg.Player == 0 | else {
user := s.PlayerMap[gMsg.Player].(*AnonUser)
user.Send <- gMsg
}
case "msgall":
s.LobbyHost.Send <- gMsg
}
}
}
/*
Main goroutine for processing lobby commands
*/
func (s Session) dataHandler() {
for {
select {
case data := <-s.Data:
switch jsonType := data.(type) {
case SetReady:
s.setAnonUserReady(jsonType.Nickname, jsonType.Ready)
case RemovedUser:
err := s.removeAnonUser(jsonType.Nickname, jsonType.Reason)
if err != nil {
log.Panic(err)
}
case GameMessage:
switch jsonType.Event {
case "created":
s.timeout <- false
log.Printf("Game successfully created: %s", jsonType.Msg)
}
default:
log.Print("Session dataHandler: unknown type received")
}
case <- s.Exit:
return
}
}
}
func (u HostUser) UserId() int {
return u.userId
}
func (u HostUser) SetSession(s *Session) {
u.Sess = s
}
func (u HostUser) SetSocket(sock socketio.Socket) {
u.socket = sock
}
func (u HostUser) Player() int {
return u.player
}
/*
Only to be called while Session is locked
*/
func (u HostUser) SetPlayer(number int) {
u.player = number
}
/*
Joins the user's socket namespace, and the session namespace
*/
func (u HostUser) Setup() {
//u.socket.Join(u.username) //not necessary socket ID namespace
u.socket.Join(fmt.Sprintf("%d", u.Sess.SessionId()))
go u.sendHandler()
u.receiveHandler()
}
/*
Emits socket.io messages to the namespace
*/
func (u HostUser) sendHandler() {
sessionNamespace := fmt.Sprintf("%d", u.Sess.SessionId())
for {
select {
case data := <-u.Send:
switch dataType := data.(type) {
case []LobbyUser:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("send lobby user list: error")
}
*/
u.socket.BroadcastTo(sessionNamespace, "updatelobby", dataType)
u.socket.Emit("updatelobby", dataType)
case GameStart:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.BroadcastTo(sessionNamespace, "gamestart", dataType)
u.socket.Emit("gamestart", dataType)
case GameMessage:
/*msg, err := json.Marshal(dataType.Msg)
if err != nil {
log.Panic("unable to marshal message")
}
*/
switch dataType.Event {
case "msgplayer":
u.socket.Emit("msgplayer", dataType.Msg)
case "msgall":
u.socket.BroadcastTo(sessionNamespace, "msgall", dataType.Msg)
u.socket.Emit("msgall", dataType.Msg)
}
default:
log.Print("HostUser sendHandler: unknown type received")
}
}
}
}
/*
Main goroutine for handling messages for host user
*/
func (u HostUser) receiveHandler() {
//Tell server the applet has loaded and is ready to communicate
//Used to initially ping server and pass any preliminary host information
//u.socket.Of(fmt.Sprintf("/%s", u.socket.Id())).On("kick", func(msg []byte) {
u.socket.On("kick", func(msg map[string]interface{}) {
var data RemovedUser
/*err := json.Unmarshal(msg, &data)
if err != nil {
log.Panic(err)
}
*/
data = RemovedUser{
Nickname: msg["nickname"].(string),
Reason: msg["reason"].(string),
}
u.Sess.Data <- data
})
//launch the game with the current users in lobby, server should respond if successful
u.socket.On("start", func() {
start := Command{
Cmd: C_Start,
}
jsonMsg, err := json.Marshal(start)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
//send to game server, server should give response to be emitted
u.socket.On("terminate", func() {
terminate := Command{
Cmd: C_Terminate,
}
jsonMsg, err := json.Marshal(terminate)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
u.socket.On("msgserver", func(msg map[string]interface{}) {
data := make(map[string]interface{}, 2)
data["player"] = u.Player()
data["msg"] = msg
jsonMsg, err := json.Marshal(data) // renamed to avoid shadowing the json package
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
//Starts the session with all users set ready assigned as players
//u.socket.Of(fmt.Sprintf("/%s", u.socket.Id())).On("start", func(msg interface{}) {
// u.Sess.Send <- msg
//})
//Host user forced disconnection
u.socket.On("disconnection", func() {
//host disconnected - pause application?
})
}
func (u AnonUser) SetSession(s *Session) {
u.Sess = s
}
func (u AnonUser) SetSocket(sock socketio.Socket) {
u.socket = sock
}
func (u AnonUser) Player() int {
return u.player
}
/*
Only to be called while Session is locked
*/
func (u AnonUser) SetPlayer(p int) {
u.player = p
}
func (u AnonUser) Setup() {
u.socket.Join(fmt.Sprintf("%d", u.Sess.SessionId()))
go u.sendHandler()
u.receiveHandler()
u.Send <- Joined{
Response: true,
Feedback: "Used added to Lobby",
}
}
/*
Emits socket.io messages to the anon user's socket
*/
func (u AnonUser) sendHandler() {
//namespace := fmt.Sprintf("/%s", u.socket.Id())
for {
select {
case data := <-u.Send:
switch dataType := data.(type) {
case Joined:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.Emit("joined", dataType)
case Command:
switch dataType.Cmd {
case C_Leave:
u.socket.Emit("disconnect")
return
case C_Kick:
u.socket.Emit("kick", dataType.Data.(string))
u.socket.Emit("disconnect")
return
case C_End:
return
default:
log.Print("AnonUser sendHandler: unknown command")
}
case GameMessage:
/*msg, err := json.Marshal(dataType.Msg)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.Emit("msgplayer", dataType.Msg)
default:
log.Print("AnonUser sendHandler: unknown type received")
}
}
}
}
/*
Sets all socket.io events for receiving emits from the AnonUser's device
*/
func (u AnonUser) receiveHandler() {
//get and format this user's personal socket namespace i.e. "/012345"
//namespace := fmt.Sprintf("/%s", u.socket.Id())
//Toggle the ready bool in the lobby
//u.socket.Of(namespace).On("setready", func(msg interface{}) {
u.socket.On("setready", func(msg map[string]interface{}) {
var data SetReady
data = SetReady{
Nickname: msg["nickname"].(string),
Ready: msg["ready"].(bool),
}
//err := json.Unmarshal(msg, &data)
//if err != nil {
// log.Panic(err)
//}
u.Sess.Data <- data
})
//Leave the session (manual leave)
//u.socket.Of(namespace).On("leavelobby", func() {
u.socket.On("leavelobby", func() {
ru := RemovedUser{
Nickname: u.Nickname,
}
u.Sess.Data <- ru
})
u.socket.On("msgserver", func(msg map[string]interface{}) {
data := make(map[string]interface{}, 2)
data["player"] = u.Player()
data["msg"] = msg
jsonMsg, err := json.Marshal(data)
if err != nil {
log.Panic(err)
}
u.Sess.Send <- jsonMsg
})
/*Tell server the applet has loaded and ready to communicate
u.socket.Of(namespace).On("loaded", func(msg interface{}) {
u.Sess.Send <- msg
})
//Receive game data from player -> forwarded to game server channel
u.socket.Of(namespace).On("in", func(msg interface{}) {
u.Sess.Send <- msg
})
*/
//Forced disconnection event
u.socket.On("disconnection", func() {
var msg Command
msg.Cmd = C_End
u.Send <- msg
})
} | {
user := s.PlayerMap[gMsg.Player].(*HostUser)
user.Send <- gMsg
} | conditional_block |
lobby.go | package main
import (
"fmt"
"log"
"errors"
"sync"
//"bytes"
"time"
"net"
//"net/http"
"encoding/json"
"github.com/googollee/go-socket.io"
)
/*
Enumerated constants for commands
*/
const (
C_Leave = iota //0
C_Kick = iota //1
C_End = iota //2
C_Start = iota //3
C_Terminate = iota //4
)
type Lobby interface {
SessionId() int
addAnonUser(id string, anon AnonUser) error
removeAnonUser(id, reason string) error
AnonUserById(id string) (*AnonUser, bool)
CurrentUserCount() int
emitAnonUserData() //return data in json format for front end
setAnonUserReady(id int)
sendHandler()
receiveHandler()
dataHandler()
}
type User interface {
SetSession(s *Session)
SetSocket(sock socketio.Socket)
Player() int
SetPlayer(p int)
Setup()
sendHandler()
receiveHandler()
}
type HostUser struct {
userId int
username string
Sess *Session
player int
Send chan interface{}
Receive chan interface{}
socket socketio.Socket
socketId string
}
type AnonUser struct {
//userId int //key to map to this user
Nickname string
Ready bool
Sess *Session
player int
Send chan interface{}
Receive chan interface{}
socket socketio.Socket
socketId string
}
type Session struct {
sync.RWMutex
sessionId int
game Game
LobbyHost *HostUser
userMap map[string]*AnonUser //int map changed
PlayerMap []User
//PlayerMap map[int]*User
Send chan []byte
Receive chan interface{}
Data chan interface{}
Exit chan bool
timeout chan bool
gameTCPConn *net.TCPConn
}
/*
Structs used in websocket message processing
*/
/*
type GameMessage struct {
Recipient int `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
type GameMessageAll struct {
Msg map[string]interface{} `json:"msg"`
}
*/
//WHAT ABOUT INTERFACES (i.e FEEDBACK) WITH STRUCT IMPLEMENTING FOR EVENTS?
//INTERFACE FUNCTIONS COULD BE CALLED CONCURRENTLY
type GameMessage struct {
Event string `json:"event"`
Player int `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
type MsgServer struct {
Msg map[string]interface{}
}
type LobbyUser struct {
Player int `json:"player"`
Nickname string `json:"nickname"`
Ready bool `json:"ready"`
}
type SetReady struct {
Nickname string `json:"nickname"`
Ready bool `json:"ready"`
}
type RemovedUser struct {
Nickname string
Reason string
}
type Joined struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
type GameStart struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
type GameEnd struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
/*
Cmd = Command consts
*/
type Command struct {
Cmd int
Data interface{}
}
func (s Session) closeConnection() {
err := s.gameTCPConn.Close()
if err != nil {
log.Panic(err)
}
}
/*
Game table in DB will need Host and Port stored
*/
func newSession(g Game, sessId int, host *HostUser) (*Session, error) {
session := Session{
sessionId: sessId,
game: g,
LobbyHost: host,
userMap: make(map[string]*AnonUser),
PlayerMap: make([]User, 0, g.MaxUsers() + 1),
//PlayerMap: make(map[int]*User, g.MaxUsers() + 1),
Send: make(chan []byte),
Receive: make(chan interface{}),
Data: make(chan interface{}),
Exit: make(chan bool, 1),
}
host.SetSession(&session)
host.SetPlayer(0)
session.PlayerMap = append(session.PlayerMap, session.LobbyHost) //set index 0 as host
//session.PlayerMap[0] = session.LobbyHost
go session.dataHandler()
return &session, nil
}
func newHostUser(uId int, user string) (*HostUser, error) {
hostUser := HostUser{
userId: uId,
username: user,
Send: make(chan interface{}),
Receive: make(chan interface{}),
}
return &hostUser, nil
}
func newAnonUser(nick string) *AnonUser {
anon := AnonUser{
Nickname: nick,
Ready: false,
Send: make(chan interface{}),
Receive: make(chan interface{}),
}
return &anon
}
func (s Session) SessionId() int {
return s.sessionId
}
/*
Adds an anon user to the session, and sets user's pointer to the session
*/
func (s Session) addAnonUser(id string, anon *AnonUser) error {
s.Lock()
defer s.Unlock()
_, ok := s.userMap[id]
if ok {
return errors.New("lobby: user already exists.")
}
if len(s.PlayerMap) >= cap(s.PlayerMap) {
return errors.New("lobby: lobby is full.")
}
s.userMap[id] = anon
s.PlayerMap = append(s.PlayerMap, anon)
anon.SetPlayer(len(s.PlayerMap) - 1)
anon.SetSession(&s)
s.emitAnonUserData()
return nil
}
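/*
Removes an anon user from the session, then signals the user's send handler
with a Leave command (empty reason) or a Kick command (reason supplied)
*/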
func (s Session) removeAnonUser(id, reason string) error {
s.Lock()
defer s.Unlock()
u, ok := s.userMap[id]
if !ok {
return errors.New("lobby: user does not exist.")
}
index := u.Player()
//user, ok := s.PlayerMap[index]
//if !ok {
//return errors.New("removePlayer: no player found at index")
//}
//if u != user {
//return errors.New("removePlayer: player does not match retrieved")
//}
//delete(s.PlayerMap, index)
s.removePlayer(index)
delete(s.userMap, id)
var msg Command
if reason == "" {
msg.Cmd = C_Leave
} else {
msg.Cmd = C_Kick
msg.Data = reason
}
u.Send <- msg
s.emitAnonUserData()
return nil
}
/*
Removes a player from the PlayerMap slice - No Memory Leak:
Example: Removing index 2 from: [0][1][2][3][4]
Append: [0][1] [3][4]
Final memory block still has redundant data: [0][1][3][4][4]
Overwrite with nil: [0][1][3][4][nil]
*/
func (s Session) removePlayer(index int) {
s.PlayerMap, s.PlayerMap[len(s.PlayerMap)-1] = append(s.PlayerMap[:index], s.PlayerMap[index+1:]...), nil
//Update all player numbers greater than deleted index
for i := index; i < len(s.PlayerMap); i++ {
s.PlayerMap[i].SetPlayer(i)
}
}
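/*
Dials the game server over TCP and starts the session's send/receive goroutines
*/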
func (s Session) connectSession(addr string) error {
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return err
}
conn, err := net.DialTCP("tcp", nil, tcpAddr)
if err != nil {
return err
}
s.gameTCPConn = conn
go s.receiveHandler()
go s.sendHandler()
return nil
}
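/*
Connects to the game server and requests a new game instance for this lobby,
then starts the timeout watchers for the server's response
*/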
func (s Session) requestSession() {
addr := s.game.host + ":" + s.game.port
err := s.connectSession(addr)
if err != nil {
//return nil, err
log.Print(err)
}
request := make(map[string]interface{})
request["event"] = "new"
request["players"] = s.CurrentUserCount()
request["maxplayers"] = s.game.MaxUsers()
jsonMsg, err := json.Marshal(request)
if err != nil {
log.Print(err)
}
s.Send <- jsonMsg //MAKE A TIMEOUT
go s.requestTimeout()
go s.sendTimeout()
}
func (s Session) sendTimeout() {
time.Sleep(10 * time.Second)
s.timeout <- true
}
func (s Session) requestTimeout() {
s.timeout = make(chan bool, 1)
var gs GameStart
select {
case t := <- s.timeout:
if t { //server timed out
gs = GameStart{
Response: false,
Feedback: "Server was unable to host.",
}
} else { //game created
gs = GameStart{
Response: true,
Feedback: "Application started succesfully.",
}
}
}
s.LobbyHost.Send <- gs
}
func (s Session) AnonUserById(id string) (*AnonUser, bool) {
a, ok := s.userMap[id]
return a, ok
}
func (s Session) CurrentUserCount() int {
return len(s.userMap)
}
/*
Flip ready bool
*/
func (s Session) setAnonUserReady(n string, r bool) {
s.Lock()
defer s.Unlock()
s.userMap[n].Ready = r
s.emitAnonUserData()
}
func (s *Session) emitAnonUserData() {
var list []LobbyUser
players := s.PlayerMap[1:] //slice out host index
/*
for i := 1; i < len(s.PlayerMap); i++ {
p := s.PlayerMap[i]
user := LobbyUser{
Player: i,
Nickname: p.Nickname
Ready: p.Ready
}
list = append(list, user)
}
*/
for i, p := range players {
p := p.(*AnonUser)
user := LobbyUser{
Player: i + 1, //anon players start at PlayerMap index 1; index 0 is the host
Nickname: p.Nickname,
Ready: p.Ready,
}
list = append(list, user)
}
s.LobbyHost.Send <- list
}
/*
Main goroutine for handling messages to the game server
*/
func (s *Session) sendHandler() {
for {
select {
case data := <-s.Send:
if _, err := s.gameTCPConn.Write(data); err != nil {
log.Print(err)
}
case <- s.Exit:
return
}
}
}
/*
Main goroutine for handling messages from the game server
*/
func (s *Session) receiveHandler() {
decoder := json.NewDecoder(s.gameTCPConn)
for {
var gMsg GameMessage
err := decoder.Decode(&gMsg)
if err != nil {
log.Print(err)
return //a decode error here is unrecoverable (e.g. a closed connection)
}
//check event
switch gMsg.Event {
case "created":
s.Data <- gMsg
case "gamestart":
s.LobbyHost.Send <- gMsg
case "gameterminate":
s.Data <- gMsg
case "msgplayer":
if gMsg.Player == 0 {
user := s.PlayerMap[gMsg.Player].(*HostUser)
user.Send <- gMsg
} else {
user := s.PlayerMap[gMsg.Player].(*AnonUser)
user.Send <- gMsg
}
case "msgall":
s.LobbyHost.Send <- gMsg
}
}
}
/*
Main goroutine for processing lobby commands
*/
func (s *Session) dataHandler() {
for {
select {
case data := <-s.Data:
switch jsonType := data.(type) {
case SetReady:
s.setAnonUserReady(jsonType.Nickname, jsonType.Ready)
case RemovedUser:
err := s.removeAnonUser(jsonType.Nickname, jsonType.Reason)
if err != nil {
log.Print(err)
}
case GameMessage:
switch jsonType.Event {
case "created":
s.timeout <- false
log.Printf("Game successfully created: %s", jsonType.Msg)
}
default:
log.Print("Session dataHandler: unknown type received")
}
case <- s.Exit:
return
}
}
}
func (u HostUser) UserId() int {
return u.userId
}
func (u *HostUser) SetSession(s *Session) {
u.Sess = s
}
func (u *HostUser) SetSocket(sock socketio.Socket) {
u.socket = sock
}
func (u HostUser) Player() int {
return u.player
}
/*
Only to be called while Session is locked
*/
func (u *HostUser) SetPlayer(number int) {
u.player = number
}
/*
Joins the user's socket namespace, and the session namespace
*/
func (u HostUser) Setup() {
//u.socket.Join(u.username) //not necessary socket ID namespace
u.socket.Join(fmt.Sprintf("%d", u.Sess.SessionId()))
go u.sendHandler()
u.receiveHandler()
}
/*
Emits socket.io messages to the namespace
*/
func (u HostUser) sendHandler() {
sessionNamespace := fmt.Sprintf("%d", u.Sess.SessionId())
for {
select {
case data := <-u.Send:
switch dataType := data.(type) {
case []LobbyUser:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("send lobby user list: error")
}
*/
u.socket.BroadcastTo(sessionNamespace, "updatelobby", dataType)
u.socket.Emit("updatelobby", dataType)
case GameStart:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.BroadcastTo(sessionNamespace, "gamestart", dataType)
u.socket.Emit("gamestart", dataType)
case GameMessage:
/*msg, err := json.Marshal(dataType.Msg)
if err != nil {
log.Panic("unable to marshal message")
}
*/
switch dataType.Event {
case "msgplayer":
u.socket.Emit("msgplayer", dataType.Msg)
case "msgall":
u.socket.BroadcastTo(sessionNamespace, "msgall", dataType.Msg)
u.socket.Emit("msgall", dataType.Msg)
}
default:
log.Print("HostUser sendHandler: unknown type received")
}
}
}
}
/*
Main goroutine for handling messages for host user
*/
func (u HostUser) receiveHandler() {
//Tell server the applet has loaded and ready to communicate
//Used to initially ping server and pass any preliminary host information
//u.socket.Of(fmt.Sprintf("/%s", u.socket.Id())).On("kick", func(msg []byte) {
u.socket.On("kick", func(msg map[string]interface{}) {
var data RemovedUser
/*err := json.Unmarshal(msg, &data)
if err != nil {
log.Panic(err)
}
*/
data = RemovedUser{
Nickname: msg["nickname"].(string),
Reason: msg["reason"].(string),
}
u.Sess.Data <- data
})
//launch the game with the current users in lobby, server should respond if successful
u.socket.On("start", func() {
start := Command{
Cmd: C_Start,
}
jsonMsg, err := json.Marshal(start)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
//send to game server, server should give response to be emitted | u.socket.On("terminate", func() {
terminate := Command{
Cmd: C_Terminate,
}
jsonMsg, err := json.Marshal(terminate)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
u.socket.On("msgserver", func(msg map[string]interface{}) {
data := make(map[string]interface{}, 2)
data["player"] = u.Player()
data["msg"] = msg
jsonMsg, err := json.Marshal(data) //avoid shadowing the json package
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
//Starts the session with all users set ready assigned as players
//u.socket.Of(fmt.Sprintf("/%s", u.socket.Id())).On("start", func(msg interface{}) {
// u.Sess.Send <- msg
//})
//Host user forced disconnection
u.socket.On("disconnection", func() {
//host disconnected - pause application?
})
}
func (u *AnonUser) SetSession(s *Session) {
u.Sess = s
}
func (u *AnonUser) SetSocket(sock socketio.Socket) {
u.socket = sock
}
func (u AnonUser) Player() int {
return u.player
}
/*
Only to be called while Session is locked
*/
func (u *AnonUser) SetPlayer(p int) {
u.player = p
}
func (u AnonUser) Setup() {
u.socket.Join(fmt.Sprintf("%d", u.Sess.SessionId()))
go u.sendHandler()
u.receiveHandler()
u.Send <- Joined{
Response: true,
Feedback: "Used added to Lobby",
}
}
/*
Main goroutine for handling messages for host user
*/
func (u AnonUser) sendHandler() {
//namespace := fmt.Sprintf("/%s", u.socket.Id())
for {
select {
case data := <-u.Send:
switch dataType := data.(type) {
case Joined:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.Emit("joined", dataType)
case Command:
switch dataType.Cmd {
case C_Leave:
u.socket.Emit("disconnect")
return
case C_Kick:
u.socket.Emit("kick", dataType.Data.(string))
u.socket.Emit("disconnect")
return
case C_End:
return
default:
log.Print("AnonUser sendHandler: unknown command")
}
case GameMessage:
/*msg, err := json.Marshal(dataType.Msg)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.Emit("msgplayer", dataType.Msg)
default:
log.Print("AnonUser sendHandler: unknown type received")
}
}
}
}
/*
Sets all socket.io events for receiving emits from the AnonUser's device
*/
func (u AnonUser) receiveHandler() {
//get and format this user's personal socket namespace i.e. "/012345"
//namespace := fmt.Sprintf("/%s", u.socket.Id())
//Toggle the ready bool in the lobby
//u.socket.Of(namespace).On("setready", func(msg interface{}) {
u.socket.On("setready", func(msg map[string]interface{}) {
var data SetReady
data = SetReady{
Nickname: msg["nickname"].(string),
Ready: msg["ready"].(bool),
}
//err := json.Unmarshal(msg, &data)
//if err != nil {
// log.Panic(err)
//}
u.Sess.Data <- data
})
//Leave the session (manual leave)
//u.socket.Of(namespace).On("leavelobby", func() {
u.socket.On("leavelobby", func() {
ru := RemovedUser{
Nickname: u.Nickname,
}
u.Sess.Data <- ru
})
u.socket.On("msgserver", func(msg map[string]interface{}) {
data := make(map[string]interface{}, 2)
data["player"] = u.Player()
data["msg"] = msg
jsonMsg, err := json.Marshal(data)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
/*Tell server the applet has loaded and ready to communicate
u.socket.Of(namespace).On("loaded", func(msg interface{}) {
u.Sess.Send <- msg
})
//Receive game data from player -> forwarded to game server channel
u.socket.Of(namespace).On("in", func(msg interface{}) {
u.Sess.Send <- msg
})
*/
//Forced disconnection event
u.socket.On("disconnection", func() {
var msg Command
msg.Cmd = C_End
u.Send <- msg
})
} | random_line_split | |
lobby.go | package main
import (
"fmt"
"log"
"errors"
"sync"
//"bytes"
"time"
"net"
//"net/http"
"encoding/json"
"github.com/googollee/go-socket.io"
)
/*
Enumerated constants for commands
*/
const (
C_Leave = iota //0
C_Kick //1
C_End //2
C_Start //3
C_Terminate //4
)
type Lobby interface {
SessionId() int
addAnonUser(id string, anon *AnonUser) error
removeAnonUser(id, reason string) error
AnonUserById(id string) (*AnonUser, bool)
CurrentUserCount() int
emitAnonUserData() //return data in json format for front end
setAnonUserReady(n string, r bool)
sendHandler()
receiveHandler()
dataHandler()
}
type User interface {
SetSession(s *Session)
SetSocket(sock socketio.Socket)
Player() int
SetPlayer(p int)
Setup()
sendHandler()
receiveHandler()
}
type HostUser struct {
userId int
username string
Sess *Session
player int
Send chan interface{}
Receive chan interface{}
socket socketio.Socket
socketId string
}
type AnonUser struct {
//userId int //key to map to this user
Nickname string
Ready bool
Sess *Session
player int
Send chan interface{}
Receive chan interface{}
socket socketio.Socket
socketId string
}
type Session struct {
sync.RWMutex
sessionId int
game Game
LobbyHost *HostUser
userMap map[string]*AnonUser //int map changed
PlayerMap []User
//PlayerMap map[int]*User
Send chan []byte
Receive chan interface{}
Data chan interface{}
Exit chan bool
timeout chan bool
gameTCPConn *net.TCPConn
}
/*
Structs used in websocket message processing
*/
/*
type GameMessage struct {
Receipient int `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
type GameMessageAll struct {
Msg map[string]interface{} `json:"msg"`
}
*/
//WHAT ABOUT INTERFACES (i.e FEEDBACK) WITH STRUCT IMPLEMENTING FOR EVENTS?
//INTERFACE FUNCTIONS COULD BE CALLED CONCURRENTLY
type GameMessage struct {
Event string `json:"event"`
Player int `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
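//For reference, a GameMessage marshalled with encoding/json looks roughly
//like this (sketch; the field values are made up):
// {"event":"msgplayer","player":2,"msg":{"text":"hello"}}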
type MsgServer struct {
Msg map[string]interface{}
}
type LobbyUser struct {
Player int `json:"player"`
Nickname string `json:"nickname"`
Ready bool `json:"ready"`
}
type SetReady struct {
Nickname string `json:"nickname"`
Ready bool `json:"ready"`
}
type RemovedUser struct {
Nickname string
Reason string
}
type Joined struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
type GameStart struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
type GameEnd struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
/*
Cmd = Command consts
*/
type Command struct {
Cmd int
Data interface{}
}
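//Hypothetical usage sketch (not called anywhere): the two Command shapes the
//lobby sends over a user's channel.
func exampleCommands() []Command {
return []Command{
{Cmd: C_Leave}, //plain leave, no payload
{Cmd: C_Kick, Data: "removed by the host"}, //kick carries a reason string
}
}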
func (s *Session) closeConnection() {
err := s.gameTCPConn.Close()
if err != nil {
log.Panic(err)
}
}
/*
Game table in DB will need Host and Port stored
*/
func newSession(g Game, sessId int, host *HostUser) (*Session, error) {
session := Session{
sessionId: sessId,
game: g,
LobbyHost: host,
userMap: make(map[string]*AnonUser),
PlayerMap: make([]User, 0, g.MaxUsers() + 1),
//PlayerMap: make(map[int]*User, g.MaxUsers() + 1),
Send: make(chan []byte),
Receive: make(chan interface{}),
Data: make(chan interface{}),
Exit: make(chan bool, 1),
}
host.SetSession(&session)
host.SetPlayer(0)
session.PlayerMap = append(session.PlayerMap, session.LobbyHost) //set index 0 as host
//session.PlayerMap[0] = session.LobbyHost
go session.dataHandler()
return &session, nil
}
func newHostUser(uId int, user string) (*HostUser, error) {
hostUser := HostUser{
userId: uId,
username: user,
Send: make(chan interface{}),
Receive: make(chan interface{}),
}
return &hostUser, nil
}
func newAnonUser(nick string) *AnonUser {
anon := AnonUser{
Nickname: nick,
Ready: false,
Send: make(chan interface{}),
Receive: make(chan interface{}),
}
return &anon
}
func (s *Session) SessionId() int {
return s.sessionId
}
/*
Adds an anon user to the session, and sets user's pointer to the session
*/
func (s *Session) addAnonUser(id string, anon *AnonUser) error {
s.Lock()
defer s.Unlock()
_, ok := s.userMap[id]
if ok {
return errors.New("lobby: user already exists.")
}
if len(s.PlayerMap) >= cap(s.PlayerMap) {
return errors.New("lobby: lobby is full.")
}
s.userMap[id] = anon
s.PlayerMap = append(s.PlayerMap, anon)
anon.SetPlayer(len(s.PlayerMap) - 1)
anon.SetSession(s)
s.emitAnonUserData()
return nil
}
func (s *Session) removeAnonUser(id, reason string) error {
s.Lock()
defer s.Unlock()
u, ok := s.userMap[id]
if !ok {
return errors.New("lobby: user does not exist.")
}
index := u.Player()
//user, ok := s.PlayerMap[index]
//if !ok {
//return errors.New("removePlayer: no player found at index")
//}
//if u != user {
//return errors.New("removePlayer: player does not match retrieved")
//}
//delete(s.PlayerMap, index)
s.removePlayer(index)
delete(s.userMap, id)
var msg Command
if reason == "" {
msg.Cmd = C_Leave
} else {
msg.Cmd = C_Kick
msg.Data = reason
}
u.Send <- msg
s.emitAnonUserData()
return nil
}
/*
Removes a player from the PlayerMap slice - No Memory Leak:
Example: Removing index 2 from: [0][1][2][3][4]
Append: [0][1] [3][4]
Final memory block still has redundant data: [0][1][3][4][4]
Overwrite with nil: [0][1][3][4][nil]
*/
func (s *Session) removePlayer(index int) {
s.PlayerMap, s.PlayerMap[len(s.PlayerMap)-1] = append(s.PlayerMap[:index], s.PlayerMap[index+1:]...), nil
//Update all player numbers greater than deleted index
for i := index; i < len(s.PlayerMap); i++ {
s.PlayerMap[i].SetPlayer(i)
}
}
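//Illustrative sketch (hypothetical helper, not called anywhere in this file):
//the same delete-and-nil idiom on a plain []int. The multi-assignment appends
//the two halves around index and zeroes the stale last slot in one statement,
//so the backing array keeps no dangling reference.
func removeIndexExample(s []int, index int) []int {
s, s[len(s)-1] = append(s[:index], s[index+1:]...), 0
return s
}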
func (s *Session) connectSession(addr string) error {
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return err
}
conn, err := net.DialTCP("tcp", nil, tcpAddr)
if err != nil {
return err
}
s.gameTCPConn = conn
go s.receiveHandler()
go s.sendHandler()
return nil
}
func (s *Session) requestSession()
func (s *Session) sendTimeout() {
time.Sleep(10 * time.Second)
s.timeout <- true
}
func (s *Session) requestTimeout() {
var gs GameStart
select {
case t := <- s.timeout:
if t { //server timed out
gs = GameStart{
Response: false,
Feedback: "Server was unable to host.",
}
} else { //game created
gs = GameStart{
Response: true,
Feedback: "Application started succesfully.",
}
}
}
s.LobbyHost.Send <- gs
}
func (s *Session) AnonUserById(id string) (*AnonUser, bool) {
a, ok := s.userMap[id]
return a, ok
}
func (s *Session) CurrentUserCount() int {
return len(s.userMap)
}
/*
Flip ready bool
*/
func (s *Session) setAnonUserReady(n string, r bool) {
s.Lock()
defer s.Unlock()
s.userMap[n].Ready = r
s.emitAnonUserData()
}
func (s *Session) emitAnonUserData() {
var list []LobbyUser
players := s.PlayerMap[1:] //slice out host index
/*
for i := 1; i < len(s.PlayerMap); i++ {
p := s.PlayerMap[i]
user := LobbyUser{
Player: i,
Nickname: p.Nickname
Ready: p.Ready
}
list = append(list, user)
}
*/
for i, p := range players {
p := p.(*AnonUser)
user := LobbyUser{
Player: i + 1, //anon players start at PlayerMap index 1; index 0 is the host
Nickname: p.Nickname,
Ready: p.Ready,
}
list = append(list, user)
}
s.LobbyHost.Send <- list
}
/*
Main goroutine for handling messages to the game server
*/
func (s *Session) sendHandler() {
for {
select {
case data := <-s.Send:
if _, err := s.gameTCPConn.Write(data); err != nil {
log.Print(err)
}
case <- s.Exit:
return
}
}
}
/*
Main goroutine for handling messages from the game server
*/
func (s *Session) receiveHandler() {
decoder := json.NewDecoder(s.gameTCPConn)
for {
var gMsg GameMessage
err := decoder.Decode(&gMsg)
if err != nil {
log.Print(err)
return //a decode error here is unrecoverable (e.g. a closed connection)
}
//check event
switch gMsg.Event {
case "created":
s.Data <- gMsg
case "gamestart":
s.LobbyHost.Send <- gMsg
case "gameterminate":
s.Data <- gMsg
case "msgplayer":
if gMsg.Player == 0 {
user := s.PlayerMap[gMsg.Player].(*HostUser)
user.Send <- gMsg
} else {
user := s.PlayerMap[gMsg.Player].(*AnonUser)
user.Send <- gMsg
}
case "msgall":
s.LobbyHost.Send <- gMsg
}
}
}
/*
Main goroutine for processing lobby commands
*/
func (s *Session) dataHandler() {
for {
select {
case data := <-s.Data:
switch jsonType := data.(type) {
case SetReady:
s.setAnonUserReady(jsonType.Nickname, jsonType.Ready)
case RemovedUser:
err := s.removeAnonUser(jsonType.Nickname, jsonType.Reason)
if err != nil {
log.Print(err)
}
case GameMessage:
switch jsonType.Event {
case "created":
s.timeout <- false
log.Printf("Game successfully created: %s", jsonType.Msg)
}
default:
log.Print("Session dataHandler: unknown type received")
}
case <- s.Exit:
return
}
}
}
func (u HostUser) UserId() int {
return u.userId
}
func (u *HostUser) SetSession(s *Session) {
u.Sess = s
}
func (u *HostUser) SetSocket(sock socketio.Socket) {
u.socket = sock
}
func (u HostUser) Player() int {
return u.player
}
/*
Only to be called while Session is locked
*/
func (u *HostUser) SetPlayer(number int) {
u.player = number
}
/*
Joins the user's socket namespace, and the session namespace
*/
func (u HostUser) Setup() {
//u.socket.Join(u.username) //not necessary socket ID namespace
u.socket.Join(fmt.Sprintf("%d", u.Sess.SessionId()))
go u.sendHandler()
u.receiveHandler()
}
/*
Emits socket.io messages to the namespace
*/
func (u HostUser) sendHandler() {
sessionNamespace := fmt.Sprintf("%d", u.Sess.SessionId())
for {
select {
case data := <-u.Send:
switch dataType := data.(type) {
case []LobbyUser:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("send lobby user list: error")
}
*/
u.socket.BroadcastTo(sessionNamespace, "updatelobby", dataType)
u.socket.Emit("updatelobby", dataType)
case GameStart:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.BroadcastTo(sessionNamespace, "gamestart", dataType)
u.socket.Emit("gamestart", dataType)
case GameMessage:
/*msg, err := json.Marshal(dataType.Msg)
if err != nil {
log.Panic("unable to marshal message")
}
*/
switch dataType.Event {
case "msgplayer":
u.socket.Emit("msgplayer", dataType.Msg)
case "msgall":
u.socket.BroadcastTo(sessionNamespace, "msgall", dataType.Msg)
u.socket.Emit("msgall", dataType.Msg)
}
default:
log.Print("HostUser sendHandler: unknown type received")
}
}
}
}
/*
Main goroutine for handling messages for host user
*/
func (u HostUser) receiveHandler() {
//Tell server the applet has loaded and ready to communicate
//Used to initially ping server and pass any preliminary host information
//u.socket.Of(fmt.Sprintf("/%s", u.socket.Id())).On("kick", func(msg []byte) {
u.socket.On("kick", func(msg map[string]interface{}) {
var data RemovedUser
/*err := json.Unmarshal(msg, &data)
if err != nil {
log.Panic(err)
}
*/
data = RemovedUser{
Nickname: msg["nickname"].(string),
Reason: msg["reason"].(string),
}
u.Sess.Data <- data
})
//launch the game with the current users in lobby, server should respond if successful
u.socket.On("start", func() {
start := Command{
Cmd: C_Start,
}
jsonMsg, err := json.Marshal(start)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
//send to game server, server should give response to be emitted
u.socket.On("terminate", func() {
terminate := Command{
Cmd: C_Terminate,
}
jsonMsg, err := json.Marshal(terminate)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
u.socket.On("msgserver", func(msg map[string]interface{}) {
data := make(map[string]interface{}, 2)
data["player"] = u.Player()
data["msg"] = msg
jsonMsg, err := json.Marshal(data) //avoid shadowing the json package
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
//Starts the session with all users set ready assigned as players
//u.socket.Of(fmt.Sprintf("/%s", u.socket.Id())).On("start", func(msg interface{}) {
// u.Sess.Send <- msg
//})
//Host user forced disconnection
u.socket.On("disconnection", func() {
//host disconnected - pause application?
})
}
func (u *AnonUser) SetSession(s *Session) {
u.Sess = s
}
func (u *AnonUser) SetSocket(sock socketio.Socket) {
u.socket = sock
}
func (u AnonUser) Player() int {
return u.player
}
/*
Only to be called while Session is locked
*/
func (u *AnonUser) SetPlayer(p int) {
u.player = p
}
func (u AnonUser) Setup() {
u.socket.Join(fmt.Sprintf("%d", u.Sess.SessionId()))
go u.sendHandler()
u.receiveHandler()
u.Send <- Joined{
Response: true,
Feedback: "Used added to Lobby",
}
}
/*
Main goroutine for handling messages for host user
*/
func (u AnonUser) sendHandler() {
//namespace := fmt.Sprintf("/%s", u.socket.Id())
for {
select {
case data := <-u.Send:
switch dataType := data.(type) {
case Joined:
/*msg, err := json.Marshal(dataType)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.Emit("joined", dataType)
case Command:
switch dataType.Cmd {
case C_Leave:
u.socket.Emit("disconnect")
return
case C_Kick:
u.socket.Emit("kick", dataType.Data.(string))
u.socket.Emit("disconnect")
return
case C_End:
return
default:
log.Print("AnonUser sendHandler: unknown command")
}
case GameMessage:
/*msg, err := json.Marshal(dataType.Msg)
if err != nil {
log.Panic("unable to marshal message")
}
*/
u.socket.Emit("msgplayer", dataType.Msg)
default:
log.Print("AnonUser sendHandler: unknown type received")
}
}
}
}
/*
Sets all socket.io events for receiving emits from the AnonUser's device
*/
func (u AnonUser) receiveHandler() {
//get and format this user's personal socket namespace i.e. "/012345"
//namespace := fmt.Sprintf("/%s", u.socket.Id())
//Toggle the ready bool in the lobby
//u.socket.Of(namespace).On("setready", func(msg interface{}) {
u.socket.On("setready", func(msg map[string]interface{}) {
var data SetReady
data = SetReady{
Nickname: msg["nickname"].(string),
Ready: msg["ready"].(bool),
}
//err := json.Unmarshal(msg, &data)
//if err != nil {
// log.Panic(err)
//}
u.Sess.Data <- data
})
//Leave the session (manual leave)
//u.socket.Of(namespace).On("leavelobby", func() {
u.socket.On("leavelobby", func() {
ru := RemovedUser{
Nickname: u.Nickname,
}
u.Sess.Data <- ru
})
u.socket.On("msgserver", func(msg map[string]interface{}) {
data := make(map[string]interface{}, 2)
data["player"] = u.Player()
data["msg"] = msg
jsonMsg, err := json.Marshal(data)
if err != nil {
log.Print(err)
}
u.Sess.Send <- jsonMsg
})
/*Tell server the applet has loaded and ready to communicate
u.socket.Of(namespace).On("loaded", func(msg interface{}) {
u.Sess.Send <- msg
})
//Receive game data from player -> forwarded to game server channel
u.socket.Of(namespace).On("in", func(msg interface{}) {
u.Sess.Send <- msg
})
*/
//Forced disconnection event
u.socket.On("disconnection", func() {
var msg Command
msg.Cmd = C_End
u.Send <- msg
})
} | {
addr := s.game.host + ":" + s.game.port
err := s.connectSession(addr)
if err != nil {
//return nil, err
log.Print(err)
}
request := make(map[string]interface{})
request["event"] = "new"
request["players"] = s.CurrentUserCount()
request["maxplayers"] = s.game.MaxUsers()
jsonMsg, err := json.Marshal(request)
if err != nil {
log.Print(err)
}
s.timeout = make(chan bool, 1) //create the timeout channel before the goroutines below race to use it
s.Send <- jsonMsg
go s.requestTimeout()
go s.sendTimeout()
} | identifier_body |
session.go | package popart
import (
"bufio"
"fmt"
"io"
"net"
"net/textproto"
"strconv"
"strings"
"time"
)
const (
stateAuthorization = iota
stateTransaction
stateUpdate
stateTerminateConnection
)
type operationHandler func(s *session, args []string) error
var (
operationHandlers = map[string]operationHandler{
"APOP": (*session).handleAPOP,
"CAPA": (*session).handleCAPA,
"DELE": (*session).handleDELE,
"LIST": (*session).handleLIST,
"NOOP": (*session).handleNOOP,
"PASS": (*session).handlePASS,
"QUIT": (*session).handleQUIT,
"RETR": (*session).handleRETR,
"RSET": (*session).handleRSET,
"STAT": (*session).handleSTAT,
"TOP": (*session).handleTOP,
"UIDL": (*session).handleUIDL,
"USER": (*session).handleUSER,
}
)
type session struct {
server *Server
handler Handler
conn net.Conn
state int
username string
markedDeleted map[uint64]struct{}
msgSizes map[uint64]uint64
reader *textproto.Reader
writer *textproto.Writer
}
func newSession(server *Server, handler Handler, conn net.Conn) *session {
return &session{
server: server,
handler: handler,
conn: conn,
markedDeleted: make(map[uint64]struct{}),
msgSizes: make(map[uint64]uint64),
reader: textproto.NewReader(bufio.NewReader(conn)),
writer: textproto.NewWriter(bufio.NewWriter(conn)),
}
}
// serve method handles the entire session, which after the first message from
// the server is a series of command-response interactions.
func (s *session) serve() {
defer s.conn.Close()
defer s.unlock() // unlock maildrop if locked no matter what
helloParts := []string{"POP3 server ready"}
if s.server.APOP {
banner := s.server.getBanner()
helloParts = append(helloParts, banner)
if err := s.handler.SetBanner(banner); err != nil {
s.handler.HandleSessionError(err)
return // go home handler, you're drunk!
}
}
if err := s.respondOK(strings.Join(helloParts, " ")); err != nil {
s.handler.HandleSessionError(err)
return // communication problem, most likely?
}
for {
if keepGoing := s.serveOne(); !keepGoing {
return
}
}
}
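// A minimal authorization-state exchange as handled by serve/serveOne
// (sketch; "alice"/"secret" are made-up credentials):
//
//	S: +OK POP3 server ready
//	C: USER alice
//	S: +OK welcome alice
//	C: PASS secret
//	S: +OK alice's maildrop has 2 messages (320 octets)
//	C: QUIT
//	S: +OK dewey POP3 server signing off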
// serveOne handles each command-response interaction with the client. The
// boolean return value indicates whether the communication with the client
// should continue or not.
func (s *session) serveOne() bool {
if s.state == stateTerminateConnection {
return false
}
readBy := time.Now().Add(s.server.Timeout)
if err := s.conn.SetReadDeadline(readBy); err != nil {
return s.handleError(err, false)
}
line, err := s.reader.ReadLine()
if err != nil {
return s.handleError(err, false) // communication problem, most likely?
}
args := strings.Split(line, " ")
command := strings.ToUpper(args[0])
cmdValidator, exists := validators[command]
if !exists {
return s.handleError(errInvalidSyntax, true) // unknown command
}
if err := cmdValidator.validate(s, args[1:]); err != nil {
return s.handleError(err, true)
}
return s.handleError(operationHandlers[command](s, args[1:]), true)
}
// handleCAPA is a callback for capability listing.
// RFC 2449, page 2.
func (s *session) handleCAPA(args []string) error {
if err := s.respondOK("Capability list follows"); err != nil {
return err
}
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
for _, capability := range s.server.capabilities {
if _, err := fmt.Fprintln(dotWriter, capability); err != nil {
return err
}
}
return nil
}
// handleAPOP is a callback for an APOP authentication mechanism.
// RFC 1939, page 15.
func (s *session) handleAPOP(args []string) error {
if !s.server.APOP {
return NewReportableError("server does not support APOP")
}
if err := s.handler.AuthenticateAPOP(args[0], args[1]); err != nil {
return err
}
return s.signIn()
}
// handleDELE is a callback for a single message deletion.
// RFC 1939, page 8.
func (s *session) handleDELE(args []string) error {
return s.withMessageDo(args[0], func(msgId uint64) error {
s.markedDeleted[msgId] = struct{}{}
return s.respondOK("message %d deleted", msgId)
})
}
// handleLIST is a callback for listing one or more messages.
// RFC 1939, page 6.
func (s *session) handleLIST(args []string) error {
if len(args) == 1 {
return s.withMessageDo(args[0], func(msgId uint64) error {
return s.respondOK("%d %d", msgId, s.msgSizes[msgId])
})
}
return s.forEachMessage(func(msgId uint64) (string, error) {
return fmt.Sprintf("%d %d", msgId, s.msgSizes[msgId]), nil
})
}
// handleNOOP is a callback for a no-op (timeout reset) command.
// RFC 1939, page 9.
func (s *session) handleNOOP(args []string) error {
return s.respondOK("doing nothing")
}
// handlePASS is a callback for the client providing password ("PASS" command).
// This must have been preceded by a "USER" command where the client provides
// its username.
// RFC 1939, page 14.
func (s *session) handlePASS(args []string) error {
if s.username == "" |
if err := s.handler.AuthenticatePASS(s.username, args[0]); err != nil {
return err
}
return s.signIn()
}
// handleQUIT is a callback for the client terminating the session. It will do
// slightly different things depending on the current state of the transaction.
// RFC 1939, pages 5 (in authorization state) and 10 (in transaction state).
func (s *session) handleQUIT(args []string) error {
bye := func() error {
s.state = stateTerminateConnection
return s.respondOK("dewey POP3 server signing off")
}
if s.state == stateAuthorization {
return bye()
}
s.state = stateUpdate // so that no future calls will succeed
var delMsg []uint64
for key := range s.markedDeleted {
delMsg = append(delMsg, key)
}
if err := s.handler.DeleteMessages(delMsg); err != nil {
return err
}
return bye()
}
// handleRETR is a callback for the client requesting full content of a single
// message.
// RFC 1939, page 8.
func (s *session) handleRETR(args []string) (err error) {
return s.withMessageDo(args[0], func(msgId uint64) error {
if err := s.respondOK("%d octets", s.msgSizes[msgId]); err != nil {
return err
}
readCloser, err := s.handler.GetMessageReader(msgId)
if err != nil {
return err
}
defer s.closeOrReport(readCloser)
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
_, err = io.Copy(dotWriter, readCloser)
return err
})
}
// handleRSET is a callback for the client requesting the session to be reset.
// This essentially means undeleting all messages previously marked for
// deletion.
// RFC 1939, page 9.
func (s *session) handleRSET(args []string) error {
s.markedDeleted = make(map[uint64]struct{})
return s.respondOK(
"maildrop has %d messages (%d octets)",
s.getMessageCount(),
s.getMaildropSize(),
)
}
// handleSTAT is a callback for the drop listing: the message count and the
// total size of the maildrop in octets.
// RFC 1939, page 6.
func (s *session) handleSTAT(args []string) error {
return s.respondOK("%d %d", s.getMessageCount(), s.getMaildropSize())
}
// handleTOP is a callback for the client requesting a number of lines from the
// top of a single message.
// RFC 1939, page 11.
func (s *session) handleTOP(args []string) error {
return s.withMessageDo(args[0], func(msgId uint64) error {
noLines, err := strconv.ParseUint(args[1], 10, 64)
if err != nil {
return errInvalidSyntax
}
if err := s.writer.PrintfLine("+OK"); err != nil {
return err
}
readCloser, err := s.handler.GetMessageReader(msgId)
if err != nil {
return err
}
defer s.closeOrReport(readCloser)
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
protoReader := textproto.NewReader(bufio.NewReader(readCloser))
for i := uint64(0); i < noLines; i++ {
line, readErr := protoReader.ReadLineBytes()
if err := printTopLine(line, readErr, dotWriter); err != nil {
if err == io.EOF {
break // message has fewer lines than requested; send what was read
}
return err
}
}
return nil
})
}
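// Hypothetical exchange for TOP (sketch): the client asks for the headers
// plus the first two body lines of message 1.
//
//	C: TOP 1 2
//	S: +OK
//	S: <headers, two body lines, then a line holding a single dot>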
func printTopLine(line []byte, readErr error, writer io.Writer) error {
if readErr == io.EOF || readErr == nil {
if err := writeWithError(writer, line); err != nil {
return err
}
}
if readErr != nil {
return readErr
}
return writeWithError(writer, []byte{'\n'})
}
func writeWithError(w io.Writer, content []byte) error {
_, err := w.Write(content)
return err
}
// handleUIDL is a callback for the client requesting unique message
// identifiers for either one or all messages.
// RFC 1939, page 12.
func (s *session) handleUIDL(args []string) (err error) {
if len(args) == 1 {
return s.withMessageDo(args[0], func(msgId uint64) error {
uidl, err := s.handler.GetMessageID(msgId)
if err != nil {
return err
}
return s.respondOK("%d %s", msgId, uidl)
})
}
return s.forEachMessage(func(msgId uint64) (string, error) {
uidl, err := s.handler.GetMessageID(msgId)
if err != nil {
return "", err
}
return fmt.Sprintf("%d %s", msgId, uidl), nil
})
}
// handleUSER is a callback for the client providing its username. This must be
// followed by a "PASS" command with a corresponding password.
// RFC 1939, page 13.
func (s *session) handleUSER(args []string) (err error) {
s.username = args[0]
return s.respondOK("welcome %s", s.username)
}
// handleError provides a helper to decide what to do with the result of a
// single command handler. There are three possible outcomes. First - the
// command succeeded. Second, the command failed but the failure is reported to
// the user and the transaction continues. Third, an error occurred that calls
// for an immediate termination of the session.
func (s *session) handleError(err error, shouldContinue bool) bool {
if err == nil {
return shouldContinue
}
rErr, isReportable := err.(*ReportableError)
if isReportable {
if err = s.writer.PrintfLine("-ERR %s", rErr); err == nil {
return shouldContinue
}
}
s.state = stateTerminateConnection // will terminate the connection!
s.handler.HandleSessionError(err)
return shouldContinue
}
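// Sketch of the two error classes handleError distinguishes (hypothetical
// values; NewReportableError is the constructor used elsewhere in this file):
//
//	soft := NewReportableError("no such message: %d", 42) // "-ERR ..." sent, session continues
//	hard := io.ErrUnexpectedEOF // not reportable, terminates the session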
// respondOK provides a helper to write a "success" line to the client, with
// printf-like formatting. It will only fail if it is impossible to write to the
// client (e.g. closed TCP socket).
func (s *session) respondOK(format string, args ...interface{}) error {
return s.writer.PrintfLine(fmt.Sprintf("+OK %s", format), args...)
}
// fetchMaildropStats queries the handler for message count and sizes and,
// based on that, builds maildrop statistics that are then cached internally
// throughout the whole length of the session.
func (s *session) fetchMaildropStats() error {
msgCount, err := s.handler.GetMessageCount()
if err != nil {
return err
}
for i := uint64(0); i < msgCount; i++ {
mSize, err := s.handler.GetMessageSize(i + 1)
if err != nil {
return err
}
s.msgSizes[i+1] = mSize
}
return nil
}
// signIn is called after successful authentication whereby the protocol
// requires that the maildrop is not available to any other users trying to
// access it concurrently (RFC 1939, page 3).
func (s *session) signIn() error {
if err := s.handler.LockMaildrop(); err != nil {
return err
}
s.state = stateTransaction
if err := s.fetchMaildropStats(); err != nil {
return err
}
return s.respondOK(
"%s's maildrop has %d messages (%d octets)",
s.username,
s.getMessageCount(),
s.getMaildropSize(),
)
}
// getMessageCount reports the relevant number based on cached data.
func (s *session) getMessageCount() uint64 {
return uint64(len(s.msgSizes) - len(s.markedDeleted))
}
// getMaildropSize reports the relevant number based on cached data.
func (s *session) getMaildropSize() uint64 {
var ret uint64
for msgID, size := range s.msgSizes {
if _, deleted := s.markedDeleted[msgID]; !deleted {
ret += size
}
}
return ret
}
// forEachMessage is a helper that allows a callback to be invoked for every
// message in the maildrop that is not deleted. The callback is expected to
// return a line that is then printed out to the client.
func (s *session) forEachMessage(fn func(id uint64) (string, error)) error {
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
for i := uint64(0); i < uint64(len(s.msgSizes)); i++ {
if _, deleted := s.markedDeleted[i+1]; deleted {
continue
}
line, err := fn(i + 1)
if err != nil {
return err
}
if _, err := fmt.Fprintln(dotWriter, line); err != nil {
return err
}
}
return nil
}
// withMessageDo is a wrapper for handlers operating on a single message. It
// generally makes sure that the message number provided makes sense within
// the context of the current transaction.
func (s *session) withMessageDo(sID string, fn func(id uint64) error) error {
msgID, err := strconv.ParseUint(sID, 10, 64)
if err != nil {
return errInvalidSyntax
}
if msgID == 0 || msgID > uint64(len(s.msgSizes)) {
return NewReportableError("no such message: %d", msgID)
}
if _, gone := s.markedDeleted[msgID]; gone {
return NewReportableError("message %d already deleted", msgID)
}
return fn(msgID)
}
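// Hypothetical usage sketch: the single-message handlers above wrap their
// work in withMessageDo so that ID parsing and deleted-message checks live in
// one place, e.g.:
//
//	return s.withMessageDo(args[0], func(msgId uint64) error {
//		return s.respondOK("%d %d", msgId, s.msgSizes[msgId])
//	})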
// unlock will unlock the client's maildrop if it is locked. Note that we assume
// the mailbox is locked if the exchange proceeded past the authorization stage.
func (s *session) unlock() {
if s.state == stateAuthorization {
return // we didn't yet even have a chance to lock the maildrop
}
if err := s.handler.UnlockMaildrop(); err != nil {
s.handler.HandleSessionError(err)
}
}
// closeOrReport provides a wrapper that allows deferred 'Close' operations to have
// their errors reported to the session error handler.
func (s *session) closeOrReport(closer io.Closer) {
if err := closer.Close(); err != nil {
s.handler.HandleSessionError(err)
}
}
| {
return NewReportableError("please provide username first")
} | conditional_block |
session.go | package popart
import (
"bufio"
"fmt"
"io"
"net"
"net/textproto"
"strconv"
"strings" | const (
stateAuthorization = iota
stateTransaction
stateUpdate
stateTerminateConnection
)
type operationHandler func(s *session, args []string) error
var (
operationHandlers = map[string]operationHandler{
"APOP": (*session).handleAPOP,
"CAPA": (*session).handleCAPA,
"DELE": (*session).handleDELE,
"LIST": (*session).handleLIST,
"NOOP": (*session).handleNOOP,
"PASS": (*session).handlePASS,
"QUIT": (*session).handleQUIT,
"RETR": (*session).handleRETR,
"RSET": (*session).handleRSET,
"STAT": (*session).handleSTAT,
"TOP": (*session).handleTOP,
"UIDL": (*session).handleUIDL,
"USER": (*session).handleUSER,
}
)
type session struct {
server *Server
handler Handler
conn net.Conn
state int
username string
markedDeleted map[uint64]struct{}
msgSizes map[uint64]uint64
reader *textproto.Reader
writer *textproto.Writer
}
func newSession(server *Server, handler Handler, conn net.Conn) *session {
return &session{
server: server,
handler: handler,
conn: conn,
markedDeleted: make(map[uint64]struct{}),
msgSizes: make(map[uint64]uint64),
reader: textproto.NewReader(bufio.NewReader(conn)),
writer: textproto.NewWriter(bufio.NewWriter(conn)),
}
}
// serve method handles the entire session, which after the first message from
// the server is a series of command-response interactions.
func (s *session) serve() {
defer s.conn.Close()
defer s.unlock() // unlock maildrop if locked no matter what
helloParts := []string{"POP3 server ready"}
if s.server.APOP {
banner := s.server.getBanner()
helloParts = append(helloParts, banner)
if err := s.handler.SetBanner(banner); err != nil {
s.handler.HandleSessionError(err)
return // go home handler, you're drunk!
}
}
if err := s.respondOK(strings.Join(helloParts, " ")); err != nil {
s.handler.HandleSessionError(err)
return // communication problem, most likely?
}
for {
if keepGoing := s.serveOne(); !keepGoing {
return
}
}
}
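// A minimal authorization-state exchange as handled by serve/serveOne
// (sketch; "alice"/"secret" are made-up credentials):
//
//	S: +OK POP3 server ready
//	C: USER alice
//	S: +OK welcome alice
//	C: PASS secret
//	S: +OK alice's maildrop has 2 messages (320 octets)
//	C: QUIT
//	S: +OK dewey POP3 server signing off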
// serveOne handles each command-response interaction with the client. The
// boolean return value indicates whether the communication with the client
// should continue or not.
func (s *session) serveOne() bool {
if s.state == stateTerminateConnection {
return false
}
readBy := time.Now().Add(s.server.Timeout)
if err := s.conn.SetReadDeadline(readBy); err != nil {
return s.handleError(err, false)
}
line, err := s.reader.ReadLine()
if err != nil {
return s.handleError(err, false) // communication problem, most likely?
}
args := strings.Split(line, " ")
command := strings.ToUpper(args[0])
cmdValidator, exists := validators[command]
if !exists {
return s.handleError(errInvalidSyntax, true) // unknown command
}
if err := cmdValidator.validate(s, args[1:]); err != nil {
return s.handleError(err, true)
}
return s.handleError(operationHandlers[command](s, args[1:]), true)
}
// handleCAPA is a callback for capability listing.
// RFC 2449, page 2.
func (s *session) handleCAPA(args []string) error {
if err := s.respondOK("Capability list follows"); err != nil {
return err
}
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
for _, capability := range s.server.capabilities {
if _, err := fmt.Fprintln(dotWriter, capability); err != nil {
return err
}
}
return nil
}
// handleAPOP is a callback for an APOP authentication mechanism.
// RFC 1939, page 15.
func (s *session) handleAPOP(args []string) error {
if !s.server.APOP {
return NewReportableError("server does not support APOP")
}
if err := s.handler.AuthenticateAPOP(args[0], args[1]); err != nil {
return err
}
return s.signIn()
}
// handleDELE is a callback for a single message deletion.
// RFC 1939, page 8.
func (s *session) handleDELE(args []string) error {
return s.withMessageDo(args[0], func(msgId uint64) error {
s.markedDeleted[msgId] = struct{}{}
return s.respondOK("message %d deleted", msgId)
})
}
// handleLIST is a callback for listing one or more messages.
// RFC 1939, page 6.
func (s *session) handleLIST(args []string) error {
if len(args) == 1 {
return s.withMessageDo(args[0], func(msgId uint64) error {
return s.respondOK("%d %d", msgId, s.msgSizes[msgId])
})
}
return s.forEachMessage(func(msgId uint64) (string, error) {
return fmt.Sprintf("%d %d", msgId, s.msgSizes[msgId]), nil
})
}
// handleNOOP is a callback for a no-op (timeout reset) command.
// RFC 1939, page 9.
func (s *session) handleNOOP(args []string) error {
return s.respondOK("doing nothing")
}
// handlePASS is a callback for the client providing password ("PASS" command).
// This must have been preceded by a "USER" command where the client provides
// its username.
// RFC 1939, page 14.
func (s *session) handlePASS(args []string) error {
if s.username == "" {
return NewReportableError("please provide username first")
}
if err := s.handler.AuthenticatePASS(s.username, args[0]); err != nil {
return err
}
return s.signIn()
}
// handleQUIT is a callback for the client terminating the session. It will do
// slightly different things depending on the current state of the transaction.
// RFC 1939, pages 5 (in authorization state) and 10 (in transaction state).
func (s *session) handleQUIT(args []string) error {
bye := func() error {
s.state = stateTerminateConnection
return s.respondOK("dewey POP3 server signing off")
}
if s.state == stateAuthorization {
return bye()
}
s.state = stateUpdate // so that no future calls will succeed
var delMsg []uint64
for key := range s.markedDeleted {
delMsg = append(delMsg, key)
}
if err := s.handler.DeleteMessages(delMsg); err != nil {
return err
}
return bye()
}
// handleRETR is a callback for the client requesting full content of a single
// message.
// RFC 1939, page 8.
func (s *session) handleRETR(args []string) (err error) {
return s.withMessageDo(args[0], func(msgId uint64) error {
if err := s.respondOK("%d octets", s.msgSizes[msgId]); err != nil {
return err
}
readCloser, err := s.handler.GetMessageReader(msgId)
if err != nil {
return err
}
defer s.closeOrReport(readCloser)
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
_, err = io.Copy(dotWriter, readCloser)
return err
})
}
// handleRSET is a callback for the client requesting the session to be reset.
// This essentially means undeleting all messages previously marked for
// deletion.
// RFC 1939, page 9.
func (s *session) handleRSET(args []string) error {
s.markedDeleted = make(map[uint64]struct{})
return s.respondOK(
"maildrop has %d messages (%d octets)",
s.getMessageCount(),
s.getMaildropSize(),
)
}
// handleSTAT is a callback for the drop listing: the message count and the
// total size of the maildrop in octets.
// RFC 1939, page 6.
func (s *session) handleSTAT(args []string) error {
return s.respondOK("%d %d", s.getMessageCount(), s.getMaildropSize())
}
// handleTOP is a callback for the client requesting a number of lines from the
// top of a single message.
// RFC 1939, page 11.
func (s *session) handleTOP(args []string) error {
return s.withMessageDo(args[0], func(msgId uint64) error {
noLines, err := strconv.ParseUint(args[1], 10, 64)
if err != nil {
return errInvalidSyntax
}
if err := s.writer.PrintfLine("+OK"); err != nil {
return err
}
readCloser, err := s.handler.GetMessageReader(msgId)
if err != nil {
return err
}
defer s.closeOrReport(readCloser)
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
protoReader := textproto.NewReader(bufio.NewReader(readCloser))
for i := uint64(0); i < noLines; i++ {
line, readErr := protoReader.ReadLineBytes()
if err := printTopLine(line, readErr, dotWriter); err != nil {
if err == io.EOF {
break // message has fewer lines than requested; send what was read
}
return err
}
}
return nil
})
}
func printTopLine(line []byte, readErr error, writer io.Writer) error {
if readErr == io.EOF || readErr == nil {
if err := writeWithError(writer, line); err != nil {
return err
}
}
if readErr != nil {
return readErr
}
return writeWithError(writer, []byte{'\n'})
}
func writeWithError(w io.Writer, content []byte) error {
_, err := w.Write(content)
return err
}
// handleUIDL is a callback for the client requesting unique message
// identifiers for either one or all messages.
// RFC 1939, page 12.
func (s *session) handleUIDL(args []string) (err error) {
if len(args) == 1 {
return s.withMessageDo(args[0], func(msgId uint64) error {
uidl, err := s.handler.GetMessageID(msgId)
if err != nil {
return err
}
return s.respondOK("%d %s", msgId, uidl)
})
}
return s.forEachMessage(func(msgId uint64) (string, error) {
uidl, err := s.handler.GetMessageID(msgId)
if err != nil {
return "", err
}
return fmt.Sprintf("%d %s", msgId, uidl), nil
})
}
// handleUSER is a callback for the client providing its username. This must be
// followed by a "PASS" command with a corresponding password.
// RFC 1939, page 13.
func (s *session) handleUSER(args []string) (err error) {
s.username = args[0]
return s.respondOK("welcome %s", s.username)
}
// handleError provides a helper to decide what to do with the result of a
// single command handler. There are three possible outcomes. First - the
// command succeeded. Second, the command failed but the failure is reported to
// the user and the transaction continues. Third, an error occurred that calls
// for an immediate termination of the session.
func (s *session) handleError(err error, shouldContinue bool) bool {
if err == nil {
return shouldContinue
}
rErr, isReportable := err.(*ReportableError)
if isReportable {
if err = s.writer.PrintfLine("-ERR %s", rErr); err == nil {
return shouldContinue
}
}
s.state = stateTerminateConnection // will terminate the connection!
s.handler.HandleSessionError(err)
return shouldContinue
}
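// Sketch of the two error classes handleError distinguishes (hypothetical
// values; NewReportableError is the constructor used elsewhere in this file):
//
//	soft := NewReportableError("no such message: %d", 42) // "-ERR ..." sent, session continues
//	hard := io.ErrUnexpectedEOF // not reportable, terminates the session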
// respondOK provides a helper to write a "success" line to the client, with
// printf-like formatting. It will only fail if it is impossible to write to the
// client (e.g. closed TCP socket).
func (s *session) respondOK(format string, args ...interface{}) error {
return s.writer.PrintfLine(fmt.Sprintf("+OK %s", format), args...)
}
// fetchMaildropStats queries the handler for message count and sizes and,
// based on that, builds maildrop statistics that are then cached internally
// throughout the whole length of the session.
func (s *session) fetchMaildropStats() error {
msgCount, err := s.handler.GetMessageCount()
if err != nil {
return err
}
for i := uint64(0); i < msgCount; i++ {
mSize, err := s.handler.GetMessageSize(i + 1)
if err != nil {
return err
}
s.msgSizes[i+1] = mSize
}
return nil
}
// signIn is called after successful authentication whereby the protocol
// requires that the maildrop is not available to any other users trying to
// access it concurrently (RFC 1939, page 3).
func (s *session) signIn() error {
if err := s.handler.LockMaildrop(); err != nil {
return err
}
s.state = stateTransaction
if err := s.fetchMaildropStats(); err != nil {
return err
}
return s.respondOK(
"%s's maildrop has %d messages (%d octets)",
s.username,
s.getMessageCount(),
s.getMaildropSize(),
)
}
// getMessageCount reports the relevant number based on cached data.
func (s *session) getMessageCount() uint64 {
return uint64(len(s.msgSizes) - len(s.markedDeleted))
}
// getMaildropSize reports the relevant number based on cached data.
func (s *session) getMaildropSize() uint64 {
var ret uint64
for msgID, size := range s.msgSizes {
if _, deleted := s.markedDeleted[msgID]; !deleted {
ret += size
}
}
return ret
}
// forEachMessage is a helper that allows a callback to be invoked for every
// message in the maildrop that is not deleted. The callback is expected to
// return a line that is then printed out to the client.
func (s *session) forEachMessage(fn func(id uint64) (string, error)) error {
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
for i := uint64(0); i < uint64(len(s.msgSizes)); i++ {
if _, deleted := s.markedDeleted[i+1]; deleted {
continue
}
line, err := fn(i + 1)
if err != nil {
return err
}
if _, err := fmt.Fprintln(dotWriter, line); err != nil {
return err
}
}
return nil
}
// withMessageDo is a wrapper for handlers operating on a single message. It
// generally makes sure that the message number provided makes sense within
// the context of the current transaction.
func (s *session) withMessageDo(sID string, fn func(id uint64) error) error {
msgID, err := strconv.ParseUint(sID, 10, 64)
if err != nil {
return errInvalidSyntax
}
if msgID == 0 || msgID > uint64(len(s.msgSizes)) {
return NewReportableError("no such message: %d", msgID)
}
if _, gone := s.markedDeleted[msgID]; gone {
return NewReportableError("message %d already deleted", msgID)
}
return fn(msgID)
}
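// Hypothetical usage sketch: the single-message handlers above wrap their
// work in withMessageDo so that ID parsing and deleted-message checks live in
// one place, e.g.:
//
//	return s.withMessageDo(args[0], func(msgId uint64) error {
//		return s.respondOK("%d %d", msgId, s.msgSizes[msgId])
//	})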
// unlock will unlock the client's maildrop if it is locked. Note that we assume
// the mailbox is locked if the exchange proceeded past the authorization stage.
func (s *session) unlock() {
if s.state == stateAuthorization {
return // we didn't yet even have a chance to lock the maildrop
}
if err := s.handler.UnlockMaildrop(); err != nil {
s.handler.HandleSessionError(err)
}
}
// closeOrReport provides a wrapper that allows deferred 'Close' operations to have
// their errors reported to the session error handler.
func (s *session) closeOrReport(closer io.Closer) {
if err := closer.Close(); err != nil {
s.handler.HandleSessionError(err)
}
} | "time"
)
| random_line_split |
session.go | package popart
import (
"bufio"
"fmt"
"io"
"net"
"net/textproto"
"strconv"
"strings"
"time"
)
const (
stateAuthorization = iota
stateTransaction
stateUpdate
stateTerminateConnection
)
type operationHandler func(s *session, args []string) error
var (
operationHandlers = map[string]operationHandler{
"APOP": (*session).handleAPOP,
"CAPA": (*session).handleCAPA,
"DELE": (*session).handleDELE,
"LIST": (*session).handleLIST,
"NOOP": (*session).handleNOOP,
"PASS": (*session).handlePASS,
"QUIT": (*session).handleQUIT,
"RETR": (*session).handleRETR,
"RSET": (*session).handleRSET,
"STAT": (*session).handleSTAT,
"TOP": (*session).handleTOP,
"UIDL": (*session).handleUIDL,
"USER": (*session).handleUSER,
}
)
type session struct {
server *Server
handler Handler
conn net.Conn
state int
username string
markedDeleted map[uint64]struct{}
msgSizes map[uint64]uint64
reader *textproto.Reader
writer *textproto.Writer
}
func newSession(server *Server, handler Handler, conn net.Conn) *session {
return &session{
server: server,
handler: handler,
conn: conn,
markedDeleted: make(map[uint64]struct{}),
msgSizes: make(map[uint64]uint64),
reader: textproto.NewReader(bufio.NewReader(conn)),
writer: textproto.NewWriter(bufio.NewWriter(conn)),
}
}
// serve method handles the entire session, which after the first message from
// the server is a series of command-response interactions.
func (s *session) serve() {
defer s.conn.Close()
defer s.unlock() // unlock maildrop if locked no matter what
helloParts := []string{"POP3 server ready"}
if s.server.APOP {
banner := s.server.getBanner()
helloParts = append(helloParts, banner)
if err := s.handler.SetBanner(banner); err != nil {
s.handler.HandleSessionError(err)
return // go home handler, you're drunk!
}
}
if err := s.respondOK(strings.Join(helloParts, " ")); err != nil {
s.handler.HandleSessionError(err)
return // communication problem, most likely?
}
for {
if keepGoing := s.serveOne(); !keepGoing {
return
}
}
}
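// A minimal authorization-state exchange as handled by serve/serveOne
// (sketch; "alice"/"secret" are made-up credentials):
//
//	S: +OK POP3 server ready
//	C: USER alice
//	S: +OK welcome alice
//	C: PASS secret
//	S: +OK alice's maildrop has 2 messages (320 octets)
//	C: QUIT
//	S: +OK dewey POP3 server signing off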
// serveOne handles each command-response interaction with the client. The
// boolean return value indicates whether the communication with the client
// should continue or not.
func (s *session) serveOne() bool {
if s.state == stateTerminateConnection {
return false
}
readBy := time.Now().Add(s.server.Timeout)
if err := s.conn.SetReadDeadline(readBy); err != nil {
return s.handleError(err, false)
}
line, err := s.reader.ReadLine()
if err != nil {
return s.handleError(err, false) // communication problem, most likely?
}
args := strings.Split(line, " ")
command := strings.ToUpper(args[0])
cmdValidator, exists := validators[command]
if !exists {
return s.handleError(errInvalidSyntax, true) // unknown command
}
if err := cmdValidator.validate(s, args[1:]); err != nil {
return s.handleError(err, true)
}
return s.handleError(operationHandlers[command](s, args[1:]), true)
}
// handleCAPA is a callback for capability listing.
// RFC 2449, page 2.
func (s *session) handleCAPA(args []string) error {
if err := s.respondOK("Capability list follows"); err != nil {
return err
}
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
for _, capability := range s.server.capabilities {
if _, err := fmt.Fprintln(dotWriter, capability); err != nil {
return err
}
}
return nil
}
// handleAPOP is a callback for an APOP authentication mechanism.
// RFC 1939, page 15.
func (s *session) handleAPOP(args []string) error {
if !s.server.APOP {
return NewReportableError("server does not support APOP")
}
if err := s.handler.AuthenticateAPOP(args[0], args[1]); err != nil {
return err
}
return s.signIn()
}
// handleDELE is a callback for a single message deletion.
// RFC 1939, page 8.
func (s *session) handleDELE(args []string) error {
return s.withMessageDo(args[0], func(msgId uint64) error {
s.markedDeleted[msgId] = struct{}{}
return s.respondOK("message %d deleted", msgId)
})
}
// handleLIST is a callback for listing one or more messages.
// RFC 1939, page 6.
func (s *session) handleLIST(args []string) error {
if len(args) == 1 {
return s.withMessageDo(args[0], func(msgId uint64) error {
return s.respondOK("%d %d", msgId, s.msgSizes[msgId])
})
}
return s.forEachMessage(func(msgId uint64) (string, error) {
return fmt.Sprintf("%d %d", msgId, s.msgSizes[msgId]), nil
})
}
// handleNOOP is a callback for a no-op (timeout reset) command.
// RFC 1939, page 9.
func (s *session) handleNOOP(args []string) error {
return s.respondOK("doing nothing")
}
// handlePASS is a callback for the client providing password ("PASS" command).
// This must have been preceded by a "USER" command where the client provides
// its username.
// RFC 1939, page 14.
func (s *session) handlePASS(args []string) error {
if s.username == "" {
return NewReportableError("please provide username first")
}
if err := s.handler.AuthenticatePASS(s.username, args[0]); err != nil {
return err
}
return s.signIn()
}
// handleQUIT is a callback for the client terminating the session. It will do
// slightly different things depending on the current state of the transaction.
// RFC 1939, pages 5 (in authorization state) and 10 (in transaction state).
func (s *session) handleQUIT(args []string) error {
bye := func() error {
s.state = stateTerminateConnection
return s.respondOK("dewey POP3 server signing off")
}
if s.state == stateAuthorization {
return bye()
}
s.state = stateUpdate // so that no future calls will succeed
var delMsg []uint64
for key := range s.markedDeleted {
delMsg = append(delMsg, key)
}
if err := s.handler.DeleteMessages(delMsg); err != nil {
return err
}
return bye()
}
// handleRETR is a callback for the client requesting full content of a single
// message.
// RFC 1939, page 8.
func (s *session) | (args []string) (err error) {
return s.withMessageDo(args[0], func(msgId uint64) error {
if err := s.respondOK("%d octets", s.msgSizes[msgId]); err != nil {
return err
}
readCloser, err := s.handler.GetMessageReader(msgId)
if err != nil {
return err
}
defer s.closeOrReport(readCloser)
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
_, err = io.Copy(dotWriter, readCloser)
return err
})
}
// handleRSET is a callback for the client requesting the session to be reset.
// This essentially means undeleting all messages previously marked for
// deletion.
// RFC 1939, page 9.
func (s *session) handleRSET(args []string) error {
s.markedDeleted = make(map[uint64]struct{})
return s.respondOK(
"maildrop has %d messages (%d octets)",
s.getMessageCount(),
s.getMaildropSize(),
)
}
// handleSTAT is a callback for the drop listing: the message count and the
// total size of the maildrop in octets.
// RFC 1939, page 6.
func (s *session) handleSTAT(args []string) error {
return s.respondOK("%d %d", s.getMessageCount(), s.getMaildropSize())
}
// handleTOP is a callback for the client requesting a number of lines from the
// top of a single message.
// RFC 1939, page 11.
func (s *session) handleTOP(args []string) error {
return s.withMessageDo(args[0], func(msgId uint64) error {
noLines, err := strconv.ParseUint(args[1], 10, 64)
if err != nil {
return errInvalidSyntax
}
if err := s.writer.PrintfLine("+OK"); err != nil {
return err
}
readCloser, err := s.handler.GetMessageReader(msgId)
if err != nil {
return err
}
defer s.closeOrReport(readCloser)
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
protoReader := textproto.NewReader(bufio.NewReader(readCloser))
for i := uint64(0); i < noLines; i++ {
line, readErr := protoReader.ReadLineBytes()
if err := printTopLine(line, readErr, dotWriter); err != nil {
if err == io.EOF {
break // message has fewer lines than requested; send what was read
}
return err
}
}
return nil
})
}
func printTopLine(line []byte, readErr error, writer io.Writer) error {
if readErr == io.EOF || readErr == nil {
if err := writeWithError(writer, line); err != nil {
return err
}
}
if readErr != nil {
return readErr
}
return writeWithError(writer, []byte{'\n'})
}
func writeWithError(w io.Writer, content []byte) error {
_, err := w.Write(content)
return err
}
// handleUIDL is a callback for the client requesting unique message
// identifiers for either one or all messages.
// RFC 1939, page 12.
func (s *session) handleUIDL(args []string) (err error) {
if len(args) == 1 {
return s.withMessageDo(args[0], func(msgId uint64) error {
uidl, err := s.handler.GetMessageID(msgId)
if err != nil {
return err
}
return s.respondOK("%d %s", msgId, uidl)
})
}
return s.forEachMessage(func(msgId uint64) (string, error) {
uidl, err := s.handler.GetMessageID(msgId)
if err != nil {
return "", err
}
return fmt.Sprintf("%d %s", msgId, uidl), nil
})
}
// handleUSER is a callback for the client providing its username. This must be
// followed by a "PASS" command with a corresponding password.
// RFC 1939, page 13.
func (s *session) handleUSER(args []string) (err error) {
s.username = args[0]
return s.respondOK("welcome %s", s.username)
}
// handleError provides a helper to decide what to do with the result of a
// single command handler. There are three possible outcomes. First - the
// command succeeded. Second, the command failed but the failure is reported to
// the user and the transaction continues. Third, an error occurred that calls
// for an immediate termination of the session.
func (s *session) handleError(err error, shouldContinue bool) bool {
if err == nil {
return shouldContinue
}
rErr, isReportable := err.(*ReportableError)
if isReportable {
if err = s.writer.PrintfLine("-ERR %s", rErr); err == nil {
return shouldContinue
}
}
s.state = stateTerminateConnection // will terminate the connection!
s.handler.HandleSessionError(err)
return shouldContinue
}
// respondOK provides a helper to write a "success" line to the client, with
// printf-like formatting. It will only fail if it is impossible to write to the
// client (e.g. closed TCP socket).
func (s *session) respondOK(format string, args ...interface{}) error {
return s.writer.PrintfLine(fmt.Sprintf("+OK %s", format), args...)
}
// fetchMaildropStats queries the handler for message count and sizes and,
// based on that, builds maildrop statistics that are then cached internally
// throughout the whole length of the session.
func (s *session) fetchMaildropStats() error {
msgCount, err := s.handler.GetMessageCount()
if err != nil {
return err
}
for i := uint64(0); i < msgCount; i++ {
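// POP3 message numbers are 1-based, hence the i+1 indexing below.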
mSize, err := s.handler.GetMessageSize(i + 1)
if err != nil {
return err
}
s.msgSizes[i+1] = mSize
}
return nil
}
// signIn is called after successful authentication whereby the protocol
// requires that the maildrop is not available to any other users trying to
// access it concurrently (RFC 1939, page 3).
func (s *session) signIn() error {
if err := s.handler.LockMaildrop(); err != nil {
return err
}
s.state = stateTransaction
if err := s.fetchMaildropStats(); err != nil {
return err
}
return s.respondOK(
"%s's maildrop has %d messages (%d octets)",
s.username,
s.getMessageCount(),
s.getMaildropSize(),
)
}
// getMessageCount reports the relevant number based on cached data.
func (s *session) getMessageCount() uint64 {
return uint64(len(s.msgSizes) - len(s.markedDeleted))
}
// getMaildropSize reports the relevant number based on cached data.
func (s *session) getMaildropSize() uint64 {
var ret uint64
for msgID, size := range s.msgSizes {
if _, deleted := s.markedDeleted[msgID]; !deleted {
ret += size
}
}
return ret
}
// forEachMessage is a helper that allows a callback to be invoked for every
// message in the maildrop that is not deleted. The callback is expected to
// return a line that is then printed out to the client.
func (s *session) forEachMessage(fn func(id uint64) (string, error)) error {
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
for i := uint64(0); i < uint64(len(s.msgSizes)); i++ {
if _, deleted := s.markedDeleted[i+1]; deleted {
continue
}
line, err := fn(i + 1)
if err != nil {
return err
}
if _, err := fmt.Fprintln(dotWriter, line); err != nil {
return err
}
}
return nil
}
// withMessageDo is a wrapper for handlers operating on a single message. It
// generally makes sure that the message number provided makes sense within
// the context of the current transaction.
func (s *session) withMessageDo(sID string, fn func(id uint64) error) error {
msgID, err := strconv.ParseUint(sID, 10, 64)
if err != nil {
return errInvalidSyntax
}
if msgID == 0 || msgID > uint64(len(s.msgSizes)) {
return NewReportableError("no such message: %d", msgID)
}
if _, gone := s.markedDeleted[msgID]; gone {
return NewReportableError("message %d already deleted", msgID)
}
return fn(msgID)
}
// unlock will unlock the client's maildrop if it is locked. Note that we assume
// the mailbox is locked if the exchange proceeded past the authorization stage.
func (s *session) unlock() {
if s.state == stateAuthorization {
return // we didn't yet even have a chance to lock the maildrop
}
if err := s.handler.UnlockMaildrop(); err != nil {
s.handler.HandleSessionError(err)
}
}
// closeOrReport provides a wrapper that allows deferred 'Close' operations to have
// their errors reported to the session error handler.
func (s *session) closeOrReport(closer io.Closer) {
if err := closer.Close(); err != nil {
s.handler.HandleSessionError(err)
}
}
| handleRETR | identifier_name |
session.go | package popart
import (
"bufio"
"fmt"
"io"
"net"
"net/textproto"
"strconv"
"strings"
"time"
)
const (
stateAuthorization = iota
stateTransaction
stateUpdate
stateTerminateConnection
)
type operationHandler func(s *session, args []string) error
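// operationHandlers maps each POP3 verb to its handler via Go method
// expressions: (*session).handleAPOP has type func(*session, []string) error,
// which matches operationHandler.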
var (
operationHandlers = map[string]operationHandler{
"APOP": (*session).handleAPOP,
"CAPA": (*session).handleCAPA,
"DELE": (*session).handleDELE,
"LIST": (*session).handleLIST,
"NOOP": (*session).handleNOOP,
"PASS": (*session).handlePASS,
"QUIT": (*session).handleQUIT,
"RETR": (*session).handleRETR,
"RSET": (*session).handleRSET,
"STAT": (*session).handleSTAT,
"TOP": (*session).handleTOP,
"UIDL": (*session).handleUIDL,
"USER": (*session).handleUSER,
}
)
type session struct {
server *Server
handler Handler
conn net.Conn
state int
username string
markedDeleted map[uint64]struct{}
msgSizes map[uint64]uint64
reader *textproto.Reader
writer *textproto.Writer
}
func newSession(server *Server, handler Handler, conn net.Conn) *session {
return &session{
server: server,
handler: handler,
conn: conn,
markedDeleted: make(map[uint64]struct{}),
msgSizes: make(map[uint64]uint64),
reader: textproto.NewReader(bufio.NewReader(conn)),
writer: textproto.NewWriter(bufio.NewWriter(conn)),
}
}
// serve handles the entire session which, after the initial greeting from
// the server, is a series of command-response interactions.
func (s *session) serve() {
defer s.conn.Close()
defer s.unlock() // unlock maildrop if locked no matter what
helloParts := []string{"POP3 server ready"}
if s.server.APOP {
banner := s.server.getBanner()
helloParts = append(helloParts, banner)
if err := s.handler.SetBanner(banner); err != nil {
s.handler.HandleSessionError(err)
return // go home handler, you're drunk!
}
}
if err := s.respondOK(strings.Join(helloParts, " ")); err != nil {
s.handler.HandleSessionError(err)
return // communication problem, most likely?
}
for {
if keepGoing := s.serveOne(); !keepGoing {
return
}
}
}
// serveOne handles each command-response interaction with the client. The
// boolean return value indicates whether the communication with the client
// should continue or not.
func (s *session) serveOne() bool {
if s.state == stateTerminateConnection {
return false
}
readBy := time.Now().Add(s.server.Timeout)
if err := s.conn.SetReadDeadline(readBy); err != nil {
return s.handleError(err, false)
}
line, err := s.reader.ReadLine()
if err != nil {
return s.handleError(err, false) // communication problem, most likely?
}
args := strings.Split(line, " ")
command := strings.ToUpper(args[0])
cmdValidator, exists := validators[command]
if !exists {
return s.handleError(errInvalidSyntax, true) // unknown command
}
if err := cmdValidator.validate(s, args[1:]); err != nil {
return s.handleError(err, true)
}
return s.handleError(operationHandlers[command](s, args[1:]), true)
}
// handleCAPA is a callback for capability listing.
// RFC 2449, page 2.
func (s *session) handleCAPA(args []string) error {
if err := s.respondOK("Capability list follows"); err != nil {
return err
}
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
for _, capability := range s.server.capabilities {
if _, err := fmt.Fprintln(dotWriter, capability); err != nil {
return err
}
}
return nil
}
// handleAPOP is a callback for an APOP authentication mechanism.
// RFC 1939, page 15.
func (s *session) handleAPOP(args []string) error {
if !s.server.APOP {
return NewReportableError("server does not support APOP")
}
if err := s.handler.AuthenticateAPOP(args[0], args[1]); err != nil {
return err
}
return s.signIn()
}
// handleDELE is a callback for a single message deletion.
// RFC 1939, page 8.
func (s *session) handleDELE(args []string) error {
return s.withMessageDo(args[0], func(msgId uint64) error {
s.markedDeleted[msgId] = struct{}{}
return s.respondOK("message %d deleted", msgId)
})
}
// handleLIST is a callback for listing one or more messages.
// RFC 1939, page 6.
func (s *session) handleLIST(args []string) error {
if len(args) == 1 {
return s.withMessageDo(args[0], func(msgId uint64) error {
return s.respondOK("%d %d", msgId, s.msgSizes[msgId])
})
}
return s.forEachMessage(func(msgId uint64) (string, error) {
return fmt.Sprintf("%d %d", msgId, s.msgSizes[msgId]), nil
})
}
// handleNOOP is a callback for a no-op (timeout reset) command.
// RFC 1939, page 9.
func (s *session) handleNOOP(args []string) error {
return s.respondOK("doing nothing")
}
// handlePASS is a callback for the client providing password ("PASS" command).
// This must have been preceded by a "USER" command where the client provides
// its username.
// RFC 1939, page 14.
func (s *session) handlePASS(args []string) error {
if s.username == "" {
return NewReportableError("please provide username first")
}
if err := s.handler.AuthenticatePASS(s.username, args[0]); err != nil {
return err
}
return s.signIn()
}
// handleQUIT is a callback for the client terminating the session. It will do
// slightly different things depending on the current state of the transaction.
// RFC 1939, pages 5 (in authorization state) and 10 (in transaction state).
func (s *session) handleQUIT(args []string) error {
bye := func() error {
s.state = stateTerminateConnection
return s.respondOK("dewey POP3 server signing off")
}
if s.state == stateAuthorization {
return bye()
}
s.state = stateUpdate // so that no future calls will succeed
var delMsg []uint64
for key := range s.markedDeleted {
delMsg = append(delMsg, key)
}
if err := s.handler.DeleteMessages(delMsg); err != nil {
return err
}
return bye()
}
// handleRETR is a callback for the client requesting the full content of a single
// message.
// RFC 1939, page 8.
func (s *session) handleRETR(args []string) (err error) {
return s.withMessageDo(args[0], func(msgId uint64) error {
if err := s.respondOK("%d octets", s.msgSizes[msgId]); err != nil {
return err
}
readCloser, err := s.handler.GetMessageReader(msgId)
if err != nil {
return err
}
defer s.closeOrReport(readCloser)
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
_, err = io.Copy(dotWriter, readCloser)
return err
})
}
// handleRSET is a callback for the client requesting the session to be reset.
// This essentially means undeleting all messages previously marked for
// deletion.
// RFC 1939, page 9.
func (s *session) handleRSET(args []string) error {
s.markedDeleted = make(map[uint64]struct{})
return s.respondOK(
"maildrop has %d messages (%d octets)",
s.getMessageCount(),
s.getMaildropSize(),
)
}
// handleSTAT is a callback for the client requesting maildrop statistics:
// the number of messages and their total size in octets.
// RFC 1939, page 6.
func (s *session) handleSTAT(args []string) error {
return s.respondOK("%d %d", s.getMessageCount(), s.getMaildropSize())
}
// handleTOP is a callback for the client requesting a number of lines from the
// top of a single message.
// RFC 1939, page 11.
func (s *session) handleTOP(args []string) error |
func printTopLine(line []byte, readErr error, writer io.Writer) error {
if readErr == io.EOF || readErr == nil {
if err := writeWithError(writer, line); err != nil {
return err
}
}
if readErr != nil {
return readErr
}
return writeWithError(writer, []byte{'\n'})
}
func writeWithError(w io.Writer, content []byte) error {
_, err := w.Write(content)
return err
}
// handleUIDL is a callback for the client requesting unique message identifiers for
// either one or all messages.
// RFC 1939, page 12.
func (s *session) handleUIDL(args []string) (err error) {
if len(args) == 1 {
return s.withMessageDo(args[0], func(msgId uint64) error {
uidl, err := s.handler.GetMessageID(msgId)
if err != nil {
return err
}
return s.respondOK("%d %s", msgId, uidl)
})
}
return s.forEachMessage(func(msgId uint64) (string, error) {
uidl, err := s.handler.GetMessageID(msgId)
if err != nil {
return "", err
}
return fmt.Sprintf("%d %s", msgId, uidl), nil
})
}
// handleUSER is a callback for the client providing its username. This must be
// followed by a "PASS" command with a corresponding password.
// RFC 1939, page 13.
func (s *session) handleUSER(args []string) (err error) {
s.username = args[0]
return s.respondOK("welcome %s", s.username)
}
// handleError provides a helper to decide what to do with the result of a
// single command handler. There are three possible outcomes. First - the
// command succeeded. Second, the command failed but the failure is reported to
// the user and the transaction continues. Third, an error occurred that calls
// for an immediate termination of the session.
func (s *session) handleError(err error, shouldContinue bool) bool {
if err == nil {
return shouldContinue
}
rErr, isReportable := err.(*ReportableError)
if isReportable {
if err = s.writer.PrintfLine("-ERR %s", rErr); err == nil {
return shouldContinue
}
}
s.state = stateTerminateConnection // will terminate the connection!
s.handler.HandleSessionError(err)
return shouldContinue
}
// respondOK provides a helper to write a "success" line to the client, with
// printf-like formatting. It will only fail if it is impossible to write to the
// client (e.g. closed TCP socket).
func (s *session) respondOK(format string, args ...interface{}) error {
return s.writer.PrintfLine(fmt.Sprintf("+OK %s", format), args...)
}
// fetchMaildropStats queries the handler for message count and sizes and,
// based on that, builds maildrop statistics that are then cached internally
// throughout the whole length of the session.
func (s *session) fetchMaildropStats() error {
msgCount, err := s.handler.GetMessageCount()
if err != nil {
return err
}
for i := uint64(0); i < msgCount; i++ {
mSize, err := s.handler.GetMessageSize(i + 1)
if err != nil {
return err
}
s.msgSizes[i+1] = mSize
}
return nil
}
// signIn is called after successful authentication whereby the protocol
// requires that the maildrop is not available to any other users trying to
// access it concurrently (RFC 1939, page 3).
func (s *session) signIn() error {
if err := s.handler.LockMaildrop(); err != nil {
return err
}
s.state = stateTransaction
if err := s.fetchMaildropStats(); err != nil {
return err
}
return s.respondOK(
"%s's maildrop has %d messages (%d octets)",
s.username,
s.getMessageCount(),
s.getMaildropSize(),
)
}
// getMessageCount reports the relevant number based on cached data.
func (s *session) getMessageCount() uint64 {
return uint64(len(s.msgSizes) - len(s.markedDeleted))
}
// getMaildropSize reports the relevant number based on cached data.
func (s *session) getMaildropSize() uint64 {
var ret uint64
for msgID, size := range s.msgSizes {
if _, deleted := s.markedDeleted[msgID]; !deleted {
ret += size
}
}
return ret
}
// forEachMessage is a helper that allows a callback to be invoked for every
// message in the maildrop that is not deleted. The callback is expected to
// return a line that is then printed out to the client.
func (s *session) forEachMessage(fn func(id uint64) (string, error)) error {
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
for i := uint64(0); i < uint64(len(s.msgSizes)); i++ {
if _, deleted := s.markedDeleted[i+1]; deleted {
continue
}
line, err := fn(i + 1)
if err != nil {
return err
}
if _, err := fmt.Fprintln(dotWriter, line); err != nil {
return err
}
}
return nil
}
// withMessageDo is a wrapper for handlers operating on a single message. It
// generally makes sure that the message number provided makes sense within
// the context of the current transaction.
func (s *session) withMessageDo(sID string, fn func(id uint64) error) error {
msgID, err := strconv.ParseUint(sID, 10, 64)
if err != nil {
return errInvalidSyntax
}
if msgID == 0 || msgID > uint64(len(s.msgSizes)) {
return NewReportableError("no such message: %d", msgID)
}
if _, gone := s.markedDeleted[msgID]; gone {
return NewReportableError("message %d already deleted", msgID)
}
return fn(msgID)
}
// unlock will unlock the client's maildrop if it is locked. Note that we assume
// the mailbox is locked if the exchange proceeded past the authorization stage.
func (s *session) unlock() {
if s.state == stateAuthorization {
return // we didn't yet even have a chance to lock the maildrop
}
if err := s.handler.UnlockMaildrop(); err != nil {
s.handler.HandleSessionError(err)
}
}
// closeOrReport provides a wrapper that allows deferred 'Close' operations to have
// their errors reported to the session error handler.
func (s *session) closeOrReport(closer io.Closer) {
if err := closer.Close(); err != nil {
s.handler.HandleSessionError(err)
}
}
| {
return s.withMessageDo(args[0], func(msgId uint64) error {
noLines, err := strconv.ParseUint(args[1], 10, 64)
if err != nil {
return errInvalidSyntax
}
if err := s.writer.PrintfLine("+OK"); err != nil {
return err
}
readCloser, err := s.handler.GetMessageReader(msgId)
if err != nil {
return err
}
defer s.closeOrReport(readCloser)
dotWriter := s.writer.DotWriter()
defer s.closeOrReport(dotWriter)
protoReader := textproto.NewReader(bufio.NewReader(readCloser))
for i := uint64(0); i < noLines; i++ {
line, readErr := protoReader.ReadLineBytes()
if err := printTopLine(line, readErr, dotWriter); err != nil {
return err
}
}
return nil
})
} | identifier_body |
game.py | import os.path, re, datetime, difflib, logging
from pyquery import PyQuery as pq
from src.download import open_or_download, get_page, sanity_check_game,sanity_check_game_copa
from src.season import BASE_URL
from models.basemodel import BaseModel
from models.team import Team, TeamName
from peewee import (PrimaryKeyField, IntegerField, DateTimeField, ForeignKeyField, BooleanField, CharField)
from src.utils import get_current_season
from utils.log import logger, init_logging
init_logging('game.log')
class | (BaseModel):
"""
Class representing a Game.
A game only contains basic information about the game and the scores.
"""
id = PrimaryKeyField()
game_acbid = IntegerField(unique=True, index=True)
team_home_id = ForeignKeyField(Team, related_name='games_home', index=True, null=True)
team_away_id = ForeignKeyField(Team, related_name='games_away', index=True, null=True)
season = IntegerField(null=False)
competition_phase = CharField(max_length=255, null=True)
round_phase = CharField(max_length=255, null=True)
journey = IntegerField(null=False)
score_home = IntegerField(null=True)
score_away = IntegerField(null=True)
score_home_first = IntegerField(null=True)
score_away_first = IntegerField(null=True)
score_home_second = IntegerField(null=True)
score_away_second = IntegerField(null=True)
score_home_third = IntegerField(null=True)
score_away_third = IntegerField(null=True)
score_home_fourth = IntegerField(null=True)
score_away_fourth = IntegerField(null=True)
score_home_extra = IntegerField(null=True)
score_away_extra = IntegerField(null=True)
venue = CharField(max_length=255, null=True)
attendance = IntegerField(null=True)
kickoff_time = DateTimeField(index=True)
referee_1 = CharField(max_length=255, null=True)
referee_2 = CharField(max_length=255, null=True)
referee_3 = CharField(max_length=255, null=True)
db_flag = BooleanField(null=True)
@staticmethod
def save_games(season, logging_level=logging.INFO):
"""
Method for saving locally the games of a season.
:param season: int
:param logging_level: logging object
:return:
"""
logger.info('Starting the download of games...')
if season.season == get_current_season():
current_game_events_ids = season.get_current_game_events_ids()
game_ids_list = list(current_game_events_ids.values())
else:
game_ids_list = season.get_game_ids()
n_checkpoints = 4
checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]
for i in range(len(game_ids_list)):
game_id = int(game_ids_list[i]) % 1000
url2 = BASE_URL + "/fichas/LACB{}.php".format(game_ids_list[i])
filename = os.path.join(season.GAMES_PATH, str(game_id)+"-" +str(game_ids_list[i]) + '.html')
open_or_download(file_path=filename, url=url2)
if i in checkpoints:
logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))
logger.info('Download finished! (new {} games in {})\n'.format(len(game_ids_list), season.GAMES_PATH))
@staticmethod
def save_games_copa(season, logging_level=logging.INFO):
"""
Method for saving locally the games of a season.
:param season: int
:param logging_level: logging object
:return:
"""
logging.basicConfig(level=logging_level)
logger = logging.getLogger(__name__)
logger.info('Starting the download of games...')
if season.season == get_current_season():
current_game_events_ids = season.get_current_game_events_ids_copa()
game_ids_list = list(current_game_events_ids.values())
else:
game_ids_list=season.get_game_ids_copa()
n_checkpoints = 4
checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]
for i in range(len(game_ids_list)):
game_id=int(game_ids_list[i]) % 1000
url2 = BASE_URL + "/fichas/CREY{}.php".format(game_ids_list[i])
filename = os.path.join(season.GAMES_COPA_PATH, str(game_id)+"-" +str(game_ids_list[i]) + '.html')
open_or_download(file_path=filename, url=url2)
if i in checkpoints:
logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))
logger.info('Download finished! (new {} games in {})\n'.format(len(game_ids_list), season.GAMES_COPA_PATH))
@staticmethod
def sanity_check(season, logging_level=logging.INFO):
sanity_check_game(season.GAMES_PATH, logging_level)
@staticmethod
def sanity_check_copa(season, logging_level=logging.INFO):
sanity_check_game_copa(season.GAMES_COPA_PATH, logging_level)
@staticmethod
def create_instance(raw_game, game_acbid, season, competition_phase,round_phase=None):
"""
Extract all the information regarding the game such as the date, attendance, venue, score per quarter or teams.
Therefore, we need first to extract and insert the teams in the database in order to get the references to the db.
:param raw_game: String
:param game_acbid: int
:param season: Season
:param competition_phase: String
:param round_phase: String
:return: Game object
"""
# There are two different statistics tables in acb.com. I assume they created the new one to introduce the +/- stat.
estadisticas_tag = '.estadisticasnew' if re.search(r'<table class="estadisticasnew"', raw_game) else '.estadisticas'
doc = pq(raw_game)
game_dict = dict()
"""
Each game has a unique id in acb.com. The id has 5 digits, where the first two digits are the season code (the
oldest season in 1956 has code 1) and the last three are the number of the game (a simple counter since the beginning
of the season).
This id can be used to access the specific game via the link 'http://www.acb.com/fichas/LACBXXYYY.php'
"""
game_dict['game_acbid'] = game_acbid
game_dict['season'] = season.season
game_dict['competition_phase'] = competition_phase
game_dict['round_phase'] = round_phase
# Information about the teams.
info_teams_data = doc(estadisticas_tag).eq(1)
home_team_name = None
away_team_name = None
"""
We only have the names of the teams (text) within the doc. We will look for their associated ids in our teamname table, where
we have all the historical official names for each team and season. However, the ACB sometimes doesn't agree on the names
and writes them in different ways depending on the game (sometimes taking older names or making small changes).
For instance VALENCIA BASKET CLUB instead of VALENCIA BASKET.
So if there is no direct correspondence we will take the closest match.
"""
for i in [0, 2]:
team_data = info_teams_data('.estverde').eq(i)('td').eq(0).text()
team_name = re.search("(.*) [0-9]", team_data).groups()[0]
try: ## In case the name of the team is exactly the same as one stated in our database for a season
team_acbid = TeamName.get(TeamName.name == team_name).team_id.team_acbid
team = Team.get(Team.team_acbid == team_acbid)
except TeamName.DoesNotExist: ## In case there is no exact correspondence within our database, let's find the closest match.
query = TeamName.select(TeamName.team_id, TeamName.name)
teams_names_ids = dict()
for q in query:
teams_names_ids[q.name] = q.team_id.id
most_likely_team = difflib.get_close_matches(team_name, teams_names_ids.keys(), 1, 0.4)[0]
team = Team.get(Team.id == teams_names_ids[most_likely_team])
if most_likely_team not in season.mismatched_teams: # debug info to check the correctness.
season.mismatched_teams.append(most_likely_team)
logger.info('Season {} -> {} has been matched to: {}'.format(season.season, team_name, most_likely_team))
# TeamName.get_or_create(**{'team': team, 'name': team_name, 'season': season.season})
game_dict['team_home_id' if i == 0 else 'team_away_id'] = team
home_team_name = team_name if i == 0 else home_team_name
away_team_name = team_name if i != 0 else away_team_name
# Information about the game.
info_game_data = doc(estadisticas_tag).eq(0)
scheduling_data = info_game_data('.estnegro')('td').eq(0).text()
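# The raw text is pipe-separated; an illustrative (hypothetical) value:
#   "Jornada 5 | 12/10/2017 | 20:30 | Pabellon Fuente San Luis | Publico:5000"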
scheduling_data = scheduling_data.split("|")
journey, date, time, venue, attendance = list(map(lambda x: x.strip(), scheduling_data)) # Remove extra spaces.
if date and time:
day, month, year = list(map(int, date.split("/")))
hour, minute = list(map(int, time.split(":")))
game_dict['kickoff_time'] = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute)
if attendance:
try:
game_dict['attendance'] = int(attendance.split(":")[1])
except ValueError:
pass
if venue:
game_dict['venue'] = venue
if journey:
game_dict['journey'] = journey.split(" ")[1]
if competition_phase=='cup':
if int(journey.split(" ")[1])==1:
game_dict['round_phase'] ="quarter_final"
elif int(journey.split(" ")[1])==2:
game_dict['round_phase'] ="semi_final"
elif int(journey.split(" ")[1])==3:
game_dict['round_phase'] ="final"
for i in range(2, 7):
score_home_attribute = ''
score_away_attribute = ''
if i == 2:
score_home_attribute = 'score_home_first'
score_away_attribute = 'score_away_first'
elif i == 3:
score_home_attribute = 'score_home_second'
score_away_attribute = 'score_away_second'
elif i == 4:
score_home_attribute = 'score_home_third'
score_away_attribute = 'score_away_third'
elif i == 5:
score_home_attribute = 'score_home_fourth'
score_away_attribute = 'score_away_fourth'
elif i == 6:
score_home_attribute = 'score_home_extra'
score_away_attribute = 'score_away_extra'
quarter_data = info_game_data('.estnaranja')('td').eq(i).text()
if quarter_data:
try:
game_dict[score_home_attribute], game_dict[score_away_attribute] = list(
map(int, quarter_data.split("|")))
except ValueError:
pass
referees_data = info_game_data('.estnaranja')('td').eq(0).text()
if referees_data:
referees = referees_data.split(":")[1].strip().split(",")
referees = list(filter(None, referees))
referees = list(map(lambda x: x.strip(), referees))
n_ref = 1
for referee in referees:
game_dict['referee_'+str(n_ref)] = referee
n_ref+=1
try:
game = Game.get(Game.game_acbid == game_dict['game_acbid'])
except Game.DoesNotExist:
game = Game.create(**game_dict)
return game
from src.season import Season
s = Season(2017)
Game.save_games(s) | Game | identifier_name |
game.py | import os.path, re, datetime, difflib, logging
from pyquery import PyQuery as pq
from src.download import open_or_download, get_page, sanity_check_game,sanity_check_game_copa
from src.season import BASE_URL
from models.basemodel import BaseModel
from models.team import Team, TeamName
from peewee import (PrimaryKeyField, IntegerField, DateTimeField, ForeignKeyField, BooleanField, CharField)
from src.utils import get_current_season
from utils.log import logger, init_logging
init_logging('game.log')
class Game(BaseModel):
"""
Class representing a Game.
A game only contains basic information about the game and the scores.
"""
id = PrimaryKeyField()
game_acbid = IntegerField(unique=True, index=True)
team_home_id = ForeignKeyField(Team, related_name='games_home', index=True, null=True)
team_away_id = ForeignKeyField(Team, related_name='games_away', index=True, null=True)
season = IntegerField(null=False)
competition_phase = CharField(max_length=255, null=True)
round_phase = CharField(max_length=255, null=True)
journey = IntegerField(null=False)
score_home = IntegerField(null=True)
score_away = IntegerField(null=True)
score_home_first = IntegerField(null=True)
score_away_first = IntegerField(null=True)
score_home_second = IntegerField(null=True)
score_away_second = IntegerField(null=True)
score_home_third = IntegerField(null=True)
score_away_third = IntegerField(null=True)
score_home_fourth = IntegerField(null=True)
score_away_fourth = IntegerField(null=True)
score_home_extra = IntegerField(null=True)
score_away_extra = IntegerField(null=True)
venue = CharField(max_length=255, null=True)
attendance = IntegerField(null=True)
kickoff_time = DateTimeField(index=True)
referee_1 = CharField(max_length=255, null=True)
referee_2 = CharField(max_length=255, null=True)
referee_3 = CharField(max_length=255, null=True)
db_flag = BooleanField(null=True)
@staticmethod
def save_games(season, logging_level=logging.INFO):
"""
Method for saving locally the games of a season.
:param season: int
:param logging_level: logging object
:return:
"""
logger.info('Starting the download of games...')
if season.season == get_current_season():
current_game_events_ids = season.get_current_game_events_ids()
game_ids_list = list(current_game_events_ids.values())
else:
game_ids_list = season.get_game_ids()
n_checkpoints = 4
checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]
for i in range(len(game_ids_list)):
game_id = int(game_ids_list[i]) % 1000
url2 = BASE_URL + "/fichas/LACB{}.php".format(game_ids_list[i])
filename = os.path.join(season.GAMES_PATH, str(game_id)+"-" +str(game_ids_list[i]) + '.html')
open_or_download(file_path=filename, url=url2)
if i in checkpoints:
logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))
logger.info('Download finished! (new {} games in {})\n'.format(len(game_ids_list), season.GAMES_PATH))
@staticmethod
def save_games_copa(season, logging_level=logging.INFO):
"""
Method for saving locally the games of a season.
:param season: int
:param logging_level: logging object
:return:
"""
logging.basicConfig(level=logging_level)
logger = logging.getLogger(__name__)
logger.info('Starting the download of games...')
if season.season == get_current_season():
current_game_events_ids = season.get_current_game_events_ids_copa()
game_ids_list = list(current_game_events_ids.values())
else:
game_ids_list=season.get_game_ids_copa()
n_checkpoints = 4
checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]
for i in range(len(game_ids_list)):
game_id=int(game_ids_list[i]) % 1000
url2 = BASE_URL + "/fichas/CREY{}.php".format(game_ids_list[i])
filename = os.path.join(season.GAMES_COPA_PATH, str(game_id)+"-" +str(game_ids_list[i]) + '.html')
open_or_download(file_path=filename, url=url2)
if i in checkpoints:
logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))
logger.info('Download finished! (new {} games in {})\n'.format(len(game_ids_list), season.GAMES_COPA_PATH))
@staticmethod
def sanity_check(season, logging_level=logging.INFO):
sanity_check_game(season.GAMES_PATH, logging_level)
@staticmethod
def sanity_check_copa(season, logging_level=logging.INFO):
sanity_check_game_copa(season.GAMES_COPA_PATH, logging_level)
@staticmethod
def create_instance(raw_game, game_acbid, season, competition_phase,round_phase=None):
"""
Extract all the information regarding the game such as the date, attendance, venue, score per quarter or teams.
Therefore, we need first to extract and insert the teams in the database in order to get the references to the db.
:param raw_game: String
:param game_acbid: int
:param season: Season
:param competition_phase: String
:param round_phase: String
:return: Game object
"""
# There are two different statistics tables in acb.com. I assume they created the new one to introduce the +/- stat.
estadisticas_tag = '.estadisticasnew' if re.search(r'<table class="estadisticasnew"', raw_game) else '.estadisticas'
doc = pq(raw_game)
game_dict = dict()
"""
Each game has a unique id in acb.com. The id has 5 digits, where the first two digits are the season code (the
oldest season in 1956 has code 1) and the last three are the number of the game (a simple counter since the beginning
of the season).
This id can be used to access the specific game via the link 'http://www.acb.com/fichas/LACBXXYYY.php'
"""
game_dict['game_acbid'] = game_acbid
game_dict['season'] = season.season
game_dict['competition_phase'] = competition_phase
game_dict['round_phase'] = round_phase
# Information about the teams.
info_teams_data = doc(estadisticas_tag).eq(1)
home_team_name = None
away_team_name = None
"""
We only have the names of the teams (text) within the doc. We will look for their associated ids in our teamname table, where
we have all the historical official names for each team and season. However, the ACB sometimes doesn't agree on the names
and writes them in different ways depending on the game (sometimes taking older names or making small changes).
For instance VALENCIA BASKET CLUB instead of VALENCIA BASKET.
So if there is no direct correspondence we will take the closest match.
"""
for i in [0, 2]:
team_data = info_teams_data('.estverde').eq(i)('td').eq(0).text()
team_name = re.search("(.*) [0-9]", team_data).groups()[0]
try: ## In case the name of the team is exactly the same as one stated in our database for a season
team_acbid = TeamName.get(TeamName.name == team_name).team_id.team_acbid
team = Team.get(Team.team_acbid == team_acbid)
except TeamName.DoesNotExist: ## In case there is no exact correspondence within our database, let's find the closest match.
query = TeamName.select(TeamName.team_id, TeamName.name)
teams_names_ids = dict()
for q in query:
teams_names_ids[q.name] = q.team_id.id
most_likely_team = difflib.get_close_matches(team_name, teams_names_ids.keys(), 1, 0.4)[0]
team = Team.get(Team.id == teams_names_ids[most_likely_team])
if most_likely_team not in season.mismatched_teams: # debug info to check the correctness.
season.mismatched_teams.append(most_likely_team)
logger.info('Season {} -> {} has been matched to: {}'.format(season.season, team_name, most_likely_team))
# TeamName.get_or_create(**{'team': team, 'name': team_name, 'season': season.season})
game_dict['team_home_id' if i == 0 else 'team_away_id'] = team
home_team_name = team_name if i == 0 else home_team_name
away_team_name = team_name if i != 0 else away_team_name
# Information about the game.
info_game_data = doc(estadisticas_tag).eq(0)
scheduling_data = info_game_data('.estnegro')('td').eq(0).text()
scheduling_data = scheduling_data.split("|")
journey, date, time, venue, attendance = list(map(lambda x: x.strip(), scheduling_data)) # Remove extra spaces.
if date and time:
day, month, year = list(map(int, date.split("/")))
hour, minute = list(map(int, time.split(":")))
game_dict['kickoff_time'] = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute)
if attendance:
try:
game_dict['attendance'] = int(attendance.split(":")[1])
except ValueError:
pass
if venue:
game_dict['venue'] = venue
if journey:
game_dict['journey'] = journey.split(" ")[1]
if competition_phase=='cup':
if int(journey.split(" ")[1])==1:
game_dict['round_phase'] ="quarter_final"
elif int(journey.split(" ")[1])==2:
game_dict['round_phase'] ="semi_final"
elif int(journey.split(" ")[1])==3:
game_dict['round_phase'] ="final"
for i in range(2, 7):
score_home_attribute = ''
score_away_attribute = ''
if i == 2:
|
elif i == 3:
score_home_attribute = 'score_home_second'
score_away_attribute = 'score_away_second'
elif i == 4:
score_home_attribute = 'score_home_third'
score_away_attribute = 'score_away_third'
elif i == 5:
score_home_attribute = 'score_home_fourth'
score_away_attribute = 'score_away_fourth'
elif i == 6:
score_home_attribute = 'score_home_extra'
score_away_attribute = 'score_away_extra'
quarter_data = info_game_data('.estnaranja')('td').eq(i).text()
if quarter_data:
try:
game_dict[score_home_attribute], game_dict[score_away_attribute] = list(
map(int, quarter_data.split("|")))
except ValueError:
pass
referees_data = info_game_data('.estnaranja')('td').eq(0).text()
if referees_data:
referees = referees_data.split(":")[1].strip().split(",")
referees = list(filter(None, referees))
referees = list(map(lambda x: x.strip(), referees))
n_ref = 1
for referee in referees:
game_dict['referee_'+str(n_ref)] = referee
n_ref+=1
try:
game = Game.get(Game.game_acbid == game_dict['game_acbid'])
except:
game = Game.create(**game_dict)
return game
from src.season import Season
s = Season(2017)
Game.save_games(s) | score_home_attribute = 'score_home_first'
score_away_attribute = 'score_away_first' | conditional_block |
game.py | import os.path, re, datetime, difflib, logging
from pyquery import PyQuery as pq
from src.download import open_or_download, get_page, sanity_check_game,sanity_check_game_copa
from src.season import BASE_URL
from models.basemodel import BaseModel
from models.team import Team, TeamName
from peewee import (PrimaryKeyField, IntegerField, DateTimeField, ForeignKeyField, BooleanField, CharField)
from src.utils import get_current_season
from utils.log import logger, init_logging
init_logging('game.log')
class Game(BaseModel):
"""
Class representing a Game.
A game only contains basic information about the game and the scores.
"""
id = PrimaryKeyField()
game_acbid = IntegerField(unique=True, index=True)
team_home_id = ForeignKeyField(Team, related_name='games_home', index=True, null=True)
team_away_id = ForeignKeyField(Team, related_name='games_away', index=True, null=True)
season = IntegerField(null=False)
competition_phase = CharField(max_length=255, null=True)
round_phase = CharField(max_length=255, null=True)
journey = IntegerField(null=False)
score_home = IntegerField(null=True)
score_away = IntegerField(null=True)
score_home_first = IntegerField(null=True)
score_away_first = IntegerField(null=True)
score_home_second = IntegerField(null=True)
score_away_second = IntegerField(null=True)
score_home_third = IntegerField(null=True)
score_away_third = IntegerField(null=True)
score_home_fourth = IntegerField(null=True)
score_away_fourth = IntegerField(null=True)
score_home_extra = IntegerField(null=True)
score_away_extra = IntegerField(null=True)
venue = CharField(max_length=255, null=True)
attendance = IntegerField(null=True)
kickoff_time = DateTimeField(index=True)
referee_1 = CharField(max_length=255, null=True)
referee_2 = CharField(max_length=255, null=True)
referee_3 = CharField(max_length=255, null=True)
db_flag = BooleanField(null=True)
@staticmethod
def save_games(season, logging_level=logging.INFO):
"""
Method for saving locally the games of a season.
:param season: int
:param logging_level: logging object
:return:
"""
logger.info('Starting the download of games...')
if season.season == get_current_season():
current_game_events_ids = season.get_current_game_events_ids()
game_ids_list = list(current_game_events_ids.values())
else:
game_ids_list = season.get_game_ids()
n_checkpoints = 4
checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]
for i in range(len(game_ids_list)):
game_id = int(game_ids_list[i]) % 1000
url2 = BASE_URL + "/fichas/LACB{}.php".format(game_ids_list[i])
filename = os.path.join(season.GAMES_PATH, str(game_id)+"-" +str(game_ids_list[i]) + '.html')
open_or_download(file_path=filename, url=url2)
if i in checkpoints:
logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))
logger.info('Download finished! (new {} games in {})\n'.format(len(game_ids_list), season.GAMES_PATH))
@staticmethod
def save_games_copa(season, logging_level=logging.INFO):
"""
Method for saving locally the games of a season.
:param season: int
:param logging_level: logging object
:return:
"""
logging.basicConfig(level=logging_level)
logger = logging.getLogger(__name__)
logger.info('Starting the download of games...')
if season.season == get_current_season():
current_game_events_ids = season.get_current_game_events_ids_copa()
game_ids_list = list(current_game_events_ids.values())
else:
game_ids_list=season.get_game_ids_copa()
n_checkpoints = 4
checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]
for i in range(len(game_ids_list)):
game_id=int(game_ids_list[i]) % 1000
url2 = BASE_URL + "/fichas/CREY{}.php".format(game_ids_list[i])
filename = os.path.join(season.GAMES_COPA_PATH, str(game_id)+"-" +str(game_ids_list[i]) + '.html')
open_or_download(file_path=filename, url=url2)
if i in checkpoints:
logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))
logger.info('Download finished! (new {} games in {})\n'.format(len(game_ids_list), season.GAMES_COPA_PATH))
@staticmethod
def sanity_check(season, logging_level=logging.INFO):
sanity_check_game(season.GAMES_PATH, logging_level)
@staticmethod
def sanity_check_copa(season, logging_level=logging.INFO):
|
@staticmethod
def create_instance(raw_game, game_acbid, season, competition_phase,round_phase=None):
"""
Extract all the information regarding the game such as the date, attendance, venue, score per quarter or teams.
Therefore, we need first to extract and insert the teams in the database in order to get the references to the db.
:param raw_game: String
:param game_acbid: int
:param season: Season
:param competition_phase: String
:param round_phase: String
:return: Game object
"""
# There are two different statistics tables in acb.com. I assume they created the new one to introduce the +/- stat.
estadisticas_tag = '.estadisticasnew' if re.search(r'<table class="estadisticasnew"', raw_game) else '.estadisticas'
doc = pq(raw_game)
game_dict = dict()
"""
Each game has a unique id in acb.com. The id has 5 digits, where the first two digits are the season code (the
oldest season in 1956 has code 1) and the last three are the number of the game (a simple counter since the beginning
of the season).
This id can be used to access the specific game via the link 'http://www.acb.com/fichas/LACBXXYYY.php'
"""
game_dict['game_acbid'] = game_acbid
game_dict['season'] = season.season
game_dict['competition_phase'] = competition_phase
game_dict['round_phase'] = round_phase
# Information about the teams.
info_teams_data = doc(estadisticas_tag).eq(1)
home_team_name = None
away_team_name = None
"""
We only have the names of the teams (text) within the doc. We will look for their associated ids in our teamname table, where
we have all the historical official names for each team and season. However, the ACB sometimes doesn't agree on the names
and writes them in different ways depending on the game (sometimes taking older names or making small changes).
For instance VALENCIA BASKET CLUB instead of VALENCIA BASKET.
So if there is no direct correspondence we will take the closest match.
"""
for i in [0, 2]:
team_data = info_teams_data('.estverde').eq(i)('td').eq(0).text()
team_name = re.search("(.*) [0-9]", team_data).groups()[0]
try: ## In case the name of the team is exactly the same as one stated in our database for a season
team_acbid = TeamName.get(TeamName.name == team_name).team_id.team_acbid
team = Team.get(Team.team_acbid == team_acbid)
except TeamName.DoesNotExist: ## In case there is no exact correspondence within our database, let's find the closest match.
query = TeamName.select(TeamName.team_id, TeamName.name)
teams_names_ids = dict()
for q in query:
teams_names_ids[q.name] = q.team_id.id
most_likely_team = difflib.get_close_matches(team_name, teams_names_ids.keys(), 1, 0.4)[0]
team = Team.get(Team.id == teams_names_ids[most_likely_team])
if most_likely_team not in season.mismatched_teams: # debug info to check the correctness.
season.mismatched_teams.append(most_likely_team)
logger.info('Season {} -> {} has been matched to: {}'.format(season.season, team_name, most_likely_team))
# TeamName.get_or_create(**{'team': team, 'name': team_name, 'season': season.season})
game_dict['team_home_id' if i == 0 else 'team_away_id'] = team
home_team_name = team_name if i == 0 else home_team_name
away_team_name = team_name if i != 0 else away_team_name
# Information about the game.
info_game_data = doc(estadisticas_tag).eq(0)
scheduling_data = info_game_data('.estnegro')('td').eq(0).text()
scheduling_data = scheduling_data.split("|")
journey, date, time, venue, attendance = list(map(lambda x: x.strip(), scheduling_data)) # Remove extra spaces.
if date and time:
day, month, year = list(map(int, date.split("/")))
hour, minute = list(map(int, time.split(":")))
game_dict['kickoff_time'] = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute)
if attendance:
try:
game_dict['attendance'] = int(attendance.split(":")[1])
except ValueError:
pass
if venue:
game_dict['venue'] = venue
if journey:
game_dict['journey'] = journey.split(" ")[1]
if competition_phase=='cup':
if int(journey.split(" ")[1])==1:
game_dict['round_phase'] ="quarter_final"
elif int(journey.split(" ")[1])==2:
game_dict['round_phase'] ="semi_final"
elif int(journey.split(" ")[1])==3:
game_dict['round_phase'] ="final"
for i in range(2, 7):
score_home_attribute = ''
score_away_attribute = ''
if i == 2:
score_home_attribute = 'score_home_first'
score_away_attribute = 'score_away_first'
elif i == 3:
score_home_attribute = 'score_home_second'
score_away_attribute = 'score_away_second'
elif i == 4:
score_home_attribute = 'score_home_third'
score_away_attribute = 'score_away_third'
elif i == 5:
score_home_attribute = 'score_home_fourth'
score_away_attribute = 'score_away_fourth'
elif i == 6:
score_home_attribute = 'score_home_extra'
score_away_attribute = 'score_away_extra'
quarter_data = info_game_data('.estnaranja')('td').eq(i).text()
if quarter_data:
try:
game_dict[score_home_attribute], game_dict[score_away_attribute] = list(
map(int, quarter_data.split("|")))
except ValueError:
pass
referees_data = info_game_data('.estnaranja')('td').eq(0).text()
if referees_data:
referees = referees_data.split(":")[1].strip().split(",")
referees = list(filter(None, referees))
referees = list(map(lambda x: x.strip(), referees))
n_ref = 1
for referee in referees:
game_dict['referee_'+str(n_ref)] = referee
n_ref+=1
try:
game = Game.get(Game.game_acbid == game_dict['game_acbid'])
except Game.DoesNotExist:
game = Game.create(**game_dict)
return game
from src.season import Season
s = Season(2017)
Game.save_games(s) | sanity_check_game_copa(season.GAMES_COPA_PATH, logging_level) | identifier_body |
game.py | import os.path, re, datetime, difflib, logging
from pyquery import PyQuery as pq
from src.download import open_or_download, get_page, sanity_check_game,sanity_check_game_copa
from src.season import BASE_URL
from models.basemodel import BaseModel
from models.team import Team, TeamName
from peewee import (PrimaryKeyField, IntegerField, DateTimeField, ForeignKeyField, BooleanField, CharField)
from src.utils import get_current_season
from utils.log import logger, init_logging
init_logging('game.log')
class Game(BaseModel):
"""
Class representing a Game.
A game only contains basic information about the game and the scores.
"""
id = PrimaryKeyField()
game_acbid = IntegerField(unique=True, index=True)
team_home_id = ForeignKeyField(Team, related_name='games_home', index=True, null=True)
team_away_id = ForeignKeyField(Team, related_name='games_away', index=True, null=True)
season = IntegerField(null=False)
competition_phase = CharField(max_length=255, null=True)
round_phase = CharField(max_length=255, null=True)
journey = IntegerField(null=False)
score_home = IntegerField(null=True)
score_away = IntegerField(null=True)
score_home_first = IntegerField(null=True)
score_away_first = IntegerField(null=True)
score_home_second = IntegerField(null=True)
score_away_second = IntegerField(null=True)
score_home_third = IntegerField(null=True)
score_away_third = IntegerField(null=True)
score_home_fourth = IntegerField(null=True)
score_away_fourth = IntegerField(null=True)
score_home_extra = IntegerField(null=True)
score_away_extra = IntegerField(null=True)
venue = CharField(max_length=255, null=True)
attendance = IntegerField(null=True)
kickoff_time = DateTimeField(index=True)
referee_1 = CharField(max_length=255, null=True)
referee_2 = CharField(max_length=255, null=True)
referee_3 = CharField(max_length=255, null=True)
db_flag = BooleanField(null=True)
@staticmethod
def save_games(season, logging_level=logging.INFO):
"""
Method for saving locally the games of a season.
:param season: int
:param logging_level: logging object
:return:
"""
logger.info('Starting the download of games...')
if season.season == get_current_season():
current_game_events_ids = season.get_current_game_events_ids()
game_ids_list = list(current_game_events_ids.values())
else:
game_ids_list = season.get_game_ids()
n_checkpoints = 4
checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]
for i in range(len(game_ids_list)):
game_id = int(game_ids_list[i]) % 1000
url2 = BASE_URL + "/fichas/LACB{}.php".format(game_ids_list[i])
filename = os.path.join(season.GAMES_PATH, str(game_id)+"-" +str(game_ids_list[i]) + '.html')
open_or_download(file_path=filename, url=url2)
if i in checkpoints:
logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))
logger.info('Download finished! (new {} games in {})\n'.format(len(game_ids_list), season.GAMES_PATH))
@staticmethod
def save_games_copa(season, logging_level=logging.INFO):
"""
Method for saving locally the games of a season.
:param season: int
:param logging_level: logging object
:return:
"""
logging.basicConfig(level=logging_level)
logger = logging.getLogger(__name__)
logger.info('Starting the download of games...')
if season.season == get_current_season():
current_game_events_ids = season.get_current_game_events_ids_copa()
game_ids_list = list(current_game_events_ids.values())
else:
game_ids_list=season.get_game_ids_copa()
n_checkpoints = 4
checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]
for i in range(len(game_ids_list)):
game_id=int(game_ids_list[i]) % 1000
url2 = BASE_URL + "/fichas/CREY{}.php".format(game_ids_list[i])
filename = os.path.join(season.GAMES_COPA_PATH, str(game_id)+"-" +str(game_ids_list[i]) + '.html')
open_or_download(file_path=filename, url=url2)
if i in checkpoints:
logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))
logger.info('Download finished! (new {} games in {})\n'.format(len(game_ids_list), season.GAMES_COPA_PATH))
@staticmethod
def sanity_check(season, logging_level=logging.INFO):
sanity_check_game(season.GAMES_PATH, logging_level)
@staticmethod
def sanity_check_copa(season, logging_level=logging.INFO):
sanity_check_game_copa(season.GAMES_COPA_PATH, logging_level)
@staticmethod
def create_instance(raw_game, game_acbid, season, competition_phase,round_phase=None):
"""
Extract all the information regarding the game such as the date, attendance, venue, score per quarter or teams.
Therefore, we need first to extract and insert the teams in the database in order to get the references to the db.
:param raw_game: String
:param game_acbid: int
:param season: Season
:param competition_phase: String
:param round_phase: String
:return: Game object
"""
# There are two different statistics tables in acb.com. I assume they created the new one to introduce the +/- stat.
estadisticas_tag = '.estadisticasnew' if re.search(r'<table class="estadisticasnew"', raw_game) else '.estadisticas'
doc = pq(raw_game)
game_dict = dict()
"""
Each game has a unique id in acb.com. The id has 5 digits, where the first two digits are the season code (the
oldest season in 1956 has code 1) and the last three are the number of the game (a simple counter since the beginning
of the season).
This id can be used to access the specific game via the link 'http://www.acb.com/fichas/LACBXXYYY.php'
"""
game_dict['game_acbid'] = game_acbid
game_dict['season'] = season.season
game_dict['competition_phase'] = competition_phase
game_dict['round_phase'] = round_phase
# Information about the teams.
info_teams_data = doc(estadisticas_tag).eq(1)
home_team_name = None
away_team_name = None
"""
We only have the names of the teams (text) within the doc. We will look for their associated ids in our teamname table, where
we have all the historical official names for each team and season. However, the ACB sometimes doesn't agree on the names
and writes them in different ways depending on the game (sometimes taking older names or making small changes).
For instance VALENCIA BASKET CLUB instead of VALENCIA BASKET.
So if there is no direct correspondence we will take the closest match.
"""
for i in [0, 2]:
team_data = info_teams_data('.estverde').eq(i)('td').eq(0).text()
team_name = re.search("(.*) [0-9]", team_data).groups()[0]
try: ## In case the name of the team is exactly the same as one stated in our database for a season
team_acbid = TeamName.get(TeamName.name == team_name).team_id.team_acbid
team = Team.get(Team.team_acbid == team_acbid)
except TeamName.DoesNotExist: ## In case there is no exact correspondence within our database, let's find the closest match.
query = TeamName.select(TeamName.team_id, TeamName.name)
teams_names_ids = dict()
for q in query:
teams_names_ids[q.name] = q.team_id.id
most_likely_team = difflib.get_close_matches(team_name, teams_names_ids.keys(), 1, 0.4)[0]
team = Team.get(Team.id == teams_names_ids[most_likely_team])
if most_likely_team not in season.mismatched_teams: # debug info to check the correctness.
season.mismatched_teams.append(most_likely_team)
logger.info('Season {} -> {} has been matched to: {}'.format(season.season, team_name, most_likely_team))
# TeamName.get_or_create(**{'team': team, 'name': team_name, 'season': season.season})
game_dict['team_home_id' if i == 0 else 'team_away_id'] = team
home_team_name = team_name if i == 0 else home_team_name
away_team_name = team_name if i != 0 else away_team_name
# Information about the game.
info_game_data = doc(estadisticas_tag).eq(0)
scheduling_data = info_game_data('.estnegro')('td').eq(0).text()
scheduling_data = scheduling_data.split("|")
journey, date, time, venue, attendance = list(map(lambda x: x.strip(), scheduling_data)) # Remove extra spaces.
if date and time:
day, month, year = list(map(int, date.split("/")))
hour, minute = list(map(int, time.split(":")))
game_dict['kickoff_time'] = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute)
if attendance:
try:
game_dict['attendance'] = int(attendance.split(":")[1])
except ValueError:
pass
if venue:
game_dict['venue'] = venue
if journey:
game_dict['journey'] = journey.split(" ")[1]
if competition_phase=='cup':
if int(journey.split(" ")[1])==1:
game_dict['round_phase'] ="quarter_final"
elif int(journey.split(" ")[1])==2:
game_dict['round_phase'] ="semi_final"
elif int(journey.split(" ")[1])==3:
game_dict['round_phase'] ="final"
for i in range(2, 7):
score_home_attribute = ''
score_away_attribute = ''
if i == 2:
score_home_attribute = 'score_home_first'
score_away_attribute = 'score_away_first'
elif i == 3:
score_home_attribute = 'score_home_second'
score_away_attribute = 'score_away_second'
elif i == 4:
score_home_attribute = 'score_home_third'
score_away_attribute = 'score_away_third'
elif i == 5:
score_home_attribute = 'score_home_fourth'
score_away_attribute = 'score_away_fourth' | if quarter_data:
try:
game_dict[score_home_attribute], game_dict[score_away_attribute] = list(
map(int, quarter_data.split("|")))
except ValueError:
pass
referees_data = info_game_data('.estnaranja')('td').eq(0).text()
if referees_data:
referees = referees_data.split(":")[1].strip().split(",")
referees = list(filter(None, referees))
referees = list(map(lambda x: x.strip(), referees))
n_ref = 1
for referee in referees:
game_dict['referee_'+str(n_ref)] = referee
n_ref+=1
try:
game = Game.get(Game.game_acbid == game_dict['game_acbid'])
except Game.DoesNotExist:
game = Game.create(**game_dict)
return game
from src.season import Season
s = Season(2017)
Game.save_games(s) | elif i == 6:
score_home_attribute = 'score_home_extra'
score_away_attribute = 'score_away_extra'
quarter_data = info_game_data('.estnaranja')('td').eq(i).text() | random_line_split |
utils.py | import re
import importlib
from collections import namedtuple
import itertools
import json
from io import StringIO, BytesIO
import zipfile
import csv
import pysolr
from reversion.models import Version
from django.conf import settings
from django.utils.translation import gettext as _
from django.contrib.auth.decorators import login_required
from django.db import connection
from django.urls import reverse
CONFIDENCE_MAP = {
'low': 1,
'medium': 2,
'high': 3,
}
REVERSE_CONFIDENCE = {v:k for k,v in CONFIDENCE_MAP.items()}
class Autofill(object):
'''
Helper class for getting attributes that we already know about entities
based on autocomplete queries.
'''
def __init__(self, objects=[], simple_attrs=[], complex_attrs=[],
list_attrs=[], set_attrs={}):
# Model objects that we want to query
self.objects = objects
# Simple (non-complex) fields
self.simple_attrs = simple_attrs
# Complex single-select fields
self.complex_attrs = complex_attrs
# Complex multiselect fields
self.list_attrs = list_attrs
# Querysets from foreign key relationships
# (Requires both the attr name, and the foreign key name,
# like: {'membershippersonmember_set': 'organization'})
self.set_attrs = set_attrs
@property
def attrs(self):
collected_attrs = []
for obj in self.objects:
obj_data = {
'text': str(obj.name),
'id': obj.id
}
# Add optional attributes, with confidence values, to the results
for attr in self.simple_attrs:
try: |
if val:
if attr.endswith('date'):
display_value = repr(val.value)
else:
display_value = str(val.value)
attr_confidence = val.confidence
else:
display_value = ''
attr_confidence = '1'
obj_data[attr] = display_value
obj_data[attr + '_confidence'] = attr_confidence
# Differentiate id/text for complex attributes
for attr in self.complex_attrs:
try:
val = getattr(obj, attr).get_value()
except AttributeError:
val = None
if val:
val_id = val.id
val_text = str(val.value)
attr_confidence = val.confidence
else:
val_id, val_text = '', ''
attr_confidence = '1'
obj_data[attr] = {}
obj_data[attr]['id'] = val_id
obj_data[attr]['text'] = val_text
obj_data[attr + '_confidence'] = attr_confidence
# Add optional attributes that are lists
for attr in self.list_attrs:
try:
lst = getattr(obj, attr).get_list()
except AttributeError:
lst = []
lst_no_nulls = [inst.get_value() for inst in lst if inst.get_value()]
if any(lst_no_nulls):
lst_confidence = lst_no_nulls[0].confidence
else:
lst_confidence = '1'
cleaned_lst = []
for inst in lst_no_nulls:
if attr != 'classification':
cleaned_lst.append({
'id': inst.id,
'text': str(inst.value)
})
else:
# For classifications, we want to get the Classification
# model, not the OrganizationClassification model
cleaned_lst.append({
'id': inst.value.id,
'text': str(inst.value)
})
obj_data[attr] = cleaned_lst
obj_data[attr + '_confidence'] = lst_confidence
# Add objects corresponding to foreign keys
for attr, fkey in self.set_attrs.items():
try:
lst = getattr(obj, attr).all()
except AttributeError:
lst = []
lst_refs = [getattr(inst.object_ref, fkey) for inst in lst
if getattr(inst.object_ref, fkey, None)]
lst_values = [inst.get_value().value for inst in lst_refs if inst.get_value()]
# We need to traverse the relationship again due to the particular
# membership relationships on complex fields
lst_values = [inst.get_value() for inst in lst_values if inst.get_value()]
if any(lst_values):
lst_confidence = lst_values[0].confidence
else:
lst_confidence = '1'
cleaned_lst = []
for inst in lst_values:
cleaned_lst.append({
'id': inst.id,
'text': str(inst.value)
})
obj_data[attr] = cleaned_lst
obj_data[attr + '_confidence'] = lst_confidence
collected_attrs.append(obj_data)
return collected_attrs
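# Minimal usage sketch (hypothetical queryset and field names, not taken
# from a real view):
# autofill = Autofill(objects=Organization.objects.all()[:10],
#                     simple_attrs=['firstciteddate'],
#                     complex_attrs=['division'],
#                     list_attrs=['aliases'],
#                     set_attrs={'membershiporganizationmember_set': 'organization'})
# autofill.attrs  # -> list of dicts with values plus *_confidence keys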
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
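# Quick sketch of DictDiffer behavior:
# >>> d = DictDiffer({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
# >>> sorted(d.added()), sorted(d.removed()), sorted(d.changed())
# (['a'], ['c'], ['b'])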
class VersionsMixin(object):
'''
Model mixin to get version diff for a given model instance
'''
def _getDiff(self, differ):
skip_fields = ['date_updated', 'id']
def makeIt(change_type):
for field in getattr(differ, change_type)():
if field not in skip_fields:
if change_type == 'changed':
yield {
'field_name': field,
'to': differ.current_dict[field],
'from': differ.past_dict[field],
}
elif change_type == 'added':
yield {
'field_name': field,
'to': differ.current_dict[field],
'from': None
}
elif change_type == 'removed':
yield {
'field_name': field,
'to': None,
'from': differ.past_dict[field]
}
additions = [a for a in makeIt('added')]
changes = [c for c in makeIt('changed')]
removals = [r for r in makeIt('removed')]
return additions, changes, removals
def getRevisions(self, versions):
from source.models import Source, AccessPoint
revisions = []
for version in versions:
complete_revision = {
'id': version.revision.id
}
revision_meta = {
'modification_date': version.revision.date_created,
'comment': version.revision.comment,
'user': version.revision.user,
}
complex_list_models = [c.field_model._meta.model_name for c in getattr(self, 'complex_lists', [])]
for object_property in version.revision.version_set.all():
if object_property.object != self or isinstance(self, Source):
serialized_data = json.loads(object_property.serialized_data)[0]
# a bit of a hack in order to get sources and access points
# to work
field_names = []
if 'value' in serialized_data['fields']:
field_names.append((serialized_data['fields']['value'],
serialized_data['model'].split('.')[1]))
else:
for field in serialized_data['fields']:
field_names.append((serialized_data['fields'][field], field))
for value, field_name in field_names:
if field_name in complex_list_models:
try:
complete_revision[field_name].add(value)
except KeyError:
complete_revision[field_name] = {value}
else:
complete_revision[field_name] = value
revisions.append((complete_revision, version.revision))
return revisions
def getDifferences(self, revisions):
differences = []
for index, (version, revision) in enumerate(revisions):
if (index - 1) >= 0:
try:
previous, previous_revision = revisions[index - 1]
except (IndexError, AssertionError):
continue
else:
continue
differ = DictDiffer(previous, version)
fields_added, fields_changed, fields_removed = self._getDiff(differ)
diff = {
'modification_date': previous_revision.date_created,
'comment': previous_revision.comment,
'user': previous_revision.user,
'from_id': version['id'],
'to_id': previous_revision.id,
'fields_added': fields_added,
'fields_changed': fields_changed,
'fields_removed': fields_removed,
'model': self._meta.object_name,
}
differences.append(diff)
return differences
def getVersions(self, versions=None):
if not versions:
versions = Version.objects.get_for_object(self)
revisions = self.getRevisions(versions)
return self.getDifferences(revisions)
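# Usage sketch (hypothetical model): any model that mixes this in, e.g.
# class Person(models.Model, VersionsMixin): ...
# can call instance.getVersions() to get a list of field-level diffs
# between consecutive django-reversion revisions.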
def execute_sql(file_path):
'''
Execute arbitrary SQL code from a file location.
'''
with open(file_path) as f:
statements = f.read().split(';')
with connection.cursor() as c:
for statement in statements:
if statement.strip():
c.execute(statement.strip())
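# e.g. execute_sql('sql/create_views.sql')  # hypothetical path
# Note the naive ';' split assumes no semicolons inside string literals.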
def class_for_name(class_name, module_name="person.models"):
# Check for irregular class names (names where we cannot infer the class
# name by capitalizing the first letter of class_name)
irregular_names = (
('Membershipperson', 'MembershipPerson'),
('Membershiporganization', 'MembershipOrganization'),
('Personextra', 'PersonExtra'),
('Personbiography', 'PersonBiography')
)
for name, formatted_name in irregular_names:
if class_name == name:
class_name = formatted_name
break
if class_name not in settings.ALLOWED_CLASS_FOR_NAME:
raise Exception("Unallowed class for name")
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
return class_
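# e.g. class_for_name('Membershipperson') -> MembershipPerson, provided the
# mapped name appears in settings.ALLOWED_CLASS_FOR_NAME.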
def get_osm_by_id(osm_id):
osm_feature = None
cursor = connection.cursor()
query = '''
SELECT
ST_X(ST_Centroid(geometry)),
ST_Y(ST_Centroid(geometry)),
*
FROM osm_data
WHERE id = %s
'''
# Parameterized to avoid interpolating osm_id into the SQL string.
cursor.execute(query, [osm_id])
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('OSMFeature', columns)
row = cursor.fetchone()
if row:
osm_feature = results_tuple(*row)
return osm_feature
def get_hierarchy_by_id(osm_id):
hierarchy = '''
SELECT parents.*
FROM osm_data AS parents
JOIN (
SELECT
UNNEST(hierarchy) AS h_id,
localname,
tags,
admin_level,
name,
geometry
FROM osm_data
WHERE id = %s
) AS child
ON parents.id = child.h_id::integer
'''
cursor = connection.cursor()
cursor.execute(hierarchy, [osm_id])
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('OSMFeature', columns)
hierarchy = [results_tuple(*r) for r in cursor]
return hierarchy
def generate_hierarchy(query, q_args, rel_field, sources=False):
cursor = connection.cursor()
cursor.execute(query, q_args)
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('Organization', columns)
hierarchy = [(idx, results_tuple(*r)) for idx, r in enumerate(cursor)]
trimmed_hierarchy = []
for org_id, orgs in itertools.groupby(hierarchy, key=lambda x: x[1].id):
group = list(orgs)
lowest_index = min(g[0] for g in group)
orgs = [o[1] for o in group]
start_date = None
if orgs[0].start_date:
start_date = orgs[0].start_date
end_date = None
if orgs[0].end_date:
end_date = orgs[0].end_date
# Create a label, which we display on the charts for person and unit "parents."
label = '<b>' + orgs[0].name + '</b>' + '\n\n' + _('Unknown commander')
if orgs[0].commander:
label = '<b>' + orgs[0].name + '</b>' + '\n\n' + orgs[0].commander
trimmed = {
'id': org_id,
'label': str(label),
'detail_id': str(orgs[0].org_org_id),
'name': orgs[0].name,
'other_names': list({o.alias.strip() for o in orgs if o.alias}),
'classifications': list({o.classification.strip() for o in orgs if o.classification}),
'division_id': orgs[0].division_id,
'date_first_cited': start_date,
'date_last_cited': end_date,
'commander': orgs[0].commander,
}
trimmed[rel_field] = getattr(orgs[0], rel_field)
if sources:
trimmed['sources'] = []
source_ids = []
for o in orgs:
org_source = json.loads(o.source)
if org_source['id'] not in source_ids:
trimmed['sources'].append(org_source)
source_ids.append(org_source['id'])
trimmed['confidence'] = REVERSE_CONFIDENCE[int(orgs[0].confidence)].title()
trimmed_hierarchy.append((lowest_index, trimmed))
hierarchy = [i[1] for i in sorted(trimmed_hierarchy, key=lambda x: x[0])]
return hierarchy
# this makes an edge list that shows the parent relationships (see child_id)
solr = pysolr.Solr(settings.SOLR_URL)
def get_org_hierarchy_by_id(org_id,
when=None,
sources=False,
direction='up',
authenticated=False):
'''
org_id: uuid for the organization
when: date for limiting the search
'''
base_url = settings.SOLR_URL
if direction == 'up':
from_ = 'child'
to = 'parent'
elif direction == 'down':
from_ = 'parent'
to = 'child'
filter_query = '{!graph from=composition_%s_id_s_fct to=composition_%s_id_s_fct returnRoot=true}composition_%s_id_s_fct:%s' % (from_, to, from_, org_id)
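# For direction='up' and a hypothetical org_id this renders as:
# {!graph from=composition_child_id_s_fct to=composition_parent_id_s_fct returnRoot=true}composition_child_id_s_fct:abc-123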
if when:
filter_query += ' AND {!field f=composition_daterange_dr op=contains}%s' % when
if not authenticated:
filter_query += ' AND published_b:T'
results = solr.search('*:*', fq=filter_query)
if when:
for result in results:
for key in [from_, to]:
org = result['composition_{}_id_s_fct'.format(key)]
args = (org, when)
query = 'commander_org_id_s_fct:%s AND {!field f=commander_assignment_range_dr op=contains}%s' % args
if not authenticated:
query += ' AND published_b:T'
commanders = solr.search(query)
# We need to deduplicate commanders and then throw out the open ended date ranges.
result['commanders-{}'.format(key)] = []
for commander in commanders:
label_fmt = '{name} ({start} - {end})'
assignment_range = commander['commander_assignment_range_dr']
start, end = assignment_range.replace('[', '').replace(']', '').split(' TO ')
if start == '*':
start = '?'
if end == '*':
end = '?'
label = label_fmt.format(name=commander['commander_person_name_s'],
start=start,
end=end)
commander['label'] = label
result['commanders-{}'.format(key)].append(commander)
return results
def get_child_orgs_by_id(org_id, when=None, sources=False):
hierarchy = '''
WITH RECURSIVE parents AS (
SELECT
o.*,
NULL::VARCHAR AS parent_id,
NULL::VARCHAR AS parent_name,
NULL::DATE AS start_date,
NULL::DATE AS end_date,
NULL::VARCHAR AS comp_open_ended,
NULL::VARCHAR AS source,
NULL::VARCHAR AS confidence
FROM organization As o
WHERE id = %s
UNION
SELECT
o.*,
h.parent_id::VARCHAR AS parent_id,
parents.name AS parent_name,
h.start_date::date,
h.end_date::date,
h.open_ended AS comp_open_ended,
row_to_json(ss.*)::VARCHAR AS source,
ccc.confidence
FROM organization AS o
JOIN composition AS h
ON o.id = h.child_id
JOIN composition_compositionchild AS ccc
ON h.id = ccc.object_ref_id
LEFT JOIN composition_compositionchild_sources AS cccs
ON ccc.id = cccs.compositionchild_id
LEFT JOIN source_source AS ss
ON cccs.source_id = ss.uuid
JOIN parents
ON parents.id = h.parent_id
) SELECT * FROM parents WHERE id != %s
'''
q_args = [org_id, org_id]
if when:
hierarchy = '''
{}
AND CASE
WHEN (start_date IS NOT NULL AND
end_date IS NOT NULL AND
comp_open_ended IN ('N', 'E'))
THEN (%s::date BETWEEN start_date::date AND end_date::date)
WHEN (start_date IS NOT NULL AND
end_date IS NOT NULL AND
comp_open_ended = 'Y')
THEN (%s::date BETWEEN start_date::date AND NOW()::date)
WHEN (start_date IS NOT NULL AND
end_date IS NULL AND
comp_open_ended IN ('N', 'E'))
THEN (start_date::date = %s::date)
WHEN (start_date IS NOT NULL AND
end_date IS NULL AND
comp_open_ended = 'Y')
THEN (%s::date BETWEEN start_date::date AND NOW()::date)
WHEN (start_date IS NULL AND
end_date IS NOT NULL AND
comp_open_ended IN ('N', 'E'))
THEN (end_date::date = %s)
WHEN (start_date IS NULL AND
end_date IS NOT NULL AND
comp_open_ended = 'Y')
THEN TRUE
END
'''.format(hierarchy)
q_args.extend([when] * 5)
hierarchy = '{} ORDER BY id'.format(hierarchy)
hierarchy = generate_hierarchy(hierarchy, q_args, 'parent_id', sources=sources)
return hierarchy
def deleted_in_str(objects):
index = 0
for obj in objects:
if isinstance(obj, list):
objects[index] = deleted_in_str(obj)
else:
if hasattr(obj, 'field_name'):
name = obj.field_name + ": " + str(obj)
else:
name = type(obj).__name__ + ": " + str(obj)
if '_sources' in name:
objects[index] = "Object sources"
else:
objects[index] = name
index += 1
return objects
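# Sketch: plain values become "<type>: <value>" strings, in place.
# >>> deleted_in_str(['x', ['y']])
# ['str: x', ['str: y']]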
def import_class(cl):
d = cl.rfind('.')
classname = cl[d+1:len(cl)]
m = __import__(cl[0:d], globals(), locals(), [classname])
return getattr(m, classname)
def format_facets(facet_dict):
'''
pysolr formats facets in a weird way. This helper function converts their
list-like data structure into a dict that we can iterate more easily.
Basic idea: convert counts from a list to a dict containing a list of tuples
and a flag for whether any facets were found, e.g.:
['foo', 1, 'bar', 2] --> {'any': True, 'counts': [('foo', 1), ('bar', 2)]}
'''
facet_types = ['facet_queries', 'facet_fields', 'facet_ranges',
'facet_heatmaps', 'facet_intervals']
out = {}
for ftype, facets in facet_dict.items():
updated_facets = {}
for facet, items in facets.items():
if isinstance(items, dict):
# Ranges have a slightly different format
item_list = items['counts']
else:
item_list = items
counts = []
for i, el in enumerate(item_list):
# The attribute name always comes first; use them as keys
if i % 2 == 0:
count = item_list[i+1]
counts.append((el, count))
else:
# We already bunched this one, so skip it
continue
updated_facets[facet] = {}
updated_facets[facet]['counts'] = counts
# Check to see if there are any facets in this category
any_facets = sum(count[1] for count in counts) > 0
updated_facets[facet]['any'] = any_facets
out[ftype] = updated_facets
return out
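# Sketch with a pysolr-style facet_fields payload:
# >>> format_facets({'facet_fields': {'rank': ['general', 3, 'colonel', 0]}})
# {'facet_fields': {'rank': {'counts': [('general', 3), ('colonel', 0)], 'any': True}}}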
def get_command_edges(org_id, when=None, parents=True):
edges = []
if parents:
hierarchy_list = get_org_hierarchy_by_id(org_id, when=when)
from_key, to_key = 'id', 'child_id'
else:
hierarchy_list = get_child_orgs_by_id(org_id, when=when)
from_key, to_key = 'parent_id', 'id'
# Iterate over the hierarchy_list, and create nodes
for org in hierarchy_list:
edges.append({'from': str(org[from_key]), 'to': org[to_key]})
return edges
def get_command_nodes(org_id, when=None):
hierarchy_list = get_org_hierarchy_by_id(org_id, when=when)
# Iterate over the hierarchy_list, and convert/modify hierarchy object, as needed.
nodes = []
for org in hierarchy_list:
trimmed = {
'id': str(org['id']),
'label': org['label'],
'detail_id': org['detail_id']
}
nodes.append(trimmed)
return nodes
def get_source_context(field_name, access_point, uncommitted=True):
context = {
'field_name': field_name,
'uncommitted': uncommitted,
'id': access_point.uuid,
'publication': access_point.source.publication,
'publication_country': access_point.source.publication_country,
'title': access_point.source.title,
'date_added': None,
'published_on': str(access_point.source.published_date),
'access_point': str(access_point),
'source_url': access_point.source.source_url,
'source_detail_url': reverse('view-source', kwargs={'pk': access_point.source.uuid}),
'archive_url': access_point.archive_url,
'source_id': access_point.source.uuid,
'page_number': access_point.trigger,
'accessed_on': None,
}
if access_point.source.date_added:
context['date_added'] = access_point.source.date_added.strftime('%Y-%m-%d')
if access_point.accessed_on:
context['accessed_on'] = access_point.accessed_on.strftime('%Y-%m-%dT%H:%M:%S')
return context | val = getattr(obj, attr).get_value()
except AttributeError:
val = None | random_line_split |
utils.py | import re
import importlib
from collections import namedtuple
import itertools
import json
from io import StringIO, BytesIO
import zipfile
import csv
import pysolr
from reversion.models import Version
from django.conf import settings
from django.utils.translation import gettext as _
from django.contrib.auth.decorators import login_required
from django.db import connection
from django.urls import reverse
CONFIDENCE_MAP = {
'low': 1,
'medium': 2,
'high': 3,
}
REVERSE_CONFIDENCE = {v:k for k,v in CONFIDENCE_MAP.items()}
class Autofill(object):
'''
Helper class for getting attributes that we already know about entities
based on autocomplete queries.
'''
def __init__(self, objects=[], simple_attrs=[], complex_attrs=[],
list_attrs=[], set_attrs={}):
# Model objects that we want to query
self.objects = objects
# Simple (non-complex) fields
self.simple_attrs = simple_attrs
# Complex single-select fields
self.complex_attrs = complex_attrs
# Complex multiselect fields
self.list_attrs = list_attrs
# Querysets from foreign key relationships
# (Requires both the attr name, and the foreign key name,
# like: {'membershippersonmember_set': 'organization'})
self.set_attrs = set_attrs
@property
def attrs(self):
collected_attrs = []
for obj in self.objects:
obj_data = {
'text': str(obj.name),
'id': obj.id
}
# Add optional attributes, with confidence values, to the results
for attr in self.simple_attrs:
try:
val = getattr(obj, attr).get_value()
except AttributeError:
val = None
if val:
if attr.endswith('date'):
display_value = repr(val.value)
else:
display_value = str(val.value)
attr_confidence = val.confidence
else:
display_value = ''
attr_confidence = '1'
obj_data[attr] = display_value
obj_data[attr + '_confidence'] = attr_confidence
# Differentiate id/text for complex attributes
for attr in self.complex_attrs:
try:
val = getattr(obj, attr).get_value()
except AttributeError:
val = None
if val:
val_id = val.id
val_text = str(val.value)
attr_confidence = val.confidence
else:
val_id, val_text = '', ''
attr_confidence = '1'
obj_data[attr] = {}
obj_data[attr]['id'] = val_id
obj_data[attr]['text'] = val_text
obj_data[attr + '_confidence'] = attr_confidence
# Add optional attributes that are lists
for attr in self.list_attrs:
try:
lst = getattr(obj, attr).get_list()
except AttributeError:
lst = []
lst_no_nulls = [inst.get_value() for inst in lst if inst.get_value()]
if any(lst_no_nulls):
lst_confidence = lst_no_nulls[0].confidence
else:
lst_confidence = '1'
cleaned_lst = []
for inst in lst_no_nulls:
if attr != 'classification':
cleaned_lst.append({
'id': inst.id,
'text': str(inst.value)
})
else:
# For classifications, we want to get the Classification
# model, not the OrganizationClassification model
cleaned_lst.append({
'id': inst.value.id,
'text': str(inst.value)
})
obj_data[attr] = cleaned_lst
obj_data[attr + '_confidence'] = lst_confidence
# Add objects corresponding to foreign keys
for attr, fkey in self.set_attrs.items():
try:
lst = getattr(obj, attr).all()
except AttributeError:
lst = []
lst_refs = [getattr(inst.object_ref, fkey) for inst in lst
if getattr(inst.object_ref, fkey, None)]
lst_values = [inst.get_value().value for inst in lst_refs if inst.get_value()]
# We need to traverse the relationship again due to the particular
# membership relationships on complex fields
lst_values = [inst.get_value() for inst in lst_values if inst.get_value()]
if any(lst_values):
lst_confidence = lst_values[0].confidence
else:
lst_confidence = '1'
cleaned_lst = []
for inst in lst_values:
cleaned_lst.append({
'id': inst.id,
'text': str(inst.value)
})
obj_data[attr] = cleaned_lst
obj_data[attr + '_confidence'] = lst_confidence
collected_attrs.append(obj_data)
return collected_attrs
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
class VersionsMixin(object):
'''
Model mixin to get version diff for a given model instance
'''
def _getDiff(self, differ):
skip_fields = ['date_updated', 'id']
def makeIt(change_type):
for field in getattr(differ, change_type)():
if field not in skip_fields:
if change_type == 'changed':
yield {
'field_name': field,
'to': differ.current_dict[field],
'from': differ.past_dict[field],
}
elif change_type == 'added':
yield {
'field_name': field,
'to': differ.current_dict[field],
'from': None
}
elif change_type == 'removed':
yield {
'field_name': field,
'to': None,
'from': differ.past_dict[field]
}
additions = [a for a in makeIt('added')]
changes = [c for c in makeIt('changed')]
removals = [r for r in makeIt('removed')]
return additions, changes, removals
def getRevisions(self, versions):
from source.models import Source, AccessPoint
revisions = []
for version in versions:
complete_revision = {
'id': version.revision.id
}
revision_meta = {
'modification_date': version.revision.date_created,
'comment': version.revision.comment,
'user': version.revision.user,
}
complex_list_models = [c.field_model._meta.model_name for c in getattr(self, 'complex_lists', [])]
for object_property in version.revision.version_set.all():
if object_property.object != self or isinstance(self, Source):
serialized_data = json.loads(object_property.serialized_data)[0]
# a bit of a hack in order to get sources and access points
# to work
field_names = []
if 'value' in serialized_data['fields']:
field_names.append((serialized_data['fields']['value'],
serialized_data['model'].split('.')[1]))
else:
for field in serialized_data['fields']:
field_names.append((serialized_data['fields'][field], field))
for value, field_name in field_names:
if field_name in complex_list_models:
try:
complete_revision[field_name].add(value)
except KeyError:
complete_revision[field_name] = {value}
else:
complete_revision[field_name] = value
revisions.append((complete_revision, version.revision))
return revisions
def getDifferences(self, revisions):
differences = []
for index, (version, revision) in enumerate(revisions):
if (index - 1) >= 0:
try:
previous, previous_revision = revisions[index - 1]
except (IndexError, AssertionError):
continue
else:
continue
differ = DictDiffer(previous, version)
fields_added, fields_changed, fields_removed = self._getDiff(differ)
diff = {
'modification_date': previous_revision.date_created,
'comment': previous_revision.comment,
'user': previous_revision.user,
'from_id': version['id'],
'to_id': previous_revision.id,
'fields_added': fields_added,
'fields_changed': fields_changed,
'fields_removed': fields_removed,
'model': self._meta.object_name,
}
differences.append(diff)
return differences
def getVersions(self, versions=None):
if not versions:
versions = Version.objects.get_for_object(self)
revisions = self.getRevisions(versions)
return self.getDifferences(revisions)
def execute_sql(file_path):
'''
Execute arbitrary SQL code from a file location.
'''
with open(file_path) as f:
statements = f.read().split(';')
with connection.cursor() as c:
for statement in statements:
if statement.strip():
c.execute(statement.strip())
def class_for_name(class_name, module_name="person.models"):
# Check for irregular class names (names where we cannot infer the class
# name by capitalizing the first letter of class_name)
irregular_names = (
('Membershipperson', 'MembershipPerson'),
('Membershiporganization', 'MembershipOrganization'),
('Personextra', 'PersonExtra'),
('Personbiography', 'PersonBiography')
)
for name, formatted_name in irregular_names:
if class_name == name:
class_name = formatted_name
break
if class_name not in settings.ALLOWED_CLASS_FOR_NAME:
raise Exception("Unallowed class for name")
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
return class_
def get_osm_by_id(osm_id):
osm_feature = None
cursor = connection.cursor()
query = '''
SELECT
ST_X(ST_Centroid(geometry)),
ST_Y(ST_Centroid(geometry)),
*
FROM osm_data
WHERE id = %s
'''
# Parameterized to avoid interpolating osm_id into the SQL string.
cursor.execute(query, [osm_id])
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('OSMFeature', columns)
row = cursor.fetchone()
if row:
osm_feature = results_tuple(*row)
return osm_feature
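# e.g. feature = get_osm_by_id(123456)  # hypothetical OSM id
# feature[0] and feature[1] then hold the centroid longitude and latitude.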
def get_hierarchy_by_id(osm_id):
hierarchy = '''
SELECT parents.*
FROM osm_data AS parents
JOIN (
SELECT
UNNEST(hierarchy) AS h_id,
localname,
tags,
admin_level,
name,
geometry
FROM osm_data
WHERE id = %s
) AS child
ON parents.id = child.h_id::integer
'''
cursor = connection.cursor()
cursor.execute(hierarchy, [osm_id])
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('OSMFeature', columns)
hierarchy = [results_tuple(*r) for r in cursor]
return hierarchy
def generate_hierarchy(query, q_args, rel_field, sources=False):
cursor = connection.cursor()
cursor.execute(query, q_args)
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('Organization', columns)
hierarchy = [(idx, results_tuple(*r)) for idx, r in enumerate(cursor)]
trimmed_hierarchy = []
for org_id, orgs in itertools.groupby(hierarchy, key=lambda x: x[1].id):
group = list(orgs)
lowest_index = min(g[0] for g in group)
orgs = [o[1] for o in group]
start_date = None
if orgs[0].start_date:
start_date = orgs[0].start_date
end_date = None
if orgs[0].end_date:
end_date = orgs[0].end_date
# Create a label, which we display on the charts for person and unit "parents."
label = '<b>' + orgs[0].name + '</b>' + '\n\n' + _('Unknown commander')
if orgs[0].commander:
label = '<b>' + orgs[0].name + '</b>' + '\n\n' + orgs[0].commander
trimmed = {
'id': org_id,
'label': str(label),
'detail_id': str(orgs[0].org_org_id),
'name': orgs[0].name,
'other_names': list({o.alias.strip() for o in orgs if o.alias}),
'classifications': list({o.classification.strip() for o in orgs if o.classification}),
'division_id': orgs[0].division_id,
'date_first_cited': start_date,
'date_last_cited': end_date,
'commander': orgs[0].commander,
}
trimmed[rel_field] = getattr(orgs[0], rel_field)
if sources:
trimmed['sources'] = []
source_ids = []
for o in orgs:
org_source = json.loads(o.source)
if org_source['id'] not in source_ids:
trimmed['sources'].append(org_source)
source_ids.append(org_source['id'])
trimmed['confidence'] = REVERSE_CONFIDENCE[int(orgs[0].confidence)].title()
trimmed_hierarchy.append((lowest_index, trimmed))
hierarchy = [i[1] for i in sorted(trimmed_hierarchy, key=lambda x: x[0])]
return hierarchy
# this makes an edge list that shows the parent relationships (see child_id)
solr = pysolr.Solr(settings.SOLR_URL)
def get_org_hierarchy_by_id(org_id,
when=None,
sources=False,
direction='up',
authenticated=False):
'''
org_id: uuid for the organization
when: date for limiting the search
'''
base_url = settings.SOLR_URL
if direction == 'up':
from_ = 'child'
to = 'parent'
elif direction == 'down':
from_ = 'parent'
to = 'child'
filter_query = '{!graph from=composition_%s_id_s_fct to=composition_%s_id_s_fct returnRoot=true}composition_%s_id_s_fct:%s' % (from_, to, from_, org_id)
if when:
filter_query += ' AND {!field f=composition_daterange_dr op=contains}%s' % when
if not authenticated:
filter_query += ' AND published_b:T'
results = solr.search('*:*', fq=filter_query)
if when:
for result in results:
for key in [from_, to]:
org = result['composition_{}_id_s_fct'.format(key)]
args = (org, when)
query = 'commander_org_id_s_fct:%s AND {!field f=commander_assignment_range_dr op=contains}%s' % args
if not authenticated:
query += ' AND published_b:T'
commanders = solr.search(query)
# We need to deduplicate commanders and then throw out the open ended date ranges.
result['commanders-{}'.format(key)] = []
for commander in commanders:
label_fmt = '{name} ({start} - {end})'
assignment_range = commander['commander_assignment_range_dr']
start, end = assignment_range.replace('[', '').replace(']', '').split(' TO ')
if start == '*':
start = '?'
if end == '*':
end = '?'
label = label_fmt.format(name=commander['commander_person_name_s'],
start=start,
end=end)
commander['label'] = label
result['commanders-{}'.format(key)].append(commander)
return results
def get_child_orgs_by_id(org_id, when=None, sources=False):
hierarchy = '''
WITH RECURSIVE parents AS (
SELECT
o.*,
NULL::VARCHAR AS parent_id,
NULL::VARCHAR AS parent_name,
NULL::DATE AS start_date,
NULL::DATE AS end_date,
NULL::VARCHAR AS comp_open_ended,
NULL::VARCHAR AS source,
NULL::VARCHAR AS confidence
FROM organization As o
WHERE id = %s
UNION
SELECT
o.*,
h.parent_id::VARCHAR AS parent_id,
parents.name AS parent_name,
h.start_date::date,
h.end_date::date,
h.open_ended AS comp_open_ended,
row_to_json(ss.*)::VARCHAR AS source,
ccc.confidence
FROM organization AS o
JOIN composition AS h
ON o.id = h.child_id
JOIN composition_compositionchild AS ccc
ON h.id = ccc.object_ref_id
LEFT JOIN composition_compositionchild_sources AS cccs
ON ccc.id = cccs.compositionchild_id
LEFT JOIN source_source AS ss
ON cccs.source_id = ss.uuid
JOIN parents
ON parents.id = h.parent_id
) SELECT * FROM parents WHERE id != %s
'''
q_args = [org_id, org_id]
if when:
hierarchy = '''
{}
AND CASE
WHEN (start_date IS NOT NULL AND
end_date IS NOT NULL AND
comp_open_ended IN ('N', 'E'))
THEN (%s::date BETWEEN start_date::date AND end_date::date)
WHEN (start_date IS NOT NULL AND
end_date IS NOT NULL AND
comp_open_ended = 'Y')
THEN (%s::date BETWEEN start_date::date AND NOW()::date)
WHEN (start_date IS NOT NULL AND
end_date IS NULL AND
comp_open_ended IN ('N', 'E'))
THEN (start_date::date = %s::date)
WHEN (start_date IS NOT NULL AND
end_date IS NULL AND
comp_open_ended = 'Y')
THEN (%s::date BETWEEN start_date::date AND NOW()::date)
WHEN (start_date IS NULL AND
end_date IS NOT NULL AND
comp_open_ended IN ('N', 'E'))
THEN (end_date::date = %s)
WHEN (start_date IS NULL AND
end_date IS NOT NULL AND
comp_open_ended = 'Y')
THEN TRUE
END
'''.format(hierarchy)
q_args.extend([when] * 5)
hierarchy = '{} ORDER BY id'.format(hierarchy)
hierarchy = generate_hierarchy(hierarchy, q_args, 'parent_id', sources=sources)
return hierarchy
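# Usage sketch (hypothetical id): descendants of a unit as of a date.
# children = get_child_orgs_by_id('abc-123', when='2017-01-01', sources=True)
# Each entry is a dict built by generate_hierarchy, keyed on 'parent_id'.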
def deleted_in_str(objects):
index = 0
for obj in objects:
if isinstance(obj, list):
objects[index] = deleted_in_str(obj)
else:
if hasattr(obj, 'field_name'):
name = obj.field_name + ": " + str(obj)
else:
name = type(obj).__name__ + ": " + str(obj)
if '_sources' in name:
objects[index] = "Object sources"
else:
|
index += 1
return objects
def import_class(cl):
d = cl.rfind('.')
classname = cl[d+1:len(cl)]
m = __import__(cl[0:d], globals(), locals(), [classname])
return getattr(m, classname)
def format_facets(facet_dict):
'''
pysolr formats facets in a weird way. This helper function converts their
list-like data structure into a dict that we can iterate more easily.
Basic idea: convert counts from a list to a dict containing a list of tuples
and a flag for whether any facets were found, e.g.:
['foo', 1, 'bar', 2] --> {'any': True, 'counts': [('foo', 1), ('bar', 2)]}
'''
facet_types = ['facet_queries', 'facet_fields', 'facet_ranges',
'facet_heatmaps', 'facet_intervals']
out = {}
for ftype, facets in facet_dict.items():
updated_facets = {}
for facet, items in facets.items():
if isinstance(items, dict):
# Ranges have a slightly different format
item_list = items['counts']
else:
item_list = items
counts = []
for i, el in enumerate(item_list):
# The attribute name always comes first; use them as keys
if i % 2 == 0:
count = item_list[i+1]
counts.append((el, count))
else:
# We already bunched this one, so skip it
continue
updated_facets[facet] = {}
updated_facets[facet]['counts'] = counts
# Check to see if there are any facets in this category
any_facets = sum(count[1] for count in counts) > 0
updated_facets[facet]['any'] = any_facets
out[ftype] = updated_facets
return out
def get_command_edges(org_id, when=None, parents=True):
edges = []
if parents:
hierarchy_list = get_org_hierarchy_by_id(org_id, when=when)
from_key, to_key = 'id', 'child_id'
else:
hierarchy_list = get_child_orgs_by_id(org_id, when=when)
from_key, to_key = 'parent_id', 'id'
# Iterate over the hierarchy_list, and create nodes
for org in hierarchy_list:
edges.append({'from': str(org[from_key]), 'to': org[to_key]})
return edges
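# Sketch of the resulting edge list (hypothetical ids):
# [{'from': '11', 'to': '12'}, {'from': '11', 'to': '13'}]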
def get_command_nodes(org_id, when=None):
hierarchy_list = get_org_hierarchy_by_id(org_id, when=when)
# Iterate over the hierarchy_list, and convert/modify hierarchy object, as needed.
nodes = []
for org in hierarchy_list:
trimmed = {
'id': str(org['id']),
'label': org['label'],
'detail_id': org['detail_id']
}
nodes.append(trimmed)
return nodes
def get_source_context(field_name, access_point, uncommitted=True):
context = {
'field_name': field_name,
'uncommitted': uncommitted,
'id': access_point.uuid,
'publication': access_point.source.publication,
'publication_country': access_point.source.publication_country,
'title': access_point.source.title,
'date_added': None,
'published_on': str(access_point.source.published_date),
'access_point': str(access_point),
'source_url': access_point.source.source_url,
'source_detail_url': reverse('view-source', kwargs={'pk': access_point.source.uuid}),
'archive_url': access_point.archive_url,
'source_id': access_point.source.uuid,
'page_number': access_point.trigger,
'accessed_on': None,
}
if access_point.source.date_added:
context['date_added'] = access_point.source.date_added.strftime('%Y-%m-%d')
if access_point.accessed_on:
context['accessed_on'] = access_point.accessed_on.strftime('%Y-%m-%dT%H:%M:%S')
return context
| objects[index] = name | conditional_block |
utils.py | import re
import importlib
from collections import namedtuple
import itertools
import json
from io import StringIO, BytesIO
import zipfile
import csv
import pysolr
from reversion.models import Version
from django.conf import settings
from django.utils.translation import gettext as _
from django.contrib.auth.decorators import login_required
from django.db import connection
from django.urls import reverse
CONFIDENCE_MAP = {
'low': 1,
'medium': 2,
'high': 3,
}
REVERSE_CONFIDENCE = {v:k for k,v in CONFIDENCE_MAP.items()}
class Autofill(object):
'''
Helper class for getting attributes that we already know about entities
based on autocomplete queries.
'''
def __init__(self, objects=[], simple_attrs=[], complex_attrs=[],
list_attrs=[], set_attrs={}):
# Model objects that we want to query
self.objects = objects
# Simple (non-complex) fields
self.simple_attrs = simple_attrs
# Complex single-select fields
self.complex_attrs = complex_attrs
# Complex multiselect fields
self.list_attrs = list_attrs
# Querysets from foreign key relationships
# (Requires both the attr name, and the foreign key name,
# like: {'membershippersonmember_set': 'organization'})
self.set_attrs = set_attrs
@property
def attrs(self):
collected_attrs = []
for obj in self.objects:
obj_data = {
'text': str(obj.name),
'id': obj.id
}
# Add optional attributes, with confidence values, to the results
for attr in self.simple_attrs:
try:
val = getattr(obj, attr).get_value()
except AttributeError:
val = None
if val:
if attr.endswith('date'):
display_value = repr(val.value)
else:
display_value = str(val.value)
attr_confidence = val.confidence
else:
display_value = ''
attr_confidence = '1'
obj_data[attr] = display_value
obj_data[attr + '_confidence'] = attr_confidence
# Differentiate id/text for complex attributes
for attr in self.complex_attrs:
try:
val = getattr(obj, attr).get_value()
except AttributeError:
val = None
if val:
val_id = val.id
val_text = str(val.value)
attr_confidence = val.confidence
else:
val_id, val_text = '', ''
attr_confidence = '1'
obj_data[attr] = {}
obj_data[attr]['id'] = val_id
obj_data[attr]['text'] = val_text
obj_data[attr + '_confidence'] = attr_confidence
# Add optional attributes that are lists
for attr in self.list_attrs:
try:
lst = getattr(obj, attr).get_list()
except AttributeError:
lst = []
lst_no_nulls = [inst.get_value() for inst in lst if inst.get_value()]
if any(lst_no_nulls):
lst_confidence = lst_no_nulls[0].confidence
else:
lst_confidence = '1'
cleaned_lst = []
for inst in lst_no_nulls:
if attr != 'classification':
cleaned_lst.append({
'id': inst.id,
'text': str(inst.value)
})
else:
# For classifications, we want to get the Classification
# model, not the OrganizationClassification model
cleaned_lst.append({
'id': inst.value.id,
'text': str(inst.value)
})
obj_data[attr] = cleaned_lst
obj_data[attr + '_confidence'] = lst_confidence
# Add objects corresponding to foreign keys
for attr, fkey in self.set_attrs.items():
try:
lst = getattr(obj, attr).all()
except AttributeError:
lst = []
lst_refs = [getattr(inst.object_ref, fkey) for inst in lst
if getattr(inst.object_ref, fkey, None)]
lst_values = [inst.get_value().value for inst in lst_refs if inst.get_value()]
# We need to traverse the relationship again due to the particular
# membership relationships on complex fields
lst_values = [inst.get_value() for inst in lst_values if inst.get_value()]
if any(lst_values):
lst_confidence = lst_values[0].confidence
else:
lst_confidence = '1'
cleaned_lst = []
for inst in lst_values:
cleaned_lst.append({
'id': inst.id,
'text': str(inst.value)
})
obj_data[attr] = cleaned_lst
obj_data[attr + '_confidence'] = lst_confidence
collected_attrs.append(obj_data)
return collected_attrs
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
class VersionsMixin(object):
'''
Model mixin to get version diff for a given model instance
'''
def _getDiff(self, differ):
skip_fields = ['date_updated', 'id']
def | (change_type):
for field in getattr(differ, change_type)():
if field not in skip_fields:
if change_type == 'changed':
yield {
'field_name': field,
'to': differ.current_dict[field],
'from': differ.past_dict[field],
}
elif change_type == 'added':
yield {
'field_name': field,
'to': differ.current_dict[field],
'from': None
}
elif change_type == 'removed':
yield {
'field_name': field,
'to': None,
'from': differ.past_dict[field]
}
additions = [a for a in makeIt('added')]
changes = [c for c in makeIt('changed')]
removals = [r for r in makeIt('removed')]
return additions, changes, removals
def getRevisions(self, versions):
from source.models import Source, AccessPoint
revisions = []
for version in versions:
complete_revision = {
'id': version.revision.id
}
revision_meta = {
'modification_date': version.revision.date_created,
'comment': version.revision.comment,
'user': version.revision.user,
}
complex_list_models = [c.field_model._meta.model_name for c in getattr(self, 'complex_lists', [])]
for object_property in version.revision.version_set.all():
if object_property.object != self or isinstance(self, Source):
serialized_data = json.loads(object_property.serialized_data)[0]
# a bit of a hack in order to get sources and access points
# to work
field_names = []
if 'value' in serialized_data['fields']:
field_names.append((serialized_data['fields']['value'],
serialized_data['model'].split('.')[1]))
else:
for field in serialized_data['fields']:
field_names.append((serialized_data['fields'][field], field))
for value, field_name in field_names:
if field_name in complex_list_models:
try:
complete_revision[field_name].add(value)
except KeyError:
complete_revision[field_name] = {value}
else:
complete_revision[field_name] = value
revisions.append((complete_revision, version.revision))
return revisions
def getDifferences(self, revisions):
differences = []
for index, (version, revision) in enumerate(revisions):
if (index - 1) >= 0:
try:
previous, previous_revision = revisions[index - 1]
except (IndexError, AssertionError):
continue
else:
continue
differ = DictDiffer(previous, version)
fields_added, fields_changed, fields_removed = self._getDiff(differ)
diff = {
'modification_date': previous_revision.date_created,
'comment': previous_revision.comment,
'user': previous_revision.user,
'from_id': version['id'],
'to_id': previous_revision.id,
'fields_added': fields_added,
'fields_changed': fields_changed,
'fields_removed': fields_removed,
'model': self._meta.object_name,
}
differences.append(diff)
return differences
def getVersions(self, versions=None):
if not versions:
versions = Version.objects.get_for_object(self)
revisions = self.getRevisions(versions)
return self.getDifferences(revisions)
def execute_sql(file_path):
'''
Execute arbitrary SQL code from a file location.
'''
with open(file_path) as f:
statements = f.read().split(';')
with connection.cursor() as c:
for statement in statements:
if statement.strip():
c.execute(statement.strip())
def class_for_name(class_name, module_name="person.models"):
# Check for irregular class names (names where we cannot infer the class
# name by capitalizing the first letter of class_name)
irregular_names = (
('Membershipperson', 'MembershipPerson'),
('Membershiporganization', 'MembershipOrganization'),
('Personextra', 'PersonExtra'),
('Personbiography', 'PersonBiography')
)
for name, formatted_name in irregular_names:
if class_name == name:
class_name = formatted_name
break
if class_name not in settings.ALLOWED_CLASS_FOR_NAME:
raise Exception("Unallowed class for name")
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
return class_
def get_osm_by_id(osm_id):
osm_feature = None
cursor = connection.cursor()
query = '''
SELECT
ST_X(ST_Centroid(geometry)),
ST_Y(ST_Centroid(geometry)),
*
FROM osm_data
WHERE id = %s
'''
# Parameterized to avoid interpolating osm_id into the SQL string.
cursor.execute(query, [osm_id])
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('OSMFeature', columns)
row = cursor.fetchone()
if row:
osm_feature = results_tuple(*row)
return osm_feature
def get_hierarchy_by_id(osm_id):
hierarchy = '''
SELECT parents.*
FROM osm_data AS parents
JOIN (
SELECT
UNNEST(hierarchy) AS h_id,
localname,
tags,
admin_level,
name,
geometry
FROM osm_data
WHERE id = %s
) AS child
ON parents.id = child.h_id::integer
'''
cursor = connection.cursor()
cursor.execute(hierarchy, [osm_id])
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('OSMFeature', columns)
hierarchy = [results_tuple(*r) for r in cursor]
return hierarchy
def generate_hierarchy(query, q_args, rel_field, sources=False):
cursor = connection.cursor()
cursor.execute(query, q_args)
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('Organization', columns)
hierarchy = [(idx, results_tuple(*r)) for idx, r in enumerate(cursor)]
trimmed_hierarchy = []
for org_id, orgs in itertools.groupby(hierarchy, key=lambda x: x[1].id):
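# groupby only merges consecutive rows, so this relies on the caller's SQL
# ordering by org id; duplicate rows per org (e.g. one per source) collapse
# into a single trimmed entry below.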
group = list(orgs)
lowest_index = min(g[0] for g in group)
orgs = [o[1] for o in group]
start_date = None
if orgs[0].start_date:
start_date = orgs[0].start_date
end_date = None
if orgs[0].end_date:
end_date = orgs[0].end_date
# Create a label, which we display on the charts for person and unit "parents."
label = '<b>' + orgs[0].name + '</b>' + '\n\n' + _('Unknown commander')
if orgs[0].commander:
label = '<b>' + orgs[0].name + '</b>' + '\n\n' + orgs[0].commander
trimmed = {
'id': org_id,
'label': str(label),
'detail_id': str(orgs[0].org_org_id),
'name': orgs[0].name,
'other_names': list({o.alias.strip() for o in orgs if o.alias}),
'classifications': list({o.classification.strip() for o in orgs if o.classification}),
'division_id': orgs[0].division_id,
'date_first_cited': start_date,
'date_last_cited': end_date,
'commander': orgs[0].commander,
}
trimmed[rel_field] = getattr(orgs[0], rel_field)
if sources:
trimmed['sources'] = []
source_ids = []
for o in orgs:
org_source = json.loads(o.source)
if org_source['id'] not in source_ids:
trimmed['sources'].append(org_source)
source_ids.append(org_source['id'])
trimmed['confidence'] = REVERSE_CONFIDENCE[int(orgs[0].confidence)].title()
trimmed_hierarchy.append((lowest_index, trimmed))
hierarchy = [i[1] for i in sorted(trimmed_hierarchy, key=lambda x: x[0])]
return hierarchy
# this makes an edge list that shows the parent relationships (see child_id)
solr = pysolr.Solr(settings.SOLR_URL)
def get_org_hierarchy_by_id(org_id,
when=None,
sources=False,
direction='up',
authenticated=False):
'''
org_id: uuid for the organization
when: date for limiting the search
'''
base_url = settings.SOLR_URL
if direction == 'up':
from_ = 'child'
to = 'parent'
elif direction == 'down':
from_ = 'parent'
to = 'child'
filter_query = '{!graph from=composition_%s_id_s_fct to=composition_%s_id_s_fct returnRoot=true}composition_%s_id_s_fct:%s' % (from_, to, from_, org_id)
if when:
filter_query += ' AND {!field f=composition_daterange_dr op=contains}%s' % when
if not authenticated:
filter_query += ' AND published_b:T'
results = solr.search('*:*', fq=filter_query)
if when:
for result in results:
for key in [from_, to]:
org = result['composition_{}_id_s_fct'.format(key)]
args = (org, when)
query = 'commander_org_id_s_fct:%s AND {!field f=commander_assignment_range_dr op=contains}%s' % args
if not authenticated:
query += ' AND published_b:T'
commanders = solr.search(query)
# We need to deduplicate commanders and then throw out the open ended date ranges.
result['commanders-{}'.format(key)] = []
for commander in commanders:
label_fmt = '{name} ({start} - {end})'
assignment_range = commander['commander_assignment_range_dr']
start, end = assignment_range.replace('[', '').replace(']', '').split(' TO ')
if start == '*':
start = '?'
if end == '*':
end = '?'
label = label_fmt.format(name=commander['commander_person_name_s'],
start=start,
end=end)
commander['label'] = label
result['commanders-{}'.format(key)].append(commander)
return results
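# Usage sketch (hypothetical id):
# results = get_org_hierarchy_by_id('abc-123', when='2017-01-01')
# Each Solr doc then carries 'commanders-parent'/'commanders-child' lists
# with a 'label' like 'Jane Doe (2016-01-01 - ?)' for that date.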
def get_child_orgs_by_id(org_id, when=None, sources=False):
hierarchy = '''
WITH RECURSIVE parents AS (
SELECT
o.*,
NULL::VARCHAR AS parent_id,
NULL::VARCHAR AS parent_name,
NULL::DATE AS start_date,
NULL::DATE AS end_date,
NULL::VARCHAR AS comp_open_ended,
NULL::VARCHAR AS source,
NULL::VARCHAR AS confidence
FROM organization As o
WHERE id = %s
UNION
SELECT
o.*,
h.parent_id::VARCHAR AS parent_id,
parents.name AS parent_name,
h.start_date::date,
h.end_date::date,
h.open_ended AS comp_open_ended,
row_to_json(ss.*)::VARCHAR AS source,
ccc.confidence
FROM organization AS o
JOIN composition AS h
ON o.id = h.child_id
JOIN composition_compositionchild AS ccc
ON h.id = ccc.object_ref_id
LEFT JOIN composition_compositionchild_sources AS cccs
ON ccc.id = cccs.compositionchild_id
LEFT JOIN source_source AS ss
ON cccs.source_id = ss.uuid
JOIN parents
ON parents.id = h.parent_id
) SELECT * FROM parents WHERE id != %s
'''
q_args = [org_id, org_id]
if when:
hierarchy = '''
{}
AND CASE
WHEN (start_date IS NOT NULL AND
end_date IS NOT NULL AND
comp_open_ended IN ('N', 'E'))
THEN (%s::date BETWEEN start_date::date AND end_date::date)
WHEN (start_date IS NOT NULL AND
end_date IS NOT NULL AND
comp_open_ended = 'Y')
THEN (%s::date BETWEEN start_date::date AND NOW()::date)
WHEN (start_date IS NOT NULL AND
end_date IS NULL AND
comp_open_ended IN ('N', 'E'))
THEN (start_date::date = %s::date)
WHEN (start_date IS NOT NULL AND
end_date IS NULL AND
comp_open_ended = 'Y')
THEN (%s::date BETWEEN start_date::date AND NOW()::date)
WHEN (start_date IS NULL AND
end_date IS NOT NULL AND
comp_open_ended IN ('N', 'E'))
THEN (end_date::date = %s)
WHEN (start_date IS NULL AND
end_date IS NOT NULL AND
comp_open_ended = 'Y')
THEN TRUE
END
'''.format(hierarchy)
q_args.extend([when] * 5)
hierarchy = '{} ORDER BY id'.format(hierarchy)
hierarchy = generate_hierarchy(hierarchy, q_args, 'parent_id', sources=sources)
return hierarchy
def deleted_in_str(objects):
index = 0
for obj in objects:
if isinstance(obj, list):
objects[index] = deleted_in_str(obj)
else:
if hasattr(obj, 'field_name'):
name = obj.field_name + ": " + str(obj)
else:
name = type(obj).__name__ + ": " + str(obj)
if '_sources' in name:
objects[index] = "Object sources"
else:
objects[index] = name
index += 1
return objects
def import_class(cl):
d = cl.rfind('.')
classname = cl[d+1:len(cl)]
m = __import__(cl[0:d], globals(), locals(), [classname])
return getattr(m, classname)
def format_facets(facet_dict):
'''
pysolr formats facets in a weird way. This helper function converts their
list-like data structure into a dict that we can iterate more easily.
Basic idea: convert counts from a list to a dict containing a list of tuples
and a flag for whether any facets were found, e.g.:
['foo', 1, 'bar', 2] --> {'any': True, 'counts': [('foo', 1), ('bar', 2)]}
'''
facet_types = ['facet_queries', 'facet_fields', 'facet_ranges',
'facet_heatmaps', 'facet_intervals']
out = {}
for ftype, facets in facet_dict.items():
updated_facets = {}
for facet, items in facets.items():
if isinstance(items, dict):
# Ranges have a slightly different format
item_list = items['counts']
else:
item_list = items
counts = []
for i, el in enumerate(item_list):
# The attribute name always comes first; use them as keys
if i % 2 == 0:
count = item_list[i+1]
counts.append((el, count))
else:
# We already bunched this one, so skip it
continue
updated_facets[facet] = {}
updated_facets[facet]['counts'] = counts
# Check to see if there are any facets in this category
any_facets = sum(count[1] for count in counts) > 0
updated_facets[facet]['any'] = any_facets
out[ftype] = updated_facets
return out
def get_command_edges(org_id, when=None, parents=True):
edges = []
if parents:
hierarchy_list = get_org_hierarchy_by_id(org_id, when=when)
from_key, to_key = 'id', 'child_id'
else:
hierarchy_list = get_child_orgs_by_id(org_id, when=when)
from_key, to_key = 'parent_id', 'id'
# Iterate over the hierarchy_list, and create nodes
for org in hierarchy_list:
edges.append({'from': str(org[from_key]), 'to': org[to_key]})
return edges
def get_command_nodes(org_id, when=None):
hierarchy_list = get_org_hierarchy_by_id(org_id, when=when)
# Iterate over the hierarchy_list, and convert/modify hierarchy object, as needed.
nodes = []
for org in hierarchy_list:
trimmed = {
'id': str(org['id']),
'label': org['label'],
'detail_id': org['detail_id']
}
nodes.append(trimmed)
return nodes
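# Sketch of one node entry, consumed alongside get_command_edges
# (hypothetical values):
# {'id': '11', 'label': '<b>1 Division</b>\n\nJane Doe', 'detail_id': 'abc-123'}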
def get_source_context(field_name, access_point, uncommitted=True):
context = {
'field_name': field_name,
'uncommitted': uncommitted,
'id': access_point.uuid,
'publication': access_point.source.publication,
'publication_country': access_point.source.publication_country,
'title': access_point.source.title,
'date_added': None,
'published_on': str(access_point.source.published_date),
'access_point': str(access_point),
'source_url': access_point.source.source_url,
'source_detail_url': reverse('view-source', kwargs={'pk': access_point.source.uuid}),
'archive_url': access_point.archive_url,
'source_id': access_point.source.uuid,
'page_number': access_point.trigger,
'accessed_on': None,
}
if access_point.source.date_added:
context['date_added'] = access_point.source.date_added.strftime('%Y-%m-%d')
if access_point.accessed_on:
context['accessed_on'] = access_point.accessed_on.strftime('%Y-%m-%dT%H:%M:%S')
return context
| makeIt | identifier_name |
utils.py | import re
import importlib
from collections import namedtuple
import itertools
import json
from io import StringIO, BytesIO
import zipfile
import csv
import pysolr
from reversion.models import Version
from django.conf import settings
from django.utils.translation import gettext as _
from django.contrib.auth.decorators import login_required
from django.db import connection
from django.urls import reverse
CONFIDENCE_MAP = {
'low': 1,
'medium': 2,
'high': 3,
}
REVERSE_CONFIDENCE = {v:k for k,v in CONFIDENCE_MAP.items()}
class Autofill(object):
'''
Helper class for getting attributes that we already know about entities
based on autocomplete queries.
'''
def __init__(self, objects=[], simple_attrs=[], complex_attrs=[],
list_attrs=[], set_attrs={}):
# Model objects that we want to query
self.objects = objects
# Simple (non-complex) fields
self.simple_attrs = simple_attrs
# Complex single-select fields
self.complex_attrs = complex_attrs
# Complex multiselect fields
self.list_attrs = list_attrs
# Querysets from foreign key relationships
# (Requires both the attr name, and the foreign key name,
# like: {'membershippersonmember_set': 'organization'})
self.set_attrs = set_attrs
@property
def attrs(self):
collected_attrs = []
for obj in self.objects:
obj_data = {
'text': str(obj.name),
'id': obj.id
}
# Add optional attributes, with confidence values, to the results
for attr in self.simple_attrs:
try:
val = getattr(obj, attr).get_value()
except AttributeError:
val = None
if val:
if attr.endswith('date'):
display_value = repr(val.value)
else:
display_value = str(val.value)
attr_confidence = val.confidence
else:
display_value = ''
attr_confidence = '1'
obj_data[attr] = display_value
obj_data[attr + '_confidence'] = attr_confidence
# Differentiate id/text for complex attributes
for attr in self.complex_attrs:
try:
val = getattr(obj, attr).get_value()
except AttributeError:
val = None
if val:
val_id = val.id
val_text = str(val.value)
attr_confidence = val.confidence
else:
val_id, val_text = '', ''
attr_confidence = '1'
obj_data[attr] = {}
obj_data[attr]['id'] = val_id
obj_data[attr]['text'] = val_text
obj_data[attr + '_confidence'] = attr_confidence
# Add optional attributes that are lists
for attr in self.list_attrs:
try:
lst = getattr(obj, attr).get_list()
except AttributeError:
lst = []
lst_no_nulls = [inst.get_value() for inst in lst if inst.get_value()]
if any(lst_no_nulls):
lst_confidence = lst_no_nulls[0].confidence
else:
lst_confidence = '1'
cleaned_lst = []
for inst in lst_no_nulls:
if attr != 'classification':
cleaned_lst.append({
'id': inst.id,
'text': str(inst.value)
})
else:
                        # For classifications, we want to get the Classification
                        # model, not the OrganizationClassification model
cleaned_lst.append({
'id': inst.value.id,
'text': str(inst.value)
})
obj_data[attr] = cleaned_lst
obj_data[attr + '_confidence'] = lst_confidence
# Add objects corresponding to foreign keys
for attr, fkey in self.set_attrs.items():
try:
lst = getattr(obj, attr).all()
except AttributeError:
lst = []
lst_refs = [getattr(inst.object_ref, fkey) for inst in lst
if getattr(inst.object_ref, fkey, None)]
lst_values = [inst.get_value().value for inst in lst_refs if inst.get_value()]
# We need to traverse the relationship again due to the particular
# membership relationships on complex fields
lst_values = [inst.get_value() for inst in lst_values if inst.get_value()]
if any(lst_values):
lst_confidence = lst_values[0].confidence
else:
lst_confidence = '1'
cleaned_lst = []
for inst in lst_values:
cleaned_lst.append({
'id': inst.id,
'text': str(inst.value)
})
obj_data[attr] = cleaned_lst
obj_data[attr + '_confidence'] = lst_confidence
collected_attrs.append(obj_data)
return collected_attrs
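# A minimal sketch of how Autofill might be driven (the model and field names
# here are illustrative, not taken from this codebase):
# autofill = Autofill(
#     objects=Person.objects.filter(id__in=matched_ids),
#     simple_attrs=['date_of_birth'],
#     complex_attrs=['division'],
#     list_attrs=['aliases'],
#     set_attrs={'membershippersonmember_set': 'organization'},
# )
# payload = autofill.attrs  # list of dicts ready for an autocomplete response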
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
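# A quick sketch of DictDiffer (values are illustrative):
# differ = DictDiffer({'name': 'Unit B', 'rank': 1}, {'name': 'Unit A', 'rank': 1})
# differ.changed()   -> {'name'}
# differ.unchanged() -> {'rank'}
# differ.added() and differ.removed() are both empty, since the key sets match.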
class VersionsMixin(object):
'''
Model mixin to get version diff for a given model instance
'''
def _getDiff(self, differ):
skip_fields = ['date_updated', 'id']
def makeIt(change_type):
for field in getattr(differ, change_type)():
if field not in skip_fields:
if change_type == 'changed':
yield {
'field_name': field,
'to': differ.current_dict[field],
'from': differ.past_dict[field],
}
elif change_type == 'added':
yield {
'field_name': field,
'to': differ.current_dict[field],
'from': None
}
elif change_type == 'removed':
yield {
'field_name': field,
'to': None,
'from': differ.past_dict[field]
}
additions = [a for a in makeIt('added')]
changes = [c for c in makeIt('changed')]
removals = [r for r in makeIt('removed')]
return additions, changes, removals
def getRevisions(self, versions):
from source.models import Source, AccessPoint
revisions = []
for version in versions:
complete_revision = {
'id': version.revision.id
}
revision_meta = {
'modification_date': version.revision.date_created,
'comment': version.revision.comment,
'user': version.revision.user,
}
complex_list_models = [c.field_model._meta.model_name for c in getattr(self, 'complex_lists', [])]
for object_property in version.revision.version_set.all():
if object_property.object != self or isinstance(self, Source):
serialized_data = json.loads(object_property.serialized_data)[0]
# a bit of a hack in order to get sources and access points
# to work
field_names = []
if 'value' in serialized_data['fields']:
field_names.append((serialized_data['fields']['value'],
serialized_data['model'].split('.')[1]))
else:
for field in serialized_data['fields']:
field_names.append((serialized_data['fields'][field], field))
for value, field_name in field_names:
if field_name in complex_list_models:
try:
complete_revision[field_name].add(value)
except KeyError:
complete_revision[field_name] = {value}
else:
complete_revision[field_name] = value
revisions.append((complete_revision, version.revision))
return revisions
def getDifferences(self, revisions):
differences = []
for index, (version, revision) in enumerate(revisions):
if (index - 1) > 0:
try:
previous, previous_revision = revisions[index - 1]
except (IndexError, AssertionError):
continue
else:
continue
differ = DictDiffer(previous, version)
fields_added, fields_changed, fields_removed = self._getDiff(differ)
diff = {
'modification_date': previous_revision.date_created,
'comment': previous_revision.comment,
'user': previous_revision.user,
'from_id': version['id'],
'to_id': previous_revision.id,
'fields_added': fields_added,
'fields_changed': fields_changed,
'fields_removed': fields_removed,
'model': self._meta.object_name,
}
differences.append(diff)
return differences
def getVersions(self, versions=None):
if not versions:
versions = Version.objects.get_for_object(self)
revisions = self.getRevisions(versions)
return self.getDifferences(revisions)
def execute_sql(file_path):
'''
Execute arbitrary SQL code from a file location.
'''
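    # Example (hypothetical path): execute_sql('sql/create_osm_indexes.sql')
    # Statements are split naively on ';', so this helper only suits files
    # that do not embed semicolons inside string literals or function bodies.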
with open(file_path) as f:
statements = f.read().split(';')
with connection.cursor() as c:
for statement in statements:
if statement.strip():
c.execute(statement.strip())
def class_for_name(class_name, module_name="person.models"):
# Check for irregular class names (names where we cannot infer the class
# name by capitalizing the first letter of class_name)
irregular_names = (
('Membershipperson', 'MembershipPerson'),
('Membershiporganization', 'MembershipOrganization'),
('Personextra', 'PersonExtra'),
('Personbiography', 'PersonBiography')
)
for name, formatted_name in irregular_names:
if class_name == name:
class_name = formatted_name
break
if class_name not in settings.ALLOWED_CLASS_FOR_NAME:
raise Exception("Unallowed class for name")
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
return class_
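# Illustrative calls (assuming person.models defines these classes and
# settings.ALLOWED_CLASS_FOR_NAME permits them):
# class_for_name('Person')           -> person.models.Person
# class_for_name('Membershipperson') -> person.models.MembershipPerson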
def get_osm_by_id(osm_id):
osm_feature = None
cursor = connection.cursor()
    # Pass osm_id as a bound parameter rather than interpolating it into
    # the SQL string, matching the style of get_hierarchy_by_id() below.
    query = '''
        SELECT
          ST_X(ST_Centroid(geometry)),
          ST_Y(ST_Centroid(geometry)),
          *
        FROM osm_data
        WHERE id = %s
    '''
    cursor.execute(query, [osm_id])
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('OSMFeature', columns)
row = cursor.fetchone()
if row:
osm_feature = results_tuple(*row)
return osm_feature
def get_hierarchy_by_id(osm_id):
hierarchy = '''
SELECT parents.*
FROM osm_data AS parents
JOIN (
SELECT
UNNEST(hierarchy) AS h_id,
localname,
tags,
admin_level,
name,
geometry
FROM osm_data
WHERE id = %s
) AS child
ON parents.id = child.h_id::integer
'''
cursor = connection.cursor()
cursor.execute(hierarchy, [osm_id])
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('OSMFeature', columns)
hierarchy = [results_tuple(*r) for r in cursor]
return hierarchy
def generate_hierarchy(query, q_args, rel_field, sources=False):
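    # Rows are grouped by organization id below; the lowest row index in each
    # group is retained so the final list preserves the query's ordering.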
cursor = connection.cursor()
cursor.execute(query, q_args)
columns = [c[0] for c in cursor.description]
results_tuple = namedtuple('Organization', columns)
hierarchy = [(idx, results_tuple(*r)) for idx, r in enumerate(cursor)]
trimmed_hierarchy = []
for org_id, orgs in itertools.groupby(hierarchy, key=lambda x: x[1].id):
group = list(orgs)
lowest_index = min(g[0] for g in group)
orgs = [o[1] for o in group]
start_date = None
if orgs[0].start_date:
start_date = orgs[0].start_date
end_date = None
if orgs[0].end_date:
end_date = orgs[0].end_date
# Create a label, which we display on the charts for person and unit "parents."
label = '<b>' + orgs[0].name + '</b>' + '\n\n' + _('Unknown commander')
if orgs[0].commander:
label = '<b>' + orgs[0].name + '</b>' + '\n\n' + orgs[0].commander
trimmed = {
'id': org_id,
'label': str(label),
'detail_id': str(orgs[0].org_org_id),
'name': orgs[0].name,
'other_names': list({o.alias.strip() for o in orgs if o.alias}),
'classifications': list({o.classification.strip() for o in orgs if o.classification}),
'division_id': orgs[0].division_id,
'date_first_cited': start_date,
'date_last_cited': end_date,
'commander': orgs[0].commander,
}
trimmed[rel_field] = getattr(orgs[0], rel_field)
if sources:
trimmed['sources'] = []
source_ids = []
for o in orgs:
org_source = json.loads(o.source)
if org_source['id'] not in source_ids:
trimmed['sources'].append(org_source)
source_ids.append(org_source['id'])
trimmed['confidence'] = REVERSE_CONFIDENCE[int(orgs[0].confidence)].title()
trimmed_hierarchy.append((lowest_index, trimmed))
hierarchy = [i[1] for i in sorted(trimmed_hierarchy, key=lambda x: x[0])]
return hierarchy
solr = pysolr.Solr(settings.SOLR_URL)
def get_org_hierarchy_by_id(org_id,
when=None,
sources=False,
direction='up',
authenticated=False):
'''
org_id: uuid for the organization
when: date for limiting the search
'''
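    # The Solr {!graph} query parser below walks composition edges from the
    # given organization; returnRoot=true keeps the starting node in the results.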
base_url = settings.SOLR_URL
if direction == 'up':
from_ = 'child'
to = 'parent'
elif direction == 'down':
from_ = 'parent'
to = 'child'
filter_query = '{!graph from=composition_%s_id_s_fct to=composition_%s_id_s_fct returnRoot=true}composition_%s_id_s_fct:%s' % (from_, to, from_, org_id)
if when:
filter_query += ' AND {!field f=composition_daterange_dr op=contains}%s' % when
if not authenticated:
filter_query += ' AND published_b:T'
results = solr.search('*:*', fq=filter_query)
if when:
for result in results:
for key in [from_, to]:
org = result['composition_{}_id_s_fct'.format(key)]
args = (org, when)
query = 'commander_org_id_s_fct:%s AND {!field f=commander_assignment_range_dr op=contains}%s' % args
if not authenticated:
query += ' AND published_b:T'
commanders = solr.search(query)
# We need to deduplicate commanders and then throw out the open ended date ranges.
result['commanders-{}'.format(key)] = []
for commander in commanders:
label_fmt = '{name} ({start} - {end})'
assignment_range = commander['commander_assignment_range_dr']
start, end = assignment_range.replace('[', '').replace(']', '').split(' TO ')
if start == '*':
start = '?'
if end == '*':
end = '?'
label = label_fmt.format(name=commander['commander_person_name_s'],
start=start,
end=end)
commander['label'] = label
result['commanders-{}'.format(key)].append(commander)
return results
def get_child_orgs_by_id(org_id, when=None, sources=False):
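    # Recursive CTE: the anchor member selects the root organization, and the
    # recursive member repeatedly joins composition rows whose parent is already
    # in `parents`, walking the hierarchy downward one level per iteration.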
hierarchy = '''
WITH RECURSIVE parents AS (
SELECT
o.*,
NULL::VARCHAR AS parent_id,
NULL::VARCHAR AS parent_name,
NULL::DATE AS start_date,
NULL::DATE AS end_date,
NULL::VARCHAR AS comp_open_ended,
NULL::VARCHAR AS source,
NULL::VARCHAR AS confidence
FROM organization As o
WHERE id = %s
UNION
SELECT
o.*,
h.parent_id::VARCHAR AS parent_id,
parents.name AS parent_name,
h.start_date::date,
h.end_date::date,
h.open_ended AS comp_open_ended,
row_to_json(ss.*)::VARCHAR AS source,
ccc.confidence
FROM organization AS o
JOIN composition AS h
ON o.id = h.child_id
JOIN composition_compositionchild AS ccc
ON h.id = ccc.object_ref_id
LEFT JOIN composition_compositionchild_sources AS cccs
ON ccc.id = cccs.compositionchild_id
LEFT JOIN source_source AS ss
ON cccs.source_id = ss.uuid
JOIN parents
ON parents.id = h.parent_id
) SELECT * FROM parents WHERE id != %s
'''
q_args = [org_id, org_id]
if when:
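        # Restrict to compositions active on the given date: `comp_open_ended`
        # ('Y' means still open) decides whether a missing end date is treated
        # as "ongoing" or as a single-day range when testing the date below.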
hierarchy = '''
{}
AND CASE
WHEN (start_date IS NOT NULL AND
end_date IS NOT NULL AND
comp_open_ended IN ('N', 'E'))
THEN (%s::date BETWEEN start_date::date AND end_date::date)
WHEN (start_date IS NOT NULL AND
end_date IS NOT NULL AND
comp_open_ended = 'Y')
THEN (%s::date BETWEEN start_date::date AND NOW()::date)
WHEN (start_date IS NOT NULL AND
end_date IS NULL AND
comp_open_ended IN ('N', 'E'))
THEN (start_date::date = %s::date)
WHEN (start_date IS NOT NULL AND
end_date IS NULL AND
comp_open_ended = 'Y')
THEN (%s::date BETWEEN start_date::date AND NOW()::date)
WHEN (start_date IS NULL AND
end_date IS NOT NULL AND
comp_open_ended IN ('N', 'E'))
THEN (end_date::date = %s)
WHEN (start_date IS NULL AND
end_date IS NOT NULL AND
comp_open_ended = 'Y')
THEN TRUE
END
'''.format(hierarchy)
q_args.extend([when] * 5)
hierarchy = '{} ORDER BY id'.format(hierarchy)
hierarchy = generate_hierarchy(hierarchy, q_args, 'parent_id', sources=sources)
return hierarchy
def deleted_in_str(objects):
index = 0
for obj in objects:
if isinstance(obj, list):
objects[index] = deleted_in_str(obj)
else:
if hasattr(obj, 'field_name'):
name = obj.field_name + ": " + str(obj)
else:
name = type(obj).__name__ + ": " + str(obj)
if '_sources' in name:
objects[index] = "Object sources"
else:
objects[index] = name
index += 1
return objects
def import_class(cl):
|
def format_facets(facet_dict):
'''
pysolr formats facets in a weird way. This helper function converts their
list-like data structure into a dict that we can iterate more easily.
Basic idea: convert counts from a list to a dict containing a list of tuples
and a flag for whether any facets were found, e.g.:
    ['foo', 1, 'bar', 2] --> {'any': True, 'counts': [('foo', 1), ('bar', 2)]}
'''
facet_types = ['facet_queries', 'facet_fields', 'facet_ranges',
'facet_heatmaps', 'facet_intervals']
out = {}
for ftype, facets in facet_dict.items():
updated_facets = {}
for facet, items in facets.items():
if isinstance(items, dict):
# Ranges have a slightly different format
item_list = items['counts']
else:
item_list = items
counts = []
for i, el in enumerate(item_list):
# The attribute name always comes first; use them as keys
if i % 2 == 0:
count = item_list[i+1]
counts.append((el, count))
else:
# We already bunched this one, so skip it
continue
updated_facets[facet] = {}
updated_facets[facet]['counts'] = counts
# Check to see if there are any facets in this category
any_facets = sum(count[1] for count in counts) > 0
updated_facets[facet]['any'] = any_facets
out[ftype] = updated_facets
return out
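# Sketch of format_facets on a pysolr-style payload (the input shape is assumed):
# raw = {'facet_fields': {'country': ['mx', 3, 'ng', 1]}}
# format_facets(raw)['facet_fields']['country']
# -> {'counts': [('mx', 3), ('ng', 1)], 'any': True}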
# This makes an edge list that shows the parent relationships (see child_id).
def get_command_edges(org_id, when=None, parents=True):
edges = []
if parents:
hierarchy_list = get_org_hierarchy_by_id(org_id, when=when)
from_key, to_key = 'id', 'child_id'
else:
hierarchy_list = get_child_orgs_by_id(org_id, when=when)
from_key, to_key = 'parent_id', 'id'
# Iterate over the hierarchy_list, and create nodes
for org in hierarchy_list:
        edges.append({'from': str(org[from_key]), 'to': str(org[to_key])})
return edges
def get_command_nodes(org_id, when=None):
hierarchy_list = get_org_hierarchy_by_id(org_id, when=when)
# Iterate over the hierarchy_list, and convert/modify hierarchy object, as needed.
nodes = []
for org in hierarchy_list:
trimmed = {
'id': str(org['id']),
'label': org['label'],
'detail_id': org['detail_id']
}
nodes.append(trimmed)
return nodes
def get_source_context(field_name, access_point, uncommitted=True):
context = {
'field_name': field_name,
'uncommitted': uncommitted,
'id': access_point.uuid,
'publication': access_point.source.publication,
'publication_country': access_point.source.publication_country,
'title': access_point.source.title,
'date_added': None,
'published_on': str(access_point.source.published_date),
'access_point': str(access_point),
'source_url': access_point.source.source_url,
'source_detail_url': reverse('view-source', kwargs={'pk': access_point.source.uuid}),
'archive_url': access_point.archive_url,
'source_id': access_point.source.uuid,
'page_number': access_point.trigger,
'accessed_on': None,
}
if access_point.source.date_added:
context['date_added'] = access_point.source.date_added.strftime('%Y-%m-%d')
if access_point.accessed_on:
context['accessed_on'] = access_point.accessed_on.strftime('%Y-%m-%dT%H:%M:%S')
return context
| d = cl.rfind('.')
classname = cl[d+1:len(cl)]
m = __import__(cl[0:d], globals(), locals(), [classname])
return getattr(m, classname) | identifier_body |
listing-ctrl.js | angular.module('listing.roommi', ['uiGmapgoogle-maps', 'ui.bootstrap', 'parse-angular', 'fbook.roommi'])
.config(function(uiGmapGoogleMapApiProvider) {
console.log('config maps');
uiGmapGoogleMapApiProvider.configure({
key: 'AIzaSyCCMEJsPzyGW-oLOShTOJw_Pe9Qv2YMAZo',
v: '3.17',
libraries: 'places'
});
})
.controller("ListingCtrl", function($scope, $rootScope, $state, $stateParams, $modal, fbookFactory, FacebookAngularPatch, uiGmapGoogleMapApi, markersFactory) {
$scope.pictureFiles = [];
$scope.picUrls = [];
var Listing = Parse.Object.extend("listing");
var userquery = new Parse.Query(Parse.User);
var query = new Parse.Query(Listing);
var listing = $stateParams.listingId;
console.log(listing);
query.get(listing, {
success: function(listing) {
console.log(listing);
$scope.listing = listing;
$scope.listing.listingid = listing.id;
$scope.listing.ownerId = listing.get('ownerId');
$scope.listing.apps = listing.get('applications');
$scope.listing.appids = _.pluck($scope.listing.apps, 'id');
$scope.listing.address = listing.get("address");
$scope.pageTitle = $scope.listing.address;
$scope.place = { types: ['street_address'] };
$scope.listing.beds = listing.get("beds");
$scope.listing.baths = listing.get("baths");
$scope.listing.rent = listing.get("rent");
            $scope.listing.aptnum = listing.get("aptnum");
$scope.listing.tempdt = listing.get("availableDate");
$scope.listing.dt = $scope.listing.tempdt.toLocaleDateString();
$scope.listing.tempenddt = listing.get("enddate");
$scope.listing.enddt = $scope.listing.tempenddt.toLocaleDateString();
$scope.listing.gender = listing.get("gender");
$scope.listing.desc = listing.get("description");
$scope.listing.templaundry = listing.get("laundry");
$scope.listing.tempair = listing.get("air");
$scope.listing.appcount = listing.get('applications').length;
$scope.pictureFiles[0] = $scope.listing.get("photo0");
$scope.pictureFiles[1] = $scope.listing.get("photo1");
$scope.pictureFiles[2] = $scope.listing.get("photo2");
$scope.pictureFiles[3] = $scope.listing.get("photo3");
$scope.pictureFiles[4] = $scope.listing.get("photo4");
$scope.picUrls[0] = $scope.pictureFiles[0].url();
$scope.picUrls[1] = $scope.pictureFiles[1].url();
$scope.picUrls[2] = $scope.pictureFiles[2].url();
$scope.picUrls[3] = $scope.pictureFiles[3].url();
$scope.picUrls[4] = $scope.pictureFiles[4].url();
$scope.share = {};
var desclen = $scope.listing.desc.length;
var desccut = "";
if (desclen < 141) {
desccut = $scope.listing.desc;
} else {
desccut = $scope.listing.desc.substr(0, 140) + "...";
};
            $scope.share.title = $scope.listing.beds + " bed, " + $scope.listing.baths + " bath for $" + $scope.listing.rent + "/month";
$scope.share.desc = "Located at " + $scope.listing.address + " and available starting " + $scope.listing.dt + ". " + desccut;
$scope.share.url = "http://piip.parseapp.com/#!/listing/" + $scope.listing.listingid;
$scope.share.geopoint = $scope.listing.get('geopoint');
//$scope.share.url = "http://piip.parseapp.com/?_escaped_fragment_=%2Flisting%2F" + $scope.listing.listingid;
$scope.share.image = $scope.picUrls[0];
$scope.$emit('metaUpdate', $scope.share);
$("<img/>").attr("src", $scope.picUrls[0]).load(function(){
                var s = {w: this.width, h: this.height};
                $scope.share.imgw = s.w;
                $scope.share.imgh = s.h;
$scope.$emit('metaUpdate', $scope.share);
console.log('$emit triggered');
});
if ($scope.listing.gender == 'both') {
$scope.listing.gender = 'Male and Female';
}
if ($scope.listing.templaundry == true) {
$scope.listing.laundry = 'In Building';
}
if ($scope.listing.tempair == true) {
$scope.listing.air = 'A/C';
}
$scope.map = {
center: { latitude: $scope.listing.attributes.geopoint._latitude, longitude: $scope.listing.attributes.geopoint._longitude },
zoom: 13
// options: {
// types: "locality, neighborhood, postal_town, colloquial_area, political, postal_code"
// }
};
$scope.marker = {
id: listing.id,
coords: { latitude: $scope.listing.attributes.geopoint._latitude, longitude: $scope.listing.attributes.geopoint._longitude },
};
uiGmapGoogleMapApi.then(function(maps) {
});
userquery.get($scope.listing.ownerId, {
success: function(profile) {
$scope.profile = profile;
$scope.profile.name = profile.get('name');
$scope.profile.email = profile.get('email');
$scope.profile.gender = profile.get('gender');
$scope.profile.age = profile.get('age');
$scope.profile.education = profile.get('education').reverse();
$scope.profile.work = profile.get('work');
$scope.profile.propics = profile.get('imgUser');
},
error: function(error) {
console.log(error);
}
})
},
error: function(error) {
console.log(error);
}
});
$scope.listflag = function () {
$scope.listing.increment('flag');
$scope.listing.save();
$scope.flagged = true;
}
$scope.apply = function () {
if ($rootScope.currentUser) {
var tempindex = _.indexOf($scope.listing.appids, $rootScope.currentUser.id);
if (tempindex == -1) {
$scope.listing.addUnique('applications', {
id: $rootScope.currentUser.id,
appstatus: 'new',
chatstatus: false,
modifieddt: new Date()
});
$rootScope.currentUser.addUnique('applications', $scope.listing.listingid);
$scope.listing.save(null, {
success: function(result) {
// Notification Email
Parse.Cloud.run('emailNewApp', {
recname: $scope.profile.name,
recemail: $scope.profile.email,
sender: $rootScope.currentUser.get('name')
}, {
success: function(message) {
},
error: function(result, error) {
console.log(error)
}
});
},
error: function(result, error) {
console.log(error);
}
});
$rootScope.currentUser.save(null, {
success: function(result) {
$scope.applyalert = "Applied!";
},
error: function(error) {
console.log(error);
}
});
} else {
$scope.applyalert = "Applied!";
};
};
}
$scope.fbConnect = function () {
// NB: this is a contrived example for demo purposes, you would never write the following code in a real app
// normally you would define a User.js data module for all your user objects and the method below would be on the user, e.g. $rootScope.currentUser.fbConnect()
Parse.FacebookUtils.logIn("public_profile,email,user_birthday,user_education_history,user_photos,user_work_history", {}).then(function(user) {
console.log('facebook connected!');
$rootScope.currentUser = Parse.User.current();
updateProf();
}, function(error) {
console.log('something went wrong, try again');
});
}
$scope.fbShare = function() {
FB.ui({
method: 'share',
url: 'http://piip.parseapp.com/#!/listing/' + $scope.listing.listingid,
title: $scope.share.title,
description: $scope.share.desc,
image: $scope.share.image,
href: 'http://piip.parseapp.com/#!/listing/' + $scope.listing.listingid
}, function(response){});
}
var user = {};
function | (dateString) {
var today = new Date();
var birthDate = new Date(dateString);
var age = today.getFullYear() - birthDate.getFullYear();
var m = today.getMonth() - birthDate.getMonth();
if (m < 0 || (m === 0 && today.getDate() < birthDate.getDate())) {
age--;
}
return age;
}
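        // e.g. given '1990-08-15' this returns the full years elapsed, decremented
        // when the birthday has not yet occurred this year (note that Date string
        // parsing is environment-dependent for non-ISO formats).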
// FACEBOOK PROFILE UPDATE
updateProf = function () {
FB.apiAngular('me?fields=id,name,first_name,last_name,email,birthday,education,gender,work,albums')
.then(function (profileData) {
user.profileData = profileData;
user.age = calculateAge(profileData.birthday);
})
.then(function (resList) {
user.profilePhotos = [];
for (var i = user.profileData.albums.data.length - 1; i >= 0; i--) {
if (user.profileData.albums.data[i].name == "Profile Pictures") {
user.albumid = user.profileData.albums.data[i].id;
};
};
FB.apiAngular('/'+user.albumid+'/photos')
.then(function (photoData) {
for (var i = 10 - 1; i >= 0; i--) {
user.profilePhotos.push(photoData.data[i].images[1].source);
};
var User = Parse.User.current();
var shortname = user.profileData.first_name + " " + user.profileData.last_name.charAt(0) + ".";
if (user.profileData.gender == 'male') {
user.profileData.gender = 'Male';
} else if (user.profileData.gender == 'female') {
user.profileData.gender = 'Female';
};
User.set("name",shortname);
User.set("fullname",user.profileData.name);
User.set("gender",user.profileData.gender);
User.set("bday",user.profileData.birthday);
User.set("education",user.profileData.education);
User.set("work",user.profileData.work);
User.set("imgUser",user.profilePhotos.reverse());
User.set("age",user.age);
User.set("email",user.profileData.email);
User.save(null, {
success: function(fbUser) {
// Execute any logic that should take place after the object is saved.
                    if (typeof $scope.$close === 'function') {
$scope.$close(user);
};
},
error: function(fbUser, error) {
// Execute any logic that should take place if the save fails.
// error is a Parse.Error with an error code and message.
//reject('Failed to create new object, with error code: ' + error.message);
console.log(error);
}
});
});
});
}
}) | calculateAge | identifier_name |
listing-ctrl.js | angular.module('listing.roommi', ['uiGmapgoogle-maps', 'ui.bootstrap', 'parse-angular', 'fbook.roommi'])
.config(function(uiGmapGoogleMapApiProvider) {
console.log('config maps');
uiGmapGoogleMapApiProvider.configure({
key: 'AIzaSyCCMEJsPzyGW-oLOShTOJw_Pe9Qv2YMAZo',
v: '3.17',
libraries: 'places'
});
})
.controller("ListingCtrl", function($scope, $rootScope, $state, $stateParams, $modal, fbookFactory, FacebookAngularPatch, uiGmapGoogleMapApi, markersFactory) {
$scope.pictureFiles = [];
$scope.picUrls = [];
var Listing = Parse.Object.extend("listing");
var userquery = new Parse.Query(Parse.User);
var query = new Parse.Query(Listing);
var listing = $stateParams.listingId;
console.log(listing);
query.get(listing, {
success: function(listing) {
console.log(listing);
$scope.listing = listing;
$scope.listing.listingid = listing.id;
$scope.listing.ownerId = listing.get('ownerId');
$scope.listing.apps = listing.get('applications');
$scope.listing.appids = _.pluck($scope.listing.apps, 'id');
$scope.listing.address = listing.get("address");
$scope.pageTitle = $scope.listing.address;
$scope.place = { types: ['street_address'] };
$scope.listing.beds = listing.get("beds");
$scope.listing.baths = listing.get("baths");
$scope.listing.rent = listing.get("rent");
            $scope.listing.aptnum = listing.get("aptnum");
$scope.listing.tempdt = listing.get("availableDate");
$scope.listing.dt = $scope.listing.tempdt.toLocaleDateString();
$scope.listing.tempenddt = listing.get("enddate");
$scope.listing.enddt = $scope.listing.tempenddt.toLocaleDateString();
$scope.listing.gender = listing.get("gender");
$scope.listing.desc = listing.get("description");
$scope.listing.templaundry = listing.get("laundry");
$scope.listing.tempair = listing.get("air");
$scope.listing.appcount = listing.get('applications').length;
$scope.pictureFiles[0] = $scope.listing.get("photo0");
$scope.pictureFiles[1] = $scope.listing.get("photo1");
$scope.pictureFiles[2] = $scope.listing.get("photo2");
$scope.pictureFiles[3] = $scope.listing.get("photo3");
$scope.pictureFiles[4] = $scope.listing.get("photo4");
$scope.picUrls[0] = $scope.pictureFiles[0].url();
$scope.picUrls[1] = $scope.pictureFiles[1].url();
$scope.picUrls[2] = $scope.pictureFiles[2].url();
$scope.picUrls[3] = $scope.pictureFiles[3].url();
$scope.picUrls[4] = $scope.pictureFiles[4].url();
$scope.share = {};
var desclen = $scope.listing.desc.length;
var desccut = "";
if (desclen < 141) {
desccut = $scope.listing.desc;
} else {
desccut = $scope.listing.desc.substr(0, 140) + "...";
};
            $scope.share.title = $scope.listing.beds + " bed, " + $scope.listing.baths + " bath for $" + $scope.listing.rent + "/month";
$scope.share.desc = "Located at " + $scope.listing.address + " and available starting " + $scope.listing.dt + ". " + desccut;
$scope.share.url = "http://piip.parseapp.com/#!/listing/" + $scope.listing.listingid;
$scope.share.geopoint = $scope.listing.get('geopoint');
//$scope.share.url = "http://piip.parseapp.com/?_escaped_fragment_=%2Flisting%2F" + $scope.listing.listingid;
$scope.share.image = $scope.picUrls[0];
$scope.$emit('metaUpdate', $scope.share);
$("<img/>").attr("src", $scope.picUrls[0]).load(function(){
                var s = {w: this.width, h: this.height};
                $scope.share.imgw = s.w;
                $scope.share.imgh = s.h;
$scope.$emit('metaUpdate', $scope.share);
console.log('$emit triggered');
});
if ($scope.listing.gender == 'both') {
$scope.listing.gender = 'Male and Female';
}
if ($scope.listing.templaundry == true) {
$scope.listing.laundry = 'In Building';
}
if ($scope.listing.tempair == true) {
$scope.listing.air = 'A/C';
}
$scope.map = {
center: { latitude: $scope.listing.attributes.geopoint._latitude, longitude: $scope.listing.attributes.geopoint._longitude },
zoom: 13
// options: {
// types: "locality, neighborhood, postal_town, colloquial_area, political, postal_code"
// }
};
$scope.marker = {
id: listing.id,
coords: { latitude: $scope.listing.attributes.geopoint._latitude, longitude: $scope.listing.attributes.geopoint._longitude },
};
uiGmapGoogleMapApi.then(function(maps) {
});
userquery.get($scope.listing.ownerId, {
success: function(profile) {
$scope.profile = profile;
$scope.profile.name = profile.get('name');
$scope.profile.email = profile.get('email');
$scope.profile.gender = profile.get('gender');
$scope.profile.age = profile.get('age');
$scope.profile.education = profile.get('education').reverse();
$scope.profile.work = profile.get('work');
$scope.profile.propics = profile.get('imgUser');
},
error: function(error) {
console.log(error);
}
})
},
error: function(error) {
console.log(error);
}
});
$scope.listflag = function () {
$scope.listing.increment('flag');
$scope.listing.save();
$scope.flagged = true;
}
$scope.apply = function () {
if ($rootScope.currentUser) {
var tempindex = _.indexOf($scope.listing.appids, $rootScope.currentUser.id);
if (tempindex == -1) {
$scope.listing.addUnique('applications', {
id: $rootScope.currentUser.id,
appstatus: 'new',
chatstatus: false,
modifieddt: new Date()
});
$rootScope.currentUser.addUnique('applications', $scope.listing.listingid);
$scope.listing.save(null, {
success: function(result) {
// Notification Email
Parse.Cloud.run('emailNewApp', {
recname: $scope.profile.name,
recemail: $scope.profile.email,
sender: $rootScope.currentUser.get('name')
}, {
success: function(message) {
},
error: function(result, error) {
console.log(error)
}
});
},
error: function(result, error) {
console.log(error);
}
});
$rootScope.currentUser.save(null, {
success: function(result) {
$scope.applyalert = "Applied!";
},
error: function(error) {
console.log(error);
}
});
} else {
$scope.applyalert = "Applied!";
};
};
}
$scope.fbConnect = function () {
// NB: this is a contrived example for demo purposes, you would never write the following code in a real app
// normally you would define a User.js data module for all your user objects and the method below would be on the user, e.g. $rootScope.currentUser.fbConnect()
Parse.FacebookUtils.logIn("public_profile,email,user_birthday,user_education_history,user_photos,user_work_history", {}).then(function(user) {
console.log('facebook connected!');
$rootScope.currentUser = Parse.User.current();
updateProf();
}, function(error) {
console.log('something went wrong, try again');
});
}
$scope.fbShare = function() {
FB.ui({
method: 'share',
url: 'http://piip.parseapp.com/#!/listing/' + $scope.listing.listingid,
title: $scope.share.title,
description: $scope.share.desc,
image: $scope.share.image,
href: 'http://piip.parseapp.com/#!/listing/' + $scope.listing.listingid
}, function(response){});
}
var user = {};
function calculateAge(dateString) {
var today = new Date();
var birthDate = new Date(dateString);
var age = today.getFullYear() - birthDate.getFullYear();
var m = today.getMonth() - birthDate.getMonth();
if (m < 0 || (m === 0 && today.getDate() < birthDate.getDate())) {
age--;
}
return age;
}
// FACEBOOK PROFILE UPDATE
updateProf = function () {
FB.apiAngular('me?fields=id,name,first_name,last_name,email,birthday,education,gender,work,albums')
.then(function (profileData) {
user.profileData = profileData;
user.age = calculateAge(profileData.birthday);
})
.then(function (resList) {
user.profilePhotos = [];
for (var i = user.profileData.albums.data.length - 1; i >= 0; i--) {
if (user.profileData.albums.data[i].name == "Profile Pictures") | ;
};
FB.apiAngular('/'+user.albumid+'/photos')
.then(function (photoData) {
for (var i = 10 - 1; i >= 0; i--) {
user.profilePhotos.push(photoData.data[i].images[1].source);
};
var User = Parse.User.current();
var shortname = user.profileData.first_name + " " + user.profileData.last_name.charAt(0) + ".";
if (user.profileData.gender == 'male') {
user.profileData.gender = 'Male';
} else if (user.profileData.gender == 'female') {
user.profileData.gender = 'Female';
};
User.set("name",shortname);
User.set("fullname",user.profileData.name);
User.set("gender",user.profileData.gender);
User.set("bday",user.profileData.birthday);
User.set("education",user.profileData.education);
User.set("work",user.profileData.work);
User.set("imgUser",user.profilePhotos.reverse());
User.set("age",user.age);
User.set("email",user.profileData.email);
User.save(null, {
success: function(fbUser) {
// Execute any logic that should take place after the object is saved.
                    if (typeof $scope.$close === 'function') {
$scope.$close(user);
};
},
error: function(fbUser, error) {
// Execute any logic that should take place if the save fails.
// error is a Parse.Error with an error code and message.
//reject('Failed to create new object, with error code: ' + error.message);
console.log(error);
}
});
});
});
}
}) | {
user.albumid = user.profileData.albums.data[i].id;
} | conditional_block |
listing-ctrl.js | angular.module('listing.roommi', ['uiGmapgoogle-maps', 'ui.bootstrap', 'parse-angular', 'fbook.roommi']) | v: '3.17',
libraries: 'places'
});
})
.controller("ListingCtrl", function($scope, $rootScope, $state, $stateParams, $modal, fbookFactory, FacebookAngularPatch, uiGmapGoogleMapApi, markersFactory) {
$scope.pictureFiles = [];
$scope.picUrls = [];
var Listing = Parse.Object.extend("listing");
var userquery = new Parse.Query(Parse.User);
var query = new Parse.Query(Listing);
var listing = $stateParams.listingId;
console.log(listing);
query.get(listing, {
success: function(listing) {
console.log(listing);
$scope.listing = listing;
$scope.listing.listingid = listing.id;
$scope.listing.ownerId = listing.get('ownerId');
$scope.listing.apps = listing.get('applications');
$scope.listing.appids = _.pluck($scope.listing.apps, 'id');
$scope.listing.address = listing.get("address");
$scope.pageTitle = $scope.listing.address;
$scope.place = { types: ['street_address'] };
$scope.listing.beds = listing.get("beds");
$scope.listing.baths = listing.get("baths");
$scope.listing.rent = listing.get("rent");
            $scope.listing.aptnum = listing.get("aptnum");
$scope.listing.tempdt = listing.get("availableDate");
$scope.listing.dt = $scope.listing.tempdt.toLocaleDateString();
$scope.listing.tempenddt = listing.get("enddate");
$scope.listing.enddt = $scope.listing.tempenddt.toLocaleDateString();
$scope.listing.gender = listing.get("gender");
$scope.listing.desc = listing.get("description");
$scope.listing.templaundry = listing.get("laundry");
$scope.listing.tempair = listing.get("air");
$scope.listing.appcount = listing.get('applications').length;
$scope.pictureFiles[0] = $scope.listing.get("photo0");
$scope.pictureFiles[1] = $scope.listing.get("photo1");
$scope.pictureFiles[2] = $scope.listing.get("photo2");
$scope.pictureFiles[3] = $scope.listing.get("photo3");
$scope.pictureFiles[4] = $scope.listing.get("photo4");
$scope.picUrls[0] = $scope.pictureFiles[0].url();
$scope.picUrls[1] = $scope.pictureFiles[1].url();
$scope.picUrls[2] = $scope.pictureFiles[2].url();
$scope.picUrls[3] = $scope.pictureFiles[3].url();
$scope.picUrls[4] = $scope.pictureFiles[4].url();
$scope.share = {};
var desclen = $scope.listing.desc.length;
var desccut = "";
if (desclen < 141) {
desccut = $scope.listing.desc;
} else {
desccut = $scope.listing.desc.substr(0, 140) + "...";
};
            $scope.share.title = $scope.listing.beds + " bed, " + $scope.listing.baths + " bath for $" + $scope.listing.rent + "/month";
$scope.share.desc = "Located at " + $scope.listing.address + " and available starting " + $scope.listing.dt + ". " + desccut;
$scope.share.url = "http://piip.parseapp.com/#!/listing/" + $scope.listing.listingid;
$scope.share.geopoint = $scope.listing.get('geopoint');
//$scope.share.url = "http://piip.parseapp.com/?_escaped_fragment_=%2Flisting%2F" + $scope.listing.listingid;
$scope.share.image = $scope.picUrls[0];
$scope.$emit('metaUpdate', $scope.share);
$("<img/>").attr("src", $scope.picUrls[0]).load(function(){
                var s = {w: this.width, h: this.height};
                $scope.share.imgw = s.w;
                $scope.share.imgh = s.h;
$scope.$emit('metaUpdate', $scope.share);
console.log('$emit triggered');
});
if ($scope.listing.gender == 'both') {
$scope.listing.gender = 'Male and Female';
}
if ($scope.listing.templaundry == true) {
$scope.listing.laundry = 'In Building';
}
if ($scope.listing.tempair == true) {
$scope.listing.air = 'A/C';
}
$scope.map = {
center: { latitude: $scope.listing.attributes.geopoint._latitude, longitude: $scope.listing.attributes.geopoint._longitude },
zoom: 13
// options: {
// types: "locality, neighborhood, postal_town, colloquial_area, political, postal_code"
// }
};
$scope.marker = {
id: listing.id,
coords: { latitude: $scope.listing.attributes.geopoint._latitude, longitude: $scope.listing.attributes.geopoint._longitude },
};
uiGmapGoogleMapApi.then(function(maps) {
});
userquery.get($scope.listing.ownerId, {
success: function(profile) {
$scope.profile = profile;
$scope.profile.name = profile.get('name');
$scope.profile.email = profile.get('email');
$scope.profile.gender = profile.get('gender');
$scope.profile.age = profile.get('age');
$scope.profile.education = profile.get('education').reverse();
$scope.profile.work = profile.get('work');
$scope.profile.propics = profile.get('imgUser');
},
error: function(error) {
console.log(error);
}
})
},
error: function(error) {
console.log(error);
}
});
$scope.listflag = function () {
$scope.listing.increment('flag');
$scope.listing.save();
$scope.flagged = true;
}
$scope.apply = function () {
if ($rootScope.currentUser) {
var tempindex = _.indexOf($scope.listing.appids, $rootScope.currentUser.id);
if (tempindex == -1) {
$scope.listing.addUnique('applications', {
id: $rootScope.currentUser.id,
appstatus: 'new',
chatstatus: false,
modifieddt: new Date()
});
$rootScope.currentUser.addUnique('applications', $scope.listing.listingid);
$scope.listing.save(null, {
success: function(result) {
// Notification Email
Parse.Cloud.run('emailNewApp', {
recname: $scope.profile.name,
recemail: $scope.profile.email,
sender: $rootScope.currentUser.get('name')
}, {
success: function(message) {
},
error: function(result, error) {
console.log(error)
}
});
},
error: function(result, error) {
console.log(error);
}
});
$rootScope.currentUser.save(null, {
success: function(result) {
$scope.applyalert = "Applied!";
},
error: function(error) {
console.log(error);
}
});
} else {
$scope.applyalert = "Applied!";
};
};
}
$scope.fbConnect = function () {
// NB: this is a contrived example for demo purposes, you would never write the following code in a real app
// normally you would define a User.js data module for all your user objects and the method below would be on the user, e.g. $rootScope.currentUser.fbConnect()
Parse.FacebookUtils.logIn("public_profile,email,user_birthday,user_education_history,user_photos,user_work_history", {}).then(function(user) {
console.log('facebook connected!');
$rootScope.currentUser = Parse.User.current();
updateProf();
}, function(error) {
console.log('something went wrong, try again');
});
}
$scope.fbShare = function() {
FB.ui({
method: 'share',
url: 'http://piip.parseapp.com/#!/listing/' + $scope.listing.listingid,
title: $scope.share.title,
description: $scope.share.desc,
image: $scope.share.image,
href: 'http://piip.parseapp.com/#!/listing/' + $scope.listing.listingid
}, function(response){});
}
var user = {};
function calculateAge(dateString) {
var today = new Date();
var birthDate = new Date(dateString);
var age = today.getFullYear() - birthDate.getFullYear();
var m = today.getMonth() - birthDate.getMonth();
if (m < 0 || (m === 0 && today.getDate() < birthDate.getDate())) {
age--;
}
return age;
}
// FACEBOOK PROFILE UPDATE
updateProf = function () {
FB.apiAngular('me?fields=id,name,first_name,last_name,email,birthday,education,gender,work,albums')
.then(function (profileData) {
user.profileData = profileData;
user.age = calculateAge(profileData.birthday);
})
.then(function (resList) {
user.profilePhotos = [];
for (var i = user.profileData.albums.data.length - 1; i >= 0; i--) {
if (user.profileData.albums.data[i].name == "Profile Pictures") {
user.albumid = user.profileData.albums.data[i].id;
};
};
FB.apiAngular('/'+user.albumid+'/photos')
.then(function (photoData) {
for (var i = 10 - 1; i >= 0; i--) {
user.profilePhotos.push(photoData.data[i].images[1].source);
};
var User = Parse.User.current();
var shortname = user.profileData.first_name + " " + user.profileData.last_name.charAt(0) + ".";
if (user.profileData.gender == 'male') {
user.profileData.gender = 'Male';
} else if (user.profileData.gender == 'female') {
user.profileData.gender = 'Female';
};
User.set("name",shortname);
User.set("fullname",user.profileData.name);
User.set("gender",user.profileData.gender);
User.set("bday",user.profileData.birthday);
User.set("education",user.profileData.education);
User.set("work",user.profileData.work);
User.set("imgUser",user.profilePhotos.reverse());
User.set("age",user.age);
User.set("email",user.profileData.email);
User.save(null, {
success: function(fbUser) {
// Execute any logic that should take place after the object is saved.
                    if (typeof $scope.$close === 'function') {
$scope.$close(user);
};
},
error: function(fbUser, error) {
// Execute any logic that should take place if the save fails.
// error is a Parse.Error with an error code and message.
//reject('Failed to create new object, with error code: ' + error.message);
console.log(error);
}
});
});
});
}
}) |
.config(function(uiGmapGoogleMapApiProvider) {
console.log('config maps');
uiGmapGoogleMapApiProvider.configure({
key: 'AIzaSyCCMEJsPzyGW-oLOShTOJw_Pe9Qv2YMAZo', | random_line_split |
listing-ctrl.js | angular.module('listing.roommi', ['uiGmapgoogle-maps', 'ui.bootstrap', 'parse-angular', 'fbook.roommi'])
.config(function(uiGmapGoogleMapApiProvider) {
console.log('config maps');
uiGmapGoogleMapApiProvider.configure({
key: 'AIzaSyCCMEJsPzyGW-oLOShTOJw_Pe9Qv2YMAZo',
v: '3.17',
libraries: 'places'
});
})
.controller("ListingCtrl", function($scope, $rootScope, $state, $stateParams, $modal, fbookFactory, FacebookAngularPatch, uiGmapGoogleMapApi, markersFactory) {
$scope.pictureFiles = [];
$scope.picUrls = [];
var Listing = Parse.Object.extend("listing");
var userquery = new Parse.Query(Parse.User);
var query = new Parse.Query(Listing);
var listing = $stateParams.listingId;
console.log(listing);
query.get(listing, {
success: function(listing) {
console.log(listing);
$scope.listing = listing;
$scope.listing.listingid = listing.id;
$scope.listing.ownerId = listing.get('ownerId');
$scope.listing.apps = listing.get('applications');
$scope.listing.appids = _.pluck($scope.listing.apps, 'id');
$scope.listing.address = listing.get("address");
$scope.pageTitle = $scope.listing.address;
$scope.place = { types: ['street_address'] };
$scope.listing.beds = listing.get("beds");
$scope.listing.baths = listing.get("baths");
$scope.listing.rent = listing.get("rent");
            $scope.listing.aptnum = listing.get("aptnum");
$scope.listing.tempdt = listing.get("availableDate");
$scope.listing.dt = $scope.listing.tempdt.toLocaleDateString();
$scope.listing.tempenddt = listing.get("enddate");
$scope.listing.enddt = $scope.listing.tempenddt.toLocaleDateString();
$scope.listing.gender = listing.get("gender");
$scope.listing.desc = listing.get("description");
$scope.listing.templaundry = listing.get("laundry");
$scope.listing.tempair = listing.get("air");
$scope.listing.appcount = listing.get('applications').length;
$scope.pictureFiles[0] = $scope.listing.get("photo0");
$scope.pictureFiles[1] = $scope.listing.get("photo1");
$scope.pictureFiles[2] = $scope.listing.get("photo2");
$scope.pictureFiles[3] = $scope.listing.get("photo3");
$scope.pictureFiles[4] = $scope.listing.get("photo4");
$scope.picUrls[0] = $scope.pictureFiles[0].url();
$scope.picUrls[1] = $scope.pictureFiles[1].url();
$scope.picUrls[2] = $scope.pictureFiles[2].url();
$scope.picUrls[3] = $scope.pictureFiles[3].url();
$scope.picUrls[4] = $scope.pictureFiles[4].url();
$scope.share = {};
var desclen = $scope.listing.desc.length;
var desccut = "";
if (desclen < 141) {
desccut = $scope.listing.desc;
} else {
desccut = $scope.listing.desc.substr(0, 140) + "...";
};
            $scope.share.title = $scope.listing.beds + " bed, " + $scope.listing.baths + " bath for $" + $scope.listing.rent + "/month";
$scope.share.desc = "Located at " + $scope.listing.address + " and available starting " + $scope.listing.dt + ". " + desccut;
$scope.share.url = "http://piip.parseapp.com/#!/listing/" + $scope.listing.listingid;
$scope.share.geopoint = $scope.listing.get('geopoint');
//$scope.share.url = "http://piip.parseapp.com/?_escaped_fragment_=%2Flisting%2F" + $scope.listing.listingid;
$scope.share.image = $scope.picUrls[0];
$scope.$emit('metaUpdate', $scope.share);
$("<img/>").attr("src", $scope.picUrls[0]).load(function(){
                var s = {w: this.width, h: this.height};
                $scope.share.imgw = s.w;
                $scope.share.imgh = s.h;
$scope.$emit('metaUpdate', $scope.share);
console.log('$emit triggered');
});
if ($scope.listing.gender == 'both') {
$scope.listing.gender = 'Male and Female';
}
if ($scope.listing.templaundry == true) {
$scope.listing.laundry = 'In Building';
}
if ($scope.listing.tempair == true) {
$scope.listing.air = 'A/C';
}
$scope.map = {
center: { latitude: $scope.listing.attributes.geopoint._latitude, longitude: $scope.listing.attributes.geopoint._longitude },
zoom: 13
// options: {
// types: "locality, neighborhood, postal_town, colloquial_area, political, postal_code"
// }
};
$scope.marker = {
id: listing.id,
coords: { latitude: $scope.listing.attributes.geopoint._latitude, longitude: $scope.listing.attributes.geopoint._longitude },
};
uiGmapGoogleMapApi.then(function(maps) {
});
userquery.get($scope.listing.ownerId, {
success: function(profile) {
$scope.profile = profile;
$scope.profile.name = profile.get('name');
$scope.profile.email = profile.get('email');
$scope.profile.gender = profile.get('gender');
$scope.profile.age = profile.get('age');
$scope.profile.education = profile.get('education').reverse();
$scope.profile.work = profile.get('work');
$scope.profile.propics = profile.get('imgUser');
},
error: function(error) {
console.log(error);
}
})
},
error: function(error) {
console.log(error);
}
});
$scope.listflag = function () {
$scope.listing.increment('flag');
$scope.listing.save();
$scope.flagged = true;
}
$scope.apply = function () {
if ($rootScope.currentUser) {
var tempindex = _.indexOf($scope.listing.appids, $rootScope.currentUser.id);
if (tempindex == -1) {
$scope.listing.addUnique('applications', {
id: $rootScope.currentUser.id,
appstatus: 'new',
chatstatus: false,
modifieddt: new Date()
});
$rootScope.currentUser.addUnique('applications', $scope.listing.listingid);
$scope.listing.save(null, {
success: function(result) {
// Notification Email
Parse.Cloud.run('emailNewApp', {
recname: $scope.profile.name,
recemail: $scope.profile.email,
sender: $rootScope.currentUser.get('name')
}, {
success: function(message) {
},
error: function(result, error) {
console.log(error)
}
});
},
error: function(result, error) {
console.log(error);
}
});
$rootScope.currentUser.save(null, {
success: function(result) {
$scope.applyalert = "Applied!";
},
error: function(error) {
console.log(error);
}
});
} else {
$scope.applyalert = "Applied!";
};
};
}
$scope.fbConnect = function () {
// NB: this is a contrived example for demo purposes, you would never write the following code in a real app
// normally you would define a User.js data module for all your user objects and the method below would be on the user, e.g. $rootScope.currentUser.fbConnect()
Parse.FacebookUtils.logIn("public_profile,email,user_birthday,user_education_history,user_photos,user_work_history", {}).then(function(user) {
console.log('facebook connected!');
$rootScope.currentUser = Parse.User.current();
updateProf();
}, function(error) {
console.log('something went wrong, try again');
});
}
$scope.fbShare = function() {
FB.ui({
method: 'share',
url: 'http://piip.parseapp.com/#!/listing/' + $scope.listing.listingid,
title: $scope.share.title,
description: $scope.share.desc,
image: $scope.share.image,
href: 'http://piip.parseapp.com/#!/listing/' + $scope.listing.listingid
}, function(response){});
}
var user = {};
function calculateAge(dateString) |
// FACEBOOK PROFILE UPDATE
updateProf = function () {
FB.apiAngular('me?fields=id,name,first_name,last_name,email,birthday,education,gender,work,albums')
.then(function (profileData) {
user.profileData = profileData;
user.age = calculateAge(profileData.birthday);
})
.then(function (resList) {
user.profilePhotos = [];
for (var i = user.profileData.albums.data.length - 1; i >= 0; i--) {
if (user.profileData.albums.data[i].name == "Profile Pictures") {
user.albumid = user.profileData.albums.data[i].id;
};
};
FB.apiAngular('/'+user.albumid+'/photos')
.then(function (photoData) {
for (var i = 10 - 1; i >= 0; i--) {
user.profilePhotos.push(photoData.data[i].images[1].source);
};
var User = Parse.User.current();
var shortname = user.profileData.first_name + " " + user.profileData.last_name.charAt(0) + ".";
if (user.profileData.gender == 'male') {
user.profileData.gender = 'Male';
} else if (user.profileData.gender == 'female') {
user.profileData.gender = 'Female';
};
User.set("name",shortname);
User.set("fullname",user.profileData.name);
User.set("gender",user.profileData.gender);
User.set("bday",user.profileData.birthday);
User.set("education",user.profileData.education);
User.set("work",user.profileData.work);
User.set("imgUser",user.profilePhotos.reverse());
User.set("age",user.age);
User.set("email",user.profileData.email);
User.save(null, {
success: function(fbUser) {
// Execute any logic that should take place after the object is saved.
                    if (typeof $scope.$close === 'function') {
$scope.$close(user);
};
},
error: function(fbUser, error) {
// Execute any logic that should take place if the save fails.
// error is a Parse.Error with an error code and message.
//reject('Failed to create new object, with error code: ' + error.message);
console.log(error);
}
});
});
});
}
}) | {
var today = new Date();
var birthDate = new Date(dateString);
var age = today.getFullYear() - birthDate.getFullYear();
var m = today.getMonth() - birthDate.getMonth();
if (m < 0 || (m === 0 && today.getDate() < birthDate.getDate())) {
age--;
}
return age;
} | identifier_body |
main.rs | /*
Authors: Prof. John Lindsay
Created: 15/08/2023 (originally in Whitebox Toolset Extension)
Last Modified: 15/08/2023
License: MIT
*/
use rstar::primitives::GeomWithData;
use rstar::RTree;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::ops::Index;
use std::path;
use std::str;
use std::time::Instant;
use std::collections::VecDeque;
use whitebox_common::structures::Point2D;
use whitebox_common::utils::{
get_formatted_elapsed_time,
wrapped_print
};
use whitebox_vector::{
// AttributeField,
// FieldData,
// FieldDataType,
Shapefile,
ShapefileGeometry,
ShapeType
};
const EPSILON: f64 = std::f64::EPSILON;
/// This tool can be used to resolve many of the topological errors and inconsistencies associated with
/// manually digitized vector stream networks, i.e. hydrography data. A properly structured stream network
/// should consist of a series of stream segments that connect a channel head to a downstream confluence,
/// or an upstream confluence to a downstream confluence/outlet. This tool will join vector arcs that
/// connect at arbitrary, non-confluence points along stream segments. It also splits an arc where
/// a tributary stream connects at a mid-point, thereby creating a proper confluence where two upstream
/// triburaries converge into a downstream segment. The tool also handles non-connecting tributaries
/// caused by dangling arcs, i.e. overshoots and undershoots.
///
/// 
///
/// The user may optionally specify the name of the input vector stream network (`--input`) and the output file
/// (`--output`). Note that if an input file is not specified by the user, the tool will search for all vector
/// files (*.shp) contained within the current working directory. This feature can be very useful when
/// you need to process a large number of stream files contained within a single directory. The tool will
/// process the files in parallel in this batch mode.
///
/// A distance threshold for snapping dangling arcs (`--snap`) must be specified by the user. This distance
/// is in the input layer's x-y units. The tool works best on projected input
/// data; however, if the input is in geographic coordinates (latitude and longitude), then specifying a
/// small-valued snap distance is advisable.
///
/// Notice that the attributes of the input layer will not be
/// carried over to the output file because there is not a one-for-one feature correspondence between the
/// two files due to the joins and splits of stream segments. Instead the output attribute table will
/// only contain a feature ID (FID) entry.
///
/// > Note: this tool should be used to pre-process vector streams that are input to the
/// > `VectorStreamNetworkAnalysis` tool.
///
/// # See Also
/// `VectorStreamNetworkAnalysis`, `FixDanglingArcs`
fn main() {
let args: Vec<String> = env::args().collect();
if args[1].trim() == "run" {
match run(&args) {
Ok(_) => {}
Err(e) => panic!("{:?}", e),
}
}
if args.len() <= 1 || args[1].trim() == "help" {
// print help
help();
}
if args[1].trim() == "version" {
// print version information
version();
}
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("correct_stream_vector_direction{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
    This tool corrects the flow direction of arcs in a vector stream network so that lines point downstream toward specified outlet points.
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
    -i, --input   Name of the input streams vector file.
    --outlet      Name of the input outlet points vector file.
    -o, --output  Name of the output streams vector file.
    --snap        Snap distance, in xy units (--dist is accepted as an alias).
| working directory contained in the WhiteboxTools settings.json file.
Example Usage:
    >> .*EXE_NAME run -i=streams.shp --outlet=outlets.shp -o=streams_corrected.shp --snap=2.0
Note: Use of this tool requires a valid license. To obtain a license,
contact Whitebox Geospatial Inc. (support@whiteboxgeo.com).
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"correct_stream_vector_direction v{} by Dr. John B. Lindsay (c) 2023.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("CorrectStreamVectorDirection") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
// read the arguments
let mut input_file = String::new();
let mut outlet_file = String::new();
let mut output_file: String = String::new();
let mut snap_dist = 1.0;
if args.len() <= 1 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with too few parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-outlet" {
outlet_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-snap" || flag_val == "-dist" {
snap_dist = if keyval {
vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
} else {
args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
};
}
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
// let mut progress: usize;
let mut old_progress: usize = 1;
let start = Instant::now();
if !input_file.contains(path::MAIN_SEPARATOR) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !outlet_file.contains(path::MAIN_SEPARATOR) && !outlet_file.contains("/") {
outlet_file = format!("{}{}", working_directory, outlet_file);
}
if output_file.is_empty() {
output_file = input_file
.clone()
.replace(".shp", "_corrected.shp")
.replace(".SHP", "_corrected.shp");
}
if !output_file.contains(path::MAIN_SEPARATOR) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if snap_dist <= 0f64 {
if configurations.verbose_mode {
wrapped_print("Error: The snap distance must be greater than 0.0.", 50);
}
}
let input = Shapefile::read(&input_file).expect("Error reading file"); //?;
// Make sure the input vector file is of polyline type
if input.header.shape_type.base_shape_type() != ShapeType::PolyLine {
// return Err(Error::new(
// ErrorKind::InvalidInput,
// "The vector data must be of PolyLine base shape type.",
// ));
panic!("The vector stream data must be of PolyLine base shape type.");
}
let outlets = Shapefile::read(&outlet_file).expect("Error reading file"); //?;
// Make sure the outlets vector file is of point type
if outlets.header.shape_type.base_shape_type() != ShapeType::Point {
panic!("The vector outlets data must be of POINT base shape type.");
}
let mut progress: usize;
// Read each line segment into an rtree.
type Location = GeomWithData<[f64; 2], (usize, bool)>;
let mut end_nodes = vec![];
let (mut part_start, mut part_end): (usize, usize);
let mut fid = 0usize; // fid is unique to each part in the vector
let mut polylines = vec![];
for record_num in 0..input.num_records {
let record = input.get_record(record_num);
for part in 0..record.num_parts as usize {
part_start = record.parts[part] as usize;
part_end = if part < record.num_parts as usize - 1 {
record.parts[part + 1] as usize - 1
} else {
record.num_points as usize - 1
};
polylines.push(
Polyline::new(
&record.points[part_start..=part_end],
record_num
)
);
end_nodes.push(Location::new(
[record.points[part_start].x, record.points[part_start].y],
(fid, true)
));
end_nodes.push(Location::new(
[record.points[part_end].x, record.points[part_end].y],
(fid, false)
));
fid += 1;
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / input.num_records as f64) as usize;
if progress != old_progress {
println!("Reading vector: {}%", progress);
old_progress = progress;
}
}
}
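// Example (editor's sketch, std-only): the loop above slices a multipart
// shapefile record into per-part vertex ranges. The same index arithmetic
// as a stand-alone helper (`part_ranges` is hypothetical), where `parts`
// holds each part's starting vertex index:
fn part_ranges(parts: &[usize], num_points: usize) -> Vec<(usize, usize)> {
    (0..parts.len())
        .map(|p| {
            let start = parts[p];
            // Earlier parts stop one vertex before the next part begins;
            // the last part runs to the final vertex.
            let end = if p < parts.len() - 1 { parts[p + 1] - 1 } else { num_points - 1 };
            (start, end)
        })
        .collect()
}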
let num_polylines = polylines.len(); // will be updated after the joins.
let snap_dist_sq = snap_dist * snap_dist;
let endnode_tree = RTree::bulk_load(end_nodes);
let precision = EPSILON * 10f64;
let mut outlet_pt: Point2D;
let mut p1: Point2D;
let mut visited = vec![false; num_polylines];
let mut reverse = vec![false; num_polylines];
for record_num in 0..outlets.num_records {
let record = outlets.get_record(record_num);
if record.shape_type != ShapeType::Null {
for p in 0..record.points.len() {
outlet_pt = record.points[p];
let ret = endnode_tree.locate_within_distance([outlet_pt.x, outlet_pt.y], snap_dist_sq);
for pt in ret {
let (fid, is_start) = pt.data;
if !visited[fid] {
visited[fid] = true;
// now find all connected line segments
let mut queue: VecDeque<(usize, bool)> = VecDeque::with_capacity(num_polylines);
queue.push_back((fid, is_start));
while !queue.is_empty() {
let (fid2, is_start2) = queue.pop_front().unwrap();
// Find the point associated with the other end of this polyline
p1 = if !is_start2 {
polylines[fid2].get_first_node()
} else {
// To get here means that you first encountered the beginning of the polyline, which
// shouldn't happen if it is correctly directed, since we are doing a bottom-up
// scan of the network. Therefore, reverse the line in the output.
reverse[fid2] = true;
polylines[fid2].get_last_node()
};
// Find the neighbouring endnodes of p1
let ret2 = endnode_tree.locate_within_distance([p1.x, p1.y], precision);
for pt2 in ret2 {
let (fid_n, is_start_n) = pt2.data;
if fid_n != fid2 && !visited[fid_n] {
// Add this newly encountered polyline to the queue
queue.push_back((fid_n, is_start_n));
visited[fid_n] = true;
}
}
}
}
}
}
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / outlets.num_records as f64) as usize;
if progress != old_progress {
println!("Looking for reverse-oriented arcs: {}%", progress);
old_progress = progress;
}
}
}
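// Example (editor's sketch, std-only): the traversal above is a
// breadth-first walk from each outlet over coincident arc endpoints,
// marking on enqueue so no arc is processed twice. The bare pattern
// (`neighbours` is a hypothetical adjacency lookup):
fn bfs(start: usize, neighbours: &dyn Fn(usize) -> Vec<usize>, visited: &mut [bool]) {
    let mut queue = std::collections::VecDeque::new();
    visited[start] = true;
    queue.push_back(start);
    while let Some(node) = queue.pop_front() {
        for n in neighbours(node) {
            if !visited[n] {
                visited[n] = true; // mark on enqueue to avoid duplicate visits
                queue.push_back(n);
            }
        }
    }
}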
let mut num_reversed = 0;
for fid in 0..polylines.len() {
if reverse[fid] {
polylines[fid].vertices.reverse(); // reverse the arc in place
num_reversed += 1;
}
}
println!("num. reversed arcs: {num_reversed}");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file.replace(".shp", "_reversed_arcs.shp"), &input, ShapeType::PolyLine, true).expect("Error creating output file");
let mut sfg: ShapefileGeometry;
for fid in 0..polylines.len() {
if reverse[fid] {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[fid].vertices);
output.add_record(sfg);
let record_num = polylines[fid].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
}
}
output.write().expect("Error writing file.");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file, &input, ShapeType::PolyLine, true).expect("Error creating output file"); //?;
// add the attributes
// let in_atts = input.attributes.get_fields();
let mut sfg: ShapefileGeometry;
for poly_id in 0..polylines.len() {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[poly_id].vertices);
output.add_record(sfg);
let record_num = polylines[poly_id].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
if configurations.verbose_mode {
progress = (100.0_f64 * (poly_id + 1) as f64 / polylines.len() as f64) as usize;
if progress != old_progress {
println!("Looking for dangling arcs: {}%", progress);
old_progress = progress;
}
}
}
if configurations.verbose_mode {
println!("Saving data...")
};
output.write().expect("Error writing file.");
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!(
"\n{}",
&format!("Elapsed Time (Including I/O): {}", elapsed_time)
);
}
Ok(())
}
#[derive(Default, Clone, Debug)]
struct Polyline {
vertices: Vec<Point2D>,
id: usize,
}
impl Index<usize> for Polyline {
type Output = Point2D;
fn index<'a>(&'a self, index: usize) -> &'a Point2D {
&self.vertices[index]
}
}
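// Example (editor's sketch): exercising the Polyline helpers below.
// Assumes Point2D::new(x, y) exists (only its .x/.y fields are used
// elsewhere in this file); hypothetical test, not part of the tool.
#[cfg(test)]
mod polyline_example {
    use super::*;
    #[test]
    fn end_nodes_after_reverse() {
        let pts = vec![Point2D::new(0.0, 0.0), Point2D::new(1.0, 1.0)];
        let mut line = Polyline::new(&pts, 0);
        line.vertices.reverse(); // the same operation run() applies to flagged arcs
        assert_eq!(line.get_first_node().x, 1.0);
        assert_eq!(line.get_last_node().x, 0.0);
    }
}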
impl Polyline {
// Creates a new Polyline from vertices
fn new(vertices: &[Point2D], id: usize) -> Self {
Polyline {
vertices: vertices.to_vec(),
id,
}
}
fn get_first_node(&self) -> Point2D {
self[0]
}
fn get_last_node(&self) -> Point2D {
self[self.vertices.len() - 1]
}
} | Input/output file names can be fully qualified, or can rely on the | random_line_split |
main.rs | /*
Authors: Prof. John Lindsay
Created: 15/08/2023 (originally in Whitebox Toolset Extension)
Last Modified: 15/08/2023
License: MIT
*/
use rstar::primitives::GeomWithData;
use rstar::RTree;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::ops::Index;
use std::path;
use std::str;
use std::time::Instant;
use std::collections::VecDeque;
use whitebox_common::structures::Point2D;
use whitebox_common::utils::{
get_formatted_elapsed_time,
wrapped_print
};
use whitebox_vector::{
// AttributeField,
// FieldData,
// FieldDataType,
Shapefile,
ShapefileGeometry,
ShapeType
};
const EPSILON: f64 = std::f64::EPSILON;
/// This tool can be used to resolve inconsistencies in the digitizing direction of manually digitized
/// vector stream networks, i.e. hydrography data. In a properly structured stream network, each arc
/// points downstream, from a channel head or upstream confluence towards a downstream confluence or
/// outlet. Digitized data frequently contain arcs oriented the wrong way. Starting from one or more
/// user-specified outlet points, this tool walks the network bottom-up and reverses any arc that is
/// first encountered at its beginning node, writing a corrected copy of every arc to the output.
///
/// 
///
/// The user must specify the name of the input vector stream network (`--input`), the input outlet
/// points vector (`--outlet`), and the output file (`--output`). If no output name is provided, one
/// is derived from the input name with a "_corrected" suffix.
///
/// A distance threshold (`--snap`) is used to match outlet points to nearby stream arc endnodes. This
/// distance is in the input layer's x-y units. The tool works best on projected input data; if the
/// input is in geographic coordinates (latitude and longitude), a small snap distance is advisable.
///
/// The attributes of the input layer are carried over to the output file, since each output arc
/// corresponds one-for-one with an input arc (reversed where necessary). Reversed arcs are also
/// written to a separate "_reversed_arcs" file for inspection.
///
/// > Note: this tool should be used to pre-process vector streams that are input to the
/// > `VectorStreamNetworkAnalysis` tool.
///
/// # See Also
/// `VectorStreamNetworkAnalysis`, `FixDanglingArcs`
fn main() {
let args: Vec<String> = env::args().collect();
if args[1].trim() == "run" {
match run(&args) {
Ok(_) => {}
Err(e) => panic!("{:?}", e),
}
}
if args.len() <= 1 || args[1].trim() == "help" {
// print help
help();
}
if args[1].trim() == "version" {
// print version information
version();
}
}
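// Example (editor's sketch, std-only): the dispatch in main() keyed on
// args[1], written against Option to make the missing-argument case
// explicit (`dispatch` is hypothetical and not used by this tool):
fn dispatch(args: &[String]) {
    match args.get(1).map(|s| s.trim()) {
        Some("run") => { /* call run(args) */ }
        Some("version") => { /* call version() */ }
        _ => { /* no or unknown subcommand: call help() */ }
    }
}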
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("correct_stream_vector_direction{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
This tool corrects the flow direction of digitized vector stream arcs, relative to specified outlet points.
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
-i, --input Name of the input streams vector file.
--outlet Name of the input outlet points vector file.
-o, --output Name of the output (corrected) streams vector file.
--snap, --dist Snap distance, in x-y units, used to link outlets to stream endnodes.
Input/output file names can be fully qualified, or can rely on the
working directory contained in the WhiteboxTools settings.json file.
Example Usage:
>> .*EXE_NAME run -i=streams.shp --outlet=outlet.shp -o=corrected.shp --snap=2.0
Note: Use of this tool requires a valid license. To obtain a license,
contact Whitebox Geospatial Inc. (support@whiteboxgeo.com).
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"correct_stream_vector_direction v{} by Dr. John B. Lindsay (c) 2023.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("CorrectStreamVectorDirection") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
// read the arguments
let mut input_file = String::new();
let mut outlet_file = String::new();
let mut output_file: String = String::new();
let mut snap_dist = 1.0;
if args.len() <= 1 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with too few parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" | else if flag_val == "-outlet" {
outlet_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-snap" || flag_val == "-dist" {
snap_dist = if keyval {
vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
} else {
args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
};
}
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
// let mut progress: usize;
let mut old_progress: usize = 1;
let start = Instant::now();
if !input_file.contains(path::MAIN_SEPARATOR) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !outlet_file.contains(path::MAIN_SEPARATOR) && !outlet_file.contains("/") {
outlet_file = format!("{}{}", working_directory, outlet_file);
}
if output_file.is_empty() {
output_file = input_file
.clone()
.replace(".shp", "_corrected.shp")
.replace(".SHP", "_corrected.shp");
}
if !output_file.contains(path::MAIN_SEPARATOR) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
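// Example (editor's sketch, std-only): the qualification rule used above —
// prepend the working directory only when the file name is bare
// (`qualify` is a hypothetical helper):
fn qualify(name: &str, working_dir: &str) -> String {
    if name.contains(std::path::MAIN_SEPARATOR) || name.contains('/') {
        name.to_string() // already qualified
    } else {
        format!("{}{}", working_dir, name)
    }
}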
if snap_dist <= 0f64 {
if configurations.verbose_mode {
wrapped_print("Error: The snap distance must be greater than 0.0.", 50);
}
}
let input = Shapefile::read(&input_file).expect("Error reading file"); //?;
// Make sure the input vector file is of polyline type
if input.header.shape_type.base_shape_type() != ShapeType::PolyLine {
// return Err(Error::new(
// ErrorKind::InvalidInput,
// "The vector data must be of PolyLine base shape type.",
// ));
panic!("The vector stream data must be of PolyLine base shape type.");
}
let outlets = Shapefile::read(&outlet_file).expect("Error reading file"); //?;
// Make sure the outlets vector file is of point type
if outlets.header.shape_type.base_shape_type() != ShapeType::Point {
panic!("The vector outlets data must be of POINT base shape type.");
}
let mut progress: usize;
// Read each line segment into an rtree.
type Location = GeomWithData<[f64; 2], (usize, bool)>;
let mut end_nodes = vec![];
let (mut part_start, mut part_end): (usize, usize);
let mut fid = 0usize; // fid is unique to each part in the vector
let mut polylines = vec![];
for record_num in 0..input.num_records {
let record = input.get_record(record_num);
for part in 0..record.num_parts as usize {
part_start = record.parts[part] as usize;
part_end = if part < record.num_parts as usize - 1 {
record.parts[part + 1] as usize - 1
} else {
record.num_points as usize - 1
};
polylines.push(
Polyline::new(
&record.points[part_start..=part_end],
record_num
)
);
end_nodes.push(Location::new(
[record.points[part_start].x, record.points[part_start].y],
(fid, true)
));
end_nodes.push(Location::new(
[record.points[part_end].x, record.points[part_end].y],
(fid, false)
));
fid += 1;
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / input.num_records as f64) as usize;
if progress != old_progress {
println!("Reading vector: {}%", progress);
old_progress = progress;
}
}
}
let num_polylines = polylines.len(); // will be updated after the joins.
let snap_dist_sq = snap_dist * snap_dist;
let endnode_tree = RTree::bulk_load(end_nodes);
let precision = EPSILON * 10f64;
let mut outlet_pt: Point2D;
let mut p1: Point2D;
let mut visited = vec![false; num_polylines];
let mut reverse = vec![false; num_polylines];
for record_num in 0..outlets.num_records {
let record = outlets.get_record(record_num);
if record.shape_type != ShapeType::Null {
for p in 0..record.points.len() {
outlet_pt = record.points[p];
let ret = endnode_tree.locate_within_distance([outlet_pt.x, outlet_pt.y], snap_dist_sq);
for pt in ret {
let (fid, is_start) = pt.data;
if !visited[fid] {
visited[fid] = true;
// now find all connected line segments
let mut queue: VecDeque<(usize, bool)> = VecDeque::with_capacity(num_polylines);
queue.push_back((fid, is_start));
while !queue.is_empty() {
let (fid2, is_start2) = queue.pop_front().unwrap();
// Find the point associated with the other end of this polyline
p1 = if !is_start2 {
polylines[fid2].get_first_node()
} else {
// To get here means that you first encountered the beginning of the polyline, which
// shouldn't happen if it is correctly directed, since we are doing a bottom-up
// scan of the network. Therefore, reverse the line in the output.
reverse[fid2] = true;
polylines[fid2].get_last_node()
};
// Find the neighbouring endnodes of p1
let ret2 = endnode_tree.locate_within_distance([p1.x, p1.y], precision);
for pt2 in ret2 {
let (fid_n, is_start_n) = pt2.data;
if fid_n != fid2 && !visited[fid_n] {
// Add this newly encountered polyline to the queue
queue.push_back((fid_n, is_start_n));
visited[fid_n] = true;
}
}
}
}
}
}
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / outlets.num_records as f64) as usize;
if progress != old_progress {
println!("Looking for reverse-oriented arcs: {}%", progress);
old_progress = progress;
}
}
}
let mut num_reversed = 0;
for fid in 0..polylines.len() {
if reverse[fid] {
polylines[fid].vertices.reverse(); // reverse the arc in place
num_reversed += 1;
}
}
println!("num. reversed arcs: {num_reversed}");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file.replace(".shp", "_reversed_arcs.shp"), &input, ShapeType::PolyLine, true).expect("Error creating output file");
let mut sfg: ShapefileGeometry;
for fid in 0..polylines.len() {
if reverse[fid] {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[fid].vertices);
output.add_record(sfg);
let record_num = polylines[fid].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
}
}
output.write().expect("Error writing file.");
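// Example (editor's sketch): the write-out pattern used above and below —
// copy a polyline's geometry into a fresh ShapefileGeometry, then mirror
// the source record's attributes. Only API calls already used in this
// file appear here (`write_polyline` is a hypothetical helper):
fn write_polyline(output: &mut Shapefile, input: &Shapefile, line: &Polyline) {
    let mut sfg = ShapefileGeometry::new(ShapeType::PolyLine);
    sfg.add_part(&line.vertices);
    output.add_record(sfg);
    let atts = input.attributes.get_record(line.id);
    output.attributes.add_record(atts.clone(), false);
}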
// create output file
let mut output = Shapefile::initialize_using_file(&output_file, &input, ShapeType::PolyLine, true).expect("Error creating output file"); //?;
// add the attributes
// let in_atts = input.attributes.get_fields();
let mut sfg: ShapefileGeometry;
for poly_id in 0..polylines.len() {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[poly_id].vertices);
output.add_record(sfg);
let record_num = polylines[poly_id].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
if configurations.verbose_mode {
progress = (100.0_f64 * (poly_id + 1) as f64 / polylines.len() as f64) as usize;
if progress != old_progress {
println!("Looking for dangling arcs: {}%", progress);
old_progress = progress;
}
}
}
if configurations.verbose_mode {
println!("Saving data...")
};
output.write().expect("Error writing file.");
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!(
"\n{}",
&format!("Elapsed Time (Including I/O): {}", elapsed_time)
);
}
Ok(())
}
#[derive(Default, Clone, Debug)]
struct Polyline {
vertices: Vec<Point2D>,
id: usize,
}
impl Index<usize> for Polyline {
type Output = Point2D;
fn index<'a>(&'a self, index: usize) -> &'a Point2D {
&self.vertices[index]
}
}
impl Polyline {
// Creates a new Polyline from vertices
fn new(vertices: &[Point2D], id: usize) -> Self {
Polyline {
vertices: vertices.to_vec(),
id,
}
}
fn get_first_node(&self) -> Point2D {
self[0]
}
fn get_last_node(&self) -> Point2D {
self[self.vertices.len() - 1]
}
} | {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} | conditional_block |
main.rs | /*
Authors: Prof. John Lindsay
Created: 15/08/2023 (originally in Whitebox Toolset Extension)
Last Modified: 15/08/2023
License: MIT
*/
use rstar::primitives::GeomWithData;
use rstar::RTree;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::ops::Index;
use std::path;
use std::str;
use std::time::Instant;
use std::collections::VecDeque;
use whitebox_common::structures::Point2D;
use whitebox_common::utils::{
get_formatted_elapsed_time,
wrapped_print
};
use whitebox_vector::{
// AttributeField,
// FieldData,
// FieldDataType,
Shapefile,
ShapefileGeometry,
ShapeType
};
const EPSILON: f64 = std::f64::EPSILON;
/// This tool can be used to resolve inconsistencies in the digitizing direction of manually digitized
/// vector stream networks, i.e. hydrography data. In a properly structured stream network, each arc
/// points downstream, from a channel head or upstream confluence towards a downstream confluence or
/// outlet. Digitized data frequently contain arcs oriented the wrong way. Starting from one or more
/// user-specified outlet points, this tool walks the network bottom-up and reverses any arc that is
/// first encountered at its beginning node, writing a corrected copy of every arc to the output.
///
/// 
///
/// The user must specify the name of the input vector stream network (`--input`), the input outlet
/// points vector (`--outlet`), and the output file (`--output`). If no output name is provided, one
/// is derived from the input name with a "_corrected" suffix.
///
/// A distance threshold (`--snap`) is used to match outlet points to nearby stream arc endnodes. This
/// distance is in the input layer's x-y units. The tool works best on projected input data; if the
/// input is in geographic coordinates (latitude and longitude), a small snap distance is advisable.
///
/// The attributes of the input layer are carried over to the output file, since each output arc
/// corresponds one-for-one with an input arc (reversed where necessary). Reversed arcs are also
/// written to a separate "_reversed_arcs" file for inspection.
///
/// > Note: this tool should be used to pre-process vector streams that are input to the
/// > `VectorStreamNetworkAnalysis` tool.
///
/// # See Also
/// `VectorStreamNetworkAnalysis`, `FixDanglingArcs`
fn main() {
let args: Vec<String> = env::args().collect();
if args[1].trim() == "run" {
match run(&args) {
Ok(_) => {}
Err(e) => panic!("{:?}", e),
}
}
if args.len() <= 1 || args[1].trim() == "help" {
// print help
help();
}
if args[1].trim() == "version" {
// print version information
version();
}
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("correct_stream_vector_direction{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
This tool corrects the flow direction of digitized vector stream arcs, relative to specified outlet points.
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
-i, --input Name of the input streams vector file.
--outlet Name of the input outlet points vector file.
-o, --output Name of the output (corrected) streams vector file.
--snap, --dist Snap distance, in x-y units, used to link outlets to stream endnodes.
Input/output file names can be fully qualified, or can rely on the
working directory contained in the WhiteboxTools settings.json file.
Example Usage:
>> .*EXE_NAME run -i=streams.shp --outlet=outlet.shp -o=corrected.shp --snap=2.0
Note: Use of this tool requires a valid license. To obtain a license,
contact Whitebox Geospatial Inc. (support@whiteboxgeo.com).
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() |
fn get_tool_name() -> String {
String::from("CorrectStreamVectorDirection") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
// read the arguments
let mut input_file = String::new();
let mut outlet_file = String::new();
let mut output_file: String = String::new();
let mut snap_dist = 1.0;
if args.len() <= 1 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with too few parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-outlet" {
outlet_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-snap" || flag_val == "-dist" {
snap_dist = if keyval {
vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
} else {
args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
};
}
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
// let mut progress: usize;
let mut old_progress: usize = 1;
let start = Instant::now();
if !input_file.contains(path::MAIN_SEPARATOR) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !outlet_file.contains(path::MAIN_SEPARATOR) && !outlet_file.contains("/") {
outlet_file = format!("{}{}", working_directory, outlet_file);
}
if output_file.is_empty() {
output_file = input_file
.clone()
.replace(".shp", "_corrected.shp")
.replace(".SHP", "_corrected.shp");
}
if !output_file.contains(path::MAIN_SEPARATOR) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if snap_dist <= 0f64 {
if configurations.verbose_mode {
wrapped_print("Error: The snap distance must be greater than 0.0.", 50);
}
}
let input = Shapefile::read(&input_file).expect("Error reading file"); //?;
// Make sure the input vector file is of polyline type
if input.header.shape_type.base_shape_type() != ShapeType::PolyLine {
// return Err(Error::new(
// ErrorKind::InvalidInput,
// "The vector data must be of PolyLine base shape type.",
// ));
panic!("The vector stream data must be of PolyLine base shape type.");
}
let outlets = Shapefile::read(&outlet_file).expect("Error reading file"); //?;
// Make sure the outlets vector file is of point type
if outlets.header.shape_type.base_shape_type() != ShapeType::Point {
panic!("The vector outlets data must be of POINT base shape type.");
}
let mut progress: usize;
// Read each line segment into an rtree.
type Location = GeomWithData<[f64; 2], (usize, bool)>;
let mut end_nodes = vec![];
let (mut part_start, mut part_end): (usize, usize);
let mut fid = 0usize; // fid is unique to each part in the vector
let mut polylines = vec![];
for record_num in 0..input.num_records {
let record = input.get_record(record_num);
for part in 0..record.num_parts as usize {
part_start = record.parts[part] as usize;
part_end = if part < record.num_parts as usize - 1 {
record.parts[part + 1] as usize - 1
} else {
record.num_points as usize - 1
};
polylines.push(
Polyline::new(
&record.points[part_start..=part_end],
record_num
)
);
end_nodes.push(Location::new(
[record.points[part_start].x, record.points[part_start].y],
(fid, true)
));
end_nodes.push(Location::new(
[record.points[part_end].x, record.points[part_end].y],
(fid, false)
));
fid += 1;
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / input.num_records as f64) as usize;
if progress != old_progress {
println!("Reading vector: {}%", progress);
old_progress = progress;
}
}
}
let num_polylines = polylines.len(); // will be updated after the joins.
let snap_dist_sq = snap_dist * snap_dist;
let endnode_tree = RTree::bulk_load(end_nodes);
let precision = EPSILON * 10f64;
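// Editor's note (sketch): f64::EPSILON is ~2.2e-16, so `precision` acts as
// a near-zero squared-distance tolerance for "exactly coincident" endnode
// queries below, while `snap_dist_sq` is the user-controlled, much looser
// tolerance applied only around outlet points.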
let mut outlet_pt: Point2D;
let mut p1: Point2D;
let mut visited = vec![false; num_polylines];
let mut reverse = vec![false; num_polylines];
for record_num in 0..outlets.num_records {
let record = outlets.get_record(record_num);
if record.shape_type != ShapeType::Null {
for p in 0..record.points.len() {
outlet_pt = record.points[p];
let ret = endnode_tree.locate_within_distance([outlet_pt.x, outlet_pt.y], snap_dist_sq);
for pt in ret {
let (fid, is_start) = pt.data;
if !visited[fid] {
visited[fid] = true;
// now find all connected line segments
let mut queue: VecDeque<(usize, bool)> = VecDeque::with_capacity(num_polylines);
queue.push_back((fid, is_start));
while !queue.is_empty() {
let (fid2, is_start2) = queue.pop_front().unwrap();
// Find the point associated with the other end of this polyline
p1 = if !is_start2 {
polylines[fid2].get_first_node()
} else {
// To get here means that you first encountered the beginning of the polyline, which
// shouldn't happen if it is correctly directed, since we are doing a bottom-up
// scan of the network. Therefore, reverse the line in the output.
reverse[fid2] = true;
polylines[fid2].get_last_node()
};
// Find the neighbouring endnodes of p1
let ret2 = endnode_tree.locate_within_distance([p1.x, p1.y], precision);
for pt2 in ret2 {
let (fid_n, is_start_n) = pt2.data;
if fid_n != fid2 && !visited[fid_n] {
// Add this newly encountered polyline to the queue
queue.push_back((fid_n, is_start_n));
visited[fid_n] = true;
}
}
}
}
}
}
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / outlets.num_records as f64) as usize;
if progress != old_progress {
println!("Looking for reverse-oriented arcs: {}%", progress);
old_progress = progress;
}
}
}
let mut num_reversed = 0;
for fid in 0..polylines.len() {
if reverse[fid] {
polylines[fid].vertices.reverse(); // reverse the arc in place
num_reversed += 1;
}
}
println!("num. reversed arcs: {num_reversed}");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file.replace(".shp", "_reversed_arcs.shp"), &input, ShapeType::PolyLine, true).expect("Error creating output file");
let mut sfg: ShapefileGeometry;
for fid in 0..polylines.len() {
if reverse[fid] {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[fid].vertices);
output.add_record(sfg);
let record_num = polylines[fid].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
}
}
output.write().expect("Error writing file.");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file, &input, ShapeType::PolyLine, true).expect("Error creating output file"); //?;
// add the attributes
// let in_atts = input.attributes.get_fields();
let mut sfg: ShapefileGeometry;
for poly_id in 0..polylines.len() {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[poly_id].vertices);
output.add_record(sfg);
let record_num = polylines[poly_id].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
if configurations.verbose_mode {
progress = (100.0_f64 * (poly_id + 1) as f64 / polylines.len() as f64) as usize;
if progress != old_progress {
println!("Looking for dangling arcs: {}%", progress);
old_progress = progress;
}
}
}
if configurations.verbose_mode {
println!("Saving data...")
};
output.write().expect("Error writing file.");
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!(
"\n{}",
&format!("Elapsed Time (Including I/O): {}", elapsed_time)
);
}
Ok(())
}
#[derive(Default, Clone, Debug)]
struct Polyline {
vertices: Vec<Point2D>,
id: usize,
}
impl Index<usize> for Polyline {
type Output = Point2D;
fn index<'a>(&'a self, index: usize) -> &'a Point2D {
&self.vertices[index]
}
}
impl Polyline {
// Creates a new Polyline from vertices
fn new(vertices: &[Point2D], id: usize) -> Self {
Polyline {
vertices: vertices.to_vec(),
id,
}
}
fn get_first_node(&self) -> Point2D {
self[0]
}
fn get_last_node(&self) -> Point2D {
self[self.vertices.len() - 1]
}
} | {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"correct_stream_vector_direction v{} by Dr. John B. Lindsay (c) 2023.",
VERSION.unwrap_or("Unknown version")
);
} | identifier_body |
main.rs | /*
Authors: Prof. John Lindsay
Created: 15/08/2023 (originally in Whitebox Toolset Extension)
Last Modified: 15/08/2023
License: MIT
*/
use rstar::primitives::GeomWithData;
use rstar::RTree;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::ops::Index;
use std::path;
use std::str;
use std::time::Instant;
use std::collections::VecDeque;
use whitebox_common::structures::Point2D;
use whitebox_common::utils::{
get_formatted_elapsed_time,
wrapped_print
};
use whitebox_vector::{
// AttributeField,
// FieldData,
// FieldDataType,
Shapefile,
ShapefileGeometry,
ShapeType
};
const EPSILON: f64 = std::f64::EPSILON;
/// This tool can be used to resolve inconsistencies in the digitizing direction of manually digitized
/// vector stream networks, i.e. hydrography data. In a properly structured stream network, each arc
/// points downstream, from a channel head or upstream confluence towards a downstream confluence or
/// outlet. Digitized data frequently contain arcs oriented the wrong way. Starting from one or more
/// user-specified outlet points, this tool walks the network bottom-up and reverses any arc that is
/// first encountered at its beginning node, writing a corrected copy of every arc to the output.
///
/// 
///
/// The user must specify the name of the input vector stream network (`--input`), the input outlet
/// points vector (`--outlet`), and the output file (`--output`). If no output name is provided, one
/// is derived from the input name with a "_corrected" suffix.
///
/// A distance threshold (`--snap`) is used to match outlet points to nearby stream arc endnodes. This
/// distance is in the input layer's x-y units. The tool works best on projected input data; if the
/// input is in geographic coordinates (latitude and longitude), a small snap distance is advisable.
///
/// The attributes of the input layer are carried over to the output file, since each output arc
/// corresponds one-for-one with an input arc (reversed where necessary). Reversed arcs are also
/// written to a separate "_reversed_arcs" file for inspection.
///
/// > Note: this tool should be used to pre-process vector streams that are input to the
/// > `VectorStreamNetworkAnalysis` tool.
///
/// # See Also
/// `VectorStreamNetworkAnalysis`, `FixDanglingArcs`
fn main() {
let args: Vec<String> = env::args().collect();
if args[1].trim() == "run" {
match run(&args) {
Ok(_) => {}
Err(e) => panic!("{:?}", e),
}
}
if args.len() <= 1 || args[1].trim() == "help" {
// print help
help();
}
if args[1].trim() == "version" {
// print version information
version();
}
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("correct_stream_vector_direction{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
This tool corrects the flow direction of digitized vector stream arcs, relative to specified outlet points.
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
-i, --input Name of the input streams vector file.
--outlet Name of the input outlet points vector file.
-o, --output Name of the output (corrected) streams vector file.
--snap, --dist Snap distance, in x-y units, used to link outlets to stream endnodes.
Input/output file names can be fully qualified, or can rely on the
working directory contained in the WhiteboxTools settings.json file.
Example Usage:
>> .*EXE_NAME run -i=streams.shp --outlet=outlet.shp -o=corrected.shp --snap=2.0
Note: Use of this tool requires a valid license. To obtain a license,
contact Whitebox Geospatial Inc. (support@whiteboxgeo.com).
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"correct_stream_vector_direction v{} by Dr. John B. Lindsay (c) 2023.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("CorrectStreamVectorDirection") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
// read the arguments
let mut input_file = String::new();
let mut outlet_file = String::new();
let mut output_file: String = String::new();
let mut snap_dist = 1.0;
if args.len() <= 1 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with too few parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-outlet" {
outlet_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-snap" || flag_val == "-dist" {
snap_dist = if keyval {
vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
} else {
args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
};
}
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
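// Example (editor's sketch, std-only): the banner arithmetic above pads
// each row so the right-hand column of '*' characters lines up; 15 covers
// "* Welcome to " plus the trailing space and closing '*', and 28 the
// 'Powered by' row. A generic form of the same padding (`banner_line`
// is hypothetical):
fn banner_line(text: &str, width: usize) -> String {
    // 3 = leading "* " plus the closing '*'.
    format!("* {}{}*", text, " ".repeat(width.saturating_sub(text.len() + 3)))
}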
// let mut progress: usize;
let mut old_progress: usize = 1;
let start = Instant::now();
if !input_file.contains(path::MAIN_SEPARATOR) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !outlet_file.contains(path::MAIN_SEPARATOR) && !outlet_file.contains("/") {
outlet_file = format!("{}{}", working_directory, outlet_file);
}
if output_file.is_empty() {
output_file = input_file
.clone()
.replace(".shp", "_corrected.shp")
.replace(".SHP", "_corrected.shp");
}
if !output_file.contains(path::MAIN_SEPARATOR) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if snap_dist <= 0f64 {
if configurations.verbose_mode {
wrapped_print("Error: The snap distance must be greater than 0.0.", 50);
}
}
let input = Shapefile::read(&input_file).expect("Error reading file"); //?;
// Make sure the input vector file is of polyline type
if input.header.shape_type.base_shape_type() != ShapeType::PolyLine {
// return Err(Error::new(
// ErrorKind::InvalidInput,
// "The vector data must be of PolyLine base shape type.",
// ));
panic!("The vector stream data must be of PolyLine base shape type.");
}
let outlets = Shapefile::read(&outlet_file).expect("Error reading file"); //?;
// Make sure the outlets vector file is of point type
if outlets.header.shape_type.base_shape_type() != ShapeType::Point {
panic!("The vector outlets data must be of POINT base shape type.");
}
let mut progress: usize;
// Read each line segment into an rtree.
type Location = GeomWithData<[f64; 2], (usize, bool)>;
let mut end_nodes = vec![];
let (mut part_start, mut part_end): (usize, usize);
let mut fid = 0usize; // fid is unique to each part in the vector
let mut polylines = vec![];
for record_num in 0..input.num_records {
let record = input.get_record(record_num);
for part in 0..record.num_parts as usize {
part_start = record.parts[part] as usize;
part_end = if part < record.num_parts as usize - 1 {
record.parts[part + 1] as usize - 1
} else {
record.num_points as usize - 1
};
polylines.push(
Polyline::new(
&record.points[part_start..=part_end],
record_num
)
);
end_nodes.push(Location::new(
[record.points[part_start].x, record.points[part_start].y],
(fid, true)
));
end_nodes.push(Location::new(
[record.points[part_end].x, record.points[part_end].y],
(fid, false)
));
fid += 1;
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / input.num_records as f64) as usize;
if progress != old_progress {
println!("Reading vector: {}%", progress);
old_progress = progress;
}
}
}
let num_polylines = polylines.len(); // will be updated after the joins.
let snap_dist_sq = snap_dist * snap_dist;
let endnode_tree = RTree::bulk_load(end_nodes);
let precision = EPSILON * 10f64;
let mut outlet_pt: Point2D;
let mut p1: Point2D;
let mut visited = vec![false; num_polylines];
let mut reverse = vec![false; num_polylines];
for record_num in 0..outlets.num_records {
let record = outlets.get_record(record_num);
if record.shape_type != ShapeType::Null {
for p in 0..record.points.len() {
outlet_pt = record.points[p];
let ret = endnode_tree.locate_within_distance([outlet_pt.x, outlet_pt.y], snap_dist_sq);
for pt in ret {
let (fid, is_start) = pt.data;
if !visited[fid] {
visited[fid] = true;
// now find all connected line segments
let mut queue: VecDeque<(usize, bool)> = VecDeque::with_capacity(num_polylines);
queue.push_back((fid, is_start));
while !queue.is_empty() {
let (fid2, is_start2) = queue.pop_front().unwrap();
// Find the point associated with the other end of this polyline
p1 = if !is_start2 {
polylines[fid2].get_first_node()
} else {
// To get here means that you first encountered the beginning of the polyline, which
// shouldn't happen if it is correctly directed, since we are doing a bottom-up
// scan of the network. Therefore, reverse the line in the output.
reverse[fid2] = true;
polylines[fid2].get_last_node()
};
// Find the neighbouring endnodes of p1
let ret2 = endnode_tree.locate_within_distance([p1.x, p1.y], precision);
for pt2 in ret2 {
let (fid_n, is_start_n) = pt2.data;
if fid_n != fid2 && !visited[fid_n] {
// Add this newly encountered polyline to the queue
queue.push_back((fid_n, is_start_n));
visited[fid_n] = true;
}
}
}
}
}
}
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / outlets.num_records as f64) as usize;
if progress != old_progress {
println!("Looking for reverse-oriented arcs: {}%", progress);
old_progress = progress;
}
}
}
let mut num_reversed = 0;
for fid in 0..polylines.len() {
if reverse[fid] {
polylines[fid].vertices.reverse(); // reverse the arc in place
num_reversed += 1;
}
}
println!("num. reversed arcs: {num_reversed}");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file.replace(".shp", "_reversed_arcs.shp"), &input, ShapeType::PolyLine, true).expect("Error creating output file");
let mut sfg: ShapefileGeometry;
for fid in 0..polylines.len() {
if reverse[fid] {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[fid].vertices);
output.add_record(sfg);
let record_num = polylines[fid].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
}
}
output.write().expect("Error writing file.");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file, &input, ShapeType::PolyLine, true).expect("Error creating output file"); //?;
// add the attributes
// let in_atts = input.attributes.get_fields();
let mut sfg: ShapefileGeometry;
for poly_id in 0..polylines.len() {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[poly_id].vertices);
output.add_record(sfg);
let record_num = polylines[poly_id].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
if configurations.verbose_mode {
progress = (100.0_f64 * (poly_id + 1) as f64 / polylines.len() as f64) as usize;
if progress != old_progress {
println!("Looking for dangling arcs: {}%", progress);
old_progress = progress;
}
}
}
if configurations.verbose_mode {
println!("Saving data...")
};
output.write().expect("Error writing file.");
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!(
"\n{}",
&format!("Elapsed Time (Including I/O): {}", elapsed_time)
);
}
Ok(())
}
#[derive(Default, Clone, Debug)]
struct Polyline {
vertices: Vec<Point2D>,
id: usize,
}
impl Index<usize> for Polyline {
type Output = Point2D;
fn index<'a>(&'a self, index: usize) -> &'a Point2D {
&self.vertices[index]
}
}
impl Polyline {
// Creates a new Polyline from vertices
fn new(vertices: &[Point2D], id: usize) -> Self {
Polyline {
vertices: vertices.to_vec(),
id,
}
}
fn | (&self) -> Point2D {
self[0]
}
fn get_last_node(&self) -> Point2D {
self[self.vertices.len() - 1]
}
} | get_first_node | identifier_name |
spmc.rs | pub use super::{
NoRecv,
RecvErr::{self, *},
};
use incin::Pause;
use owned_alloc::OwnedAlloc;
use ptr::{bypass_null, check_null_align};
use removable::Removable;
use std::{
fmt,
ptr::{null_mut, NonNull},
sync::{
atomic::{AtomicPtr, Ordering::*},
Arc,
},
};
/// Creates an asynchronous lock-free Single-Producer-Multi-Consumer (SPMC)
/// channel. In order to allow multiple consumers, [`Receiver`] is clonable and
/// does not require mutability.
pub fn create<T>() -> (Sender<T>, Receiver<T>) {
with_incin(SharedIncin::new())
}
/// Same as [`create`], but use a passed incinerator instead of creating a new
/// one.
pub fn with_incin<T>(incin: SharedIncin<T>) -> (Sender<T>, Receiver<T>) {
check_null_align::<Node<T>>();
// First we create a single node shared between two ends.
let alloc = OwnedAlloc::new(Node {
message: Removable::empty(),
next: AtomicPtr::new(null_mut()),
});
let single_node = alloc.into_raw();
// Then put it on back and on the front.
let sender = Sender { back: single_node };
let receiver = Receiver {
inner: Arc::new(ReceiverInner {
front: AtomicPtr::new(single_node.as_ptr()),
incin,
}),
};
(sender, receiver)
}
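// Example (editor's sketch): minimal single-threaded use of the channel
// built above; `recv` is non-blocking and reports `NoMessage` when the
// queue is momentarily empty. Hypothetical test, not part of the API.
#[cfg(test)]
mod usage_example {
    use super::*;
    #[test]
    fn send_then_recv() {
        let (mut tx, rx) = create::<u32>();
        assert!(tx.send(7).is_ok());
        // The message was published before this call, so it is observed.
        assert_eq!(rx.recv().ok(), Some(7));
    }
}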
/// The [`Sender`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function.
pub struct | <T> {
back: NonNull<Node<T>>,
}
impl<T> Sender<T> {
/// Sends a message and if the receiver disconnected, an error is returned.
pub fn send(&mut self, message: T) -> Result<(), NoRecv<T>> {
// First we allocate the node for our message.
let alloc = OwnedAlloc::new(Node {
message: Removable::new(message),
next: AtomicPtr::new(null_mut()),
});
let nnptr = alloc.into_raw();
// This dereferral is safe because the queue has at least one node. We
// possess a single node in the back, and if the queue has just one
// node, it is stored in the back (and in the front). Also, we are the
// only ones with access to the back.
let res = unsafe {
// We try to update the back's next pointer. We want to catch any
// bit marking here. A marked lower bit means the receiver
// disconnected.
self.back.as_ref().next.compare_exchange(
null_mut(),
nnptr.as_ptr(),
Release,
Relaxed,
)
};
if res.is_ok() {
// If we succeeded, let's update the back so we keep the invariant
// "the back has a single node".
self.back = nnptr;
Ok(())
} else {
// If we failed, receiver disconnected. It is safe to dealloc
// because this is the node we just allocated, and we did not share
// it with anyone (cas failed).
let mut alloc = unsafe { OwnedAlloc::from_raw(nnptr) };
let message = alloc.message.replace(None).unwrap();
Err(NoRecv { message })
}
}
/// Tests if there are any [`Receiver`]s still connected. There are no
/// guarantees that [`send`](Sender::send) will succeed if this method
/// returns `true` because the [`Receiver`] may disconnect meanwhile.
pub fn is_connected(&self) -> bool {
// Safe because we always have at least one node, which is only dropped
// when the last side to disconnect drops.
let back = unsafe { self.back.as_ref() };
back.next.load(Relaxed).is_null()
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
// This dereferral is safe because the queue always have at least one
// node. This single node is only dropped when the last side to
// disconnect drops.
let res = unsafe {
// Let's try to mark next's bit so that receiver will see we
// disconnected, if it hasn't disconnected by itself. It is ok to
// just swap, since we have only two possible values (null and
// null | 1) and we everyone will be setting to the same value
// (null | 1).
self.back
.as_ref()
.next
.swap((null_mut::<Node<T>>() as usize | 1) as *mut _, Relaxed)
};
// If the previously stored value was not null, receiver has already
// disconnected. It is safe to drop because we are the only ones that
// have a pointer to the node.
if !res.is_null() {
unsafe { OwnedAlloc::from_raw(self.back) };
}
}
}
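// Example (editor's sketch, std-only): the disconnect handshake above tags
// the low bit of `next`. Because Node is #[repr(align(2))], a genuine
// pointer's low bit is always 0, so that bit is free to carry a flag.
// The trick in isolation (hypothetical helpers, unused by this channel):
fn tag_ptr<T>(ptr: *mut T) -> *mut T {
    (ptr as usize | 1) as *mut T
}
fn is_tagged<T>(ptr: *mut T) -> bool {
    (ptr as usize) & 1 == 1
}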
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
fmtr.write_str("spmc::Sender")
}
}
unsafe impl<T> Send for Sender<T> where T: Send {}
unsafe impl<T> Sync for Sender<T> where T: Send {}
/// The [`Receiver`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function. It is clonable and does not require mutability.
pub struct Receiver<T> {
inner: Arc<ReceiverInner<T>>,
}
impl<T> Receiver<T> {
/// Tries to receive a message. If no message is available,
/// [`Err`]`(`[`RecvErr::NoMessage`]`)` is returned. If the sender
/// disconnected, [`Err`]`(`[`RecvErr::NoSender`]`)` is returned.
#[allow(unused_must_use)]
pub fn recv(&self) -> Result<T, RecvErr> {
// We have to pause the incinerator due to ABA problem. This channel
// suffers from it, yeah.
let pause = self.inner.incin.inner.pause();
// Bypassing null check is safe because we never store null in
// the front.
let mut front_nnptr = unsafe {
// First we load pointer stored in the front.
bypass_null(self.inner.front.load(Relaxed))
};
loop {
// Let's remove the node logically first. Safe to derefer this
// pointer because we paused the incinerator and we only
// delete nodes via incinerator.
match unsafe { front_nnptr.as_ref().message.take(AcqRel) } {
Some(val) => {
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are
// passing.
unsafe { self.try_clear_first(front_nnptr, &pause) };
break Ok(val);
},
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are passing.
None => unsafe {
front_nnptr = self.try_clear_first(front_nnptr, &pause)?;
},
}
}
}
/// Tests if there are any [`Sender`]s still connected. There are no
/// guarantees that [`recv`](Receiver::recv) will succeed if this method
/// returns `true` because the [`Sender`] may disconnect meanwhile.
/// This method may also return `true` if the [`Sender`] disconnected
/// but there are messages pending in the buffer. Note that another
/// [`Receiver`] may pop out the pending messages after this method was
/// called.
pub fn is_connected(&self) -> bool {
// We need this pause because of use-after-free.
let _pause = self.inner.incin.inner.pause();
// Safe to derefer this pointer because we paused the incinerator and we
// only delete nodes via incinerator.
let front = unsafe { &*self.inner.front.load(Relaxed) };
front.message.is_present(Relaxed)
|| front.next.load(Relaxed) as usize & 1 == 0
}
/// The shared incinerator used by this [`Receiver`].
pub fn incin(&self) -> SharedIncin<T> {
self.inner.incin.clone()
}
// This function is unsafe because passing the wrong pointer will lead to
// undefined behavior. The pointer must have been loaded from the front
// during the passed pause.
unsafe fn try_clear_first(
&self,
expected: NonNull<Node<T>>,
pause: &Pause<OwnedAlloc<Node<T>>>,
) -> Result<NonNull<Node<T>>, RecvErr> {
let next = expected.as_ref().next.load(Acquire);
if next as usize & 1 == 1 {
// If the next is bit flagged, sender disconnected, no more messages
// ever.
Err(RecvErr::NoSender)
} else if next.is_null() {
// No bit flag means sender is still there but we have no message.
Err(RecvErr::NoMessage)
} else {
let ptr = expected.as_ptr();
// We are not obliged to succeed. This is just cleanup and some other
// thread might do it.
let next = match self
.inner
.front
.compare_exchange(ptr, next, Relaxed, Relaxed)
{
Ok(_) => {
// Only deleting nodes via incinerator due to ABA
// problem and use-after-frees.
pause.add_to_incin(OwnedAlloc::from_raw(expected));
next
},
Err(found) => found,
};
// Safe to by-pass the check since we only store non-null
// pointers on the front.
Ok(bypass_null(next))
}
}
}
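// Editor's note (sketch): the incinerator pause taken in `recv` is what
// makes the front CAS in `try_clear_first` sound — retired nodes are
// queued in the incinerator instead of being freed immediately, which
// prevents use-after-free and the ABA case where a freed node's address
// is reused while another consumer still holds it.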
impl<T> Clone for Receiver<T> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
write!(fmtr, "spmc::Receiver {} ptr: {:p} {}", '{', self.inner, '}')
}
}
unsafe impl<T> Send for Receiver<T> where T: Send {}
unsafe impl<T> Sync for Receiver<T> where T: Send {}
struct ReceiverInner<T> {
// never null
front: AtomicPtr<Node<T>>,
incin: SharedIncin<T>,
}
impl<T> Drop for ReceiverInner<T> {
fn drop(&mut self) {
let front = self.front.get_mut();
loop {
// This null-check-by-pass is safe because we never store null in
// the front.
let front_nnptr = unsafe { bypass_null(*front) };
// This is safe because we are the only receiver left and the list
// will always have at least one node, even in the drop. Of course,
// unless we are the last side to drop (then we do drop it all).
let res = unsafe {
// Let's try to mark the next (which means we disconnected). We
// might fail because either this is not the last node or the
// sender already disconnected and marked this pointer.
front_nnptr.as_ref().next.compare_exchange(
null_mut(),
(null_mut::<Node<T>>() as usize | 1) as *mut _,
AcqRel,
Acquire,
)
};
match res {
// If it succeeded, we are the first side to disconnect and we
// must keep at least one node in the queue.
Ok(_) => break,
Err(next) => {
// Ok, safe to deallocate the front now. We already loaded
// the next field and it is not null.
// Either the queue won't be empty or the
// sender disconnected.
unsafe { OwnedAlloc::from_raw(front_nnptr) };
// This means the sender disconnected and we reached the end of
// the queue.
if next as usize & 1 == 1 {
break;
}
// Now let's keep going until the list is empty.
*front = next;
},
}
}
}
}
#[repr(align(/* at least */ 2))]
struct Node<T> {
message: Removable<T>,
// lower bit is 1 if the other side disconnected, 0 means nothing
next: AtomicPtr<Node<T>>,
}
make_shared_incin! {
{ "`spmc::Receiver`" }
pub SharedIncin<T> of OwnedAlloc<Node<T>>
}
#[cfg(test)]
mod test {
use channel::spmc;
use std::{
sync::{
atomic::{AtomicBool, Ordering::*},
Arc,
},
thread,
};
#[test]
fn correct_numbers() {
const THREADS: usize = 8;
const MSGS: usize = 512;
let mut done = Vec::with_capacity(MSGS);
for _ in 0 .. MSGS {
done.push(AtomicBool::new(false));
}
let done = Arc::<[AtomicBool]>::from(done);
let (mut sender, receiver) = spmc::create::<usize>();
let mut threads = Vec::with_capacity(THREADS);
for _ in 0 .. THREADS {
let done = done.clone();
let receiver = receiver.clone();
threads.push(thread::spawn(move || loop {
match receiver.recv() {
Ok(i) => assert!(!done[i].swap(true, AcqRel)),
Err(spmc::NoSender) => break,
Err(spmc::NoMessage) => (),
}
}))
}
for i in 0 .. MSGS {
sender.send(i).unwrap();
}
drop(sender);
for thread in threads {
thread.join().unwrap();
}
for status in done.iter() {
assert!(status.load(Relaxed));
}
}
}
| Sender | identifier_name |
spmc.rs | pub use super::{
NoRecv,
RecvErr::{self, *},
};
use incin::Pause;
use owned_alloc::OwnedAlloc;
use ptr::{bypass_null, check_null_align};
use removable::Removable;
use std::{
fmt,
ptr::{null_mut, NonNull},
sync::{
atomic::{AtomicPtr, Ordering::*},
Arc,
},
};
/// Creates an asynchronous lock-free Single-Producer-Multi-Consumer (SPMC)
/// channel. In order to allow multiple consumers, [`Receiver`] is clonable and
/// does not require mutability.
pub fn create<T>() -> (Sender<T>, Receiver<T>) {
with_incin(SharedIncin::new())
}
/// Same as [`create`], but uses a passed incinerator instead of creating a new
/// one.
pub fn with_incin<T>(incin: SharedIncin<T>) -> (Sender<T>, Receiver<T>) {
check_null_align::<Node<T>>();
// First we create a single node shared between two ends.
let alloc = OwnedAlloc::new(Node {
message: Removable::empty(),
next: AtomicPtr::new(null_mut()),
});
let single_node = alloc.into_raw();
// Then put it on both the back and the front.
let sender = Sender { back: single_node };
let receiver = Receiver {
inner: Arc::new(ReceiverInner {
front: AtomicPtr::new(single_node.as_ptr()),
incin,
}),
};
(sender, receiver)
}
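// A minimal usage sketch (added for illustration, not part of the original
// file). It relies only on `create`, `send` and `recv` as defined here:
// queued messages outlive the sender, and `NoSender` is only reported once
// the queue is drained.
#[allow(dead_code)]
fn _usage_sketch() {
let (mut sender, receiver) = create::<u32>();
sender.send(1).unwrap();
sender.send(2).unwrap();
// Messages already queued survive the sender's disconnection.
drop(sender);
assert_eq!(receiver.recv().ok(), Some(1));
assert_eq!(receiver.recv().ok(), Some(2));
// Only after draining does the receiver observe `NoSender`.
match receiver.recv() {
Err(RecvErr::NoSender) => (),
_ => panic!("expected NoSender after drain and drop"),
}
}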
/// The [`Sender`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function.
pub struct Sender<T> {
back: NonNull<Node<T>>,
}
impl<T> Sender<T> {
/// Sends a message; if the receiver disconnected, an error is returned.
pub fn send(&mut self, message: T) -> Result<(), NoRecv<T>> {
// First we allocate the node for our message.
let alloc = OwnedAlloc::new(Node {
message: Removable::new(message),
next: AtomicPtr::new(null_mut()),
});
let nnptr = alloc.into_raw();
// This dereference is safe because the queue has at least one node. We
// possess a single node in the back, and if the queue has just one
// node, it is stored in the back (and in the front). Also, we are the
// only ones with access to the back.
let res = unsafe {
// We try to update the back's next pointer. We want to catch any
// bit marking here. A marked lower bit means the receiver
// disconnected.
self.back.as_ref().next.compare_exchange(
null_mut(),
nnptr.as_ptr(),
Release,
Relaxed,
)
};
if res.is_ok() {
// If we succeeded, let's update the back so we keep the invariant
// "the back has a single node".
self.back = nnptr;
Ok(())
} else {
// If we failed, receiver disconnected. It is safe to dealloc
// because this is the node we just allocated, and we did not share
// it with anyone (cas failed).
let mut alloc = unsafe { OwnedAlloc::from_raw(nnptr) };
let message = alloc.message.replace(None).unwrap();
Err(NoRecv { message })
}
}
/// Tests if there are any [`Receiver`]s still connected. There are no
/// guarantees that [`send`](Sender::send) will succeed if this method
/// returns `true` because the [`Receiver`] may disconnect meanwhile.
pub fn is_connected(&self) -> bool |
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
// This dereference is safe because the queue always has at least one
// node. This single node is only dropped when the last side to
// disconnect drops.
let res = unsafe {
// Let's try to mark next's bit so that receiver will see we
// disconnected, if it hasn't disconnected by itself. It is ok to
// just swap, since we have only two possible values (null and
// null | 1) and everyone will be setting to the same value
// (null | 1).
self.back
.as_ref()
.next
.swap((null_mut::<Node<T>>() as usize | 1) as *mut _, Relaxed)
};
// If the previously stored value was not null, receiver has already
// disconnected. It is safe to drop because we are the only ones that
// have a pointer to the node.
if !res.is_null() {
unsafe { OwnedAlloc::from_raw(self.back) };
}
}
}
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
fmtr.write_str("spmc::Sender")
}
}
unsafe impl<T> Send for Sender<T> where T: Send {}
unsafe impl<T> Sync for Sender<T> where T: Send {}
/// The [`Receiver`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function. It is clonable and does not require mutability.
pub struct Receiver<T> {
inner: Arc<ReceiverInner<T>>,
}
impl<T> Receiver<T> {
/// Tries to receive a message. If no message is available,
/// [`Err`]`(`[`RecvErr::NoMessage`]`)` is returned. If the sender
/// disconnected, [`Err`]`(`[`RecvErr::NoSender`]`)` is returned.
#[allow(unused_must_use)]
pub fn recv(&self) -> Result<T, RecvErr> {
// We have to pause the incinerator due to ABA problem. This channel
// suffers from it, yeah.
let pause = self.inner.incin.inner.pause();
// Bypassing null check is safe because we never store null in
// the front.
let mut front_nnptr = unsafe {
// First we load pointer stored in the front.
bypass_null(self.inner.front.load(Relaxed))
};
loop {
// Let's remove the node logically first. Safe to dereference this
// pointer because we paused the incinerator and we only
// delete nodes via incinerator.
match unsafe { front_nnptr.as_ref().message.take(AcqRel) } {
Some(val) => {
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are
// passing.
unsafe { self.try_clear_first(front_nnptr, &pause) };
break Ok(val);
},
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are passing.
None => unsafe {
front_nnptr = self.try_clear_first(front_nnptr, &pause)?;
},
}
}
}
/// Tests if there are any [`Sender`]s still connected. There are no
/// guarantees that [`recv`](Receiver::recv) will succeed if this method
/// returns `true` because the [`Sender`] may disconnect meanwhile.
/// This method may also return `true` if the [`Sender`] disconnected
/// but there are messages pending in the buffer. Note that another
/// [`Receiver`] may pop out the pending messages after this method was
/// called.
pub fn is_connected(&self) -> bool {
// We need this pause to protect against use-after-free.
let _pause = self.inner.incin.inner.pause();
// Safe to dereference this pointer because we paused the incinerator and we
// only delete nodes via incinerator.
let front = unsafe { &*self.inner.front.load(Relaxed) };
front.message.is_present(Relaxed)
|| front.next.load(Relaxed) as usize & 1 == 0
}
/// The shared incinerator used by this [`Receiver`].
pub fn incin(&self) -> SharedIncin<T> {
self.inner.incin.clone()
}
// This function is unsafe because passing the wrong pointer will lead to
// undefined behavior. The pointer must have been loaded from the front
// during the passed pause.
unsafe fn try_clear_first(
&self,
expected: NonNull<Node<T>>,
pause: &Pause<OwnedAlloc<Node<T>>>,
) -> Result<NonNull<Node<T>>, RecvErr> {
let next = expected.as_ref().next.load(Acquire);
if next as usize & 1 == 1 {
// If the next is bit flagged, sender disconnected, no more messages
// ever.
Err(RecvErr::NoSender)
} else if next.is_null() {
// No bit flag means sender is still there but we have no message.
Err(RecvErr::NoMessage)
} else {
let ptr = expected.as_ptr();
// We are not obliged to succeed. This is just cleanup and some other
// thread might do it.
let next = match self
.inner
.front
.compare_exchange(ptr, next, Relaxed, Relaxed)
{
Ok(_) => {
// Only deleting nodes via incinerator due to ABA
// problem and use-after-frees.
pause.add_to_incin(OwnedAlloc::from_raw(expected));
next
},
Err(found) => found,
};
// Safe to by-pass the check since we only store non-null
// pointers on the front.
Ok(bypass_null(next))
}
}
}
impl<T> Clone for Receiver<T> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
write!(fmtr, "spmc::Receiver {} ptr: {:p} {}", '{', self.inner, '}')
}
}
unsafe impl<T> Send for Receiver<T> where T: Send {}
unsafe impl<T> Sync for Receiver<T> where T: Send {}
struct ReceiverInner<T> {
// never null
front: AtomicPtr<Node<T>>,
incin: SharedIncin<T>,
}
impl<T> Drop for ReceiverInner<T> {
fn drop(&mut self) {
let front = self.front.get_mut();
loop {
// This null-check-by-pass is safe because we never store null in
// the front.
let front_nnptr = unsafe { bypass_null(*front) };
// This is safe because we are the only receiver left and the list
// will always have at least one node, even in the drop. Of course,
// unless we are the last side to drop (then we do drop it all).
let res = unsafe {
// Let's try to mark the next (which means we disconnected). We
// might fail because either this is not the last node or the
// sender already disconnected and marked this pointer.
front_nnptr.as_ref().next.compare_exchange(
null_mut(),
(null_mut::<Node<T>>() as usize | 1) as *mut _,
AcqRel,
Acquire,
)
};
match res {
// If it succeeded, we are the first side to disconnect and we
// must keep at least one node in the queue.
Ok(_) => break,
Err(next) => {
// Ok, safe to deallocate the front now. We already loaded
// the next field and it is not null.
// Either the queue won't be empty or the
// sender disconnected.
unsafe { OwnedAlloc::from_raw(front_nnptr) };
// This means the sender disconnected and we reached the end of
// the queue.
if next as usize & 1 == 1 {
break;
}
// Now let's keep going until the list is empty.
*front = next;
},
}
}
}
}
#[repr(align(/* at least */ 2))]
struct Node<T> {
message: Removable<T>,
// lower bit is 1 if the other side disconnected, 0 means nothing
next: AtomicPtr<Node<T>>,
}
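// Hedged illustration (added, not from the original source): how the low
// pointer bit doubles as the "disconnected" flag. The #[repr(align(2))]
// above guarantees real node addresses always have bit 0 clear, so
// `addr | 1` can never collide with a valid pointer.
#[allow(dead_code)]
fn _tag_bit_sketch<T>(next: *mut Node<T>) -> (bool, *mut Node<T>) {
// Bit 0 set means the other side disconnected.
let disconnected = next as usize & 1 == 1;
// Clearing bit 0 recovers the real (possibly null) node pointer.
let without_tag = (next as usize & !1) as *mut Node<T>;
(disconnected, without_tag)
}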
make_shared_incin! {
{ "`spmc::Receiver`" }
pub SharedIncin<T> of OwnedAlloc<Node<T>>
}
#[cfg(test)]
mod test {
use channel::spmc;
use std::{
sync::{
atomic::{AtomicBool, Ordering::*},
Arc,
},
thread,
};
#[test]
fn correct_numbers() {
const THREADS: usize = 8;
const MSGS: usize = 512;
let mut done = Vec::with_capacity(MSGS);
for _ in 0 .. MSGS {
done.push(AtomicBool::new(false));
}
let done = Arc::<[AtomicBool]>::from(done);
let (mut sender, receiver) = spmc::create::<usize>();
let mut threads = Vec::with_capacity(THREADS);
for _ in 0 .. THREADS {
let done = done.clone();
let receiver = receiver.clone();
threads.push(thread::spawn(move || loop {
match receiver.recv() {
Ok(i) => assert!(!done[i].swap(true, AcqRel)),
Err(spmc::NoSender) => break,
Err(spmc::NoMessage) => (),
}
}))
}
for i in 0 .. MSGS {
sender.send(i).unwrap();
}
drop(sender);
for thread in threads {
thread.join().unwrap();
}
for status in done.iter() {
assert!(status.load(Relaxed));
}
}
}
| {
// Safe because we always have at least one node, which is only dropped
// when the last side to disconnect drops.
let back = unsafe { self.back.as_ref() };
back.next.load(Relaxed).is_null()
} | identifier_body |
spmc.rs | pub use super::{
NoRecv,
RecvErr::{self, *},
};
use incin::Pause;
use owned_alloc::OwnedAlloc;
use ptr::{bypass_null, check_null_align};
use removable::Removable;
use std::{
fmt,
ptr::{null_mut, NonNull},
sync::{
atomic::{AtomicPtr, Ordering::*},
Arc,
},
};
/// Creates an asynchronous lock-free Single-Producer-Multi-Consumer (SPMC)
/// channel. In order to allow multiple consumers, [`Receiver`] is clonable and
/// does not require mutability.
pub fn create<T>() -> (Sender<T>, Receiver<T>) {
with_incin(SharedIncin::new())
}
/// Same as [`create`], but uses a passed incinerator instead of creating a new
/// one.
pub fn with_incin<T>(incin: SharedIncin<T>) -> (Sender<T>, Receiver<T>) {
check_null_align::<Node<T>>();
// First we create a single node shared between two ends.
let alloc = OwnedAlloc::new(Node {
message: Removable::empty(),
next: AtomicPtr::new(null_mut()),
});
let single_node = alloc.into_raw();
// Then put it on both the back and the front.
let sender = Sender { back: single_node };
let receiver = Receiver {
inner: Arc::new(ReceiverInner {
front: AtomicPtr::new(single_node.as_ptr()),
incin,
}),
};
(sender, receiver)
}
/// The [`Sender`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function.
pub struct Sender<T> {
back: NonNull<Node<T>>,
}
impl<T> Sender<T> {
/// Sends a message; if the receiver disconnected, an error is returned.
pub fn send(&mut self, message: T) -> Result<(), NoRecv<T>> {
// First we allocate the node for our message.
let alloc = OwnedAlloc::new(Node {
message: Removable::new(message),
next: AtomicPtr::new(null_mut()),
});
let nnptr = alloc.into_raw();
// This dereference is safe because the queue has at least one node. We
// possess a single node in the back, and if the queue has just one
// node, it is stored in the back (and in the front). Also, we are the
// only ones with access to the back.
let res = unsafe {
// We try to update the back's next pointer. We want to catch any
// bit marking here. A marked lower bit means the receiver
// disconnected.
self.back.as_ref().next.compare_exchange(
null_mut(),
nnptr.as_ptr(),
Release,
Relaxed,
)
};
if res.is_ok() {
// If we succeeded, let's update the back so we keep the invariant
// "the back has a single node".
self.back = nnptr;
Ok(())
} else {
// If we failed, receiver disconnected. It is safe to dealloc
// because this is the node we just allocated, and we did not share
// it with anyone (cas failed).
let mut alloc = unsafe { OwnedAlloc::from_raw(nnptr) };
let message = alloc.message.replace(None).unwrap();
Err(NoRecv { message })
}
}
/// Tests if there are any [`Receiver`]s still connected. There are no
/// guarantees that [`send`](Sender::send) will succeed if this method
/// returns `true` because the [`Receiver`] may disconnect meanwhile.
pub fn is_connected(&self) -> bool {
// Safe because we always have at least one node, which is only dropped
// when the last side to disconnect drops.
let back = unsafe { self.back.as_ref() };
back.next.load(Relaxed).is_null()
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
// This dereference is safe because the queue always has at least one
// node. This single node is only dropped when the last side to
// disconnect drops.
let res = unsafe {
// Let's try to mark next's bit so that receiver will see we
// disconnected, if it hasn't disconnected by itself. It is ok to
// just swap, since we have only two possible values (null and
// null | 1) and everyone will be setting to the same value
// (null | 1).
self.back
.as_ref()
.next
.swap((null_mut::<Node<T>>() as usize | 1) as *mut _, Relaxed)
};
// If the previously stored value was not null, receiver has already
// disconnected. It is safe to drop because we are the only ones that
// have a pointer to the node.
if !res.is_null() {
unsafe { OwnedAlloc::from_raw(self.back) };
}
}
}
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
fmtr.write_str("spmc::Sender")
}
}
unsafe impl<T> Send for Sender<T> where T: Send {}
unsafe impl<T> Sync for Sender<T> where T: Send {}
/// The [`Receiver`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function. It is clonable and does not require mutability.
pub struct Receiver<T> {
inner: Arc<ReceiverInner<T>>,
}
impl<T> Receiver<T> {
/// Tries to receive a message. If no message is available,
/// [`Err`]`(`[`RecvErr::NoMessage`]`)` is returned. If the sender
/// disconnected, [`Err`]`(`[`RecvErr::NoSender`]`)` is returned.
#[allow(unused_must_use)]
pub fn recv(&self) -> Result<T, RecvErr> {
// We have to pause the incinerator due to ABA problem. This channel
// suffers from it, yeah.
let pause = self.inner.incin.inner.pause();
// Bypassing null check is safe because we never store null in
// the front.
let mut front_nnptr = unsafe {
// First we load pointer stored in the front.
bypass_null(self.inner.front.load(Relaxed))
};
loop {
// Let's remove the node logically first. Safe to dereference this
// pointer because we paused the incinerator and we only
// delete nodes via incinerator.
match unsafe { front_nnptr.as_ref().message.take(AcqRel) } {
Some(val) => {
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are
// passing.
unsafe { self.try_clear_first(front_nnptr, &pause) };
break Ok(val);
},
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are passing.
None => unsafe {
front_nnptr = self.try_clear_first(front_nnptr, &pause)?;
},
}
}
}
/// Tests if there are any [`Sender`]s still connected. There are no
/// guarantees that [`recv`](Receiver::recv) will succeed if this method
/// returns `true` because the [`Sender`] may disconnect meanwhile.
/// This method may also return `true` if the [`Sender`] disconnected
/// but there are messages pending in the buffer. Note that another
/// [`Receiver`] may pop out the pending messages after this method was
/// called.
pub fn is_connected(&self) -> bool {
// We need this pause to protect against use-after-free.
let _pause = self.inner.incin.inner.pause();
// Safe to dereference this pointer because we paused the incinerator and we
// only delete nodes via incinerator.
let front = unsafe { &*self.inner.front.load(Relaxed) };
front.message.is_present(Relaxed)
|| front.next.load(Relaxed) as usize & 1 == 0
}
/// The shared incinerator used by this [`Receiver`].
pub fn incin(&self) -> SharedIncin<T> {
self.inner.incin.clone()
}
// This function is unsafe because passing the wrong pointer will lead to
// undefined behavior. The pointer must have been loaded from the front
// during the passed pause.
unsafe fn try_clear_first(
&self,
expected: NonNull<Node<T>>,
pause: &Pause<OwnedAlloc<Node<T>>>,
) -> Result<NonNull<Node<T>>, RecvErr> {
let next = expected.as_ref().next.load(Acquire);
if next as usize & 1 == 1 {
// If the next is bit flagged, sender disconnected, no more messages
// ever.
Err(RecvErr::NoSender)
} else if next.is_null() {
// No bit flag means sender is still there but we have no message.
Err(RecvErr::NoMessage)
} else |
}
}
impl<T> Clone for Receiver<T> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
write!(fmtr, "spmc::Receiver {} ptr: {:p} {}", '{', self.inner, '}')
}
}
unsafe impl<T> Send for Receiver<T> where T: Send {}
unsafe impl<T> Sync for Receiver<T> where T: Send {}
struct ReceiverInner<T> {
// never null
front: AtomicPtr<Node<T>>,
incin: SharedIncin<T>,
}
impl<T> Drop for ReceiverInner<T> {
fn drop(&mut self) {
let front = self.front.get_mut();
loop {
// This null-check-by-pass is safe because we never store null in
// the front.
let front_nnptr = unsafe { bypass_null(*front) };
// This is safe because we are the only receiver left and the list
// will always have at least one node, even in the drop. Of course,
// unless we are the last side to drop (then we do drop it all).
let res = unsafe {
// Let's try to mark the next (which means we disconnected). We
// might fail because either this is not the last node or the
// sender already disconnected and marked this pointer.
front_nnptr.as_ref().next.compare_exchange(
null_mut(),
(null_mut::<Node<T>>() as usize | 1) as *mut _,
AcqRel,
Acquire,
)
};
match res {
// If it succeeded, we are the first side to disconnect and we
// must keep at least one node in the queue.
Ok(_) => break,
Err(next) => {
// Ok, safe to deallocate the front now. We already loaded
// the next field and it is not null.
// Either the queue won't be empty or the
// sender disconnected.
unsafe { OwnedAlloc::from_raw(front_nnptr) };
// This means the sender disconnected and we reached the end of
// the queue.
if next as usize & 1 == 1 {
break;
}
// Now let's keep going until the list is empty.
*front = next;
},
}
}
}
}
#[repr(align(/* at least */ 2))]
struct Node<T> {
message: Removable<T>,
// lower bit is 1 if the other side disconnected, 0 means nothing
next: AtomicPtr<Node<T>>,
}
make_shared_incin! {
{ "`spmc::Receiver`" }
pub SharedIncin<T> of OwnedAlloc<Node<T>>
}
#[cfg(test)]
mod test {
use channel::spmc;
use std::{
sync::{
atomic::{AtomicBool, Ordering::*},
Arc,
},
thread,
};
#[test]
fn correct_numbers() {
const THREADS: usize = 8;
const MSGS: usize = 512;
let mut done = Vec::with_capacity(MSGS);
for _ in 0 .. MSGS {
done.push(AtomicBool::new(false));
}
let done = Arc::<[AtomicBool]>::from(done);
let (mut sender, receiver) = spmc::create::<usize>();
let mut threads = Vec::with_capacity(THREADS);
for _ in 0 .. THREADS {
let done = done.clone();
let receiver = receiver.clone();
threads.push(thread::spawn(move || loop {
match receiver.recv() {
Ok(i) => assert!(!done[i].swap(true, AcqRel)),
Err(spmc::NoSender) => break,
Err(spmc::NoMessage) => (),
}
}))
}
for i in 0 .. MSGS {
sender.send(i).unwrap();
}
drop(sender);
for thread in threads {
thread.join().unwrap();
}
for status in done.iter() {
assert!(status.load(Relaxed));
}
}
}
| {
let ptr = expected.as_ptr();
// We are not obliged to succeed. This is just cleanup and some other
// thread might do it.
let next = match self
.inner
.front
.compare_exchange(ptr, next, Relaxed, Relaxed)
{
Ok(_) => {
// Only deleting nodes via incinerator due to ABA
// problem and use-after-frees.
pause.add_to_incin(OwnedAlloc::from_raw(expected));
next
},
Err(found) => found,
};
// Safe to by-pass the check since we only store non-null
// pointers on the front.
Ok(bypass_null(next))
} | conditional_block |
spmc.rs | pub use super::{
NoRecv,
RecvErr::{self, *},
};
use incin::Pause;
use owned_alloc::OwnedAlloc;
use ptr::{bypass_null, check_null_align};
use removable::Removable;
use std::{
fmt,
ptr::{null_mut, NonNull},
sync::{
atomic::{AtomicPtr, Ordering::*},
Arc,
},
};
/// Creates an asynchronous lock-free Single-Producer-Multi-Consumer (SPMC)
/// channel. In order to allow multiple consumers, [`Receiver`] is clonable and
/// does not require mutability.
pub fn create<T>() -> (Sender<T>, Receiver<T>) {
with_incin(SharedIncin::new())
}
/// Same as [`create`], but uses a passed incinerator instead of creating a new
/// one.
pub fn with_incin<T>(incin: SharedIncin<T>) -> (Sender<T>, Receiver<T>) {
check_null_align::<Node<T>>();
// First we create a single node shared between two ends.
let alloc = OwnedAlloc::new(Node {
message: Removable::empty(),
next: AtomicPtr::new(null_mut()),
});
let single_node = alloc.into_raw();
// Then put it on both the back and the front.
let sender = Sender { back: single_node };
let receiver = Receiver {
inner: Arc::new(ReceiverInner {
front: AtomicPtr::new(single_node.as_ptr()),
incin,
}),
};
(sender, receiver)
}
/// The [`Sender`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function.
pub struct Sender<T> {
back: NonNull<Node<T>>,
}
impl<T> Sender<T> {
/// Sends a message; if the receiver disconnected, an error is returned.
pub fn send(&mut self, message: T) -> Result<(), NoRecv<T>> {
// First we allocate the node for our message.
let alloc = OwnedAlloc::new(Node {
message: Removable::new(message),
next: AtomicPtr::new(null_mut()),
});
let nnptr = alloc.into_raw();
// This dereference is safe because the queue has at least one node. We
// possess a single node in the back, and if the queue has just one
// node, it is stored in the back (and in the front). Also, we are the
// only ones with access to the back.
let res = unsafe {
// We try to update the back's next pointer. We want to catch any
// bit marking here. A marked lower bit means the receiver
// disconnected.
self.back.as_ref().next.compare_exchange(
null_mut(),
nnptr.as_ptr(),
Release,
Relaxed,
)
};
if res.is_ok() {
// If we succeeded, let's update the back so we keep the invariant
// "the back has a single node".
self.back = nnptr;
Ok(())
} else {
// If we failed, receiver disconnected. It is safe to dealloc
// because this is the node we just allocated, and we did not share
// it with anyone (cas failed).
let mut alloc = unsafe { OwnedAlloc::from_raw(nnptr) };
let message = alloc.message.replace(None).unwrap();
Err(NoRecv { message })
}
}
/// Tests if there are any [`Receiver`]s still connected. There are no
/// guarantees that [`send`](Sender::send) will succeed if this method
/// returns `true` because the [`Receiver`] may disconnect meanwhile.
pub fn is_connected(&self) -> bool {
// Safe because we always have at least one node, which is only dropped
// when the last side to disconnect drops.
let back = unsafe { self.back.as_ref() };
back.next.load(Relaxed).is_null()
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
// This dereference is safe because the queue always has at least one
// node. This single node is only dropped when the last side to
// disconnect drops.
let res = unsafe {
// Let's try to mark next's bit so that receiver will see we
// disconnected, if it hasn't disconnected by itself. It is ok to
// just swap, since we have only two possible values (null and
// null | 1) and everyone will be setting to the same value
// (null | 1).
self.back
.as_ref()
.next
.swap((null_mut::<Node<T>>() as usize | 1) as *mut _, Relaxed)
};
// If the previously stored value was not null, receiver has already
// disconnected. It is safe to drop because we are the only ones that
// have a pointer to the node.
if !res.is_null() {
unsafe { OwnedAlloc::from_raw(self.back) };
}
}
}
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
fmtr.write_str("spmc::Sender")
}
}
unsafe impl<T> Send for Sender<T> where T: Send {}
unsafe impl<T> Sync for Sender<T> where T: Send {}
/// The [`Receiver`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function. It is clonable and does not require mutability.
pub struct Receiver<T> {
inner: Arc<ReceiverInner<T>>,
}
impl<T> Receiver<T> {
/// Tries to receive a message. If no message is available,
/// [`Err`]`(`[`RecvErr::NoMessage`]`)` is returned. If the sender
/// disconnected, [`Err`]`(`[`RecvErr::NoSender`]`)` is returned.
#[allow(unused_must_use)]
pub fn recv(&self) -> Result<T, RecvErr> {
// We have to pause the incinerator due to ABA problem. This channel | // the front.
let mut front_nnptr = unsafe {
// First we load pointer stored in the front.
bypass_null(self.inner.front.load(Relaxed))
};
loop {
// Let's remove the node logically first. Safe to dereference this
// pointer because we paused the incinerator and we only
// delete nodes via incinerator.
match unsafe { front_nnptr.as_ref().message.take(AcqRel) } {
Some(val) => {
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are
// passing.
unsafe { self.try_clear_first(front_nnptr, &pause) };
break Ok(val);
},
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are passing.
None => unsafe {
front_nnptr = self.try_clear_first(front_nnptr, &pause)?;
},
}
}
}
/// Tests if there are any [`Sender`]s still connected. There are no
/// guarantees that [`recv`](Receiver::recv) will succeed if this method
/// returns `true` because the [`Sender`] may disconnect meanwhile.
/// This method may also return `true` if the [`Sender`] disconnected
/// but there are messages pending in the buffer. Note that another
/// [`Receiver`] may pop out the pending messages after this method was
/// called.
pub fn is_connected(&self) -> bool {
// We need this pause to protect against use-after-free.
let _pause = self.inner.incin.inner.pause();
// Safe to dereference this pointer because we paused the incinerator and we
// only delete nodes via incinerator.
let front = unsafe { &*self.inner.front.load(Relaxed) };
front.message.is_present(Relaxed)
|| front.next.load(Relaxed) as usize & 1 == 0
}
/// The shared incinerator used by this [`Receiver`].
pub fn incin(&self) -> SharedIncin<T> {
self.inner.incin.clone()
}
// This function is unsafe because passing the wrong pointer will lead to
// undefined behavior. The pointer must have been loaded from the front
// during the passed pause.
unsafe fn try_clear_first(
&self,
expected: NonNull<Node<T>>,
pause: &Pause<OwnedAlloc<Node<T>>>,
) -> Result<NonNull<Node<T>>, RecvErr> {
let next = expected.as_ref().next.load(Acquire);
if next as usize & 1 == 1 {
// If the next is bit flagged, sender disconnected, no more messages
// ever.
Err(RecvErr::NoSender)
} else if next.is_null() {
// No bit flag means sender is still there but we have no message.
Err(RecvErr::NoMessage)
} else {
let ptr = expected.as_ptr();
// We are not obliged to succeed. This is just cleanup and some other
// thread might do it.
let next = match self
.inner
.front
.compare_exchange(ptr, next, Relaxed, Relaxed)
{
Ok(_) => {
// Only deleting nodes via incinerator due to ABA
// problem and use-after-frees.
pause.add_to_incin(OwnedAlloc::from_raw(expected));
next
},
Err(found) => found,
};
// Safe to by-pass the check since we only store non-null
// pointers on the front.
Ok(bypass_null(next))
}
}
}
impl<T> Clone for Receiver<T> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
write!(fmtr, "spmc::Receiver {} ptr: {:p} {}", '{', self.inner, '}')
}
}
unsafe impl<T> Send for Receiver<T> where T: Send {}
unsafe impl<T> Sync for Receiver<T> where T: Send {}
struct ReceiverInner<T> {
// never null
front: AtomicPtr<Node<T>>,
incin: SharedIncin<T>,
}
impl<T> Drop for ReceiverInner<T> {
fn drop(&mut self) {
let front = self.front.get_mut();
loop {
// This null-check-by-pass is safe because we never store null in
// the front.
let front_nnptr = unsafe { bypass_null(*front) };
// This is safe because we are the only receiver left and the list
// will always have at least one node, even in the drop. Of course,
// unless we are the last side to drop (then we do drop it all).
let res = unsafe {
// Let's try to mark the next (which means we disconnected). We
// might fail because either this is not the last node or the
// sender already disconnected and marked this pointer.
front_nnptr.as_ref().next.compare_exchange(
null_mut(),
(null_mut::<Node<T>>() as usize | 1) as *mut _,
AcqRel,
Acquire,
)
};
match res {
// If it succeeded, we are the first side to disconnect and we
// must keep at least one node in the queue.
Ok(_) => break,
Err(next) => {
// Ok, safe to deallocate the front now. We already loaded
// the next field and it is not null.
// Either the queue won't be empty or the
// sender disconnected.
unsafe { OwnedAlloc::from_raw(front_nnptr) };
// This means the sender disconnected and we reached the end of
// the queue.
if next as usize & 1 == 1 {
break;
}
// Now let's keep going until the list is empty.
*front = next;
},
}
}
}
}
#[repr(align(/* at least */ 2))]
struct Node<T> {
message: Removable<T>,
// lower bit is 1 if the other side disconnected, 0 means nothing
next: AtomicPtr<Node<T>>,
}
make_shared_incin! {
{ "`spmc::Receiver`" }
pub SharedIncin<T> of OwnedAlloc<Node<T>>
}
#[cfg(test)]
mod test {
use channel::spmc;
use std::{
sync::{
atomic::{AtomicBool, Ordering::*},
Arc,
},
thread,
};
#[test]
fn correct_numbers() {
const THREADS: usize = 8;
const MSGS: usize = 512;
let mut done = Vec::with_capacity(MSGS);
for _ in 0 .. MSGS {
done.push(AtomicBool::new(false));
}
let done = Arc::<[AtomicBool]>::from(done);
let (mut sender, receiver) = spmc::create::<usize>();
let mut threads = Vec::with_capacity(THREADS);
for _ in 0 .. THREADS {
let done = done.clone();
let receiver = receiver.clone();
threads.push(thread::spawn(move || loop {
match receiver.recv() {
Ok(i) => assert!(!done[i].swap(true, AcqRel)),
Err(spmc::NoSender) => break,
Err(spmc::NoMessage) => (),
}
}))
}
for i in 0 .. MSGS {
sender.send(i).unwrap();
}
drop(sender);
for thread in threads {
thread.join().unwrap();
}
for status in done.iter() {
assert!(status.load(Relaxed));
}
}
} | // suffers from it, yeah.
let pause = self.inner.incin.inner.pause();
// Bypassing null check is safe because we never store null in | random_line_split |
threeHandle.js | import * as THREE from 'three'
import Stats from 'three/examples/jsm/libs/stats.module.js'
import { OBJLoader } from 'three/examples/jsm/loaders/OBJLoader.js'
import { MTLLoader } from 'three/examples/jsm/loaders/MTLLoader.js'
import { DDSLoader } from 'three/examples/jsm/loaders/DDSLoader'
import { FBXLoader } from 'three/examples/jsm/loaders/FBXLoader.js'
import { TGALoader } from 'three/examples/jsm/loaders/TGALoader.js'
import { LoadingManager } from 'three/src/loaders/LoadingManager'
import { STLLoader } from 'three/examples/jsm/loaders/STLLoader'
import { PLYLoader } from 'three/examples/jsm/loaders/PLYLoader'
// import { ObjectLoader, Mesh, MeshPhongMaterial } from 'three'
import { ObjectLoader } from 'three'
// const OrbitControls = require( 'three-orbit-controls' )( THREE ) | this.width = 500
this.height = 500
this.scene = null
this.light = null
this.camera = null
this.controls = null
this.renderer = null
this.fov = 60
this.mixer = null
this.Stats = null
this.manager = null
this.crossOrigin = 'anonymous'
this.requestHeader = {}
this.near = 1
this.far = 2000
}
async init( { container, width, height } ) {
this.scene = await this.setScene()
this.light = await this.setLgiht()
await this.scene.add( this.light )
this.width = width || 500
this.height = height || 500
this.camera = await this.setCamera()
this.renderer = await this.setRenderer()
this.renderer.setClearColor( 'rgb(92,92,92)', 1.0 )
this.controls = await this.setControls()
container.appendChild( this.renderer.domElement )
await this.setStats( container )
await this.setClock()
window.addEventListener( 'resize', this.onWindowResize, false )
return {
scene : this.scene,
light : this.light,
camera : this.camera,
renderer : this.renderer,
controls : this.controls,
stats : this.stats,
clock : this.clock
}
}
setScene() {
const scene = new THREE.Scene()
scene.add( new THREE.AmbientLight( 0x999999 ) )
return scene
}
setLgiht() {
const light = new THREE.DirectionalLight( 0xdfebff, 0.45 )
light.position.set( 50, 200, 100 )
light.position.multiplyScalar( 0.3 )
return light
}
setCamera() {
const camera = new THREE.PerspectiveCamera(
this.fov,
this.width / this.height,
this.near,
this.far
)
camera.position.set( 10, 90, 65 )
camera.up.set( 0, 1, 0 )
camera.lookAt( this.scene.position )
return camera
}
setRenderer() {
const renderer = new THREE.WebGLRenderer( {
alpha : true
} )
// For compatibility with high-DPI screens
renderer.setPixelRatio( window.devicePixelRatio )
renderer.setSize( this.width, this.height )
renderer.shadowMap.enabled = true
return renderer
}
setControls() {
const controls = new OrbitControls( this.camera, this.renderer.domElement )
controls.target.set( 0, 0, 0 )
controls.minDistance = 20
controls.maxDistance = 100000
controls.maxPolarAngle = Math.PI / 3
controls.update()
controls.enableDamping = true
controls.dampingFactor = 0.25
controls.enableZoom = true
controls.autoRotate = false
controls.enablePan = true
return controls
}
setStats( container ) {
this.stats = new Stats()
this.stats.domElement.style.position = 'absolute'
this.stats.domElement.style.left = '5px'
this.stats.domElement.style.top = '5px'
this.stats.domElement.style.display = 'none'
container.appendChild( this.stats.dom )
}
setClock() {
this.clock = new THREE.Clock()
}
onWindowResize() {
this.camera.aspect = this.width / this.height
this.camera.updateProjectionMatrix()
this.renderer.setSize( this.width, this.height )
}
render() {
this.renderer.render( this.scene, this.camera )
}
fbxAnimate() {
const delta = this.clock.getDelta()
if ( this.mixer ) this.mixer.update( delta )
this.stats.update()
}
// Draw a cube out of line segments
// v6----- v5
// /| /|
// v1------v0|
// | | | |
// | |v7---| |v4
// | / | /
// v2------v3
// v0 - v1 - v2 - v3 - v0 - v5 - v4 - v4 - v0 - v5 - v6 - v1 - v2 - v7 - v4 - v5 - v6 - v7
async drawCubeByLines( { width, height, depth } ) {
const objects = []
const geometryBox = await this.box( width, height, depth )
const lineSegments = await new THREE.LineSegments(
geometryBox,
await new THREE.LineDashedMaterial( {
color : 0xffaa00,
dashSize : 3,
gapSize : 1
} )
)
await lineSegments.computeLineDistances()
await objects.push( lineSegments )
await this.scene.add( lineSegments )
return lineSegments
}
async box( width, height, depth ) {
const geometry = await new THREE.BufferGeometry()
const position = [
-width,
-height,
-depth,
-width,
height,
-depth,
-width,
height,
-depth,
width,
height,
-depth,
width,
height,
-depth,
width,
-height,
-depth,
width,
-height,
-depth,
-width,
-height,
-depth,
-width,
-height,
depth,
-width,
height,
depth,
-width,
height,
depth,
width,
height,
depth,
width,
height,
depth,
width,
-height,
depth,
width,
-height,
depth,
-width,
-height,
depth,
-width,
-height,
-depth,
-width,
-height,
depth,
-width,
height,
-depth,
-width,
height,
depth,
width,
height,
-depth,
width,
height,
depth,
width,
-height,
-depth,
width,
-height,
depth
]
geometry.setAttribute(
'position',
await new THREE.Float32BufferAttribute( position, 3 )
)
return geometry
}
updateCamera( { x, y, z } ) {
this.camera.position.set( x, y, z )
this.camera.lookAt( this.scene.position )
}
setGridHelper(
size = 224,
divisions = 50,
color1 = 0x303030,
color2 = 0x303030
) {
const gridHelper = new THREE.GridHelper( size, divisions, color1, color2 )
return gridHelper
}
addAnimateFbx( object ) {
this.mixer = new THREE.AnimationMixer( object )
const len = object.animations.length // number of animations
if ( len > 0 ) {
const action = this.mixer.clipAction( object.animations[0] )
action.play()
}
return this.mixer
}
loadFbx( baseUrl, fn ) {
const loader = new FBXLoader( this.manager )
loader.setCrossOrigin( this.crossOrigin )
loader.setRequestHeader( this.requestHeader )
const that = this
loader.load(
baseUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
}
// Load a texture map for an FBX model
loadImage( url ) {
const loader = new THREE.TextureLoader()
const texturePlante = loader.load( url )
const material = new THREE.MeshPhongMaterial( {
map : texturePlante
} )
return material
}
loadObj( baseUrl, objUrl, materials, fn ) {
const loader = new OBJLoader( this.manager )
loader.setRequestHeader( this.requestHeader )
const that = this
if ( materials ) {
loader
.setMaterials( materials )
.setPath( baseUrl )
.load(
objUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
} else {
loader.setPath( baseUrl ).load(
objUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
}
}
loadMtl( baseUrl, mtlUrl, fn ) {
const loader = new MTLLoader( this.manager )
loader.setCrossOrigin( this.crossOrigin )
loader.setRequestHeader( this.requestHeader )
const that = this
loader.setPath( baseUrl ).load(
mtlUrl,
async( materials ) => {
materials.preload()
fn && fn( materials )
},
that.onProgress,
that.onError
)
}
loadJSON( baseUrl, fn ) {
const that = this
const loader = new ObjectLoader()
loader.setCrossOrigin( that.crossOrigin )
loader.setRequestHeader( that.requestHeader )
loader.load(
baseUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
}
loadStl( baseUrl, fn ) {
const that = this
const loader = new STLLoader()
loader.load(
baseUrl,
( geometry ) => {
const material = new THREE.MeshPhongMaterial( {
color : 0xff5533,
specular : 0x111111,
shininess : 200
} )
const object = new THREE.Mesh( geometry, material )
fn && fn( object )
},
that.onProgress,
that.onError
)
}
loadPly( baseUrl, fn ) {
const that = this
const loader = new PLYLoader()
loader.load(
baseUrl,
( geometry ) => {
// console.log( 'loadPly', geometry )
geometry.computeVertexNormals()
const loadGeometry = geometry.clone()
var material = new THREE.MeshStandardMaterial( {
color : 0x7fff00,
flatShading : true
} )
var mesh = new THREE.Mesh( loadGeometry, material )
mesh.translateZ( 5 )
mesh.translateY( 2 )
// scene.add(mesh);
// eslint-disable-next-line no-unused-vars
const mateial = new THREE.PointsMaterial( {
color : 0xffffff,
size : 0.05,
opacity : 0.5,
transparent : true,
blending : THREE.AdditiveBlending
// map: generateSprite()
} )
fn && fn( mesh )
},
that.onProgress,
that.onError
)
}
// https://threejs.org/docs/index.html?q=LoadingManager#api/en/loaders/managers/LoadingManager
loadManager( type = 'dds' ) {
let reg
let loader
// eslint-disable-next-line default-case
switch ( type ) {
case 'dds':
reg = /\.dds$/i
loader = new DDSLoader()
break
case 'tga':
reg = /\.tga$/i
loader = new TGALoader()
break
}
this.manager = new LoadingManager()
this.manager.addHandler( reg, loader )
}
async distoryEvent() {
window.removeEventListener( 'resize', this.onWindowResize, false )
}
async clearScene() {
const that = this
const groups = this.scene.children.filter( ( item ) => item.type == 'Group' )
const LineSegments = this.scene.children.filter(
( item ) => item.type == 'LineSegments'
)
that.getGroup( groups )
that.getGroup( LineSegments )
}
getGroup( groups ) {
const that = this
if ( groups.length > 0 ) {
for ( let i = 0; i < groups.length; i++ ) {
const currObj = groups[i]
if ( currObj instanceof THREE.Scene ) {
const children = currObj.children
for ( let j = 0; j < children.length; j++ ) {
that.deleteGroup( children[j] )
}
} else {
that.deleteGroup( currObj )
}
that.scene.remove( currObj )
}
}
}
deleteGroup( group ) {
if ( !group ) return
group.traverse( function( item ) {
if ( item instanceof THREE.Mesh ) {
item.geometry.dispose()
item.material.dispose()
}
} )
}
onProgress( xhr ) {
console.log( ( xhr.loaded / xhr.total ) * 100 + '% loaded' )
if ( xhr.lengthComputable ) {
// const percentComplete = xhr.loaded / xhr.total * 100
// console.log( Math.round( percentComplete, 2 ) + '% downloaded' )
}
}
onError() {
console.error( 'Error loading the model; please check that your model file is valid' )
}
// https://threejs.org/docs/index.html#api/zh/math/Box3.makeEmpty
// https://blog.csdn.net/ithanmang/article/details/82217963
async getSize( object ) {
const box = new THREE.Box3().setFromObject( object )
const boxSize = box.getSize( new THREE.Vector3() )
const length = boxSize.length()
const boxCenter = box.getCenter( new THREE.Vector3() )
return {
boxSize,
length,
boxCenter
}
}
getFitScaleValue( obj ) {
const boxHelper = new THREE.BoxHelper( obj )
boxHelper.geometry.computeBoundingBox()
const box = boxHelper.geometry.boundingBox
const maxDiameter = Math.max(
box.max.x - box.min.x,
box.max.y - box.min.y,
box.max.z - box.min.z
)
const size = this.camera.position.z / maxDiameter
return size
}
setScaleToFitSize( obj ) {
const scaleValue = this.getFitScaleValue( obj )
obj.scale.set( scaleValue, scaleValue, scaleValue )
return scaleValue
}
}
export default new ThreeHandle() | import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls'
class ThreeHandle {
constructor() { | random_line_split |
threeHandle.js |
import * as THREE from 'three'
import Stats from 'three/examples/jsm/libs/stats.module.js'
import { OBJLoader } from 'three/examples/jsm/loaders/OBJLoader.js'
import { MTLLoader } from 'three/examples/jsm/loaders/MTLLoader.js'
import { DDSLoader } from 'three/examples/jsm/loaders/DDSLoader'
import { FBXLoader } from 'three/examples/jsm/loaders/FBXLoader.js'
import { TGALoader } from 'three/examples/jsm/loaders/TGALoader.js'
import { LoadingManager } from 'three/src/loaders/LoadingManager'
import { STLLoader } from 'three/examples/jsm/loaders/STLLoader'
import { PLYLoader } from 'three/examples/jsm/loaders/PLYLoader'
// import { ObjectLoader, Mesh, MeshPhongMaterial } from 'three'
import { ObjectLoader } from 'three'
// const OrbitControls = require( 'three-orbit-controls' )( THREE )
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls'
class ThreeHandle {
constructor() {
this.width = 500
this.height = 500
this.scene = null
this.light = null
this.camera = null
this.controls = null
this.renderer = null
this.fov = 60
this.mixer = null
this.Stats = null
this.manager = null
this.crossOrigin = 'anonymous'
this.requestHeader = {}
this.near = 1
this.far = 2000
}
async init( { container, width, height } ) {
this.scene = await this.setScene()
this.light = await this.setLgiht()
await this.scene.add( this.light )
this.width = width || 500
this.height = height || 500
this.camera = await this.setCamera()
this.renderer = await this.setRenderer()
this.renderer.setClearColor( 'rgb(92,92,92)', 1.0 )
this.controls = await this.setControls()
container.appendChild( this.renderer.domElement )
await this.setStats( container )
await this.setClock()
window.addEventListener( 'resize', this.onWindowResize, false )
return {
scene : this.scene,
light : this.light,
camera : this.camera,
renderer : this.renderer,
controls : this.controls,
stats : this.stats,
clock : this.clock
}
}
setScene() {
const scene = new THREE.Scene()
scene.add( new THREE.AmbientLight( 0x999999 ) )
return scene
}
setLgiht() {
const light = new THREE.DirectionalLight( 0xdfebff, 0.45 )
light.position.set( 50, 200, 100 )
light.position.multiplyScalar( 0.3 )
return light
}
setCamera() {
const camera = new THREE.PerspectiveCamera(
this.fov,
this.width / this.height,
this.near,
this.far
)
camera.position.set( 10, 90, 65 )
camera.up.set( 0, 1, 0 )
camera.lookAt( this.scene.position )
return camera
}
setRenderer() {
const renderer = new THREE.WebGLRenderer( {
alpha : true
} )
// For compatibility with high-DPI screens
renderer.setPixelRatio( window.devicePixelRatio )
renderer.setSize( this.width, this.height )
renderer.shadowMap.enabled = true
return renderer
}
setControls() {
const controls = new OrbitControls( this.camera, this.renderer.domElement )
controls.target.set( 0, 0, 0 )
controls.minDistance = 20
controls.maxDistance = 100000
controls.maxPolarAngle = Math.PI / 3
controls.update()
controls.enableDamping = true
controls.dampingFactor = 0.25
controls.enableZoom = true
controls.autoRotate = false
controls.enablePan = true
return controls
}
setStats( container ) {
this.stats = new Stats()
this.stats.domElement.style.position = 'absolute'
this.stats.domElement.style.left = '5px'
this.stats.domElement.style.top = '5px'
this.stats.domElement.style.display = 'none'
container.appendChild( this.stats.dom )
}
setClock() {
this.clock = new THREE.Clock()
}
onWindowResize() {
this.camera.aspect = this.width / this.height
this.camera.updateProjectionMatrix()
this.renderer.setSize( this.width, this.height )
}
render() {
this.renderer.render( this.scene, this.camera )
}
fbxAnimate() {
const delta = this.clock.getDelta()
if ( this.mixer ) this.mixer.update( delta )
this.stats.update()
}
// Draw a cube out of line segments
// v6----- v5
// /| /|
// v1------v0|
// | | | |
// | |v7---| |v4
// | / | /
// v2------v3
// v0 - v1 - v2 - v3 - v0 - v5 - v4 - v4 - v0 - v5 - v6 - v1 - v2 - v7 - v4 - v5 - v6 - v7
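// Usage sketch (added, illustrative): with an initialized handle,
// const cube = await threeHandle.drawCubeByLines( { width : 20, height : 10, depth : 15 } )
// returns the THREE.LineSegments instance that was added to the scene.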
async drawCubeByLines( { width, height, depth } ) {
const objects = []
const geometryBox = await this.box( width, height, depth )
const lineSegments = await new THREE.LineSegments(
geometryBox,
await new THREE.LineDashedMaterial( {
color : 0xffaa00,
dashSize : 3,
gapSize : 1
} )
)
await lineSegments.computeLineDistances()
await objects.push( lineSegments )
await this.scene.add( lineSegments )
return lineSegments
}
async box( width, height, depth ) {
const geometry = await new THREE.BufferGeometry()
const position = [
-width,
-height,
-depth,
-width,
height,
-depth,
-width,
height,
-depth,
width,
height,
-depth,
width,
height,
-depth,
width,
-height,
-depth,
width,
-height,
-depth,
-width,
-height,
-depth,
-width,
-height,
depth,
-width,
height,
depth,
-width,
height,
depth,
width,
height,
depth,
width,
height,
depth,
width,
-height,
depth,
width,
-height,
depth,
-width,
-height,
depth,
-width,
-height,
-depth,
-width,
-height,
depth,
-width,
height,
-depth,
-width,
height,
depth,
width,
height,
-depth,
width,
height,
depth,
width,
-height,
-depth,
width,
-height,
depth
]
geometry.setAttribute(
'position',
await new THREE.Float32BufferAttribute( position, 3 )
)
return geometry
}
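// Note (added): the position list above encodes 24 vertices, i.e. 12
// start/end pairs (one per cube edge), which is exactly the layout
// THREE.LineSegments expects.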
updateCamera( { x, y, z } ) {
this.camera.position.set( x, y, z )
this.camera.lookAt( this.scene.position )
}
setGridHelper(
size = 224,
divisions = 50,
color1 = 0x303030,
color2 = 0x303030
) {
const gridHelper = new THREE.GridHelper( size, divisions, color1, color2 )
return gridHelper
}
addAnimateFbx( object ) {
this.mixer = new THREE.AnimationMixer( object )
const len = object.animations.length // number of animations
if ( len > 0 ) {
const action = this.mixer.clipAction( object.animations[0] )
action.play()
}
return this.mixer
}
loadFbx( baseUrl, fn ) {
const loader = new FBXLoader( this.manager )
loader.setCrossOrigin( this.crossOrigin )
loader.setRequestHeader( this.requestHeader )
const that = this
loader.load(
baseUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
}
// Load a texture map for an FBX model
loadImage( url ) {
const loader = new THREE.TextureLoader()
const texturePlante = loader.load( url )
const material = new THREE.MeshPhongMaterial( {
map : texturePlante
} )
return material
}
loadObj( baseUrl, objUrl, materials, fn ) {
const loader = new OBJLoader( this.manager )
loader.setRequestHeader( this.requestHeader )
const that = this
if ( materials ) {
loader
.setMaterials( materials )
.setPath( baseUrl )
.load(
objUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
} else {
loader.setPath( baseUrl ).load(
objUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
}
}
loadMtl( baseUrl, mtlUrl, fn ) {
const loader = new MTLLoader( this.manager )
loader.setCrossOrigin( this.crossOrigin )
loader.setRequestHeader( this.requestHeader )
const that = this
loader.setPath( baseUrl ).load(
mtlUrl,
async( materials ) => {
materials.preload()
fn && fn( materials )
},
that.onProgress,
that.onError
)
}
loadJSON( baseUrl, fn ) {
const that = this
const loader = new ObjectLoader()
loader.setCrossOrigin( that.crossOrigin )
loader.setRequestHeader( that.requestHeader )
loader.load(
baseUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
}
loadStl( baseUrl, fn ) {
const that = this
const loader = new STLLoader()
loader.load(
baseUrl,
( geometry ) => {
const material = new THREE.MeshPhongMaterial( {
color : 0xff5533,
specular : 0x111111,
shininess : 200
} )
const object = new THREE.Mesh( geometry, material )
fn && fn( object )
},
that.onProgress,
that.onError
)
}
loadPly( baseUrl, fn ) {
const that = this
const loader = new PLYLoader()
loader.load(
baseUrl,
( geometry ) => {
// console.log( 'loadPly', geometry )
geometry.computeVertexNormals()
const loadGeometry = geometry.clone()
var material = new THREE.MeshStandardMaterial( {
color : 0x7fff00,
flatShading : true
} )
var mesh = new THREE.Mesh( loadGeometry, material )
mesh.translateZ( 5 )
mesh.translateY( 2 )
// scene.add(mesh);
// eslint-disable-next-line no-unused-vars
const mateial = new THREE.PointsMaterial( {
color : 0xffffff,
size : 0.05,
opacity : 0.5,
transparent : true,
blending : THREE.AdditiveBlending
// map: generateSprite()
} )
fn && fn( mesh )
},
that.onProgress,
that.onError
)
}
// https://threejs.org/docs/index.html?q=LoadingManager#api/en/loaders/managers/LoadingManager
loadManager( type = 'dds' ) {
let reg
let loader
// eslint-disable-next-line default-case
switch ( type ) {
case 'dds':
reg = /\.dds$/i
loader = new DDSLoader()
break
case 'tga':
reg = /\.tga$/i
loader = new TGALoader()
break
}
this.manager = new LoadingManager()
this.manager.addHandler( reg, loader )
}
async distoryEvent() {
window.removeEventListener( 'resize', this.onWindowResize, false )
}
async clearScene() {
const that = this
const groups = this.scene.children.filter( ( item ) => item.type == 'Group' )
const LineSegments = this.scene.children.filter(
( item ) => item.type == 'LineSegments'
)
that.getGroup( groups )
that.getGroup( LineSegments )
}
getGroup( groups ) {
const that = this
if ( groups.length > 0 ) {
for ( let i = 0; i < groups.length; i++ ) {
const currObj = groups[i]
if ( currObj instanceof THREE.Scene ) {
const children = currObj.children
for ( let j = 0; j < children.length; j++ ) {
that.deleteGroup( children[j] )
}
} else {
that.deleteGroup( currObj )
}
that.scene.remove( currObj )
}
}
}
deleteGroup( group ) {
if ( !group ) return
group.traverse( function( item ) {
if ( item instanceof THREE.Mesh ) {
item.geometry.dispose()
item.material.dispose()
}
} )
}
onProgress( xhr ) {
console.log( ( xhr.loaded / xhr.total ) * 100 + '% loaded' )
if ( xhr.lengthComputable ) {
// const percentComplete = xhr.loaded / xhr.total * 100
// console.log( Math.round( percentComplete, 2 ) + '% downloaded' )
}
}
onError() {
console.error( 'Error loading the model; please check that your model file is valid |
// https://threejs.org/docs/index.html#api/zh/math/Box3.makeEmpty
// https://blog.csdn.net/ithanmang/article/details/82217963
async getSize( object ) {
const box = new THREE.Box3().setFromObject( object )
const boxSize = box.getSize( new THREE.Vector3() )
const length = boxSize.length()
const boxCenter = box.getCenter( new THREE.Vector3() )
return {
boxSize,
length,
boxCenter
}
}
getFitScaleValue( obj ) {
const boxHelper = new THREE.BoxHelper( obj )
boxHelper.geometry.computeBoundingBox()
const box = boxHelper.geometry.boundingBox
const maxDiameter = Math.max(
box.max.x - box.min.x,
box.max.y - box.min.y,
box.max.z - box.min.z
)
const size = this.camera.position.z / maxDiameter
return size
}
setScaleToFitSize( obj ) {
const scaleValue = this.getFitScaleValue( obj )
obj.scale.set( scaleValue, scaleValue, scaleValue )
return scaleValue
}
}
export default new ThreeHandle()
| ' )
} | identifier_name |
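// Hedged usage sketch (added for illustration, not part of the original
// module). The '#viewer' element and the render-loop wiring are
// assumptions, not taken from the source; only the ThreeHandle methods
// above are relied upon.
// eslint-disable-next-line no-unused-vars
async function _usageSketch() {
const handle = new ThreeHandle()
const container = document.getElementById( 'viewer' )
await handle.init( { container, width : 800, height : 600 } )
handle.scene.add( handle.setGridHelper() )
const animate = () => {
requestAnimationFrame( animate )
handle.controls.update()
handle.fbxAnimate()
handle.render()
}
animate()
}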
threeHandle.js |
import * as THREE from 'three'
import Stats from 'three/examples/jsm/libs/stats.module.js'
import { OBJLoader } from 'three/examples/jsm/loaders/OBJLoader.js'
import { MTLLoader } from 'three/examples/jsm/loaders/MTLLoader.js'
import { DDSLoader } from 'three/examples/jsm/loaders/DDSLoader'
import { FBXLoader } from 'three/examples/jsm/loaders/FBXLoader.js'
import { TGALoader } from 'three/examples/jsm/loaders/TGALoader.js'
import { LoadingManager } from 'three/src/loaders/LoadingManager'
import { STLLoader } from 'three/examples/jsm/loaders/STLLoader'
import { PLYLoader } from 'three/examples/jsm/loaders/PLYLoader'
// import { ObjectLoader, Mesh, MeshPhongMaterial } from 'three'
import { ObjectLoader } from 'three'
// const OrbitControls = require( 'three-orbit-controls' )( THREE )
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls'
class ThreeHandle {
constructor() {
this.width = 500
this.height = 500
this.scene = null
this.light = null
this.camera = null
this.controls = null
this.renderer = null
this.fov = 60
this.mixer = null
this.Stats = null
this.manager = null
this.crossOrigin = 'anonymous'
this.requestHeader = {}
this.near = 1
this.far = 2000
}
async init( { container, width, height } ) {
this.scene = await this.setScene()
this.light = await this.setLgiht()
await this.scene.add( this.light )
this.width = width || 500
this.height = height || 500
this.camera = await this.setCamera()
this.renderer = await this.setRenderer()
this.renderer.setClearColor( 'rgb(92,92,92)', 1.0 )
this.controls = await this.setControls()
container.appendChild( this.renderer.domElement )
await this.setStats( container )
await this.setClock()
window.addEventListener( 'resize', this.onWindowResize, false )
return {
scene : this.scene,
light : this.light,
camera : this.camera,
renderer : this.renderer,
controls : this.controls,
stats : this.stats,
clock : this.clock
}
}
setScene() {
const scene = new THREE.Scene()
scene.add( new THREE.AmbientLight( 0x999999 ) )
return scene
}
setLgiht() {
const light = new THREE.DirectionalLight( 0xdfebff, 0.45 )
light.position.set( 50, 200, 100 )
light.position.multiplyScalar( 0.3 )
return light
}
setCamera() {
const camera = new THREE.PerspectiveCamera(
this.fov,
this.width / this.height,
this.near,
this.far
)
camera.position.set( 10, 90, 65 )
camera.up.set( 0, 1, 0 )
camera.lookAt( this.scene.position )
return camera
}
setRenderer() {
const renderer = new THREE.WebGLRenderer( {
alpha : true
} )
// match devicePixelRatio for crisp rendering on high-DPI displays
renderer.setPixelRatio( window.devicePixelRatio )
renderer.setSize( this.width, this.height )
renderer.shadowMap.enabled = true
return renderer
}
setControls() {
const controls = new OrbitControls( this.camera, this.renderer.domElement )
controls.target.set( 0, 0, 0 )
controls.minDistance = 20
controls.maxDistance = 100000
controls.maxPolarAngle = Math.PI / 3
controls.update()
controls.enableDamping = true
controls.dampingFactor = 0.25
controls.enableZoom = true
controls.autoRotate = false
controls.enablePan = true
return controls
}
setStats( container ) {
this.stats = new Stats()
this.stats.domElement.style.position = 'absolute'
this.stats.domElement.style.left = '5px'
this.stats.domElement.style.top = '5px'
this.stats.domElement.style.display = 'none'
container.appendChild( this.stats.dom )
}
setClock() {
this.clock = new THREE.Clock()
}
onWindowResize() {
this.camera.aspect = this.width / this.height
this.camera.updateProjectionMatrix()
this.renderer.setSize( this.width, this.height )
}
render() {
this.renderer.render( this.scene, this.camera )
}
fbxAnimate() {
const delta = this.clock.getDelta()
if ( this.mixer ) this.mixer.update( delta )
this.stats.update()
}
// draw a cube outline with dashed line segments
// v6----- v5
// /| /|
// v1------v0|
// | | | |
// | |v7---| |v4
// | / | /
// v2------v3
// v0 - v1 - v2 - v3 - v0 - v5 - v4 - v4 - v0 - v5 - v6 - v1 - v2 - v7 - v4 - v5 - v6 - v7
async drawCubeByLines( { width, height, depth } ) {
const geometryBox = await this.box( width, height, depth )
const lineSegments = new THREE.LineSegments(
geometryBox,
new THREE.LineDashedMaterial( {
color : 0xffaa00,
dashSize : 3,
gapSize : 1
} )
)
lineSegments.computeLineDistances()
this.scene.add( lineSegments )
return lineSegments
}
async box( width, height, depth ) {
const geometry = new THREE.BufferGeometry()
// 24 vertices (two endpoints per edge), grouped as x, y, z triplets
const position = [
-width, -height, -depth, -width, height, -depth,
-width, height, -depth, width, height, -depth,
width, height, -depth, width, -height, -depth,
width, -height, -depth, -width, -height, -depth,
-width, -height, depth, -width, height, depth,
-width, height, depth, width, height, depth,
width, height, depth, width, -height, depth,
width, -height, depth, -width, -height, depth,
-width, -height, -depth, -width, -height, depth,
-width, height, -depth, -width, height, depth,
width, height, -depth, width, height, depth,
width, -height, -depth, width, -height, depth
]
geometry.setAttribute(
'position',
new THREE.Float32BufferAttribute( position, 3 )
)
return geometry
}
updateCamera( { x, y, z } ) {
this.camera.position.set( x, y, z )
this.camera.lookAt( this.scene.position )
}
setGridHelper(
size = 224,
divisions = 50,
color1 = 0x303030,
color2 = 0x303030
) {
const gridHelper = new THREE.GridHelper( size, divisions, color1, color2 )
return gridHelper
}
addAnimateFbx( object ) {
this.mixer = new THREE.AnimationMixer( object )
const len = object.animations.length // number of animation clips
if ( len > 0 ) {
const action = this.mixer.clipAction( object.animations[0] )
action.play()
}
return this.mixer
}
loadFbx( baseUrl, fn ) {
const loader = new FBXLoader( this | const loader = new THREE.TextureLoader()
const texture = loader.load( url )
const material = new THREE.MeshPhongMaterial( {
map : texture
} )
return material
}
loadObj( baseUrl, objUrl, materials, fn ) {
const loader = new OBJLoader( this.manager )
loader.setRequestHeader( this.requestHeader )
const that = this
if ( materials ) {
loader
.setMaterials( materials )
.setPath( baseUrl )
.load(
objUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
} else {
loader.setPath( baseUrl ).load(
objUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
}
}
loadMtl( baseUrl, mtlUrl, fn ) {
const loader = new MTLLoader( this.manager )
loader.setCrossOrigin( this.crossOrigin )
loader.setRequestHeader( this.requestHeader )
const that = this
loader.setPath( baseUrl ).load(
mtlUrl,
async( materials ) => {
materials.preload()
fn && fn( materials )
},
that.onProgress,
that.onError
)
}
loadJSON( baseUrl, fn ) {
const that = this
const loader = new ObjectLoader()
loader.setCrossOrigin( that.crossOrigin )
loader.setRequestHeader( that.requestHeader )
loader.load(
baseUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
}
loadStl( baseUrl, fn ) {
const that = this
const loader = new STLLoader()
loader.load(
baseUrl,
( geometry ) => {
const material = new THREE.MeshPhongMaterial( {
color : 0xff5533,
specular : 0x111111,
shininess : 200
} )
const object = new THREE.Mesh( geometry, material )
fn && fn( object )
},
that.onProgress,
that.onError
)
}
loadPly( baseUrl, fn ) {
const that = this
const loader = new PLYLoader()
loader.load(
baseUrl,
( geometry ) => {
// console.log( 'loadPly', geometry )
geometry.computeVertexNormals()
const loadGeometry = geometry.clone()
var material = new THREE.MeshStandardMaterial( {
color : 0x7fff00,
flatShading : true
} )
var mesh = new THREE.Mesh( loadGeometry, material )
mesh.translateZ( 5 )
mesh.translateY( 2 )
// scene.add(mesh);
// eslint-disable-next-line no-unused-vars
const pointsMaterial = new THREE.PointsMaterial( {
color : 0xffffff,
size : 0.05,
opacity : 0.5,
transparent : true,
blending : THREE.AdditiveBlending
// map: generateSprite()
} )
fn && fn( mesh )
},
that.onProgress,
that.onError
)
}
// https://threejs.org/docs/index.html?q=LoadingManager#api/en/loaders/managers/LoadingManager
loadManager( type = 'dds' ) {
let reg
let loader
// eslint-disable-next-line default-case
switch ( type ) {
case 'dds':
reg = /\.dds$/i
loader = new DDSLoader()
break
case 'tga':
reg = /\.tga$/i
loader = new TGALoader()
break
}
this.manager = new LoadingManager()
this.manager.addHandler( reg, loader )
}
async destroyEvent() {
window.removeEventListener( 'resize', this.onWindowResize, false )
}
async clearScene() {
const groups = this.scene.children.filter( ( item ) => item.type === 'Group' )
const lineSegments = this.scene.children.filter(
( item ) => item.type === 'LineSegments'
)
this.getGroup( groups )
this.getGroup( lineSegments )
}
getGroup( groups ) {
const that = this
if ( groups.length > 0 ) {
for ( let i = 0; i < groups.length; i++ ) {
const currObj = groups[i]
if ( currObj instanceof THREE.Scene ) {
const children = currObj.children
for ( let j = 0; j < children.length; j++ ) {
that.deleteGroup( children[j] )
}
} else {
that.deleteGroup( currObj )
}
that.scene.remove( currObj )
}
}
}
deleteGroup( group ) {
if ( !group ) return
group.traverse( function( item ) {
if ( item instanceof THREE.Mesh ) {
item.geometry.dispose()
item.material.dispose()
}
} )
}
onProgress( xhr ) {
// only report progress when the total size is known, to avoid a NaN ratio
if ( xhr.lengthComputable ) {
const percentComplete = ( xhr.loaded / xhr.total ) * 100
console.log( Math.round( percentComplete ) + '% loaded' )
}
}
onError() {
console.error( 'Failed to load the model. Please check that your model file is valid' )
}
// https://threejs.org/docs/index.html#api/zh/math/Box3.makeEmpty
// https://blog.csdn.net/ithanmang/article/details/82217963
async getSize( object ) {
const box = new THREE.Box3().setFromObject( object )
const boxSize = box.getSize( new THREE.Vector3() )
const length = boxSize.length()
const boxCenter = box.getCenter( new THREE.Vector3() )
return {
boxSize,
length,
boxCenter
}
}
getFitScaleValue( obj ) {
const boxHelper = new THREE.BoxHelper( obj )
boxHelper.geometry.computeBoundingBox()
const box = boxHelper.geometry.boundingBox
const maxDiameter = Math.max(
box.max.x - box.min.x,
box.max.y - box.min.y,
box.max.z - box.min.z
)
const size = this.camera.position.z / maxDiameter
return size
}
setScaleToFitSize( obj ) {
const scaleValue = this.getFitScaleValue( obj )
obj.scale.set( scaleValue, scaleValue, scaleValue )
return scaleValue
}
}
export default new ThreeHandle()
| .manager )
loader.setCrossOrigin( this.crossOrigin )
loader.setRequestHeader( this.requestHeader )
const that = this
loader.load(
baseUrl,
( object ) => {
fn && fn( object )
},
that.onProgress,
that.onError
)
}
// load a texture map for the fbx model
loadImage( url ) {
| identifier_body |
demo.py | #Copyright (c) 2018-2020 William Emerison Six
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
# PURPOSE
#
# Make the rotations work correctly by thinking about the problem
# more clearly
#
# In the previous demo, the initial translate is effectively canceled out,
# leaving a rotation and then a translation.
# Translate inverse(Translate) Rotate Translate
#
# Translate inverse(Translate) = Identity. i.e. 5 * 1/5 = 1,
# so we really just need to do a rotation first, and then a translation,
# but this can be counterintuitive at first because we like to think
# in relative terms.
# To understand why the code in this demo works, you can think
# about it in one of two ways. Either there is a sequence
# of function calls, all of which happen relative to the global
# origin; or, you can read the transformations backwards,
# where instead of doing operations on points, the operations
# all modify the current axis to a new relative axis,
# and all subsequent functions move those relative axes to
# new relative axes.
# Strong suggestion for computer graphics, especially from
# modelspace to global space:
# Read the transformations in the latter way.
# See the transformations below, and the associated animated gifs.
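# A quick numeric check of the argument above (illustrative only; the Vertex
# class used here is defined later in this file, so run it in a REPL once the
# class exists). Rotating the model-space point (10, 0) by 90 degrees and then
# translating by (-90, 0) gives (-90, 10); translating first and rotating
# afterwards gives (0, -80), which is the misplaced result the previous demo
# produced.
#
# >>> import math
# >>> v = Vertex(10.0, 0.0)
# >>> w = v.rotate(math.pi / 2).translate(tx=-90.0, ty=0.0)
# >>> (round(w.x, 6), round(w.y, 6))
# (-90.0, 10.0)
# >>> u = v.translate(tx=-90.0, ty=0.0).rotate(math.pi / 2)
# >>> (round(u.x, 6), round(u.y, 6))
# (-0.0, -80.0)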
import sys
import os
import numpy as np
import math
from OpenGL.GL import *
import glfw
if not glfw.init():
sys.exit()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR,1)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR,4)
window = glfw.create_window(500,
500,
"ModelViewProjection Demo 9",
None,
None)
if not window:
glfw.terminate()
sys.exit()
# Make the window's context current
glfw.make_context_current(window)
# Install a key handler
def on_key(window, key, scancode, action, mods):
if key == glfw.KEY_ESCAPE and action == glfw.PRESS:
glfw.set_window_should_close(window,1)
glfw.set_key_callback(window, on_key)
glClearColor(0.0,
0.0,
0.0,
1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def draw_in_square_viewport():
# clear to gray.
glClearColor(0.2, #r
0.2, #g
0.2, #b
1.0) #a
glClear(GL_COLOR_BUFFER_BIT)
width, height = glfw.get_framebuffer_size(window)
# figure out the minimum dimension of the window
min = width if width < height else height
# enable the scissor test so that the following clear affects only a
# restricted region of the framebuffer rather than every pixel.
glEnable(GL_SCISSOR_TEST)
glScissor(int((width - min)/2.0), #min x
int((height - min)/2.0), #min y
min, #width x
min) #width y
glClearColor(0.0, #r
0.0, #g
0.0, #b
1.0) #a
# gl clear will only update the square to black values.
glClear(GL_COLOR_BUFFER_BIT)
# disable the scissor test, so now any opengl calls will
# happen as usual.
glDisable(GL_SCISSOR_TEST)
# But, we only want to draw within the black square.
# We set the viewport, so that the NDC coordinates
# will be mapped to the region of screen coordinates
# that we care about, which is the black square.
glViewport(int(0.0 + (width - min)/2.0), #min x
int(0.0 + (height - min)/2.0), #min y
min, #width x
min) #width y
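# Worked example of the mapping above (numbers illustrative): for an 800x600
# framebuffer, min is 600, so the scissor/viewport square runs from (100, 0)
# to (700, 600); NDC coordinates are then mapped onto that centered square.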
class Vertex:
def __init__(self,x,y):
self.x = x
self.y = y
def __repr__(self):
return f"Vertex(x={repr(self.x)},y={repr(self.y)})"
def translate(self, tx, ty):
return Vertex(x=self.x + tx, y=self.y + ty)
def scale(self, scale_x, scale_y):
return Vertex(x=self.x * scale_x, y=self.y * scale_y)
def rotate(self,angle_in_radians):
return Vertex(x= self.x * math.cos(angle_in_radians) - self.y * math.sin(angle_in_radians),
y= self.x * math.sin(angle_in_radians) + self.y * math.cos(angle_in_radians))
# NEW
# removed rotate_around, as it was useless for our purpose
class Paddle:
def __init__(self,vertices, r, g, b, initial_position, rotation=0.0, input_offset_x=0.0, input_offset_y=0.0):
self.vertices = vertices
self.r = r
self.g = g
self.b = b
self.rotation = rotation
self.input_offset_x = input_offset_x
self.input_offset_y = input_offset_y
self.initial_position = initial_position
def __repr__(self):
return f"Paddle(vertices={repr(self.vertices)},r={repr(self.r)},g={repr(self.g)},b={repr(self.b)},initial_position={repr(self.initial_position)},rotation={repr(self.rotation)},input_offset_x={repr(self.input_offset_x)},input_offset_y={repr({self.input_offset_y})})"
paddle1 = Paddle(vertices=[Vertex(x=-10.0, y=-30.0),
Vertex(x= 10.0, y=-30.0),
Vertex(x= 10.0, y=30.0),
Vertex(x=-10.0, y=30.0)],
r=0.578123,
g=0.0,
b=1.0,
initial_position=Vertex(-90.0,0.0))
paddle2 = Paddle(vertices=[Vertex(x=-10.0, y=-30.0),
Vertex(x= 10.0, y=-30.0),
Vertex(x= 10.0, y=30.0),
Vertex(x=-10.0, y=30.0)],
r=1.0,
g=0.0,
b=0.0,
initial_position=Vertex(90.0,0.0))
def handle_movement_of_paddles():
|
TARGET_FRAMERATE = 60 # fps
# to try to standardize on 60 fps, compare times between frames
time_at_beginning_of_previous_frame = glfw.get_time()
# Loop until the user closes the window
while not glfw.window_should_close(window):
# poll the time to try to get a constant framerate
while glfw.get_time() < time_at_beginning_of_previous_frame + 1.0/TARGET_FRAMERATE:
pass
# set for comparison on the next frame
time_at_beginning_of_previous_frame = glfw.get_time()
# Poll for and process events
glfw.poll_events()
width, height = glfw.get_framebuffer_size(window)
glViewport(0, 0, width, height)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# render scene
draw_in_square_viewport()
handle_movement_of_paddles()
# draw paddle1
glColor3f(paddle1.r,
paddle1.g,
paddle1.b)
# if you read the operations below as rotate, translate1, translate2,
# you should imagine it as follows
# eog ../images/rotation1F.gif
# if instead you read them backwards, imagine the transformations
# as follows
# eog ../images/rotation1B.gif
# side note. Typically I use a debugger as an interactive evaluator,
# in order to understand how code which I do not understand works.
# In computer graphics, the debugger is of limited help because
# the transformations on the individual points are not worth
# thinking about, and therefore the intermediate results
# are worthless for reasoning.
#
# In order to be successful, I highly recommend reading the transformations
# backwards, with moving/rotating/scaled axes.
#
# (This advice will be modified when I introduce transformation stacks,
# but the same principle will apply. Also, on the note of transformation
# stacks, N.B. that the scaling from world space to ndc is shared
# for both paddles, and that changing the code in one place would
# require changing the code for all shapes.)
#
# I prefer to think graphically instead of symbolically.
# Another way you can think of this is to rotate the x axis
# and y axis, create graph paper (tick marks) along those new
# axes, and then draw the geometry on that new "basis",
# instead of the natural basis. (Natural basis just means
# the normal x and y axes).
# Think of a basis as an origin, a unit in various directions,
# and graph paper lines drawn. Then your geometry is drawn
# in that space.
# In revisiting demo 6's space, if we read all of the transformations
# below in order, it's following the order of function application.
#
# If instead we read the transformation between spaces backwards in code,
# (and going the opposite direction of the arrows), we can view a coordinate
# system that is changing (the origin can move, and axes can rotate/scale)
# eog ../images/demo06.png
# ALSO, see mvpVisualization/demo.py and mvpVisualization/demoAnimation.py.
# THESE WILL SHOW THE TRANSFORMATIONS backwards much more intuitively.
glBegin(GL_QUADS)
for model_space in paddle1.vertices:
world_space = model_space.rotate(paddle1.rotation) \
.translate(tx=paddle1.initial_position.x,
ty=paddle1.initial_position.y) \
.translate(tx=paddle1.input_offset_x,
ty=paddle1.input_offset_y)
ndc_space = world_space.scale(scale_x=1.0/100.0,
scale_y=1.0/100.0)
glVertex2f(ndc_space.x,
ndc_space.y)
glEnd()
# draw paddle2
glColor3f(paddle2.r,
paddle2.g,
paddle2.b)
# Same thing for the second paddle.
# eog ../images/rotation2F.gif
# eog ../images/rotation2B.gif
glBegin(GL_QUADS)
for model_space in paddle2.vertices:
world_space = model_space.rotate(paddle2.rotation) \
.translate(tx=paddle2.initial_position.x,
ty=paddle2.initial_position.y) \
.translate(tx=paddle2.input_offset_x,
ty=paddle2.input_offset_y)
ndc_space = world_space.scale(scale_x=1.0/100.0,
scale_y=1.0/100.0)
glVertex2f(ndc_space.x,
ndc_space.y)
glEnd()
# done with frame, flush and swap buffers
# Swap front and back buffers
glfw.swap_buffers(window)
glfw.terminate()
| global paddle1, paddle2
if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:
paddle1.input_offset_y -= 10.0
if glfw.get_key(window, glfw.KEY_W) == glfw.PRESS:
paddle1.input_offset_y += 10.0
if glfw.get_key(window, glfw.KEY_K) == glfw.PRESS:
paddle2.input_offset_y -= 10.0
if glfw.get_key(window, glfw.KEY_I) == glfw.PRESS:
paddle2.input_offset_y += 10.0
if glfw.get_key(window, glfw.KEY_A) == glfw.PRESS:
paddle1.rotation += 0.1
if glfw.get_key(window, glfw.KEY_D) == glfw.PRESS:
paddle1.rotation -= 0.1
if glfw.get_key(window, glfw.KEY_J) == glfw.PRESS:
paddle2.rotation += 0.1
if glfw.get_key(window, glfw.KEY_L) == glfw.PRESS:
paddle2.rotation -= 0.1 | identifier_body |
demo.py | #Copyright (c) 2018-2020 William Emerison Six
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
# PURPOSE
#
# Make the rotations work correctly by thinking about the problem
# more clearly
#
# In the previous demo, the initial translate is effectively canceled out,
# leaving a rotation and then a translation.
# Translate inverse(Translate) Rotate Translate
#
# Translate inverse(Translate) = Identity. i.e. 5 * 1/5 = 1,
# so we really just need to do a rotation first, and then a translation,
# but this can be counterintuitive at first because we like to think
# in relative terms.
# To understand why the code in this demo works, you can think
# about it in one of two ways. Either there is a sequence
# of function calls, all of which happen relative to the global
# origin; or, you can read the transformations backwards,
# where instead of doing operations on points, the operations
# all modify the current axis to a new relative axis,
# and all subsequent functions move those relative axes to
# new relative axes.
# Strong suggestion for computer graphics, especially from
# modelspace to global space:
# Read the transformations in the latter way.
# See the transformations below, and the associated animated gifs.
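# A quick numeric check of the argument above (illustrative only; the Vertex
# class used here is defined later in this file, so run it in a REPL once the
# class exists). Rotating the model-space point (10, 0) by 90 degrees and then
# translating by (-90, 0) gives (-90, 10); translating first and rotating
# afterwards gives (0, -80), which is the misplaced result the previous demo
# produced.
#
# >>> import math
# >>> v = Vertex(10.0, 0.0)
# >>> w = v.rotate(math.pi / 2).translate(tx=-90.0, ty=0.0)
# >>> (round(w.x, 6), round(w.y, 6))
# (-90.0, 10.0)
# >>> u = v.translate(tx=-90.0, ty=0.0).rotate(math.pi / 2)
# >>> (round(u.x, 6), round(u.y, 6))
# (-0.0, -80.0)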
import sys
import os
import numpy as np
import math
from OpenGL.GL import *
import glfw
if not glfw.init():
sys.exit()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR,1)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR,4)
window = glfw.create_window(500,
500,
"ModelViewProjection Demo 9",
None,
None)
if not window:
glfw.terminate()
sys.exit()
# Make the window's context current
glfw.make_context_current(window)
# Install a key handler
def on_key(window, key, scancode, action, mods):
if key == glfw.KEY_ESCAPE and action == glfw.PRESS:
glfw.set_window_should_close(window,1)
glfw.set_key_callback(window, on_key)
glClearColor(0.0,
0.0,
0.0,
1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def draw_in_square_viewport():
# clear to gray.
glClearColor(0.2, #r
0.2, #g
0.2, #b
1.0) #a
glClear(GL_COLOR_BUFFER_BIT)
width, height = glfw.get_framebuffer_size(window)
# figure out the minimum dimension of the window
min = width if width < height else height
# enable the scissor test so that the following clear affects only a
# restricted region of the framebuffer rather than every pixel.
glEnable(GL_SCISSOR_TEST)
glScissor(int((width - min)/2.0), #min x
int((height - min)/2.0), #min y
min, #width x
min) #width y
glClearColor(0.0, #r
0.0, #g
0.0, #b
1.0) #a
# gl clear will only update the square to black values.
glClear(GL_COLOR_BUFFER_BIT)
# disable the scissor test, so now any opengl calls will
# happen as usual.
glDisable(GL_SCISSOR_TEST)
# But, we only want to draw within the black square.
# We set the viewport, so that the NDC coordinates
# will be mapped to the region of screen coordinates
# that we care about, which is the black square.
glViewport(int(0.0 + (width - min)/2.0), #min x
int(0.0 + (height - min)/2.0), #min y
min, #width x
min) #width y
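# Worked example of the mapping above (numbers illustrative): for an 800x600
# framebuffer, min is 600, so the scissor/viewport square runs from (100, 0)
# to (700, 600); NDC coordinates are then mapped onto that centered square.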
class Vertex:
def __init__(self,x,y):
self.x = x
self.y = y
def __repr__(self):
return f"Vertex(x={repr(self.x)},y={repr(self.y)})"
def translate(self, tx, ty):
return Vertex(x=self.x + tx, y=self.y + ty)
def scale(self, scale_x, scale_y):
return Vertex(x=self.x * scale_x, y=self.y * scale_y)
def rotate(self,angle_in_radians):
return Vertex(x= self.x * math.cos(angle_in_radians) - self.y * math.sin(angle_in_radians),
y= self.x * math.sin(angle_in_radians) + self.y * math.cos(angle_in_radians))
# NEW
# removed rotate_around, as it was useless for our purpose
class Paddle:
def __init__(self,vertices, r, g, b, initial_position, rotation=0.0, input_offset_x=0.0, input_offset_y=0.0):
self.vertices = vertices
self.r = r
self.g = g
self.b = b
self.rotation = rotation
self.input_offset_x = input_offset_x
self.input_offset_y = input_offset_y
self.initial_position = initial_position
def __repr__(self):
return f"Paddle(vertices={repr(self.vertices)},r={repr(self.r)},g={repr(self.g)},b={repr(self.b)},initial_position={repr(self.initial_position)},rotation={repr(self.rotation)},input_offset_x={repr(self.input_offset_x)},input_offset_y={repr({self.input_offset_y})})"
paddle1 = Paddle(vertices=[Vertex(x=-10.0, y=-30.0),
Vertex(x= 10.0, y=-30.0), | Vertex(x= 10.0, y=30.0),
Vertex(x=-10.0, y=30.0)],
r=0.578123,
g=0.0,
b=1.0,
initial_position=Vertex(-90.0,0.0))
paddle2 = Paddle(vertices=[Vertex(x=-10.0, y=-30.0),
Vertex(x= 10.0, y=-30.0),
Vertex(x= 10.0, y=30.0),
Vertex(x=-10.0, y=30.0)],
r=1.0,
g=0.0,
b=0.0,
initial_position=Vertex(90.0,0.0))
def handle_movement_of_paddles():
global paddle1, paddle2
if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:
paddle1.input_offset_y -= 10.0
if glfw.get_key(window, glfw.KEY_W) == glfw.PRESS:
paddle1.input_offset_y += 10.0
if glfw.get_key(window, glfw.KEY_K) == glfw.PRESS:
paddle2.input_offset_y -= 10.0
if glfw.get_key(window, glfw.KEY_I) == glfw.PRESS:
paddle2.input_offset_y += 10.0
if glfw.get_key(window, glfw.KEY_A) == glfw.PRESS:
paddle1.rotation += 0.1
if glfw.get_key(window, glfw.KEY_D) == glfw.PRESS:
paddle1.rotation -= 0.1
if glfw.get_key(window, glfw.KEY_J) == glfw.PRESS:
paddle2.rotation += 0.1
if glfw.get_key(window, glfw.KEY_L) == glfw.PRESS:
paddle2.rotation -= 0.1
TARGET_FRAMERATE = 60 # fps
# to try to standardize on 60 fps, compare times between frames
time_at_beginning_of_previous_frame = glfw.get_time()
# Loop until the user closes the window
while not glfw.window_should_close(window):
# poll the time to try to get a constant framerate
while glfw.get_time() < time_at_beginning_of_previous_frame + 1.0/TARGET_FRAMERATE:
pass
# set for comparison on the next frame
time_at_beginning_of_previous_frame = glfw.get_time()
# Poll for and process events
glfw.poll_events()
width, height = glfw.get_framebuffer_size(window)
glViewport(0, 0, width, height)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# render scene
draw_in_square_viewport()
handle_movement_of_paddles()
# draw paddle1
glColor3f(paddle1.r,
paddle1.g,
paddle1.b)
# if you read the operations below as rotate, translate1, translate2,
# you should imagine it as follows
# eog ../images/rotation1F.gif
# if instead you read them backwards, imagine the transformations
# as follows
# eog ../images/rotation1B.gif
# side note. Typically I use a debugger as an interactive evaluator,
# in order to understand how code which I do not understand works.
# In computer graphics, the debugger is of limited help because
# the transformations on the individual points are not worth
# thinking about, and therefore the intermediate results
# are worthless for reasoning.
#
# In order to be successful, I highly recommend reading the transformations
# backwards, with moving/rotating/scaled axes.
#
# (This advice will be modified when I introduce transformation stacks,
# but the same principle will apply. Also, on the note of transformation
# stacks, N.B. that the scaling from world space to ndc is shared
# for both paddles, and that changing the code in one place would
# require changing the code for all shapes.)
#
# I prefer to think graphically instead of symbolically.
# Another way you can think of this is to rotate the x axis
# and y axis, create graph paper (tick marks) along those new
# axes, and then draw the geometry on that new "basis",
# instead of the natural basis. (Natural basis just means
# the normal x and y axes).
# Think of a basis as an origin, a unit in various directions,
# and graph paper lines drawn. Then your geometry is drawn
# in that space.
# In revisiting demo 6's space, if we read all of the transformations
# below in order, it's following the order of function application.
#
# If instead we read the transformation between spaces backwards in code,
# (and going the opposite direction of the arrows), we can view a coordinate
# system that is changing (the origin can move, and axes can rotate/scale)
# eog ../images/demo06.png
# ALSO, see mvpVisualization/demo.py and mvpVisualization/demoAnimation.py.
# THESE WILL SHOW THE TRANSFORMATIONS backwards much more intuitively.
glBegin(GL_QUADS)
for model_space in paddle1.vertices:
world_space = model_space.rotate(paddle1.rotation) \
.translate(tx=paddle1.initial_position.x,
ty=paddle1.initial_position.y) \
.translate(tx=paddle1.input_offset_x,
ty=paddle1.input_offset_y)
ndc_space = world_space.scale(scale_x=1.0/100.0,
scale_y=1.0/100.0)
glVertex2f(ndc_space.x,
ndc_space.y)
glEnd()
# draw paddle2
glColor3f(paddle2.r,
paddle2.g,
paddle2.b)
# Same thing for the second paddle.
# eog ../images/rotation2F.gif
# eog ../images/rotation2B.gif
glBegin(GL_QUADS)
for model_space in paddle2.vertices:
world_space = model_space.rotate(paddle2.rotation) \
.translate(tx=paddle2.initial_position.x,
ty=paddle2.initial_position.y) \
.translate(tx=paddle2.input_offset_x,
ty=paddle2.input_offset_y)
ndc_space = world_space.scale(scale_x=1.0/100.0,
scale_y=1.0/100.0)
glVertex2f(ndc_space.x,
ndc_space.y)
glEnd()
# done with frame, flush and swap buffers
# Swap front and back buffers
glfw.swap_buffers(window)
glfw.terminate() | random_line_split | |
demo.py | #Copyright (c) 2018-2020 William Emerison Six
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
# PURPOSE
#
# Make the rotations work correctly by thinking about the problem
# more clearly
#
# In the previous demo, the initial translate is effectively canceled out,
# leaving a rotation and then a translation.
# Translate inverse(Translate) Rotate Translate
#
# Translate inverse(Translate) = Identity. i.e. 5 * 1/5 = 1,
# so we really just need to do a rotation first, and then a translation,
# but this can be counterintuitive at first because we like to think
# in relative terms.
# To understand why the code in this demo works, you can think
# about it in one of two ways. Either there is a sequence
# of function calls, all of which happen relative to the global
# origin; or, you can read the transformations backwards,
# where instead of doing operations on points, the operations
# all modify the current axis to a new relative axis,
# and all subsequent functions move those relative axes to
# new relative axes.
# Strong suggestion for computer graphics, especially from
# modelspace to global space:
# Read the transformations in the latter way.
# See the transformations below, and the associated animated gifs.
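# A quick numeric check of the argument above (illustrative only; the Vertex
# class used here is defined later in this file, so run it in a REPL once the
# class exists). Rotating the model-space point (10, 0) by 90 degrees and then
# translating by (-90, 0) gives (-90, 10); translating first and rotating
# afterwards gives (0, -80), which is the misplaced result the previous demo
# produced.
#
# >>> import math
# >>> v = Vertex(10.0, 0.0)
# >>> w = v.rotate(math.pi / 2).translate(tx=-90.0, ty=0.0)
# >>> (round(w.x, 6), round(w.y, 6))
# (-90.0, 10.0)
# >>> u = v.translate(tx=-90.0, ty=0.0).rotate(math.pi / 2)
# >>> (round(u.x, 6), round(u.y, 6))
# (-0.0, -80.0)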
import sys
import os
import numpy as np
import math
from OpenGL.GL import *
import glfw
if not glfw.init():
sys.exit()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR,1)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR,4)
window = glfw.create_window(500,
500,
"ModelViewProjection Demo 9",
None,
None)
if not window:
glfw.terminate()
sys.exit()
# Make the window's context current
glfw.make_context_current(window)
# Install a key handler
def on_key(window, key, scancode, action, mods):
if key == glfw.KEY_ESCAPE and action == glfw.PRESS:
glfw.set_window_should_close(window,1)
glfw.set_key_callback(window, on_key)
glClearColor(0.0,
0.0,
0.0,
1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def draw_in_square_viewport():
# clear to gray.
glClearColor(0.2, #r
0.2, #g
0.2, #b
1.0) #a
glClear(GL_COLOR_BUFFER_BIT)
width, height = glfw.get_framebuffer_size(window)
# figure out the minimum dimension of the window
min = width if width < height else height
# enable the scissor test so that the following clear affects only a
# restricted region of the framebuffer rather than every pixel.
glEnable(GL_SCISSOR_TEST)
glScissor(int((width - min)/2.0), #min x
int((height - min)/2.0), #min y
min, #width x
min) #width y
glClearColor(0.0, #r
0.0, #g
0.0, #b
1.0) #a
# gl clear will only update the square to black values.
glClear(GL_COLOR_BUFFER_BIT)
# disable the scissor test, so now any opengl calls will
# happen as usual.
glDisable(GL_SCISSOR_TEST)
# But, we only want to draw within the black square.
# We set the viewport, so that the NDC coordinates
# will be mapped to the region of screen coordinates
# that we care about, which is the black square.
glViewport(int(0.0 + (width - min)/2.0), #min x
int(0.0 + (height - min)/2.0), #min y
min, #width x
min) #width y
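# Worked example of the mapping above (numbers illustrative): for an 800x600
# framebuffer, min is 600, so the scissor/viewport square runs from (100, 0)
# to (700, 600); NDC coordinates are then mapped onto that centered square.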
class Vertex:
def __init__(self,x,y):
self.x = x
self.y = y
def | (self):
return f"Vertex(x={repr(self.x)},y={repr(self.y)})"
def translate(self, tx, ty):
return Vertex(x=self.x + tx, y=self.y + ty)
def scale(self, scale_x, scale_y):
return Vertex(x=self.x * scale_x, y=self.y * scale_y)
def rotate(self,angle_in_radians):
return Vertex(x= self.x * math.cos(angle_in_radians) - self.y * math.sin(angle_in_radians),
y= self.x * math.sin(angle_in_radians) + self.y * math.cos(angle_in_radians))
# NEW
# removed rotate_around, as it was useless for our purpose
class Paddle:
def __init__(self,vertices, r, g, b, initial_position, rotation=0.0, input_offset_x=0.0, input_offset_y=0.0):
self.vertices = vertices
self.r = r
self.g = g
self.b = b
self.rotation = rotation
self.input_offset_x = input_offset_x
self.input_offset_y = input_offset_y
self.initial_position = initial_position
def __repr__(self):
return f"Paddle(vertices={repr(self.vertices)},r={repr(self.r)},g={repr(self.g)},b={repr(self.b)},initial_position={repr(self.initial_position)},rotation={repr(self.rotation)},input_offset_x={repr(self.input_offset_x)},input_offset_y={repr({self.input_offset_y})})"
paddle1 = Paddle(vertices=[Vertex(x=-10.0, y=-30.0),
Vertex(x= 10.0, y=-30.0),
Vertex(x= 10.0, y=30.0),
Vertex(x=-10.0, y=30.0)],
r=0.578123,
g=0.0,
b=1.0,
initial_position=Vertex(-90.0,0.0))
paddle2 = Paddle(vertices=[Vertex(x=-10.0, y=-30.0),
Vertex(x= 10.0, y=-30.0),
Vertex(x= 10.0, y=30.0),
Vertex(x=-10.0, y=30.0)],
r=1.0,
g=0.0,
b=0.0,
initial_position=Vertex(90.0,0.0))
def handle_movement_of_paddles():
global paddle1, paddle2
if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:
paddle1.input_offset_y -= 10.0
if glfw.get_key(window, glfw.KEY_W) == glfw.PRESS:
paddle1.input_offset_y += 10.0
if glfw.get_key(window, glfw.KEY_K) == glfw.PRESS:
paddle2.input_offset_y -= 10.0
if glfw.get_key(window, glfw.KEY_I) == glfw.PRESS:
paddle2.input_offset_y += 10.0
if glfw.get_key(window, glfw.KEY_A) == glfw.PRESS:
paddle1.rotation += 0.1
if glfw.get_key(window, glfw.KEY_D) == glfw.PRESS:
paddle1.rotation -= 0.1
if glfw.get_key(window, glfw.KEY_J) == glfw.PRESS:
paddle2.rotation += 0.1
if glfw.get_key(window, glfw.KEY_L) == glfw.PRESS:
paddle2.rotation -= 0.1
TARGET_FRAMERATE = 60 # fps
# to try to standardize on 60 fps, compare times between frames
time_at_beginning_of_previous_frame = glfw.get_time()
# Loop until the user closes the window
while not glfw.window_should_close(window):
# poll the time to try to get a constant framerate
while glfw.get_time() < time_at_beginning_of_previous_frame + 1.0/TARGET_FRAMERATE:
pass
# set for comparison on the next frame
time_at_beginning_of_previous_frame = glfw.get_time()
# Poll for and process events
glfw.poll_events()
width, height = glfw.get_framebuffer_size(window)
glViewport(0, 0, width, height)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# render scene
draw_in_square_viewport()
handle_movement_of_paddles()
# draw paddle1
glColor3f(paddle1.r,
paddle1.g,
paddle1.b)
# if you read the operations below as rotate, translate1, translate2,
# you should imagine it as follows
# eog ../images/rotation1F.gif
# if instead you read them backwards, imagine the transformations
# as follows
# eog ../images/rotation1B.gif
# side note. Typically I use a debugger as an interactive evaluator,
# in order to understand how code which I do not understand works.
# In computer graphics, the debugger is of limited help because
# the transformations on the individual points are not worth
# thinking about, and therefore the intermediate results
# are worthless for reasoning.
#
# In order to be successful, I highly recommend reading the transformations
# backwards, with moving/rotating/scaled axes.
#
# (This advice will be modified when I introduce transformation stacks,
# but the same principle will apply. Also, on the note of transformation
# stacks, N.B. that the scaling from world space to ndc is shared
# for both paddles, and that changing the code in one place would
# require changing the code for all shapes.)
#
# I prefer to think graphically instead of symbolically.
# Another way you can think of this is to rotate the x axis
# and y axis, create graph paper (tick marks) along those new
# axes, and then draw the geometry on that new "basis",
# instead of the natural basis. (Natural basis just means
# the normal x and y axes).
# Think of a basis as an origin, a unit in various directions,
# and graph paper lines drawn. Then your geometry is drawn
# in that space.
# In revisiting demo 6's space, if we read all of the transformations
# below in order, it's following the order of function application.
#
# If instead we read the transformation between spaces backwards in code,
# (and going the opposite direction of the arrows), we can view a coordinate
# system that is changing (the origin can move, and axes can rotate/scale)
# eog ../images/demo06.png
# ALSO, see mvpVisualization/demo.py and mvpVisualization/demoAnimation.py.
# THESE WILL SHOW THE TRANSFORMATIONS backwards much more intuitively.
glBegin(GL_QUADS)
for model_space in paddle1.vertices:
world_space = model_space.rotate(paddle1.rotation) \
.translate(tx=paddle1.initial_position.x,
ty=paddle1.initial_position.y) \
.translate(tx=paddle1.input_offset_x,
ty=paddle1.input_offset_y)
ndc_space = world_space.scale(scale_x=1.0/100.0,
scale_y=1.0/100.0)
glVertex2f(ndc_space.x,
ndc_space.y)
glEnd()
# draw paddle2
glColor3f(paddle2.r,
paddle2.g,
paddle2.b)
# Same thing for the second paddle.
# eog ../images/rotation2F.gif
# eog ../images/rotation2B.gif
glBegin(GL_QUADS)
for model_space in paddle2.vertices:
world_space = model_space.rotate(paddle2.rotation) \
.translate(tx=paddle2.initial_position.x,
ty=paddle2.initial_position.y) \
.translate(tx=paddle2.input_offset_x,
ty=paddle2.input_offset_y)
ndc_space = world_space.scale(scale_x=1.0/100.0,
scale_y=1.0/100.0)
glVertex2f(ndc_space.x,
ndc_space.y)
glEnd()
# done with frame, flush and swap buffers
# Swap front and back buffers
glfw.swap_buffers(window)
glfw.terminate()
| __repr__ | identifier_name |
demo.py | #Copyright (c) 2018-2020 William Emerison Six
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
# PURPOSE
#
# Make the rotations work correctly by thinking about the problem
# more clearly
#
# In the previous demo, the initial translate is effectively canceled out,
# leaving a rotation and then a translation.
# Translate inverse(Translate) Rotate Translate
#
# Translate inverse(Translate) = Identity. i.e. 5 * 1/5 = 1,
# so we really just need to do a rotation first, and then a translation,
# but this can be counterintuitive at first because we like to think
# in relative terms.
# To understand why the code in this demo works, you can think
# about it in one of two ways. Either there is a sequence
# of function calls, all of which happen relative to the global
# origin; or, you can read the transformations backwards,
# where instead of doing operations on points, the operations
# all modify the current axis to a new relative axis,
# and all subsequent functions move those relative axes to
# new relative axes.
# Strong suggestion for computer graphics, especially from
# modelspace to global space:
# Read the transformations in the latter way.
# See the transformations below, and the associated animated gifs.
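# A quick numeric check of the argument above (illustrative only; the Vertex
# class used here is defined later in this file, so run it in a REPL once the
# class exists). Rotating the model-space point (10, 0) by 90 degrees and then
# translating by (-90, 0) gives (-90, 10); translating first and rotating
# afterwards gives (0, -80), which is the misplaced result the previous demo
# produced.
#
# >>> import math
# >>> v = Vertex(10.0, 0.0)
# >>> w = v.rotate(math.pi / 2).translate(tx=-90.0, ty=0.0)
# >>> (round(w.x, 6), round(w.y, 6))
# (-90.0, 10.0)
# >>> u = v.translate(tx=-90.0, ty=0.0).rotate(math.pi / 2)
# >>> (round(u.x, 6), round(u.y, 6))
# (-0.0, -80.0)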
import sys
import os
import numpy as np
import math
from OpenGL.GL import *
import glfw
if not glfw.init():
sys.exit()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR,1)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR,4)
window = glfw.create_window(500,
500,
"ModelViewProjection Demo 9",
None,
None)
if not window:
glfw.terminate()
sys.exit()
# Make the window's context current
glfw.make_context_current(window)
# Install a key handler
def on_key(window, key, scancode, action, mods):
if key == glfw.KEY_ESCAPE and action == glfw.PRESS:
glfw.set_window_should_close(window,1)
glfw.set_key_callback(window, on_key)
glClearColor(0.0,
0.0,
0.0,
1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def draw_in_square_viewport():
# clear to gray.
glClearColor(0.2, #r
0.2, #g
0.2, #b
1.0) #a
glClear(GL_COLOR_BUFFER_BIT)
width, height = glfw.get_framebuffer_size(window)
# figure out the minimum dimension of the window
min = width if width < height else height
# enable the scissor test so that the following clear affects only a
# restricted region of the framebuffer rather than every pixel.
glEnable(GL_SCISSOR_TEST)
glScissor(int((width - min)/2.0), #min x
int((height - min)/2.0), #min y
min, #width x
min) #width y
glClearColor(0.0, #r
0.0, #g
0.0, #b
1.0) #a
# gl clear will only update the square to black values.
glClear(GL_COLOR_BUFFER_BIT)
# disable the scissor test, so now any opengl calls will
# happen as usual.
glDisable(GL_SCISSOR_TEST)
# But, we only want to draw within the black square.
# We set the viewport, so that the NDC coordinates
# will be mapped to the region of screen coordinates
# that we care about, which is the black square.
glViewport(int(0.0 + (width - min)/2.0), #min x
int(0.0 + (height - min)/2.0), #min y
min, #width x
min) #width y
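# Worked example of the mapping above (numbers illustrative): for an 800x600
# framebuffer, min is 600, so the scissor/viewport square runs from (100, 0)
# to (700, 600); NDC coordinates are then mapped onto that centered square.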
class Vertex:
def __init__(self,x,y):
self.x = x
self.y = y
def __repr__(self):
return f"Vertex(x={repr(self.x)},y={repr(self.y)})"
def translate(self, tx, ty):
return Vertex(x=self.x + tx, y=self.y + ty)
def scale(self, scale_x, scale_y):
return Vertex(x=self.x * scale_x, y=self.y * scale_y)
def rotate(self,angle_in_radians):
return Vertex(x= self.x * math.cos(angle_in_radians) - self.y * math.sin(angle_in_radians),
y= self.x * math.sin(angle_in_radians) + self.y * math.cos(angle_in_radians))
# NEW
# removed rotate_around, as it was useless for our purpose
class Paddle:
def __init__(self,vertices, r, g, b, initial_position, rotation=0.0, input_offset_x=0.0, input_offset_y=0.0):
self.vertices = vertices
self.r = r
self.g = g
self.b = b
self.rotation = rotation
self.input_offset_x = input_offset_x
self.input_offset_y = input_offset_y
self.initial_position = initial_position
def __repr__(self):
return f"Paddle(vertices={repr(self.vertices)},r={repr(self.r)},g={repr(self.g)},b={repr(self.b)},initial_position={repr(self.initial_position)},rotation={repr(self.rotation)},input_offset_x={repr(self.input_offset_x)},input_offset_y={repr({self.input_offset_y})})"
paddle1 = Paddle(vertices=[Vertex(x=-10.0, y=-30.0),
Vertex(x= 10.0, y=-30.0),
Vertex(x= 10.0, y=30.0),
Vertex(x=-10.0, y=30.0)],
r=0.578123,
g=0.0,
b=1.0,
initial_position=Vertex(-90.0,0.0))
paddle2 = Paddle(vertices=[Vertex(x=-10.0, y=-30.0),
Vertex(x= 10.0, y=-30.0),
Vertex(x= 10.0, y=30.0),
Vertex(x=-10.0, y=30.0)],
r=1.0,
g=0.0,
b=0.0,
initial_position=Vertex(90.0,0.0))
def handle_movement_of_paddles():
global paddle1, paddle2
if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:
paddle1.input_offset_y -= 10.0
if glfw.get_key(window, glfw.KEY_W) == glfw.PRESS:
paddle1.input_offset_y += 10.0
if glfw.get_key(window, glfw.KEY_K) == glfw.PRESS:
paddle2.input_offset_y -= 10.0
if glfw.get_key(window, glfw.KEY_I) == glfw.PRESS:
|
if glfw.get_key(window, glfw.KEY_A) == glfw.PRESS:
paddle1.rotation += 0.1
if glfw.get_key(window, glfw.KEY_D) == glfw.PRESS:
paddle1.rotation -= 0.1
if glfw.get_key(window, glfw.KEY_J) == glfw.PRESS:
paddle2.rotation += 0.1
if glfw.get_key(window, glfw.KEY_L) == glfw.PRESS:
paddle2.rotation -= 0.1
TARGET_FRAMERATE = 60 # fps
# to try to standardize on 60 fps, compare times between frames
time_at_beginning_of_previous_frame = glfw.get_time()
# Loop until the user closes the window
while not glfw.window_should_close(window):
# poll the time to try to get a constant framerate
while glfw.get_time() < time_at_beginning_of_previous_frame + 1.0/TARGET_FRAMERATE:
pass
# set for comparison on the next frame
time_at_beginning_of_previous_frame = glfw.get_time()
# Poll for and process events
glfw.poll_events()
width, height = glfw.get_framebuffer_size(window)
glViewport(0, 0, width, height)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# render scene
draw_in_square_viewport()
handle_movement_of_paddles()
# draw paddle1
glColor3f(paddle1.r,
paddle1.g,
paddle1.b)
# if you read the operations below as rotate, translate1, translate2,
# you should imagine it as follows
# eog ../images/rotation1F.gif
# if instead you read them backwards, imagine the transformations
# as follows
# eog ../images/rotation1B.gif
# side note. Typically I use a debugger as an interactive evaluator,
# in order to understand how code which I do not understand works.
# In computer graphics, the debugger is of limited help because
# the transformations on the individual points are not worth
# thinking about, and therefore the intermediate results
# are worthless for reasoning.
#
# In order to be successful, I highly recommend reading the transformations
# backwards, with moving/rotating/scaled axes.
#
# (This advice will be modified when I introduce transformation stacks,
# but the same principle will apply. Also, on the note of transformation
# stacks, N.B. that the scaling from world space to ndc is shared
# for both paddles, and that changing the code in one place would
# require changing the code for all shapes.)
#
# I prefer to think graphically instead of symbolically.
# Another way you can think of this is to rotate the x axis
# and y axis, create graph paper (tick marks) along those new
# axes, and then draw the geometry on that new "basis",
# instead of the natural basis. (Natural basis just means
# the normal x and y axes).
# Think of a basis as an origin, a unit in various directions,
# and graph paper lines drawn. Then your geometry is drawn
# in that space.
# In revisiting demo 6's space, if we read all of the transformations
# below in order, it's following the order of function application.
#
# If instead we read the transformation between spaces backwards in code,
# (and going the opposite direction of the arrows), we can view a coordinate
# system that is changing (the origin can move, and axes can rotate/scale)
# eog ../images/demo06.png
# ALSO, see mvpVisualization/demo.py and mvpVisualization/demoAnimation.py.
# THESE WILL SHOW THE TRANSFORMATIONS backwards much more intuitively.
glBegin(GL_QUADS)
for model_space in paddle1.vertices:
world_space = model_space.rotate(paddle1.rotation) \
.translate(tx=paddle1.initial_position.x,
ty=paddle1.initial_position.y) \
.translate(tx=paddle1.input_offset_x,
ty=paddle1.input_offset_y)
ndc_space = world_space.scale(scale_x=1.0/100.0,
scale_y=1.0/100.0)
glVertex2f(ndc_space.x,
ndc_space.y)
glEnd()
# draw paddle2
glColor3f(paddle2.r,
paddle2.g,
paddle2.b)
# Same thing for the second paddle.
# eog ../images/rotation2F.gif
# eog ../images/rotation2B.gif
glBegin(GL_QUADS)
for model_space in paddle2.vertices:
world_space = model_space.rotate(paddle2.rotation) \
.translate(tx=paddle2.initial_position.x,
ty=paddle2.initial_position.y) \
.translate(tx=paddle2.input_offset_x,
ty=paddle2.input_offset_y)
ndc_space = world_space.scale(scale_x=1.0/100.0,
scale_y=1.0/100.0)
glVertex2f(ndc_space.x,
ndc_space.y)
glEnd()
# done with frame, flush and swap buffers
# Swap front and back buffers
glfw.swap_buffers(window)
glfw.terminate()
| paddle2.input_offset_y += 10.0 | conditional_block |
workplace_preparation.py | import math
import deepdish as dd
import shutil
import subprocess
from os import path, makedirs, remove, listdir
from argparse import ArgumentParser
from threading import Thread
import numpy as np
from matplotlib import pyplot as plt
number_of_images_in_temp_model = 10
def parse_args() -> str:
"""
Function to parse user argument
:return: workspace_path
"""
ap = ArgumentParser(description='Create camera_pose files.')
ap.add_argument('--workspace_path', required=True)
args = vars(ap.parse_args())
return args['workspace_path']
def remove_extra_images(path_to_images: str, number_of_images: int) -> None:
"""
The function removes all the extra images created in the images folder
:param path_to_images: path to the model images folder
:param number_of_images: the number of images to reconstruct our model
"""
last_image = 'image' + str(number_of_images) + '.jpg'
while last_image in listdir(path_to_images):
last_image_path = path.join(path_to_images, last_image)
remove(last_image_path)
print(f"remove {last_image}")
number_of_images += 1
last_image = 'image' + str(number_of_images) + '.jpg'
def prepare_video(path_to_video: str, number_of_images=87) -> None:
"""
The function prepares the images for our model based on a given video
:param path_to_video: video in h264 format
:param number_of_images: the number of images to reconstruct our model (87 by default)
"""
temp_video = path.join(path_to_video, 'temp_outpy.mp4')
video = path.join(path_to_video, 'outpy.h264')
# create mp4 video for metadata and compute video duration
subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])
result = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
"format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", temp_video],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
video_duration = float(result.stdout)
# create images folder
path_to_images = path.join(path_to_video, 'images')
if path.exists(path_to_images) and path.isdir(path_to_images):
shutil.rmtree(path_to_images)
makedirs(path_to_images)
# split the given video into images
subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',
path.join(path_to_images, 'image%d.jpg')])
# remove extra files
remove_extra_images(path_to_images, number_of_images)
remove(temp_video)
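# Worked example of the extraction rate above (numbers illustrative): if
# ffprobe reports a 29.0 second clip and the default 87 images are requested,
# ffmpeg runs with -r 87/29.0 = 3.0, i.e. three evenly spaced frames per
# second of video.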
def create_temp_model(temp_dir_path: str) -> str:
"""
The function builds a temporary COLMAP model from the first images of the video
:param temp_dir_path: path to the workspace folder that contains the images
:return path_to_temp_model: path to the temporary model folder
"""
# create temp images folder
path_to_temp_model = path.join(temp_dir_path, 'temp_model')
path_to_temp_images = path.join(path_to_temp_model, 'temp_images')
# remove old temporary folder if exists
if path.exists(path_to_temp_model) and path.isdir(path_to_temp_model):
shutil.rmtree(path_to_temp_model)
number_of_temp_images = 0
path_to_images = path.join(temp_dir_path, 'images')
# take only part of the images for the temp model
while number_of_temp_images < number_of_images_in_temp_model:
try:
number_of_temp_images = len([name for name in listdir(path_to_images) if name.endswith('.jpg')])
except FileNotFoundError:
number_of_temp_images = 0
# copy subdirectory example
shutil.copytree(path_to_images, path_to_temp_images)
# run colmap to create model for the first 10 images in video
subprocess.run(['colmap', 'automatic_reconstructor',
'--workspace_path', path_to_temp_model, '--image_path', path_to_temp_images,
'--data_type=video', '--quality=extreme'])
return path_to_temp_model
def quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np.ndarray:
"""
The function converts the quaternion vector to a rotation matrix
https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/
:param q0: the value of qw
:param q1: the value of qx
:param q2: the value of qy
:param q3: the value of qz
:return rot_matrix: rotation matrix 3x3 as NumPy array
"""
# First row of the rotation matrix
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
# Second row of the rotation matrix
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
# Third row of the rotation matrix
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
# 3x3 rotation matrix
rot_matrix = np.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]])
return rot_matrix
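# Minimal sanity check of the conversion above (illustrative; assumes the
# (q0, q1, q2, q3) = (qw, qx, qy, qz) order used here and a unit quaternion):
# the identity rotation should yield the 3x3 identity matrix.
#
# >>> np.allclose(quaternion_to_rotation_matrix(1.0, 0.0, 0.0, 0.0), np.eye(3))
# True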
def rotation_matrix_to_quaternion(rotation_matrix: np.ndarray) -> tuple:
"""
The function converts rotation matrix to quaternion vector
https://learnopencv.com/rotation-matrix-to-euler-angles/
:param rotation_matrix: rotation matrix 3x3 represented by NumPy array
:return quaternion vector: defined by (qx, qy, qz, qw)
"""
cosine_for_pitch = math.sqrt(rotation_matrix[0][0] ** 2 + rotation_matrix[1][0] ** 2)
is_singular = cosine_for_pitch < 10 ** -6
if not is_singular:
yaw = math.atan2(rotation_matrix[1][0], rotation_matrix[0][0])
pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)
roll = math.atan2(rotation_matrix[2][1], rotation_matrix[2][2])
else:
yaw = math.atan2(-rotation_matrix[1][2], rotation_matrix[1][1])
pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)
roll = 0
e = (yaw, pitch, roll)
return euler_to_quaternion(e)
def euler_to_quaternion(euler: tuple) -> tuple:
"""
The function converts an Euler angle to a quaternion vector
:param euler: angle represented by yaw, pitch, roll
:return quaternion vector: defined by (qx, qy, qz, qw)
"""
(yaw, pitch, roll) = (euler[0], euler[1], euler[2])
qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)
qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)
qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
return qx, qy, qz, qw
def get_first_image_pose(image_src: str) -> list:
|
def draw_rel_camera_pose(image: int, origin: list, camera_pose: list, plot_dir_path: str) -> None:
"""
Debug function for plotting the relative camera poses
:param image: number of current image
:param origin: list of [x,y,z] of the origin
:param camera_pose: list of [x1,y1,z1][x2,y2,z2] of the camera pose (three 2d vectors)
:param plot_dir_path: path to plot directory
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elev=10.)
ax.set_title('camera pose image: %d' % image)
scale = 7
ax.set_xlim3d(-scale, scale)
ax.set_ylim3d(-scale, scale)
ax.set_zlim3d(-scale, scale)
# replace the Y-Axis with Z-Axis
ax.scatter(origin[0], origin[2], origin[1], c='black')
for i in range(3):
ax.plot([origin[0], camera_pose[i][0]], [origin[2], camera_pose[i][2]], [origin[1], camera_pose[i][1]])
i += 1
fig.savefig(f'{plot_dir_path}/%d.png' % image)
plt.close(fig)
plt.clf()
def compute_absolute_camera_pose(camera_pose_rel_dict: dict, first_image_pose: list,
workspace_path: str, do_plot=False) -> dict:
"""
The function returns a dictionary with the recovered R & T for each image
:param camera_pose_rel_dict: dictionary of relative camera poses for each image
:param first_image_pose: absolute R&T of the first image
:param workspace_path: path to workspace_path
:param do_plot: boolean flag for debug purpose
:return: camera_pose_recover dictionary
"""
# create directory for reference plots
ref_pose_images_path = path.join(workspace_path, 'ref_images')
if do_plot:
makedirs(ref_pose_images_path)
# initialize parameters for computing absolute camera poses
camera_pose_recover = {}
rotation = first_image_pose[0]
translation = first_image_pose[1]
is_first = True
prev_rotation = np.identity(3)
prev_translation = np.zeros(3)
# for each image, compute the absolute pose out of the reference pose
for image in camera_pose_rel_dict.keys():
rel_rotation = camera_pose_rel_dict[image][0]
rel_translation = camera_pose_rel_dict[image][1]
# for the first image, take the values from the temporary model
if not is_first:
rotation = rel_rotation @ np.linalg.inv(prev_rotation.T)
translation = rel_translation + prev_translation
# compute the absolute camera pose
camera_pose = rotation + translation
if do_plot:
draw_rel_camera_pose(image, translation, camera_pose, ref_pose_images_path)
# save the values for each image (in R & T format)
camera_pose_recover[image] = [rotation, translation]
prev_rotation = rotation
prev_translation = translation
is_first = False
return camera_pose_recover
def write_camera_pose_to_file(camera_pose_abs_dict: dict, pose_dir_path: str) -> None:
"""
The function writes the recovered camera poses according to the COLMAP documentation
:param camera_pose_abs_dict: A dictionary of recovered camera poses for each image
:param pose_dir_path: path to the output pose directory
"""
image_dst = path.join(pose_dir_path, 'images.txt')
with open(image_dst, 'w+') as file:
file.write('# Image list with two lines of data per image:\n')
file.write('# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n')
file.write('# POINTS2D[] as (X, Y, POINT3D_ID)\n')
file.write(f'# Number of images: {len(camera_pose_abs_dict.keys())}\n')
# write each camera pose to file
for image in camera_pose_abs_dict.keys():
image_pose_data = []
t_vector = camera_pose_abs_dict[image][1]
qx, qy, qz, qw = rotation_matrix_to_quaternion(camera_pose_abs_dict[image][0])
image_pose_data.append(str(image))
# image_pose_data.append(f'{qw} {qx} {qy} {qz}')
image_pose_data.append(f'{qz} {qy} {qx} {qw}')
image_pose_data.append(' '.join(map(str, t_vector)))
image_pose_data.append('1')
image_pose_data.append(f'image{image}.jpg')
file.write(' '.join(image_pose_data) + '\n\n')
def clear_workspace(workspace_path: str) -> None:
"""
The function deletes all the files in the workspace folder except the input video
"""
# make sure the workspace is empty
for filename in listdir(workspace_path):
if filename.endswith('.h264'):
continue
path_to_node = path.join(workspace_path, filename)
if path.isdir(path_to_node):
shutil.rmtree(path_to_node)
else:
remove(path_to_node)
def main():
# Parse input arguments:
workspace_path = parse_args()
clear_workspace(workspace_path)
# prepare video and create the images for our model
video_thread = Thread(target=prepare_video, args=(workspace_path, 87))
video_thread.start()
# create temp folder for temp model
temp_model_workspace_path = create_temp_model(workspace_path)
# create camera pose parameters
pose_output_path = path.join(workspace_path, 'camera_poses')
makedirs(pose_output_path)
# create camera input file
camera_src = path.join(temp_model_workspace_path, 'sparse/0/cameras.txt')
camera_dst = path.join(pose_output_path, 'cameras.txt')
shutil.copyfile(camera_src, camera_dst)
# create an empty points input file
points_dst = path.join(pose_output_path, 'points3D.txt')
open(points_dst, 'w').close()
# get camera poses for first image
image_src = path.join(temp_model_workspace_path, 'sparse/0/images.txt')
first_image_pose = get_first_image_pose(image_src)
if not first_image_pose:
print("Error in temp model - cant compute the camera pose for the first image")
exit(1)
# reading the reference pose model from file
camera_pose_rel_dict = dd.io.load('ref_camera_pose.h5')
camera_pose_abs_dict = compute_absolut_camera_pose(camera_pose_rel_dict, first_image_pose, workspace_path,
do_plot=False)
# create the image file according to COLMAP documentation
write_camera_pose_to_file(camera_pose_abs_dict, pose_output_path)
# wait for the video thread before closing the process
video_thread.join()
if __name__ == "__main__":
print('==============================================================================')
print('workplace preparation')
print('==============================================================================')
main()
| """
The function return the absolut R & T for the first image in temp model
:param image_src: path to image file (colmap output)
:return R&T: R = list[0], T list[1] or None if image1 not exists
"""
# read images file
with open(image_src, 'r') as file:
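# COLMAP's images.txt starts with 4 comment lines, then two lines per image; keep only the pose line of each pair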
lines = file.readlines()[4::2]
# scan the image entries for image 1
for line in lines:
columns = line.split()
image_name = columns[9].split('.')[0]
image_id = int(image_name.split('e')[1])
# convert and return the camera pose for the first image in model
if image_id == 1:
qw = float(columns[1])
qx = float(columns[2])
qy = float(columns[4])
qz = float(columns[3])
rotation_matrix = quaternion_to_rotation_matrix(qw, qx, qy, qz)
tx = float(columns[5])
ty = float(columns[7])
tz = float(columns[6])
translation_vector = np.array([tx, ty, tz])
return [rotation_matrix, translation_vector]
return [] | identifier_body |
workplace_preparation.py | import math
import deepdish as dd
import shutil
import subprocess
from os import path, makedirs, remove, listdir
from argparse import ArgumentParser
from threading import Thread
import numpy as np
from matplotlib import pyplot as plt
number_of_images_in_temp_model = 10
def parse_args() -> str:
"""
Function to parse user argument
:return: workspace_path
"""
ap = ArgumentParser(description='Create camera_pose files.')
ap.add_argument('--workspace_path', required=True)
args = vars(ap.parse_args())
return args['workspace_path']
def remove_extra_images(path_to_images: str, number_of_images: int) -> None:
"""
The function removes all the extra images created in the images folder
:param path_to_images: path to the model images folder
:param number_of_images: the number of images to reconstruct our model
"""
last_image = 'image' + str(number_of_images) + '.jpg'
while last_image in listdir(path_to_images):
last_image_path = path.join(path_to_images, last_image)
remove(last_image_path)
print(f"remove {last_image}")
number_of_images += 1
last_image = 'image' + str(number_of_images) + '.jpg'
def prepare_video(path_to_video: str, number_of_images=87) -> None:
"""
The function prepares the images for our model based on a given video
:param path_to_video: path to the folder containing the h264 video
:param number_of_images: the number of images to reconstruct our model (87 by default)
"""
temp_video = path.join(path_to_video, 'temp_outpy.mp4')
video = path.join(path_to_video, 'outpy.h264')
# create mp4 video for metadata and compute video duration
subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])
result = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
"format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", temp_video],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
video_duration = float(result.stdout)
# create images folder
path_to_images = path.join(path_to_video, 'images')
if path.exists(path_to_images) and path.isdir(path_to_images):
shutil.rmtree(path_to_images)
makedirs(path_to_images)
# split the given video into images
subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',
path.join(path_to_images, 'image%d.jpg')])
# remove extra files
remove_extra_images(path_to_images, number_of_images)
remove(temp_video)
def create_temp_model(temp_dir_path: str) -> str:
"""
The function builds a temporary COLMAP model from the first images of the video
:param temp_dir_path: path to the workspace directory
:return: path to the temporary model folder
"""
# create temp images folder
path_to_temp_model = path.join(temp_dir_path, 'temp_model')
path_to_temp_images = path.join(path_to_temp_model, 'temp_images')
# remove old temporary folder if exists
if path.exists(path_to_temp_model) and path.isdir(path_to_temp_model):
shutil.rmtree(path_to_temp_model)
number_of_temp_images = 0
path_to_images = path.join(temp_dir_path, 'images')
# take only part of the images for the temp model
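# busy-wait until the video-preparation thread (started in main) has written enough frames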
while number_of_temp_images < number_of_images_in_temp_model:
try:
number_of_temp_images = len([name for name in listdir(path_to_images) if name.endswith('.jpg')])
except FileNotFoundError:
number_of_temp_images = 0
# copy the collected images into the temp model folder
shutil.copytree(path_to_images, path_to_temp_images)
# run colmap to create model for the first 10 images in video
subprocess.run(['colmap', 'automatic_reconstructor',
'--workspace_path', path_to_temp_model, '--image_path', path_to_temp_images,
'--data_type=video', '--quality=extreme'])
return path_to_temp_model
def quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np.ndarray:
"""
The function converts the quaternion vector to a rotation matrix
https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/
:param q0: the value of qw
:param q1: the value of qx
:param q2: the value of qy
:param q3: the value of qz
:return rot_matrix: rotation matrix 3x3 as NumPy array
"""
# First row of the rotation matrix
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
# Second row of the rotation matrix
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
# Third row of the rotation matrix
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
# 3x3 rotation matrix
rot_matrix = np.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]])
return rot_matrix
def rotation_matrix_to_quaternion(rotation_matrix: np.ndarray) -> object:
"""
The function converts rotation matrix to quaternion vector
https://learnopencv.com/rotation-matrix-to-euler-angles/
:param rotation_matrix: rotation matrix 3x3 represented by NumPy array
:return quaternion vector: defined by (qx, qy, qz, qw)
"""
cosine_for_pitch = math.sqrt(rotation_matrix[0][0] ** 2 + rotation_matrix[1][0] ** 2)
is_singular = cosine_for_pitch < 10 ** -6
if not is_singular:
yaw = math.atan2(rotation_matrix[1][0], rotation_matrix[0][0])
pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)
roll = math.atan2(rotation_matrix[2][1], rotation_matrix[2][2])
else:
yaw = math.atan2(-rotation_matrix[1][2], rotation_matrix[1][1])
pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)
roll = 0
e = (yaw, pitch, roll)
return euler_to_quaternion(e)
def euler_to_quaternion(euler: tuple) -> object:
"""
The function converts Euler angles to a quaternion
:param euler: tuple of (yaw, pitch, roll) angles in radians
:return quaternion vector: defined by (qx, qy, qz, qw)
"""
(yaw, pitch, roll) = (euler[0], euler[1], euler[2])
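# NOTE: qx/qy and qz/qw below are assigned in a swapped order compared to the textbook formula; this appears intentional, mirroring the swapped qy/qz and ty/tz column reads in get_first_image_pose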
qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)
qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)
qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
return qx, qy, qz, qw
def get_first_image_pose(image_src: str) -> list:
"""
The function return the absolut R & T for the first image in temp model
:param image_src: path to image file (colmap output)
:return R&T: R = list[0], T list[1] or None if image1 not exists
"""
# read images file
with open(image_src, 'r') as file:
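# COLMAP's images.txt starts with 4 comment lines, then two lines per image; keep only the pose line of each pair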
lines = file.readlines()[4::2]
# scan the image entries for image 1
for line in lines:
columns = line.split()
image_name = columns[9].split('.')[0]
image_id = int(image_name.split('e')[1])
# convert and return the camera pose for the first image in model
if image_id == 1:
qw = float(columns[1])
qx = float(columns[2])
qy = float(columns[4])
qz = float(columns[3])
rotation_matrix = quaternion_to_rotation_matrix(qw, qx, qy, qz)
tx = float(columns[5])
ty = float(columns[7])
tz = float(columns[6])
translation_vector = np.array([tx, ty, tz])
return [rotation_matrix, translation_vector]
return []
def draw_rel_camera_pose(image: int, origin: list, camera_pose: list, plot_dir_path: str) -> None:
"""
Debug function for plotting the relative camera poses
:param image: number of current image
:param origin: list of [x,y,z] of the origin
:param camera_pose: list of three 3D points [x1,y1,z1],[x2,y2,z2],[x3,y3,z3] describing the camera pose
:param plot_dir_path: path to plot directory
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elev=10.)
ax.set_title('camera pose image: %d' % image)
scale = 7
ax.set_xlim3d(-scale, scale)
ax.set_ylim3d(-scale, scale)
ax.set_zlim3d(-scale, scale)
# replace the Y-Axis with Z-Axis
ax.scatter(origin[0], origin[2], origin[1], c='black')
for i in range(3):
ax.plot([origin[0], camera_pose[i][0]], [origin[2], camera_pose[i][2]], [origin[1], camera_pose[i][1]])
fig.savefig(f'{plot_dir_path}/%d.png' % image)
plt.close(fig)
plt.clf()
def compute_absolut_camera_pose(camera_pose_rel_dict: dict, first_image_pose: list,
workspace_path: str, do_plot=False) -> dict:
"""
The function returns a dictionary with the recovered R & T for each image
:param camera_pose_rel_dict: dictionary of relative camera poses for each image
:param first_image_pose: absolute R & T of the first image
:param workspace_path: path to the workspace directory
:param do_plot: boolean flag for debugging purposes
:return: camera_pose_recover dictionary
"""
# create directory for reference plots
ref_pose_images_path = path.join(workspace_path, 'ref_images')
if do_plot:
makedirs(ref_pose_images_path)
# initialize parameters for computing absolute camera poses
camera_pose_recover = {}
rotation = first_image_pose[0]
translation = first_image_pose[1]
is_first = True
prev_rotation = np.identity(3)
prev_translation = np.zeros(3)
# for each image, compute the absolute pose from the reference pose
for image in camera_pose_rel_dict.keys():
rel_rotation = camera_pose_rel_dict[image][0]
rel_translation = camera_pose_rel_dict[image][1]
# for the first image, take the values from the temporary model
if not is_first:
|
# compute the absolute camera pose
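# NumPy broadcasting adds the translation vector to each row of the 3x3 rotation matrix, yielding three 3D points used for plotting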
camera_pose = rotation + translation
if do_plot:
draw_rel_camera_pose(image, translation, camera_pose, ref_pose_images_path)
# save the values for each image (in R & T format)
camera_pose_recover[image] = [rotation, translation]
prev_rotation = rotation
prev_translation = translation
is_first = False
return camera_pose_recover
def write_camera_pose_to_file(camera_pose_abs_dict: dict, pose_dir_path: str) -> None:
"""
The function writes the recovered camera poses according to the COLMAP documentation
:param camera_pose_abs_dict: A dictionary of recovered camera poses for each image
:param pose_dir_path: path to the output pose directory
"""
image_dst = path.join(pose_dir_path, 'images.txt')
with open(image_dst, 'w+') as file:
file.write('# Image list with two lines of data per image:\n')
file.write('# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n')
file.write('# POINTS2D[] as (X, Y, POINT3D_ID)\n')
file.write(f'# Number of images: {len(camera_pose_abs_dict.keys())}\n')
# write each camera pose to file
for image in camera_pose_abs_dict.keys():
image_pose_data = []
t_vector = camera_pose_abs_dict[image][1]
qx, qy, qz, qw = rotation_matrix_to_quaternion(camera_pose_abs_dict[image][0])
image_pose_data.append(str(image))
# image_pose_data.append(f'{qw} {qx} {qy} {qz}')
image_pose_data.append(f'{qz} {qy} {qx} {qw}')
image_pose_data.append(' '.join(map(str, t_vector)))
image_pose_data.append('1')
image_pose_data.append(f'image{image}.jpg')
file.write(' '.join(image_pose_data) + '\n\n')
def clear_workspace(workspace_path: str) -> None:
"""
The function deletes all the files in the workspace folder except the input video
"""
# make sure the workspace is empty
for filename in listdir(workspace_path):
if filename.endswith('.h264'):
continue
path_to_node = path.join(workspace_path, filename)
if path.isdir(path_to_node):
shutil.rmtree(path_to_node)
else:
remove(path_to_node)
def main():
# Parse input arguments:
workspace_path = parse_args()
clear_workspace(workspace_path)
# prepare video and create the images for our model
video_thread = Thread(target=prepare_video, args=(workspace_path, 87))
video_thread.start()
# create temp folder for temp model
temp_model_workspace_path = create_temp_model(workspace_path)
# create camera pose parameters
pose_output_path = path.join(workspace_path, 'camera_poses')
makedirs(pose_output_path)
# create camera input file
camera_src = path.join(temp_model_workspace_path, 'sparse/0/cameras.txt')
camera_dst = path.join(pose_output_path, 'cameras.txt')
shutil.copyfile(camera_src, camera_dst)
# create an empty points input file
points_dst = path.join(pose_output_path, 'points3D.txt')
open(points_dst, 'w').close()
# get camera poses for first image
image_src = path.join(temp_model_workspace_path, 'sparse/0/images.txt')
first_image_pose = get_first_image_pose(image_src)
if not first_image_pose:
print("Error in temp model - cant compute the camera pose for the first image")
exit(1)
# reading the reference pose model from file
camera_pose_rel_dict = dd.io.load('ref_camera_pose.h5')
camera_pose_abs_dict = compute_absolut_camera_pose(camera_pose_rel_dict, first_image_pose, workspace_path,
do_plot=False)
# create the image file according to COLMAP documentation
write_camera_pose_to_file(camera_pose_abs_dict, pose_output_path)
# wait for the video thread before closing the process
video_thread.join()
if __name__ == "__main__":
print('==============================================================================')
print('workplace preparation')
print('==============================================================================')
main()
| rotation = rel_rotation @ np.linalg.inv(prev_rotation.T)
translation = rel_translation + prev_translation | conditional_block |
workplace_preparation.py | import math
import deepdish as dd
import shutil
import subprocess
from os import path, makedirs, remove, listdir
from argparse import ArgumentParser
from threading import Thread
import numpy as np
from matplotlib import pyplot as plt
number_of_images_in_temp_model = 10
def parse_args() -> str:
"""
Function to parse user argument
:return: workspace_path
"""
ap = ArgumentParser(description='Create camera_pose files.')
ap.add_argument('--workspace_path', required=True)
args = vars(ap.parse_args())
return args['workspace_path']
def remove_extra_images(path_to_images: str, number_of_images: int) -> None:
"""
The function removes all the extra images created in the images folder
:param path_to_images: path to the model images folder
:param number_of_images: the number of images to reconstruct our model
"""
last_image = 'image' + str(number_of_images) + '.jpg'
while last_image in listdir(path_to_images):
last_image_path = path.join(path_to_images, last_image)
remove(last_image_path)
print(f"remove {last_image}")
number_of_images += 1
last_image = 'image' + str(number_of_images) + '.jpg'
def prepare_video(path_to_video: str, number_of_images=87) -> None:
"""
The function prepares the images for our model based on a given video
:param path_to_video: path to the folder containing the h264 video
:param number_of_images: the number of images to reconstruct our model (87 by default)
"""
temp_video = path.join(path_to_video, 'temp_outpy.mp4')
video = path.join(path_to_video, 'outpy.h264')
# create mp4 video for metadata and compute video duration
subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])
result = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
"format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", temp_video],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
video_duration = float(result.stdout)
# create images folder
path_to_images = path.join(path_to_video, 'images')
if path.exists(path_to_images) and path.isdir(path_to_images):
shutil.rmtree(path_to_images)
makedirs(path_to_images)
# split the given video into images
subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',
path.join(path_to_images, 'image%d.jpg')])
# remove extra files
remove_extra_images(path_to_images, number_of_images)
remove(temp_video)
def create_temp_model(temp_dir_path: str) -> str:
"""
The function builds a temporary COLMAP model from the first images of the video
:param temp_dir_path: path to the workspace directory
:return: path to the temporary model folder
"""
# create temp images folder
path_to_temp_model = path.join(temp_dir_path, 'temp_model')
path_to_temp_images = path.join(path_to_temp_model, 'temp_images')
# remove old temporary folder if exists
if path.exists(path_to_temp_model) and path.isdir(path_to_temp_model):
shutil.rmtree(path_to_temp_model)
number_of_temp_images = 0
path_to_images = path.join(temp_dir_path, 'images')
# take only part of the images for the temp model
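# busy-wait until the video-preparation thread (started in main) has written enough frames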
while number_of_temp_images < number_of_images_in_temp_model:
try:
number_of_temp_images = len([name for name in listdir(path_to_images) if name.endswith('.jpg')])
except FileNotFoundError:
number_of_temp_images = 0
# copy the collected images into the temp model folder
shutil.copytree(path_to_images, path_to_temp_images)
# run colmap to create model for the first 10 images in video
subprocess.run(['colmap', 'automatic_reconstructor',
'--workspace_path', path_to_temp_model, '--image_path', path_to_temp_images,
'--data_type=video', '--quality=extreme'])
return path_to_temp_model
def quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np.ndarray:
"""
The function converts the quaternion vector to a rotation matrix
https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/
:param q0: the value of qw
:param q1: the value of qx
:param q2: the value of qy
:param q3: the value of qz
:return rot_matrix: rotation matrix 3x3 as NumPy array
"""
# First row of the rotation matrix
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
# Second row of the rotation matrix
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
# Third row of the rotation matrix
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
# 3x3 rotation matrix
rot_matrix = np.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]])
return rot_matrix
def rotation_matrix_to_quaternion(rotation_matrix: np.ndarray) -> object:
"""
The function converts rotation matrix to quaternion vector
https://learnopencv.com/rotation-matrix-to-euler-angles/
:param rotation_matrix: rotation matrix 3x3 represented by NumPy array
:return quaternion vector: defined by (qx, qy, qz, qw)
"""
cosine_for_pitch = math.sqrt(rotation_matrix[0][0] ** 2 + rotation_matrix[1][0] ** 2)
is_singular = cosine_for_pitch < 10 ** -6
if not is_singular:
yaw = math.atan2(rotation_matrix[1][0], rotation_matrix[0][0])
pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)
roll = math.atan2(rotation_matrix[2][1], rotation_matrix[2][2])
else:
yaw = math.atan2(-rotation_matrix[1][2], rotation_matrix[1][1])
pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)
roll = 0
e = (yaw, pitch, roll)
return euler_to_quaternion(e)
def euler_to_quaternion(euler: tuple) -> object:
"""
The function converts Euler angles to a quaternion
:param euler: tuple of (yaw, pitch, roll) angles in radians
:return quaternion vector: defined by (qx, qy, qz, qw)
"""
(yaw, pitch, roll) = (euler[0], euler[1], euler[2])
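# NOTE: qx/qy and qz/qw below are assigned in a swapped order compared to the textbook formula; this appears intentional, mirroring the swapped qy/qz and ty/tz column reads in get_first_image_pose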
qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)
qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)
qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
return qx, qy, qz, qw
def get_first_image_pose(image_src: str) -> list:
"""
The function return the absolut R & T for the first image in temp model
:param image_src: path to image file (colmap output)
:return R&T: R = list[0], T list[1] or None if image1 not exists
"""
# read images file
with open(image_src, 'r') as file:
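# COLMAP's images.txt starts with 4 comment lines, then two lines per image; keep only the pose line of each pair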
lines = file.readlines()[4::2]
# scan the image entries for image 1
for line in lines:
columns = line.split()
image_name = columns[9].split('.')[0]
image_id = int(image_name.split('e')[1])
# convert and return the camera pose for the first image in model
if image_id == 1:
qw = float(columns[1])
qx = float(columns[2])
qy = float(columns[4])
qz = float(columns[3])
rotation_matrix = quaternion_to_rotation_matrix(qw, qx, qy, qz)
tx = float(columns[5])
ty = float(columns[7])
tz = float(columns[6])
translation_vector = np.array([tx, ty, tz])
return [rotation_matrix, translation_vector]
return []
def draw_rel_camera_pose(image: int, origin: list, camera_pose: list, plot_dir_path: str) -> None:
"""
Debug function for plotting the relative camera poses
:param image: number of current image
:param origin: list of [x,y,z] of the origin
:param camera_pose: list of three 3D points [x1,y1,z1],[x2,y2,z2],[x3,y3,z3] describing the camera pose
:param plot_dir_path: path to plot directory
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elev=10.)
ax.set_title('camera pose image: %d' % image)
scale = 7
ax.set_xlim3d(-scale, scale)
ax.set_ylim3d(-scale, scale)
ax.set_zlim3d(-scale, scale)
# replace the Y-Axis with Z-Axis
ax.scatter(origin[0], origin[2], origin[1], c='black')
for i in range(3):
ax.plot([origin[0], camera_pose[i][0]], [origin[2], camera_pose[i][2]], [origin[1], camera_pose[i][1]])
fig.savefig(f'{plot_dir_path}/%d.png' % image)
plt.close(fig)
plt.clf()
def compute_absolut_camera_pose(camera_pose_rel_dict: dict, first_image_pose: list,
workspace_path: str, do_plot=False) -> dict:
"""
The function returns a dictionary with the recovered R & T for each image
:param camera_pose_rel_dict: dictionary of relative camera poses for each image
:param first_image_pose: absolute R & T of the first image
:param workspace_path: path to the workspace directory
:param do_plot: boolean flag for debugging purposes
:return: camera_pose_recover dictionary
"""
# create directory for reference plots
ref_pose_images_path = path.join(workspace_path, 'ref_images')
if do_plot:
makedirs(ref_pose_images_path)
# initialize parameters for computing absolute camera poses
camera_pose_recover = {}
rotation = first_image_pose[0]
translation = first_image_pose[1]
is_first = True
prev_rotation = np.identity(3)
prev_translation = np.zeros(3)
# for each image, compute the absolute pose from the reference pose
for image in camera_pose_rel_dict.keys():
rel_rotation = camera_pose_rel_dict[image][0]
rel_translation = camera_pose_rel_dict[image][1]
# for the first image, take the values from the temporary model
if not is_first:
rotation = rel_rotation @ np.linalg.inv(prev_rotation.T)
translation = rel_translation + prev_translation
# compute the absolute camera pose
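# NumPy broadcasting adds the translation vector to each row of the 3x3 rotation matrix, yielding three 3D points used for plotting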
camera_pose = rotation + translation
if do_plot:
draw_rel_camera_pose(image, translation, camera_pose, ref_pose_images_path)
# save the values for each image (in R & T format)
camera_pose_recover[image] = [rotation, translation]
prev_rotation = rotation
prev_translation = translation
is_first = False
return camera_pose_recover
def write_camera_pose_to_file(camera_pose_abs_dict: dict, pose_dir_path: str) -> None:
"""
The function writes the recovered camera poses according to the COLMAP documentation
:param camera_pose_abs_dict: A dictionary of recovered camera poses for each image
:param pose_dir_path: path to the output pose directory
"""
image_dst = path.join(pose_dir_path, 'images.txt')
with open(image_dst, 'w+') as file:
file.write('# Image list with two lines of data per image:\n')
file.write('# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n')
file.write('# POINTS2D[] as (X, Y, POINT3D_ID)\n')
file.write(f'# Number of images: {len(camera_pose_abs_dict.keys())}\n')
# write each camera pose to file
for image in camera_pose_abs_dict.keys():
image_pose_data = []
t_vector = camera_pose_abs_dict[image][1]
qx, qy, qz, qw = rotation_matrix_to_quaternion(camera_pose_abs_dict[image][0])
image_pose_data.append(str(image))
# image_pose_data.append(f'{qw} {qx} {qy} {qz}')
image_pose_data.append(f'{qz} {qy} {qx} {qw}')
image_pose_data.append(' '.join(map(str, t_vector)))
image_pose_data.append('1')
image_pose_data.append(f'image{image}.jpg')
file.write(' '.join(image_pose_data) + '\n\n')
def | (workspace_path: str) -> None:
"""
The function deletes all the files in the workspace folder except the input video
"""
# make sure the workspace is empty
for filename in listdir(workspace_path):
if filename.endswith('.h264'):
continue
path_to_node = path.join(workspace_path, filename)
if path.isdir(path_to_node):
shutil.rmtree(path_to_node)
else:
remove(path_to_node)
def main():
# Parse input arguments:
workspace_path = parse_args()
clear_workspace(workspace_path)
# prepare video and create the images for our model
video_thread = Thread(target=prepare_video, args=(workspace_path, 87))
video_thread.start()
# create temp folder for temp model
temp_model_workspace_path = create_temp_model(workspace_path)
# create camera pose parameters
pose_output_path = path.join(workspace_path, 'camera_poses')
makedirs(pose_output_path)
# create camera input file
camera_src = path.join(temp_model_workspace_path, 'sparse/0/cameras.txt')
camera_dst = path.join(pose_output_path, 'cameras.txt')
shutil.copyfile(camera_src, camera_dst)
# create an empty points input file
points_dst = path.join(pose_output_path, 'points3D.txt')
open(points_dst, 'w').close()
# get camera poses for first image
image_src = path.join(temp_model_workspace_path, 'sparse/0/images.txt')
first_image_pose = get_first_image_pose(image_src)
if not first_image_pose:
print("Error in temp model - cant compute the camera pose for the first image")
exit(1)
# reading the reference pose model from file
camera_pose_rel_dict = dd.io.load('ref_camera_pose.h5')
camera_pose_abs_dict = compute_absolut_camera_pose(camera_pose_rel_dict, first_image_pose, workspace_path,
do_plot=False)
# create the image file according to COLMAP documentation
write_camera_pose_to_file(camera_pose_abs_dict, pose_output_path)
# wait for the video thread before closing the process
video_thread.join()
if __name__ == "__main__":
print('==============================================================================')
print('workplace preparation')
print('==============================================================================')
main()
| clear_workspace | identifier_name |
workplace_preparation.py | import math
import deepdish as dd
import shutil
import subprocess
from os import path, makedirs, remove, listdir
from argparse import ArgumentParser
from threading import Thread
import numpy as np
from matplotlib import pyplot as plt
number_of_images_in_temp_model = 10
def parse_args() -> str:
"""
Function to parse user argument
:return: workspace_path
"""
ap = ArgumentParser(description='Create camera_pose files.')
ap.add_argument('--workspace_path', required=True)
args = vars(ap.parse_args())
return args['workspace_path']
def remove_extra_images(path_to_images: str, number_of_images: int) -> None:
"""
The function removes all the extra images created in the images folder
:param path_to_images: path to the model images folder
:param number_of_images: the number of images to reconstruct our model
"""
last_image = 'image' + str(number_of_images) + '.jpg'
while last_image in listdir(path_to_images):
last_image_path = path.join(path_to_images, last_image)
remove(last_image_path)
print(f"remove {last_image}")
number_of_images += 1
last_image = 'image' + str(number_of_images) + '.jpg'
def prepare_video(path_to_video: str, number_of_images=87) -> None:
"""
The function prepares the images for our model based on a given video
:param path_to_video: path to the folder containing the h264 video
:param number_of_images: the number of images to reconstruct our model (87 by default)
"""
temp_video = path.join(path_to_video, 'temp_outpy.mp4')
video = path.join(path_to_video, 'outpy.h264')
# create mp4 video for metadata and compute video duration
subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])
result = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
"format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", temp_video],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
video_duration = float(result.stdout)
# create images folder
path_to_images = path.join(path_to_video, 'images')
if path.exists(path_to_images) and path.isdir(path_to_images):
shutil.rmtree(path_to_images)
makedirs(path_to_images)
# split the given video into images
subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',
path.join(path_to_images, 'image%d.jpg')])
# remove extra files
remove_extra_images(path_to_images, number_of_images)
remove(temp_video)
def create_temp_model(temp_dir_path: str) -> str: | """
# create temp images folder
path_to_temp_model = path.join(temp_dir_path, 'temp_model')
path_to_temp_images = path.join(path_to_temp_model, 'temp_images')
# remove old temporary folder if exists
if path.exists(path_to_temp_model) and path.isdir(path_to_temp_model):
shutil.rmtree(path_to_temp_model)
number_of_temp_images = 0
path_to_images = path.join(temp_dir_path, 'images')
# take only part of the images for the temp model
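# busy-wait until the video-preparation thread (started in main) has written enough frames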
while number_of_temp_images < number_of_images_in_temp_model:
try:
number_of_temp_images = len([name for name in listdir(path_to_images) if name.endswith('.jpg')])
except FileNotFoundError:
number_of_temp_images = 0
# copy the collected images into the temp model folder
shutil.copytree(path_to_images, path_to_temp_images)
# run colmap to create model for the first 10 images in video
subprocess.run(['colmap', 'automatic_reconstructor',
'--workspace_path', path_to_temp_model, '--image_path', path_to_temp_images,
'--data_type=video', '--quality=extreme'])
return path_to_temp_model
def quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np.ndarray:
"""
The function converts the quaternion vector to a rotation matrix
https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/
:param q0: the value of qw
:param q1: the value of qx
:param q2: the value of qy
:param q3: the value of qz
:return rot_matrix: rotation matrix 3x3 as NumPy array
"""
# First row of the rotation matrix
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
# Second row of the rotation matrix
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
# Third row of the rotation matrix
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
# 3x3 rotation matrix
rot_matrix = np.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]])
return rot_matrix
def rotation_matrix_to_quaternion(rotation_matrix: np.ndarray) -> object:
"""
The function converts rotation matrix to quaternion vector
https://learnopencv.com/rotation-matrix-to-euler-angles/
:param rotation_matrix: rotation matrix 3x3 represented by NumPy array
:return quaternion vector: defined by (qx, qy, qz, qw)
"""
cosine_for_pitch = math.sqrt(rotation_matrix[0][0] ** 2 + rotation_matrix[1][0] ** 2)
is_singular = cosine_for_pitch < 10 ** -6
if not is_singular:
yaw = math.atan2(rotation_matrix[1][0], rotation_matrix[0][0])
pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)
roll = math.atan2(rotation_matrix[2][1], rotation_matrix[2][2])
else:
yaw = math.atan2(-rotation_matrix[1][2], rotation_matrix[1][1])
pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)
roll = 0
e = (yaw, pitch, roll)
return euler_to_quaternion(e)
def euler_to_quaternion(euler: tuple) -> object:
"""
The function converts Euler angles to a quaternion
:param euler: tuple of (yaw, pitch, roll) angles in radians
:return quaternion vector: defined by (qx, qy, qz, qw)
"""
(yaw, pitch, roll) = (euler[0], euler[1], euler[2])
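# NOTE: qx/qy and qz/qw below are assigned in a swapped order compared to the textbook formula; this appears intentional, mirroring the swapped qy/qz and ty/tz column reads in get_first_image_pose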
qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)
qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)
qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
return qx, qy, qz, qw
def get_first_image_pose(image_src: str) -> list:
"""
The function return the absolut R & T for the first image in temp model
:param image_src: path to image file (colmap output)
:return R&T: R = list[0], T list[1] or None if image1 not exists
"""
# read images file
with open(image_src, 'r') as file:
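# COLMAP's images.txt starts with 4 comment lines, then two lines per image; keep only the pose line of each pair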
lines = file.readlines()[4::2]
# scan the image entries for image 1
for line in lines:
columns = line.split()
image_name = columns[9].split('.')[0]
image_id = int(image_name.split('e')[1])
# convert and return the camera pose for the first image in model
if image_id == 1:
qw = float(columns[1])
qx = float(columns[2])
qy = float(columns[4])
qz = float(columns[3])
rotation_matrix = quaternion_to_rotation_matrix(qw, qx, qy, qz)
tx = float(columns[5])
ty = float(columns[7])
tz = float(columns[6])
translation_vector = np.array([tx, ty, tz])
return [rotation_matrix, translation_vector]
return []
def draw_rel_camera_pose(image: int, origin: list, camera_pose: list, plot_dir_path: str) -> None:
"""
Debug function for plotting the relative camera poses
:param image: number of current image
:param origin: list of [x,y,z] of the origin
:param camera_pose: list of three 3D points [x1,y1,z1],[x2,y2,z2],[x3,y3,z3] describing the camera pose
:param plot_dir_path: path to plot directory
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elev=10.)
ax.set_title('camera pose image: %d' % image)
scale = 7
ax.set_xlim3d(-scale, scale)
ax.set_ylim3d(-scale, scale)
ax.set_zlim3d(-scale, scale)
# replace the Y-Axis with Z-Axis
ax.scatter(origin[0], origin[2], origin[1], c='black')
for i in range(3):
ax.plot([origin[0], camera_pose[i][0]], [origin[2], camera_pose[i][2]], [origin[1], camera_pose[i][1]])
fig.savefig(f'{plot_dir_path}/%d.png' % image)
plt.close(fig)
plt.clf()
def compute_absolut_camera_pose(camera_pose_rel_dict: dict, first_image_pose: list,
workspace_path: str, do_plot=False) -> dict:
"""
The function returns a dictionary with the recovered R & T for each image
:param camera_pose_rel_dict: dictionary of relative camera poses for each image
:param first_image_pose: absolute R & T of the first image
:param workspace_path: path to the workspace directory
:param do_plot: boolean flag for debugging purposes
:return: camera_pose_recover dictionary
"""
# create directory for reference plots
ref_pose_images_path = path.join(workspace_path, 'ref_images')
if do_plot:
makedirs(ref_pose_images_path)
# initialize parameters for computing absolute camera poses
camera_pose_recover = {}
rotation = first_image_pose[0]
translation = first_image_pose[1]
is_first = True
prev_rotation = np.identity(3)
prev_translation = np.zeros(3)
# for each image, compute the absolute pose from the reference pose
for image in camera_pose_rel_dict.keys():
rel_rotation = camera_pose_rel_dict[image][0]
rel_translation = camera_pose_rel_dict[image][1]
# for the first image, take the values from the temporary model
if not is_first:
rotation = rel_rotation @ np.linalg.inv(prev_rotation.T)
translation = rel_translation + prev_translation
# compute the absolute camera pose
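# NumPy broadcasting adds the translation vector to each row of the 3x3 rotation matrix, yielding three 3D points used for plotting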
camera_pose = rotation + translation
if do_plot:
draw_rel_camera_pose(image, translation, camera_pose, ref_pose_images_path)
# save the values for each image (in R & T format)
camera_pose_recover[image] = [rotation, translation]
prev_rotation = rotation
prev_translation = translation
is_first = False
return camera_pose_recover
def write_camera_pose_to_file(camera_pose_abs_dict: dict, pose_dir_path: str) -> None:
"""
The function writes the recovered camera poses according to the COLMAP documentation
:param camera_pose_abs_dict: A dictionary of recovered camera poses for each image
:param pose_dir_path: path to the output pose directory
"""
image_dst = path.join(pose_dir_path, 'images.txt')
with open(image_dst, 'w+') as file:
file.write('# Image list with two lines of data per image:\n')
file.write('# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n')
file.write('# POINTS2D[] as (X, Y, POINT3D_ID)\n')
file.write(f'# Number of images: {len(camera_pose_abs_dict.keys())}\n')
# write each camera pose to file
for image in camera_pose_abs_dict.keys():
image_pose_data = []
t_vector = camera_pose_abs_dict[image][1]
qx, qy, qz, qw = rotation_matrix_to_quaternion(camera_pose_abs_dict[image][0])
image_pose_data.append(str(image))
# image_pose_data.append(f'{qw} {qx} {qy} {qz}')
image_pose_data.append(f'{qz} {qy} {qx} {qw}')
image_pose_data.append(' '.join(map(str, t_vector)))
image_pose_data.append('1')
image_pose_data.append(f'image{image}.jpg')
file.write(' '.join(image_pose_data) + '\n\n')
def clear_workspace(workspace_path: str) -> None:
"""
The function deletes all the files in the workspace folder except the input video
"""
# make sure the workspace is empty
for filename in listdir(workspace_path):
if filename.endswith('.h264'):
continue
path_to_node = path.join(workspace_path, filename)
if path.isdir(path_to_node):
shutil.rmtree(path_to_node)
else:
remove(path_to_node)
def main():
# Parse input arguments:
workspace_path = parse_args()
clear_workspace(workspace_path)
# prepare video and create the images for our model
video_thread = Thread(target=prepare_video, args=(workspace_path, 87))
video_thread.start()
# create temp folder for temp model
temp_model_workspace_path = create_temp_model(workspace_path)
# create camera pose parameters
pose_output_path = path.join(workspace_path, 'camera_poses')
makedirs(pose_output_path)
# create camera input file
camera_src = path.join(temp_model_workspace_path, 'sparse/0/cameras.txt')
camera_dst = path.join(pose_output_path, 'cameras.txt')
shutil.copyfile(camera_src, camera_dst)
# create an empty points input file
points_dst = path.join(pose_output_path, 'points3D.txt')
open(points_dst, 'w').close()
# get camera poses for first image
image_src = path.join(temp_model_workspace_path, 'sparse/0/images.txt')
first_image_pose = get_first_image_pose(image_src)
if not first_image_pose:
print("Error in temp model - cant compute the camera pose for the first image")
exit(1)
# reading the reference pose model from file
camera_pose_rel_dict = dd.io.load('ref_camera_pose.h5')
camera_pose_abs_dict = compute_absolut_camera_pose(camera_pose_rel_dict, first_image_pose, workspace_path,
do_plot=False)
# create the image file according to COLMAP documentation
write_camera_pose_to_file(camera_pose_abs_dict, pose_output_path)
# wait for the video thread before closing the process
video_thread.join()
if __name__ == "__main__":
print('==============================================================================')
print('workplace preparation')
print('==============================================================================')
main() | """
The function builds a temporary COLMAP model from the first images of the video
:param temp_dir_path: path to the workspace directory
:return: path to the temporary model folder | random_line_split
common.go | package common
import (
"context"
"crypto/rand"
"errors"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
"os"
"os/user"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"time"
"unicode"
"github.com/thrasher-corp/gocryptotrader/common/file"
"github.com/thrasher-corp/gocryptotrader/log"
)
const (
// SimpleTimeFormatWithTimezone is a common time format with a timezone suffix that golang does not predefine
SimpleTimeFormatWithTimezone = time.DateTime + " MST"
// GctExt is the extension for GCT Tengo script files
GctExt = ".gct"
defaultTimeout = time.Second * 15
)
// Strings representing the full lower, upper case English character alphabet and base-10 numbers for generating a random string.
const (
SmallLetters = "abcdefghijklmnopqrstuvwxyz"
CapitalLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
NumberCharacters = "0123456789"
)
var (
// emailRX represents email address matching pattern
emailRX = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
)
// Vars for common.go operations
var (
_HTTPClient *http.Client
_HTTPUserAgent string
m sync.RWMutex
// ErrNotYetImplemented defines a common error across the code base that
// alerts that a function has not been completed or tied into the main code
ErrNotYetImplemented = errors.New("not yet implemented")
// ErrFunctionNotSupported defines a standardised error for an unsupported
// wrapper function by an API
ErrFunctionNotSupported = errors.New("unsupported wrapper function")
errInvalidCryptoCurrency = errors.New("invalid crypto currency")
// ErrDateUnset is an error for start end check calculations
ErrDateUnset = errors.New("date unset")
// ErrStartAfterEnd is an error for start end check calculations
ErrStartAfterEnd = errors.New("start date after end date")
// ErrStartEqualsEnd is an error for start end check calculations
ErrStartEqualsEnd = errors.New("start date equals end date")
// ErrStartAfterTimeNow is an error for start end check calculations
ErrStartAfterTimeNow = errors.New("start date is after current time")
// ErrNilPointer defines an error for a nil pointer
ErrNilPointer = errors.New("nil pointer")
// ErrCannotCalculateOffline is returned when a request wishes to calculate
// something offline, but has an online requirement
ErrCannotCalculateOffline = errors.New("cannot calculate offline")
// ErrNoResponse is returned when a response has no entries/is empty
// when one is expected
ErrNoResponse = errors.New("no response")
errCannotSetInvalidTimeout = errors.New("cannot set new HTTP client with timeout that is equal or less than 0")
errUserAgentInvalid = errors.New("cannot set invalid user agent")
errHTTPClientInvalid = errors.New("custom http client cannot be nil")
zeroValueUnix = time.Unix(0, 0)
// ErrTypeAssertFailure defines an error when type assertion fails
ErrTypeAssertFailure = errors.New("type assert failure")
)
// MatchesEmailPattern ensures that the string is an email address by regexp check
func MatchesEmailPattern(value string) bool {
if len(value) < 3 || len(value) > 254 {
return false
}
return emailRX.MatchString(value)
}
// SetHTTPClientWithTimeout sets a new *http.Client with different timeout
// settings
func SetHTTPClientWithTimeout(t time.Duration) error {
if t <= 0 {
return errCannotSetInvalidTimeout
}
m.Lock()
_HTTPClient = NewHTTPClientWithTimeout(t)
m.Unlock()
return nil
}
// SetHTTPUserAgent sets the user agent which will be used for all common HTTP
// requests.
func SetHTTPUserAgent(agent string) error {
if agent == "" {
return errUserAgentInvalid
}
m.Lock()
_HTTPUserAgent = agent
m.Unlock()
return nil
}
// SetHTTPClient sets a custom HTTP client.
func SetHTTPClient(client *http.Client) error {
if client == nil {
return errHTTPClientInvalid
}
m.Lock()
_HTTPClient = client
m.Unlock()
return nil
}
// NewHTTPClientWithTimeout initialises a new HTTP client and its underlying
// transport IdleConnTimeout with the specified timeout duration
func NewHTTPClientWithTimeout(t time.Duration) *http.Client {
tr := &http.Transport{
// Added IdleConnTimeout to reduce the time of idle connections which
// could potentially slow macOS reconnection when there is a sudden
// network disconnection/issue
IdleConnTimeout: t,
Proxy: http.ProxyFromEnvironment,
}
h := &http.Client{
Transport: tr,
Timeout: t}
return h
}
// StringSliceDifference returns the symmetric difference of two string
// slices: the elements that appear in exactly one of the slices
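// e.g. StringSliceDifference([]string{"a", "b"}, []string{"b", "c"}) returns ["a", "c"]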
func StringSliceDifference(slice1, slice2 []string) []string {
var diff []string
for i := 0; i < 2; i++ {
for _, s1 := range slice1 {
found := false
for _, s2 := range slice2 {
if s1 == s2 {
found = true
break
}
}
if !found {
diff = append(diff, s1)
}
}
if i == 0 {
slice1, slice2 = slice2, slice1
}
}
return diff
}
// StringDataContains reports whether needle occurs as a substring anywhere in the joined haystack
func StringDataContains(haystack []string, needle string) bool {
data := strings.Join(haystack, ",")
return strings.Contains(data, needle)
}
// StringDataCompare reports whether needle exactly matches an element of haystack
func StringDataCompare(haystack []string, needle string) bool {
for x := range haystack {
if haystack[x] == needle {
return true
}
}
return false
}
// StringDataCompareInsensitive reports whether needle matches an element of
// haystack, ignoring case
func StringDataCompareInsensitive(haystack []string, needle string) bool {
for x := range haystack {
if strings.EqualFold(haystack[x], needle) {
return true
}
}
return false
}
// StringDataContainsInsensitive reports whether needle occurs as a substring
// of any element of haystack, ignoring case
func StringDataContainsInsensitive(haystack []string, needle string) bool {
for _, data := range haystack {
if strings.Contains(strings.ToUpper(data), strings.ToUpper(needle)) {
return true
}
}
return false
}
// IsEnabled takes in a boolean param and returns a string if it is enabled
// or disabled
func IsEnabled(isEnabled bool) string {
if isEnabled {
return "Enabled"
}
return "Disabled"
}
// IsValidCryptoAddress validates a cryptocurrency address string using the
// regexp package. Validation is best-effort: e.g. a leading "3" appears in
// both Litecoin and Bitcoin addresses, so mismatches are non-fatal
func IsValidCryptoAddress(address, crypto string) (bool, error) { | case "eth":
return regexp.MatchString("^0x[a-km-z0-9]{40}$", address)
default:
return false, fmt.Errorf("%w %s", errInvalidCryptoCurrency, crypto)
}
}
// YesOrNo reports whether the input is "y" or "yes", ignoring case
func YesOrNo(input string) bool {
if strings.EqualFold(input, "y") || strings.EqualFold(input, "yes") {
return true
}
return false
}
// SendHTTPRequest sends a request using the http package and returns the body
// contents
func SendHTTPRequest(ctx context.Context, method, urlPath string, headers map[string]string, body io.Reader, verbose bool) ([]byte, error) {
method = strings.ToUpper(method)
if method != http.MethodOptions && method != http.MethodGet &&
method != http.MethodHead && method != http.MethodPost &&
method != http.MethodPut && method != http.MethodDelete &&
method != http.MethodTrace && method != http.MethodConnect {
return nil, errors.New("invalid HTTP method specified")
}
req, err := http.NewRequestWithContext(ctx, method, urlPath, body)
if err != nil {
return nil, err
}
for k, v := range headers {
req.Header.Add(k, v)
}
if verbose {
log.Debugf(log.Global, "Request path: %s", urlPath)
for k, d := range req.Header {
log.Debugf(log.Global, "Request header [%s]: %s", k, d)
}
log.Debugf(log.Global, "Request type: %s", method)
if body != nil {
log.Debugf(log.Global, "Request body: %v", body)
}
}
m.RLock()
if _HTTPUserAgent != "" && req.Header.Get("User-Agent") == "" {
req.Header.Add("User-Agent", _HTTPUserAgent)
}
if _HTTPClient == nil {
m.RUnlock()
m.Lock()
// Set *http.Client with default timeout if not populated.
_HTTPClient = NewHTTPClientWithTimeout(defaultTimeout)
m.Unlock()
m.RLock()
}
resp, err := _HTTPClient.Do(req)
m.RUnlock()
if err != nil {
return nil, err
}
defer resp.Body.Close()
contents, err := io.ReadAll(resp.Body)
if verbose {
log.Debugf(log.Global, "HTTP status: %s, Code: %v",
resp.Status,
resp.StatusCode)
log.Debugf(log.Global, "Raw response: %s", string(contents))
}
return contents, err
}
// EncodeURLValues concatenates url values onto a url string and returns a
// string
func EncodeURLValues(urlPath string, values url.Values) string {
u := urlPath
if len(values) > 0 {
u += "?" + values.Encode()
}
return u
}
// ExtractHost returns the hostname from an address string, defaulting to "localhost" when none is present
func ExtractHost(address string) string {
host, _, _ := net.SplitHostPort(address)
if host == "" {
return "localhost"
}
return host
}
// ExtractPort returns the port from an address string, defaulting to 80 when none is present
func ExtractPort(host string) int {
_, port, _ := net.SplitHostPort(host)
if port == "" {
return 80
}
portInt, _ := strconv.Atoi(port)
return portInt
}
// GetURIPath returns the path of a URL given a URI
func GetURIPath(uri string) string {
urip, err := url.Parse(uri)
if err != nil {
return ""
}
if urip.RawQuery != "" {
return urip.Path + "?" + urip.RawQuery
}
return urip.Path
}
// GetExecutablePath returns the executable's launch path
func GetExecutablePath() (string, error) {
ex, err := os.Executable()
if err != nil {
return "", err
}
return filepath.Dir(ex), nil
}
// GetDefaultDataDir returns the default data directory
// Windows - C:\Users\%USER%\AppData\Roaming\GoCryptoTrader
// Linux/Unix or OSX - $HOME/.gocryptotrader
func GetDefaultDataDir(env string) string {
if env == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "GoCryptoTrader")
}
usr, err := user.Current()
if err == nil {
return filepath.Join(usr.HomeDir, ".gocryptotrader")
}
dir, err := os.UserHomeDir()
if err != nil {
log.Warnln(log.Global, "Environment variable unset, defaulting to current directory")
dir = "."
}
return filepath.Join(dir, ".gocryptotrader")
}
// CreateDir creates a directory based on the supplied parameter
func CreateDir(dir string) error {
_, err := os.Stat(dir)
if !os.IsNotExist(err) {
return nil
}
log.Warnf(log.Global, "Directory %s does not exist.. creating.\n", dir)
return os.MkdirAll(dir, file.DefaultPermissionOctal)
}
// ChangePermission walks the directory tree and resets any file or directory whose permissions differ from the default
func ChangePermission(directory string) error {
return filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode().Perm() != file.DefaultPermissionOctal {
return os.Chmod(path, file.DefaultPermissionOctal)
}
return nil
})
}
// SplitStringSliceByLimit splits a slice of strings into slices by input limit and returns a slice of slice of strings
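// e.g. a limit of 2 on ["a", "b", "c"] yields [["a", "b"], ["c"]]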
func SplitStringSliceByLimit(in []string, limit uint) [][]string {
var stringSlice []string
sliceSlice := make([][]string, 0, len(in)/int(limit)+1)
for len(in) >= int(limit) {
stringSlice, in = in[:limit], in[limit:]
sliceSlice = append(sliceSlice, stringSlice)
}
if len(in) > 0 {
sliceSlice = append(sliceSlice, in)
}
return sliceSlice
}
// AddPaddingOnUpperCase adds padding to a string when detecting an upper case letter. If
// there are multiple upper case items like `ThisIsHTTPExample`, it will only
// pad between like this `This Is HTTP Example`.
func AddPaddingOnUpperCase(s string) string {
if s == "" {
return ""
}
var result []string
left := 0
for x := 0; x < len(s); x++ {
if x == 0 {
continue
}
if unicode.IsUpper(rune(s[x])) {
if !unicode.IsUpper(rune(s[x-1])) {
result = append(result, s[left:x])
left = x
}
} else if x > 1 && unicode.IsUpper(rune(s[x-1])) {
if s[left:x-1] == "" {
continue
}
result = append(result, s[left:x-1])
left = x - 1
}
}
result = append(result, s[left:])
return strings.Join(result, " ")
}
// InArray checks if _val_ belongs to _array_
func InArray(val, array interface{}) (exists bool, index int) {
exists = false
index = -1
if array == nil {
return
}
switch reflect.TypeOf(array).Kind() {
case reflect.Array, reflect.Slice:
s := reflect.ValueOf(array)
for i := 0; i < s.Len(); i++ {
if reflect.DeepEqual(val, s.Index(i).Interface()) {
index = i
exists = true
return
}
}
}
return
}
// multiError holds all the errors as a slice; it is unexported so callers
// must rely on the standard errors package for handling.
type multiError struct {
loadedErrors []error
offset *int
}
// AppendError appends error in a more idiomatic way. This can start out as a
// standard error e.g. err := errors.New("random error")
// err = AppendError(err, errors.New("another random error"))
func AppendError(original, incoming error) error {
errSliceP, ok := original.(*multiError)
if ok {
errSliceP.offset = nil
}
if incoming == nil {
return original // Skip append - continue as normal.
}
if !ok {
// This assumes that a standard error is passed in and we want to
// track it and add additional errors.
errSliceP = &multiError{}
if original != nil {
errSliceP.loadedErrors = append(errSliceP.loadedErrors, original)
}
}
if incomingSlice, ok := incoming.(*multiError); ok {
// Join slices if needed.
errSliceP.loadedErrors = append(errSliceP.loadedErrors, incomingSlice.loadedErrors...)
} else {
errSliceP.loadedErrors = append(errSliceP.loadedErrors, incoming)
}
return errSliceP
}
// Error displays all errors comma separated. If Unwrap has been called and the
// offset has not been reset, only that individual error is displayed.
func (e *multiError) Error() string {
if e.offset != nil {
return e.loadedErrors[*e.offset].Error()
}
allErrors := make([]string, len(e.loadedErrors))
for x := range e.loadedErrors {
allErrors[x] = e.loadedErrors[x].Error()
}
return strings.Join(allErrors, ", ")
}
// Unwrap increments the offset so errors.Is() can be matched against each
// individual error for correct matching.
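// Each successive call steps through loadedErrors; after the final error the
// offset resets and nil is returned so the errors package stops unwrapping.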
func (e *multiError) Unwrap() error {
if e.offset == nil {
e.offset = new(int)
} else {
*e.offset++
}
if *e.offset == len(e.loadedErrors) {
e.offset = nil
return nil // Force errors.Is package to return false.
}
return e
}
// Is checks to see if the errors match. It calls package errors.Is() so that
// we can keep fmt.Errorf() trimmings. This is called in errors package at
// interface assertion err.(interface{ Is(error) bool }).
func (e *multiError) Is(incoming error) bool {
if e.offset != nil && errors.Is(e.loadedErrors[*e.offset], incoming) {
e.offset = nil
return true
}
return false
}
// StartEndTimeCheck provides some basic checks which occur
// frequently in the codebase
func StartEndTimeCheck(start, end time.Time) error {
if start.IsZero() || start.Equal(zeroValueUnix) {
return fmt.Errorf("start %w", ErrDateUnset)
}
if end.IsZero() || end.Equal(zeroValueUnix) {
return fmt.Errorf("end %w", ErrDateUnset)
}
if start.After(end) {
return ErrStartAfterEnd
}
if start.Equal(end) {
return ErrStartEqualsEnd
}
if start.After(time.Now()) {
return ErrStartAfterTimeNow
}
return nil
}
// GenerateRandomString generates a random string of the given length from the
// supplied character sets (SmallLetters, CapitalLetters, NumberCharacters).
// If no characters are provided, NumberCharacters (the numeric set) is used.
func GenerateRandomString(length uint, characters ...string) (string, error) {
if length == 0 {
return "", errors.New("invalid length, length must be non-zero positive integer")
}
b := make([]byte, length)
chars := strings.Replace(strings.Join(characters, ""), " ", "", -1)
if chars == "" && len(characters) != 0 {
return "", errors.New("invalid characters, character must not be empty")
} else if chars == "" {
chars = NumberCharacters
}
for i := range b {
nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))
if err != nil {
return "", err
}
n := nBig.Int64()
b[i] = chars[n]
}
return string(b), nil
}
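// Illustrative sketch (assumed _test.go material): a 12-character string
// drawn from the three package character-set constants.
func exampleGenerateRandomString() (string, error) {
	return GenerateRandomString(12, SmallLetters, CapitalLetters, NumberCharacters)
}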
// GetTypeAssertError returns additional information for when an assertion
// failure occurs.
// fieldDescription optionally names the affected field(s)
func GetTypeAssertError(required string, received interface{}, fieldDescription ...string) error {
var description string
if len(fieldDescription) > 0 {
description = " for: " + strings.Join(fieldDescription, ", ")
}
return fmt.Errorf("%w from %T to %s%s", ErrTypeAssertFailure, received, required, description)
} | switch strings.ToLower(crypto) {
case "btc":
return regexp.MatchString("^(bc1|[13])[a-zA-HJ-NP-Z0-9]{25,90}$", address)
case "ltc":
return regexp.MatchString("^[L3M][a-km-zA-HJ-NP-Z1-9]{25,34}$", address) | random_line_split |
common.go | package common
import (
"context"
"crypto/rand"
"errors"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
"os"
"os/user"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"time"
"unicode"
"github.com/thrasher-corp/gocryptotrader/common/file"
"github.com/thrasher-corp/gocryptotrader/log"
)
const (
// SimpleTimeFormatWithTimezone is a common time format that golang's standard library does not predefine
SimpleTimeFormatWithTimezone = time.DateTime + " MST"
// GctExt is the extension for GCT Tengo script files
GctExt = ".gct"
defaultTimeout = time.Second * 15
)
// Character sets (the full lower and upper case English alphabets and the base-10 digits) used for generating a random string.
const (
SmallLetters = "abcdefghijklmnopqrstuvwxyz"
CapitalLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
NumberCharacters = "0123456789"
)
var (
// emailRX represents email address matching pattern
emailRX = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
)
// Vars for common.go operations
var (
_HTTPClient *http.Client
_HTTPUserAgent string
m sync.RWMutex
// ErrNotYetImplemented defines a common error across the code base that
// alerts of a function that has not been completed or tied into main code
ErrNotYetImplemented = errors.New("not yet implemented")
// ErrFunctionNotSupported defines a standardised error for an unsupported
// wrapper function by an API
ErrFunctionNotSupported = errors.New("unsupported wrapper function")
errInvalidCryptoCurrency = errors.New("invalid crypto currency")
// ErrDateUnset is an error for start end check calculations
ErrDateUnset = errors.New("date unset")
// ErrStartAfterEnd is an error for start end check calculations
ErrStartAfterEnd = errors.New("start date after end date")
// ErrStartEqualsEnd is an error for start end check calculations
ErrStartEqualsEnd = errors.New("start date equals end date")
// ErrStartAfterTimeNow is an error for start end check calculations
ErrStartAfterTimeNow = errors.New("start date is after current time")
// ErrNilPointer defines an error for a nil pointer
ErrNilPointer = errors.New("nil pointer")
// ErrCannotCalculateOffline is returned when a request wishes to calculate
// something offline, but has an online requirement
ErrCannotCalculateOffline = errors.New("cannot calculate offline")
// ErrNoResponse is returned when a response has no entries/is empty
// when one is expected
ErrNoResponse = errors.New("no response")
errCannotSetInvalidTimeout = errors.New("cannot set new HTTP client with timeout that is equal or less than 0")
errUserAgentInvalid = errors.New("cannot set invalid user agent")
errHTTPClientInvalid = errors.New("custom http client cannot be nil")
zeroValueUnix = time.Unix(0, 0)
// ErrTypeAssertFailure defines an error when type assertion fails
ErrTypeAssertFailure = errors.New("type assert failure")
)
// MatchesEmailPattern ensures that the string is an email address by regexp check
func MatchesEmailPattern(value string) bool {
if len(value) < 3 || len(value) > 254 {
return false
}
return emailRX.MatchString(value)
}
// SetHTTPClientWithTimeout sets a new *http.Client with different timeout
// settings
func SetHTTPClientWithTimeout(t time.Duration) error {
if t <= 0 {
return errCannotSetInvalidTimeout
}
m.Lock()
_HTTPClient = NewHTTPClientWithTimeout(t)
m.Unlock()
return nil
}
// SetHTTPUserAgent sets the user agent which will be used for all common HTTP
// requests.
func SetHTTPUserAgent(agent string) error |
// SetHTTPClient sets a custom HTTP client.
func SetHTTPClient(client *http.Client) error {
if client == nil {
return errHTTPClientInvalid
}
m.Lock()
_HTTPClient = client
m.Unlock()
return nil
}
// NewHTTPClientWithTimeout initialises a new HTTP client and its underlying
// transport IdleConnTimeout with the specified timeout duration
func NewHTTPClientWithTimeout(t time.Duration) *http.Client {
tr := &http.Transport{
// Added IdleConnTimeout to reduce the time of idle connections which
// could potentially slow macOS reconnection when there is a sudden
// network disconnection/issue
IdleConnTimeout: t,
Proxy: http.ProxyFromEnvironment,
}
h := &http.Client{
Transport: tr,
Timeout: t}
return h
}
// StringSliceDifference returns the symmetric difference of two string
// slices, i.e. the elements that appear in exactly one of them
func StringSliceDifference(slice1, slice2 []string) []string {
var diff []string
for i := 0; i < 2; i++ {
for _, s1 := range slice1 {
found := false
for _, s2 := range slice2 {
if s1 == s2 {
found = true
break
}
}
if !found {
diff = append(diff, s1)
}
}
if i == 0 {
slice1, slice2 = slice2, slice1
}
}
return diff
}
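// Illustrative sketch (assumption: would normally live in a _test.go file)
// showing the symmetric-difference behaviour described above.
func exampleStringSliceDifference() []string {
	a := []string{"btc", "ltc", "eth"}
	b := []string{"ltc", "xrp"}
	return StringSliceDifference(a, b) // [btc eth xrp]
}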
// StringDataContains reports whether needle occurs as a substring of the comma-joined haystack
func StringDataContains(haystack []string, needle string) bool {
data := strings.Join(haystack, ",")
return strings.Contains(data, needle)
}
// StringDataCompare reports whether needle exactly matches an element of haystack
func StringDataCompare(haystack []string, needle string) bool {
for x := range haystack {
if haystack[x] == needle {
return true
}
}
return false
}
// StringDataCompareInsensitive reports whether needle matches an element of
// haystack, irrespective of lower or upper case
func StringDataCompareInsensitive(haystack []string, needle string) bool {
for x := range haystack {
if strings.EqualFold(haystack[x], needle) {
return true
}
}
return false
}
// StringDataContainsInsensitive reports whether needle occurs as a substring
// of any element of haystack, irrespective of lower or upper case
func StringDataContainsInsensitive(haystack []string, needle string) bool {
for _, data := range haystack {
if strings.Contains(strings.ToUpper(data), strings.ToUpper(needle)) {
return true
}
}
return false
}
// IsEnabled takes in a boolean param and returns a string if it is enabled
// or disabled
func IsEnabled(isEnabled bool) string {
if isEnabled {
return "Enabled"
}
return "Disabled"
}
// IsValidCryptoAddress validates a cryptocurrency address string using the
// regexp package. Note: validation issues occur because "3" can prefix both
// Litecoin and Bitcoin addresses - non-fatal
func IsValidCryptoAddress(address, crypto string) (bool, error) {
switch strings.ToLower(crypto) {
case "btc":
return regexp.MatchString("^(bc1|[13])[a-zA-HJ-NP-Z0-9]{25,90}$", address)
case "ltc":
return regexp.MatchString("^[L3M][a-km-zA-HJ-NP-Z1-9]{25,34}$", address)
case "eth":
return regexp.MatchString("^0x[a-km-z0-9]{40}$", address)
default:
return false, fmt.Errorf("%w %s", errInvalidCryptoCurrency, crypto)
}
}
// YesOrNo reports whether the input is "y" or "yes", irrespective of case
func YesOrNo(input string) bool {
if strings.EqualFold(input, "y") || strings.EqualFold(input, "yes") {
return true
}
return false
}
// SendHTTPRequest sends a request using the http package and returns the body
// contents
func SendHTTPRequest(ctx context.Context, method, urlPath string, headers map[string]string, body io.Reader, verbose bool) ([]byte, error) {
method = strings.ToUpper(method)
if method != http.MethodOptions && method != http.MethodGet &&
method != http.MethodHead && method != http.MethodPost &&
method != http.MethodPut && method != http.MethodDelete &&
method != http.MethodTrace && method != http.MethodConnect {
return nil, errors.New("invalid HTTP method specified")
}
req, err := http.NewRequestWithContext(ctx, method, urlPath, body)
if err != nil {
return nil, err
}
for k, v := range headers {
req.Header.Add(k, v)
}
if verbose {
log.Debugf(log.Global, "Request path: %s", urlPath)
for k, d := range req.Header {
log.Debugf(log.Global, "Request header [%s]: %s", k, d)
}
log.Debugf(log.Global, "Request type: %s", method)
if body != nil {
log.Debugf(log.Global, "Request body: %v", body)
}
}
m.RLock()
if _HTTPUserAgent != "" && req.Header.Get("User-Agent") == "" {
req.Header.Add("User-Agent", _HTTPUserAgent)
}
if _HTTPClient == nil {
m.RUnlock()
m.Lock()
// Set *http.Client with default timeout if not populated.
_HTTPClient = NewHTTPClientWithTimeout(defaultTimeout)
m.Unlock()
m.RLock()
}
resp, err := _HTTPClient.Do(req)
m.RUnlock()
if err != nil {
return nil, err
}
defer resp.Body.Close()
contents, err := io.ReadAll(resp.Body)
if verbose {
log.Debugf(log.Global, "HTTP status: %s, Code: %v",
resp.Status,
resp.StatusCode)
log.Debugf(log.Global, "Raw response: %s", string(contents))
}
return contents, err
}
// EncodeURLValues concatenates url values onto a url string and returns a
// string
func EncodeURLValues(urlPath string, values url.Values) string {
u := urlPath
if len(values) > 0 {
u += "?" + values.Encode()
}
return u
}
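// Illustrative sketch (assumed _test.go material; the host is a placeholder):
// url.Values.Encode sorts keys, so this yields
// "https://example.com/api?limit=100&symbol=BTCUSD".
func exampleEncodeURLValues() string {
	v := url.Values{}
	v.Set("symbol", "BTCUSD")
	v.Set("limit", "100")
	return EncodeURLValues("https://example.com/api", v)
}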
// ExtractHost returns the hostname out of a string
func ExtractHost(address string) string {
host, _, _ := net.SplitHostPort(address)
if host == "" {
return "localhost"
}
return host
}
// ExtractPort returns the port number out of a string, defaulting to 80 when no port is present
func ExtractPort(host string) int {
_, port, _ := net.SplitHostPort(host)
if port == "" {
return 80
}
portInt, _ := strconv.Atoi(port)
return portInt
}
// GetURIPath returns the path of a URL given a URI
func GetURIPath(uri string) string {
urip, err := url.Parse(uri)
if err != nil {
return ""
}
if urip.RawQuery != "" {
return urip.Path + "?" + urip.RawQuery
}
return urip.Path
}
// GetExecutablePath returns the executable's launch path
func GetExecutablePath() (string, error) {
ex, err := os.Executable()
if err != nil {
return "", err
}
return filepath.Dir(ex), nil
}
// GetDefaultDataDir returns the default data directory
// Windows - C:\Users\%USER%\AppData\Roaming\GoCryptoTrader
// Linux/Unix or OSX - $HOME/.gocryptotrader
func GetDefaultDataDir(env string) string {
if env == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "GoCryptoTrader")
}
usr, err := user.Current()
if err == nil {
return filepath.Join(usr.HomeDir, ".gocryptotrader")
}
dir, err := os.UserHomeDir()
if err != nil {
log.Warnln(log.Global, "Environment variable unset, defaulting to current directory")
dir = "."
}
return filepath.Join(dir, ".gocryptotrader")
}
// CreateDir creates a directory based on the supplied parameter
func CreateDir(dir string) error {
_, err := os.Stat(dir)
if !os.IsNotExist(err) {
return nil
}
log.Warnf(log.Global, "Directory %s does not exist.. creating.\n", dir)
return os.MkdirAll(dir, file.DefaultPermissionOctal)
}
// ChangePermission walks the directory tree and resets any file whose
// permissions differ from the default
func ChangePermission(directory string) error {
return filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode().Perm() != file.DefaultPermissionOctal {
return os.Chmod(path, file.DefaultPermissionOctal)
}
return nil
})
}
// SplitStringSliceByLimit splits a slice of strings into slices by input limit and returns a slice of slice of strings
func SplitStringSliceByLimit(in []string, limit uint) [][]string {
var stringSlice []string
sliceSlice := make([][]string, 0, len(in)/int(limit)+1)
for len(in) >= int(limit) {
stringSlice, in = in[:limit], in[limit:]
sliceSlice = append(sliceSlice, stringSlice)
}
if len(in) > 0 {
sliceSlice = append(sliceSlice, in)
}
return sliceSlice
}
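// Illustrative sketch (assumed _test.go material): batching a slice into
// chunks of at most three elements, e.g. for size-limited API requests.
func exampleSplitStringSliceByLimit() [][]string {
	return SplitStringSliceByLimit([]string{"a", "b", "c", "d", "e"}, 3) // [[a b c] [d e]]
}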
// AddPaddingOnUpperCase inserts a space before each upper case letter. Runs
// of consecutive upper case letters such as in `ThisIsHTTPExample` are kept
// together, yielding `This Is HTTP Example`.
func AddPaddingOnUpperCase(s string) string {
if s == "" {
return ""
}
var result []string
left := 0
for x := 0; x < len(s); x++ {
if x == 0 {
continue
}
if unicode.IsUpper(rune(s[x])) {
if !unicode.IsUpper(rune(s[x-1])) {
result = append(result, s[left:x])
left = x
}
} else if x > 1 && unicode.IsUpper(rune(s[x-1])) {
if s[left:x-1] == "" {
continue
}
result = append(result, s[left:x-1])
left = x - 1
}
}
result = append(result, s[left:])
return strings.Join(result, " ")
}
// InArray checks if _val_ belongs to _array_
func InArray(val, array interface{}) (exists bool, index int) {
exists = false
index = -1
if array == nil {
return
}
switch reflect.TypeOf(array).Kind() {
case reflect.Array, reflect.Slice:
s := reflect.ValueOf(array)
for i := 0; i < s.Len(); i++ {
if reflect.DeepEqual(val, s.Index(i).Interface()) {
index = i
exists = true
return
}
}
}
return
}
// multiError holds all the errors as a slice; it is unexported so callers
// must go through the built-in error handling (errors.Is/errors.As).
type multiError struct {
loadedErrors []error
offset *int
}
// AppendError appends an error in a more idiomatic way. This can start out as a
// standard error e.g. err := errors.New("random error")
// err = AppendError(err, errors.New("another random error"))
func AppendError(original, incoming error) error {
errSliceP, ok := original.(*multiError)
if ok {
errSliceP.offset = nil
}
if incoming == nil {
return original // Skip append - continue as normal.
}
if !ok {
// This assumes that a standard error was passed in and we want to
// track it and add additional errors.
errSliceP = &multiError{}
if original != nil {
errSliceP.loadedErrors = append(errSliceP.loadedErrors, original)
}
}
if incomingSlice, ok := incoming.(*multiError); ok {
// Join slices if needed.
errSliceP.loadedErrors = append(errSliceP.loadedErrors, incomingSlice.loadedErrors...)
} else {
errSliceP.loadedErrors = append(errSliceP.loadedErrors, incoming)
}
return errSliceP
}
// Error displays all errors comma separated. If Unwrap has been called and
// the offset has not been reset, only the error at that offset is displayed.
func (e *multiError) Error() string {
if e.offset != nil {
return e.loadedErrors[*e.offset].Error()
}
allErrors := make([]string, len(e.loadedErrors))
for x := range e.loadedErrors {
allErrors[x] = e.loadedErrors[x].Error()
}
return strings.Join(allErrors, ", ")
}
// Unwrap increments the offset so errors.Is() can be matched against each
// individual error in turn.
func (e *multiError) Unwrap() error {
if e.offset == nil {
e.offset = new(int)
} else {
*e.offset++
}
if *e.offset == len(e.loadedErrors) {
e.offset = nil
return nil // Force errors.Is package to return false.
}
return e
}
// Is checks to see if the errors match. It calls package errors.Is() so that
// wrapping added via fmt.Errorf() is still honoured. The errors package calls
// this via the interface assertion err.(interface{ Is(error) bool }).
func (e *multiError) Is(incoming error) bool {
if e.offset != nil && errors.Is(e.loadedErrors[*e.offset], incoming) {
e.offset = nil
return true
}
return false
}
// StartEndTimeCheck provides some basic checks which occur
// frequently in the codebase
func StartEndTimeCheck(start, end time.Time) error {
if start.IsZero() || start.Equal(zeroValueUnix) {
return fmt.Errorf("start %w", ErrDateUnset)
}
if end.IsZero() || end.Equal(zeroValueUnix) {
return fmt.Errorf("end %w", ErrDateUnset)
}
if start.After(end) {
return ErrStartAfterEnd
}
if start.Equal(end) {
return ErrStartEqualsEnd
}
if start.After(time.Now()) {
return ErrStartAfterTimeNow
}
return nil
}
// GenerateRandomString generates a random string of the given length from the
// supplied character sets (SmallLetters, CapitalLetters, NumberCharacters).
// If no characters are provided, NumberCharacters (the numeric set) is used.
func GenerateRandomString(length uint, characters ...string) (string, error) {
if length == 0 {
return "", errors.New("invalid length, length must be non-zero positive integer")
}
b := make([]byte, length)
chars := strings.Replace(strings.Join(characters, ""), " ", "", -1)
if chars == "" && len(characters) != 0 {
return "", errors.New("invalid characters, character must not be empty")
} else if chars == "" {
chars = NumberCharacters
}
for i := range b {
nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))
if err != nil {
return "", err
}
n := nBig.Int64()
b[i] = chars[n]
}
return string(b), nil
}
// GetTypeAssertError returns additional information for when an assertion
// failure occurs.
// fieldDescription optionally names the affected field(s)
func GetTypeAssertError(required string, received interface{}, fieldDescription ...string) error {
var description string
if len(fieldDescription) > 0 {
description = " for: " + strings.Join(fieldDescription, ", ")
}
return fmt.Errorf("%w from %T to %s%s", ErrTypeAssertFailure, received, required, description)
}
| {
if agent == "" {
return errUserAgentInvalid
}
m.Lock()
_HTTPUserAgent = agent
m.Unlock()
return nil
} | identifier_body |
common.go | package common
import (
"context"
"crypto/rand"
"errors"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
"os"
"os/user"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"time"
"unicode"
"github.com/thrasher-corp/gocryptotrader/common/file"
"github.com/thrasher-corp/gocryptotrader/log"
)
const (
// SimpleTimeFormatWithTimezone is a common time format that golang's standard library does not predefine
SimpleTimeFormatWithTimezone = time.DateTime + " MST"
// GctExt is the extension for GCT Tengo script files
GctExt = ".gct"
defaultTimeout = time.Second * 15
)
// Character sets (the full lower and upper case English alphabets and the base-10 digits) used for generating a random string.
const (
SmallLetters = "abcdefghijklmnopqrstuvwxyz"
CapitalLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
NumberCharacters = "0123456789"
)
var (
// emailRX represents email address matching pattern
emailRX = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
)
// Vars for common.go operations
var (
_HTTPClient *http.Client
_HTTPUserAgent string
m sync.RWMutex
// ErrNotYetImplemented defines a common error across the code base that
// alerts of a function that has not been completed or tied into main code
ErrNotYetImplemented = errors.New("not yet implemented")
// ErrFunctionNotSupported defines a standardised error for an unsupported
// wrapper function by an API
ErrFunctionNotSupported = errors.New("unsupported wrapper function")
errInvalidCryptoCurrency = errors.New("invalid crypto currency")
// ErrDateUnset is an error for start end check calculations
ErrDateUnset = errors.New("date unset")
// ErrStartAfterEnd is an error for start end check calculations
ErrStartAfterEnd = errors.New("start date after end date")
// ErrStartEqualsEnd is an error for start end check calculations
ErrStartEqualsEnd = errors.New("start date equals end date")
// ErrStartAfterTimeNow is an error for start end check calculations
ErrStartAfterTimeNow = errors.New("start date is after current time")
// ErrNilPointer defines an error for a nil pointer
ErrNilPointer = errors.New("nil pointer")
// ErrCannotCalculateOffline is returned when a request wishes to calculate
// something offline, but has an online requirement
ErrCannotCalculateOffline = errors.New("cannot calculate offline")
// ErrNoResponse is returned when a response has no entries/is empty
// when one is expected
ErrNoResponse = errors.New("no response")
errCannotSetInvalidTimeout = errors.New("cannot set new HTTP client with timeout that is equal or less than 0")
errUserAgentInvalid = errors.New("cannot set invalid user agent")
errHTTPClientInvalid = errors.New("custom http client cannot be nil")
zeroValueUnix = time.Unix(0, 0)
// ErrTypeAssertFailure defines an error when type assertion fails
ErrTypeAssertFailure = errors.New("type assert failure")
)
// MatchesEmailPattern ensures that the string is an email address by regexp check
func MatchesEmailPattern(value string) bool {
if len(value) < 3 || len(value) > 254 {
return false
}
return emailRX.MatchString(value)
}
// SetHTTPClientWithTimeout sets a new *http.Client with different timeout
// settings
func SetHTTPClientWithTimeout(t time.Duration) error {
if t <= 0 {
return errCannotSetInvalidTimeout
}
m.Lock()
_HTTPClient = NewHTTPClientWithTimeout(t)
m.Unlock()
return nil
}
// SetHTTPUserAgent sets the user agent which will be used for all common HTTP
// requests.
func SetHTTPUserAgent(agent string) error {
if agent == "" {
return errUserAgentInvalid
}
m.Lock()
_HTTPUserAgent = agent
m.Unlock()
return nil
}
// SetHTTPClient sets a custom HTTP client.
func SetHTTPClient(client *http.Client) error {
if client == nil {
return errHTTPClientInvalid
}
m.Lock()
_HTTPClient = client
m.Unlock()
return nil
}
// NewHTTPClientWithTimeout initialises a new HTTP client and its underlying
// transport IdleConnTimeout with the specified timeout duration
func NewHTTPClientWithTimeout(t time.Duration) *http.Client {
tr := &http.Transport{
// Added IdleConnTimeout to reduce the time of idle connections which
// could potentially slow macOS reconnection when there is a sudden
// network disconnection/issue
IdleConnTimeout: t,
Proxy: http.ProxyFromEnvironment,
}
h := &http.Client{
Transport: tr,
Timeout: t}
return h
}
// StringSliceDifference returns the symmetric difference of two string
// slices, i.e. the elements that appear in exactly one of them
func StringSliceDifference(slice1, slice2 []string) []string {
var diff []string
for i := 0; i < 2; i++ {
for _, s1 := range slice1 {
found := false
for _, s2 := range slice2 {
if s1 == s2 {
found = true
break
}
}
if !found {
diff = append(diff, s1)
}
}
if i == 0 {
slice1, slice2 = slice2, slice1
}
}
return diff
}
// StringDataContains reports whether needle occurs as a substring of the comma-joined haystack
func StringDataContains(haystack []string, needle string) bool {
data := strings.Join(haystack, ",")
return strings.Contains(data, needle)
}
// StringDataCompare reports whether needle exactly matches an element of haystack
func StringDataCompare(haystack []string, needle string) bool {
for x := range haystack {
if haystack[x] == needle {
return true
}
}
return false
}
// StringDataCompareInsensitive reports whether needle matches an element of
// haystack, irrespective of lower or upper case
func StringDataCompareInsensitive(haystack []string, needle string) bool {
for x := range haystack {
if strings.EqualFold(haystack[x], needle) {
return true
}
}
return false
}
// StringDataContainsInsensitive reports whether needle occurs as a substring
// of any element of haystack, irrespective of lower or upper case
func StringDataContainsInsensitive(haystack []string, needle string) bool {
for _, data := range haystack {
if strings.Contains(strings.ToUpper(data), strings.ToUpper(needle)) {
return true
}
}
return false
}
// IsEnabled takes in a boolean param and returns a string if it is enabled
// or disabled
func IsEnabled(isEnabled bool) string {
if isEnabled {
return "Enabled"
}
return "Disabled"
}
// IsValidCryptoAddress validates a cryptocurrency address string using the
// regexp package. Note: validation issues occur because "3" can prefix both
// Litecoin and Bitcoin addresses - non-fatal
func IsValidCryptoAddress(address, crypto string) (bool, error) {
switch strings.ToLower(crypto) {
case "btc":
return regexp.MatchString("^(bc1|[13])[a-zA-HJ-NP-Z0-9]{25,90}$", address)
case "ltc":
return regexp.MatchString("^[L3M][a-km-zA-HJ-NP-Z1-9]{25,34}$", address)
case "eth":
return regexp.MatchString("^0x[a-km-z0-9]{40}$", address)
default:
return false, fmt.Errorf("%w %s", errInvalidCryptoCurrency, crypto)
}
}
// YesOrNo reports whether the input is "y" or "yes", irrespective of case
func YesOrNo(input string) bool {
if strings.EqualFold(input, "y") || strings.EqualFold(input, "yes") {
return true
}
return false
}
// SendHTTPRequest sends a request using the http package and returns the body
// contents
func SendHTTPRequest(ctx context.Context, method, urlPath string, headers map[string]string, body io.Reader, verbose bool) ([]byte, error) {
method = strings.ToUpper(method)
if method != http.MethodOptions && method != http.MethodGet &&
method != http.MethodHead && method != http.MethodPost &&
method != http.MethodPut && method != http.MethodDelete &&
method != http.MethodTrace && method != http.MethodConnect {
return nil, errors.New("invalid HTTP method specified")
}
req, err := http.NewRequestWithContext(ctx, method, urlPath, body)
if err != nil {
return nil, err
}
for k, v := range headers {
req.Header.Add(k, v)
}
if verbose {
log.Debugf(log.Global, "Request path: %s", urlPath)
for k, d := range req.Header {
log.Debugf(log.Global, "Request header [%s]: %s", k, d)
}
log.Debugf(log.Global, "Request type: %s", method)
if body != nil {
log.Debugf(log.Global, "Request body: %v", body)
}
}
m.RLock()
if _HTTPUserAgent != "" && req.Header.Get("User-Agent") == "" {
req.Header.Add("User-Agent", _HTTPUserAgent)
}
if _HTTPClient == nil {
m.RUnlock()
m.Lock()
// Set *http.Client with default timeout if not populated.
_HTTPClient = NewHTTPClientWithTimeout(defaultTimeout)
m.Unlock()
m.RLock()
}
resp, err := _HTTPClient.Do(req)
m.RUnlock()
if err != nil {
return nil, err
}
defer resp.Body.Close()
contents, err := io.ReadAll(resp.Body)
if verbose {
log.Debugf(log.Global, "HTTP status: %s, Code: %v",
resp.Status,
resp.StatusCode)
log.Debugf(log.Global, "Raw response: %s", string(contents))
}
return contents, err
}
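// Illustrative sketch (assumption: the URL and header are placeholders; in
// practice this would live in a _test.go file): a verbose GET request issued
// through SendHTTPRequest using the package-wide client.
func exampleSendHTTPRequest(ctx context.Context) ([]byte, error) {
	headers := map[string]string{"Accept": "application/json"}
	return SendHTTPRequest(ctx, http.MethodGet, "https://example.com/v1/ping", headers, nil, true)
}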
// EncodeURLValues concatenates url values onto a url string and returns a
// string
func EncodeURLValues(urlPath string, values url.Values) string {
u := urlPath
if len(values) > 0 {
u += "?" + values.Encode()
}
return u
}
// ExtractHost returns the hostname out of a string
func ExtractHost(address string) string {
host, _, _ := net.SplitHostPort(address)
if host == "" {
return "localhost"
}
return host
}
// ExtractPort returns the port number out of a string, defaulting to 80 when no port is present
func ExtractPort(host string) int {
_, port, _ := net.SplitHostPort(host)
if port == "" {
return 80
}
portInt, _ := strconv.Atoi(port)
return portInt
}
// GetURIPath returns the path of a URL given a URI
func GetURIPath(uri string) string {
urip, err := url.Parse(uri)
if err != nil {
return ""
}
if urip.RawQuery != "" {
return urip.Path + "?" + urip.RawQuery
}
return urip.Path
}
// GetExecutablePath returns the executable's launch path
func GetExecutablePath() (string, error) {
ex, err := os.Executable()
if err != nil {
return "", err
}
return filepath.Dir(ex), nil
}
// GetDefaultDataDir returns the default data directory
// Windows - C:\Users\%USER%\AppData\Roaming\GoCryptoTrader
// Linux/Unix or OSX - $HOME/.gocryptotrader
func GetDefaultDataDir(env string) string {
if env == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "GoCryptoTrader")
}
usr, err := user.Current()
if err == nil {
return filepath.Join(usr.HomeDir, ".gocryptotrader")
}
dir, err := os.UserHomeDir()
if err != nil {
log.Warnln(log.Global, "Environment variable unset, defaulting to current directory")
dir = "."
}
return filepath.Join(dir, ".gocryptotrader")
}
// CreateDir creates a directory based on the supplied parameter
func CreateDir(dir string) error {
_, err := os.Stat(dir)
if !os.IsNotExist(err) {
return nil
}
log.Warnf(log.Global, "Directory %s does not exist.. creating.\n", dir)
return os.MkdirAll(dir, file.DefaultPermissionOctal)
}
// ChangePermission walks the directory tree and resets any file whose
// permissions differ from the default
func ChangePermission(directory string) error {
return filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode().Perm() != file.DefaultPermissionOctal {
return os.Chmod(path, file.DefaultPermissionOctal)
}
return nil
})
}
// SplitStringSliceByLimit splits a slice of strings into slices by input limit and returns a slice of slice of strings
func SplitStringSliceByLimit(in []string, limit uint) [][]string {
var stringSlice []string
sliceSlice := make([][]string, 0, len(in)/int(limit)+1)
for len(in) >= int(limit) {
stringSlice, in = in[:limit], in[limit:]
sliceSlice = append(sliceSlice, stringSlice)
}
if len(in) > 0 {
sliceSlice = append(sliceSlice, in)
}
return sliceSlice
}
// AddPaddingOnUpperCase inserts a space before each upper case letter. Runs
// of consecutive upper case letters such as in `ThisIsHTTPExample` are kept
// together, yielding `This Is HTTP Example`.
func AddPaddingOnUpperCase(s string) string {
if s == "" {
return ""
}
var result []string
left := 0
for x := 0; x < len(s); x++ {
if x == 0 {
continue
}
if unicode.IsUpper(rune(s[x])) {
if !unicode.IsUpper(rune(s[x-1])) {
result = append(result, s[left:x])
left = x
}
} else if x > 1 && unicode.IsUpper(rune(s[x-1])) {
if s[left:x-1] == "" {
continue
}
result = append(result, s[left:x-1])
left = x - 1
}
}
result = append(result, s[left:])
return strings.Join(result, " ")
}
// InArray checks if _val_ belongs to _array_
func InArray(val, array interface{}) (exists bool, index int) {
exists = false
index = -1
if array == nil {
return
}
switch reflect.TypeOf(array).Kind() {
case reflect.Array, reflect.Slice:
s := reflect.ValueOf(array)
for i := 0; i < s.Len(); i++ {
if reflect.DeepEqual(val, s.Index(i).Interface()) {
index = i
exists = true
return
}
}
}
return
}
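// Illustrative sketch (assumed _test.go material): InArray accepts any slice
// or array because it reflects over the elements.
func exampleInArray() (bool, int) {
	return InArray(42, []int{7, 13, 42}) // true, 2
}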
// multiError holds all the errors as a slice; it is unexported so callers
// must go through the built-in error handling (errors.Is/errors.As).
type multiError struct {
loadedErrors []error
offset *int
}
// AppendError appends an error in a more idiomatic way. This can start out as a
// standard error e.g. err := errors.New("random error")
// err = AppendError(err, errors.New("another random error"))
func | (original, incoming error) error {
errSliceP, ok := original.(*multiError)
if ok {
errSliceP.offset = nil
}
if incoming == nil {
return original // Skip append - continue as normal.
}
if !ok {
// This assumes that a standard error was passed in and we want to
// track it and add additional errors.
errSliceP = &multiError{}
if original != nil {
errSliceP.loadedErrors = append(errSliceP.loadedErrors, original)
}
}
if incomingSlice, ok := incoming.(*multiError); ok {
// Join slices if needed.
errSliceP.loadedErrors = append(errSliceP.loadedErrors, incomingSlice.loadedErrors...)
} else {
errSliceP.loadedErrors = append(errSliceP.loadedErrors, incoming)
}
return errSliceP
}
// Error displays all errors comma separated. If Unwrap has been called and
// the offset has not been reset, only the error at that offset is displayed.
func (e *multiError) Error() string {
if e.offset != nil {
return e.loadedErrors[*e.offset].Error()
}
allErrors := make([]string, len(e.loadedErrors))
for x := range e.loadedErrors {
allErrors[x] = e.loadedErrors[x].Error()
}
return strings.Join(allErrors, ", ")
}
// Unwrap increments the offset so errors.Is() can be matched against each
// individual error in turn.
func (e *multiError) Unwrap() error {
if e.offset == nil {
e.offset = new(int)
} else {
*e.offset++
}
if *e.offset == len(e.loadedErrors) {
e.offset = nil
return nil // Force errors.Is package to return false.
}
return e
}
// Is checks to see if the errors match. It calls package errors.Is() so that
// wrapping added via fmt.Errorf() is still honoured. The errors package calls
// this via the interface assertion err.(interface{ Is(error) bool }).
func (e *multiError) Is(incoming error) bool {
if e.offset != nil && errors.Is(e.loadedErrors[*e.offset], incoming) {
e.offset = nil
return true
}
return false
}
// StartEndTimeCheck provides some basic checks which occur
// frequently in the codebase
func StartEndTimeCheck(start, end time.Time) error {
if start.IsZero() || start.Equal(zeroValueUnix) {
return fmt.Errorf("start %w", ErrDateUnset)
}
if end.IsZero() || end.Equal(zeroValueUnix) {
return fmt.Errorf("end %w", ErrDateUnset)
}
if start.After(end) {
return ErrStartAfterEnd
}
if start.Equal(end) {
return ErrStartEqualsEnd
}
if start.After(time.Now()) {
return ErrStartAfterTimeNow
}
return nil
}
// GenerateRandomString generates a random string of the given length from the
// supplied character sets (SmallLetters, CapitalLetters, NumberCharacters).
// If no characters are provided, NumberCharacters (the numeric set) is used.
func GenerateRandomString(length uint, characters ...string) (string, error) {
if length == 0 {
return "", errors.New("invalid length, length must be non-zero positive integer")
}
b := make([]byte, length)
chars := strings.Replace(strings.Join(characters, ""), " ", "", -1)
if chars == "" && len(characters) != 0 {
return "", errors.New("invalid characters, character must not be empty")
} else if chars == "" {
chars = NumberCharacters
}
for i := range b {
nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))
if err != nil {
return "", err
}
n := nBig.Int64()
b[i] = chars[n]
}
return string(b), nil
}
// GetTypeAssertError returns additional information for when an assertion
// failure occurs.
// fieldDescription optionally names the affected field(s)
func GetTypeAssertError(required string, received interface{}, fieldDescription ...string) error {
var description string
if len(fieldDescription) > 0 {
description = " for: " + strings.Join(fieldDescription, ", ")
}
return fmt.Errorf("%w from %T to %s%s", ErrTypeAssertFailure, received, required, description)
}
| AppendError | identifier_name |
common.go | package common
import (
"context"
"crypto/rand"
"errors"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
"os"
"os/user"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"time"
"unicode"
"github.com/thrasher-corp/gocryptotrader/common/file"
"github.com/thrasher-corp/gocryptotrader/log"
)
const (
// SimpleTimeFormatWithTimezone is a common time format that golang's standard library does not predefine
SimpleTimeFormatWithTimezone = time.DateTime + " MST"
// GctExt is the extension for GCT Tengo script files
GctExt = ".gct"
defaultTimeout = time.Second * 15
)
// Character sets (the full lower and upper case English alphabets and the base-10 digits) used for generating a random string.
const (
SmallLetters = "abcdefghijklmnopqrstuvwxyz"
CapitalLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
NumberCharacters = "0123456789"
)
var (
// emailRX represents email address matching pattern
emailRX = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
)
// Vars for common.go operations
var (
_HTTPClient *http.Client
_HTTPUserAgent string
m sync.RWMutex
// ErrNotYetImplemented defines a common error across the code base that
// alerts of a function that has not been completed or tied into main code
ErrNotYetImplemented = errors.New("not yet implemented")
// ErrFunctionNotSupported defines a standardised error for an unsupported
// wrapper function by an API
ErrFunctionNotSupported = errors.New("unsupported wrapper function")
errInvalidCryptoCurrency = errors.New("invalid crypto currency")
// ErrDateUnset is an error for start end check calculations
ErrDateUnset = errors.New("date unset")
// ErrStartAfterEnd is an error for start end check calculations
ErrStartAfterEnd = errors.New("start date after end date")
// ErrStartEqualsEnd is an error for start end check calculations
ErrStartEqualsEnd = errors.New("start date equals end date")
// ErrStartAfterTimeNow is an error for start end check calculations
ErrStartAfterTimeNow = errors.New("start date is after current time")
// ErrNilPointer defines an error for a nil pointer
ErrNilPointer = errors.New("nil pointer")
// ErrCannotCalculateOffline is returned when a request wishes to calculate
// something offline, but has an online requirement
ErrCannotCalculateOffline = errors.New("cannot calculate offline")
// ErrNoResponse is returned when a response has no entries/is empty
// when one is expected
ErrNoResponse = errors.New("no response")
errCannotSetInvalidTimeout = errors.New("cannot set new HTTP client with timeout that is equal or less than 0")
errUserAgentInvalid = errors.New("cannot set invalid user agent")
errHTTPClientInvalid = errors.New("custom http client cannot be nil")
zeroValueUnix = time.Unix(0, 0)
// ErrTypeAssertFailure defines an error when type assertion fails
ErrTypeAssertFailure = errors.New("type assert failure")
)
// MatchesEmailPattern ensures that the string is an email address by regexp check
func MatchesEmailPattern(value string) bool {
if len(value) < 3 || len(value) > 254 {
return false
}
return emailRX.MatchString(value)
}
// SetHTTPClientWithTimeout sets a new *http.Client with different timeout
// settings
func SetHTTPClientWithTimeout(t time.Duration) error {
if t <= 0 {
return errCannotSetInvalidTimeout
}
m.Lock()
_HTTPClient = NewHTTPClientWithTimeout(t)
m.Unlock()
return nil
}
// SetHTTPUserAgent sets the user agent which will be used for all common HTTP
// requests.
func SetHTTPUserAgent(agent string) error {
if agent == "" {
return errUserAgentInvalid
}
m.Lock()
_HTTPUserAgent = agent
m.Unlock()
return nil
}
// SetHTTPClient sets a custom HTTP client.
func SetHTTPClient(client *http.Client) error {
if client == nil {
return errHTTPClientInvalid
}
m.Lock()
_HTTPClient = client
m.Unlock()
return nil
}
// NewHTTPClientWithTimeout initialises a new HTTP client and its underlying
// transport IdleConnTimeout with the specified timeout duration
func NewHTTPClientWithTimeout(t time.Duration) *http.Client {
tr := &http.Transport{
// Added IdleConnTimeout to reduce the time of idle connections which
// could potentially slow macOS reconnection when there is a sudden
// network disconnection/issue
IdleConnTimeout: t,
Proxy: http.ProxyFromEnvironment,
}
h := &http.Client{
Transport: tr,
Timeout: t}
return h
}
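// Illustrative sketch (assumed _test.go material): installing a client with a
// 30s timeout as the package-wide client used by SendHTTPRequest.
func exampleSetCustomClient() error {
	return SetHTTPClient(NewHTTPClientWithTimeout(30 * time.Second))
}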
// StringSliceDifference returns the symmetric difference of two string
// slices, i.e. the elements that appear in exactly one of them
func StringSliceDifference(slice1, slice2 []string) []string {
var diff []string
for i := 0; i < 2; i++ {
for _, s1 := range slice1 {
found := false
for _, s2 := range slice2 {
if s1 == s2 {
found = true
break
}
}
if !found {
diff = append(diff, s1)
}
}
if i == 0 {
slice1, slice2 = slice2, slice1
}
}
return diff
}
// StringDataContains reports whether needle occurs as a substring of the comma-joined haystack
func StringDataContains(haystack []string, needle string) bool {
data := strings.Join(haystack, ",")
return strings.Contains(data, needle)
}
// StringDataCompare reports whether needle exactly matches an element of haystack
func StringDataCompare(haystack []string, needle string) bool {
for x := range haystack {
if haystack[x] == needle {
return true
}
}
return false
}
// StringDataCompareInsensitive reports whether needle matches an element of
// haystack, irrespective of lower or upper case
func StringDataCompareInsensitive(haystack []string, needle string) bool {
for x := range haystack {
if strings.EqualFold(haystack[x], needle) {
return true
}
}
return false
}
// StringDataContainsInsensitive reports whether needle occurs as a substring
// of any element of haystack, irrespective of lower or upper case
func StringDataContainsInsensitive(haystack []string, needle string) bool {
for _, data := range haystack {
if strings.Contains(strings.ToUpper(data), strings.ToUpper(needle)) {
return true
}
}
return false
}
// IsEnabled takes in a boolean param and returns a string if it is enabled
// or disabled
func IsEnabled(isEnabled bool) string {
if isEnabled {
return "Enabled"
}
return "Disabled"
}
// IsValidCryptoAddress validates a cryptocurrency address string using the
// regexp package. Note: validation issues occur because "3" can prefix both
// Litecoin and Bitcoin addresses - non-fatal
func IsValidCryptoAddress(address, crypto string) (bool, error) {
switch strings.ToLower(crypto) {
case "btc":
return regexp.MatchString("^(bc1|[13])[a-zA-HJ-NP-Z0-9]{25,90}$", address)
case "ltc":
return regexp.MatchString("^[L3M][a-km-zA-HJ-NP-Z1-9]{25,34}$", address)
case "eth":
return regexp.MatchString("^0x[a-km-z0-9]{40}$", address)
default:
return false, fmt.Errorf("%w %s", errInvalidCryptoCurrency, crypto)
}
}
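// Illustrative sketch (the address below is the well-known Bitcoin
// genesis-block address, used purely as sample data; assumed _test.go
// material).
func exampleIsValidCryptoAddress() (bool, error) {
	return IsValidCryptoAddress("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa", "btc")
}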
// YesOrNo reports whether the input is "y" or "yes", irrespective of case
func YesOrNo(input string) bool {
if strings.EqualFold(input, "y") || strings.EqualFold(input, "yes") {
return true
}
return false
}
// SendHTTPRequest sends a request using the http package and returns the body
// contents
func SendHTTPRequest(ctx context.Context, method, urlPath string, headers map[string]string, body io.Reader, verbose bool) ([]byte, error) {
method = strings.ToUpper(method)
if method != http.MethodOptions && method != http.MethodGet &&
method != http.MethodHead && method != http.MethodPost &&
method != http.MethodPut && method != http.MethodDelete &&
method != http.MethodTrace && method != http.MethodConnect {
return nil, errors.New("invalid HTTP method specified")
}
req, err := http.NewRequestWithContext(ctx, method, urlPath, body)
if err != nil {
return nil, err
}
for k, v := range headers {
req.Header.Add(k, v)
}
if verbose {
log.Debugf(log.Global, "Request path: %s", urlPath)
for k, d := range req.Header {
log.Debugf(log.Global, "Request header [%s]: %s", k, d)
}
log.Debugf(log.Global, "Request type: %s", method)
if body != nil {
log.Debugf(log.Global, "Request body: %v", body)
}
}
m.RLock()
if _HTTPUserAgent != "" && req.Header.Get("User-Agent") == "" {
req.Header.Add("User-Agent", _HTTPUserAgent)
}
if _HTTPClient == nil {
m.RUnlock()
m.Lock()
// Set *http.Client with default timeout if not populated.
_HTTPClient = NewHTTPClientWithTimeout(defaultTimeout)
m.Unlock()
m.RLock()
}
resp, err := _HTTPClient.Do(req)
m.RUnlock()
if err != nil {
return nil, err
}
defer resp.Body.Close()
contents, err := io.ReadAll(resp.Body)
if verbose {
log.Debugf(log.Global, "HTTP status: %s, Code: %v",
resp.Status,
resp.StatusCode)
log.Debugf(log.Global, "Raw response: %s", string(contents))
}
return contents, err
}
// EncodeURLValues concatenates url values onto a url string and returns a
// string
func EncodeURLValues(urlPath string, values url.Values) string {
u := urlPath
if len(values) > 0 {
u += "?" + values.Encode()
}
return u
}
// ExtractHost returns the hostname out of a string
func ExtractHost(address string) string {
host, _, _ := net.SplitHostPort(address)
if host == "" {
return "localhost"
}
return host
}
// ExtractPort returns the port number out of a string, defaulting to 80 when no port is present
func ExtractPort(host string) int {
_, port, _ := net.SplitHostPort(host)
if port == "" {
return 80
}
portInt, _ := strconv.Atoi(port)
return portInt
}
// GetURIPath returns the path of a URL given a URI
func GetURIPath(uri string) string {
urip, err := url.Parse(uri)
if err != nil {
return ""
}
if urip.RawQuery != "" {
return urip.Path + "?" + urip.RawQuery
}
return urip.Path
}
// GetExecutablePath returns the executable's launch path
func GetExecutablePath() (string, error) {
ex, err := os.Executable()
if err != nil {
return "", err
}
return filepath.Dir(ex), nil
}
// GetDefaultDataDir returns the default data directory
// Windows - C:\Users\%USER%\AppData\Roaming\GoCryptoTrader
// Linux/Unix or OSX - $HOME/.gocryptotrader
func GetDefaultDataDir(env string) string {
if env == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "GoCryptoTrader")
}
usr, err := user.Current()
if err == nil {
return filepath.Join(usr.HomeDir, ".gocryptotrader")
}
dir, err := os.UserHomeDir()
if err != nil {
log.Warnln(log.Global, "Environment variable unset, defaulting to current directory")
dir = "."
}
return filepath.Join(dir, ".gocryptotrader")
}
// CreateDir creates a directory based on the supplied parameter
func CreateDir(dir string) error {
_, err := os.Stat(dir)
if !os.IsNotExist(err) {
return nil
}
log.Warnf(log.Global, "Directory %s does not exist.. creating.\n", dir)
return os.MkdirAll(dir, file.DefaultPermissionOctal)
}
// ChangePermission walks the directory tree and resets any file whose
// permissions differ from the default
func ChangePermission(directory string) error {
return filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode().Perm() != file.DefaultPermissionOctal {
return os.Chmod(path, file.DefaultPermissionOctal)
}
return nil
})
}
// SplitStringSliceByLimit splits a slice of strings into slices by input limit and returns a slice of slice of strings
func SplitStringSliceByLimit(in []string, limit uint) [][]string {
var stringSlice []string
sliceSlice := make([][]string, 0, len(in)/int(limit)+1)
for len(in) >= int(limit) {
stringSlice, in = in[:limit], in[limit:]
sliceSlice = append(sliceSlice, stringSlice)
}
if len(in) > 0 {
sliceSlice = append(sliceSlice, in)
}
return sliceSlice
}
// AddPaddingOnUpperCase inserts a space before each upper case letter. Runs
// of consecutive upper case letters such as in `ThisIsHTTPExample` are kept
// together, yielding `This Is HTTP Example`.
func AddPaddingOnUpperCase(s string) string {
if s == "" {
return ""
}
var result []string
left := 0
for x := 0; x < len(s); x++ {
if x == 0 {
continue
}
if unicode.IsUpper(rune(s[x])) {
if !unicode.IsUpper(rune(s[x-1])) |
} else if x > 1 && unicode.IsUpper(rune(s[x-1])) {
if s[left:x-1] == "" {
continue
}
result = append(result, s[left:x-1])
left = x - 1
}
}
result = append(result, s[left:])
return strings.Join(result, " ")
}
// InArray checks if _val_ belongs to _array_
func InArray(val, array interface{}) (exists bool, index int) {
exists = false
index = -1
if array == nil {
return
}
switch reflect.TypeOf(array).Kind() {
case reflect.Array, reflect.Slice:
s := reflect.ValueOf(array)
for i := 0; i < s.Len(); i++ {
if reflect.DeepEqual(val, s.Index(i).Interface()) {
index = i
exists = true
return
}
}
}
return
}
// multiError holds all the errors as a slice; it is unexported so callers
// must go through the built-in error handling (errors.Is/errors.As).
type multiError struct {
loadedErrors []error
offset *int
}
// AppendError appends an error in a more idiomatic way. This can start out as a
// standard error e.g. err := errors.New("random error")
// err = AppendError(err, errors.New("another random error"))
func AppendError(original, incoming error) error {
errSliceP, ok := original.(*multiError)
if ok {
errSliceP.offset = nil
}
if incoming == nil {
return original // Skip append - continue as normal.
}
if !ok {
// This assumes that a standard error was passed in and we want to
// track it and add additional errors.
errSliceP = &multiError{}
if original != nil {
errSliceP.loadedErrors = append(errSliceP.loadedErrors, original)
}
}
if incomingSlice, ok := incoming.(*multiError); ok {
// Join slices if needed.
errSliceP.loadedErrors = append(errSliceP.loadedErrors, incomingSlice.loadedErrors...)
} else {
errSliceP.loadedErrors = append(errSliceP.loadedErrors, incoming)
}
return errSliceP
}
// Error displays all errors comma separated. If Unwrap has been called and
// the offset has not been reset, only the error at that offset is displayed.
func (e *multiError) Error() string {
if e.offset != nil {
return e.loadedErrors[*e.offset].Error()
}
allErrors := make([]string, len(e.loadedErrors))
for x := range e.loadedErrors {
allErrors[x] = e.loadedErrors[x].Error()
}
return strings.Join(allErrors, ", ")
}
// Unwrap increments the offset so errors.Is() can be matched against each
// individual error in turn.
func (e *multiError) Unwrap() error {
if e.offset == nil {
e.offset = new(int)
} else {
*e.offset++
}
if *e.offset == len(e.loadedErrors) {
e.offset = nil
return nil // Force errors.Is package to return false.
}
return e
}
// Is checks to see if the errors match. It calls package errors.Is() so that
// wrapping added via fmt.Errorf() is still honoured. The errors package calls
// this via the interface assertion err.(interface{ Is(error) bool }).
func (e *multiError) Is(incoming error) bool {
if e.offset != nil && errors.Is(e.loadedErrors[*e.offset], incoming) {
e.offset = nil
return true
}
return false
}
// StartEndTimeCheck provides some basic checks which occur
// frequently in the codebase
func StartEndTimeCheck(start, end time.Time) error {
if start.IsZero() || start.Equal(zeroValueUnix) {
return fmt.Errorf("start %w", ErrDateUnset)
}
if end.IsZero() || end.Equal(zeroValueUnix) {
return fmt.Errorf("end %w", ErrDateUnset)
}
if start.After(end) {
return ErrStartAfterEnd
}
if start.Equal(end) {
return ErrStartEqualsEnd
}
if start.After(time.Now()) {
return ErrStartAfterTimeNow
}
return nil
}
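// Illustrative sketch (assumed _test.go material): a 24h window ending now
// passes every check above and returns nil.
func exampleStartEndTimeCheck() error {
	end := time.Now()
	return StartEndTimeCheck(end.Add(-24*time.Hour), end)
}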
// GenerateRandomString generates a random string of the given length from the
// supplied character sets (SmallLetters, CapitalLetters, NumberCharacters).
// If no characters are provided, NumberCharacters (the numeric set) is used.
func GenerateRandomString(length uint, characters ...string) (string, error) {
if length == 0 {
return "", errors.New("invalid length, length must be non-zero positive integer")
}
b := make([]byte, length)
chars := strings.Replace(strings.Join(characters, ""), " ", "", -1)
if chars == "" && len(characters) != 0 {
return "", errors.New("invalid characters, character must not be empty")
} else if chars == "" {
chars = NumberCharacters
}
for i := range b {
nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))
if err != nil {
return "", err
}
n := nBig.Int64()
b[i] = chars[n]
}
return string(b), nil
}
// GetTypeAssertError returns additional information for when an assertion
// failure occurs.
// fieldDescription optionally names the affected field(s)
func GetTypeAssertError(required string, received interface{}, fieldDescription ...string) error {
var description string
if len(fieldDescription) > 0 {
description = " for: " + strings.Join(fieldDescription, ", ")
}
return fmt.Errorf("%w from %T to %s%s", ErrTypeAssertFailure, received, required, description)
}
| {
result = append(result, s[left:x])
left = x
} | conditional_block |
grafananet.go | package route
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"hash/fnv"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Dieterbe/go-metrics"
"github.com/golang/snappy"
dest "github.com/grafana/carbon-relay-ng/destination"
"github.com/grafana/carbon-relay-ng/matcher"
"github.com/grafana/carbon-relay-ng/persister"
"github.com/grafana/carbon-relay-ng/stats"
"github.com/grafana/carbon-relay-ng/util"
"github.com/jpillora/backoff"
log "github.com/sirupsen/logrus"
conf "github.com/grafana/carbon-relay-ng/pkg/mt-conf"
"github.com/grafana/metrictank/schema"
"github.com/grafana/metrictank/schema/msg"
)
type GrafanaNetConfig struct {
// mandatory
Addr string
ApiKey string
SchemasFile string
// optional
AggregationFile string
BufSize int // number of messages we can buffer up.
FlushMaxNum int // flush after this many metrics seen
FlushMaxWait time.Duration // flush after this much time passed
Timeout time.Duration // timeout for http operations
Concurrency int // number of concurrent connections to tsdb-gw
OrgID int
SSLVerify bool
Blocking bool
Spool bool // ignored for now
// optional http backoff params for posting metrics and schemas
ErrBackoffMin time.Duration
ErrBackoffFactor float64
}
func NewGrafanaNetConfig(addr, apiKey, schemasFile, aggregationFile string) (GrafanaNetConfig, error) {
u, err := url.Parse(addr)
if err != nil || !u.IsAbs() || u.Host == "" { // apparently "http://" is a valid absolute URL (with empty host), but we don't want that
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: invalid value for 'addr': %q. need an absolute http[s] url", addr)
}
if !strings.HasSuffix(u.Path, "/metrics") && !strings.HasSuffix(u.Path, "/metrics/") {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: invalid value for 'addr': %q. needs to be a /metrics endpoint", addr)
}
if apiKey == "" {
return GrafanaNetConfig{}, errors.New("NewGrafanaNetConfig: invalid value for 'apiKey'. value must be set to non-empty string")
}
if schemasFile == "" {
return GrafanaNetConfig{}, errors.New("NewGrafanaNetConfig: invalid value for 'schemasFile'. value must be set to the path to your storage-schemas.conf file")
}
_, err = getSchemas(schemasFile)
if err != nil {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: could not read schemasFile %q: %s", schemasFile, err.Error())
}
if aggregationFile != "" {
_, err = conf.ReadAggregations(aggregationFile)
if err != nil {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: could not read aggregationFile %q: %s", aggregationFile, err.Error())
}
}
return GrafanaNetConfig{
Addr: addr,
ApiKey: apiKey,
SchemasFile: schemasFile,
AggregationFile: aggregationFile,
BufSize: 1e7, // since a message is typically around 100B this is 1GB
FlushMaxNum: 5000,
FlushMaxWait: time.Second / 2,
Timeout: 10 * time.Second,
Concurrency: 100,
OrgID: 1,
SSLVerify: true,
Blocking: false,
Spool: false,
ErrBackoffMin: 100 * time.Millisecond,
ErrBackoffFactor: 1.5,
}, nil
}
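// Illustrative sketch (the URL, API key and schemas path are placeholders;
// note the constructor reads the schemas file, so a real path is needed at
// runtime): constructing a validated config with the defaults above filled in.
func exampleNewGrafanaNetConfig() (GrafanaNetConfig, error) {
	return NewGrafanaNetConfig(
		"https://tsdb.example.com/graphite/metrics",
		"my-api-key",
		"/etc/carbon-relay-ng/storage-schemas.conf",
		"", // aggregation file is optional
	)
}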
type GrafanaNet struct {
baseRoute
Cfg GrafanaNetConfig
schemas persister.WhisperSchemas
aggregation conf.Aggregations
schemasStr string
aggregationStr string
addrMetrics string
addrSchemas string
addrAggregation string
dispatch func(chan []byte, []byte, metrics.Gauge, metrics.Counter)
in []chan []byte
shutdown chan struct{}
wg *sync.WaitGroup
client *http.Client
numErrFlush metrics.Counter
numOut metrics.Counter // metrics successfully written to our buffered conn (no flushing yet)
numDropBuffFull metrics.Counter // metric drops due to queue full
durationTickFlush metrics.Timer // only updated after successful flush
durationManuFlush metrics.Timer // only updated after successful flush. not implemented yet
tickFlushSize metrics.Histogram // only updated after successful flush
manuFlushSize metrics.Histogram // only updated after successful flush. not implemented yet
numBuffered metrics.Gauge
bufferSize metrics.Gauge
}
// getGrafanaNetAddr returns the metrics, schemas and aggregation address (URL) for a given config URL
// The URL we instruct customers to use is the URL to post metrics to, so that one is obvious,
// but we support posting to both /graphite/metrics and /metrics, whereas the schemas and
// aggregation URLs should always get the /graphite prefix.
func getGrafanaNetAddr(addr string) (string, string, string) {
if strings.HasSuffix(addr, "/") {
addr = addr[:len(addr)-1]
}
if !strings.HasSuffix(addr, "/metrics") {
panic("getAddr called on an addr that does not end on /metrics or /metrics/ - this is not supported. Normally NewGrafanaNetConfig would already have validated this")
}
addrMetrics := addr
baseAddr := strings.TrimSuffix(addrMetrics, "/metrics")
if strings.HasSuffix(baseAddr, "/graphite") {
baseAddr = strings.TrimSuffix(baseAddr, "/graphite")
}
addrSchemas := baseAddr + "/graphite/config/storageSchema"
addrAggregation := baseAddr + "/graphite/config/storageAggregation"
return addrMetrics, addrSchemas, addrAggregation
}
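// Illustrative sketch (assumed _test.go material; the host is made up):
// deriving the three endpoints from a customer-facing metrics URL.
func exampleGetGrafanaNetAddr() (string, string, string) {
	return getGrafanaNetAddr("https://tsdb.example.com/graphite/metrics")
	// metrics:     https://tsdb.example.com/graphite/metrics
	// schemas:     https://tsdb.example.com/graphite/config/storageSchema
	// aggregation: https://tsdb.example.com/graphite/config/storageAggregation
}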
// NewGrafanaNet creates a special route that writes to a grafana.net datastore
// We will automatically run the route and the destination
func NewGrafanaNet(key string, matcher matcher.Matcher, cfg GrafanaNetConfig) (Route, error) {
schemas, err := getSchemas(cfg.SchemasFile)
if err != nil {
return nil, err
}
schemasStr := schemas.String()
var aggregation conf.Aggregations
var aggregationStr string
if cfg.AggregationFile != "" {
aggregation, err = conf.ReadAggregations(cfg.AggregationFile)
if err != nil {
return nil, err
}
aggregationStr = aggregation.String()
}
cleanAddr := util.AddrToPath(cfg.Addr)
r := &GrafanaNet{
baseRoute: baseRoute{"GrafanaNet", sync.Mutex{}, atomic.Value{}, key},
Cfg: cfg,
schemas: schemas,
schemasStr: schemasStr,
aggregation: aggregation,
aggregationStr: aggregationStr,
in: make([]chan []byte, cfg.Concurrency),
shutdown: make(chan struct{}),
wg: new(sync.WaitGroup),
numErrFlush: stats.Counter("dest=" + cleanAddr + ".unit=Err.type=flush"),
numOut: stats.Counter("dest=" + cleanAddr + ".unit=Metric.direction=out"),
durationTickFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=ticker"),
durationManuFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=manual"),
tickFlushSize: stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=ticker"),
manuFlushSize: stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=manual"),
numBuffered: stats.Gauge("dest=" + cleanAddr + ".unit=Metric.what=numBuffered"),
bufferSize: stats.Gauge("dest=" + cleanAddr + ".unit=Metric.what=bufferSize"),
numDropBuffFull: stats.Counter("dest=" + cleanAddr + ".unit=Metric.action=drop.reason=queue_full"),
}
r.addrMetrics, r.addrSchemas, r.addrAggregation = getGrafanaNetAddr(cfg.Addr)
r.bufferSize.Update(int64(cfg.BufSize))
if cfg.Blocking | else {
r.dispatch = dispatchNonBlocking
}
r.wg.Add(cfg.Concurrency)
for i := 0; i < cfg.Concurrency; i++ {
r.in[i] = make(chan []byte, cfg.BufSize/cfg.Concurrency)
go r.run(r.in[i])
}
r.config.Store(baseConfig{matcher, make([]*dest.Destination, 0)})
// start off with a transport the same as Go's DefaultTransport
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: cfg.Concurrency,
MaxIdleConnsPerHost: cfg.Concurrency,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
// disable http 2.0 because there seems to be a compatibility problem between nginx hosts and the golang http2 implementation
// which would occasionally result in bogus `400 Bad Request` errors.
transport.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper)
if !cfg.SSLVerify {
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
r.client = &http.Client{
Timeout: cfg.Timeout,
Transport: transport,
}
go r.updateSchemas()
if cfg.AggregationFile != "" {
go r.updateAggregation()
}
return r, nil
}
// run manages incoming and outgoing data for a shard
func (route *GrafanaNet) run(in chan []byte) {
var metrics []*schema.MetricData
buffer := new(bytes.Buffer)
timer := time.NewTimer(route.Cfg.FlushMaxWait)
for {
select {
case buf := <-in:
route.numBuffered.Dec(1)
md, err := parseMetric(buf, route.schemas, route.Cfg.OrgID)
if err != nil {
log.Errorf("RouteGrafanaNet: parseMetric failed: %s. skipping metric", err)
continue
}
md.SetId()
metrics = append(metrics, md)
if len(metrics) == route.Cfg.FlushMaxNum {
metrics = route.retryFlush(metrics, buffer)
// reset our timer
if !timer.Stop() {
<-timer.C
}
timer.Reset(route.Cfg.FlushMaxWait)
}
case <-timer.C:
timer.Reset(route.Cfg.FlushMaxWait)
metrics = route.retryFlush(metrics, buffer)
case <-route.shutdown:
metrics = route.retryFlush(metrics, buffer)
// mark this worker as done so Shutdown's wg.Wait() can return
route.wg.Done()
return
}
}
}
func (route *GrafanaNet) retryFlush(metrics []*schema.MetricData, buffer *bytes.Buffer) []*schema.MetricData {
if len(metrics) == 0 {
return metrics
}
mda := schema.MetricDataArray(metrics)
data, err := msg.CreateMsg(mda, 0, msg.FormatMetricDataArrayMsgp)
if err != nil {
panic(err)
}
route.numOut.Inc(int64(len(metrics)))
buffer.Reset()
snappyBody := snappy.NewWriter(buffer)
snappyBody.Write(data)
snappyBody.Close()
body := buffer.Bytes()
req, err := http.NewRequest("POST", route.addrMetrics, bytes.NewReader(body))
if err != nil {
panic(err)
}
req.Header.Add("Authorization", "Bearer "+route.Cfg.ApiKey)
req.Header.Add("Content-Type", "rt-metric-binary-snappy")
req.Header.Add("User-Agent", UserAgent)
req.Header.Add("Carbon-Relay-NG-Instance", Instance)
boff := &backoff.Backoff{
Min: route.Cfg.ErrBackoffMin,
Max: 30 * time.Second,
Factor: route.Cfg.ErrBackoffFactor,
Jitter: true,
}
var dur time.Duration
for {
dur, err = route.flush(mda, req)
if err == nil {
break
}
route.numErrFlush.Inc(1)
b := boff.Duration()
log.Warnf("GrafanaNet failed to submit data to %s: %s - will try again in %s (this attempt took %s)", route.addrMetrics, err.Error(), b, dur)
time.Sleep(b)
// re-instantiate body, since the previous .Do() attempt would have Read it all the way
req.Body = ioutil.NopCloser(bytes.NewReader(body))
}
log.Debugf("GrafanaNet sent metrics in %s - batch size %d", dur, len(metrics))
route.durationTickFlush.Update(dur)
route.tickFlushSize.Update(int64(len(metrics)))
return metrics[:0]
}
func (route *GrafanaNet) flush(mda schema.MetricDataArray, req *http.Request) (time.Duration, error) {
pre := time.Now()
resp, err := route.client.Do(req)
dur := time.Since(pre)
if err != nil {
return dur, err
}
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
bod, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Warnf("GrafanaNet remote said %q, but could not read its response: %s", resp.Status, err.Error())
return dur, nil
}
var mResp MetricsResponse
err = json.Unmarshal(bod, &mResp)
if err != nil {
log.Warnf("GrafanaNet remote returned %q, but could not parse its response: %s", resp.Status, err.Error())
return dur, nil
}
if mResp.Invalid != 0 {
var b strings.Builder
fmt.Fprintf(&b, "request contained %d invalid metrics that were dropped (%d valid metrics were published in this request)\n", mResp.Invalid, mResp.Published)
for key, vErr := range mResp.ValidationErrors {
fmt.Fprintf(&b, " %q : %d metrics. Examples:\n", key, vErr.Count)
for _, idx := range vErr.ExampleIds {
fmt.Fprintf(&b, " - %#v\n", mda[idx])
}
}
log.Warn(b.String())
}
return dur, nil
}
buf := make([]byte, 300)
n, _ := resp.Body.Read(buf)
ioutil.ReadAll(resp.Body)
resp.Body.Close()
return dur, fmt.Errorf("http %d - %s", resp.StatusCode, buf[:n])
}
// Dispatch takes in the requested buf; in non-blocking mode the buf is dropped if the shard's queue is full
func (route *GrafanaNet) Dispatch(buf []byte) {
// should return as quickly as possible
log.Tracef("route %s sending to dest %s: %s", route.key, route.addrMetrics, buf)
buf = bytes.TrimSpace(buf)
index := bytes.Index(buf, []byte(" "))
if index == -1 {
log.Error("RouteGrafanaNet: invalid message")
return
}
key := buf[:index]
hasher := fnv.New32a()
hasher.Write(key)
shard := int(hasher.Sum32() % uint32(route.Cfg.Concurrency))
route.dispatch(route.in[shard], buf, route.numBuffered, route.numDropBuffFull)
}
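// shardForExample is an illustrative sketch, not part of the original source.
// It isolates the consistent-sharding step used by Dispatch above: hashing the
// metric key with FNV-1a means a given series always lands on the same shard,
// which preserves per-series ordering across the Concurrency workers.
func shardForExample(key []byte, concurrency int) int {
hasher := fnv.New32a()
hasher.Write(key)
return int(hasher.Sum32() % uint32(concurrency))
}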
func (route *GrafanaNet) Flush() error {
//conf := route.config.Load().(Config)
// no-op. Flush() is currently not called by anything.
return nil
}
func (route *GrafanaNet) updateSchemas() {
route.postConfig(route.addrSchemas, route.schemasStr)
for range time.Tick(6 * time.Hour) {
route.postConfig(route.addrSchemas, route.schemasStr)
}
}
func (route *GrafanaNet) updateAggregation() {
route.postConfig(route.addrAggregation, route.aggregationStr)
for range time.Tick(6 * time.Hour) {
route.postConfig(route.addrAggregation, route.aggregationStr)
}
}
func (route *GrafanaNet) postConfig(path, cfg string) {
boff := &backoff.Backoff{
Min: route.Cfg.ErrBackoffMin,
Max: 30 * time.Minute,
Factor: route.Cfg.ErrBackoffFactor,
Jitter: true,
}
for {
req, err := http.NewRequest("POST", path, strings.NewReader(cfg))
if err != nil {
panic(err)
}
req.Header.Add("Authorization", "Bearer "+route.Cfg.ApiKey)
req.Header.Add("User-Agent", UserAgent)
req.Header.Add("Carbon-Relay-NG-Instance", Instance)
resp, err := route.client.Do(req)
if err != nil {
log.Warnf("got error for %s: %s", path, err.Error())
time.Sleep(boff.Duration())
continue
}
boff.Reset()
if resp.StatusCode == http.StatusNotFound {
// grafana cloud may not be updated yet for this new feature,
// but we are still done with our work. no need to log anything
} else if resp.StatusCode >= 200 && resp.StatusCode < 300 {
// it got accepted, we're done.
log.Infof("GrafanaNet %s submitted", path)
} else {
// if it's neither of the above, let's log it, but make it look not too scary
log.Infof("GrafanaNet %s resulted in code %s (should be harmless)", path, resp.Status)
}
ioutil.ReadAll(resp.Body)
resp.Body.Close()
return
}
}
func (route *GrafanaNet) Shutdown() error {
//conf := route.config.Load().(Config)
// trigger all of our queues to be flushed to the tsdb-gw
close(route.shutdown) // close (rather than a single send) so every worker goroutine sees the signal
// wait for all tsdb-gw writes to complete.
route.wg.Wait()
return nil
}
func (route *GrafanaNet) Snapshot() Snapshot {
snapshot := route.baseRoute.Snapshot()
snapshot.Addr = route.Cfg.Addr
return snapshot
}
| {
r.dispatch = dispatchBlocking
} | conditional_block |
grafananet.go | package route
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"hash/fnv"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Dieterbe/go-metrics"
"github.com/golang/snappy"
dest "github.com/grafana/carbon-relay-ng/destination"
"github.com/grafana/carbon-relay-ng/matcher"
"github.com/grafana/carbon-relay-ng/persister"
"github.com/grafana/carbon-relay-ng/stats"
"github.com/grafana/carbon-relay-ng/util"
"github.com/jpillora/backoff"
log "github.com/sirupsen/logrus"
conf "github.com/grafana/carbon-relay-ng/pkg/mt-conf"
"github.com/grafana/metrictank/schema"
"github.com/grafana/metrictank/schema/msg"
)
type GrafanaNetConfig struct {
// mandatory
Addr string
ApiKey string
SchemasFile string
// optional
AggregationFile string
BufSize int // number of messages we can buffer up.
FlushMaxNum int // flush after this many metrics seen
FlushMaxWait time.Duration // flush after this much time passed
Timeout time.Duration // timeout for http operations
Concurrency int // number of concurrent connections to tsdb-gw
OrgID int
SSLVerify bool
Blocking bool
Spool bool // ignored for now
// optional http backoff params for posting metrics and schemas
ErrBackoffMin time.Duration
ErrBackoffFactor float64
}
func NewGrafanaNetConfig(addr, apiKey, schemasFile, aggregationFile string) (GrafanaNetConfig, error) {
u, err := url.Parse(addr)
if err != nil || !u.IsAbs() || u.Host == "" { // apparently "http://" is a valid absolute URL (with empty host), but we don't want that
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: invalid value for 'addr': %q. need an absolute http[s] url", addr)
}
if !strings.HasSuffix(u.Path, "/metrics") && !strings.HasSuffix(u.Path, "/metrics/") {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: invalid value for 'addr': %q. needs to be a /metrics endpoint", addr)
}
if apiKey == "" {
return GrafanaNetConfig{}, errors.New("NewGrafanaNetConfig: invalid value for 'apiKey'. value must be set to non-empty string")
}
if schemasFile == "" {
return GrafanaNetConfig{}, errors.New("NewGrafanaNetConfig: invalid value for 'schemasFile'. value must be set to the path to your storage-schemas.conf file")
}
_, err = getSchemas(schemasFile)
if err != nil {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: could not read schemasFile %q: %s", schemasFile, err.Error())
}
if aggregationFile != "" {
_, err = conf.ReadAggregations(aggregationFile)
if err != nil {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: could not read aggregationFile %q: %s", aggregationFile, err.Error())
}
}
return GrafanaNetConfig{
Addr: addr,
ApiKey: apiKey,
SchemasFile: schemasFile,
AggregationFile: aggregationFile,
BufSize: 1e7, // since a message is typically around 100B this is 1GB
FlushMaxNum: 5000,
FlushMaxWait: time.Second / 2,
Timeout: 10 * time.Second,
Concurrency: 100,
OrgID: 1,
SSLVerify: true,
Blocking: false,
Spool: false,
ErrBackoffMin: 100 * time.Millisecond,
ErrBackoffFactor: 1.5,
}, nil
}
type GrafanaNet struct {
baseRoute
Cfg GrafanaNetConfig
schemas persister.WhisperSchemas
aggregation conf.Aggregations
schemasStr string
aggregationStr string
addrMetrics string
addrSchemas string
addrAggregation string
dispatch func(chan []byte, []byte, metrics.Gauge, metrics.Counter)
in []chan []byte
shutdown chan struct{}
wg *sync.WaitGroup
client *http.Client
numErrFlush metrics.Counter
numOut metrics.Counter // metrics successfully written to our buffered conn (no flushing yet)
numDropBuffFull metrics.Counter // metric drops due to queue full
durationTickFlush metrics.Timer // only updated after successful flush
durationManuFlush metrics.Timer // only updated after successful flush. not implemented yet
tickFlushSize metrics.Histogram // only updated after successful flush
manuFlushSize metrics.Histogram // only updated after successful flush. not implemented yet
numBuffered metrics.Gauge
bufferSize metrics.Gauge
}
// getGrafanaNetAddr returns the metrics, schemas and aggregation address (URL) for a given config URL
// The URL we instruct customers to use is the URL to post metrics to, so that one is obvious,
// but we support posting to both /graphite/metrics and /metrics, whereas the schemas and
// aggregation URLs should always get the /graphite prefix.
func getGrafanaNetAddr(addr string) (string, string, string) {
if strings.HasSuffix(addr, "/") {
addr = addr[:len(addr)-1]
}
if !strings.HasSuffix(addr, "/metrics") {
panic("getGrafanaNetAddr called on an addr that does not end in /metrics or /metrics/ - this is not supported. Normally NewGrafanaNetConfig would already have validated this")
}
addrMetrics := addr
baseAddr := strings.TrimSuffix(addrMetrics, "/metrics")
if strings.HasSuffix(baseAddr, "/graphite") {
baseAddr = strings.TrimSuffix(baseAddr, "/graphite")
}
addrSchemas := baseAddr + "/graphite/config/storageSchema"
addrAggregation := baseAddr + "/graphite/config/storageAggregation"
return addrMetrics, addrSchemas, addrAggregation
}
// NewGrafanaNet creates a special route that writes to a grafana.net datastore
// We will automatically run the route and the destination
func NewGrafanaNet(key string, matcher matcher.Matcher, cfg GrafanaNetConfig) (Route, error) {
schemas, err := getSchemas(cfg.SchemasFile)
if err != nil {
return nil, err
}
schemasStr := schemas.String()
var aggregation conf.Aggregations
var aggregationStr string
if cfg.AggregationFile != "" {
aggregation, err = conf.ReadAggregations(cfg.AggregationFile)
if err != nil {
return nil, err
}
aggregationStr = aggregation.String()
}
cleanAddr := util.AddrToPath(cfg.Addr)
r := &GrafanaNet{
baseRoute: baseRoute{"GrafanaNet", sync.Mutex{}, atomic.Value{}, key},
Cfg: cfg,
schemas: schemas,
schemasStr: schemasStr,
aggregation: aggregation,
aggregationStr: aggregationStr,
in: make([]chan []byte, cfg.Concurrency),
shutdown: make(chan struct{}),
wg: new(sync.WaitGroup),
numErrFlush: stats.Counter("dest=" + cleanAddr + ".unit=Err.type=flush"),
numOut: stats.Counter("dest=" + cleanAddr + ".unit=Metric.direction=out"),
durationTickFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=ticker"),
durationManuFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=manual"),
tickFlushSize: stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=ticker"),
manuFlushSize: stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=manual"),
numBuffered: stats.Gauge("dest=" + cleanAddr + ".unit=Metric.what=numBuffered"),
bufferSize: stats.Gauge("dest=" + cleanAddr + ".unit=Metric.what=bufferSize"),
numDropBuffFull: stats.Counter("dest=" + cleanAddr + ".unit=Metric.action=drop.reason=queue_full"),
}
r.addrMetrics, r.addrSchemas, r.addrAggregation = getGrafanaNetAddr(cfg.Addr)
r.bufferSize.Update(int64(cfg.BufSize))
if cfg.Blocking {
r.dispatch = dispatchBlocking
} else {
r.dispatch = dispatchNonBlocking
}
r.wg.Add(cfg.Concurrency)
for i := 0; i < cfg.Concurrency; i++ {
r.in[i] = make(chan []byte, cfg.BufSize/cfg.Concurrency)
go r.run(r.in[i])
}
r.config.Store(baseConfig{matcher, make([]*dest.Destination, 0)})
// start off with a transport the same as Go's DefaultTransport
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: cfg.Concurrency,
MaxIdleConnsPerHost: cfg.Concurrency,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
// disable http 2.0 because there seems to be a compatibility problem between nginx hosts and the golang http2 implementation
// which would occasionally result in bogus `400 Bad Request` errors.
transport.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper)
if !cfg.SSLVerify {
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
r.client = &http.Client{
Timeout: cfg.Timeout,
Transport: transport,
}
go r.updateSchemas()
if cfg.AggregationFile != "" {
go r.updateAggregation()
}
return r, nil
}
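// newNoHTTP2Client is an illustrative sketch, not part of the original source.
// It isolates the HTTP/2 opt-out used in NewGrafanaNet above: per the net/http
// docs, setting TLSNextProto to a non-nil (empty) map disables the automatic
// HTTP/2 upgrade, so all requests stay on HTTP/1.1.
func newNoHTTP2Client(timeout time.Duration) *http.Client {
transport := &http.Transport{
TLSNextProto: make(map[string]func(authority string, c *tls.Conn) http.RoundTripper),
}
return &http.Client{Timeout: timeout, Transport: transport}
}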
// run manages incoming and outgoing data for a shard
func (route *GrafanaNet) run(in chan []byte) {
var metrics []*schema.MetricData
buffer := new(bytes.Buffer)
timer := time.NewTimer(route.Cfg.FlushMaxWait)
for {
select {
case buf := <-in:
route.numBuffered.Dec(1)
md, err := parseMetric(buf, route.schemas, route.Cfg.OrgID)
if err != nil {
log.Errorf("RouteGrafanaNet: parseMetric failed: %s. skipping metric", err)
continue
}
md.SetId()
metrics = append(metrics, md)
if len(metrics) == route.Cfg.FlushMaxNum {
metrics = route.retryFlush(metrics, buffer)
// reset our timer
if !timer.Stop() {
<-timer.C
}
timer.Reset(route.Cfg.FlushMaxWait)
}
case <-timer.C:
timer.Reset(route.Cfg.FlushMaxWait)
metrics = route.retryFlush(metrics, buffer)
case <-route.shutdown:
metrics = route.retryFlush(metrics, buffer)
// mark this worker as done so Shutdown's wg.Wait() can return
route.wg.Done()
return
}
}
}
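// resetTimerExample is an illustrative sketch, not part of the original source.
// It shows the stop-drain-reset idiom used in run above: when Stop returns
// false the timer already fired, so the pending tick must be drained before
// Reset, otherwise a stale tick would trigger an immediate, spurious flush.
// This only works when, as in run, the same goroutine owns the timer and has
// not already consumed the pending tick.
func resetTimerExample(t *time.Timer, d time.Duration) {
if !t.Stop() {
<-t.C
}
t.Reset(d)
}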
func (route *GrafanaNet) retryFlush(metrics []*schema.MetricData, buffer *bytes.Buffer) []*schema.MetricData {
if len(metrics) == 0 {
return metrics
}
mda := schema.MetricDataArray(metrics)
data, err := msg.CreateMsg(mda, 0, msg.FormatMetricDataArrayMsgp)
if err != nil {
panic(err)
}
route.numOut.Inc(int64(len(metrics)))
buffer.Reset()
snappyBody := snappy.NewWriter(buffer)
snappyBody.Write(data)
snappyBody.Close()
body := buffer.Bytes()
req, err := http.NewRequest("POST", route.addrMetrics, bytes.NewReader(body))
if err != nil {
panic(err)
}
req.Header.Add("Authorization", "Bearer "+route.Cfg.ApiKey)
req.Header.Add("Content-Type", "rt-metric-binary-snappy")
req.Header.Add("User-Agent", UserAgent)
req.Header.Add("Carbon-Relay-NG-Instance", Instance)
boff := &backoff.Backoff{
Min: route.Cfg.ErrBackoffMin,
Max: 30 * time.Second,
Factor: route.Cfg.ErrBackoffFactor,
Jitter: true,
}
var dur time.Duration
for {
dur, err = route.flush(mda, req)
if err == nil {
break
}
route.numErrFlush.Inc(1)
b := boff.Duration()
log.Warnf("GrafanaNet failed to submit data to %s: %s - will try again in %s (this attempt took %s)", route.addrMetrics, err.Error(), b, dur)
time.Sleep(b)
// re-instantiate body, since the previous .Do() attempt would have Read it all the way
req.Body = ioutil.NopCloser(bytes.NewReader(body))
}
log.Debugf("GrafanaNet sent metrics in %s - batch size %d", dur, len(metrics))
route.durationTickFlush.Update(dur)
route.tickFlushSize.Update(int64(len(metrics)))
return metrics[:0]
}
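// snappyRoundTripExample is an illustrative sketch, not part of the original
// source. It demonstrates the framed snappy encoding used by retryFlush above:
// snappy.NewWriter and snappy.NewReader implement the streaming (framed)
// format, which is presumably what the "rt-metric-binary-snappy" content type
// refers to.
func snappyRoundTripExample(payload []byte) ([]byte, error) {
var buf bytes.Buffer
w := snappy.NewWriter(&buf)
if _, err := w.Write(payload); err != nil {
return nil, err
}
if err := w.Close(); err != nil {
return nil, err
}
return ioutil.ReadAll(snappy.NewReader(&buf))
}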
func (route *GrafanaNet) flush(mda schema.MetricDataArray, req *http.Request) (time.Duration, error) {
pre := time.Now()
resp, err := route.client.Do(req)
dur := time.Since(pre)
if err != nil {
return dur, err
}
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
bod, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Warnf("GrafanaNet remote said %q, but could not read its response: %s", resp.Status, err.Error())
return dur, nil
}
var mResp MetricsResponse
err = json.Unmarshal(bod, &mResp)
if err != nil {
log.Warnf("GrafanaNet remote returned %q, but could not parse its response: %s", resp.Status, err.Error())
return dur, nil
}
if mResp.Invalid != 0 {
var b strings.Builder
fmt.Fprintf(&b, "request contained %d invalid metrics that were dropped (%d valid metrics were published in this request)\n", mResp.Invalid, mResp.Published)
for key, vErr := range mResp.ValidationErrors {
fmt.Fprintf(&b, " %q : %d metrics. Examples:\n", key, vErr.Count)
for _, idx := range vErr.ExampleIds {
fmt.Fprintf(&b, " - %#v\n", mda[idx])
}
}
log.Warn(b.String())
}
return dur, nil
}
buf := make([]byte, 300)
n, _ := resp.Body.Read(buf)
ioutil.ReadAll(resp.Body)
resp.Body.Close()
return dur, fmt.Errorf("http %d - %s", resp.StatusCode, buf[:n])
}
// Dispatch takes in the requested buf; in non-blocking mode the buf is dropped if the shard's queue is full
func (route *GrafanaNet) Dispatch(buf []byte) {
// should return as quickly as possible
log.Tracef("route %s sending to dest %s: %s", route.key, route.addrMetrics, buf)
buf = bytes.TrimSpace(buf)
index := bytes.Index(buf, []byte(" "))
if index == -1 {
log.Error("RouteGrafanaNet: invalid message")
return
}
key := buf[:index]
hasher := fnv.New32a()
hasher.Write(key)
shard := int(hasher.Sum32() % uint32(route.Cfg.Concurrency))
route.dispatch(route.in[shard], buf, route.numBuffered, route.numDropBuffFull)
}
func (route *GrafanaNet) Flush() error {
//conf := route.config.Load().(Config)
// no-op. Flush() is currently not called by anything.
return nil
}
func (route *GrafanaNet) updateSchemas() {
route.postConfig(route.addrSchemas, route.schemasStr)
for range time.Tick(6 * time.Hour) {
route.postConfig(route.addrSchemas, route.schemasStr)
}
}
func (route *GrafanaNet) updateAggregation() {
route.postConfig(route.addrAggregation, route.aggregationStr)
for range time.Tick(6 * time.Hour) {
route.postConfig(route.addrAggregation, route.aggregationStr)
}
}
func (route *GrafanaNet) postConfig(path, cfg string) {
boff := &backoff.Backoff{
Min: route.Cfg.ErrBackoffMin,
Max: 30 * time.Minute,
Factor: route.Cfg.ErrBackoffFactor,
Jitter: true,
}
for {
req, err := http.NewRequest("POST", path, strings.NewReader(cfg))
if err != nil {
panic(err)
}
req.Header.Add("Authorization", "Bearer "+route.Cfg.ApiKey)
req.Header.Add("User-Agent", UserAgent)
req.Header.Add("Carbon-Relay-NG-Instance", Instance)
resp, err := route.client.Do(req)
if err != nil {
log.Warnf("got error for %s: %s", path, err.Error())
time.Sleep(boff.Duration())
continue
}
boff.Reset()
if resp.StatusCode == http.StatusNotFound {
// grafana cloud may not be updated yet for this new feature,
// but we are still done with our work. no need to log anything
} else if resp.StatusCode >= 200 && resp.StatusCode < 300 {
// it got accepted, we're done.
log.Infof("GrafanaNet %s submitted", path) | }
ioutil.ReadAll(resp.Body)
resp.Body.Close()
return
}
}
func (route *GrafanaNet) Shutdown() error {
//conf := route.config.Load().(Config)
// trigger all of our queues to be flushed to the tsdb-gw
close(route.shutdown) // close (rather than a single send) so every worker goroutine sees the signal
// wait for all tsdb-gw writes to complete.
route.wg.Wait()
return nil
}
func (route *GrafanaNet) Snapshot() Snapshot {
snapshot := route.baseRoute.Snapshot()
snapshot.Addr = route.Cfg.Addr
return snapshot
} | } else {
// if it's neither of the above, let's log it, but make it look not too scary
log.Infof("GrafanaNet %s resulted in code %s (should be harmless)", path, resp.Status) | random_line_split |
grafananet.go | package route
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"hash/fnv"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Dieterbe/go-metrics"
"github.com/golang/snappy"
dest "github.com/grafana/carbon-relay-ng/destination"
"github.com/grafana/carbon-relay-ng/matcher"
"github.com/grafana/carbon-relay-ng/persister"
"github.com/grafana/carbon-relay-ng/stats"
"github.com/grafana/carbon-relay-ng/util"
"github.com/jpillora/backoff"
log "github.com/sirupsen/logrus"
conf "github.com/grafana/carbon-relay-ng/pkg/mt-conf"
"github.com/grafana/metrictank/schema"
"github.com/grafana/metrictank/schema/msg"
)
type GrafanaNetConfig struct {
// mandatory
Addr string
ApiKey string
SchemasFile string
// optional
AggregationFile string
BufSize int // number of messages we can buffer up.
FlushMaxNum int // flush after this many metrics seen
FlushMaxWait time.Duration // flush after this much time passed
Timeout time.Duration // timeout for http operations
Concurrency int // number of concurrent connections to tsdb-gw
OrgID int
SSLVerify bool
Blocking bool
Spool bool // ignored for now
// optional http backoff params for posting metrics and schemas
ErrBackoffMin time.Duration
ErrBackoffFactor float64
}
func NewGrafanaNetConfig(addr, apiKey, schemasFile, aggregationFile string) (GrafanaNetConfig, error) {
u, err := url.Parse(addr)
if err != nil || !u.IsAbs() || u.Host == "" { // apparently "http://" is a valid absolute URL (with empty host), but we don't want that
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: invalid value for 'addr': %q. need an absolute http[s] url", addr)
}
if !strings.HasSuffix(u.Path, "/metrics") && !strings.HasSuffix(u.Path, "/metrics/") {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: invalid value for 'addr': %q. needs to be a /metrics endpoint", addr)
}
if apiKey == "" {
return GrafanaNetConfig{}, errors.New("NewGrafanaNetConfig: invalid value for 'apiKey'. value must be set to non-empty string")
}
if schemasFile == "" {
return GrafanaNetConfig{}, errors.New("NewGrafanaNetConfig: invalid value for 'schemasFile'. value must be set to the path to your storage-schemas.conf file")
}
_, err = getSchemas(schemasFile)
if err != nil {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: could not read schemasFile %q: %s", schemasFile, err.Error())
}
if aggregationFile != "" {
_, err = conf.ReadAggregations(aggregationFile)
if err != nil {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: could not read aggregationFile %q: %s", aggregationFile, err.Error())
}
}
return GrafanaNetConfig{
Addr: addr,
ApiKey: apiKey,
SchemasFile: schemasFile,
AggregationFile: aggregationFile,
BufSize: 1e7, // since a message is typically around 100B this is 1GB
FlushMaxNum: 5000,
FlushMaxWait: time.Second / 2,
Timeout: 10 * time.Second,
Concurrency: 100,
OrgID: 1,
SSLVerify: true,
Blocking: false,
Spool: false,
ErrBackoffMin: 100 * time.Millisecond,
ErrBackoffFactor: 1.5,
}, nil
}
type GrafanaNet struct {
baseRoute
Cfg GrafanaNetConfig
schemas persister.WhisperSchemas
aggregation conf.Aggregations
schemasStr string
aggregationStr string
addrMetrics string
addrSchemas string
addrAggregation string
dispatch func(chan []byte, []byte, metrics.Gauge, metrics.Counter)
in []chan []byte
shutdown chan struct{}
wg *sync.WaitGroup
client *http.Client
numErrFlush metrics.Counter
numOut metrics.Counter // metrics successfully written to our buffered conn (no flushing yet)
numDropBuffFull metrics.Counter // metric drops due to queue full
durationTickFlush metrics.Timer // only updated after successful flush
durationManuFlush metrics.Timer // only updated after successful flush. not implemented yet
tickFlushSize metrics.Histogram // only updated after successful flush
manuFlushSize metrics.Histogram // only updated after successful flush. not implemented yet
numBuffered metrics.Gauge
bufferSize metrics.Gauge
}
// getGrafanaNetAddr returns the metrics, schemas and aggregation address (URL) for a given config URL
// The URL we instruct customers to use is the URL to post metrics to, so that one is obvious,
// but we support posting to both /graphite/metrics and /metrics, whereas the schemas and
// aggregation URLs should always get the /graphite prefix.
func getGrafanaNetAddr(addr string) (string, string, string) {
if strings.HasSuffix(addr, "/") {
addr = addr[:len(addr)-1]
}
if !strings.HasSuffix(addr, "/metrics") {
panic("getGrafanaNetAddr called on an addr that does not end in /metrics or /metrics/ - this is not supported. Normally NewGrafanaNetConfig would already have validated this")
}
addrMetrics := addr
baseAddr := strings.TrimSuffix(addrMetrics, "/metrics")
if strings.HasSuffix(baseAddr, "/graphite") {
baseAddr = strings.TrimSuffix(baseAddr, "/graphite")
}
addrSchemas := baseAddr + "/graphite/config/storageSchema"
addrAggregation := baseAddr + "/graphite/config/storageAggregation"
return addrMetrics, addrSchemas, addrAggregation
}
// NewGrafanaNet creates a special route that writes to a grafana.net datastore
// We will automatically run the route and the destination
func NewGrafanaNet(key string, matcher matcher.Matcher, cfg GrafanaNetConfig) (Route, error) {
schemas, err := getSchemas(cfg.SchemasFile)
if err != nil {
return nil, err
}
schemasStr := schemas.String()
var aggregation conf.Aggregations
var aggregationStr string
if cfg.AggregationFile != "" {
aggregation, err = conf.ReadAggregations(cfg.AggregationFile)
if err != nil {
return nil, err
}
aggregationStr = aggregation.String()
}
cleanAddr := util.AddrToPath(cfg.Addr)
r := &GrafanaNet{
baseRoute: baseRoute{"GrafanaNet", sync.Mutex{}, atomic.Value{}, key},
Cfg: cfg,
schemas: schemas,
schemasStr: schemasStr,
aggregation: aggregation,
aggregationStr: aggregationStr,
in: make([]chan []byte, cfg.Concurrency),
shutdown: make(chan struct{}),
wg: new(sync.WaitGroup),
numErrFlush: stats.Counter("dest=" + cleanAddr + ".unit=Err.type=flush"),
numOut: stats.Counter("dest=" + cleanAddr + ".unit=Metric.direction=out"),
durationTickFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=ticker"),
durationManuFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=manual"),
tickFlushSize: stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=ticker"),
manuFlushSize: stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=manual"),
numBuffered: stats.Gauge("dest=" + cleanAddr + ".unit=Metric.what=numBuffered"),
bufferSize: stats.Gauge("dest=" + cleanAddr + ".unit=Metric.what=bufferSize"),
numDropBuffFull: stats.Counter("dest=" + cleanAddr + ".unit=Metric.action=drop.reason=queue_full"),
}
r.addrMetrics, r.addrSchemas, r.addrAggregation = getGrafanaNetAddr(cfg.Addr)
r.bufferSize.Update(int64(cfg.BufSize))
if cfg.Blocking {
r.dispatch = dispatchBlocking
} else {
r.dispatch = dispatchNonBlocking
}
r.wg.Add(cfg.Concurrency)
for i := 0; i < cfg.Concurrency; i++ {
r.in[i] = make(chan []byte, cfg.BufSize/cfg.Concurrency)
go r.run(r.in[i])
}
r.config.Store(baseConfig{matcher, make([]*dest.Destination, 0)})
// start off with a transport the same as Go's DefaultTransport
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: cfg.Concurrency,
MaxIdleConnsPerHost: cfg.Concurrency,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
// disable http 2.0 because there seems to be a compatibility problem between nginx hosts and the golang http2 implementation
// which would occasionally result in bogus `400 Bad Request` errors.
transport.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper)
if !cfg.SSLVerify {
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
r.client = &http.Client{
Timeout: cfg.Timeout,
Transport: transport,
}
go r.updateSchemas()
if cfg.AggregationFile != "" {
go r.updateAggregation()
}
return r, nil
}
// run manages incoming and outgoing data for a shard
func (route *GrafanaNet) run(in chan []byte) {
var metrics []*schema.MetricData
buffer := new(bytes.Buffer)
timer := time.NewTimer(route.Cfg.FlushMaxWait)
for {
select {
case buf := <-in:
route.numBuffered.Dec(1)
md, err := parseMetric(buf, route.schemas, route.Cfg.OrgID)
if err != nil {
log.Errorf("RouteGrafanaNet: parseMetric failed: %s. skipping metric", err)
continue
}
md.SetId()
metrics = append(metrics, md)
if len(metrics) == route.Cfg.FlushMaxNum {
metrics = route.retryFlush(metrics, buffer)
// reset our timer
if !timer.Stop() {
<-timer.C
}
timer.Reset(route.Cfg.FlushMaxWait)
}
case <-timer.C:
timer.Reset(route.Cfg.FlushMaxWait)
metrics = route.retryFlush(metrics, buffer)
case <-route.shutdown:
metrics = route.retryFlush(metrics, buffer)
// mark this worker as done so Shutdown's wg.Wait() can return
route.wg.Done()
return
}
}
}
func (route *GrafanaNet) retryFlush(metrics []*schema.MetricData, buffer *bytes.Buffer) []*schema.MetricData |
func (route *GrafanaNet) flush(mda schema.MetricDataArray, req *http.Request) (time.Duration, error) {
pre := time.Now()
resp, err := route.client.Do(req)
dur := time.Since(pre)
if err != nil {
return dur, err
}
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
bod, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Warnf("GrafanaNet remote said %q, but could not read its response: %s", resp.Status, err.Error())
return dur, nil
}
var mResp MetricsResponse
err = json.Unmarshal(bod, &mResp)
if err != nil {
log.Warnf("GrafanaNet remote returned %q, but could not parse its response: %s", resp.Status, err.Error())
return dur, nil
}
if mResp.Invalid != 0 {
var b strings.Builder
fmt.Fprintf(&b, "request contained %d invalid metrics that were dropped (%d valid metrics were published in this request)\n", mResp.Invalid, mResp.Published)
for key, vErr := range mResp.ValidationErrors {
fmt.Fprintf(&b, " %q : %d metrics. Examples:\n", key, vErr.Count)
for _, idx := range vErr.ExampleIds {
fmt.Fprintf(&b, " - %#v\n", mda[idx])
}
}
log.Warn(b.String())
}
return dur, nil
}
buf := make([]byte, 300)
n, _ := resp.Body.Read(buf)
ioutil.ReadAll(resp.Body)
resp.Body.Close()
return dur, fmt.Errorf("http %d - %s", resp.StatusCode, buf[:n])
}
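// rewindRequestExample is an illustrative sketch, not part of the original
// source. It isolates how retryFlush re-arms the request between attempts:
// client.Do consumes req.Body, so every retry needs a fresh reader over the
// same buffered bytes.
func rewindRequestExample(req *http.Request, body []byte) {
req.Body = ioutil.NopCloser(bytes.NewReader(body))
}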
// Dispatch takes in the requested buf; in non-blocking mode the buf is dropped if the shard's queue is full
func (route *GrafanaNet) Dispatch(buf []byte) {
// should return as quickly as possible
log.Tracef("route %s sending to dest %s: %s", route.key, route.addrMetrics, buf)
buf = bytes.TrimSpace(buf)
index := bytes.Index(buf, []byte(" "))
if index == -1 {
log.Error("RouteGrafanaNet: invalid message")
return
}
key := buf[:index]
hasher := fnv.New32a()
hasher.Write(key)
shard := int(hasher.Sum32() % uint32(route.Cfg.Concurrency))
route.dispatch(route.in[shard], buf, route.numBuffered, route.numDropBuffFull)
}
func (route *GrafanaNet) Flush() error {
//conf := route.config.Load().(Config)
// no-op. Flush() is currently not called by anything.
return nil
}
func (route *GrafanaNet) updateSchemas() {
route.postConfig(route.addrSchemas, route.schemasStr)
for range time.Tick(6 * time.Hour) {
route.postConfig(route.addrSchemas, route.schemasStr)
}
}
func (route *GrafanaNet) updateAggregation() {
route.postConfig(route.addrAggregation, route.aggregationStr)
for range time.Tick(6 * time.Hour) {
route.postConfig(route.addrAggregation, route.aggregationStr)
}
}
func (route *GrafanaNet) postConfig(path, cfg string) {
boff := &backoff.Backoff{
Min: route.Cfg.ErrBackoffMin,
Max: 30 * time.Minute,
Factor: route.Cfg.ErrBackoffFactor,
Jitter: true,
}
for {
req, err := http.NewRequest("POST", path, strings.NewReader(cfg))
if err != nil {
panic(err)
}
req.Header.Add("Authorization", "Bearer "+route.Cfg.ApiKey)
req.Header.Add("User-Agent", UserAgent)
req.Header.Add("Carbon-Relay-NG-Instance", Instance)
resp, err := route.client.Do(req)
if err != nil {
log.Warnf("got error for %s: %s", path, err.Error())
time.Sleep(boff.Duration())
continue
}
boff.Reset()
if resp.StatusCode == http.StatusNotFound {
// grafana cloud may not be updated yet for this new feature,
// but we are still done with our work. no need to log anything
} else if resp.StatusCode >= 200 && resp.StatusCode < 300 {
// it got accepted, we're done.
log.Infof("GrafanaNet %s submitted", path)
} else {
// if it's neither of the above, let's log it, but make it look not too scary
log.Infof("GrafanaNet %s resulted in code %s (should be harmless)", path, resp.Status)
}
ioutil.ReadAll(resp.Body)
resp.Body.Close()
return
}
}
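// backoffExample is an illustrative sketch, not part of the original source.
// It shows the retry cadence produced by jpillora/backoff under the defaults
// from NewGrafanaNetConfig: each Duration() call grows by Factor until Max,
// and Reset() starts over. Jitter is disabled here so the values are
// deterministic: roughly 100ms, 150ms, 225ms.
func backoffExample() []time.Duration {
b := &backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 30 * time.Second,
Factor: 1.5,
Jitter: false,
}
return []time.Duration{b.Duration(), b.Duration(), b.Duration()}
}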
func (route *GrafanaNet) Shutdown() error {
//conf := route.config.Load().(Config)
// trigger all of our queues to be flushed to the tsdb-gw
close(route.shutdown) // close (rather than a single send) so every worker goroutine sees the signal
// wait for all tsdb-gw writes to complete.
route.wg.Wait()
return nil
}
func (route *GrafanaNet) Snapshot() Snapshot {
snapshot := route.baseRoute.Snapshot()
snapshot.Addr = route.Cfg.Addr
return snapshot
}
| {
if len(metrics) == 0 {
return metrics
}
mda := schema.MetricDataArray(metrics)
data, err := msg.CreateMsg(mda, 0, msg.FormatMetricDataArrayMsgp)
if err != nil {
panic(err)
}
route.numOut.Inc(int64(len(metrics)))
buffer.Reset()
snappyBody := snappy.NewWriter(buffer)
snappyBody.Write(data)
snappyBody.Close()
body := buffer.Bytes()
req, err := http.NewRequest("POST", route.addrMetrics, bytes.NewReader(body))
if err != nil {
panic(err)
}
req.Header.Add("Authorization", "Bearer "+route.Cfg.ApiKey)
req.Header.Add("Content-Type", "rt-metric-binary-snappy")
req.Header.Add("User-Agent", UserAgent)
req.Header.Add("Carbon-Relay-NG-Instance", Instance)
boff := &backoff.Backoff{
Min: route.Cfg.ErrBackoffMin,
Max: 30 * time.Second,
Factor: route.Cfg.ErrBackoffFactor,
Jitter: true,
}
var dur time.Duration
for {
dur, err = route.flush(mda, req)
if err == nil {
break
}
route.numErrFlush.Inc(1)
b := boff.Duration()
log.Warnf("GrafanaNet failed to submit data to %s: %s - will try again in %s (this attempt took %s)", route.addrMetrics, err.Error(), b, dur)
time.Sleep(b)
// re-instantiate body, since the previous .Do() attempt would have Read it all the way
req.Body = ioutil.NopCloser(bytes.NewReader(body))
}
log.Debugf("GrafanaNet sent metrics in %s - batch size %d", dur, len(metrics))
route.durationTickFlush.Update(dur)
route.tickFlushSize.Update(int64(len(metrics)))
return metrics[:0]
} | identifier_body |
grafananet.go | package route
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"hash/fnv"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Dieterbe/go-metrics"
"github.com/golang/snappy"
dest "github.com/grafana/carbon-relay-ng/destination"
"github.com/grafana/carbon-relay-ng/matcher"
"github.com/grafana/carbon-relay-ng/persister"
"github.com/grafana/carbon-relay-ng/stats"
"github.com/grafana/carbon-relay-ng/util"
"github.com/jpillora/backoff"
log "github.com/sirupsen/logrus"
conf "github.com/grafana/carbon-relay-ng/pkg/mt-conf"
"github.com/grafana/metrictank/schema"
"github.com/grafana/metrictank/schema/msg"
)
type GrafanaNetConfig struct {
// mandatory
Addr string
ApiKey string
SchemasFile string
// optional
AggregationFile string
BufSize int // number of messages we can buffer up.
FlushMaxNum int // flush after this many metrics seen
FlushMaxWait time.Duration // flush after this much time passed
Timeout time.Duration // timeout for http operations
Concurrency int // number of concurrent connections to tsdb-gw
OrgID int
SSLVerify bool
Blocking bool
Spool bool // ignored for now
// optional http backoff params for posting metrics and schemas
ErrBackoffMin time.Duration
ErrBackoffFactor float64
}
func NewGrafanaNetConfig(addr, apiKey, schemasFile, aggregationFile string) (GrafanaNetConfig, error) {
u, err := url.Parse(addr)
if err != nil || !u.IsAbs() || u.Host == "" { // apparently "http://" is a valid absolute URL (with empty host), but we don't want that
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: invalid value for 'addr': %q. need an absolute http[s] url", addr)
}
if !strings.HasSuffix(u.Path, "/metrics") && !strings.HasSuffix(u.Path, "/metrics/") {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: invalid value for 'addr': %q. needs to be a /metrics endpoint", addr)
}
if apiKey == "" {
return GrafanaNetConfig{}, errors.New("NewGrafanaNetConfig: invalid value for 'apiKey'. value must be set to non-empty string")
}
if schemasFile == "" {
return GrafanaNetConfig{}, errors.New("NewGrafanaNetConfig: invalid value for 'schemasFile'. value must be set to the path to your storage-schemas.conf file")
}
_, err = getSchemas(schemasFile)
if err != nil {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: could not read schemasFile %q: %s", schemasFile, err.Error())
}
if aggregationFile != "" {
_, err = conf.ReadAggregations(aggregationFile)
if err != nil {
return GrafanaNetConfig{}, fmt.Errorf("NewGrafanaNetConfig: could not read aggregationFile %q: %s", aggregationFile, err.Error())
}
}
return GrafanaNetConfig{
Addr: addr,
ApiKey: apiKey,
SchemasFile: schemasFile,
AggregationFile: aggregationFile,
BufSize: 1e7, // since a message is typically around 100B this is 1GB
FlushMaxNum: 5000,
FlushMaxWait: time.Second / 2,
Timeout: 10 * time.Second,
Concurrency: 100,
OrgID: 1,
SSLVerify: true,
Blocking: false,
Spool: false,
ErrBackoffMin: 100 * time.Millisecond,
ErrBackoffFactor: 1.5,
}, nil
}
type GrafanaNet struct {
baseRoute
Cfg GrafanaNetConfig
schemas persister.WhisperSchemas
aggregation conf.Aggregations
schemasStr string
aggregationStr string
addrMetrics string
addrSchemas string
addrAggregation string
dispatch func(chan []byte, []byte, metrics.Gauge, metrics.Counter)
in []chan []byte
shutdown chan struct{}
wg *sync.WaitGroup
client *http.Client
numErrFlush metrics.Counter
numOut metrics.Counter // metrics successfully written to our buffered conn (no flushing yet)
numDropBuffFull metrics.Counter // metric drops due to queue full
durationTickFlush metrics.Timer // only updated after successful flush
durationManuFlush metrics.Timer // only updated after successful flush. not implemented yet
tickFlushSize metrics.Histogram // only updated after successful flush
manuFlushSize metrics.Histogram // only updated after successful flush. not implemented yet
numBuffered metrics.Gauge
bufferSize metrics.Gauge
}
// getGrafanaNetAddr returns the metrics, schemas and aggregation address (URL) for a given config URL
// The URL we instruct customers to use is the URL to post metrics to, so that one is obvious,
// but we support posting to both /graphite/metrics and /metrics, whereas the schemas and
// aggregation URLs should always get the /graphite prefix.
func getGrafanaNetAddr(addr string) (string, string, string) {
if strings.HasSuffix(addr, "/") {
addr = addr[:len(addr)-1]
}
if !strings.HasSuffix(addr, "/metrics") {
panic("getGrafanaNetAddr called on an addr that does not end in /metrics or /metrics/ - this is not supported. Normally NewGrafanaNetConfig would already have validated this")
}
addrMetrics := addr
baseAddr := strings.TrimSuffix(addrMetrics, "/metrics")
if strings.HasSuffix(baseAddr, "/graphite") {
baseAddr = strings.TrimSuffix(baseAddr, "/graphite")
}
addrSchemas := baseAddr + "/graphite/config/storageSchema"
addrAggregation := baseAddr + "/graphite/config/storageAggregation"
return addrMetrics, addrSchemas, addrAggregation
}
// NewGrafanaNet creates a special route that writes to a grafana.net datastore
// We will automatically run the route and the destination
func NewGrafanaNet(key string, matcher matcher.Matcher, cfg GrafanaNetConfig) (Route, error) {
schemas, err := getSchemas(cfg.SchemasFile)
if err != nil {
return nil, err
}
schemasStr := schemas.String()
var aggregation conf.Aggregations
var aggregationStr string
if cfg.AggregationFile != "" {
aggregation, err = conf.ReadAggregations(cfg.AggregationFile)
if err != nil {
return nil, err
}
aggregationStr = aggregation.String()
}
cleanAddr := util.AddrToPath(cfg.Addr)
r := &GrafanaNet{
baseRoute: baseRoute{"GrafanaNet", sync.Mutex{}, atomic.Value{}, key},
Cfg: cfg,
schemas: schemas,
schemasStr: schemasStr,
aggregation: aggregation,
aggregationStr: aggregationStr,
in: make([]chan []byte, cfg.Concurrency),
shutdown: make(chan struct{}),
wg: new(sync.WaitGroup),
numErrFlush: stats.Counter("dest=" + cleanAddr + ".unit=Err.type=flush"),
numOut: stats.Counter("dest=" + cleanAddr + ".unit=Metric.direction=out"),
durationTickFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=ticker"),
durationManuFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=manual"),
tickFlushSize: stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=ticker"),
manuFlushSize: stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=manual"),
numBuffered: stats.Gauge("dest=" + cleanAddr + ".unit=Metric.what=numBuffered"),
bufferSize: stats.Gauge("dest=" + cleanAddr + ".unit=Metric.what=bufferSize"),
numDropBuffFull: stats.Counter("dest=" + cleanAddr + ".unit=Metric.action=drop.reason=queue_full"),
}
r.addrMetrics, r.addrSchemas, r.addrAggregation = getGrafanaNetAddr(cfg.Addr)
r.bufferSize.Update(int64(cfg.BufSize))
if cfg.Blocking {
r.dispatch = dispatchBlocking
} else {
r.dispatch = dispatchNonBlocking
}
r.wg.Add(cfg.Concurrency)
for i := 0; i < cfg.Concurrency; i++ {
r.in[i] = make(chan []byte, cfg.BufSize/cfg.Concurrency)
go r.run(r.in[i])
}
r.config.Store(baseConfig{matcher, make([]*dest.Destination, 0)})
// start off with a transport the same as Go's DefaultTransport
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: cfg.Concurrency,
MaxIdleConnsPerHost: cfg.Concurrency,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
// disable http 2.0 because there seems to be a compatibility problem between nginx hosts and the golang http2 implementation
// which would occasionally result in bogus `400 Bad Request` errors.
transport.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper)
if !cfg.SSLVerify {
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
r.client = &http.Client{
Timeout: cfg.Timeout,
Transport: transport,
}
go r.updateSchemas()
if cfg.AggregationFile != "" {
go r.updateAggregation()
}
return r, nil
}
// run manages incoming and outgoing data for a shard
func (route *GrafanaNet) run(in chan []byte) {
var metrics []*schema.MetricData
buffer := new(bytes.Buffer)
timer := time.NewTimer(route.Cfg.FlushMaxWait)
for {
select {
case buf := <-in:
route.numBuffered.Dec(1)
md, err := parseMetric(buf, route.schemas, route.Cfg.OrgID)
if err != nil {
log.Errorf("RouteGrafanaNet: parseMetric failed: %s. skipping metric", err)
continue
}
md.SetId()
metrics = append(metrics, md)
if len(metrics) == route.Cfg.FlushMaxNum {
metrics = route.retryFlush(metrics, buffer)
// reset our timer
if !timer.Stop() {
<-timer.C
}
timer.Reset(route.Cfg.FlushMaxWait)
}
case <-timer.C:
timer.Reset(route.Cfg.FlushMaxWait)
metrics = route.retryFlush(metrics, buffer)
case <-route.shutdown:
metrics = route.retryFlush(metrics, buffer)
// mark this worker as done so Shutdown's wg.Wait() can return
route.wg.Done()
return
}
}
}
func (route *GrafanaNet) | (metrics []*schema.MetricData, buffer *bytes.Buffer) []*schema.MetricData {
if len(metrics) == 0 {
return metrics
}
mda := schema.MetricDataArray(metrics)
data, err := msg.CreateMsg(mda, 0, msg.FormatMetricDataArrayMsgp)
if err != nil {
panic(err)
}
route.numOut.Inc(int64(len(metrics)))
buffer.Reset()
snappyBody := snappy.NewWriter(buffer)
snappyBody.Write(data)
snappyBody.Close()
body := buffer.Bytes()
req, err := http.NewRequest("POST", route.addrMetrics, bytes.NewReader(body))
if err != nil {
panic(err)
}
req.Header.Add("Authorization", "Bearer "+route.Cfg.ApiKey)
req.Header.Add("Content-Type", "rt-metric-binary-snappy")
req.Header.Add("User-Agent", UserAgent)
req.Header.Add("Carbon-Relay-NG-Instance", Instance)
boff := &backoff.Backoff{
Min: route.Cfg.ErrBackoffMin,
Max: 30 * time.Second,
Factor: route.Cfg.ErrBackoffFactor,
Jitter: true,
}
var dur time.Duration
for {
dur, err = route.flush(mda, req)
if err == nil {
break
}
route.numErrFlush.Inc(1)
b := boff.Duration()
log.Warnf("GrafanaNet failed to submit data to %s: %s - will try again in %s (this attempt took %s)", route.addrMetrics, err.Error(), b, dur)
time.Sleep(b)
// re-instantiate body, since the previous .Do() attempt would have Read it all the way
req.Body = ioutil.NopCloser(bytes.NewReader(body))
}
log.Debugf("GrafanaNet sent metrics in %s - batch size %d", dur, len(metrics))
route.durationTickFlush.Update(dur)
route.tickFlushSize.Update(int64(len(metrics)))
return metrics[:0]
}
func (route *GrafanaNet) flush(mda schema.MetricDataArray, req *http.Request) (time.Duration, error) {
pre := time.Now()
resp, err := route.client.Do(req)
dur := time.Since(pre)
if err != nil {
return dur, err
}
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
bod, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Warnf("GrafanaNet remote said %q, but could not read its response: %s", resp.Status, err.Error())
return dur, nil
}
var mResp MetricsResponse
err = json.Unmarshal(bod, &mResp)
if err != nil {
log.Warnf("GrafanaNet remote returned %q, but could not parse its response: %s", resp.Status, err.Error())
return dur, nil
}
if mResp.Invalid != 0 {
var b strings.Builder
fmt.Fprintf(&b, "request contained %d invalid metrics that were dropped (%d valid metrics were published in this request)\n", mResp.Invalid, mResp.Published)
for key, vErr := range mResp.ValidationErrors {
fmt.Fprintf(&b, " %q : %d metrics. Examples:\n", key, vErr.Count)
for _, idx := range vErr.ExampleIds {
fmt.Fprintf(&b, " - %#v\n", mda[idx])
}
}
log.Warn(b.String())
}
return dur, nil
}
buf := make([]byte, 300)
n, _ := resp.Body.Read(buf)
ioutil.ReadAll(resp.Body)
resp.Body.Close()
return dur, fmt.Errorf("http %d - %s", resp.StatusCode, buf[:n])
}
// Dispatch takes in the requested buf; in non-blocking mode the buf is dropped if the shard's queue is full
func (route *GrafanaNet) Dispatch(buf []byte) {
// should return as quickly as possible
log.Tracef("route %s sending to dest %s: %s", route.key, route.addrMetrics, buf)
buf = bytes.TrimSpace(buf)
index := bytes.Index(buf, []byte(" "))
if index == -1 {
log.Error("RouteGrafanaNet: invalid message")
return
}
key := buf[:index]
hasher := fnv.New32a()
hasher.Write(key)
shard := int(hasher.Sum32() % uint32(route.Cfg.Concurrency))
route.dispatch(route.in[shard], buf, route.numBuffered, route.numDropBuffFull)
}
func (route *GrafanaNet) Flush() error {
//conf := route.config.Load().(Config)
// no-op. Flush() is currently not called by anything.
return nil
}
func (route *GrafanaNet) updateSchemas() {
route.postConfig(route.addrSchemas, route.schemasStr)
for range time.Tick(6 * time.Hour) {
route.postConfig(route.addrSchemas, route.schemasStr)
}
}
func (route *GrafanaNet) updateAggregation() {
route.postConfig(route.addrAggregation, route.aggregationStr)
for range time.Tick(6 * time.Hour) {
route.postConfig(route.addrAggregation, route.aggregationStr)
}
}
func (route *GrafanaNet) postConfig(path, cfg string) {
boff := &backoff.Backoff{
Min: route.Cfg.ErrBackoffMin,
Max: 30 * time.Minute,
Factor: route.Cfg.ErrBackoffFactor,
Jitter: true,
}
for {
req, err := http.NewRequest("POST", path, strings.NewReader(cfg))
if err != nil {
panic(err)
}
req.Header.Add("Authorization", "Bearer "+route.Cfg.ApiKey)
req.Header.Add("User-Agent", UserAgent)
req.Header.Add("Carbon-Relay-NG-Instance", Instance)
resp, err := route.client.Do(req)
if err != nil {
log.Warnf("got error for %s: %s", path, err.Error())
time.Sleep(boff.Duration())
continue
}
boff.Reset()
if resp.StatusCode == http.StatusNotFound {
// grafana cloud may not be updated yet for this new feature,
// but we are still done with our work. no need to log anything
} else if resp.StatusCode >= 200 && resp.StatusCode < 300 {
// it got accepted, we're done.
log.Infof("GrafanaNet %s submitted", path)
} else {
// if it's neither of the above, let's log it, but make it look not too scary
log.Infof("GrafanaNet %s resulted in code %s (should be harmless)", path, resp.Status)
}
ioutil.ReadAll(resp.Body)
resp.Body.Close()
return
}
}
func (route *GrafanaNet) Shutdown() error {
//conf := route.config.Load().(Config)
// trigger all of our queues to be flushed to the tsdb-gw
close(route.shutdown) // close (rather than a single send) so every worker goroutine sees the signal
// wait for all tsdb-gw writes to complete.
route.wg.Wait()
return nil
}
func (route *GrafanaNet) Snapshot() Snapshot {
snapshot := route.baseRoute.Snapshot()
snapshot.Addr = route.Cfg.Addr
return snapshot
}
| retryFlush | identifier_name |
process.go | package model
import (
"encoding/binary"
"encoding/hex"
"net"
"os"
"strings"
"time"
"github.com/ds3lab/easeml/engine/database/model/types"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/pkg/errors"
)
// GetProcessByID returns a process given its id.
func (context Context) GetProcessByID(id bson.ObjectId) (result types.Process, err error) {
// Currently there are no restrictions for non-root users here.
c := context.Session.DB(context.DBName).C("processes")
var allResults []types.Process
err = c.Find(bson.M{"_id": id}).All(&allResults)
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
if len(allResults) == 0 |
return allResults[0], nil
}
// GetProcesses lists all processes given some filter criteria.
func (context Context) GetProcesses(
filters F,
limit int,
cursor string,
sortBy string,
order string,
) (result []types.Process, cm types.CollectionMetadata, err error) {
c := context.Session.DB(context.DBName).C("processes")
// Validate the parameters.
if sortBy != "" &&
sortBy != "id" &&
sortBy != "process-id" &&
sortBy != "host-id" &&
sortBy != "host-address" &&
sortBy != "start-time" &&
sortBy != "type" &&
sortBy != "resource" &&
sortBy != "status" {
err = errors.Wrapf(ErrBadInput, "cannot sort by \"%s\"", sortBy)
return
}
if order != "" && order != "asc" && order != "desc" {
err = errors.Wrapf(ErrBadInput, "order can be either \"asc\" or \"desc\", not \"%s\"", order)
return
}
if order == "" {
order = "asc"
}
// We currently don't limit access to this collection. Everyone can see it.
// Build a query given the parameters.
query := bson.M{}
for k, v := range filters {
switch k {
case "id":
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)["$in"] = v.([]bson.ObjectId)
case "process-id":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(uint64)
case "host-id", "host-address", "type", "resource", "status":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(string)
default:
err = errors.Wrap(ErrBadInput, "invalid value of argument filters")
return
}
}
// We count the result size given the filters. This is before pagination.
var resultSize int
resultSize, err = c.Find(query).Count()
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
// If a cursor was specified then we have to do a range query.
if cursor != "" {
comparer := "$gt"
if order == "desc" {
comparer = "$lt"
}
// If there is no sorting then the cursor only points to the _id field.
if sortBy != "" && sortBy != "id" {
splits := strings.Split(cursor, "-")
if len(splits) != 2 {
err = errors.Wrap(ErrBadInput, "invalid cursor")
return
}
cursor = splits[1]
var decoded []byte
decoded, err = hex.DecodeString(splits[0])
if err != nil {
err = errors.Wrap(err, "hex decode string failed")
return
}
var otherCursor interface{}
switch sortBy {
case "host-id", "host-address", "type", "resource", "status":
otherCursor = string(decoded)
case "process-id":
otherCursor = binary.BigEndian.Uint64(decoded)
case "start-time":
var t time.Time
t.GobDecode(decoded)
otherCursor = t
}
setDefault(&query, "$or", bson.M{})
query["$or"] = []bson.M{
bson.M{sortBy: bson.M{comparer: otherCursor}},
bson.M{sortBy: bson.M{"$eq": otherCursor}, "_id": bson.M{comparer: bson.ObjectIdHex(cursor)}},
}
} else {
if bson.IsObjectIdHex(cursor) == false {
err = errors.Wrap(ErrBadInput, "invalid cursor")
return
}
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)[comparer] = bson.ObjectIdHex(cursor)
}
}
// Execute the query.
q := c.Find(query)
// We always sort by _id, but we may also sort by a specific field.
if sortBy == "" || sortBy == "id" {
if order == "asc" {
q = q.Sort("_id")
} else {
q = q.Sort("-_id")
}
} else {
if order == "asc" {
q = q.Sort(sortBy, "_id")
} else {
q = q.Sort("-"+sortBy, "-_id")
}
}
if limit > 0 {
q = q.Limit(limit)
}
// Collect the results.
var allResults []types.Process
err = q.All(&allResults)
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
// Compute the next cursor.
nextCursor := ""
if limit > 0 && len(allResults) == limit {
lastResult := allResults[len(allResults)-1]
nextCursor = lastResult.ID.Hex()
if sortBy != "" {
var encoded string
var b []byte
switch sortBy {
case "id":
b = []byte(lastResult.ID)
case "process-id":
b = make([]byte, 8) // PutUint64 needs an 8-byte slice; 4 bytes would panic
binary.BigEndian.PutUint64(b, lastResult.ProcessID)
case "host-id":
b = []byte(lastResult.HostID)
case "host-address":
b = []byte(lastResult.HostAddress)
case "start-time":
b, err = lastResult.StartTime.GobEncode()
case "type":
b = []byte(lastResult.Type)
case "resource":
b = []byte(lastResult.Resource)
case "status":
b = []byte(lastResult.Status)
}
encoded = hex.EncodeToString(b)
nextCursor = encoded + "-" + nextCursor
}
}
// Assemble the results.
result = allResults
cm = types.CollectionMetadata{
TotalResultSize: resultSize,
ReturnedResultSize: len(result),
NextPageCursor: nextCursor,
}
return
}
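// keysetQueryExample is an illustrative sketch, not part of the original
// source. It shows the shape of the keyset-pagination filter built in
// GetProcesses above: to page on a non-unique sort field, the cursor carries
// (sortValue, lastID) and the query selects documents strictly past the sort
// value, or equal on the sort value but past the _id tiebreaker.
func keysetQueryExample(sortBy string, sortValue interface{}, lastID bson.ObjectId) bson.M {
return bson.M{"$or": []bson.M{
bson.M{sortBy: bson.M{"$gt": sortValue}},
bson.M{sortBy: bson.M{"$eq": sortValue}, "_id": bson.M{"$gt": lastID}},
}}
}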
// CountProcesses is the same as GetProcesses but returns only the count, not the actual processes.
func (context Context) CountProcesses(filters F) (count int, err error) {
c := context.Session.DB(context.DBName).C("processes")
// Build a query given the parameters.
query := bson.M{}
for k, v := range filters {
switch k {
case "id":
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)["$in"] = v.([]bson.ObjectId)
case "process-id":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(uint64)
case "host-id", "host-address", "type", "resource", "status":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(string)
default:
err = errors.Wrap(ErrBadInput, "invalid value of argument filters")
return
}
}
// We count the number of processes that satisfy the filter criteria.
count, err = c.Find(query).Count()
if err != nil {
err = errors.Wrap(err, "mongo find failed")
}
return
}
// CreateProcess adds a given process to the database.
func (context Context) CreateProcess(proc types.Process) (result types.Process, err error) {
// This action is only permitted for the root user.
if context.User.IsRoot() == false {
err = types.ErrUnauthorized
return
}
if proc.Type != types.ProcController && proc.Type != types.ProcWorker && proc.Type != types.ProcScheduler {
err = errors.Wrapf(ErrBadInput,
"value of type can be \"%s\", \"%s\" or \"%s\", but found \"%s\"",
types.ProcController, types.ProcWorker, types.ProcScheduler, proc.Type)
return
}
// Find the first candidate ordinal. We do this optimistically, assuming there are no race conditions.
proc.RunningOrinal, err = context.findCandidateOrdinal(proc.Type)
if err != nil {
err = errors.Wrap(err, "find candidate ordinal failed")
return
}
// Give default values to some fields.
proc.ID = bson.NewObjectId()
proc.Status = types.ProcIdle
proc.StartTime = time.Now()
c := context.Session.DB(context.DBName).C("processes")
err = c.Insert(proc)
if err != nil {
if lastError, ok := err.(*mgo.LastError); ok && lastError.Code == 11000 {
err = types.ErrIdentifierTaken
return
}
err = errors.Wrap(err, "mongo insert failed")
return
}
// Check if the ordinal was accepted. If not, then there was a race condition so we need to handle it.
var accepted bool
accepted, err = context.isOrdinalAccepted(proc.Type, proc.ID, proc.RunningOrinal)
if err != nil {
err = errors.Wrap(err, "is ordinal accepted failed")
return
}
for accepted == false {
// As long as the ordinal isn't accepted we need to try again.
proc.RunningOrinal, err = context.findCandidateOrdinal(proc.Type)
if err != nil {
err = errors.Wrap(err, "find candidate ordinal failed")
return
}
// Update the process with the new ordinal.
c := context.Session.DB(context.DBName).C("processes")
err = c.Update(bson.M{"_id": proc.ID}, bson.M{"$set": bson.M{"running-ordinal": proc.RunningOrinal}})
if err != nil {
err = errors.Wrap(err, "mongo update failed")
return
}
// Check if the ordinal was accepted.
accepted, err = context.isOrdinalAccepted(proc.Type, proc.ID, proc.RunningOrinal)
if err != nil {
err = errors.Wrap(err, "is ordinal accepted failed")
return
}
}
return proc, nil
}
func (context Context) findCandidateOrdinal(processType string) (ordinal int, err error) {
c := context.Session.DB(context.DBName).C("processes")
query := bson.M{"type": processType, "status": bson.M{"$ne": types.ProcTerminated}}
var ordinals []struct {
Ordinal int `bson:"running-ordinal"`
}
q := c.Find(query).Select(bson.M{"running-ordinal": 1}).Sort("running-ordinal")
err = q.All(&ordinals)
if err != nil {
return -1, err
}
ordinal = 1
for i := range ordinals {
if ordinal == ordinals[i].Ordinal {
ordinal++
}
}
return
}
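// Example: if the live processes of this type hold ordinals [1, 2, 4], the
// loop above settles on 3, the smallest free ordinal. This relies on the
// query being sorted ascending by running-ordinal.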
func (context Context) isOrdinalAccepted(processType string, processID bson.ObjectId, ordinal int) (result bool, err error) {
c := context.Session.DB(context.DBName).C("processes")
query := bson.M{"type": processType, "status": bson.M{"$ne": types.ProcTerminated}, "running-ordinal": ordinal}
var ordinals []struct {
ObjectID bson.ObjectId `bson:"_id"`
Ordinal int `bson:"running-ordinal"`
}
q := c.Find(query).Select(bson.M{"_id": 1, "running-ordinal": 1})
err = q.All(&ordinals)
if err != nil {
return false, err
}
result = true
for i := range ordinals {
if processID > ordinals[i].ObjectID {
result = false
break
}
}
return
}
// UpdateProcess updates the information about a given process.
func (context Context) UpdateProcess(id bson.ObjectId, updates map[string]interface{}) (result types.Process, err error) {
// This action is only permitted for the root user.
if context.User.IsRoot() == false {
err = types.ErrUnauthorized
return
}
// Build the update document. Validate values.
valueUpdates := bson.M{}
for k, v := range updates {
switch k {
case "status":
status := v.(string)
if status != types.ProcIdle && status != types.ProcWorking && status != types.ProcTerminated {
err = errors.Wrapf(ErrBadInput,
"value of status can be \"%s\", \"%s\" or \"%s\", but found \"%s\"",
types.ProcIdle, types.ProcWorking, types.ProcTerminated, status)
return
}
valueUpdates["status"] = status
case "last-keepalive":
valueUpdates["last-keepalive"] = v.(time.Time)
default:
err = errors.Wrap(ErrBadInput, "invalid value of parameter updates")
return
}
}
// If there were no updates, then we can skip this step.
if len(valueUpdates) > 0 {
c := context.Session.DB(context.DBName).C("processes")
err = c.Update(bson.M{"_id": id}, bson.M{"$set": valueUpdates})
if err != nil {
err = errors.Wrap(err, "mongo update failed")
return
}
}
// Get the updated process and update cache if needed.
result, err = context.GetProcessByID(id)
if err != nil {
err = errors.Wrap(err, "process get by ID failed")
return
}
return
}
// StartProcess starts a process of a given type and initializes all other fields automatically.
func (context Context) StartProcess(processType string) (result types.Process, err error) {
if processType != types.ProcController && processType != types.ProcScheduler && processType != types.ProcWorker {
panic("invalid processType")
}
var hostID string
hostID, err = os.Hostname()
if err != nil {
err = errors.Wrap(err, "get hostname from os failed")
return
}
var hostAddress string
hostAddress, err = getOutboundIP()
if err != nil {
//err = errors.Wrap(err, "get outbound ip failed")
//return
hostAddress = "localhost"
}
process := types.Process{
HostID: hostID,
HostAddress: hostAddress,
ProcessID: uint64(os.Getpid()),
Resource: "cpu", // TODO: Change this later.
Type: processType,
}
return context.CreateProcess(process)
}
// getOutboundIP returns the preferred outbound IP of this machine.
func getOutboundIP() (ip string, err error) {
var conn net.Conn
conn, err = net.Dial("udp", "8.8.8.8:80")
if err != nil {
return
}
defer conn.Close()
localAddr := conn.LocalAddr().(*net.UDPAddr)
ip = localAddr.IP.String()
return
}
// SetProcessStatus updates the process status to the given value.
func (context Context) SetProcessStatus(id bson.ObjectId, status string) (result types.Process, err error) {
if status != types.ProcIdle && status != types.ProcWorking && status != types.ProcTerminated {
panic("invalid status")
}
return context.UpdateProcess(id, F{"status": status})
}
// ProcessKeepalive updates the keepalive-time field of the process
// thus notifying the system that it is still running.
func (context Context) ProcessKeepalive(id bson.ObjectId) (err error) {
_, err = context.UpdateProcess(id, F{"last-keepalive": time.Now()})
return
}
// TerminateDeadProcesses goes through all processes that have stopped making keepalive updates
// and sets their status to terminated.
func (context Context) TerminateDeadProcesses(cutoffTime time.Time) (err error) {
// Terminate all dead processes and release all locks that they held.
selector := bson.M{
"last-keepalive": bson.M{"$lt": cutoffTime},
"status": bson.M{"$ne": types.ProcTerminated},
}
update := bson.M{
"$set": bson.M{"status": types.ProcTerminated},
}
change := mgo.Change{Update: update, ReturnNew: true}
found := true
for found {
var process types.Process
c := context.Session.DB(context.DBName).C("processes")
changeInfo, err := c.Find(selector).Apply(change, &process)
if err != nil && err != mgo.ErrNotFound {
return err
}
if changeInfo != nil && changeInfo.Updated > 0 {
// Release any locks that the terminated process held.
err = context.ReleaseDatasetLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseModuleLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseJobLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseTaskLockByProcess(process.ID)
if err != nil {
return err
}
} else {
found = false
}
}
return nil
}
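// A hypothetical reaper loop built on TerminateDeadProcesses. The 30 second
// keepalive window and 10 second poll interval are assumptions for
// illustration, not values taken from the original code.
func runReaper(ctx Context) error {
for {
if err := ctx.TerminateDeadProcesses(time.Now().Add(-30 * time.Second)); err != nil {
return err
}
time.Sleep(10 * time.Second)
}
}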
| {
err = ErrNotFound
return
} | conditional_block |
process.go | package model
import (
"encoding/binary"
"encoding/hex"
"net"
"os"
"strings"
"time"
"github.com/ds3lab/easeml/engine/database/model/types"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/pkg/errors"
)
// GetProcessByID returns a process given its id.
func (context Context) GetProcessByID(id bson.ObjectId) (result types.Process, err error) {
// Currently there are no restrictions for non-root users here.
c := context.Session.DB(context.DBName).C("processes")
var allResults []types.Process
err = c.Find(bson.M{"_id": id}).All(&allResults)
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
if len(allResults) == 0 {
err = ErrNotFound
return
}
return allResults[0], nil
}
// GetProcesses lists all processes given some filter criteria.
func (context Context) GetProcesses(
filters F,
limit int,
cursor string,
sortBy string,
order string,
) (result []types.Process, cm types.CollectionMetadata, err error) {
c := context.Session.DB(context.DBName).C("processes")
// Validate the parameters.
if sortBy != "" &&
sortBy != "id" &&
sortBy != "process-id" &&
sortBy != "host-id" &&
sortBy != "host-address" &&
sortBy != "start-time" &&
sortBy != "type" &&
sortBy != "resource" &&
sortBy != "status" {
err = errors.Wrapf(ErrBadInput, "cannot sort by \"%s\"", sortBy)
return
}
if order != "" && order != "asc" && order != "desc" {
err = errors.Wrapf(ErrBadInput, "order can be either \"asc\" or \"desc\", not \"%s\"", order)
return
}
if order == "" {
order = "asc"
}
// We currently don't limit access to this collection. Everyone can see it.
// Build a query given the parameters.
query := bson.M{}
for k, v := range filters {
switch k {
case "id":
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)["$in"] = v.([]bson.ObjectId)
case "process-id":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(uint64)
case "host-id", "host-address", "type", "resource", "status":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(string)
default:
err = errors.Wrap(ErrBadInput, "invalid value of argument filters")
return
}
}
// We count the result size given the filters. This is before pagination.
var resultSize int
resultSize, err = c.Find(query).Count()
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
// If a cursor was specified then we have to do a range query.
if cursor != "" {
comparer := "$gt"
if order == "desc" {
comparer = "$lt"
}
// If sorting by a field other than the id, the cursor encodes both the sort field value and the _id; otherwise the cursor only points to the _id field.
if sortBy != "" && sortBy != "id" {
splits := strings.Split(cursor, "-")
if len(splits) != 2 {
err = errors.Wrap(ErrBadInput, "invalid cursor")
return
}
cursor = splits[1]
var decoded []byte
decoded, err = hex.DecodeString(splits[0])
if err != nil {
err = errors.Wrap(err, "hex decode string failed")
return
}
var otherCursor interface{}
switch sortBy {
case "host-id", "host-address", "type", "resource", "status":
otherCursor = string(decoded)
case "process-id":
otherCursor = binary.BigEndian.Uint64(decoded)
case "start-time":
var t time.Time
if err = t.GobDecode(decoded); err != nil {
err = errors.Wrap(err, "gob decode time failed")
return
}
otherCursor = t
}
query["$or"] = []bson.M{
bson.M{sortBy: bson.M{comparer: otherCursor}},
bson.M{sortBy: bson.M{"$eq": otherCursor}, "_id": bson.M{comparer: bson.ObjectIdHex(cursor)}},
}
} else {
if bson.IsObjectIdHex(cursor) == false {
err = errors.Wrap(ErrBadInput, "invalid cursor")
return
}
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)[comparer] = bson.ObjectIdHex(cursor)
}
}
// Execute the query.
q := c.Find(query)
// We always sort by _id, but we may also sort by a specific field.
if sortBy == "" || sortBy == "id" {
if order == "asc" {
q = q.Sort("_id")
} else {
q = q.Sort("-_id")
}
} else {
if order == "asc" {
q = q.Sort(sortBy, "_id")
} else {
q = q.Sort("-"+sortBy, "-_id")
}
}
if limit > 0 {
q = q.Limit(limit)
}
// Collect the results.
var allResults []types.Process
err = q.All(&allResults)
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
// Compute the next cursor.
nextCursor := ""
if limit > 0 && len(allResults) == limit {
lastResult := allResults[len(allResults)-1]
nextCursor = lastResult.ID.Hex()
if sortBy != "" && sortBy != "id" {
var encoded string
var b []byte
switch sortBy {
case "process-id":
b = make([]byte, 8) // PutUint64 requires an 8-byte buffer
binary.BigEndian.PutUint64(b, lastResult.ProcessID)
case "host-id":
b = []byte(lastResult.HostID)
case "host-address":
b = []byte(lastResult.HostAddress)
case "start-time":
b, err = lastResult.StartTime.GobEncode()
case "type":
b = []byte(lastResult.Type)
case "resource":
b = []byte(lastResult.Resource)
case "status":
b = []byte(lastResult.Status)
}
if err != nil {
err = errors.Wrap(err, "gob encode time failed")
return
}
encoded = hex.EncodeToString(b)
nextCursor = encoded + "-" + nextCursor
}
}
// Assemble the results.
result = allResults
cm = types.CollectionMetadata{
TotalResultSize: resultSize,
ReturnedResultSize: len(result),
NextPageCursor: nextCursor,
}
return
}
// CountProcesses is the same as GetProcesses but returns only the count, not the actual processes.
func (context Context) CountProcesses(filters F) (count int, err error) {
c := context.Session.DB(context.DBName).C("processes")
// Build a query given the parameters.
query := bson.M{}
for k, v := range filters {
switch k {
case "id":
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)["$in"] = v.([]bson.ObjectId)
case "process-id":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(uint64)
case "host-id", "host-address", "type", "resource", "status":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(string)
default:
err = errors.Wrap(ErrBadInput, "invalid value of argument filters")
return
}
}
// We count the number of processes that satisfy the filter criteria.
count, err = c.Find(query).Count()
if err != nil {
err = errors.Wrap(err, "mongo find failed")
}
return
}
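// Usage sketch (hypothetical): count the idle workers.
//
// n, err := context.CountProcesses(F{"type": types.ProcWorker, "status": types.ProcIdle})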
// CreateProcess adds a given process to the database.
func (context Context) CreateProcess(proc types.Process) (result types.Process, err error) {
// This action is only permitted for the root user.
if context.User.IsRoot() == false {
err = types.ErrUnauthorized
return
}
if proc.Type != types.ProcController && proc.Type != types.ProcWorker && proc.Type != types.ProcScheduler {
err = errors.Wrapf(ErrBadInput,
"value of type can be \"%s\", \"%s\" or \"%s\", but found \"%s\"",
types.ProcController, types.ProcWorker, types.ProcScheduler, proc.Type)
return
}
// Find the first candidate ordinal. We do this optimistically, assuming there are no race conditions.
proc.RunningOrinal, err = context.findCandidateOrdinal(proc.Type)
if err != nil {
err = errors.Wrap(err, "find candidate ordinal failed")
return
}
// Give default values to some fields.
proc.ID = bson.NewObjectId()
proc.Status = types.ProcIdle
proc.StartTime = time.Now()
c := context.Session.DB(context.DBName).C("processes")
err = c.Insert(proc)
if err != nil {
if lastError, ok := err.(*mgo.LastError); ok && lastError.Code == 11000 {
err = types.ErrIdentifierTaken
return
}
err = errors.Wrap(err, "mongo insert failed")
return
}
// Check if the ordinal was accepted. If not, then there was a race condition so we need to handle it.
var accepted bool
accepted, err = context.isOrdinalAccepted(proc.Type, proc.ID, proc.RunningOrinal)
if err != nil {
err = errors.Wrap(err, "is ordinal accepted failed")
return
}
for accepted == false {
// As long as the ordinal isn't accepted we need to try again.
proc.RunningOrinal, err = context.findCandidateOrdinal(proc.Type)
if err != nil {
err = errors.Wrap(err, "find candidate ordinal failed")
return
}
// Update the process with the new ordinal.
c := context.Session.DB(context.DBName).C("processes")
err = c.Update(bson.M{"_id": proc.ID}, bson.M{"$set": bson.M{"running-ordinal": proc.RunningOrinal}})
if err != nil {
err = errors.Wrap(err, "mongo update failed")
return
}
// Check if the ordinal was accepted.
accepted, err = context.isOrdinalAccepted(proc.Type, proc.ID, proc.RunningOrinal)
if err != nil {
err = errors.Wrap(err, "is ordinal accepted failed")
return
}
}
return proc, nil
}
func (context Context) findCandidateOrdinal(processType string) (ordinal int, err error) {
c := context.Session.DB(context.DBName).C("processes")
query := bson.M{"type": processType, "status": bson.M{"$ne": types.ProcTerminated}}
var ordinals []struct {
Ordinal int `bson:"running-ordinal"`
}
q := c.Find(query).Select(bson.M{"running-ordinal": 1}).Sort("running-ordinal")
err = q.All(&ordinals)
if err != nil {
return -1, err
}
ordinal = 1
for i := range ordinals {
if ordinal == ordinals[i].Ordinal {
ordinal++
}
}
return
}
func (context Context) isOrdinalAccepted(processType string, processID bson.ObjectId, ordinal int) (result bool, err error) {
c := context.Session.DB(context.DBName).C("processes")
query := bson.M{"type": processType, "status": bson.M{"$ne": types.ProcTerminated}, "running-ordinal": ordinal}
var ordinals []struct {
ObjectID bson.ObjectId `bson:"_id"`
Ordinal int `bson:"running-ordinal"`
}
q := c.Find(query).Select(bson.M{"_id": 1, "running-ordinal": 1})
err = q.All(&ordinals)
if err != nil {
return false, err
}
result = true
for i := range ordinals {
if processID > ordinals[i].ObjectID {
result = false
break
}
}
return
}
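// Example: if two processes raced to claim ordinal 2, only the one with the
// smallest ObjectId keeps it; the loser detects the collision here and
// retries with a fresh ordinal in CreateProcess.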
// UpdateProcess updates the information about a given process.
func (context Context) UpdateProcess(id bson.ObjectId, updates map[string]interface{}) (result types.Process, err error) {
// This action is only permitted for the root user.
if context.User.IsRoot() == false {
err = types.ErrUnauthorized
return
}
// Build the update document. Validate values.
valueUpdates := bson.M{}
for k, v := range updates {
switch k {
case "status":
status := v.(string)
if status != types.ProcIdle && status != types.ProcWorking && status != types.ProcTerminated {
err = errors.Wrapf(ErrBadInput,
"value of status can be \"%s\", \"%s\" or \"%s\", but found \"%s\"",
types.ProcIdle, types.ProcWorking, types.ProcTerminated, status)
return
}
valueUpdates["status"] = status
case "last-keepalive":
valueUpdates["last-keepalive"] = v.(time.Time)
default:
err = errors.Wrap(ErrBadInput, "invalid value of parameter updates")
return
}
}
// If there were no updates, then we can skip this step.
if len(valueUpdates) > 0 {
c := context.Session.DB(context.DBName).C("processes")
err = c.Update(bson.M{"_id": id}, bson.M{"$set": valueUpdates})
if err != nil {
err = errors.Wrap(err, "mongo update failed")
return
}
}
// Get the updated process and update cache if needed.
result, err = context.GetProcessByID(id)
if err != nil {
err = errors.Wrap(err, "process get by ID failed")
return
}
return
}
// StartProcess starts a process of a given type and initializes all other fields automatically.
func (context Context) StartProcess(processType string) (result types.Process, err error) |
// getOutboundIP returns the preferred outbound IP of this machine.
func getOutboundIP() (ip string, err error) {
var conn net.Conn
conn, err = net.Dial("udp", "8.8.8.8:80")
if err != nil {
return
}
defer conn.Close()
localAddr := conn.LocalAddr().(*net.UDPAddr)
ip = localAddr.IP.String()
return
}
// SetProcessStatus updates the process status to the given value.
func (context Context) SetProcessStatus(id bson.ObjectId, status string) (result types.Process, err error) {
if status != types.ProcIdle && status != types.ProcWorking && status != types.ProcTerminated {
panic("invalid status")
}
return context.UpdateProcess(id, F{"status": status})
}
// ProcessKeepalive updates the keepalive-time field of the process
// thus notifying the system that it is still running.
func (context Context) ProcessKeepalive(id bson.ObjectId) (err error) {
_, err = context.UpdateProcess(id, F{"last-keepalive": time.Now()})
return
}
// TerminateDeadProcesses goes through all processes that have stopped making keepalive updates
// and sets their status to terminated.
func (context Context) TerminateDeadProcesses(cutoffTime time.Time) (err error) {
// Terminate all dead processes and release all locks that they held.
selector := bson.M{
"last-keepalive": bson.M{"$lt": cutoffTime},
"status": bson.M{"$ne": types.ProcTerminated},
}
update := bson.M{
"$set": bson.M{"status": types.ProcTerminated},
}
change := mgo.Change{Update: update, ReturnNew: true}
found := true
for found {
var process types.Process
c := context.Session.DB(context.DBName).C("processes")
changeInfo, err := c.Find(selector).Apply(change, &process)
if err != nil && err != mgo.ErrNotFound {
return err
}
if changeInfo != nil && changeInfo.Updated > 0 {
// Release any locks that the terminated process held.
err = context.ReleaseDatasetLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseModuleLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseJobLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseTaskLockByProcess(process.ID)
if err != nil {
return err
}
} else {
found = false
}
}
return nil
}
| {
if processType != types.ProcController && processType != types.ProcScheduler && processType != types.ProcWorker {
panic("invalid processType")
}
var hostID string
hostID, err = os.Hostname()
if err != nil {
err = errors.Wrap(err, "get hostname from os failed")
return
}
var hostAddress string
hostAddress, err = getOutboundIP()
if err != nil {
//err = errors.Wrap(err, "get outbound ip failed")
//return
hostAddress = "localhost"
}
process := types.Process{
HostID: hostID,
HostAddress: hostAddress,
ProcessID: uint64(os.Getpid()),
Resource: "cpu", // TODO: Change this later.
Type: processType,
}
return context.CreateProcess(process)
} | identifier_body |
process.go | package model
import (
"encoding/binary"
"encoding/hex"
"net"
"os"
"strings"
"time"
"github.com/ds3lab/easeml/engine/database/model/types"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/pkg/errors"
)
// GetProcessByID returns a process given its id.
func (context Context) GetProcessByID(id bson.ObjectId) (result types.Process, err error) {
// Currently there are no restrictions for non-root users here.
c := context.Session.DB(context.DBName).C("processes")
var allResults []types.Process
err = c.Find(bson.M{"_id": id}).All(&allResults)
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
if len(allResults) == 0 {
err = ErrNotFound
return
}
return allResults[0], nil
}
// GetProcesses lists all processes given some filter criteria.
func (context Context) GetProcesses(
filters F,
limit int,
cursor string,
sortBy string,
order string,
) (result []types.Process, cm types.CollectionMetadata, err error) {
c := context.Session.DB(context.DBName).C("processes")
// Validate the parameters.
if sortBy != "" &&
sortBy != "id" &&
sortBy != "process-id" &&
sortBy != "host-id" &&
sortBy != "host-address" &&
sortBy != "start-time" &&
sortBy != "type" &&
sortBy != "resource" &&
sortBy != "status" {
err = errors.Wrapf(ErrBadInput, "cannot sort by \"%s\"", sortBy)
return
}
if order != "" && order != "asc" && order != "desc" {
err = errors.Wrapf(ErrBadInput, "order can be either \"asc\" or \"desc\", not \"%s\"", order)
return
}
if order == "" {
order = "asc"
}
// We currently don't limit access to this collection. Everyone can see it.
// Build a query given the parameters.
query := bson.M{}
for k, v := range filters {
switch k {
case "id":
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)["$in"] = v.([]bson.ObjectId)
case "process-id":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(uint64)
case "host-id", "host-address", "type", "resource", "status":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(string)
default:
err = errors.Wrap(ErrBadInput, "invalid value of argument filters")
return
}
}
// We count the result size given the filters. This is before pagination.
var resultSize int
resultSize, err = c.Find(query).Count()
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
} | // If a cursor was specified then we have to do a range query.
if cursor != "" {
comparer := "$gt"
if order == "desc" {
comparer = "$lt"
}
// If sorting by a field other than the id, the cursor encodes both the sort field value and the _id; otherwise the cursor only points to the _id field.
if sortBy != "" && sortBy != "id" {
splits := strings.Split(cursor, "-")
if len(splits) != 2 {
err = errors.Wrap(ErrBadInput, "invalid cursor")
return
}
cursor = splits[1]
var decoded []byte
decoded, err = hex.DecodeString(splits[0])
if err != nil {
err = errors.Wrap(err, "hex decode string failed")
return
}
var otherCursor interface{}
switch sortBy {
case "host-id", "host-address", "type", "resource", "status":
otherCursor = string(decoded)
case "process-id":
otherCursor = binary.BigEndian.Uint64(decoded)
case "start-time":
var t time.Time
if err = t.GobDecode(decoded); err != nil {
err = errors.Wrap(err, "gob decode time failed")
return
}
otherCursor = t
}
query["$or"] = []bson.M{
bson.M{sortBy: bson.M{comparer: otherCursor}},
bson.M{sortBy: bson.M{"$eq": otherCursor}, "_id": bson.M{comparer: bson.ObjectIdHex(cursor)}},
}
} else {
if bson.IsObjectIdHex(cursor) == false {
err = errors.Wrap(ErrBadInput, "invalid cursor")
return
}
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)[comparer] = bson.ObjectIdHex(cursor)
}
}
// Execute the query.
q := c.Find(query)
// We always sort by _id, but we may also sort by a specific field.
if sortBy == "" || sortBy == "id" {
if order == "asc" {
q = q.Sort("_id")
} else {
q = q.Sort("-_id")
}
} else {
if order == "asc" {
q = q.Sort(sortBy, "_id")
} else {
q = q.Sort("-"+sortBy, "-_id")
}
}
if limit > 0 {
q = q.Limit(limit)
}
// Collect the results.
var allResults []types.Process
err = q.All(&allResults)
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
// Compute the next cursor.
nextCursor := ""
if limit > 0 && len(allResults) == limit {
lastResult := allResults[len(allResults)-1]
nextCursor = lastResult.ID.Hex()
if sortBy != "" && sortBy != "id" {
var encoded string
var b []byte
switch sortBy {
case "process-id":
b = make([]byte, 8) // PutUint64 requires an 8-byte buffer
binary.BigEndian.PutUint64(b, lastResult.ProcessID)
case "host-id":
b = []byte(lastResult.HostID)
case "host-address":
b = []byte(lastResult.HostAddress)
case "start-time":
b, err = lastResult.StartTime.GobEncode()
case "type":
b = []byte(lastResult.Type)
case "resource":
b = []byte(lastResult.Resource)
case "status":
b = []byte(lastResult.Status)
}
if err != nil {
err = errors.Wrap(err, "gob encode time failed")
return
}
encoded = hex.EncodeToString(b)
nextCursor = encoded + "-" + nextCursor
}
}
// Assemble the results.
result = allResults
cm = types.CollectionMetadata{
TotalResultSize: resultSize,
ReturnedResultSize: len(result),
NextPageCursor: nextCursor,
}
return
}
// CountProcesses is the same as GetProcesses but returns only the count, not the actual processes.
func (context Context) CountProcesses(filters F) (count int, err error) {
c := context.Session.DB(context.DBName).C("processes")
// Build a query given the parameters.
query := bson.M{}
for k, v := range filters {
switch k {
case "id":
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)["$in"] = v.([]bson.ObjectId)
case "process-id":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(uint64)
case "host-id", "host-address", "type", "resource", "status":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(string)
default:
err = errors.Wrap(ErrBadInput, "invalid value of argument filters")
return
}
}
// We count the number of processes that satisfy the filter criteria.
count, err = c.Find(query).Count()
if err != nil {
err = errors.Wrap(err, "mongo find failed")
}
return
}
// CreateProcess adds a given process to the database.
func (context Context) CreateProcess(proc types.Process) (result types.Process, err error) {
// This action is only permitted for the root user.
if context.User.IsRoot() == false {
err = types.ErrUnauthorized
return
}
if proc.Type != types.ProcController && proc.Type != types.ProcWorker && proc.Type != types.ProcScheduler {
err = errors.Wrapf(ErrBadInput,
"value of type can be \"%s\", \"%s\" or \"%s\", but found \"%s\"",
types.ProcController, types.ProcWorker, types.ProcScheduler, proc.Type)
return
}
// Find the first candidate ordinal. We do this optimistically, assuming there are no race conditions.
proc.RunningOrinal, err = context.findCandidateOrdinal(proc.Type)
if err != nil {
err = errors.Wrap(err, "find candidate ordinal failed")
return
}
// Give default values to some fields.
proc.ID = bson.NewObjectId()
proc.Status = types.ProcIdle
proc.StartTime = time.Now()
c := context.Session.DB(context.DBName).C("processes")
err = c.Insert(proc)
if err != nil {
if lastError, ok := err.(*mgo.LastError); ok && lastError.Code == 11000 {
err = types.ErrIdentifierTaken
return
}
err = errors.Wrap(err, "mongo insert failed")
return
}
// Check if the ordinal was accepted. If not, then there was a race condition so we need to handle it.
var accepted bool
accepted, err = context.isOrdinalAccepted(proc.Type, proc.ID, proc.RunningOrinal)
if err != nil {
err = errors.Wrap(err, "is ordinal accepted failed")
return
}
for accepted == false {
// As long as the ordinal isn't accepted we need to try again.
proc.RunningOrinal, err = context.findCandidateOrdinal(proc.Type)
if err != nil {
err = errors.Wrap(err, "find candidate ordinal failed")
return
}
// Update the process with the new ordinal.
c := context.Session.DB(context.DBName).C("processes")
err = c.Update(bson.M{"_id": proc.ID}, bson.M{"$set": bson.M{"running-ordinal": proc.RunningOrinal}})
if err != nil {
err = errors.Wrap(err, "mongo update failed")
return
}
// Check if the ordinal was accepted.
accepted, err = context.isOrdinalAccepted(proc.Type, proc.ID, proc.RunningOrinal)
if err != nil {
err = errors.Wrap(err, "is ordinal accepted failed")
return
}
}
return proc, nil
}
func (context Context) findCandidateOrdinal(processType string) (ordinal int, err error) {
c := context.Session.DB(context.DBName).C("processes")
query := bson.M{"type": processType, "status": bson.M{"$ne": types.ProcTerminated}}
var ordinals []struct {
Ordinal int `bson:"running-ordinal"`
}
q := c.Find(query).Select(bson.M{"running-ordinal": 1}).Sort("running-ordinal")
err = q.All(&ordinals)
if err != nil {
return -1, err
}
ordinal = 1
for i := range ordinals {
if ordinal == ordinals[i].Ordinal {
ordinal++
}
}
return
}
func (context Context) isOrdinalAccepted(processType string, processID bson.ObjectId, ordinal int) (result bool, err error) {
c := context.Session.DB(context.DBName).C("processes")
query := bson.M{"type": processType, "status": bson.M{"$ne": types.ProcTerminated}, "running-ordinal": ordinal}
var ordinals []struct {
ObjectID bson.ObjectId `bson:"_id"`
Ordinal int `bson:"running-ordinal"`
}
q := c.Find(query).Select(bson.M{"_id": 1, "running-ordinal": 1})
err = q.All(&ordinals)
if err != nil {
return false, err
}
result = true
for i := range ordinals {
if processID > ordinals[i].ObjectID {
result = false
break
}
}
return
}
// UpdateProcess updates the information about a given process.
func (context Context) UpdateProcess(id bson.ObjectId, updates map[string]interface{}) (result types.Process, err error) {
// This action is only permitted for the root user.
if context.User.IsRoot() == false {
err = types.ErrUnauthorized
return
}
// Build the update document. Validate values.
valueUpdates := bson.M{}
for k, v := range updates {
switch k {
case "status":
status := v.(string)
if status != types.ProcIdle && status != types.ProcWorking && status != types.ProcTerminated {
err = errors.Wrapf(ErrBadInput,
"value of status can be \"%s\", \"%s\" or \"%s\", but found \"%s\"",
types.ProcIdle, types.ProcWorking, types.ProcTerminated, status)
return
}
valueUpdates["status"] = status
case "last-keepalive":
valueUpdates["last-keepalive"] = v.(time.Time)
default:
err = errors.Wrap(ErrBadInput, "invalid value of parameter updates")
return
}
}
// If there were no updates, then we can skip this step.
if len(valueUpdates) > 0 {
c := context.Session.DB(context.DBName).C("processes")
err = c.Update(bson.M{"_id": id}, bson.M{"$set": valueUpdates})
if err != nil {
err = errors.Wrap(err, "mongo update failed")
return
}
}
// Get the updated process and update cache if needed.
result, err = context.GetProcessByID(id)
if err != nil {
err = errors.Wrap(err, "process get by ID failed")
return
}
return
}
// StartProcess starts a process of a given type and initializes all other fields automatically.
func (context Context) StartProcess(processType string) (result types.Process, err error) {
if processType != types.ProcController && processType != types.ProcScheduler && processType != types.ProcWorker {
panic("invalid processType")
}
var hostID string
hostID, err = os.Hostname()
if err != nil {
err = errors.Wrap(err, "get hostname from os failed")
return
}
var hostAddress string
hostAddress, err = getOutboundIP()
if err != nil {
//err = errors.Wrap(err, "get outbound ip failed")
//return
hostAddress = "localhost"
}
process := types.Process{
HostID: hostID,
HostAddress: hostAddress,
ProcessID: uint64(os.Getpid()),
Resource: "cpu", // TODO: Change this later.
Type: processType,
}
return context.CreateProcess(process)
}
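// Usage sketch (hypothetical): register the current binary as a worker.
//
// proc, err := context.StartProcess(types.ProcWorker)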
// getOutboundIP returns the preferred outbound IP of this machine.
func getOutboundIP() (ip string, err error) {
var conn net.Conn
conn, err = net.Dial("udp", "8.8.8.8:80")
if err != nil {
return
}
defer conn.Close()
localAddr := conn.LocalAddr().(*net.UDPAddr)
ip = localAddr.IP.String()
return
}
// SetProcessStatus updates the process status to the given value.
func (context Context) SetProcessStatus(id bson.ObjectId, status string) (result types.Process, err error) {
if status != types.ProcIdle && status != types.ProcWorking && status != types.ProcTerminated {
panic("invalid status")
}
return context.UpdateProcess(id, F{"status": status})
}
// ProcessKeepalive updates the keepalive-time field of the process
// thus notifying the system that it is still running.
func (context Context) ProcessKeepalive(id bson.ObjectId) (err error) {
_, err = context.UpdateProcess(id, F{"last-keepalive": time.Now()})
return
}
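// A minimal keepalive loop sketch; the 5 second interval is an assumption
// for illustration, not a value from the original code.
func keepaliveLoop(ctx Context, id bson.ObjectId) error {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for range ticker.C {
if err := ctx.ProcessKeepalive(id); err != nil {
return err
}
}
return nil
}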
// TerminateDeadProcesses goes through all processes that have stopped making keepalive updates
// and sets their status to terminated.
func (context Context) TerminateDeadProcesses(cutoffTime time.Time) (err error) {
// Terminate all dead processes and release all locks that they held.
selector := bson.M{
"last-keepalive": bson.M{"$lt": cutoffTime},
"status": bson.M{"$ne": types.ProcTerminated},
}
update := bson.M{
"$set": bson.M{"status": types.ProcTerminated},
}
change := mgo.Change{Update: update, ReturnNew: true}
found := true
for found {
var process types.Process
c := context.Session.DB(context.DBName).C("processes")
changeInfo, err := c.Find(selector).Apply(change, &process)
if err != nil && err != mgo.ErrNotFound {
return err
}
if changeInfo != nil && changeInfo.Updated > 0 {
// Release any locks that the terminated process held.
err = context.ReleaseDatasetLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseModuleLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseJobLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseTaskLockByProcess(process.ID)
if err != nil {
return err
}
} else {
found = false
}
}
return nil
} | random_line_split | |
process.go | package model
import (
"encoding/binary"
"encoding/hex"
"net"
"os"
"strings"
"time"
"github.com/ds3lab/easeml/engine/database/model/types"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/pkg/errors"
)
// GetProcessByID returns a process given its id.
func (context Context) GetProcessByID(id bson.ObjectId) (result types.Process, err error) {
// Currently there are no restrictions for non-root users here.
c := context.Session.DB(context.DBName).C("processes")
var allResults []types.Process
err = c.Find(bson.M{"_id": id}).All(&allResults)
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
if len(allResults) == 0 {
err = ErrNotFound
return
}
return allResults[0], nil
}
// GetProcesses lists all processes given some filter criteria.
func (context Context) | (
filters F,
limit int,
cursor string,
sortBy string,
order string,
) (result []types.Process, cm types.CollectionMetadata, err error) {
c := context.Session.DB(context.DBName).C("processes")
// Validate the parameters.
if sortBy != "" &&
sortBy != "id" &&
sortBy != "process-id" &&
sortBy != "host-id" &&
sortBy != "host-address" &&
sortBy != "start-time" &&
sortBy != "type" &&
sortBy != "resource" &&
sortBy != "status" {
err = errors.Wrapf(ErrBadInput, "cannot sort by \"%s\"", sortBy)
return
}
if order != "" && order != "asc" && order != "desc" {
err = errors.Wrapf(ErrBadInput, "order can be either \"asc\" or \"desc\", not \"%s\"", order)
return
}
if order == "" {
order = "asc"
}
// We currently don't limit access to this collection. Everyone can see it.
// Build a query given the parameters.
query := bson.M{}
for k, v := range filters {
switch k {
case "id":
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)["$in"] = v.([]bson.ObjectId)
case "process-id":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(uint64)
case "host-id", "host-address", "type", "resource", "status":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(string)
default:
err = errors.Wrap(ErrBadInput, "invalid value of argument filters")
return
}
}
// We count the result size given the filters. This is before pagination.
var resultSize int
resultSize, err = c.Find(query).Count()
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
// If a cursor was specified then we have to do a range query.
if cursor != "" {
comparer := "$gt"
if order == "desc" {
comparer = "$lt"
}
// If sorting by a field other than the id, the cursor encodes both the sort field value and the _id; otherwise the cursor only points to the _id field.
if sortBy != "" && sortBy != "id" {
splits := strings.Split(cursor, "-")
if len(splits) != 2 {
err = errors.Wrap(ErrBadInput, "invalid cursor")
return
}
cursor = splits[1]
var decoded []byte
decoded, err = hex.DecodeString(splits[0])
if err != nil {
err = errors.Wrap(err, "hex decode string failed")
return
}
var otherCursor interface{}
switch sortBy {
case "host-id", "host-address", "type", "resource", "status":
otherCursor = string(decoded)
case "process-id":
otherCursor = binary.BigEndian.Uint64(decoded)
case "start-time":
var t time.Time
if err = t.GobDecode(decoded); err != nil {
err = errors.Wrap(err, "gob decode time failed")
return
}
otherCursor = t
}
query["$or"] = []bson.M{
bson.M{sortBy: bson.M{comparer: otherCursor}},
bson.M{sortBy: bson.M{"$eq": otherCursor}, "_id": bson.M{comparer: bson.ObjectIdHex(cursor)}},
}
} else {
if bson.IsObjectIdHex(cursor) == false {
err = errors.Wrap(ErrBadInput, "invalid cursor")
return
}
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)[comparer] = bson.ObjectIdHex(cursor)
}
}
// Execute the query.
q := c.Find(query)
// We always sort by _id, but we may also sort by a specific field.
if sortBy == "" || sortBy == "id" {
if order == "asc" {
q = q.Sort("_id")
} else {
q = q.Sort("-_id")
}
} else {
if order == "asc" {
q = q.Sort(sortBy, "_id")
} else {
q = q.Sort("-"+sortBy, "-_id")
}
}
if limit > 0 {
q = q.Limit(limit)
}
// Collect the results.
var allResults []types.Process
err = q.All(&allResults)
if err != nil {
err = errors.Wrap(err, "mongo find failed")
return
}
// Compute the next cursor.
nextCursor := ""
if limit > 0 && len(allResults) == limit {
lastResult := allResults[len(allResults)-1]
nextCursor = lastResult.ID.Hex()
if sortBy != "" && sortBy != "id" {
var encoded string
var b []byte
switch sortBy {
case "process-id":
b = make([]byte, 8) // PutUint64 requires an 8-byte buffer
binary.BigEndian.PutUint64(b, lastResult.ProcessID)
case "host-id":
b = []byte(lastResult.HostID)
case "host-address":
b = []byte(lastResult.HostAddress)
case "start-time":
b, err = lastResult.StartTime.GobEncode()
case "type":
b = []byte(lastResult.Type)
case "resource":
b = []byte(lastResult.Resource)
case "status":
b = []byte(lastResult.Status)
}
if err != nil {
err = errors.Wrap(err, "gob encode time failed")
return
}
encoded = hex.EncodeToString(b)
nextCursor = encoded + "-" + nextCursor
}
}
// Assemble the results.
result = allResults
cm = types.CollectionMetadata{
TotalResultSize: resultSize,
ReturnedResultSize: len(result),
NextPageCursor: nextCursor,
}
return
}
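// Pagination sketch (hypothetical): walk the whole collection page by page
// using the NextPageCursor from the returned metadata. The page size of 100
// is an arbitrary assumption.
func listAllProcesses(ctx Context) (all []types.Process, err error) {
cursor := ""
for {
var page []types.Process
var cm types.CollectionMetadata
page, cm, err = ctx.GetProcesses(nil, 100, cursor, "", "")
if err != nil {
return nil, err
}
all = append(all, page...)
if cm.NextPageCursor == "" {
return all, nil
}
cursor = cm.NextPageCursor
}
}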
// CountProcesses is the same as GetProcesses but returns only the count, not the actual processes.
func (context Context) CountProcesses(filters F) (count int, err error) {
c := context.Session.DB(context.DBName).C("processes")
// Build a query given the parameters.
query := bson.M{}
for k, v := range filters {
switch k {
case "id":
setDefault(&query, "_id", bson.M{})
query["_id"].(bson.M)["$in"] = v.([]bson.ObjectId)
case "process-id":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(uint64)
case "host-id", "host-address", "type", "resource", "status":
setDefault(&query, k, bson.M{})
query[k].(bson.M)["$eq"] = v.(string)
default:
err = errors.Wrap(ErrBadInput, "invalid value of argument filters")
return
}
}
// We count the number of processes that satisfy the filter criteria.
count, err = c.Find(query).Count()
if err != nil {
err = errors.Wrap(err, "mongo find failed")
}
return
}
// CreateProcess adds a given process to the database.
func (context Context) CreateProcess(proc types.Process) (result types.Process, err error) {
// This action is only permitted for the root user.
if context.User.IsRoot() == false {
err = types.ErrUnauthorized
return
}
if proc.Type != types.ProcController && proc.Type != types.ProcWorker && proc.Type != types.ProcScheduler {
err = errors.Wrapf(ErrBadInput,
"value of type can be \"%s\", \"%s\" or \"%s\", but found \"%s\"",
types.ProcController, types.ProcWorker, types.ProcScheduler, proc.Type)
return
}
// Find the first candidate ordinal. We do this optimistically, assuming there are no race conditions.
proc.RunningOrinal, err = context.findCandidateOrdinal(proc.Type)
if err != nil {
err = errors.Wrap(err, "find candidate ordinal failed")
return
}
// Give default values to some fields.
proc.ID = bson.NewObjectId()
proc.Status = types.ProcIdle
proc.StartTime = time.Now()
c := context.Session.DB(context.DBName).C("processes")
err = c.Insert(proc)
if err != nil {
if lastError, ok := err.(*mgo.LastError); ok && lastError.Code == 11000 {
err = types.ErrIdentifierTaken
return
}
err = errors.Wrap(err, "mongo insert failed")
return
}
// Check if the ordinal was accepted. If not, then there was a race condition so we need to handle it.
var accepted bool
accepted, err = context.isOrdinalAccepted(proc.Type, proc.ID, proc.RunningOrinal)
if err != nil {
err = errors.Wrap(err, "is ordinal accepted failed")
return
}
for accepted == false {
// As long as the ordinal isn't accepted we need to try again.
proc.RunningOrinal, err = context.findCandidateOrdinal(proc.Type)
if err != nil {
err = errors.Wrap(err, "find candidate ordinal failed")
return
}
// Update the process with the new ordinal.
c := context.Session.DB(context.DBName).C("processes")
err = c.Update(bson.M{"_id": proc.ID}, bson.M{"$set": bson.M{"running-ordinal": proc.RunningOrinal}})
if err != nil {
err = errors.Wrap(err, "mongo update failed")
return
}
// Check if the ordinal was accepted.
accepted, err = context.isOrdinalAccepted(proc.Type, proc.ID, proc.RunningOrinal)
if err != nil {
err = errors.Wrap(err, "is ordinal accepted failed")
return
}
}
return proc, nil
}
func (context Context) findCandidateOrdinal(processType string) (ordinal int, err error) {
c := context.Session.DB(context.DBName).C("processes")
query := bson.M{"type": processType, "status": bson.M{"$ne": types.ProcTerminated}}
var ordinals []struct {
Ordinal int `bson:"running-ordinal"`
}
q := c.Find(query).Select(bson.M{"running-ordinal": 1}).Sort("running-ordinal")
err = q.All(&ordinals)
if err != nil {
return -1, err
}
ordinal = 1
for i := range ordinals {
if ordinal == ordinals[i].Ordinal {
ordinal++
}
}
return
}
func (context Context) isOrdinalAccepted(processType string, processID bson.ObjectId, ordinal int) (result bool, err error) {
c := context.Session.DB(context.DBName).C("processes")
query := bson.M{"type": processType, "status": bson.M{"$ne": types.ProcTerminated}, "running-ordinal": ordinal}
var ordinals []struct {
ObjectID bson.ObjectId `bson:"_id"`
Ordinal int `bson:"running-ordinal"`
}
q := c.Find(query).Select(bson.M{"_id": 1, "running-ordinal": 1})
err = q.All(&ordinals)
if err != nil {
return false, err
}
result = true
for i := range ordinals {
if processID > ordinals[i].ObjectID {
result = false
break
}
}
return
}
// UpdateProcess updates the information about a given process.
func (context Context) UpdateProcess(id bson.ObjectId, updates map[string]interface{}) (result types.Process, err error) {
// This action is only permitted for the root user.
if context.User.IsRoot() == false {
err = types.ErrUnauthorized
return
}
// Build the update document. Validate values.
valueUpdates := bson.M{}
for k, v := range updates {
switch k {
case "status":
status := v.(string)
if status != types.ProcIdle && status != types.ProcWorking && status != types.ProcTerminated {
err = errors.Wrapf(ErrBadInput,
"value of status can be \"%s\", \"%s\" or \"%s\", but found \"%s\"",
types.ProcIdle, types.ProcWorking, types.ProcTerminated, status)
return
}
valueUpdates["status"] = status
case "last-keepalive":
valueUpdates["last-keepalive"] = v.(time.Time)
default:
err = errors.Wrap(ErrBadInput, "invalid value of parameter updates")
return
}
}
// If there were no updates, then we can skip this step.
if len(valueUpdates) > 0 {
c := context.Session.DB(context.DBName).C("processes")
err = c.Update(bson.M{"_id": id}, bson.M{"$set": valueUpdates})
if err != nil {
err = errors.Wrap(err, "mongo update failed")
return
}
}
// Get the updated process and update cache if needed.
result, err = context.GetProcessByID(id)
if err != nil {
err = errors.Wrap(err, "process get by ID failed")
return
}
return
}
// StartProcess starts a process of a given type and initializes all other fields automatically.
func (context Context) StartProcess(processType string) (result types.Process, err error) {
if processType != types.ProcController && processType != types.ProcScheduler && processType != types.ProcWorker {
panic("invalid processType")
}
var hostID string
hostID, err = os.Hostname()
if err != nil {
err = errors.Wrap(err, "get hostname from os failed")
return
}
var hostAddress string
hostAddress, err = getOutboundIP()
if err != nil {
//err = errors.Wrap(err, "get outbound ip failed")
//return
hostAddress = "localhost"
}
process := types.Process{
HostID: hostID,
HostAddress: hostAddress,
ProcessID: uint64(os.Getpid()),
Resource: "cpu", // TODO: Change this later.
Type: processType,
}
return context.CreateProcess(process)
}
// getOutboundIP returns the preferred outbound IP of this machine.
func getOutboundIP() (ip string, err error) {
var conn net.Conn
conn, err = net.Dial("udp", "8.8.8.8:80")
if err != nil {
return
}
defer conn.Close()
localAddr := conn.LocalAddr().(*net.UDPAddr)
ip = localAddr.IP.String()
return
}
// SetProcessStatus updates the process status to the given value.
func (context Context) SetProcessStatus(id bson.ObjectId, status string) (result types.Process, err error) {
if status != types.ProcIdle && status != types.ProcWorking && status != types.ProcTerminated {
panic("invalid status")
}
return context.UpdateProcess(id, F{"status": status})
}
// ProcessKeepalive updates the keepalive-time field of the process
// thus notifying the system that it is still running.
func (context Context) ProcessKeepalive(id bson.ObjectId) (err error) {
_, err = context.UpdateProcess(id, F{"last-keepalive": time.Now()})
return
}
// TerminateDeadProcesses goes through all processes that have stopped making keepalive updates
// and sets their status to terminated.
func (context Context) TerminateDeadProcesses(cutoffTime time.Time) (err error) {
// Terminate all dead processes and release all locks that they held.
selector := bson.M{
"last-keepalive": bson.M{"$lt": cutoffTime},
"status": bson.M{"$ne": types.ProcTerminated},
}
update := bson.M{
"$set": bson.M{"status": types.ProcTerminated},
}
change := mgo.Change{Update: update, ReturnNew: true}
found := true
for found {
var process types.Process
c := context.Session.DB(context.DBName).C("processes")
changeInfo, err := c.Find(selector).Apply(change, &process)
if err != nil && err != mgo.ErrNotFound {
return err
}
if changeInfo != nil && changeInfo.Updated > 0 {
// Release any locks that the terminated process held.
err = context.ReleaseDatasetLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseModuleLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseJobLockByProcess(process.ID)
if err != nil {
return err
}
err = context.ReleaseTaskLockByProcess(process.ID)
if err != nil {
return err
}
} else {
found = false
}
}
return nil
}
| GetProcesses | identifier_name |
intcode.rs | //! This module implements an IntCode interpreter.
use std::convert::TryFrom;
// The following terminology notes are taken from day 2 part 2
// - memory: the list of integers used when interpreting
// - address/position: the value at a given index into memory
// - opcode: mark the beginning of an instruction and denote the instruction
// - parameters: the values after an instruction used by the instruction
// - instruction pointer: the address of the current instruction
#[derive(Debug, PartialEq)]
enum OpCode {
Add = 1, // *(pc+1) + *(pc+2) => *(pc+3)
Multiply = 2, // *(pc+1) * *(pc+2) => *(pc+3)
ReadIn = 3, // store input to *(pc+1)
WriteOut = 4, // print value of *(pc+1) to output
JmpIfTrue = 5, // jump if *(pc+1) != 0 => ip = *(pc+2)
JmpIfFalse = 6, // jump if *(pc+1) == 0 => ip = *(pc+2)
LessThan = 7, // if *(pc+1) < *(pc+2) => *(pc+3) = 1, else 0
Equals = 8, // if *(pc+1) == *(pc+2) => *(pc+3) = 1, else 0
Halt = 99,
}
impl TryFrom<isize> for OpCode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> {
match num {
1 => Ok(Self::Add),
2 => Ok(Self::Multiply),
3 => Ok(Self::ReadIn),
4 => Ok(Self::WriteOut),
5 => Ok(Self::JmpIfTrue),
6 => Ok(Self::JmpIfFalse),
7 => Ok(Self::LessThan),
8 => Ok(Self::Equals),
99 => Ok(Self::Halt),
_ => Err("invalid opcode value"),
}
}
}
#[derive(Debug, PartialEq)]
enum AddrMode {
Pos = 0,
Imm = 1,
}
impl TryFrom<isize> for AddrMode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> {
match num {
0 => Ok(Self::Pos),
1 => Ok(Self::Imm),
_ => Err("invalid address mode value"),
}
}
}
#[derive(Debug)]
enum IPChange {
Delta(isize),
New(usize),
Halt,
}
/// Parse instruction will take a full instruction, and split it into the original instruction
/// along with addressing modes for each argument.
fn parse_instruction(word: isize) -> Result<(OpCode, AddrMode, AddrMode, AddrMode), &'static str> {
if word <= 0 {
return Err("instruction word must be greater than zero");
}
Ok((
OpCode::try_from(word % 100)?, // first two digits are op
AddrMode::try_from(word / 100 % 10)?, // 100s place
AddrMode::try_from(word / 1000 % 10)?, // 1000s place
AddrMode::try_from(word / 10000 % 10)?, // 10000s place
))
}
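// Worked example: 1002 splits as 1002 % 100 = 02 (Multiply),
// 1002 / 100 % 10 = 0 (Pos), 1002 / 1000 % 10 = 1 (Imm),
// 1002 / 10000 % 10 = 0 (Pos) -- matching the day 5 write-up.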
/// Trait is used by interpret for reading information interactively
pub trait Input {
fn get_isize(&mut self) -> isize;
}
/// Trait is used by `interpret` for writing information interactively
pub trait Output {
fn write_isize(&mut self, val: isize) -> ();
}
// Implementations for Input trait
impl Input for () {
fn get_isize(&mut self) -> isize {
panic!("Program requested input, but input source was ()");
}
}
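// A bare isize acts as a constant input source: every call to get_isize
// returns the same value, which is enough for the single-read test programs.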
impl Input for isize {
fn get_isize(&mut self) -> isize {
*self
}
}
// Implementations for Output trait
impl Output for () {
fn write_isize(&mut self, _val: isize) -> () {
panic!("Program attempted to write value, but out was ()");
}
}
impl Output for &mut Vec<isize> {
fn write_isize(&mut self, val: isize) -> () {
self.push(val)
}
}
/// Interpret array as an IntCode program.
/// | /// `mem` is the initial machine memory state, it is modified during the run
///
/// Will panic if it encounters an unknown opcode
pub fn interpret(mut mem: &mut [isize], mut input: impl Input, mut output: impl Output) -> isize {
let mut ip: usize = 0;
loop {
match step(&mut mem, ip, &mut input, &mut output) {
IPChange::Delta(delta) => ip = (ip as isize + delta) as usize,
IPChange::New(new) => ip = new,
IPChange::Halt => break,
}
}
mem[0]
}
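// Usage sketch (mirrors the day 2 example in the tests below):
//
// let mut mem = vec![1, 0, 0, 0, 99];
// assert_eq!(interpret(&mut mem, (), ()), 2); // mem is now [2, 0, 0, 0, 99]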
fn step(
mem: &mut [isize],
ip: usize,
input: &mut impl Input,
output: &mut impl Output,
) -> IPChange {
use AddrMode::*;
use OpCode::*;
let (op, addr1, addr2, addr3) = match parse_instruction(mem[ip]) {
Ok(val) => val,
Err(err) => {
println!(
"State:\n\tIP: {}\n\tVals: {:?}, {:?}, {:?}, {:?}",
ip,
mem.get(ip),
mem.get(ip + 1),
mem.get(ip + 2),
mem.get(ip + 3)
);
panic!("Encountered unrecoverable error: {}", err);
}
};
// placing Halt check here so that args can be extracted without duplicating their code all
// over the place
if op == Halt {
return IPChange::Halt;
}
// HACK: this whole block is a hack, need to wrap memory up in a new type and provide accessors
// that understand addressing modes
let arg1 = match addr1 {
Imm => mem.get(ip + 1),
Pos => mem.get(*mem.get(ip + 1).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
let arg2 = match addr2 {
Imm => mem.get(ip + 2),
Pos => mem.get(*mem.get(ip + 2).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
match op {
Add => {
mem[mem[ip + 3] as usize] = arg1 + arg2;
IPChange::Delta(4)
}
Multiply => {
mem[mem[ip + 3] as usize] = arg1 * arg2;
IPChange::Delta(4)
}
ReadIn => {
mem[mem[ip + 1] as usize] = input.get_isize();
IPChange::Delta(2)
}
WriteOut => {
output.write_isize(mem[mem[ip + 1] as usize]);
IPChange::Delta(2)
}
JmpIfTrue => {
if *arg1 != 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
JmpIfFalse => {
if *arg1 == 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
LessThan => {
mem[mem[ip + 3] as usize] = if arg1 < arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Equals => {
mem[mem[ip + 3] as usize] = if arg1 == arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Halt => unreachable!(),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn interpret_day2_examples() {
let mut programs = vec![
vec![1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50],
vec![1, 0, 0, 0, 99],
vec![2, 3, 0, 3, 99],
vec![2, 4, 4, 5, 99, 0],
vec![1, 1, 1, 4, 99, 5, 6, 0, 99],
];
let outputs = vec![
vec![3500, 9, 10, 70, 2, 3, 11, 0, 99, 30, 40, 50],
vec![2, 0, 0, 0, 99],
vec![2, 3, 0, 6, 99],
vec![2, 4, 4, 5, 99, 9801],
vec![30, 1, 1, 4, 2, 5, 6, 0, 99],
];
for i in 0..programs.len() {
assert_eq!(interpret(&mut programs[i], (), ()), outputs[i][0]);
assert_eq!(programs[i], outputs[i]);
}
}
#[test]
fn test_parse_instruction() {
use AddrMode::*;
use OpCode::*;
type Output = (OpCode, AddrMode, AddrMode, AddrMode);
fn eq(left: Output, right: Output) -> bool {
left.0 == right.0 && left.1 == right.1 && left.2 == right.2 && left.3 == right.3
}
// from day 5 examples
assert!(eq(
parse_instruction(1002).unwrap(),
(Multiply, Pos, Imm, Pos)
));
// synthetic
assert!(eq(parse_instruction(2).unwrap(), (Multiply, Pos, Pos, Pos)));
assert!(eq(parse_instruction(11101).unwrap(), (Add, Imm, Imm, Imm)));
assert!(eq(parse_instruction(10101).unwrap(), (Add, Imm, Pos, Imm)));
assert!(eq(
parse_instruction(104).unwrap(),
(WriteOut, Imm, Pos, Pos)
));
assert!(eq(
parse_instruction(10003).unwrap(),
(ReadIn, Pos, Pos, Imm)
));
}
#[test]
fn day5_snippets() {
// This tests immediate and positional addressing and negative immediate support
// Should: find (100 + -1), store result @4
let mut simple_prog = vec![1101, 100, -1, 4, 0];
interpret(&mut simple_prog, (), ());
assert_eq!(simple_prog[4], 99);
// This should save whatever it gets from input to @0, then print it back out
let arb_input = 10346;
let mut output = Vec::new();
let mut simple_io = vec![3, 0, 4, 0, 99];
interpret(&mut simple_io, arb_input, &mut output);
println!("{:?}", output[0]);
println!("{:?}", simple_io);
assert_eq!(simple_io[0], arb_input);
assert_eq!(output[0], arb_input);
}
#[test]
fn day5_jump_tests() {
// These programs compare the input to 8, outputting 1 if eq or lt, 0 otherwise
// they use different methods for each
// test eq
let progs_eq_to_eight = vec![
vec![3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], // positional
vec![3, 3, 1108, -1, 8, 3, 4, 3, 99], // immediate
];
for (input, exp_out) in vec![(0, 0), (8, 1), (-8, 0), (10, 0)] {
for i in 0..progs_eq_to_eight.len() {
let mut prog = progs_eq_to_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0]);
}
}
// test lt
let progs_lt_eight = vec![
vec![3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8], // lt positional
vec![3, 3, 1107, -1, 8, 3, 4, 3, 99], // lt immediate
];
for (input, exp_out) in vec![(0, 1), (-1, 1), (8, 0), (10, 0)] {
for i in 0..progs_lt_eight.len() {
let mut prog = progs_lt_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
// test jump
let jump_progs = vec![
vec![3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9], // positional
vec![3, 3, 1105, -1, 9, 1101, 0, 0, 12, 4, 12, 99, 1], // immediate
];
for (input, exp_out) in vec![(0, 0), (-1, 1), (8, 1), (10, 1)] {
for i in 0..jump_progs.len() {
let mut prog = jump_progs[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
#[test]
fn day5_large_test() {
let jmp_prog = vec![
3, 21, 1008, 21, 8, 20, 1005, 20, 22, 107, 8, 21, 20, 1006, 20, 31, 1106, 0, 36, 98, 0,
0, 1002, 21, 125, 20, 4, 20, 1105, 1, 46, 104, 999, 1105, 1, 46, 1101, 1000, 1, 20, 4,
20, 1105, 1, 46, 98, 99,
];
let in_outs = vec![
(-1, 999),
(0, 999),
(5, 999),
(8, 1000),
(9, 1001),
(14240, 1001),
];
let mut output = Vec::new();
for (input, exp_out) in in_outs.into_iter() {
println!("{:?}", input);
let mut prog = jmp_prog.clone();
output.clear();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
} | random_line_split | |
intcode.rs | //! This module implements an IntCode interpreter.
use std::convert::TryFrom;
// The following terminology notes are taken from day 2 part 2
// - memory: the list of integers used when interpreting
// - address/position: the value at a given index into memory
// - opcode: mark the beginning of an instruction and denote the instruction
// - parameters: the values after an instruction used by the instruction
// - instruction pointer: the address of the current instruction
#[derive(Debug, PartialEq)]
enum OpCode {
Add = 1, // *(pc+1) + *(pc+2) => *(pc+3)
Multiply = 2, // *(pc+1) * *(pc+2) => *(pc+3)
ReadIn = 3, // store input to *(pc+1)
WriteOut = 4, // print value of *(pc+1) to output
JmpIfTrue = 5, // jump if *(pc+1) != 0 => ip = *(pc+2)
JmpIfFalse = 6, // jump if *(pc+1) == 0 => ip = *(pc+2)
LessThan = 7, // if *(pc+1) < *(pc+2) => *(pc+3) = 1, else 0
Equals = 8, // if *(pc+1) == *(pc+2) => *(pc+3) = 1, else 0
Halt = 99,
}
impl TryFrom<isize> for OpCode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> |
}
#[derive(Debug, PartialEq)]
enum AddrMode {
Pos = 0,
Imm = 1,
}
impl TryFrom<isize> for AddrMode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> {
match num {
0 => Ok(Self::Pos),
1 => Ok(Self::Imm),
_ => Err("invalid address mode value"),
}
}
}
#[derive(Debug)]
enum IPChange {
Delta(isize),
New(usize),
Halt,
}
/// Parse instruction will take a full instruction, and split it into the original instruction
/// along with addressing modes for each argument.
fn parse_instruction(word: isize) -> Result<(OpCode, AddrMode, AddrMode, AddrMode), &'static str> {
if word <= 0 {
return Err("instruction word must be greater than zero");
}
Ok((
OpCode::try_from(word % 100)?, // first two digits are op
AddrMode::try_from(word / 100 % 10)?, // 100s place
AddrMode::try_from(word / 1000 % 10)?, // 1000s place
AddrMode::try_from(word / 10000 % 10)?, // 10000s place
))
}
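// For example, decoding the word 1002 with the divisions above:
// 1002 % 100 == 2 (Multiply), 1002 / 100 % 10 == 0 (Pos),
// 1002 / 1000 % 10 == 1 (Imm), and 1002 / 10000 % 10 == 0 (Pos),
// so parse_instruction(1002) == Ok((Multiply, Pos, Imm, Pos)).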
/// Trait is used by interpret for reading information interactively
pub trait Input {
fn get_isize(&mut self) -> isize;
}
/// Trait is used by `interpret` for writing information interactively
pub trait Output {
fn write_isize(&mut self, val: isize) -> ();
}
// Implementations for Input trait
impl Input for () {
fn get_isize(&mut self) -> isize {
panic!("Program requested input, but input source was ()");
}
}
impl Input for isize {
fn get_isize(&mut self) -> isize {
*self
}
}
// Implementations for Output trait
impl Output for () {
fn write_isize(&mut self, _val: isize) -> () {
panic!("Program attempted to write value, but out was ()");
}
}
impl Output for &mut Vec<isize> {
fn write_isize(&mut self, val: isize) -> () {
self.push(val)
}
}
/// Interpret array as an IntCode program.
///
/// `mem` is the initial machine memory state, it is modified during the run
///
/// Will panic if it encounters an unknown opcode
pub fn interpret(mut mem: &mut [isize], mut input: impl Input, mut output: impl Output) -> isize {
let mut ip: usize = 0;
loop {
match step(&mut mem, ip, &mut input, &mut output) {
IPChange::Delta(delta) => ip = (ip as isize + delta) as usize,
IPChange::New(new) => ip = new,
IPChange::Halt => break,
}
}
mem[0]
}
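// A minimal usage sketch, mirroring the echo program in the tests below:
//
//     let mut prog = vec![3, 0, 4, 0, 99];
//     let mut out: Vec<isize> = Vec::new();
//     let result = interpret(&mut prog, 7isize, &mut out);
//     assert_eq!(out, vec![7]); // the input value is echoed to output
//     assert_eq!(result, 7);    // interpret returns mem[0] after halting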
fn step(
mem: &mut [isize],
ip: usize,
input: &mut impl Input,
output: &mut impl Output,
) -> IPChange {
use AddrMode::*;
use OpCode::*;
let (op, addr1, addr2, _addr3) = match parse_instruction(mem[ip]) { // write-target mode is currently unused
Ok(val) => val,
Err(err) => {
println!(
"State:\n\tIP: {}\n\tVals: {:?}, {:?}, {:?}, {:?}",
ip,
mem.get(ip),
mem.get(ip + 1),
mem.get(ip + 2),
mem.get(ip + 3)
);
panic!("Encountered unrecoverable error: {}", err);
}
};
// placing Halt check here so that args can be extracted without duplicating their code all
// over the place
if op == Halt {
return IPChange::Halt;
}
// HACK: this whole block is a hack, need to wrap memory up in a new type and provide accessors
// that understand addressing modes
let arg1 = match addr1 {
Imm => mem.get(ip + 1),
Pos => mem.get(*mem.get(ip + 1).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
let arg2 = match addr2 {
Imm => mem.get(ip + 2),
Pos => mem.get(*mem.get(ip + 2).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
match op {
Add => {
mem[mem[ip + 3] as usize] = arg1 + arg2;
IPChange::Delta(4)
}
Multiply => {
mem[mem[ip + 3] as usize] = arg1 * arg2;
IPChange::Delta(4)
}
ReadIn => {
mem[mem[ip + 1] as usize] = input.get_isize();
IPChange::Delta(2)
}
WriteOut => {
output.write_isize(mem[mem[ip + 1] as usize]);
IPChange::Delta(2)
}
JmpIfTrue => {
if *arg1 != 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
JmpIfFalse => {
if *arg1 == 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
LessThan => {
mem[mem[ip + 3] as usize] = if arg1 < arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Equals => {
mem[mem[ip + 3] as usize] = if arg1 == arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Halt => unreachable!(),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn interpret_day2_examples() {
let mut programs = vec![
vec![1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50],
vec![1, 0, 0, 0, 99],
vec![2, 3, 0, 3, 99],
vec![2, 4, 4, 5, 99, 0],
vec![1, 1, 1, 4, 99, 5, 6, 0, 99],
];
let outputs = vec![
vec![3500, 9, 10, 70, 2, 3, 11, 0, 99, 30, 40, 50],
vec![2, 0, 0, 0, 99],
vec![2, 3, 0, 6, 99],
vec![2, 4, 4, 5, 99, 9801],
vec![30, 1, 1, 4, 2, 5, 6, 0, 99],
];
for i in 0..programs.len() {
assert_eq!(interpret(&mut programs[i], (), ()), outputs[i][0]);
assert_eq!(programs[i], outputs[i]);
}
}
#[test]
fn test_parse_instruction() {
use AddrMode::*;
use OpCode::*;
type Output = (OpCode, AddrMode, AddrMode, AddrMode);
fn eq(left: Output, right: Output) -> bool {
left.0 == right.0 && left.1 == right.1 && left.2 == right.2 && left.3 == right.3
}
// from day 5 examples
assert!(eq(
parse_instruction(1002).unwrap(),
(Multiply, Pos, Imm, Pos)
));
// synthetic
assert!(eq(parse_instruction(2).unwrap(), (Multiply, Pos, Pos, Pos)));
assert!(eq(parse_instruction(11101).unwrap(), (Add, Imm, Imm, Imm)));
assert!(eq(parse_instruction(10101).unwrap(), (Add, Imm, Pos, Imm)));
assert!(eq(
parse_instruction(104).unwrap(),
(WriteOut, Imm, Pos, Pos)
));
assert!(eq(
parse_instruction(10003).unwrap(),
(ReadIn, Pos, Pos, Imm)
));
}
#[test]
fn day5_snippets() {
// This tests immediate and positional addressing and negative immediate support
// Should: find (100 + -1), store result @4
let mut simple_prog = vec![1101, 100, -1, 4, 0];
interpret(&mut simple_prog, (), ());
assert_eq!(simple_prog[4], 99);
// This should save whatever it gets from input to @0, then print it back out
let arb_input = 10346;
let mut output = Vec::new();
let mut simple_io = vec![3, 0, 4, 0, 99];
interpret(&mut simple_io, arb_input, &mut output);
println!("{:?}", output[0]);
println!("{:?}", simple_io);
assert_eq!(simple_io[0], arb_input);
assert_eq!(output[0], arb_input);
}
#[test]
fn day5_jump_tests() {
// These programs compare the input to 8, outputting 1 if eq or lt, 0 otherwise
// they use different methods for each
// test eq
let progs_eq_to_eight = vec![
vec![3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], // positional
vec![3, 3, 1108, -1, 8, 3, 4, 3, 99], // immediate
];
for (input, exp_out) in vec![(0, 0), (8, 1), (-8, 0), (10, 0)] {
for i in 0..progs_eq_to_eight.len() {
let mut prog = progs_eq_to_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0]);
}
}
// test lt
let progs_lt_eight = vec![
vec![3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8], // lt positional
vec![3, 3, 1107, -1, 8, 3, 4, 3, 99], // lt immediate
];
for (input, exp_out) in vec![(0, 1), (-1, 1), (8, 0), (10, 0)] {
for i in 0..progs_lt_eight.len() {
let mut prog = progs_lt_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
// test jump
let jump_progs = vec![
vec![3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9], // positional
vec![3, 3, 1105, -1, 9, 1101, 0, 0, 12, 4, 12, 99, 1], // immediate
];
for (input, exp_out) in vec![(0, 0), (-1, 1), (8, 1), (10, 1)] {
for i in 0..jump_progs.len() {
let mut prog = jump_progs[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
#[test]
fn day5_large_test() {
let jmp_prog = vec![
3, 21, 1008, 21, 8, 20, 1005, 20, 22, 107, 8, 21, 20, 1006, 20, 31, 1106, 0, 36, 98, 0,
0, 1002, 21, 125, 20, 4, 20, 1105, 1, 46, 104, 999, 1105, 1, 46, 1101, 1000, 1, 20, 4,
20, 1105, 1, 46, 98, 99,
];
let in_outs = vec![
(-1, 999),
(0, 999),
(5, 999),
(8, 1000),
(9, 1001),
(14240, 1001),
];
let mut output = Vec::new();
for (input, exp_out) in in_outs.into_iter() {
println!("{:?}", input);
let mut prog = jmp_prog.clone();
output.clear();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
| {
match num {
1 => Ok(Self::Add),
2 => Ok(Self::Multiply),
3 => Ok(Self::ReadIn),
4 => Ok(Self::WriteOut),
5 => Ok(Self::JmpIfTrue),
6 => Ok(Self::JmpIfFalse),
7 => Ok(Self::LessThan),
8 => Ok(Self::Equals),
99 => Ok(Self::Halt),
_ => Err("invalid opcode value"),
}
} | identifier_body |
intcode.rs | //! This module implements an IntCode interpreter.
use std::convert::TryFrom;
// The following terminology notes are taken from day 2 part 2
// - memory: the list of integers used when interpreting
// - address/position: the value at a given index into memory
// - opcode: mark the beginning of an instruction and denote the instruction
// - parameters: the values after an instruction used by the instruction
// - instruction pointer: the address of the current instruction
#[derive(Debug, PartialEq)]
enum OpCode {
Add = 1, // *(pc+1) + *(pc+2) => *(pc+3)
Multiply = 2, // *(pc+1) * *(pc+2) => *(pc+3)
ReadIn = 3, // store input to *(pc+1)
WriteOut = 4, // print value of *(pc+1) to output
JmpIfTrue = 5, // jump if *(pc+1) != 0 => ip = *(pc+2)
JmpIfFalse = 6, // jump if *(pc+1) == 0 => ip = *(pc+2)
LessThan = 7, // if *(pc+1) < *(pc+2) => *(pc+3) = 1, else 0
Equals = 8, // if *(pc+1) == *(pc+2) => *(pc+3) = 1, else 0
Halt = 99,
}
impl TryFrom<isize> for OpCode {
type Error = &'static str;
fn | (num: isize) -> Result<Self, Self::Error> {
match num {
1 => Ok(Self::Add),
2 => Ok(Self::Multiply),
3 => Ok(Self::ReadIn),
4 => Ok(Self::WriteOut),
5 => Ok(Self::JmpIfTrue),
6 => Ok(Self::JmpIfFalse),
7 => Ok(Self::LessThan),
8 => Ok(Self::Equals),
99 => Ok(Self::Halt),
_ => Err("invalid opcode value"),
}
}
}
#[derive(Debug, PartialEq)]
enum AddrMode {
Pos = 0,
Imm = 1,
}
impl TryFrom<isize> for AddrMode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> {
match num {
0 => Ok(Self::Pos),
1 => Ok(Self::Imm),
_ => Err("invalid address mode value"),
}
}
}
#[derive(Debug)]
enum IPChange {
Delta(isize),
New(usize),
Halt,
}
/// Parse instruction will take a full instruction, and split it into the original instruction
/// along with addressing modes for each argument.
fn parse_instruction(word: isize) -> Result<(OpCode, AddrMode, AddrMode, AddrMode), &'static str> {
if word <= 0 {
return Err("instruction word must be greater than zero");
}
Ok((
OpCode::try_from(word % 100)?, // first two digits are op
AddrMode::try_from(word / 100 % 10)?, // 100s place
AddrMode::try_from(word / 1000 % 10)?, // 1000s place
AddrMode::try_from(word / 10000 % 10)?, // 10000s place
))
}
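// For example, decoding the word 1002 with the divisions above:
// 1002 % 100 == 2 (Multiply), 1002 / 100 % 10 == 0 (Pos),
// 1002 / 1000 % 10 == 1 (Imm), and 1002 / 10000 % 10 == 0 (Pos),
// so parse_instruction(1002) == Ok((Multiply, Pos, Imm, Pos)).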
/// Trait is used by interpret for reading information interactively
pub trait Input {
fn get_isize(&mut self) -> isize;
}
/// Trait is used by `interpret` for writing information interactively
pub trait Output {
fn write_isize(&mut self, val: isize) -> ();
}
// Implementations for Input trait
impl Input for () {
fn get_isize(&mut self) -> isize {
panic!("Program requested input, but input source was ()");
}
}
impl Input for isize {
fn get_isize(&mut self) -> isize {
*self
}
}
// Implementations for Output trait
impl Output for () {
fn write_isize(&mut self, _val: isize) -> () {
panic!("Program attempted to write value, but out was ()");
}
}
impl Output for &mut Vec<isize> {
fn write_isize(&mut self, val: isize) -> () {
self.push(val)
}
}
/// Interpret array as an IntCode program.
///
/// `mem` is the initial machine memory state, it is modified during the run
///
/// Will panic if it encounters an unknown opcode
pub fn interpret(mut mem: &mut [isize], mut input: impl Input, mut output: impl Output) -> isize {
let mut ip: usize = 0;
loop {
match step(&mut mem, ip, &mut input, &mut output) {
IPChange::Delta(delta) => ip = (ip as isize + delta) as usize,
IPChange::New(new) => ip = new,
IPChange::Halt => break,
}
}
mem[0]
}
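// A minimal usage sketch, mirroring the echo program in the tests below:
//
//     let mut prog = vec![3, 0, 4, 0, 99];
//     let mut out: Vec<isize> = Vec::new();
//     let result = interpret(&mut prog, 7isize, &mut out);
//     assert_eq!(out, vec![7]); // the input value is echoed to output
//     assert_eq!(result, 7);    // interpret returns mem[0] after halting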
fn step(
mem: &mut [isize],
ip: usize,
input: &mut impl Input,
output: &mut impl Output,
) -> IPChange {
use AddrMode::*;
use OpCode::*;
let (op, addr1, addr2, _addr3) = match parse_instruction(mem[ip]) { // write-target mode is currently unused
Ok(val) => val,
Err(err) => {
println!(
"State:\n\tIP: {}\n\tVals: {:?}, {:?}, {:?}, {:?}",
ip,
mem.get(ip),
mem.get(ip + 1),
mem.get(ip + 2),
mem.get(ip + 3)
);
panic!("Encountered unrecoverable error: {}", err);
}
};
// placing Halt check here so that args can be extracted without duplicating their code all
// over the place
if op == Halt {
return IPChange::Halt;
}
// HACK: this whole block is a hack, need to wrap memory up in a new type and provide accessors
// that understand addressing modes
let arg1 = match addr1 {
Imm => mem.get(ip + 1),
Pos => mem.get(*mem.get(ip + 1).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
let arg2 = match addr2 {
Imm => mem.get(ip + 2),
Pos => mem.get(*mem.get(ip + 2).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
match op {
Add => {
mem[mem[ip + 3] as usize] = arg1 + arg2;
IPChange::Delta(4)
}
Multiply => {
mem[mem[ip + 3] as usize] = arg1 * arg2;
IPChange::Delta(4)
}
ReadIn => {
mem[mem[ip + 1] as usize] = input.get_isize();
IPChange::Delta(2)
}
WriteOut => {
output.write_isize(mem[mem[ip + 1] as usize]);
IPChange::Delta(2)
}
JmpIfTrue => {
if *arg1 != 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
JmpIfFalse => {
if *arg1 == 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
LessThan => {
mem[mem[ip + 3] as usize] = if arg1 < arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Equals => {
mem[mem[ip + 3] as usize] = if arg1 == arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Halt => unreachable!(),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn interpret_day2_examples() {
let mut programs = vec![
vec![1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50],
vec![1, 0, 0, 0, 99],
vec![2, 3, 0, 3, 99],
vec![2, 4, 4, 5, 99, 0],
vec![1, 1, 1, 4, 99, 5, 6, 0, 99],
];
let outputs = vec![
vec![3500, 9, 10, 70, 2, 3, 11, 0, 99, 30, 40, 50],
vec![2, 0, 0, 0, 99],
vec![2, 3, 0, 6, 99],
vec![2, 4, 4, 5, 99, 9801],
vec![30, 1, 1, 4, 2, 5, 6, 0, 99],
];
for i in 0..programs.len() {
assert_eq!(interpret(&mut programs[i], (), ()), outputs[i][0]);
assert_eq!(programs[i], outputs[i]);
}
}
#[test]
fn test_parse_instruction() {
use AddrMode::*;
use OpCode::*;
type Output = (OpCode, AddrMode, AddrMode, AddrMode);
fn eq(left: Output, right: Output) -> bool {
left.0 == right.0 && left.1 == right.1 && left.2 == right.2 && left.3 == right.3
}
// from day 5 examples
assert!(eq(
parse_instruction(1002).unwrap(),
(Multiply, Pos, Imm, Pos)
));
// synthetic
assert!(eq(parse_instruction(2).unwrap(), (Multiply, Pos, Pos, Pos)));
assert!(eq(parse_instruction(11101).unwrap(), (Add, Imm, Imm, Imm)));
assert!(eq(parse_instruction(10101).unwrap(), (Add, Imm, Pos, Imm)));
assert!(eq(
parse_instruction(104).unwrap(),
(WriteOut, Imm, Pos, Pos)
));
assert!(eq(
parse_instruction(10003).unwrap(),
(ReadIn, Pos, Pos, Imm)
));
}
#[test]
fn day5_snippets() {
// This tests immediate and positional addressing and negative immediate support
// Should: find (100 + -1), store result @4
let mut simple_prog = vec![1101, 100, -1, 4, 0];
interpret(&mut simple_prog, (), ());
assert_eq!(simple_prog[4], 99);
// This should save whatever it gets from input to @0, then print it back out
let arb_input = 10346;
let mut output = Vec::new();
let mut simple_io = vec![3, 0, 4, 0, 99];
interpret(&mut simple_io, arb_input, &mut output);
println!("{:?}", output[0]);
println!("{:?}", simple_io);
assert_eq!(simple_io[0], arb_input);
assert_eq!(output[0], arb_input);
}
#[test]
fn day5_jump_tests() {
// These programs compare the input to 8, outputting 1 if eq or lt, 0 otherwise
// they use different methods for each
// test eq
let progs_eq_to_eight = vec![
vec![3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], // positional
vec![3, 3, 1108, -1, 8, 3, 4, 3, 99], // immediate
];
for (input, exp_out) in vec![(0, 0), (8, 1), (-8, 0), (10, 0)] {
for i in 0..progs_eq_to_eight.len() {
let mut prog = progs_eq_to_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0]);
}
}
// test lt
let progs_lt_eight = vec![
vec![3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8], // lt positional
vec![3, 3, 1107, -1, 8, 3, 4, 3, 99], // lt immediate
];
for (input, exp_out) in vec![(0, 1), (-1, 1), (8, 0), (10, 0)] {
for i in 0..progs_lt_eight.len() {
let mut prog = progs_lt_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
// test jump
let jump_progs = vec![
vec![3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9], // positional
vec![3, 3, 1105, -1, 9, 1101, 0, 0, 12, 4, 12, 99, 1], // immediate
];
for (input, exp_out) in vec![(0, 0), (-1, 1), (8, 1), (10, 1)] {
for i in 0..jump_progs.len() {
let mut prog = jump_progs[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
#[test]
fn day5_large_test() {
let jmp_prog = vec![
3, 21, 1008, 21, 8, 20, 1005, 20, 22, 107, 8, 21, 20, 1006, 20, 31, 1106, 0, 36, 98, 0,
0, 1002, 21, 125, 20, 4, 20, 1105, 1, 46, 104, 999, 1105, 1, 46, 1101, 1000, 1, 20, 4,
20, 1105, 1, 46, 98, 99,
];
let in_outs = vec![
(-1, 999),
(0, 999),
(5, 999),
(8, 1000),
(9, 1001),
(14240, 1001),
];
let mut output = Vec::new();
for (input, exp_out) in in_outs.into_iter() {
println!("{:?}", input);
let mut prog = jmp_prog.clone();
output.clear();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
| try_from | identifier_name |
main.go | package main
import (
"bufio"
"bytes"
"database/sql"
"encoding/json"
"fmt"
"io"
"log"
"math/rand"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
_ "github.com/lib/pq"
)
var (
db *sql.DB
config tomlConfig
clientConn *net.TCPConn
closeSignChan = make(chan struct{})
requestChan = make(chan Notification, 1)
gLogFile *os.File
LoginQQ int64
)
func main() {
rand.Seed(42)
var err error
logFilename := "robirt.log"
gLogFile, err = os.OpenFile(logFilename, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
fmt.Printf("open file error=%s\r\n", err.Error())
os.Exit(-1)
}
writers := []io.Writer{
gLogFile,
os.Stdout,
}
fileAndStdoutWriter := io.MultiWriter(writers...)
logger = log.New(fileAndStdoutWriter, "", log.Ldate|log.Ltime|log.Lshortfile)
if _, err := toml.DecodeFile("config.toml", &config); err != nil {
logger.Println(err)
return
}
db, err = sql.Open("postgres", config.Database.DBName)
if err != nil {
panic(err)
}
defer db.Close()
db.SetConnMaxLifetime(10 * time.Second)
db.SetMaxIdleConns(4)
db.SetMaxOpenConns(20)
go serverStart()
go func() {
localAddress, err := net.ResolveTCPAddr("tcp4", "127.0.0.1:7008")
if err != nil {
logger.Fatalf("ResolveTCPAddr Error: %v\n", err)
}
ln, err := net.ListenTCP("tcp4", localAddress)
if err != nil {
logger.Fatalf("Failed to listening server: %v", err)
}
logger.Println("Listening server on tcp:127.0.0.1:7008")
for {
conn, err := ln.Accept()
if err != nil {
logger.Printf("Accept Error: %v\n", err)
continue
}
go handleRequest(conn)
}
}()
go func() {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
cmd(scanner.Text())
}
}()
<-closeSignChan
}
func cmd(cmd string) {
if cmd == "exit" {
close(closeSignChan)
return
} else if cmd == "init" {
getLoginQQ()
getGroupList()
return
} else if strings.HasPrefix(cmd, "random:") {
var i int32 = 0
var target = rand.Int31n(300)
groups.Range(func(key, value interface{}) bool {
if i == target {
sendGroupMessage(value.(Group).GroupNum, cmd[7:])
return false
}
i++
return true
})
return
} else if strings.HasPrefix(cmd, "level:") {
groupNum, err := strconv.ParseInt(strings.TrimSpace(cmd[6:]), 10, 64)
if err != nil {
fmt.Println(err)
return
}
if _, ok := groups.Load(groupNum); ok {
leaveGroup(groupNum)
getGroupList()
}
return
}
s := strings.SplitN(strings.TrimSpace(cmd), ":", 2)
if len(s) == 2 {
groupNum, err := strconv.ParseInt(strings.TrimSpace(s[0]), 10, 64)
if err != nil {
fmt.Println(err)
return
}
if g, ok := groups.Load(groupNum); ok {
sendGroupMessage(g.(Group).GroupNum, s[1])
} else {
fmt.Println("group not found!")
}
}
}
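// Console commands accepted by cmd above, derived from its branches:
//
//   exit             - close closeSignChan and shut the process down
//   init             - re-fetch the login QQ and the group list
//   random:<message> - send <message> to one randomly chosen group
//   level:<groupnum> - leave that group, then refresh the group list
//   <groupnum>:<msg> - send <msg> to that group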
func handleRequest(conn net.Conn) {
defer conn.Close()
tmpbyte := make([]byte, 0, 2<<22) // zero length, pre-sized capacity for reassembling split frames
buf := make([]byte, 2<<20)
for {
// Read the incoming connection into the buffer.
reqLen, err := conn.Read(buf)
if err != nil {
logger.Println("Error reading:", err.Error())
return
}
if reqLen == 0 {
continue
}
scanner := bufio.NewScanner(bytes.NewReader(buf[:reqLen]))
for scanner.Scan() {
b := bytes.TrimSpace(scanner.Bytes())
if len(b) == 0 {
logger.Println("len(b)==0", string(b))
continue
} else if len(tmpbyte) > 0 && len(tmpbyte)+len(b) < 4096 {
b = append(tmpbyte, b...)
logger.Printf("retry b := %s\n", string(b))
} else {
tmpbyte = tmpbyte[:0]
}
var js Notification
err = json.Unmarshal(b, &js)
if err != nil {
if serr, ok := err.(*json.SyntaxError); ok {
logger.Printf("%s, %s\n", serr.Error(), string(b))
if errStr := serr.Error(); strings.HasPrefix(errStr, "invalid character") && strings.HasSuffix(errStr, "in string literal") {
tmpbyte = tmpbyte[:0]
continue
}
tmpbyte = append(tmpbyte, b...)
} else {
logger.Printf("%s, %s\n", err.Error(), string(b))
tmpbyte = tmpbyte[:0]
}
continue
}
requestChan <- js
tmpbyte = tmpbyte[:0]
}
}
}
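// handleRequest expects newline-delimited JSON, one Notification per line,
// reassembling frames that arrive split across reads. A hypothetical
// LoginQq frame (key names assumed from the handling in eventLoop; the
// Notification type is defined elsewhere in the package):
//
//   {"Method": "LoginQq", "Params": {"loginqq": 123456789}}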
func serverStart() {
eventLoop()
}
func eventLoop() {
for {
js := <-requestChan
if js.Method == "LoginQq" {
LoginQQ, _ = js.Params.getInt64("loginqq")
logger.Printf(">>> %d\n", LoginQQ)
continue
}
subtype, _ := js.Params.getInt64("subtype")
switch js.Method {
case "GroupMessage":
go groupMessageHandle(js.Params)
case "DiscussMessage":
go discussMessageHandle(js.Params)
case "PrivateMessage":
go privateMessageHandle(js.Params)
case "GroupMemberJoin":
qqNum, _ := js.Params.getInt64("qqnum")
groupNum, _ := js.Params.getInt64("groupnum")
beingOperateQQ, _ := js.Params.getInt64("opqqnum")
message := welcomeNewMember(subtype, groupNum, qqNum, beingOperateQQ)
sendGroupMessage(groupNum, message)
getGroupMemberList(groupNum)
case "GroupMemberLeave":
qqNum, _ := js.Params.getInt64("qqnum")
groupNum, _ := js.Params.getInt64("groupnum")
if groupNum == 196656732 {
continue
}
//beingOperateQQ := js.Params.GetInt64("opqqnum")
if v, ok := groups.Load(groupNum); ok {
group := v.(Group)
members := group.Members
if members == nil {
continue
}
if v, ok := members.Load(qqNum); ok {
member := v.(Member)
if subtype == 1 {
message := fmt.Sprintf("群员:[%s] 退群了!!!", member.Nickname)
sendGroupMessage(groupNum, message)
} else if subtype == 2 {
message := fmt.Sprintf("群员:[%s] 被 某个管理员 踢了!!!", member.Nickname)
sendGroupMessage(groupNum, message)
}
}
}
case "RequestAddFriend":
responseFlag, _ := js.Params.getString("response_flag")
addFriend(responseFlag, 1, "")
case "RequestAddGroup":
responseFlag, _ := js.Params.getString("response_flag")
if subtype == 2 {
addGroup(responseFlag, 2, 1, "")
}
getGroupList()
case "GroupMemberList":
var groupMemberList []Member
if err := js.Params.UnmarshalGroupMemberList(&groupMemberList); err != nil {
logger.Printf(">>> get group member list faild: %v", err)
}
logger.Printf(">>> %v\n", groupMemberList)
go updateGroupMember(groupMemberList)
case "GroupList":
var grouplist []Group
if err := js.Params.UnmarshalGroupList(&grouplist); err != nil {
logger.Printf(">>> get group list faild: %v", err)
}
//logger.Printf(">>> %v\n", grouplist)
go updateGroupList(grouplist)
case "GroupMemberInfo":
var memberInfo Member
if err := js.Params.UnmarshalGroupMemberInfo(&memberInfo); err != nil {
logger.Printf(">>> get member info faild: %v", err)
}
logger.Printf(">>> %v\n", memberInfo)
go updateMemberInfo(memberInfo)
default:
logger.Printf("未处理:%s\n", js)
}
}
}
func GetGroupListFromDB() {
rows, err := db.Query("select id, group_number, name from groups")
if err != nil {
panic(err)
}
defer rows.Close()
for rows.Next() {
var groupName string
var groupID int64
var groupNum int64
rows.Scan(&groupID, &groupNum, &groupName)
if _, ok := groups.Load(groupNum); !ok {
group := Group{}
group.ID = groupID
group.GroupNum = groupNum
group.GroupName = groupName
group.Members = GetGroupMembersFromDB(group.ID, group.GroupNum)
groups.Store(groupNum, group)
}
}
}
func GetGroupMembersFromDB(groupId int64, groupNumber int64) *sync.Map {
memberList := new(sync.Map)
rows, err := db.Query("select m.id, m.user_id, u.qq_number, m.Nickname, m.Rights from group_members m join users u on m.user_id = u.id where m.group_id = $1", groupId)
if err != nil {
panic(err)
}
defer rows.Close()
for rows.Next() {
var id int64
var userId int64
var qq_number int64
var nickname string
var rights int32
rows.Scan(&id, &userId, &qq_number, &nickname, &rights)
m := Member{}
m.ID = id
m.UserID = userId
m.GroupID = groupId
m.GroupNum = groupNumber
m.QQNum = qq_number
m.Nickname = nickname
m.Permission = rights
memberList.Store(qq_number, m)
}
return memberList
}
func updateGroupList(groupList []Group) {
if len(groupList) == 0 {
return
}
GetGroupListFromDB()
var groupNums []int64 = []int64{}
for _, ng := range groupList {
groupNums = append(groupNums, ng.GroupNum)
if v, ok := groups.Load(ng.GroupNum); ok {
og := v.(Group)
if og.GroupName != ng.GroupName {
og.GroupName = ng.GroupName
groups.Store(ng.GroupNum, og)
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
_, err = trans.Exec("update groups set name = $1 where Id = $2", ng.GroupName, og.ID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
}
} else {
var groupID int64
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
err = trans.QueryRow("insert into groups(group_number, name) values($1, $2) returning id", ng.GroupNum, ng.GroupName).Scan(&groupID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
groups.Store(ng.GroupNum, Group{ID: groupID, GroupNum: ng.GroupNum, GroupName: ng.GroupName, Members: nil})
//getGroupMemberList(ng.GroupNum)
}
}
var waitForDeleteGroupNums []int64 = []int64{}
groups.Range(func(key, value interface{}) bool {
found := false
for _, num := range groupNums {
if num == key {
found = true
break
}
}
if !found {
g := value.(Group)
waitForDeleteGroupNums = append(waitForDeleteGroupNums, g.GroupNum)
trans, err := db.Begin()
if err != nil {
return true
}
_, err = trans.Exec("delete from replies where group_id = $1", g.ID)
if err != nil {
trans.Rollback()
return true
}
_, err = trans.Exec("delete from group_members where group_id = $1", g.ID)
if err != nil {
trans.Rollback()
return true
}
_, err = trans.Exec("delete from groups where id = $1", g.ID)
if err != nil {
trans.Rollback()
} else {
trans.Commit()
}
}
return true
})
for _, num := range waitForDeleteGroupNums {
groups.Delete(num)
}
groups.Range(func(key, value interface{}) bool {
log.Printf(">>> new groups: %v\n", value.(Group))
return true
})
}
func updateGroupMember(groupMemberList []Member) {
flag := true
for _, nm := range gr | ber) {
if v, ok := groups.Load(memberInfo.GroupNum); ok {
g := v.(Group)
if _, ok := g.Members.Load(memberInfo.QQNum); !ok {
g.Members.Store(memberInfo.QQNum, memberInfo)
}
}
}
func welcomeNewMember(subtype, groupNo, QQNum, operateQQ int64) (message string) {
var newbeMission string
if groupNo == 171712942 {
newbeMission = "新手四项任务:\n 1.修改群名片(群名片格式:游戏id + 活动区域)\n 2.上传带游戏id 的游戏内截图(上传到群内新人报道相册)\n 3.完成游戏自带training(游戏主界面右上角ops->training下所有项目)\n 4.阅读 ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n 5.不要轻易相信群里自称托马西的\n欢迎提其它问题"
} else if groupNo == 147798016 {
newbeMission = "新手五项任务:\n 1.修改群名片(群名片格式:游戏等级-游戏id-活动区域)\n 2.上传带游戏id 的游戏内截图(上传到群内新人报道相册)\n 3.完成游戏自带training(游戏主界面右上角ops->training下所有项目)\n 4.阅读ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n 5.汉子请爆照, 妹子自动免除此条\n欢迎提其它问题"
} else if groupNo == 196656732 {
newbeMission = `欢迎加入南京蓝色抵抗军大家庭^_^
建议新人按顺序完成以下事宜:
1. 修改群名片(格式:游戏id-活动区域)。
2. 上传带游戏id 的游戏内截图(上传到群内新人报道相册)。
3. 完成游戏自带training(游戏主界面右上角ops->training下所有项目)。
4.推荐关注北京蓝军公众号:ingressbeijing。每天都有关于ingress有趣新闻推送。
5.群文件有各种科学上网工具,仍有困难可以咨询群里老司机。
外地agent来访推荐做南大、东大拼图任务。upc以及其他任务攻略,可以咨询群里老司机。`
} else if groupNo == 292243472 {
newbeMission = "新手五项任务:\n 1. 修改群名片(群名片格式:游戏id-活动区域)。\n 2. 上传带游戏id 的游戏内截图(上传到群内新人报道相册)。\n 3. 完成游戏自带training(游戏主界面右上角ops->training下所有项目)。\n .阅读ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n欢迎提其它问题"
} else if groupNo == 312848770 {
newbeMission = fmt.Sprintf(`[CQ:at,qq=%d], 欢迎加入苏州抵抗军!
教程:
1、【重要】过马路不要玩手机!!!!!
2、基础概念 ( http://t.cn/R5drRQF )
3、升级指南 ( http://t.cn/R5drRQe )
4、进阶数据 ( http://t.cn/R5drRQD )
5、视频教程 ( http://i.youku.com/tomasish )
传教:
1、 Agents ( http://t.cn/R5drRQg )
上海蓝军微信公众号:sh_res
The world around you is not what it seems.`, QQNum)
}
if subtype == 1 {
message = fmt.Sprintf("欢迎新人 [CQ:at,qq=%d]!\n建议玩家使用英文界面方便交流(不要吐槽英文界面哪里方便交流...)\n先右上角目录→设备→语言→english即可\n请务必完成\n%s", QQNum, newbeMission)
} else if subtype == 2 {
message = fmt.Sprintf("欢迎 [CQ:at,qq=%d] 邀请的新人 [CQ:at,qq=%d]!建议玩家使用英文界面方便交流(不要吐槽英文界面哪里方便交流...)\n先右上角目录→设备→语言→english即可", operateQQ, QQNum)
}
return
}
| oupMemberList {
if v, ok := groups.Load(nm.GroupNum); ok {
g := v.(Group)
if flag {
g.Members = GetGroupMembersFromDB(g.ID, g.GroupNum)
flag = false
}
if g.Members == nil {
g.Members = new(sync.Map)
}
if _, ok := g.Members.Load(nm.QQNum); !ok {
g.Members.Store(nm.QQNum, nm)
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
var userID int64
var count int64
err = trans.QueryRow("select count(1) from users where qq_numer = $1", nm.QQNum).Scan(&count)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
if count > 0 {
err = trans.QueryRow("select id from users where qq_numer = $1", nm.QQNum).Scan(&userID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
} else {
err = trans.QueryRow("insert into users(qq_number, qq_name) values($1, $2) returning id", nm.QQNum, nm.Nickname).Scan(&userID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
}
_, err = trans.Exec("insert into group_members(group_id, user_id, nickname, rights) values($1, $2, $3, $4) returning id", g.ID, userID, nm.Nickname, nm.Permission)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
}
}
}
}
func updateMemberInfo(memberInfo Mem | identifier_body |
main.go | package main
import (
"bufio"
"bytes"
"database/sql"
"encoding/json"
"fmt"
"io"
"log"
"math/rand"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
_ "github.com/lib/pq"
)
var (
db *sql.DB
config tomlConfig
clientConn *net.TCPConn
closeSignChan = make(chan struct{})
requestChan = make(chan Notification, 1)
gLogFile *os.File
LoginQQ int64
)
func main() {
rand.Seed(42)
var err error
logFilename := "robirt.log"
gLogFile, err = os.OpenFile(logFilename, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
fmt.Printf("open file error=%s\r\n", err.Error())
os.Exit(-1)
}
writers := []io.Writer{
gLogFile,
os.Stdout,
}
fileAndStdoutWriter := io.MultiWriter(writers...)
logger = log.New(fileAndStdoutWriter, "", log.Ldate|log.Ltime|log.Lshortfile)
if _, err := toml.DecodeFile("config.toml", &config); err != nil {
logger.Println(err)
return
}
db, err = sql.Open("postgres", config.Database.DBName)
if err != nil {
panic(err)
}
defer db.Close()
db.SetConnMaxLifetime(10 * time.Second)
db.SetMaxIdleConns(4)
db.SetMaxOpenConns(20)
go serverStart()
go func() {
localAddress, err := net.ResolveTCPAddr("tcp4", "127.0.0.1:7008")
if err != nil {
logger.Fatalf("ResolveTCPAddr Error: %v\n", err)
}
ln, err := net.ListenTCP("tcp4", localAddress)
if err != nil {
logger.Fatalf("Failed to listening server: %v", err)
}
logger.Println("Listening server on tcp:127.0.0.1:7008")
for {
conn, err := ln.Accept()
if err != nil {
logger.Printf("Accept Error: %v\n", err)
continue
}
go handleRequest(conn)
}
}()
go func() {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
cmd(scanner.Text())
}
}()
<-closeSignChan
}
func cmd(cmd string) {
if cmd == "exit" {
close(closeSignChan)
return
} else if cmd == "init" {
getLoginQQ()
getGroupList()
return
} else if strings.HasPrefix(cmd, "random:") {
var i int32 = 0
var target = rand.Int31n(300)
groups.Range(func(key, value interface{}) bool {
if i == target {
sendGroupMessage(value.(Group).GroupNum, cmd[7:])
return false
}
i++
return true
})
return
} else if strings.HasPrefix(cmd, "level:") {
groupNum, err := strconv.ParseInt(strings.TrimSpace(cmd[6:]), 10, 64)
if err != nil {
fmt.Println(err)
return
}
if _, ok := groups.Load(groupNum); ok |
return
}
s := strings.SplitN(strings.TrimSpace(cmd), ":", 2)
if len(s) == 2 {
groupNum, err := strconv.ParseInt(strings.TrimSpace(s[0]), 10, 64)
if err != nil {
fmt.Println(err)
return
}
if g, ok := groups.Load(groupNum); ok {
sendGroupMessage(g.(Group).GroupNum, s[1])
} else {
fmt.Println("group not found!")
}
}
}
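// Console commands accepted by cmd above, derived from its branches:
//
//   exit             - close closeSignChan and shut the process down
//   init             - re-fetch the login QQ and the group list
//   random:<message> - send <message> to one randomly chosen group
//   level:<groupnum> - leave that group, then refresh the group list
//   <groupnum>:<msg> - send <msg> to that group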
func handleRequest(conn net.Conn) {
defer conn.Close()
tmpbyte := make([]byte, 0, 2<<22) // zero length, pre-sized capacity for reassembling split frames
buf := make([]byte, 2<<20)
for {
// Read the incoming connection into the buffer.
reqLen, err := conn.Read(buf)
if err != nil {
logger.Println("Error reading:", err.Error())
return
}
if reqLen == 0 {
continue
}
scanner := bufio.NewScanner(bytes.NewReader(buf[:reqLen]))
for scanner.Scan() {
b := bytes.TrimSpace(scanner.Bytes())
if len(b) == 0 {
logger.Println("len(b)==0", string(b))
continue
} else if len(tmpbyte) > 0 && len(tmpbyte)+len(b) < 4096 {
b = append(tmpbyte, b...)
logger.Printf("retry b := %s\n", string(b))
} else {
tmpbyte = tmpbyte[:0]
}
var js Notification
err = json.Unmarshal(b, &js)
if err != nil {
if serr, ok := err.(*json.SyntaxError); ok {
logger.Printf("%s, %s\n", serr.Error(), string(b))
if errStr := serr.Error(); strings.HasPrefix(errStr, "invalid character") && strings.HasSuffix(errStr, "in string literal") {
tmpbyte = tmpbyte[:0]
continue
}
tmpbyte = append(tmpbyte, b...)
} else {
logger.Printf("%s, %s\n", err.Error(), string(b))
tmpbyte = tmpbyte[:0]
}
continue
}
requestChan <- js
tmpbyte = tmpbyte[:0]
}
}
}
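// handleRequest expects newline-delimited JSON, one Notification per line,
// reassembling frames that arrive split across reads. A hypothetical
// LoginQq frame (key names assumed from the handling in eventLoop; the
// Notification type is defined elsewhere in the package):
//
//   {"Method": "LoginQq", "Params": {"loginqq": 123456789}}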
func serverStart() {
eventLoop()
}
func eventLoop() {
for {
js := <-requestChan
if js.Method == "LoginQq" {
LoginQQ, _ = js.Params.getInt64("loginqq")
logger.Printf(">>> %d\n", LoginQQ)
continue
}
subtype, _ := js.Params.getInt64("subtype")
switch js.Method {
case "GroupMessage":
go groupMessageHandle(js.Params)
case "DiscussMessage":
go discussMessageHandle(js.Params)
case "PrivateMessage":
go privateMessageHandle(js.Params)
case "GroupMemberJoin":
qqNum, _ := js.Params.getInt64("qqnum")
groupNum, _ := js.Params.getInt64("groupnum")
beingOperateQQ, _ := js.Params.getInt64("opqqnum")
message := welcomeNewMember(subtype, groupNum, qqNum, beingOperateQQ)
sendGroupMessage(groupNum, message)
getGroupMemberList(groupNum)
case "GroupMemberLeave":
qqNum, _ := js.Params.getInt64("qqnum")
groupNum, _ := js.Params.getInt64("groupnum")
if groupNum == 196656732 {
continue
}
//beingOperateQQ := js.Params.GetInt64("opqqnum")
if v, ok := groups.Load(groupNum); ok {
group := v.(Group)
members := group.Members
if members == nil {
continue
}
if v, ok := members.Load(qqNum); ok {
member := v.(Member)
if subtype == 1 {
message := fmt.Sprintf("群员:[%s] 退群了!!!", member.Nickname)
sendGroupMessage(groupNum, message)
} else if subtype == 2 {
message := fmt.Sprintf("群员:[%s] 被 某个管理员 踢了!!!", member.Nickname)
sendGroupMessage(groupNum, message)
}
}
}
case "RequestAddFriend":
responseFlag, _ := js.Params.getString("response_flag")
addFriend(responseFlag, 1, "")
case "RequestAddGroup":
responseFlag, _ := js.Params.getString("response_flag")
if subtype == 2 {
addGroup(responseFlag, 2, 1, "")
}
getGroupList()
case "GroupMemberList":
var groupMemberList []Member
if err := js.Params.UnmarshalGroupMemberList(&groupMemberList); err != nil {
logger.Printf(">>> get group member list faild: %v", err)
}
logger.Printf(">>> %v\n", groupMemberList)
go updateGroupMember(groupMemberList)
case "GroupList":
var grouplist []Group
if err := js.Params.UnmarshalGroupList(&grouplist); err != nil {
logger.Printf(">>> get group list faild: %v", err)
}
//logger.Printf(">>> %v\n", grouplist)
go updateGroupList(grouplist)
case "GroupMemberInfo":
var memberInfo Member
if err := js.Params.UnmarshalGroupMemberInfo(&memberInfo); err != nil {
logger.Printf(">>> get member info faild: %v", err)
}
logger.Printf(">>> %v\n", memberInfo)
go updateMemberInfo(memberInfo)
default:
logger.Printf("未处理:%s\n", js)
}
}
}
func GetGroupListFromDB() {
rows, err := db.Query("select id, group_number, name from groups")
if err != nil {
panic(err)
}
defer rows.Close()
for rows.Next() {
var groupName string
var groupID int64
var groupNum int64
rows.Scan(&groupID, &groupNum, &groupName)
if _, ok := groups.Load(groupNum); !ok {
group := Group{}
group.ID = groupID
group.GroupNum = groupNum
group.GroupName = groupName
group.Members = GetGroupMembersFromDB(group.ID, group.GroupNum)
groups.Store(groupNum, group)
}
}
}
func GetGroupMembersFromDB(groupId int64, groupNumber int64) *sync.Map {
memberList := new(sync.Map)
rows, err := db.Query("select m.id, m.user_id, u.qq_number, m.Nickname, m.Rights from group_members m join users u on m.user_id = u.id where m.group_id = $1", groupId)
if err != nil {
panic(err)
}
defer rows.Close()
for rows.Next() {
var id int64
var userId int64
var qq_number int64
var nickname string
var rights int32
rows.Scan(&id, &userId, &qq_number, &nickname, &rights)
m := Member{}
m.ID = id
m.UserID = userId
m.GroupID = groupId
m.GroupNum = groupNumber
m.QQNum = qq_number
m.Nickname = nickname
m.Permission = rights
memberList.Store(qq_number, m)
}
return memberList
}
func updateGroupList(groupList []Group) {
if len(groupList) == 0 {
return
}
GetGroupListFromDB()
var groupNums []int64 = []int64{}
for _, ng := range groupList {
groupNums = append(groupNums, ng.GroupNum)
if v, ok := groups.Load(ng.GroupNum); ok {
og := v.(Group)
if og.GroupName != ng.GroupName {
og.GroupName = ng.GroupName
groups.Store(ng.GroupNum, og)
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
_, err = trans.Exec("update groups set name = $1 where Id = $2", ng.GroupName, og.ID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
}
} else {
var groupID int64
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
err = trans.QueryRow("insert into groups(group_number, name) values($1, $2) returning id", ng.GroupNum, ng.GroupName).Scan(&groupID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
groups.Store(ng.GroupNum, Group{ID: groupID, GroupNum: ng.GroupNum, GroupName: ng.GroupName, Members: nil})
//getGroupMemberList(ng.GroupNum)
}
}
var waitForDeleteGroupNums []int64 = []int64{}
groups.Range(func(key, value interface{}) bool {
found := false
for _, num := range groupNums {
if num == key {
found = true
break
}
}
if !found {
g := value.(Group)
waitForDeleteGroupNums = append(waitForDeleteGroupNums, g.GroupNum)
trans, err := db.Begin()
if err != nil {
return true
}
_, err = trans.Exec("delete from replies where group_id = $1", g.ID)
if err != nil {
trans.Rollback()
return true
}
_, err = trans.Exec("delete from group_members where group_id = $1", g.ID)
if err != nil {
trans.Rollback()
return true
}
_, err = trans.Exec("delete from groups where id = $1", g.ID)
if err != nil {
trans.Rollback()
} else {
trans.Commit()
}
}
return true
})
for _, num := range waitForDeleteGroupNums {
groups.Delete(num)
}
groups.Range(func(key, value interface{}) bool {
log.Printf(">>> new groups: %v\n", value.(Group))
return true
})
}
func updateGroupMember(groupMemberList []Member) {
flag := true
for _, nm := range groupMemberList {
if v, ok := groups.Load(nm.GroupNum); ok {
g := v.(Group)
if flag {
g.Members = GetGroupMembersFromDB(g.ID, g.GroupNum)
flag = false
}
if g.Members == nil {
g.Members = new(sync.Map)
}
if _, ok := g.Members.Load(nm.QQNum); !ok {
g.Members.Store(nm.QQNum, nm)
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
var userID int64
var count int64
err = trans.QueryRow("select count(1) from users where qq_numer = $1", nm.QQNum).Scan(&count)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
if count > 0 {
err = trans.QueryRow("select id from users where qq_numer = $1", nm.QQNum).Scan(&userID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
} else {
err = trans.QueryRow("insert into users(qq_number, qq_name) values($1, $2) returning id", nm.QQNum, nm.Nickname).Scan(&userID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
}
_, err = trans.Exec("insert into group_members(group_id, user_id, nickname, rights) values($1, $2, $3, $4) returning id", g.ID, userID, nm.Nickname, nm.Permission)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
}
}
}
}
func updateMemberInfo(memberInfo Member) {
if v, ok := groups.Load(memberInfo.GroupNum); ok {
g := v.(Group)
if _, ok := g.Members.Load(memberInfo.QQNum); !ok {
g.Members.Store(memberInfo.QQNum, memberInfo)
}
}
}
func welcomeNewMember(subtype, groupNo, QQNum, operateQQ int64) (message string) {
var newbeMission string
if groupNo == 171712942 {
newbeMission = "新手四项任务:\n 1.修改群名片(群名片格式:游戏id + 活动区域)\n 2.上传带游戏id 的游戏内截图(上传到群内新人报道相册)\n 3.完成游戏自带training(游戏主界面右上角ops->training下所有项目)\n 4.阅读 ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n 5.不要轻易相信群里自称托马西的\n欢迎提其它问题"
} else if groupNo == 147798016 {
newbeMission = "新手五项任务:\n 1.修改群名片(群名片格式:游戏等级-游戏id-活动区域)\n 2.上传带游戏id 的游戏内截图(上传到群内新人报道相册)\n 3.完成游戏自带training(游戏主界面右上角ops->training下所有项目)\n 4.阅读ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n 5.汉子请爆照, 妹子自动免除此条\n欢迎提其它问题"
} else if groupNo == 196656732 {
newbeMission = `欢迎加入南京蓝色抵抗军大家庭^_^
建议新人按顺序完成以下事宜:
1. 修改群名片(格式:游戏id-活动区域)。
2. 上传带游戏id 的游戏内截图(上传到群内新人报道相册)。
3. 完成游戏自带training(游戏主界面右上角ops->training下所有项目)。
4.推荐关注北京蓝军公众号:ingressbeijing。每天都有关于ingress有趣新闻推送。
5.群文件有各种科学上网工具,仍有困难可以咨询群里老司机。
外地agent来访推荐做南大、东大拼图任务。upc以及其他任务攻略,可以咨询群里老司机。`
} else if groupNo == 292243472 {
newbeMission = "新手五项任务:\n 1. 修改群名片(群名片格式:游戏id-活动区域)。\n 2. 上传带游戏id 的游戏内截图(上传到群内新人报道相册)。\n 3. 完成游戏自带training(游戏主界面右上角ops->training下所有项目)。\n .阅读ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n欢迎提其它问题"
} else if groupNo == 312848770 {
newbeMission = fmt.Sprintf(`[CQ:at,qq=%d], 欢迎加入苏州抵抗军!
教程:
1、【重要】过马路不要玩手机!!!!!
2、基础概念 ( http://t.cn/R5drRQF )
3、升级指南 ( http://t.cn/R5drRQe )
4、进阶数据 ( http://t.cn/R5drRQD )
5、视频教程 ( http://i.youku.com/tomasish )
传教:
1、 Agents ( http://t.cn/R5drRQg )
上海蓝军微信公众号:sh_res
The world around you is not what it seems.`, QQNum)
}
if subtype == 1 {
message = fmt.Sprintf("欢迎新人 [CQ:at,qq=%d]!\n建议玩家使用英文界面方便交流(不要吐槽英文界面哪里方便交流...)\n先右上角目录→设备→语言→english即可\n请务必完成\n%s", QQNum, newbeMission)
} else if subtype == 2 {
message = fmt.Sprintf("欢迎 [CQ:at,qq=%d] 邀请的新人 [CQ:at,qq=%d]!建议玩家使用英文界面方便交流(不要吐槽英文界面哪里方便交流...)\n先右上角目录→设备→语言→english即可", operateQQ, QQNum)
}
return
}
| {
leaveGroup(groupNum)
getGroupList()
} | conditional_block |
main.go | package main
import (
"bufio"
"bytes"
"database/sql"
"encoding/json"
"fmt"
"io"
"log"
"math/rand"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
_ "github.com/lib/pq"
)
var (
db *sql.DB
config tomlConfig
clientConn *net.TCPConn
closeSignChan = make(chan struct{})
requestChan = make(chan Notification, 1)
gLogFile *os.File
LoginQQ int64
)
func main() {
rand.Seed(42)
var err error
logFilename := "robirt.log"
gLogFile, err = os.OpenFile(logFilename, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
fmt.Printf("open file error=%s\r\n", err.Error())
os.Exit(-1)
}
writers := []io.Writer{
gLogFile,
os.Stdout,
}
fileAndStdoutWriter := io.MultiWriter(writers...)
logger = log.New(fileAndStdoutWriter, "", log.Ldate|log.Ltime|log.Lshortfile)
if _, err := toml.DecodeFile("config.toml", &config); err != nil {
logger.Println(err)
return
}
db, err = sql.Open("postgres", config.Database.DBName)
if err != nil {
panic(err)
}
defer db.Close()
db.SetConnMaxLifetime(10 * time.Second)
db.SetMaxIdleConns(4)
db.SetMaxOpenConns(20)
go serverStart()
go func() {
localAddress, err := net.ResolveTCPAddr("tcp4", "127.0.0.1:7008")
if err != nil {
logger.Fatalf("ResolveTCPAddr Error: %v\n", err)
}
ln, err := net.ListenTCP("tcp4", localAddress)
if err != nil {
logger.Fatalf("Failed to listening server: %v", err)
}
logger.Println("Listening server on tcp:127.0.0.1:7008")
for {
conn, err := ln.Accept()
if err != nil {
logger.Printf("Accept Error: %v\n", err)
continue
}
go handleRequest(conn)
}
}()
go func() {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
cmd(scanner.Text())
}
}()
<-closeSignChan
}
func cmd(cmd string) {
if cmd == "exit" {
close(closeSignChan)
return
} else if cmd == "init" {
getLoginQQ()
getGroupList()
return
} else if strings.HasPrefix(cmd, "random:") {
var i int32 = 0
var target = rand.Int31n(300)
groups.Range(func(key, value interface{}) bool {
if i == target {
sendGroupMessage(value.(Group).GroupNum, cmd[7:])
return false
}
i++
return true
})
return
} else if strings.HasPrefix(cmd, "level:") {
groupNum, err := strconv.ParseInt(strings.TrimSpace(cmd[6:]), 10, 64)
if err != nil {
fmt.Println(err)
return
}
if _, ok := groups.Load(groupNum); ok {
leaveGroup(groupNum)
getGroupList()
}
return
}
s := strings.SplitN(strings.TrimSpace(cmd), ":", 2)
if len(s) == 2 {
groupNum, err := strconv.ParseInt(strings.TrimSpace(s[0]), 10, 64)
if err != nil {
fmt.Println(err)
return
}
if g, ok := groups.Load(groupNum); ok {
sendGroupMessage(g.(Group).GroupNum, s[1])
} else {
fmt.Println("group not found!")
}
}
}
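// Console commands accepted by cmd above, derived from its branches:
//
//   exit             - close closeSignChan and shut the process down
//   init             - re-fetch the login QQ and the group list
//   random:<message> - send <message> to one randomly chosen group
//   level:<groupnum> - leave that group, then refresh the group list
//   <groupnum>:<msg> - send <msg> to that group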
func handleRequest(conn net.Conn) {
defer conn.Close()
tmpbyte := make([]byte, 0, 2<<22) // zero length, pre-sized capacity for reassembling split frames
buf := make([]byte, 2<<20)
for {
// Read the incoming connection into the buffer.
reqLen, err := conn.Read(buf)
if err != nil {
logger.Println("Error reading:", err.Error())
return
}
if reqLen == 0 {
continue
}
scanner := bufio.NewScanner(bytes.NewReader(buf[:reqLen]))
for scanner.Scan() {
b := bytes.TrimSpace(scanner.Bytes())
if len(b) == 0 {
logger.Println("len(b)==0", string(b))
continue
} else if len(tmpbyte) > 0 && len(tmpbyte)+len(b) < 4096 {
b = append(tmpbyte, b...)
logger.Printf("retry b := %s\n", string(b))
} else {
tmpbyte = tmpbyte[:0]
}
var js Notification
err = json.Unmarshal(b, &js)
if err != nil {
if serr, ok := err.(*json.SyntaxError); ok {
logger.Printf("%s, %s\n", serr.Error(), string(b))
if errStr := serr.Error(); strings.HasPrefix(errStr, "invalid character") && strings.HasSuffix(errStr, "in string literal") {
tmpbyte = tmpbyte[:0]
continue
}
tmpbyte = append(tmpbyte, b...)
} else {
logger.Printf("%s, %s\n", err.Error(), string(b))
tmpbyte = tmpbyte[:0]
}
continue
}
requestChan <- js
tmpbyte = tmpbyte[:0]
}
}
}
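// handleRequest expects newline-delimited JSON, one Notification per line,
// reassembling frames that arrive split across reads. A hypothetical
// LoginQq frame (key names assumed from the handling in eventLoop; the
// Notification type is defined elsewhere in the package):
//
//   {"Method": "LoginQq", "Params": {"loginqq": 123456789}}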
func serverStart() {
eventLoop()
}
func eventLoop() {
for {
js := <-requestChan
if js.Method == "LoginQq" {
LoginQQ, _ = js.Params.getInt64("loginqq")
logger.Printf(">>> %d\n", LoginQQ)
continue
}
subtype, _ := js.Params.getInt64("subtype")
switch js.Method {
case "GroupMessage":
go groupMessageHandle(js.Params)
case "DiscussMessage":
go discussMessageHandle(js.Params)
case "PrivateMessage":
go privateMessageHandle(js.Params)
case "GroupMemberJoin":
qqNum, _ := js.Params.getInt64("qqnum")
groupNum, _ := js.Params.getInt64("groupnum")
beingOperateQQ, _ := js.Params.getInt64("opqqnum")
message := welcomeNewMember(subtype, groupNum, qqNum, beingOperateQQ)
sendGroupMessage(groupNum, message)
getGroupMemberList(groupNum)
case "GroupMemberLeave":
qqNum, _ := js.Params.getInt64("qqnum")
groupNum, _ := js.Params.getInt64("groupnum")
if groupNum == 196656732 {
continue
}
//beingOperateQQ := js.Params.GetInt64("opqqnum")
if v, ok := groups.Load(groupNum); ok {
group := v.(Group)
members := group.Members
if members == nil {
continue
}
if v, ok := members.Load(qqNum); ok {
member := v.(Member)
if subtype == 1 {
message := fmt.Sprintf("群员:[%s] 退群了!!!", member.Nickname)
sendGroupMessage(groupNum, message)
} else if subtype == 2 {
message := fmt.Sprintf("群员:[%s] 被 某个管理员 踢了!!!", member.Nickname)
sendGroupMessage(groupNum, message)
}
}
}
case "RequestAddFriend":
responseFlag, _ := js.Params.getString("response_flag")
addFriend(responseFlag, 1, "")
case "RequestAddGroup":
responseFlag, _ := js.Params.getString("response_flag")
if subtype == 2 {
addGroup(responseFlag, 2, 1, "")
}
getGroupList()
case "GroupMemberList":
var groupMemberList []Member
if err := js.Params.UnmarshalGroupMemberList(&groupMemberList); err != nil {
logger.Printf(">>> get group member list faild: %v", err)
}
logger.Printf(">>> %v\n", groupMemberList)
go updateGroupMember(groupMemberList)
case "GroupList":
var grouplist []Group
if err := js.Params.UnmarshalGroupList(&grouplist); err != nil {
logger.Printf(">>> get group list faild: %v", err)
}
//logger.Printf(">>> %v\n", grouplist)
go updateGroupList(grouplist)
case "GroupMemberInfo":
var memberInfo Member
if err := js.Params.UnmarshalGroupMemberInfo(&memberInfo); err != nil {
logger.Printf(">>> get member info faild: %v", err)
}
logger.Printf(">>> %v\n", memberInfo)
go updateMemberInfo(memberInfo)
default:
logger.Printf("未处理:%s\n", js)
}
}
}
func GetGroupListFromDB() {
rows, err := db.Query("select id, group_number, name from groups")
if err != nil {
panic(err)
}
defer rows.Close()
for rows.Next() {
var groupName string
var groupID int64
var groupNum int64
rows.Scan(&groupID, &groupNum, &groupName)
if _, ok := groups.Load(groupNum); !ok {
group := Group{}
group.ID = groupID
group.GroupNum = groupNum
group.GroupName = groupName
group.Members = GetGroupMembersFromDB(group.ID, group.GroupNum)
groups.Store(groupNum, group)
}
}
}
func GetGroupMembersFromDB(groupId int64, groupNumber int64) *sync.Map {
memberList := new(sync.Map)
rows, err := db.Query("select m.id, m.user_id, u.qq_number, m.Nickname, m.Rights from group_members m join users u on m.user_id = u.id where m.group_id = $1", groupId)
if err != nil {
panic(err)
}
defer rows.Close()
for rows.Next() {
var id int64
var userId int64
var qq_number int64
var nickname string
var rights int32
rows.Scan(&id, &userId, &qq_number, &nickname, &rights)
m := Member{}
m.ID = id
m.UserID = userId
m.GroupID = groupId
m.GroupNum = groupNumber
m.QQNum = qq_number
m.Nickname = nickname
m.Permission = rights
memberList.Store(qq_number, m)
}
return memberList
}
func updateGroupList(groupList []Group) {
if len(groupList) == 0 {
return
}
GetGroupListFromDB()
var groupNums []int64 = []int64{}
for _, ng := range groupList {
groupNums = append(groupNums, ng.GroupNum)
if v, ok := groups.Load(ng.GroupNum); ok {
og := v.(Group)
if og.GroupName != ng.GroupName {
og.GroupName = ng.GroupName
groups.Store(ng.GroupNum, og)
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
_, err = trans.Exec("update groups set name = $1 where Id = $2", ng.GroupName, og.ID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
}
} else {
var groupID int64
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
err = trans.QueryRow("insert into groups(group_number, name) values($1, $2) returning id", ng.GroupNum, ng.GroupName).Scan(&groupID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
groups.Store(ng.GroupNum, Group{ID: groupID, GroupNum: ng.GroupNum, GroupName: ng.GroupName, Members: nil})
//getGroupMemberList(ng.GroupNum)
}
}
var waitForDeleteGroupNums []int64 = []int64{}
groups.Range(func(key, value interface{}) bool {
found := false
for _, num := range groupNums {
if num == key {
found = true
break
}
}
if !found {
g := value.(Group)
waitForDeleteGroupNums = append(waitForDeleteGroupNums, g.GroupNum)
trans, err := db.Begin()
if err != nil {
return true
}
_, err = trans.Exec("delete from replies where group_id = $1", g.ID)
if err != nil {
trans.Rollback()
return true
}
_, err = trans.Exec("delete from group_members where group_id = $1", g.ID)
if err != nil {
trans.Rollback()
return true
}
_, err = trans.Exec("delete from groups where id = $1", g.ID)
if err != nil {
trans.Rollback()
} else {
trans.Commit()
}
}
return true
})
for _, num := range waitForDeleteGroupNums {
groups.Delete(num)
}
groups.Range(func(key, value interface{}) bool {
log.Printf(">>> new groups: %v\n", value.(Group))
return true
})
}
func updateGroupMember(groupMemberList []Member) {
flag := true
for _, nm := range groupMemberList {
if v, ok := groups.Load(nm.GroupNum); ok {
g := v.(Group)
if flag {
g.Members = GetGroupMembersFromDB(g.ID, g.GroupNum)
flag = false
}
if g.Members == nil {
g.Members = new(sync.Map)
}
if _, ok := g.Members.Load(nm.QQNum); !ok {
g.Members.Store(nm.QQNum, nm)
trans, err := db.Begin()
if err != nil {
//reportError(err) | var userID int64
var count int64
err = trans.QueryRow("select count(1) from users where qq_numer = $1", nm.QQNum).Scan(&count)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
if count > 0 {
err = trans.QueryRow("select id from users where qq_numer = $1", nm.QQNum).Scan(&userID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
} else {
err = trans.QueryRow("insert into users(qq_number, qq_name) values($1, $2) returning id", nm.QQNum, nm.Nickname).Scan(&userID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
}
_, err = trans.Exec("insert into group_members(group_id, user_id, nickname, rights) values($1, $2, $3, $4) returning id", g.ID, userID, nm.Nickname, nm.Permission)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
}
}
}
}
func updateMemberInfo(memberInfo Member) {
if v, ok := groups.Load(memberInfo.GroupNum); ok {
g := v.(Group)
if _, ok := g.Members.Load(memberInfo.QQNum); !ok {
g.Members.Store(memberInfo.QQNum, memberInfo)
}
}
}
func welcomeNewMember(subtype, groupNo, QQNum, operateQQ int64) (message string) {
var newbeMission string
if groupNo == 171712942 {
newbeMission = "新手四项任务:\n 1.修改群名片(群名片格式:游戏id + 活动区域)\n 2.上传带游戏id 的游戏内截图(上传到群内新人报道相册)\n 3.完成游戏自带training(游戏主界面右上角ops->training下所有项目)\n 4.阅读 ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n 5.不要轻易相信群里自称托马西的\n欢迎提其它问题"
} else if groupNo == 147798016 {
newbeMission = "新手五项任务:\n 1.修改群名片(群名片格式:游戏等级-游戏id-活动区域)\n 2.上传带游戏id 的游戏内截图(上传到群内新人报道相册)\n 3.完成游戏自带training(游戏主界面右上角ops->training下所有项目)\n 4.阅读ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n 5.汉子请爆照, 妹子自动免除此条\n欢迎提其它问题"
} else if groupNo == 196656732 {
newbeMission = `欢迎加入南京蓝色抵抗军大家庭^_^
建议新人按顺序完成以下事宜:
1. 修改群名片(格式:游戏id-活动区域)。
2. 上传带游戏id 的游戏内截图(上传到群内新人报道相册)。
3. 完成游戏自带training(游戏主界面右上角ops->training下所有项目)。
4.推荐关注北京蓝军公众号:ingressbeijing。每天都有关于ingress有趣新闻推送。
5.群文件有各种科学上网工具,仍有困难可以咨询群里老司机。
外地agent来访推荐做南大、东大拼图任务。upc以及其他任务攻略,可以咨询群里老司机。`
} else if groupNo == 292243472 {
newbeMission = "新手五项任务:\n 1. 修改群名片(群名片格式:游戏id-活动区域)。\n 2. 上传带游戏id 的游戏内截图(上传到群内新人报道相册)。\n 3. 完成游戏自带training(游戏主界面右上角ops->training下所有项目)。\n .阅读ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n欢迎提其它问题"
} else if groupNo == 312848770 {
newbeMission = fmt.Sprintf(`[CQ:at,qq=%d], 欢迎加入苏州抵抗军!
教程:
1、【重要】过马路不要玩手机!!!!!
2、基础概念 ( http://t.cn/R5drRQF )
3、升级指南 ( http://t.cn/R5drRQe )
4、进阶数据 ( http://t.cn/R5drRQD )
5、视频教程 ( http://i.youku.com/tomasish )
传教:
1、 Agents ( http://t.cn/R5drRQg )
上海蓝军微信公众号:sh_res
The world around you is not what it seems.`, QQNum)
}
if subtype == 1 {
message = fmt.Sprintf("欢迎新人 [CQ:at,qq=%d]!\n建议玩家使用英文界面方便交流(不要吐槽英文界面哪里方便交流...)\n先右上角目录→设备→语言→english即可\n请务必完成\n%s", QQNum, newbeMission)
} else if subtype == 2 {
message = fmt.Sprintf("欢迎 [CQ:at,qq=%d] 邀请的新人 [CQ:at,qq=%d]!建议玩家使用英文界面方便交流(不要吐槽英文界面哪里方便交流...)\n先右上角目录→设备→语言→english即可", operateQQ, QQNum)
}
return
} | continue
} | random_line_split |
main.go | package main
import (
"bufio"
"bytes"
"database/sql"
"encoding/json"
"fmt"
"io"
"log"
"math/rand"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
_ "github.com/lib/pq"
)
var (
db *sql.DB
config tomlConfig
clientConn *net.TCPConn
closeSignChan = make(chan struct{})
requestChan = make(chan Notification, 1)
gLogFile *os.File
LoginQQ int64
)
func main() {
rand.Seed(42)
var err error
logFilename := "robirt.log"
gLogFile, err = os.OpenFile(logFilename, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
fmt.Printf("open file error=%s\r\n", err.Error())
os.Exit(-1)
}
writers := []io.Writer{
gLogFile,
os.Stdout,
}
fileAndStdoutWriter := io.MultiWriter(writers...)
logger = log.New(fileAndStdoutWriter, "", log.Ldate|log.Ltime|log.Lshortfile)
if _, err := toml.DecodeFile("config.toml", &config); err != nil {
logger.Println(err)
return
}
db, err = sql.Open("postgres", config.Database.DBName)
if err != nil {
panic(err)
}
defer db.Close()
db.SetConnMaxLifetime(10 * time.Second)
db.SetMaxIdleConns(4)
db.SetMaxOpenConns(20)
go serverStart()
go func() {
localAddress, err := net.ResolveTCPAddr("tcp4", "127.0.0.1:7008")
if err != nil {
logger.Fatalf("ResolveTCPAddr Error: %v\n", err)
}
ln, err := net.ListenTCP("tcp4", localAddress)
if err != nil {
logger.Fatalf("Failed to listening server: %v", err)
}
logger.Println("Listening server on tcp:127.0.0.1:7008")
for {
conn, err := ln.Accept()
if err != nil {
logger.Printf("Accept Error: %v\n", err)
continue
}
go handleRequest(conn)
}
}()
go func() {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
cmd(scanner.Text())
}
}()
<-closeSignChan
}
func | (cmd string) {
if cmd == "exit" {
close(closeSignChan)
return
} else if cmd == "init" {
getLoginQQ()
getGroupList()
return
} else if strings.HasPrefix(cmd, "random:") {
var i int32 = 0
var target = rand.Int31n(300)
groups.Range(func(key, value interface{}) bool {
if i == target {
sendGroupMessage(value.(Group).GroupNum, cmd[7:])
return false
}
i++
return true
})
return
} else if strings.HasPrefix(cmd, "level:") {
groupNum, err := strconv.ParseInt(strings.TrimSpace(cmd[6:]), 10, 64)
if err != nil {
fmt.Println(err)
return
}
if _, ok := groups.Load(groupNum); ok {
leaveGroup(groupNum)
getGroupList()
}
return
}
s := strings.SplitN(strings.TrimSpace(cmd), ":", 2)
if len(s) == 2 {
groupNum, err := strconv.ParseInt(strings.TrimSpace(s[0]), 10, 64)
if err != nil {
fmt.Println(err)
return
}
if g, ok := groups.Load(groupNum); ok {
sendGroupMessage(g.(Group).GroupNum, s[1])
} else {
fmt.Println("group not found!")
}
}
}
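// Illustrative sketch (hypothetical helper, not part of the original file): the
// "<groupNum>:<message>" console syntax above relies on SplitN with n=2, which
// keeps any further colons inside the message body.
func parseConsoleLine(line string) (int64, string, error) {
	s := strings.SplitN(strings.TrimSpace(line), ":", 2)
	if len(s) != 2 {
		return 0, "", fmt.Errorf("expected <groupNum>:<message>")
	}
	// parseConsoleLine("123:hello:world") yields (123, "hello:world", nil).
	num, err := strconv.ParseInt(strings.TrimSpace(s[0]), 10, 64)
	return num, s[1], err
}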
func handleRequest(conn net.Conn) {
defer conn.Close()
tmpbyte := make([]byte, 2<<22)
tmpbyte = tmpbyte[:0]
buf := make([]byte, 2<<20)
for {
// Read the incoming connection into the buffer.
reqLen, err := conn.Read(buf)
if err != nil {
logger.Println("Error reading:", err.Error())
return
}
if reqLen == 0 {
continue
}
scanner := bufio.NewScanner(bytes.NewReader(buf[:reqLen]))
for scanner.Scan() {
b := bytes.TrimSpace(scanner.Bytes())
if len(b) == 0 {
logger.Println("len(b)==0", string(b))
continue
} else if len(tmpbyte) > 0 && len(tmpbyte)+len(b) < 4096 {
b = append(tmpbyte, b...)
logger.Printf("retry b := %s\n", string(b))
} else {
tmpbyte = tmpbyte[:0]
}
var js Notification
err = json.Unmarshal(b, &js)
if err != nil {
if serr, ok := err.(*json.SyntaxError); ok {
logger.Printf("%s, %s\n", serr.Error(), string(b))
if errStr := serr.Error(); strings.HasPrefix(errStr, "invalid character") && strings.HasSuffix(errStr, "in string literal") {
tmpbyte = tmpbyte[:0]
continue
}
tmpbyte = append(tmpbyte, b...)
} else {
logger.Printf("%s, %s\n", err.Error(), string(b))
tmpbyte = tmpbyte[:0]
}
continue
}
requestChan <- js
tmpbyte = tmpbyte[:0]
}
}
}
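// Illustrative sketch (hypothetical, not part of the original file): handleRequest
// buffers fragments because a TCP read can end mid-line, so one JSON document may
// arrive split across two reads and must be re-joined before json.Unmarshal succeeds.
func reassembleDemo() {
	var pending []byte
	var js Notification
	for _, chunk := range [][]byte{[]byte(`{"method":"Group`), []byte(`Message"}`)} {
		candidate := append(append([]byte{}, pending...), chunk...)
		if err := json.Unmarshal(candidate, &js); err != nil {
			pending = candidate // keep the fragment and wait for more bytes
			continue
		}
		pending = pending[:0] // parsed successfully; reset the buffer
	}
}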
func serverStart() {
eventLoop()
}
func eventLoop() {
for {
js := <-requestChan
if js.Method == "LoginQq" {
LoginQQ, _ = js.Params.getInt64("loginqq")
logger.Printf(">>> %d\n", LoginQQ)
continue
}
subtype, _ := js.Params.getInt64("subtype")
switch js.Method {
case "GroupMessage":
go groupMessageHandle(js.Params)
case "DiscussMessage":
go discussMessageHandle(js.Params)
case "PrivateMessage":
go privateMessageHandle(js.Params)
case "GroupMemberJoin":
qqNum, _ := js.Params.getInt64("qqnum")
groupNum, _ := js.Params.getInt64("groupnum")
beingOperateQQ, _ := js.Params.getInt64("opqqnum")
message := welcomeNewMember(subtype, groupNum, qqNum, beingOperateQQ)
sendGroupMessage(groupNum, message)
getGroupMemberList(groupNum)
case "GroupMemberLeave":
qqNum, _ := js.Params.getInt64("qqnum")
groupNum, _ := js.Params.getInt64("groupnum")
if groupNum == 196656732 {
continue
}
//beingOperateQQ := js.Params.GetInt64("opqqnum")
if v, ok := groups.Load(groupNum); ok {
group := v.(Group)
members := group.Members
if members == nil {
continue
}
if v, ok := members.Load(qqNum); ok {
member := v.(Member)
if subtype == 1 {
message := fmt.Sprintf("群员:[%s] 退群了!!!", member.Nickname)
sendGroupMessage(groupNum, message)
} else if subtype == 2 {
message := fmt.Sprintf("群员:[%s] 被 某个管理员 踢了!!!", member.Nickname)
sendGroupMessage(groupNum, message)
}
}
}
case "RequestAddFriend":
responseFlag, _ := js.Params.getString("response_flag")
addFriend(responseFlag, 1, "")
case "RequestAddGroup":
responseFlag, _ := js.Params.getString("response_flag")
if subtype == 2 {
addGroup(responseFlag, 2, 1, "")
}
getGroupList()
case "GroupMemberList":
var groupMemberList []Member
if err := js.Params.UnmarshalGroupMemberList(&groupMemberList); err != nil {
logger.Printf(">>> get group member list faild: %v", err)
}
logger.Printf(">>> %v\n", groupMemberList)
go updateGroupMember(groupMemberList)
case "GroupList":
var grouplist []Group
if err := js.Params.UnmarshalGroupList(&grouplist); err != nil {
logger.Printf(">>> get group list faild: %v", err)
}
//logger.Printf(">>> %v\n", grouplist)
go updateGroupList(grouplist)
case "GroupMemberInfo":
var memberInfo Member
if err := js.Params.UnmarshalGroupMemberInfo(&memberInfo); err != nil {
logger.Printf(">>> get member info faild: %v", err)
}
logger.Printf(">>> %v\n", memberInfo)
go updateMemberInfo(memberInfo)
default:
logger.Printf("未处理:%s\n", js)
}
}
}
func GetGroupListFromDB() {
rows, err := db.Query("select id, group_number, name from groups")
if err != nil {
panic(err)
}
defer rows.Close()
for rows.Next() {
var groupName string
var groupID int64
var groupNum int64
rows.Scan(&groupID, &groupNum, &groupName)
if _, ok := groups.Load(groupNum); !ok {
group := Group{}
group.ID = groupID
group.GroupNum = groupNum
group.GroupName = groupName
group.Members = GetGroupMembersFromDB(group.ID, group.GroupNum)
groups.Store(groupNum, group)
}
}
}
func GetGroupMembersFromDB(groupId int64, groupNumber int64) *sync.Map {
memberList := new(sync.Map)
rows, err := db.Query("select m.id, m.user_id, u.qq_number, m.Nickname, m.Rights from group_members m join users u on m.user_id = u.id where m.group_id = $1", groupId)
if err != nil {
panic(err)
}
defer rows.Close()
for rows.Next() {
var id int64
var userId int64
var qq_number int64
var nickname string
var rights int32
rows.Scan(&id, &userId, &qq_number, &nickname, &rights)
m := Member{}
m.ID = id
m.UserID = userId
m.GroupID = groupId
m.GroupNum = groupNumber
m.QQNum = qq_number
m.Nickname = nickname
m.Permission = rights
memberList.Store(qq_number, m)
}
return memberList
}
func updateGroupList(groupList []Group) {
if len(groupList) == 0 {
return
}
GetGroupListFromDB()
var groupNums []int64 = []int64{}
for _, ng := range groupList {
groupNums = append(groupNums, ng.GroupNum)
if v, ok := groups.Load(ng.GroupNum); ok {
og := v.(Group)
if og.GroupName != ng.GroupName {
og.GroupName = ng.GroupName
groups.Store(ng.GroupNum, og)
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
_, err = trans.Exec("update groups set name = $1 where Id = $2", ng.GroupName, og.ID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
}
} else {
var groupID int64
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
err = trans.QueryRow("insert into groups(group_number, name) values($1, $2) returning id", ng.GroupNum, ng.GroupName).Scan(&groupID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
groups.Store(ng.GroupNum, Group{ID: groupID, GroupNum: ng.GroupNum, GroupName: ng.GroupName, Members: nil})
//getGroupMemberList(ng.GroupNum)
}
}
var waitForDeleteGroupNums []int64 = []int64{}
groups.Range(func(key, value interface{}) bool {
found := false
for _, num := range groupNums {
if num == key {
found = true
break
}
}
if !found {
g := value.(Group)
waitForDeleteGroupNums = append(waitForDeleteGroupNums, g.GroupNum)
trans, err := db.Begin()
if err != nil {
return true
}
_, err = trans.Exec("delete from replies where group_id = $1", g.ID)
if err != nil {
trans.Rollback()
return true
}
_, err = trans.Exec("delete from group_members where group_id = $1", g.ID)
if err != nil {
trans.Rollback()
return true
}
_, err = trans.Exec("delete from groups where id = $1", g.ID)
if err != nil {
trans.Rollback()
} else {
trans.Commit()
}
}
return true
})
for _, num := range waitForDeleteGroupNums {
groups.Delete(num)
}
groups.Range(func(key, value interface{}) bool {
log.Printf(">>> new groups: %v\n", value.(Group))
return true
})
}
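// Illustrative alternative (hypothetical): the linear scan above that detects
// departed groups is O(n*m); collecting the current numbers into a set first
// makes each membership test O(1):
//
//	current := make(map[int64]struct{}, len(groupNums))
//	for _, num := range groupNums {
//		current[num] = struct{}{}
//	}
//	// inside groups.Range: _, found := current[key.(int64)]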
func updateGroupMember(groupMemberList []Member) {
flag := true
for _, nm := range groupMemberList {
if v, ok := groups.Load(nm.GroupNum); ok {
g := v.(Group)
if flag {
g.Members = GetGroupMembersFromDB(g.ID, g.GroupNum)
flag = false
}
if g.Members == nil {
g.Members = new(sync.Map)
}
if _, ok := g.Members.Load(nm.QQNum); !ok {
g.Members.Store(nm.QQNum, nm)
trans, err := db.Begin()
if err != nil {
//reportError(err)
continue
}
var userID int64
var count int64
err = trans.QueryRow("select count(1) from users where qq_numer = $1", nm.QQNum).Scan(&count)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
if count > 0 {
err = trans.QueryRow("select id from users where qq_numer = $1", nm.QQNum).Scan(&userID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
} else {
err = trans.QueryRow("insert into users(qq_number, qq_name) values($1, $2) returning id", nm.QQNum, nm.Nickname).Scan(&userID)
if err != nil {
//reportError(err)
trans.Rollback()
continue
}
}
_, err = trans.Exec("insert into group_members(group_id, user_id, nickname, rights) values($1, $2, $3, $4) returning id", g.ID, userID, nm.Nickname, nm.Permission)
if err != nil {
//reportError(err)
trans.Rollback()
continue
} else {
trans.Commit()
}
}
}
}
}
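// Illustrative alternative (assumes a UNIQUE constraint on users.qq_number, which
// the original schema is not shown to declare): the count/select/insert sequence
// above could collapse into a single Postgres upsert:
//
//	err = trans.QueryRow(
//		`insert into users(qq_number, qq_name) values($1, $2)
//		 on conflict (qq_number) do update set qq_name = excluded.qq_name
//		 returning id`,
//		nm.QQNum, nm.Nickname).Scan(&userID)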
func updateMemberInfo(memberInfo Member) {
if v, ok := groups.Load(memberInfo.GroupNum); ok {
g := v.(Group)
if _, ok := g.Members.Load(memberInfo.QQNum); !ok {
g.Members.Store(memberInfo.QQNum, memberInfo)
}
}
}
func welcomeNewMember(subtype, groupNo, QQNum, operateQQ int64) (message string) {
var newbeMission string
if groupNo == 171712942 {
newbeMission = "新手四项任务:\n 1.修改群名片(群名片格式:游戏id + 活动区域)\n 2.上传带游戏id 的游戏内截图(上传到群内新人报道相册)\n 3.完成游戏自带training(游戏主界面右上角ops->training下所有项目)\n 4.阅读 ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n 5.不要轻易相信群里自称托马西的\n欢迎提其它问题"
} else if groupNo == 147798016 {
newbeMission = "新手五项任务:\n 1.修改群名片(群名片格式:游戏等级-游戏id-活动区域)\n 2.上传带游戏id 的游戏内截图(上传到群内新人报道相册)\n 3.完成游戏自带training(游戏主界面右上角ops->training下所有项目)\n 4.阅读ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n 5.汉子请爆照, 妹子自动免除此条\n欢迎提其它问题"
} else if groupNo == 196656732 {
newbeMission = `欢迎加入南京蓝色抵抗军大家庭^_^
建议新人按顺序完成以下事宜:
1. 修改群名片(格式:游戏id-活动区域)。
2. 上传带游戏id 的游戏内截图(上传到群内新人报道相册)。
3. 完成游戏自带training(游戏主界面右上角ops->training下所有项目)。
4.推荐关注北京蓝军公众号:ingressbeijing。每天都有关于ingress有趣新闻推送。
5.群文件有各种科学上网工具,仍有困难可以咨询群里老司机。
外地agent来访推荐做南大、东大拼图任务。upc以及其他任务攻略,可以咨询群里老司机。`
} else if groupNo == 292243472 {
newbeMission = "新手五项任务:\n 1. 修改群名片(群名片格式:游戏id-活动区域)。\n 2. 上传带游戏id 的游戏内截图(上传到群内新人报道相册)。\n 3. 完成游戏自带training(游戏主界面右上角ops->training下所有项目)。\n .阅读ingress 新手指南: http://mp.weixin.qq.com/s?__biz=MzIxNTI4ODU1OA==&mid=403604670&idx=1&sn=1b74a16225deebefe9fcb81e09a39477&scene=18\n欢迎提其它问题"
} else if groupNo == 312848770 {
newbeMission = fmt.Sprintf(`[CQ:at,qq=%d], 欢迎加入苏州抵抗军!
教程:
1、【重要】过马路不要玩手机!!!!!
2、基础概念 ( http://t.cn/R5drRQF )
3、升级指南 ( http://t.cn/R5drRQe )
4、进阶数据 ( http://t.cn/R5drRQD )
5、视频教程 ( http://i.youku.com/tomasish )
传教:
1、 Agents ( http://t.cn/R5drRQg )
上海蓝军微信公众号:sh_res
The world around you is not what it seems.`, QQNum)
}
if subtype == 1 {
message = fmt.Sprintf("欢迎新人 [CQ:at,qq=%d]!\n建议玩家使用英文界面方便交流(不要吐槽英文界面哪里方便交流...)\n先右上角目录→设备→语言→english即可\n请务必完成\n%s", QQNum, newbeMission)
} else if subtype == 2 {
message = fmt.Sprintf("欢迎 [CQ:at,qq=%d] 邀请的新人 [CQ:at,qq=%d]!建议玩家使用英文界面方便交流(不要吐槽英文界面哪里方便交流...)\n先右上角目录→设备→语言→english即可", operateQQ, QQNum)
}
return
}
| cmd | identifier_name |
main.rs | #![recursion_limit="128"]
//#![allow(unused_imports)]
//#![allow(dead_code)]
extern crate gl;
extern crate glutin;
#[macro_use] extern crate nom;
extern crate image;
extern crate time;
extern crate cupid;
extern crate sysinfo;
extern crate systemstat;
extern crate bytesize;
// extern crate subprocess;
use {
gl::*,
glutin::{
// dpi::*,
event::{Event, WindowEvent, Event::DeviceEvent, },
event_loop::{ControlFlow, EventLoop, },
},
sysinfo::SystemExt,
systemstat::{System, Platform},
// crate::{
// // render::{ * },
// // shader::{ Shader, },
// },
};
// in project stuff
pub mod display; // I think I still need this for storing window dimensions
pub mod gamemgr;
pub mod input;
pub mod loader; // Can be simplified
pub mod render;
pub mod shader;
pub mod text;
pub mod texture; // needed for font atlas but needed things can be ported out
pub mod timer;
pub mod util;
pub use display::Display;
pub use input::Handler;
pub use loader::Loader;
pub use render::{RenderMgr, };
pub use shader::Shader;
pub use timer::Timer;
fn main() {
// Test code for parsing fnt files
// use text::metafile::test_noms;
// test_noms();
// Specify OpenGL version
let gl_request = glutin::GlRequest::Specific(glutin::Api::OpenGl, (4, 3));
let gl_profile = glutin::GlProfile::Core;
// Create a window
let el = EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("RaumEn SysInfo")
.with_inner_size(glutin::dpi::LogicalSize::new(1024.0, 768.0))
.with_maximized(false);
let windowed_context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.build_windowed(wb, &el)
.unwrap();
let windowed_context = unsafe { windowed_context.make_current().unwrap() };
// Set up OpenGL
unsafe {
load_with(|symbol| windowed_context.context().get_proc_address(symbol) as *const _);
ClearColor(0.0, 1.0, 0.0, 1.0);
}
let mut render_mgr = RenderMgr::new();
let mut mgr = render_mgr.mgr.clone();
let mut system = sysinfo::System::new();
let cpu = cpu_name();
let ram = get_ram_total(&mut system);
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let hdd = get_hdd();
let mut fps: f32 = 30.0;
let mut once_per_sec = false;
let mut clean_up_time = false;
{ // Here, we're getting the size of the window in pixels
// and passing it to the update_size() method. It in turn
// updates the Projection Matrix and passes that to
// ALL THE SHADERS, so if you add a SHADER, you need
// to REMEMBER to add that shader to the update_size()
// method near the bottom of this file.
// let dpi = windowed_context.window().get_hidpi_factor();
let size: glutin::dpi::PhysicalSize<u32> = windowed_context.window().inner_size();
mgr.update_size(size.into());
}
{
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.add_font(mgr.clone(), "pirate");
textmgr.add_font(mgr.clone(), "sans");
textmgr.new_text(mgr.clone(), "Title", "SysInfo", "pirate", 4.0, 0.0, 0.0, 1.0, true, true);
textmgr.new_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""), "sans", 2.0, 0.0, 0.4, 1.0, true, true);
textmgr.new_text(mgr.clone(), "FPS", "FPS: 0.0", "sans", 1.5, 0.0, 0.0, 0.3, false, true);
}
// Game loop!
println!("Starting main loop.");
el.run(move |event, _, control_flow| {
// *control_flow = ControlFlow::Wait;
match event {
Event::LoopDestroyed => {
println!("Cleaning Up...");
// Clean up
render_mgr.clean_up();
clean_up_time = true;
return
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => { *control_flow = ControlFlow::Exit; },
WindowEvent::Resized(size) => {
windowed_context.resize(size);
mgr.update_size(size.into());
},
_ => { mgr.handler_do(|handler| { handler.window_event(&event); }); }
},
DeviceEvent{ event, ..} => { mgr.handler_do(|handler| { handler.device_event(&event); }); }
Event::NewEvents( _time ) => {
// Emitted when new events arrive from the OS to be processed.
//
// This event type is useful as a place to put code that should be done before you start processing events, such as
// updating frame timing information for benchmarking or checking the [StartCause][crate::event::StartCause] to see
// if a timer set by [ControlFlow::WaitUntil] has elapsed.
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing is about to begin.
// This event is useful as a place to put your code that should be run after all state-changing events have been
// handled and you want to do stuff (updating state, performing calculations, etc) that happens as the "main body"
// of your event loop.
// If your program draws graphics, it's usually better to do it in response to Event::RedrawRequested, which gets
// emitted immediately after this event.
{
let mut handler = mgr.handler.lock().unwrap();
handler.timer.tick();
handler.reset_delta();
if handler.timer.once_per_sec() {
fps = handler.timer.fps;
once_per_sec = true;
}
}
if once_per_sec {
once_per_sec = false;
println!("Once per second FPS: {}", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.update_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""));
textmgr.update_text(mgr.clone(), "FPS", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
}
windowed_context.window().request_redraw();
}
Event::RedrawRequested(_) => {
// Emitted after MainEventsCleared when a window should be redrawn.
// This gets triggered in two scenarios:
// - The OS has performed an operation that's invalidated the window's contents (such as resizing the window).
// - The application has explicitly requested a redraw via Window::request_redraw.
// During each iteration of the event loop, Winit will aggregate duplicate redraw requests into a single event,
// to help avoid duplicating rendering work.
if clean_up_time { return; }
// *** Drawing phase
render_mgr.render();
// _fbo_final.blit_to_screen(&world);
// Write the new frame to the screen!
windowed_context.swap_buffers().unwrap();
}
Event::RedrawEventsCleared => {
// Emitted after all RedrawRequested events have been processed and control flow is about to be taken away from
// the program. If there are no RedrawRequested events, it is emitted immediately after MainEventsCleared.
// This event is useful for doing any cleanup or bookkeeping work after all the rendering tasks have been completed.
}
e => println!("Other Event:\n{:?}", e)
}
});
}
pub const EOF: &str = "\04";
pub fn eof(string: &str) -> String {
[string, EOF].join("")
}
// pub fn call_cmd(cmd: &str) -> Result<String, String> {
// use subprocess::{Exec,Redirection};
// let out = Exec::shell(cmd)
// .stdout(Redirection::Pipe)
// .capture().map_err(|e|e.to_string())?
// .stdout_str();
// return Ok(out.trim().to_owned());
// }
// use nom::{multispace, rest_s};
// named!(_cpu_name<&str, String>,
// do_parse!(
// tag!("Name") >> multispace >> out: rest_s >>
// ( out.to_owned() )
// )
// );
fn mk_cpu_ram_str(cpu: &str, ram: &str, system: &mut sysinfo::System) -> String |
fn cpu_name() -> String {
// use cupid;
let info = cupid::master();
match info {
Some(x) => {
match x.brand_string() {
Some(x) => { ["CPU: ".to_owned(), x.to_owned()].join("") }
_ => { "Could not get CPU Name".to_owned() }
}
}
_ => { "Could not get CPU Name".to_owned() }
}
}
fn get_ram_total(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_total = ((system.get_total_memory() as f32 / 1024.0) / 1024.0).round();
format!("Total Memory: {} GB", ram_total )
}
fn get_ram_used(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_used = (((system.get_used_memory() as f32 / 1024.0) / 1024.0) * 1000.0).round() / 1000.0;
format!("Used Memory : {:.3} GB", ram_used )
}
fn get_hdd() -> String {
let sys = System::new();
let mut out = String::new();
match sys.mounts() {
Ok(mounts) => {
for mount in mounts.iter() {
if mount.total == bytesize::ByteSize::b(0) { continue; }
let mnt = mount.fs_mounted_on.clone();
if is_dir_or_subdir_linux(&mnt, "/boot") { continue; }
if is_dir_or_subdir_linux(&mnt, "/dev") { continue; }
if is_dir_or_subdir_linux(&mnt, "/run") { continue; }
if is_dir_or_subdir_linux(&mnt, "/snap") { continue; }
if is_dir_or_subdir_linux(&mnt, "/sys") { continue; }
out = format!("{}\n{} Size: {}; Free: {}",
out, mount.fs_mounted_on, mount.total, mount.avail);
}
}
Err(x) => println!("\nMounts: error: {}", x)
}
out
}
fn is_dir_or_subdir_linux(test: &str, path: &str) -> bool {
let tc = test.chars().count();
let pc = path.chars().count();
let pc2 = pc + 1;
let path2: &str = &format!("{}/", path);
if (tc == pc && test == path)
|| (tc > pc2 && &test[..pc2] == path2)
{ return true; }
false
} | {
let ram_used = get_ram_used(system);
[cpu.to_owned(), ram.to_owned(), ram_used].join("\n")
} | identifier_body |
main.rs | #![recursion_limit="128"]
//#![allow(unused_imports)]
//#![allow(dead_code)]
extern crate gl;
extern crate glutin;
#[macro_use] extern crate nom;
extern crate image;
extern crate time;
extern crate cupid;
extern crate sysinfo;
extern crate systemstat;
extern crate bytesize;
// extern crate subprocess;
use {
gl::*,
glutin::{
// dpi::*,
event::{Event, WindowEvent, Event::DeviceEvent, },
event_loop::{ControlFlow, EventLoop, },
},
sysinfo::SystemExt,
systemstat::{System, Platform},
// crate::{
// // render::{ * },
// // shader::{ Shader, },
// },
};
// in project stuff
pub mod display; // I think I still need this for storing window dimensions
pub mod gamemgr;
pub mod input;
pub mod loader; // Can be simplified
pub mod render;
pub mod shader;
pub mod text;
pub mod texture; // needed for font atlas but needed things can be ported out
pub mod timer;
pub mod util;
pub use display::Display;
pub use input::Handler;
pub use loader::Loader;
pub use render::{RenderMgr, };
pub use shader::Shader;
pub use timer::Timer;
fn main() {
// Test code for parsing fnt files
// use text::metafile::test_noms;
// test_noms();
// Specify OpenGL version
let gl_request = glutin::GlRequest::Specific(glutin::Api::OpenGl, (4, 3));
let gl_profile = glutin::GlProfile::Core;
// Create a window
let el = EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("RaumEn SysInfo")
.with_inner_size(glutin::dpi::LogicalSize::new(1024.0, 768.0))
.with_maximized(false);
let windowed_context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.build_windowed(wb, &el)
.unwrap();
let windowed_context = unsafe { windowed_context.make_current().unwrap() };
// Set up OpenGL
unsafe {
load_with(|symbol| windowed_context.context().get_proc_address(symbol) as *const _);
ClearColor(0.0, 1.0, 0.0, 1.0);
}
let mut render_mgr = RenderMgr::new();
let mut mgr = render_mgr.mgr.clone();
let mut system = sysinfo::System::new();
let cpu = cpu_name();
let ram = get_ram_total(&mut system);
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let hdd = get_hdd();
let mut fps: f32 = 30.0;
let mut once_per_sec = false;
let mut clean_up_time = false;
{ // Here, we're getting the size of the window in pixels
// and passing it to the update_size() method. It in turn
// updates the Projection Matrix and passes that to
// ALL THE SHADERS, so if you add a SHADER, you need
// to REMEMBER to add that shader to the update_size()
// method near the bottom of this file.
// let dpi = windowed_context.window().get_hidpi_factor();
let size: glutin::dpi::PhysicalSize<u32> = windowed_context.window().inner_size();
mgr.update_size(size.into());
}
{
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.add_font(mgr.clone(), "pirate");
textmgr.add_font(mgr.clone(), "sans");
textmgr.new_text(mgr.clone(), "Title", "SysInfo", "pirate", 4.0, 0.0, 0.0, 1.0, true, true);
textmgr.new_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""), "sans", 2.0, 0.0, 0.4, 1.0, true, true);
textmgr.new_text(mgr.clone(), "FPS", "FPS: 0.0", "sans", 1.5, 0.0, 0.0, 0.3, false, true);
}
// Game loop!
println!("Starting main loop.");
el.run(move |event, _, control_flow| {
// *control_flow = ControlFlow::Wait;
match event {
Event::LoopDestroyed => {
println!("Cleaning Up...");
// Clean up
render_mgr.clean_up();
clean_up_time = true;
return
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => { *control_flow = ControlFlow::Exit; },
WindowEvent::Resized(size) => {
windowed_context.resize(size);
mgr.update_size(size.into());
},
_ => { mgr.handler_do(|handler| { handler.window_event(&event); }); }
},
DeviceEvent{ event, ..} => { mgr.handler_do(|handler| { handler.device_event(&event); }); }
Event::NewEvents( _time ) => {
// Emitted when new events arrive from the OS to be processed.
//
// This event type is useful as a place to put code that should be done before you start processing events, such as
// updating frame timing information for benchmarking or checking the [StartCause][crate::event::StartCause] to see
// if a timer set by [ControlFlow::WaitUntil] has elapsed.
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing is about to begin.
// This event is useful as a place to put your code that should be run after all state-changing events have been
// handled and you want to do stuff (updating state, performing calculations, etc) that happens as the "main body"
// of your event loop.
// If your program draws graphics, it's usually better to do it in response to Event::RedrawRequested, which gets
// emitted immediately after this event.
{
let mut handler = mgr.handler.lock().unwrap();
handler.timer.tick();
handler.reset_delta();
if handler.timer.once_per_sec() {
fps = handler.timer.fps;
once_per_sec = true;
}
}
if once_per_sec {
once_per_sec = false;
println!("Once per second FPS: {}", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.update_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""));
textmgr.update_text(mgr.clone(), "FPS", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
}
windowed_context.window().request_redraw();
}
Event::RedrawRequested(_) => {
// Emitted after MainEventsCleared when a window should be redrawn.
// This gets triggered in two scenarios:
// - The OS has performed an operation that's invalidated the window's contents (such as resizing the window).
// - The application has explicitly requested a redraw via Window::request_redraw.
// During each iteration of the event loop, Winit will aggregate duplicate redraw requests into a single event,
// to help avoid duplicating rendering work.
if clean_up_time { return; }
// *** Drawing phase
render_mgr.render();
// _fbo_final.blit_to_screen(&world);
// Write the new frame to the screen!
windowed_context.swap_buffers().unwrap();
}
Event::RedrawEventsCleared => {
// Emitted after all RedrawRequested events have been processed and control flow is about to be taken away from
// the program. If there are no RedrawRequested events, it is emitted immediately after MainEventsCleared.
// This event is useful for doing any cleanup or bookkeeping work after all the rendering tasks have been completed.
}
e => println!("Other Event:\n{:?}", e)
}
});
}
pub const EOF: &str = "\04";
pub fn eof(string: &str) -> String {
[string, EOF].join("")
}
// pub fn call_cmd(cmd: &str) -> Result<String, String> {
// use subprocess::{Exec,Redirection};
// let out = Exec::shell(cmd)
// .stdout(Redirection::Pipe)
// .capture().map_err(|e|e.to_string())?
// .stdout_str();
// return Ok(out.trim().to_owned());
// }
// use nom::{multispace, rest_s};
// named!(_cpu_name<&str, String>,
// do_parse!(
// tag!("Name") >> multispace >> out: rest_s >>
// ( out.to_owned() )
// )
// );
fn mk_cpu_ram_str(cpu: &str, ram: &str, system: &mut sysinfo::System) -> String {
let ram_used = get_ram_used(system);
[cpu.to_owned(), ram.to_owned(), ram_used].join("\n")
}
fn cpu_name() -> String {
// use cupid;
let info = cupid::master();
match info {
Some(x) => {
match x.brand_string() {
Some(x) => { ["CPU: ".to_owned(), x.to_owned()].join("") }
_ => { "Could not get CPU Name".to_owned() }
}
}
_ => { "Could not get CPU Name".to_owned() }
}
}
fn get_ram_total(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_total = ((system.get_total_memory() as f32 / 1024.0) / 1024.0).round();
format!("Total Memory: {} GB", ram_total )
}
fn get_ram_used(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_used = (((system.get_used_memory() as f32 / 1024.0) / 1024.0) * 1000.0).round() / 1000.0;
format!("Used Memory : {:.3} GB", ram_used )
}
fn get_hdd() -> String {
let sys = System::new();
let mut out = String::new();
match sys.mounts() {
Ok(mounts) => {
for mount in mounts.iter() {
if mount.total == bytesize::ByteSize::b(0) { continue; }
let mnt = mount.fs_mounted_on.clone();
if is_dir_or_subdir_linux(&mnt, "/boot") { continue; }
if is_dir_or_subdir_linux(&mnt, "/dev") { continue; }
if is_dir_or_subdir_linux(&mnt, "/run") |
if is_dir_or_subdir_linux(&mnt, "/snap") { continue; }
if is_dir_or_subdir_linux(&mnt, "/sys") { continue; }
out = format!("{}\n{} Size: {}; Free: {}",
out, mount.fs_mounted_on, mount.total, mount.avail);
}
}
Err(x) => println!("\nMounts: error: {}", x)
}
out
}
fn is_dir_or_subdir_linux(test: &str, path: &str) -> bool {
let tc = test.chars().count();
let pc = path.chars().count();
let pc2 = pc + 1;
let path2: &str = &format!("{}/", path);
if (tc == pc && test == path)
|| (tc > pc2 && &test[..pc2] == path2)
{ return true; }
false
} | { continue; } | conditional_block |
main.rs | #![recursion_limit="128"]
//#![allow(unused_imports)]
//#![allow(dead_code)]
extern crate gl;
extern crate glutin;
#[macro_use] extern crate nom;
extern crate image;
extern crate time;
extern crate cupid;
extern crate sysinfo;
extern crate systemstat;
extern crate bytesize;
// extern crate subprocess;
use {
gl::*,
glutin::{
// dpi::*,
event::{Event, WindowEvent, Event::DeviceEvent, },
event_loop::{ControlFlow, EventLoop, },
},
sysinfo::SystemExt,
systemstat::{System, Platform},
// crate::{
// // render::{ * },
// // shader::{ Shader, },
// },
};
// in project stuff
pub mod display; // I think I still need this for storing window dimensions
pub mod gamemgr;
pub mod input;
pub mod loader; // Can be simplified
pub mod render;
pub mod shader;
pub mod text;
pub mod texture; // needed for font atlas but needed things can be ported out
pub mod timer;
pub mod util;
pub use display::Display;
pub use input::Handler;
pub use loader::Loader;
pub use render::{RenderMgr, };
pub use shader::Shader;
pub use timer::Timer;
fn main() {
// Test code for parsing fnt files
// use text::metafile::test_noms;
// test_noms();
// Specify OpenGL version
let gl_request = glutin::GlRequest::Specific(glutin::Api::OpenGl, (4, 3));
let gl_profile = glutin::GlProfile::Core;
// Create a window
let el = EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("RaumEn SysInfo")
.with_inner_size(glutin::dpi::LogicalSize::new(1024.0, 768.0))
.with_maximized(false);
let windowed_context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.build_windowed(wb, &el)
.unwrap();
let windowed_context = unsafe { windowed_context.make_current().unwrap() };
// Set up OpenGL
unsafe {
load_with(|symbol| windowed_context.context().get_proc_address(symbol) as *const _);
ClearColor(0.0, 1.0, 0.0, 1.0);
}
let mut render_mgr = RenderMgr::new();
let mut mgr = render_mgr.mgr.clone();
let mut system = sysinfo::System::new();
let cpu = cpu_name();
let ram = get_ram_total(&mut system);
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let hdd = get_hdd();
let mut fps: f32 = 30.0;
let mut once_per_sec = false;
let mut clean_up_time = false;
{ // Here, we're getting the size of the window in pixels
// and passing it to the update_size() method. It in turn
// updates the Projection Matrix and passes that to
// ALL THE SHADERS, so if you add a SHADER, you need
// to REMEMBER to add that shader to the update_size()
// method near the bottom of this file.
// let dpi = windowed_context.window().get_hidpi_factor();
let size: glutin::dpi::PhysicalSize<u32> = windowed_context.window().inner_size();
mgr.update_size(size.into());
}
{
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.add_font(mgr.clone(), "pirate");
textmgr.add_font(mgr.clone(), "sans");
textmgr.new_text(mgr.clone(), "Title", "SysInfo", "pirate", 4.0, 0.0, 0.0, 1.0, true, true);
textmgr.new_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""), "sans", 2.0, 0.0, 0.4, 1.0, true, true);
textmgr.new_text(mgr.clone(), "FPS", "FPS: 0.0", "sans", 1.5, 0.0, 0.0, 0.3, false, true);
}
// Game loop!
println!("Starting main loop.");
el.run(move |event, _, control_flow| {
// *control_flow = ControlFlow::Wait;
match event {
Event::LoopDestroyed => {
println!("Cleaning Up...");
// Clean up
render_mgr.clean_up();
clean_up_time = true;
return
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => { *control_flow = ControlFlow::Exit; },
WindowEvent::Resized(size) => {
windowed_context.resize(size);
mgr.update_size(size.into());
},
_ => { mgr.handler_do(|handler| { handler.window_event(&event); }); }
},
DeviceEvent{ event, ..} => { mgr.handler_do(|handler| { handler.device_event(&event); }); }
Event::NewEvents( _time ) => {
// Emitted when new events arrive from the OS to be processed.
//
// This event type is useful as a place to put code that should be done before you start processing events, such as
// updating frame timing information for benchmarking or checking the [StartCause][crate::event::StartCause] to see
// if a timer set by [ControlFlow::WaitUntil] has elapsed.
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing is about to begin.
// This event is useful as a place to put your code that should be run after all state-changing events have been
// handled and you want to do stuff (updating state, performing calculations, etc) that happens as the "main body"
// of your event loop.
// If your program draws graphics, it's usually better to do it in response to Event::RedrawRequested, which gets
// emitted immediately after this event.
{
let mut handler = mgr.handler.lock().unwrap();
handler.timer.tick();
handler.reset_delta();
if handler.timer.once_per_sec() {
fps = handler.timer.fps;
once_per_sec = true;
}
}
if once_per_sec {
once_per_sec = false;
println!("Once per second FPS: {}", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.update_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""));
textmgr.update_text(mgr.clone(), "FPS", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
}
windowed_context.window().request_redraw();
}
Event::RedrawRequested(_) => {
// Emitted after MainEventsCleared when a window should be redrawn.
// This gets triggered in two scenarios:
// - The OS has performed an operation that's invalidated the window's contents (such as resizing the window).
// - The application has explicitly requested a redraw via Window::request_redraw.
// During each iteration of the event loop, Winit will aggregate duplicate redraw requests into a single event,
// to help avoid duplicating rendering work.
if clean_up_time { return; }
// *** Drawing phase
render_mgr.render();
// _fbo_final.blit_to_screen(&world);
// Write the new frame to the screen!
windowed_context.swap_buffers().unwrap();
}
Event::RedrawEventsCleared => {
// Emitted after all RedrawRequested events have been processed and control flow is about to be taken away from
// the program. If there are no RedrawRequested events, it is emitted immediately after MainEventsCleared.
// This event is useful for doing any cleanup or bookkeeping work after all the rendering tasks have been completed.
}
e => println!("Other Event:\n{:?}", e)
}
});
}
pub const EOF: &str = "\04";
pub fn eof(string: &str) -> String {
[string, EOF].join("")
}
// pub fn call_cmd(cmd: &str) -> Result<String, String> {
// use subprocess::{Exec,Redirection};
// let out = Exec::shell(cmd)
// .stdout(Redirection::Pipe)
// .capture().map_err(|e|e.to_string())?
// .stdout_str();
// return Ok(out.trim().to_owned());
// }
// use nom::{multispace, rest_s};
// named!(_cpu_name<&str, String>,
// do_parse!(
// tag!("Name") >> multispace >> out: rest_s >>
// ( out.to_owned() )
// )
// );
fn mk_cpu_ram_str(cpu: &str, ram: &str, system: &mut sysinfo::System) -> String {
let ram_used = get_ram_used(system);
[cpu.to_owned(), ram.to_owned(), ram_used].join("\n")
}
fn cpu_name() -> String {
// use cupid;
let info = cupid::master();
match info {
Some(x) => {
match x.brand_string() {
Some(x) => { ["CPU: ".to_owned(), x.to_owned()].join("") }
_ => { "Could not get CPU Name".to_owned() }
}
}
_ => { "Could not get CPU Name".to_owned() }
}
}
fn get_ram_total(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_total = ((system.get_total_memory() as f32 / 1024.0) / 1024.0).round();
format!("Total Memory: {} GB", ram_total )
}
fn get_ram_used(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_used = (((system.get_used_memory() as f32 / 1024.0) / 1024.0) * 1000.0).round() / 1000.0;
format!("Used Memory : {:.3} GB", ram_used )
}
fn | () -> String {
let sys = System::new();
let mut out = String::new();
match sys.mounts() {
Ok(mounts) => {
for mount in mounts.iter() {
if mount.total == bytesize::ByteSize::b(0) { continue; }
let mnt = mount.fs_mounted_on.clone();
if is_dir_or_subdir_linux(&mnt, "/boot") { continue; }
if is_dir_or_subdir_linux(&mnt, "/dev") { continue; }
if is_dir_or_subdir_linux(&mnt, "/run") { continue; }
if is_dir_or_subdir_linux(&mnt, "/snap") { continue; }
if is_dir_or_subdir_linux(&mnt, "/sys") { continue; }
out = format!("{}\n{} Size: {}; Free: {}",
out, mount.fs_mounted_on, mount.total, mount.avail);
}
}
Err(x) => println!("\nMounts: error: {}", x)
}
out
}
fn is_dir_or_subdir_linux(test: &str, path: &str) -> bool {
let tc = test.chars().count();
let pc = path.chars().count();
let pc2 = pc + 1;
let path2: &str = &format!("{}/", path);
if (tc == pc && test == path)
|| (tc > pc2 && &test[..pc2] == path2)
{ return true; }
false
} | get_hdd | identifier_name |
main.rs | #![recursion_limit="128"]
//#![allow(unused_imports)]
//#![allow(dead_code)]
extern crate gl;
extern crate glutin;
#[macro_use] extern crate nom;
extern crate image;
extern crate time;
extern crate cupid;
extern crate sysinfo;
extern crate systemstat;
extern crate bytesize;
// extern crate subprocess;
use {
gl::*,
glutin::{
// dpi::*,
event::{Event, WindowEvent, Event::DeviceEvent, },
event_loop::{ControlFlow, EventLoop, },
},
sysinfo::SystemExt,
systemstat::{System, Platform},
// crate::{
// // render::{ * },
// // shader::{ Shader, },
// },
};
// in project stuff
pub mod display; // I think I still need this for storing window dimensions
pub mod gamemgr;
pub mod input;
pub mod loader; // Can be simplified
pub mod render;
pub mod shader;
pub mod text;
pub mod texture; // needed for font atlas but needed things can be ported out
pub mod timer;
pub mod util;
pub use display::Display;
pub use input::Handler;
pub use loader::Loader;
pub use render::{RenderMgr, };
pub use shader::Shader;
pub use timer::Timer;
fn main() {
// Test code for parsing fnt files
// use text::metafile::test_noms;
// test_noms();
// Specify OpenGL version
let gl_request = glutin::GlRequest::Specific(glutin::Api::OpenGl, (4, 3));
let gl_profile = glutin::GlProfile::Core;
// Create a window
let el = EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("RaumEn SysInfo")
.with_inner_size(glutin::dpi::LogicalSize::new(1024.0, 768.0))
.with_maximized(false);
let windowed_context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.build_windowed(wb, &el)
.unwrap();
let windowed_context = unsafe { windowed_context.make_current().unwrap() };
// Set up OpenGL
unsafe {
load_with(|symbol| windowed_context.context().get_proc_address(symbol) as *const _);
ClearColor(0.0, 1.0, 0.0, 1.0);
}
let mut render_mgr = RenderMgr::new();
let mut mgr = render_mgr.mgr.clone();
let mut system = sysinfo::System::new();
let cpu = cpu_name();
let ram = get_ram_total(&mut system);
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let hdd = get_hdd();
let mut fps: f32 = 30.0;
let mut once_per_sec = false;
let mut clean_up_time = false;
{ // Here, we're getting the size of the window in pixels
// and passing it to the update_size() method. It in turn
// updates the Projection Matrix and passes that to
// ALL THE SHADERS, so if you add a SHADER, you need
// to REMEMBER to add that shader to the update_size()
// method near the bottom of this file.
// let dpi = windowed_context.window().get_hidpi_factor();
let size: glutin::dpi::PhysicalSize<u32> = windowed_context.window().inner_size();
mgr.update_size(size.into());
}
{
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.add_font(mgr.clone(), "pirate");
textmgr.add_font(mgr.clone(), "sans");
textmgr.new_text(mgr.clone(), "Title", "SysInfo", "pirate", 4.0, 0.0, 0.0, 1.0, true, true);
textmgr.new_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""), "sans", 2.0, 0.0, 0.4, 1.0, true, true);
textmgr.new_text(mgr.clone(), "FPS", "FPS: 0.0", "sans", 1.5, 0.0, 0.0, 0.3, false, true);
}
// Game loop!
println!("Starting main loop.");
el.run(move |event, _, control_flow| {
// *control_flow = ControlFlow::Wait;
match event {
Event::LoopDestroyed => {
println!("Cleaning Up...");
// Clean up
render_mgr.clean_up();
clean_up_time = true;
return
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => { *control_flow = ControlFlow::Exit; },
WindowEvent::Resized(size) => {
windowed_context.resize(size);
mgr.update_size(size.into());
},
_ => { mgr.handler_do(|handler| { handler.window_event(&event); }); }
},
DeviceEvent{ event, ..} => { mgr.handler_do(|handler| { handler.device_event(&event); }); }
Event::NewEvents( _time ) => {
// Emitted when new events arrive from the OS to be processed.
//
// This event type is useful as a place to put code that should be done before you start processing events, such as
// updating frame timing information for benchmarking or checking the [StartCause][crate::event::StartCause] to see
// if a timer set by [ControlFlow::WaitUntil] has elapsed.
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing is about to begin.
// This event is useful as a place to put your code that should be run after all state-changing events have been
// handled and you want to do stuff (updating state, performing calculations, etc) that happens as the "main body"
// of your event loop.
// If your program draws graphics, it's usually better to do it in response to Event::RedrawRequested, which gets
// emitted immediately after this event.
{
let mut handler = mgr.handler.lock().unwrap();
handler.timer.tick();
handler.reset_delta();
if handler.timer.once_per_sec() {
fps = handler.timer.fps;
once_per_sec = true;
}
}
if once_per_sec {
once_per_sec = false;
println!("Once per second FPS: {}", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.update_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""));
textmgr.update_text(mgr.clone(), "FPS", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
}
windowed_context.window().request_redraw();
}
Event::RedrawRequested(_) => {
// Emitted after MainEventsCleared when a window should be redrawn.
// This gets triggered in two scenarios:
// - The OS has performed an operation that's invalidated the window's contents (such as resizing the window).
// - The application has explicitly requested a redraw via Window::request_redraw.
// During each iteration of the event loop, Winit will aggregate duplicate redraw requests into a single event,
// to help avoid duplicating rendering work.
if clean_up_time { return; }
// *** Drawing phase
render_mgr.render();
// _fbo_final.blit_to_screen(&world);
// Write the new frame to the screen!
windowed_context.swap_buffers().unwrap();
}
Event::RedrawEventsCleared => {
// Emitted after all RedrawRequested events have been processed and control flow is about to be taken away from
// the program. If there are no RedrawRequested events, it is emitted immediately after MainEventsCleared.
// This event is useful for doing any cleanup or bookkeeping work after all the rendering tasks have been completed.
}
e => println!("Other Event:\n{:?}", e)
}
});
}
pub const EOF: &str = "\04";
pub fn eof(string: &str) -> String {
[string, EOF].join("")
}
// pub fn call_cmd(cmd: &str) -> Result<String, String> {
// use subprocess::{Exec,Redirection};
// let out = Exec::shell(cmd)
// .stdout(Redirection::Pipe)
// .capture().map_err(|e|e.to_string())?
// .stdout_str();
// return Ok(out.trim().to_owned());
// }
// use nom::{multispace, rest_s};
// named!(_cpu_name<&str, String>,
// do_parse!(
// tag!("Name") >> multispace >> out: rest_s >>
// ( out.to_owned() )
// )
// );
fn mk_cpu_ram_str(cpu: &str, ram: &str, system: &mut sysinfo::System) -> String {
let ram_used = get_ram_used(system);
[cpu.to_owned(), ram.to_owned(), ram_used].join("\n")
}
fn cpu_name() -> String {
// use cupid;
let info = cupid::master();
match info {
Some(x) => {
match x.brand_string() {
Some(x) => { ["CPU: ".to_owned(), x.to_owned()].join("") }
_ => { "Could not get CPU Name".to_owned() }
}
}
_ => { "Could not get CPU Name".to_owned() }
}
}
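// Illustrative, flatter formulation of the nested matches above (hypothetical
// name, same behavior):
fn cpu_name_flat() -> String {
    cupid::master()
        .and_then(|info| info.brand_string().map(|s| format!("CPU: {}", s)))
        .unwrap_or_else(|| "Could not get CPU Name".to_owned())
}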
fn get_ram_total(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_total = ((system.get_total_memory() as f32 / 1024.0) / 1024.0).round();
format!("Total Memory: {} GB", ram_total )
}
fn get_ram_used(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_used = (((system.get_used_memory() as f32 / 1024.0) / 1024.0) * 1000.0).round() / 1000.0;
format!("Used Memory : {:.3} GB", ram_used )
}
fn get_hdd() -> String {
let sys = System::new();
let mut out = String::new();
match sys.mounts() {
Ok(mounts) => {
for mount in mounts.iter() {
if mount.total == bytesize::ByteSize::b(0) { continue; }
let mnt = mount.fs_mounted_on.clone();
if is_dir_or_subdir_linux(&mnt, "/boot") { continue; }
if is_dir_or_subdir_linux(&mnt, "/dev") { continue; }
if is_dir_or_subdir_linux(&mnt, "/run") { continue; }
if is_dir_or_subdir_linux(&mnt, "/snap") { continue; }
if is_dir_or_subdir_linux(&mnt, "/sys") { continue; } | out = format!("{}\n{} Size: {}; Free: {}",
out, mount.fs_mounted_on, mount.total, mount.avail);
}
}
Err(x) => println!("\nMounts: error: {}", x)
}
out
}
fn is_dir_or_subdir_linux(test: &str, path: &str) -> bool {
let tc = test.chars().count();
let pc = path.chars().count();
let pc2 = pc + 1;
let path2: &str = &format!("{}/", path);
if (tc == pc && test == path)
|| (tc > pc2 && &test[..pc2] == path2)
{ return true; }
false
} | random_line_split | |
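// Illustrative test sketch (hypothetical, not part of the original file): the
// `pc + 1` and the appended slash make "/boot" match itself and "/boot/efi" but
// not "/bootleg".
#[cfg(test)]
mod mount_filter_tests {
    use super::is_dir_or_subdir_linux;

    #[test]
    fn prefix_requires_separator() {
        assert!(is_dir_or_subdir_linux("/boot", "/boot"));
        assert!(is_dir_or_subdir_linux("/boot/efi", "/boot"));
        assert!(!is_dir_or_subdir_linux("/bootleg", "/boot"));
    }
}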
lib.rs | #[macro_use]
extern crate compre_combinee;
extern crate combine;
mod errors;
mod details;
mod traits;
mod stop_watch;
use std::collections::HashMap;
use combine::{parser, eof, satisfy, choice, attempt};
use combine::parser::range::{take_while1};
use combine::parser::char::*;
use combine::{Parser, many, optional, skip_many, sep_by, between};
pub use crate::errors::ErrorCause;
pub use crate::details::Node;
pub use crate::traits::*;
use std::{f64, str};
use std::convert::TryFrom;
use smol_str::SmolStr;
fn parse_hex<'a>() -> impl Parser<&'a str, Output = u32> {
satisfy(|c: char|
(c >= '0' && c <= '9') ||
(c >= 'a' && c <= 'f') ||
(c >= 'A' && c <= 'F')
).map(|c| if c >= '0' && c <= '9' {
c as u64 - '0' as u64
} else if c >= 'a' && c <= 'f' {
10 + c as u64 - 'a' as u64
} else {
10 + c as u64 - 'A' as u64
} as u32
)
}
fn unicode_char<'a>() -> impl Parser<&'a str, Output = Option<char>> {
c_hx_do!{
__ <- string(r#"\u"#),
d3 <- parse_hex(),
d2 <- parse_hex(),
d1 <- parse_hex(),
d0 <- parse_hex();
{
let unicode = d0 +
0x10 * d1 +
0x100 * d2 +
0x1000 * d3;
char::try_from(unicode).ok()
}
}
}
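// Worked example (illustrative): r"\u0041" yields d3=0, d2=0, d1=4, d0=1, so
// unicode = 1 + 0x10*4 + 0x100*0 + 0x1000*0 = 0x41 and char::try_from(0x41)
// gives Some('A'). Invalid scalar values such as an unpaired surrogate
// (r"\uD800") make try_from fail, mapping to None instead of aborting the parse.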
#[derive(PartialEq)]
enum StringPiece<'a >
{
Ref(&'a str),
Char(Option<char>)
}
fn braced_parser<'a, PBL, P, PBR, O>(pbl: PBL, p: P, pbr: PBR) -> impl Parser<&'a str, Output = O>
where
PBL: Parser<&'a str>,
PBR: Parser<&'a str>,
P: Parser<&'a str, Output = O>
{
between(
c_compre![c; c <- pbl, __ <- skip_many(space())],
c_compre![c; __ <- skip_many(space()), c <- pbr],
p
)
}
fn string_part<'a>() -> impl Parser<&'a str, Output = Vec<StringPiece<'a >>> {
many(
choice(
(
attempt(take_while1(|c: char| c != '\\' && c != '"' && c != '\n' && c != '\r' && c != '\t')
.map(|chars: &str| StringPiece::Ref(chars))),
attempt(string("\\\"").map(|_|StringPiece::Ref("\""))),
attempt(string("\\\\").map(|_|StringPiece::Ref("\\"))),
attempt(string("\\n").map(|_|StringPiece::Ref("\n"))),
attempt(string("\\t").map(|_|StringPiece::Ref("\t"))),
attempt(string("\\/").map(|_|StringPiece::Ref("/"))),
attempt(string("\\r").map(|_|StringPiece::Ref("\r"))),
attempt(string("\\f").map(|_|StringPiece::Ref("\u{000c}"))),
attempt(string("\\b").map(|_|StringPiece::Ref("\u{0008}"))),
attempt(unicode_char().map(|s|StringPiece::Char(s))),
)
)
)
}
fn string_parser_inner<'a>() -> impl Parser<&'a str, Output = SmolStr> {
c_hx_do! {
x <- between(char('"'), char('"'), string_part());
{
let cap = x.iter().fold(0, |acc, s|
acc +
match s {
StringPiece::Ref(strref) => strref.len(),
StringPiece::Char(c) => c.map(|c_inner| c_inner.len_utf8()).unwrap_or(0)
}
);
if cap <= 22 {
let mut buf: [u8; 22] = [0; 22];
let mut offset = 0;
for s in x.iter() {
match s {
StringPiece::Ref(strref) => {
for &b in strref.as_bytes() {
buf[offset] = b;
offset += 1;
}
},
StringPiece::Char(c) => {
if let Some(chr) = c {
chr.encode_utf8(&mut buf[offset..]);
offset += chr.len_utf8();
}
}
}
}
return unsafe {
SmolStr::new(str::from_utf8_unchecked(&buf[0..cap]))
};
}
let mut str = String::with_capacity(cap);
for s in x.iter() {
match s {
StringPiece::Ref(strref) => str.push_str(strref),
StringPiece::Char(c) => if let Some(chr) = c { str.push(*chr); }
}
}
SmolStr::new(str)
}
}
}
fn string_parser<'a>() -> impl Parser<&'a str, Output = Node> {
string_parser_inner().map(|x| Node::String(x))
}
fn digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
take_while1(|c: char| c >= '0' && c <= '9')
}
#[inline(always)]
fn power(lhs: f64, rhs: f64) -> f64 {
lhs.powf(rhs)
}
fn trailing_digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
c_hx_do! {
__ <- char('.'),
rest <- digit_sequence();
rest
}
}
| __ <- satisfy(|c: char| c == 'e' || c == 'E'),
sign_char <- optional(satisfy(|c: char| c == '+' || c == '-')),
digits <- digit_sequence();
{
let sign = match sign_char {
Some('-') => -1.0,
_ => 1.0
};
let mut acc = 0;
for c in digits.as_bytes() {
acc = acc * 10 + (c - b'0') as u64;
}
power(10.0, sign * acc as f64)
}
}
}
#[derive(PartialEq, Copy, Clone)]
enum NumberPrefix<'a >
{
LeadingZero,
Digits(char, &'a str)
}
fn leading_zero_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
char('0').map(|_| NumberPrefix::LeadingZero)
}
fn leading_digits_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
c_hx_do! {
leading_digit <- satisfy(|c: char| c >= '1' && c <= '9'),
digs <- optional(digit_sequence());
NumberPrefix::Digits(leading_digit, digs.unwrap_or(""))
}
}
fn leading_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
choice((
attempt(leading_digits_parser()),
attempt(leading_zero_parser()),
))
}
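// Illustrative note: the attempt(...) wrappers turn a partially-consuming failure
// into a backtrackable one, so choice can retry the next branch from the original
// position; e.g. "0.5" falls through to the LeadingZero branch after the
// digits branch rejects '0'.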
fn number_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do! {
minus_sign <- optional(char('-')),
leading <- leading_parser(),
trail <- optional(trailing_digit_sequence()),
exp <- optional(exponent_parser());
{
Node::Number({
let mut acc = match leading {
NumberPrefix::LeadingZero => 0.0,
NumberPrefix::Digits(leading_digit, l_digs) => {
let mut l = (leading_digit as u8 - b'0') as u64;
for c in l_digs.as_bytes() {
l = l * 10 + (c - b'0') as u64;
}
l as f64
}
};
if let Some(t_digs) = trail {
let mut divider = 1.0;
for c in t_digs.as_bytes() {
divider /= 10.0;
acc += (c - b'0') as f64 * divider;
}
}
if let Some(exponent) = exp {
acc *= exponent;
}
if let Some(_) = minus_sign {
-acc
} else {
acc
}
})
}
}
}
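// Worked example (illustrative): for "-12.5e2" the leading digits give 12.0, the
// trailing ".5" adds 5 * 0.1, the exponent contributes power(10.0, 2) = 100.0,
// and the minus sign negates: the parser yields Node::Number(-1250.0).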
fn bool_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do!{
word <- string("true").or(string("false"));
match word {
"true" => Node::Boolean(true),
_ => Node::Boolean(false)
}
}
}
fn null_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do!{
_word <- string("null");
Node::Null
}
}
macro_rules! ref_parser {
($parser_fn:ident) => {
parser(|input| {
let _: &mut &str = input;
$parser_fn().parse_stream(input).into_result()
})
}
}
fn primitive_parser<'a>() -> impl Parser<&'a str, Output = Node> {
let possible_parser = bool_parser()
.or(number_parser())
.or(string_parser())
.or(null_parser())
.or(ref_parser!(array_parser))
.or(ref_parser!(dictionary_parser));
c_hx_do! {
__ <- skip_many(space()),
pars <- possible_parser,
___ <- skip_many(space());
pars
}
}
fn array_parser<'a>() -> impl Parser<&'a str, Output = Node> {
braced_parser(
char('['),
sep_by(primitive_parser(), char(',')),
char(']')
).map(|nodes: Vec<Node>|
Node::Array(nodes)
)
}
fn pair_parser<'a>() -> impl Parser<&'a str, Output = Option<(SmolStr, Node)>> {
let str_parser = c_hx_do!{
__ <- skip_many(space()),
stp <- string_parser_inner(),
___ <- skip_many(space());
stp
};
c_hx_do!{
l <- str_parser,
__ <- char(':'),
r <- primitive_parser();
Some((l, r))
}
}
fn dictionary_parser<'a>() -> impl Parser<&'a str, Output = Node> {
braced_parser(
char('{'),
sep_by(pair_parser(), char(',')),
char('}')
).map(|mut pairs: Vec<Option<(SmolStr, Node)>>| {
let mut dict = HashMap::with_capacity(pairs.len());
for mut pair in pairs {
let (l, r) = mem::replace(&mut pair, None).unwrap();
dict.insert(l, r);
}
Node::Object(
dict
)
})
}
fn json_parser<'a>() -> impl Parser<&'a str, Output = Node> {
null_parser()
.or(bool_parser())
.or(number_parser())
.or(string_parser())
.or(array_parser())
.or(dictionary_parser())
}
pub fn parse_json(content: &str) -> Result<Node, String> {
let mut parser = c_hx_do!{
__ <- skip_many(space()),
json <- json_parser(),
___ <- skip_many(space()),
____ <- eof();
json
};
let res = parser.parse(content);
match res {
Err(x) => Err(format!("{}", x.to_string())),
Ok((res,_)) => Ok(res)
}
} | fn exponent_parser<'a>() -> impl Parser<&'a str, Output = f64> {
c_hx_do!{ | random_line_split |
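// Minimal usage sketch for parse_json above (hypothetical crate name and main):
//
//     fn main() {
//         let node = json_parser_lib::parse_json(r#"{"nums": [1, 2.5e1], "ok": true}"#)
//             .expect("valid JSON");
//         println!("{:?}", node); // Object({"nums": Array([...]), "ok": Boolean(true)})
//     }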