file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
data_tensorboard.py | import tensorflow as tf
from tensorflow import keras
import numpy as np
import os
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
"""
crop_top takes as an input img which is an array of shape (x, y, 3) and
percentage of picture to crop
"""
def crop_top(img, percent=0.15):
offset = int(img.shape[0] * percent) #cut the top portion of image
return img[offset:] # return image
"""
central_crop takes as an input img which is an array of shape (x, y, 3)
"""
def central_crop(img):
size = min(img.shape[0], img.shape[1]) # min of x and y
offset_h = int((img.shape[0] - size) / 2) # horizontal len
offset_w = int((img.shape[1] - size) / 2) # vertical
return img[offset_h:offset_h + size, offset_w:offset_w + size] # makes square image and centered
"""
process_image_file take as an input path of the photo for example data/train/1-s2.0-S0929664620300449-gr2_lrg-a.jpg,
top_percante for e.g top_percante = 0.08, and size of on axis of the image. In our case it will be 480
"""
def process_image_file(filepath, top_percent, size):
img = cv2.imread(filepath) # load image as array of shape (x, y , 3)
img = crop_top(img, percent=top_percent) # use function define above
img = central_crop(img) # use function define above
img = cv2.resize(img, (size, size)) # resize image from (min(x, y), min(x,y)) to (480,480). noticed that it remains
# of the shape with 3 chanels (480,480,3)
return img
"""
random_ratio_resize takes as an input image path, prob ,the probability of rotation if the random value is bigger than
prob do nothing and delta set as default to 0.1. As this function is used in code after central_crop which squares the image
ration will be 1. So we take then random value between segmet form [1-dleta, 1+ dleta]
"""
def random_ratio_resize(img, prob=0.3, delta=0.1):
if np.random.rand() >= prob: # bigger do nothing
return img
ratio = img.shape[0] / img.shape[1] # in our case 1
ratio = np.random.uniform(max(ratio - delta, 0.01), ratio + delta) # random value form [1-delta, 1+delta]. if delta
# change we prevent from left end of segment being non positve
if ratio * img.shape[1] <= img.shape[1]:
size = (int(img.shape[1] * ratio), img.shape[1]) # e.g shape of (474, 480) after this operation
else:
size = (img.shape[0], int(img.shape[0] / ratio)) #e.g shape of (480, 472) after this operation
dh = img.shape[0] - size[1] # could be zero or (480 - less number than 480)
top, bot = dh // 2, dh - dh // 2 # could be zeros ot the sum up to dh e.g dh = 9 then top = 4, bot = 5
dw = img.shape[1] - size[0] # similar to above
left, right = dw // 2, dw - dw // 2
if size[0] > 480 or size[1] > 480: #should not happen casue one of the coordinates should always be 480
print(img.shape, size, ratio)
img = cv2.resize(img, size) # resize image
img = cv2.copyMakeBorder(img, top, bot, left, right, cv2.BORDER_CONSTANT,
(0, 0, 0)) # this function makes image back to shape of (480, 480, 3) however it add black famre
# around the image
if img.shape[0] != 480 or img.shape[1] != 480: # should have happned since the ouput shape after copyMakeBorder supposed to be
# (480,480, 3)
raise ValueError(img.shape, size) # in case of error raise exception
return img
_augmentation_transform = ImageDataGenerator(
featurewise_center=False, # Boolean. Set input mean to 0 over the dataset, feature-wise.
featurewise_std_normalization=False, # Boolean. Divide inputs by std of the dataset, feature-wise.
rotation_range=10, # Int. Degree range for random rotations.
width_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total width, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-width_shift_range, +width_shift_range)
# With width_shift_range=2 possible values are integers [-1, 0, +1], same as
# with width_shift_range=[-1, 0, +1], while with width_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
height_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total height, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-height_shift_range, +height_shift_range)
#With height_shift_range=2 possible values are integers [-1, 0, +1], same as
# with height_shift_range=[-1, 0, +1], while with height_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
horizontal_flip=True, # Boolean. Randomly flip inputs horizontally.
brightness_range=(0.9, 1.1), # Tuple or list of two floats. Range for picking a brightness shift value from.
zoom_range=(0.85, 1.15), # Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].
fill_mode='constant', # One of {"constant", "nearest", "reflect" or "wrap"}. Default is 'nearest'. Points outside
# the boundaries of the input are filled according to the given mode:
#'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
#'nearest': aaaaaaaa|abcd|dddddddd
#'reflect': abcddcba|abcd|dcbaabcd
#'wrap': abcdabcd|abcd|abcdabcd
cval=0., # Float or Int. Value used for points outside the boundaries when fill_mode = "constant".
)
"""
apply_augmentation takes as input img which is an array of shape (x, y, 3)
"""
def apply_augmentation(img):
img = random_ratio_resize(img) #defina above
img = _augmentation_transform.random_transform(img) # Applies a random transformation to an image.
return img
"""
_process_csv_file take as an input a file in our case these are train_split.txt and test_split.txt (names may differ)
"""
def _process_csv_file(file):
with open(file, 'r') as fr: # open file
files = fr.readlines() # read lines
return files
class BalanceCovidDataset(keras.utils.Sequence):
'Generates data for Keras'
def __init__(
self,
data_dir,
csv_file,
is_training=True,
batch_size=8,
input_shape=(224, 224), # here default shape is (224, 224) becasue these values were for former models,
# In another file we set it for (480, 480)
n_classes=3, # normal, pneunomia, COVID-19
num_channels=3, # depth of image. Although the images are grey we keep this chanel (with possibility to delete this) | },
shuffle=True,
augmentation=apply_augmentation,
covid_percent=0.3,
class_weights=[1., 1., 6.], # default weight of classes. The less numbered class gets is more worthy than the others
# in this case COVID_19
top_percent=0.08 # here set to 0.08, though in above functions was set to 0.15
):
'Initialization' # seeting values in constructor
self.datadir = data_dir
self.dataset = _process_csv_file(csv_file)
self.is_training = is_training
self.batch_size = batch_size
self.N = len(self.dataset)
self.input_shape = input_shape
self.n_classes = n_classes
self.num_channels = num_channels
self.mapping = mapping
self.shuffle = True
self.covid_percent = covid_percent
self.class_weights = class_weights
self.n = 0
self.augmentation = augmentation
self.top_percent = top_percent
datasets = {'normal': [], 'pneumonia': [], 'COVID-19': []} #dictionary for classes
for l in self.dataset: # iterate for dataset
datasets[l.split()[2]].append(l) # the second argument describes the name of the class e.g l.split()[2] - normal
# append the whole line to dictionary.
self.datasets = [
datasets['normal'] + datasets['pneumonia'],
datasets['COVID-19'],
] # set dataset to list of list where the first one is the conctaenation of lists 'normal' and 'pneumonia', and the
# second one is COVID_19
print(len(self.datasets[0]), len(self.datasets[1]))
self.on_epoch_end() # is triggered once at the very beginning as well as at the end of each epoch.
# If the shuffle parameter is set to True, we will get a new order of exploration at
# each pass (or just keep a linear exploration scheme otherwise).
def __next__(self): # mothod that need to be implement, keras generator
# Get one batch of data
batch_x, batch_y, weights = self.__getitem__(self.n) # if the numer of batch is less than number of batches we call the
# __getitem__ methdd
# Batch index
self.n += 1
# If we have processed the entire dataset then
if self.n >= self.__len__(): # it means that we fed all of our training exmaples in this epoch
self.on_epoch_end() # schuffle traing set
self.n = 0 # set to zero
return batch_x, batch_y, weights
def __len__(self):
return int(np.ceil(len(self.datasets[0]) / float(self.batch_size))) # returns the numer of batches that we will have
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
for v in self.datasets:
np.random.shuffle(v) #shuffle afetr each epoch. This is done becouse we want our model to generalzie as much as we can
# it shuffles the concatenated list of normal and pneunomia, and shuffles the COVID_19 list
def __getitem__(self, idx):
batch_x, batch_y = np.zeros(
(self.batch_size, *self.input_shape,
self.num_channels)), np.zeros(self.batch_size) # batch_x has a shape of (8, 480, 480. 3) and batch_y = (8), the * is used beacuse
# the input_shape is a tuple which len don't have to be 2 (in our case yes, but the python
# languages requries this)
batch_files = self.datasets[0][idx * self.batch_size:(idx + 1) *
self.batch_size] # we take a batch_size of training examples from concatenated list of normal and pneunomia
# len of batch files is equal to batch_size = 8
# upsample covid cases
covid_size = max(int(len(batch_files) * self.covid_percent), 1) # we would like to have at lest 1 exmaple of COVID-19 in our batch_size
# setting the proper value of covid_percent gives us more. In case of
# covid_percent = 0.3 and batch_size = 8, it returns int(2.4) = 2
covid_inds = np.random.choice(np.arange(len(batch_files)), # chose covid_size (2) random indexes from range 0 to batch_files - 1.
size=covid_size,
replace=False)
covid_files = np.random.choice(self.datasets[1], # chose random COVID_19 examples (in our case 2) from list of COVID_19 records
size=covid_size,
replace=False)
for i in range(covid_size):
batch_files[covid_inds[i]] = covid_files[i] # change chosen examples with those COVID-19 ones. Noticed that in case of batch_size = 8
# we have 2 COVID-19 exmaples but the rest 6 is in unkown ratio. eg it could be 6:0, 1:5 etc
for i in range(len(batch_files)):
sample = batch_files[i].split() # take sample form batch
if self.is_training: # if is_training = true this i an training sample. We do not make augmentation for test set
folder = 'train'
else:
folder = 'test'
x = process_image_file(os.path.join(self.datadir, folder, sample[1]), # preprocess an image
self.top_percent,
self.input_shape[0])
if self.is_training and hasattr(self, 'augmentation'): # if traing sample we do augmentation
x = self.augmentation(x)
x = x.astype('float32') / 255.0 # we normalized the values to be [0,1] the format is png and jpeg not dicom
y = self.mapping[sample[2]] # label sample second argument in sample is class name
batch_x[i] = x # bulid X batch
batch_y[i] = y # build y batch
class_weights = self.class_weights # use class weight to denote its importance
weights = np.take(class_weights, batch_y.astype('int64')) # e.g we have a y batch of np.array([2, 2, 0, 1, 0, 0, 1, 0]) and
# class_weights = [1,1,6] we get for each sample te result of
# array([6, 6, 1, 1, 1, 1, 1, 1])
return batch_x, keras.utils.to_categorical(batch_y, num_classes=self.n_classes), weights # to categorcial makes one_hot_encding in our case
"""
Worthy note: The class ImageDataGenerertor wroks like this. We feed it e.g with 8 samples of our training set(batch). Then we use our define
transofrmation. As a result we get new 8 samples of that have never been seen by our model. We feed it with this not with the orgnial 8 examples.
The motivation behind this is to genelrize the model. In each epoch we feed model with difent batches with slitly chanhes to images caused by our
trasformations. Every Sequence must implement the __getitem__ and the __len__ methods. If you want to modify your dataset between epochs you may
implement on_epoch_end. The method __getitem__ should return a complete batch.
""" | mapping={
'normal': 0,
'pneumonia': 1,
'COVID-19': 2 | random_line_split |
start-webserver.py | import sys
sys.path.append('..')
from aips import *
import threading
import webbrowser
import http.server
import requests
import json
import urllib
from staticmap import StaticMap, CircleMarker
from urllib.parse import urlparse, parse_qs
import os
FILE = 'semantic-search'
#HOST = os.getenv('HOST') or 'localhost'
AIPS_WEBSERVER_PORT = os.getenv('WEBSERVER_PORT') or 2345
#SOLR_HOST = os.getenv('SOLR_HOST') or 'aips-solr'
#SOLR_PORT = os.getenv('SOLR_PORT') or 8983
#SOLR_HOST_URL = "http://" + SOLR_HOST + ":" + str(SOLR_PORT) + "/solr"
SOLR_URL = "http://localhost:8983/solr"
def query_solr(collection,query):
response = requests.post(SOLR_URL + '/' + collection + '/select',
{
"type": 'POST',
"data": json.puts(query),
"dataType": 'json',
"contentType": 'application/json'
});
return response
def tag_query(post_body):
return requests.post(SOLR_URL + '/entities/tag?json.nl=map&sort=popularity%20desc&matchText=true&echoParams=all&fl=id,type,canonical_form,name,country:countrycode_s,admin_area:admin_code_1_s,popularity,*_p,command_function', post_body).text
def tag_places(post_body):
x = json.dumps(post_body)
return requests.post(SOLR_URL + '/reviews/select', json=post_body).text
def post_search(post_body):
x = json.dumps(post_body)
return requests.post(SOLR_URL + '/reviews/select', json=post_body).text
def queryTreeToResolvedString(query_tree):
resolved_query = ""
for i in range(len(query_tree)):
if (len(resolved_query) > 0):
resolved_query += " "
resolved_query += query_tree[i]['query']
return resolved_query
def run_search(text):
#http://localhost:8983/solr/places/select?q=%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=%2234.9362399,-80.8379247%22}&fl=name_s,location_p,city_s,doc_type_s,state_s&debug=true&qf=text_t
#url = "http://localhost:8983/solr/places/select?q=%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=34.9362399,-80.8379247}&qf=text_t&defType=lucene"
#solrQuery = {"query": text, "params":{ "defType": "lucene", "qf": "name_t^100 text_t city_t^0.1 categories_t^0.01", "debug": "true"}}
#return requests.post('http://localhost:8983/solr/places/select', json=solrQuery).text
#solrQuery = {"query": "%2B{!edismax v=\'bbq^0.9191 ribs^0.6186 pork^0.5991'} %2B{!geofilt d=50 sfield=location_p pt=34.9362399,-80.8379247}", "params":{ "defType": "lucene", "qf": "name_t^100 text_t city_t^0.1 categories_t^0.01", "debug": "true"}}
#return requests.post('http://localhost:8983/solr/places/select', json=json.dumps(solrQuery)).text
#q = "%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=34.9362399,-80.8379247}"
q = urllib.parse.quote(text)
print(q)
#q=text.replace("+", "%2B") #so it doesn't get interpreted as space
qf="text_t"
defType="lucene"
return requests.get(SOLR_URL + "/reviews/select?q=" + q + "&qf=" + qf + "&defType=" + defType).text
def process_basic_query(query_bytes):
text = query_bytes.decode('UTF-8')
response = {
"resolved_query": '+{!edismax mm=100% v="' + escapeQuotesInQuery(text) + '"}'
}
return response
def process_semantic_query(query_bytes):
text = query_bytes.decode('UTF-8')
data = tag_query(query_bytes)
tagged_response = json.loads(data)
#loop through all documents (entities) returned
doc_map={} # reset to empty
if (tagged_response['response'] and tagged_response['response']['docs']):
docs = tagged_response['response']['docs']
for doc in docs:
doc_map[doc['id']] = doc
#for (d=0; d<Object.keys(docs).length; d++) {
# let doc = docs[d];
# doc_map[doc.id]=doc;
#}
#sort doc_map by popularity so first most popular always wins
#def popularity_sort(doc_a, doc_b){
# return a.popularity - b.popularity;
#}
#//doc_map.sort(popularity_sort);
#}
query_tree = []
tagged_query = ""
transformed_query =""
if (tagged_response['tags'] is not None):
tags = tagged_response['tags']
#//var lastStart = 0;
lastEnd = 0
metaData = {}
for tag in tags:
#tag = tags[key]
matchText = tag['matchText']
doc_ids = tag['ids']
#pick top-ranked docid
best_doc_id = None
for doc_id in doc_ids:
if (best_doc_id):
if (doc_map[doc_id]['popularity'] > doc_map[best_doc_id]['popularity']):
best_doc_id = doc_id
else:
best_doc_id = doc_id
best_doc = doc_map[best_doc_id]
#store the unknown text as keywords
nextText = text[lastEnd:tag['startOffset']].strip()
if (len(nextText) > 0): #not whitespace
query_tree.append({ "type":"keyword", "known":False, "surface_form":nextText, "canonical_form":nextText })
tagged_query += " " + nextText
transformed_query += " " + "{ type:keyword, known: false, surface_form: \"" + nextText + "\"}"
# store the known entity as entity
query_tree.append(best_doc) #this is wrong. Need the query tree to have _all_
# interpretations available and then loop through them to resolve. TODO = fix this.
tagged_query += " {" + matchText + "}"
#//transformed_query += " {type: " + best_doc.type + ", canonical_form: \"" + best_doc.canonical_form + "\"}";
transformed_query += json.dumps(best_doc)
lastEnd = tag['endOffset']
if (lastEnd < len(text)):
finalText = text[lastEnd:len(text)].strip()
if (len(finalText) > 0):
query_tree.append({ "type":"keyword", "known":False, "surface_form":finalText, "canonical_form":finalText })
tagged_query += " " + finalText
transformed_query += " " + "{ type:keyword, known: false, surface_form: \"" + finalText + "\"}"
#finalquery = {"query_tree": query_tree}
#let query = {query_tree: query_tree}; //so we can pass byref
final_query = resolveQuery(query_tree)
#if (query != null){ //short circuit if new request has been issued
resolved_query = queryTreeToResolvedString(query_tree)
#UI.updateResolvedQuery(resolved_query)
#}
response = {
"tagged_query": tagged_query,
"transformed_query": transformed_query,
"resolved_query": resolved_query,
"tagger_data": tagged_response
}
return response
def resolveQuery(query_tree):
query_tree = processCommands(query_tree)
# Now process everything that is not yet resolved
for position in range(len(query_tree)):
item = query_tree[position];
if (item["type"] != "solr"): #already resolved
if (item["type"] == "keyword"):
#TODO: this currently looks up ALL unknown keywords in the SKG, which isn't very smart
#need to switch to looking up meaningful phrases in next pass. This is mostly for testing
#at the moment, so putting up with the noise temporarily.
categoryAndTermVector = None
#TODO: figure out way (probably timestamp-based) to guarantee processing in order given current async nature
solrResponse = get_category_and_term_vector_solr_response(item["surface_form"])
categoryAndTermVector = parse_category_and_term_vector_from_solr_response(solrResponse)
#if (latestAsyncRequestID != categoryAndTermVector.asyncRequestID){
# return null;
#}
queryString = ""
if ("term_vector" in categoryAndTermVector):
|
if ("category" in categoryAndTermVector):
if (len(queryString) > 0):
queryString += " "
queryString += "+doc_type:\"" + categoryAndTermVector["category"] + "\""
if (len(queryString) == 0):
queryString = item["surface_form"] #just keep the input as a keyword
query_tree[position] = { "type":"solr", "query": "+{!edismax v=\"" + escapeQuotesInQuery(queryString) + "\"}" }
elif (item["type"] == "color"):
solrQuery = "+colors_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
elif (item["type"] == "known_item" or item["type"] == "city" or item["type"] == "event"):
solrQuery = "+name_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
elif (item["type"] == "brand"):
solrQuery = "+brand_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
else:
query_tree[position] = {"type":"solr", "query": "+{!edismax v=\"" + escapeQuotesInQuery(item["surface_form"]) + "\"}"}
return query_tree
def escapeQuotesInQuery(query):
return query.replace('"', '\\"')
def processCommands(query_tree):
position = 0
while position < len(query_tree):
item = query_tree[position]
# process commands. For now, going left to right and then sorting by priority when ambiguous commands occur;
# consider other weighting options later.
if (item['type'] == "command"):
commandIsResolved = False
command = item['command_function']
if (command):
query = {"query_tree": query_tree} #pass by-ref
commandIsResolved = eval(item['command_function']); #Careful... there is code in the docs that is being eval'd.
#MUST ENSURE THESE DOCS ARE SECURE, OTHERWISE THIS WILL INTRODUCE A POTENTIAL SECURITY THREAT (CODE INJECTION)
#else:
#Alert ("Error: " + query.query_tree.canonical_form + " has no command function.");
if (False == commandIsResolved):
#Bad command. Just remove for now... could alternatively keep it and run as a keyword
query_tree.pop(position) #.splice(position,1)
position += 1
return query_tree
def cmd_popularity(query, position):
if (len(query['query_tree']) -1 > position):
query['query_tree'][position] = {"type":"solr", "query": '+{!func v="mul(if(stars_i,stars_i,0),20)"}'}
return True
else:
return False
def cmd_location_distance(query, position):
#TODO: Notes. Need a "multi-location-hopping" resolver in here. For example,
#hotels near bbq near haystack. This is a search for doctype=hotels, join with (doctype=restaurant AND bbq OR barbecue OR ...) filtered on distance. The first "near" requires pushing the second to a sub-query (join/graph) and then takes over as the actual location distance command.
if (len(query['query_tree']) -1 > position):
nextEntity = query['query_tree'][position + 1]
if (nextEntity['type'] == "city"):
query['query_tree'].pop(position + 1); #remove next element since we're inserting into command's position
query['query_tree'][position] = {"type":"solr",
"query": create_geo_filter(nextEntity['location_p'],
"location_p", 50)}
return True
elif 'coordinates_pt' in nextEntity:
query['query_tree'].pop(position + 1) #remove next element since we're inserting into command's position
query['query_tree'][position] = {"type":"solr",
"query": create_geo_filter(nextEntity['coordinates_pt'],
"coordinates_pt", 50) }
return True
elif nextEntity['type'] == "event":
#nextEntity doesn't have coordinates on it, so try traversing to find coordinates on a parent node (i.e. a venue)
query['query_tree'].pop(position + 1); #remove next element since we're inserting into command's position
quickHack = None
if (nextEntity['canonical_form'] == "activate conference"):
quickHack = "activate"
if (nextEntity['canonical_form'] == "haystack conference"):
quickHack = "haystack"
attemptedGraphLookup = find_location_coordinates(quickHack)
if ('response' in attemptedGraphLookup
and 'docs' in attemptedGraphLookup['response']
and len(attemptedGraphLookup['response']['docs']) > 0):
query['query_tree'][position] = \
{ "type":"solr",
"query": create_geo_filter(
attemptedGraphLookup['response']['docs'][0]['coordinates_s'],
"coordinates_pt", 50)
}
return True
return False
def create_geo_filter(coordinates, field, distanceInKM):
return "+{!geofilt d=" + str(distanceInKM) + " sfield=\"" + field + "\" pt=\"" + coordinates + "\"}"
def find_location_coordinates(keyword):
query = {
"params": {
"qf": "name_t",
"keywords": keyword
},
"query": "{!graph from=name_s to=venue_s returnOnlyLeaf=true}{!edismax v=$keywords}"
}
return json.loads(tag_places(query))
def get_category_and_term_vector_solr_response(keyword):
query = {
"params": {
"fore": keyword,
"back": "*:*",
"df": "text_t",
#"qf": "text_t",
#"defType": "edismax",
"echoParams": "none"
},
"query": "*:*",
"limit": 0,
"facet": {
"term_needing_vector": {
"type": "query",
"query": keyword,
"facet": {
"related_terms" : {
"type" : "terms",
"field" : "text_t",
"limit": 3,
"sort": { "r1": "desc" },
"facet" : {
"r1" : "relatedness($fore,$back)"
}
},
"doc_type" : {
"type" : "terms",
"field" : "doc_type",
"limit": 1,
"sort": { "r2": "desc" },
"facet" : {
"r2" : "relatedness($fore,$back)"
}
}
}
}
}
}
response = post_search(query)
#response.asyncRequestID = asyncRequestID; //used to guarantee order of processing
return json.loads(response)
def parse_category_and_term_vector_from_solr_response(solrResponse):
parsed = {}
relatedTermNodes = {}
if ('facets' in solrResponse and 'term_needing_vector' in solrResponse['facets']):
if ('doc_type' in solrResponse['facets']['term_needing_vector']
and 'buckets' in solrResponse['facets']['term_needing_vector']['doc_type']
and len(solrResponse['facets']['term_needing_vector']['doc_type']['buckets']) > 0 ):
parsed['category'] = solrResponse['facets']['term_needing_vector']['doc_type']['buckets'][0]['val'] #just top one for now
if ('related_terms' in solrResponse['facets']['term_needing_vector']
and 'buckets' in solrResponse['facets']['term_needing_vector']['related_terms']
and len(solrResponse['facets']['term_needing_vector']['related_terms']['buckets']) > 0 ): #at least one entry
relatedTermNodes = solrResponse['facets']['term_needing_vector']['related_terms']['buckets']
termVector = ""
for relatedTermNode in relatedTermNodes:
if (len(termVector) > 0): termVector += " "
termVector += relatedTermNode['val'] + "^" + "{:.4f}".format(relatedTermNode['r1']['relatedness'])
parsed['term_vector'] = termVector
#parsed.asyncRequestID = solrResponse.asyncRequestID; //used to guarantee order of processing
return parsed
import os, re
import io
def render_search_results(results, keywords_to_highlight):
file_path = os.path.dirname(os.path.abspath(__file__))
search_results_template_file = os.path.join(file_path, "search-results-template.html")
with open(search_results_template_file) as file:
file_content = file.read()
template_syntax = "<!-- BEGIN_TEMPLATE[^>]*-->(.*)<!-- END_TEMPLATE[^>]*-->"
header_template = re.sub(template_syntax, "", file_content, flags=re.S)
results_template_syntax = "<!-- BEGIN_TEMPLATE: SEARCH_RESULTS -->(.*)<!-- END_TEMPLATE: SEARCH_RESULTS[^>]*-->"
x = re.search(results_template_syntax, file_content, flags=re.S)
results_template = x.group(1)
separator_template_syntax = "<!-- BEGIN_TEMPLATE: SEPARATOR -->(.*)<!-- END_TEMPLATE: SEPARATOR[^>]*-->"
x = re.search(separator_template_syntax, file_content, flags=re.S)
separator_template = x.group(1)
rendered = ""
for result in results['response']['docs']:
#todo: add highlighting
rendered += results_template.replace("${NAME}", result['name_t'] if 'name_t' in result else "UNKNOWN") \
.replace("${CITY}", result['city_t'] + ", " + result['state_t'] if 'city_t' in result and 'state_t' in result else "UNKNOWN") \
.replace("${DESCRIPTION}", result['text_t'] if 'text_t' in result else "") \
.replace("${IMAGE_URL}", "/map?lat=" + str(result['latitude_d']) + "&lon=" + str(result['longitude_d'])) \
.replace("${STARS}", "★" * int(result['stars_i']) if 'stars_i' in result else "")
rendered += separator_template
if rendered == "":
rendered = "No Results for this query."
return rendered
class SemanticSearchHandler(http.server.SimpleHTTPRequestHandler):
"""The test example handler."""
def sendResponse(self, response):
try:
self.send_response(200)
self.end_headers()
self.wfile.write(bytes(json.dumps(response), 'utf-8'))
except Exception as ex:
self.send_error(500, ex)
def sendImageResponse(self, response):
try:
self.send_response(200)
self.end_headers()
self.wfile.write(bytes(response))
except Exception as ex:
self.send_error(500, ex)
def do_POST(self):
content_len = int(self.headers.get("Content-Length"), 0)
post_body = self.rfile.read(content_len)
if (self.path.startswith("/tag_query")):
self.sendResponse(tag_query(post_body))
elif self.path.startswith("/tag_places"):
self.sendResponse(tag_places(post_body))
elif self.path.startswith("/process_semantic_query"):
self.sendResponse(process_semantic_query(post_body))
elif self.path.startswith("/process_basic_query"):
self.sendResponse(process_basic_query(post_body))
elif self.path.startswith("/run_search"):
text = post_body.decode('UTF-8')
results = json.loads(run_search(text))
highlight_terms = post_body.decode('UTF-8').split(' ')
rendered_results = render_search_results(results, highlight_terms)
self.sendResponse(rendered_results)
def do_GET(self):
if self.path.startswith("/search") or self.path.startswith("/semantic-search"):
self.path = "/search.html"
http.server.SimpleHTTPRequestHandler.do_GET(self)
http.server.SimpleHTTPRequestHandler.do_GET(self)
elif self.path.startswith("/map"):
qsVars = parse_qs(urlparse(self.path).query)
if 'lat' in qsVars and 'lon' in qsVars:
lat = float(qsVars["lat"][0])
lon = float(qsVars["lon"][0])
zoom = int(qsVars['zoom'][0]) if 'zoom' in qsVars else 10
m = StaticMap(200, 200)
marker_outline = CircleMarker((lon, lat), 'white', 18)
marker = CircleMarker((lon, lat), '#0036FF', 12)
m.add_marker(marker_outline)
m.add_marker(marker)
image = m.render(zoom=zoom)
buf = io.BytesIO()
image.save(buf, format='JPEG')
self.sendImageResponse(buf.getvalue())
elif self.path.startswith("/healthcheck"):
self.send_response(200)
self.send_header('Access-Control-Allow-Private-Network', 'true')
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-type','image/png')
self.end_headers()
#Open the static file requested and send it
image = open("is-running.png", 'br')
self.wfile.write(image.read())
image.close()
def open_browser():
"""Start a browser after waiting for half a second."""
def _open_browser():
if AIPS_WEBSERVER_HOST == "localhost":
webbrowser.open(WEBSERVER_URL + '/%s' % FILE)
thread = threading.Timer(0.5, _open_browser)
thread.start()
def start_server():
"""Start the server."""
server_address = ("0.0.0.0", int(AIPS_WEBSERVER_PORT))
server = http.server.HTTPServer(server_address, SemanticSearchHandler)
server.serve_forever()
if __name__ == "__main__":
open_browser()
start_server() | queryString = categoryAndTermVector["term_vector"] | conditional_block |
start-webserver.py | import sys
sys.path.append('..')
from aips import *
import threading
import webbrowser
import http.server
import requests
import json
import urllib
from staticmap import StaticMap, CircleMarker
from urllib.parse import urlparse, parse_qs
import os
FILE = 'semantic-search'
#HOST = os.getenv('HOST') or 'localhost'
AIPS_WEBSERVER_PORT = os.getenv('WEBSERVER_PORT') or 2345
#SOLR_HOST = os.getenv('SOLR_HOST') or 'aips-solr'
#SOLR_PORT = os.getenv('SOLR_PORT') or 8983
#SOLR_HOST_URL = "http://" + SOLR_HOST + ":" + str(SOLR_PORT) + "/solr"
SOLR_URL = "http://localhost:8983/solr"
def query_solr(collection,query):
response = requests.post(SOLR_URL + '/' + collection + '/select',
{
"type": 'POST',
"data": json.puts(query),
"dataType": 'json',
"contentType": 'application/json'
});
return response
def tag_query(post_body):
return requests.post(SOLR_URL + '/entities/tag?json.nl=map&sort=popularity%20desc&matchText=true&echoParams=all&fl=id,type,canonical_form,name,country:countrycode_s,admin_area:admin_code_1_s,popularity,*_p,command_function', post_body).text
def tag_places(post_body):
x = json.dumps(post_body)
return requests.post(SOLR_URL + '/reviews/select', json=post_body).text
def post_search(post_body):
x = json.dumps(post_body)
return requests.post(SOLR_URL + '/reviews/select', json=post_body).text
def queryTreeToResolvedString(query_tree):
resolved_query = ""
for i in range(len(query_tree)):
if (len(resolved_query) > 0):
resolved_query += " "
resolved_query += query_tree[i]['query']
return resolved_query
def run_search(text):
#http://localhost:8983/solr/places/select?q=%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=%2234.9362399,-80.8379247%22}&fl=name_s,location_p,city_s,doc_type_s,state_s&debug=true&qf=text_t
#url = "http://localhost:8983/solr/places/select?q=%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=34.9362399,-80.8379247}&qf=text_t&defType=lucene"
#solrQuery = {"query": text, "params":{ "defType": "lucene", "qf": "name_t^100 text_t city_t^0.1 categories_t^0.01", "debug": "true"}}
#return requests.post('http://localhost:8983/solr/places/select', json=solrQuery).text
#solrQuery = {"query": "%2B{!edismax v=\'bbq^0.9191 ribs^0.6186 pork^0.5991'} %2B{!geofilt d=50 sfield=location_p pt=34.9362399,-80.8379247}", "params":{ "defType": "lucene", "qf": "name_t^100 text_t city_t^0.1 categories_t^0.01", "debug": "true"}}
#return requests.post('http://localhost:8983/solr/places/select', json=json.dumps(solrQuery)).text
#q = "%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=34.9362399,-80.8379247}"
q = urllib.parse.quote(text)
print(q)
#q=text.replace("+", "%2B") #so it doesn't get interpreted as space
qf="text_t"
defType="lucene"
return requests.get(SOLR_URL + "/reviews/select?q=" + q + "&qf=" + qf + "&defType=" + defType).text
def process_basic_query(query_bytes):
text = query_bytes.decode('UTF-8')
response = {
"resolved_query": '+{!edismax mm=100% v="' + escapeQuotesInQuery(text) + '"}'
}
return response
def process_semantic_query(query_bytes):
text = query_bytes.decode('UTF-8')
data = tag_query(query_bytes)
tagged_response = json.loads(data)
#loop through all documents (entities) returned
doc_map={} # reset to empty
if (tagged_response['response'] and tagged_response['response']['docs']):
docs = tagged_response['response']['docs']
for doc in docs:
doc_map[doc['id']] = doc
#for (d=0; d<Object.keys(docs).length; d++) {
# let doc = docs[d];
# doc_map[doc.id]=doc;
#}
#sort doc_map by popularity so first most popular always wins
#def popularity_sort(doc_a, doc_b){
# return a.popularity - b.popularity;
#}
#//doc_map.sort(popularity_sort);
#}
query_tree = []
tagged_query = ""
transformed_query =""
if (tagged_response['tags'] is not None):
tags = tagged_response['tags']
#//var lastStart = 0;
lastEnd = 0
metaData = {}
for tag in tags:
#tag = tags[key]
matchText = tag['matchText']
doc_ids = tag['ids']
#pick top-ranked docid
best_doc_id = None
for doc_id in doc_ids:
if (best_doc_id):
if (doc_map[doc_id]['popularity'] > doc_map[best_doc_id]['popularity']):
best_doc_id = doc_id
else:
best_doc_id = doc_id
best_doc = doc_map[best_doc_id]
#store the unknown text as keywords
nextText = text[lastEnd:tag['startOffset']].strip()
if (len(nextText) > 0): #not whitespace
query_tree.append({ "type":"keyword", "known":False, "surface_form":nextText, "canonical_form":nextText })
tagged_query += " " + nextText
transformed_query += " " + "{ type:keyword, known: false, surface_form: \"" + nextText + "\"}"
# store the known entity as entity
query_tree.append(best_doc) #this is wrong. Need the query tree to have _all_
# interpretations available and then loop through them to resolve. TODO = fix this.
tagged_query += " {" + matchText + "}"
#//transformed_query += " {type: " + best_doc.type + ", canonical_form: \"" + best_doc.canonical_form + "\"}";
transformed_query += json.dumps(best_doc)
lastEnd = tag['endOffset']
if (lastEnd < len(text)):
finalText = text[lastEnd:len(text)].strip()
if (len(finalText) > 0):
query_tree.append({ "type":"keyword", "known":False, "surface_form":finalText, "canonical_form":finalText })
tagged_query += " " + finalText
transformed_query += " " + "{ type:keyword, known: false, surface_form: \"" + finalText + "\"}"
#finalquery = {"query_tree": query_tree}
#let query = {query_tree: query_tree}; //so we can pass byref
final_query = resolveQuery(query_tree)
#if (query != null){ //short circuit if new request has been issued
resolved_query = queryTreeToResolvedString(query_tree)
#UI.updateResolvedQuery(resolved_query)
#}
response = {
"tagged_query": tagged_query,
"transformed_query": transformed_query,
"resolved_query": resolved_query,
"tagger_data": tagged_response
}
return response
def resolveQuery(query_tree):
query_tree = processCommands(query_tree)
# Now process everything that is not yet resolved
for position in range(len(query_tree)):
item = query_tree[position];
if (item["type"] != "solr"): #already resolved
if (item["type"] == "keyword"):
#TODO: this currently looks up ALL unknown keywords in the SKG, which isn't very smart
#need to switch to looking up meaningful phrases in next pass. This is mostly for testing
#at the moment, so putting up with the noise temporarily.
categoryAndTermVector = None
#TODO: figure out way (probably timestamp-based) to guarantee processing in order given current async nature
solrResponse = get_category_and_term_vector_solr_response(item["surface_form"])
categoryAndTermVector = parse_category_and_term_vector_from_solr_response(solrResponse)
#if (latestAsyncRequestID != categoryAndTermVector.asyncRequestID){
# return null;
#}
queryString = ""
if ("term_vector" in categoryAndTermVector):
queryString = categoryAndTermVector["term_vector"]
if ("category" in categoryAndTermVector):
if (len(queryString) > 0):
queryString += " "
queryString += "+doc_type:\"" + categoryAndTermVector["category"] + "\""
if (len(queryString) == 0):
queryString = item["surface_form"] #just keep the input as a keyword
query_tree[position] = { "type":"solr", "query": "+{!edismax v=\"" + escapeQuotesInQuery(queryString) + "\"}" }
elif (item["type"] == "color"):
solrQuery = "+colors_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
elif (item["type"] == "known_item" or item["type"] == "city" or item["type"] == "event"):
solrQuery = "+name_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
elif (item["type"] == "brand"):
solrQuery = "+brand_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
else:
query_tree[position] = {"type":"solr", "query": "+{!edismax v=\"" + escapeQuotesInQuery(item["surface_form"]) + "\"}"}
return query_tree
def escapeQuotesInQuery(query):
return query.replace('"', '\\"')
def processCommands(query_tree):
position = 0
while position < len(query_tree):
item = query_tree[position]
# process commands. For now, going left to right and then sorting by priority when ambiguous commands occur;
# consider other weighting options later.
if (item['type'] == "command"):
commandIsResolved = False
command = item['command_function']
if (command):
query = {"query_tree": query_tree} #pass by-ref
commandIsResolved = eval(item['command_function']); #Careful... there is code in the docs that is being eval'd.
#MUST ENSURE THESE DOCS ARE SECURE, OTHERWISE THIS WILL INTRODUCE A POTENTIAL SECURITY THREAT (CODE INJECTION)
#else:
#Alert ("Error: " + query.query_tree.canonical_form + " has no command function.");
if (False == commandIsResolved):
#Bad command. Just remove for now... could alternatively keep it and run as a keyword
query_tree.pop(position) #.splice(position,1)
position += 1
return query_tree
def cmd_popularity(query, position):
if (len(query['query_tree']) -1 > position):
query['query_tree'][position] = {"type":"solr", "query": '+{!func v="mul(if(stars_i,stars_i,0),20)"}'}
return True
else:
return False
def cmd_location_distance(query, position):
#TODO: Notes. Need a "multi-location-hopping" resolver in here. For example,
#hotels near bbq near haystack. This is a search for doctype=hotels, join with (doctype=restaurant AND bbq OR barbecue OR ...) filtered on distance. The first "near" requires pushing the second to a sub-query (join/graph) and then takes over as the actual location distance command.
if (len(query['query_tree']) -1 > position):
nextEntity = query['query_tree'][position + 1]
if (nextEntity['type'] == "city"):
query['query_tree'].pop(position + 1); #remove next element since we're inserting into command's position
query['query_tree'][position] = {"type":"solr",
"query": create_geo_filter(nextEntity['location_p'],
"location_p", 50)}
return True
elif 'coordinates_pt' in nextEntity:
query['query_tree'].pop(position + 1) #remove next element since we're inserting into command's position
query['query_tree'][position] = {"type":"solr",
"query": create_geo_filter(nextEntity['coordinates_pt'],
"coordinates_pt", 50) }
return True
elif nextEntity['type'] == "event":
#nextEntity doesn't have coordinates on it, so try traversing to find coordinates on a parent node (i.e. a venue)
query['query_tree'].pop(position + 1); #remove next element since we're inserting into command's position
quickHack = None
if (nextEntity['canonical_form'] == "activate conference"):
quickHack = "activate"
if (nextEntity['canonical_form'] == "haystack conference"):
quickHack = "haystack"
attemptedGraphLookup = find_location_coordinates(quickHack)
if ('response' in attemptedGraphLookup
and 'docs' in attemptedGraphLookup['response']
and len(attemptedGraphLookup['response']['docs']) > 0):
query['query_tree'][position] = \
{ "type":"solr",
"query": create_geo_filter(
attemptedGraphLookup['response']['docs'][0]['coordinates_s'],
"coordinates_pt", 50)
}
return True
return False
def create_geo_filter(coordinates, field, distanceInKM):
return "+{!geofilt d=" + str(distanceInKM) + " sfield=\"" + field + "\" pt=\"" + coordinates + "\"}"
def find_location_coordinates(keyword):
query = {
"params": {
"qf": "name_t",
"keywords": keyword
},
"query": "{!graph from=name_s to=venue_s returnOnlyLeaf=true}{!edismax v=$keywords}"
}
return json.loads(tag_places(query))
def get_category_and_term_vector_solr_response(keyword):
query = {
"params": {
"fore": keyword,
"back": "*:*",
"df": "text_t",
#"qf": "text_t",
#"defType": "edismax",
"echoParams": "none"
},
"query": "*:*",
"limit": 0,
"facet": {
"term_needing_vector": {
"type": "query",
"query": keyword,
"facet": {
"related_terms" : {
"type" : "terms",
"field" : "text_t",
"limit": 3,
"sort": { "r1": "desc" },
"facet" : {
"r1" : "relatedness($fore,$back)"
}
},
"doc_type" : {
"type" : "terms",
"field" : "doc_type",
"limit": 1,
"sort": { "r2": "desc" },
"facet" : {
"r2" : "relatedness($fore,$back)"
}
}
}
}
}
}
response = post_search(query)
#response.asyncRequestID = asyncRequestID; //used to guarantee order of processing
return json.loads(response)
def parse_category_and_term_vector_from_solr_response(solrResponse):
parsed = {}
relatedTermNodes = {}
if ('facets' in solrResponse and 'term_needing_vector' in solrResponse['facets']):
if ('doc_type' in solrResponse['facets']['term_needing_vector']
and 'buckets' in solrResponse['facets']['term_needing_vector']['doc_type']
and len(solrResponse['facets']['term_needing_vector']['doc_type']['buckets']) > 0 ):
parsed['category'] = solrResponse['facets']['term_needing_vector']['doc_type']['buckets'][0]['val'] #just top one for now
if ('related_terms' in solrResponse['facets']['term_needing_vector']
and 'buckets' in solrResponse['facets']['term_needing_vector']['related_terms']
and len(solrResponse['facets']['term_needing_vector']['related_terms']['buckets']) > 0 ): #at least one entry
relatedTermNodes = solrResponse['facets']['term_needing_vector']['related_terms']['buckets']
termVector = ""
for relatedTermNode in relatedTermNodes:
if (len(termVector) > 0): termVector += " "
termVector += relatedTermNode['val'] + "^" + "{:.4f}".format(relatedTermNode['r1']['relatedness'])
parsed['term_vector'] = termVector
#parsed.asyncRequestID = solrResponse.asyncRequestID; //used to guarantee order of processing
return parsed
import os, re
import io
def render_search_results(results, keywords_to_highlight):
file_path = os.path.dirname(os.path.abspath(__file__))
search_results_template_file = os.path.join(file_path, "search-results-template.html")
with open(search_results_template_file) as file:
file_content = file.read()
template_syntax = "<!-- BEGIN_TEMPLATE[^>]*-->(.*)<!-- END_TEMPLATE[^>]*-->"
header_template = re.sub(template_syntax, "", file_content, flags=re.S)
results_template_syntax = "<!-- BEGIN_TEMPLATE: SEARCH_RESULTS -->(.*)<!-- END_TEMPLATE: SEARCH_RESULTS[^>]*-->"
x = re.search(results_template_syntax, file_content, flags=re.S)
results_template = x.group(1)
separator_template_syntax = "<!-- BEGIN_TEMPLATE: SEPARATOR -->(.*)<!-- END_TEMPLATE: SEPARATOR[^>]*-->"
x = re.search(separator_template_syntax, file_content, flags=re.S)
separator_template = x.group(1)
rendered = ""
for result in results['response']['docs']:
#todo: add highlighting
rendered += results_template.replace("${NAME}", result['name_t'] if 'name_t' in result else "UNKNOWN") \
.replace("${CITY}", result['city_t'] + ", " + result['state_t'] if 'city_t' in result and 'state_t' in result else "UNKNOWN") \
.replace("${DESCRIPTION}", result['text_t'] if 'text_t' in result else "") \
.replace("${IMAGE_URL}", "/map?lat=" + str(result['latitude_d']) + "&lon=" + str(result['longitude_d'])) \
.replace("${STARS}", "★" * int(result['stars_i']) if 'stars_i' in result else "")
rendered += separator_template
if rendered == "":
rendered = "No Results for this query."
return rendered
class SemanticSearchHandler(http.server.SimpleHTTPRequestHandler):
"""The test example handler."""
def sendResponse(self, response):
try:
self.send_response(200)
self.end_headers()
self.wfile.write(bytes(json.dumps(response), 'utf-8'))
except Exception as ex:
self.send_error(500, ex)
def sendImageResponse(self, response):
try:
self.send_response(200)
self.end_headers()
self.wfile.write(bytes(response))
except Exception as ex:
self.send_error(500, ex)
def do | elf):
content_len = int(self.headers.get("Content-Length"), 0)
post_body = self.rfile.read(content_len)
if (self.path.startswith("/tag_query")):
self.sendResponse(tag_query(post_body))
elif self.path.startswith("/tag_places"):
self.sendResponse(tag_places(post_body))
elif self.path.startswith("/process_semantic_query"):
self.sendResponse(process_semantic_query(post_body))
elif self.path.startswith("/process_basic_query"):
self.sendResponse(process_basic_query(post_body))
elif self.path.startswith("/run_search"):
text = post_body.decode('UTF-8')
results = json.loads(run_search(text))
highlight_terms = post_body.decode('UTF-8').split(' ')
rendered_results = render_search_results(results, highlight_terms)
self.sendResponse(rendered_results)
def do_GET(self):
if self.path.startswith("/search") or self.path.startswith("/semantic-search"):
self.path = "/search.html"
http.server.SimpleHTTPRequestHandler.do_GET(self)
http.server.SimpleHTTPRequestHandler.do_GET(self)
elif self.path.startswith("/map"):
qsVars = parse_qs(urlparse(self.path).query)
if 'lat' in qsVars and 'lon' in qsVars:
lat = float(qsVars["lat"][0])
lon = float(qsVars["lon"][0])
zoom = int(qsVars['zoom'][0]) if 'zoom' in qsVars else 10
m = StaticMap(200, 200)
marker_outline = CircleMarker((lon, lat), 'white', 18)
marker = CircleMarker((lon, lat), '#0036FF', 12)
m.add_marker(marker_outline)
m.add_marker(marker)
image = m.render(zoom=zoom)
buf = io.BytesIO()
image.save(buf, format='JPEG')
self.sendImageResponse(buf.getvalue())
elif self.path.startswith("/healthcheck"):
self.send_response(200)
self.send_header('Access-Control-Allow-Private-Network', 'true')
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-type','image/png')
self.end_headers()
#Open the static file requested and send it
image = open("is-running.png", 'br')
self.wfile.write(image.read())
image.close()
def open_browser():
"""Start a browser after waiting for half a second."""
def _open_browser():
if AIPS_WEBSERVER_HOST == "localhost":
webbrowser.open(WEBSERVER_URL + '/%s' % FILE)
thread = threading.Timer(0.5, _open_browser)
thread.start()
def start_server():
"""Start the server."""
server_address = ("0.0.0.0", int(AIPS_WEBSERVER_PORT))
server = http.server.HTTPServer(server_address, SemanticSearchHandler)
server.serve_forever()
if __name__ == "__main__":
open_browser()
start_server() | _POST(s | identifier_name |
start-webserver.py | import sys
sys.path.append('..')
from aips import *
import threading
import webbrowser
import http.server
import requests
import json
import urllib
from staticmap import StaticMap, CircleMarker
from urllib.parse import urlparse, parse_qs
import os
FILE = 'semantic-search'
#HOST = os.getenv('HOST') or 'localhost'
AIPS_WEBSERVER_PORT = os.getenv('WEBSERVER_PORT') or 2345
#SOLR_HOST = os.getenv('SOLR_HOST') or 'aips-solr'
#SOLR_PORT = os.getenv('SOLR_PORT') or 8983
#SOLR_HOST_URL = "http://" + SOLR_HOST + ":" + str(SOLR_PORT) + "/solr"
SOLR_URL = "http://localhost:8983/solr"
def query_solr(collection,query):
response = requests.post(SOLR_URL + '/' + collection + '/select',
{
"type": 'POST',
"data": json.puts(query),
"dataType": 'json',
"contentType": 'application/json'
});
return response
def tag_query(post_body):
return requests.post(SOLR_URL + '/entities/tag?json.nl=map&sort=popularity%20desc&matchText=true&echoParams=all&fl=id,type,canonical_form,name,country:countrycode_s,admin_area:admin_code_1_s,popularity,*_p,command_function', post_body).text
def tag_places(post_body):
x = json.dumps(post_body)
return requests.post(SOLR_URL + '/reviews/select', json=post_body).text
def post_search(post_body):
x = json.dumps(post_body)
return requests.post(SOLR_URL + '/reviews/select', json=post_body).text
def queryTreeToResolvedString(query_tree):
resolved_query = ""
for i in range(len(query_tree)):
if (len(resolved_query) > 0):
resolved_query += " "
resolved_query += query_tree[i]['query']
return resolved_query
def run_search(text):
#http://localhost:8983/solr/places/select?q=%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=%2234.9362399,-80.8379247%22}&fl=name_s,location_p,city_s,doc_type_s,state_s&debug=true&qf=text_t
#url = "http://localhost:8983/solr/places/select?q=%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=34.9362399,-80.8379247}&qf=text_t&defType=lucene"
#solrQuery = {"query": text, "params":{ "defType": "lucene", "qf": "name_t^100 text_t city_t^0.1 categories_t^0.01", "debug": "true"}}
#return requests.post('http://localhost:8983/solr/places/select', json=solrQuery).text
#solrQuery = {"query": "%2B{!edismax v=\'bbq^0.9191 ribs^0.6186 pork^0.5991'} %2B{!geofilt d=50 sfield=location_p pt=34.9362399,-80.8379247}", "params":{ "defType": "lucene", "qf": "name_t^100 text_t city_t^0.1 categories_t^0.01", "debug": "true"}}
#return requests.post('http://localhost:8983/solr/places/select', json=json.dumps(solrQuery)).text
#q = "%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=34.9362399,-80.8379247}"
q = urllib.parse.quote(text)
print(q)
#q=text.replace("+", "%2B") #so it doesn't get interpreted as space
qf="text_t"
defType="lucene"
return requests.get(SOLR_URL + "/reviews/select?q=" + q + "&qf=" + qf + "&defType=" + defType).text
def process_basic_query(query_bytes):
text = query_bytes.decode('UTF-8')
response = {
"resolved_query": '+{!edismax mm=100% v="' + escapeQuotesInQuery(text) + '"}'
}
return response
def process_semantic_query(query_bytes):
text = query_bytes.decode('UTF-8')
data = tag_query(query_bytes)
tagged_response = json.loads(data)
#loop through all documents (entities) returned
doc_map={} # reset to empty
if (tagged_response['response'] and tagged_response['response']['docs']):
docs = tagged_response['response']['docs']
for doc in docs:
doc_map[doc['id']] = doc
#for (d=0; d<Object.keys(docs).length; d++) {
# let doc = docs[d];
# doc_map[doc.id]=doc;
#}
#sort doc_map by popularity so first most popular always wins
#def popularity_sort(doc_a, doc_b){
# return a.popularity - b.popularity;
#}
#//doc_map.sort(popularity_sort);
#}
query_tree = []
tagged_query = ""
transformed_query =""
if (tagged_response['tags'] is not None):
tags = tagged_response['tags']
#//var lastStart = 0;
lastEnd = 0
metaData = {}
for tag in tags:
#tag = tags[key]
matchText = tag['matchText']
doc_ids = tag['ids']
#pick top-ranked docid
best_doc_id = None
for doc_id in doc_ids:
if (best_doc_id):
if (doc_map[doc_id]['popularity'] > doc_map[best_doc_id]['popularity']):
best_doc_id = doc_id
else:
best_doc_id = doc_id
best_doc = doc_map[best_doc_id]
#store the unknown text as keywords
nextText = text[lastEnd:tag['startOffset']].strip()
if (len(nextText) > 0): #not whitespace
query_tree.append({ "type":"keyword", "known":False, "surface_form":nextText, "canonical_form":nextText })
tagged_query += " " + nextText
transformed_query += " " + "{ type:keyword, known: false, surface_form: \"" + nextText + "\"}"
# store the known entity as entity
query_tree.append(best_doc) #this is wrong. Need the query tree to have _all_
# interpretations available and then loop through them to resolve. TODO = fix this.
tagged_query += " {" + matchText + "}"
#//transformed_query += " {type: " + best_doc.type + ", canonical_form: \"" + best_doc.canonical_form + "\"}";
transformed_query += json.dumps(best_doc)
lastEnd = tag['endOffset']
if (lastEnd < len(text)):
finalText = text[lastEnd:len(text)].strip()
if (len(finalText) > 0):
query_tree.append({ "type":"keyword", "known":False, "surface_form":finalText, "canonical_form":finalText })
tagged_query += " " + finalText
transformed_query += " " + "{ type:keyword, known: false, surface_form: \"" + finalText + "\"}"
#finalquery = {"query_tree": query_tree}
#let query = {query_tree: query_tree}; //so we can pass byref
final_query = resolveQuery(query_tree)
#if (query != null){ //short circuit if new request has been issued
resolved_query = queryTreeToResolvedString(query_tree)
#UI.updateResolvedQuery(resolved_query)
#}
response = {
"tagged_query": tagged_query,
"transformed_query": transformed_query,
"resolved_query": resolved_query,
"tagger_data": tagged_response
}
return response
def resolveQuery(query_tree):
query_tree = processCommands(query_tree)
# Now process everything that is not yet resolved
for position in range(len(query_tree)):
item = query_tree[position];
if (item["type"] != "solr"): #already resolved
if (item["type"] == "keyword"):
#TODO: this currently looks up ALL unknown keywords in the SKG, which isn't very smart
#need to switch to looking up meaningful phrases in next pass. This is mostly for testing
#at the moment, so putting up with the noise temporarily.
categoryAndTermVector = None
#TODO: figure out way (probably timestamp-based) to guarantee processing in order given current async nature
solrResponse = get_category_and_term_vector_solr_response(item["surface_form"])
categoryAndTermVector = parse_category_and_term_vector_from_solr_response(solrResponse)
#if (latestAsyncRequestID != categoryAndTermVector.asyncRequestID){
# return null;
#}
queryString = ""
if ("term_vector" in categoryAndTermVector):
queryString = categoryAndTermVector["term_vector"]
if ("category" in categoryAndTermVector):
if (len(queryString) > 0):
queryString += " "
queryString += "+doc_type:\"" + categoryAndTermVector["category"] + "\""
if (len(queryString) == 0):
queryString = item["surface_form"] #just keep the input as a keyword
query_tree[position] = { "type":"solr", "query": "+{!edismax v=\"" + escapeQuotesInQuery(queryString) + "\"}" }
elif (item["type"] == "color"):
solrQuery = "+colors_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
elif (item["type"] == "known_item" or item["type"] == "city" or item["type"] == "event"):
solrQuery = "+name_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
elif (item["type"] == "brand"):
solrQuery = "+brand_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
else:
query_tree[position] = {"type":"solr", "query": "+{!edismax v=\"" + escapeQuotesInQuery(item["surface_form"]) + "\"}"}
return query_tree
def escapeQuotesInQuery(query):
return query.replace('"', '\\"')
def processCommands(query_tree):
position = 0
while position < len(query_tree):
item = query_tree[position]
# process commands. For now, going left to right and then sorting by priority when ambiguous commands occur;
# consider other weighting options later.
if (item['type'] == "command"):
commandIsResolved = False
command = item['command_function']
if (command):
query = {"query_tree": query_tree} #pass by-ref
commandIsResolved = eval(item['command_function']); #Careful... there is code in the docs that is being eval'd.
#MUST ENSURE THESE DOCS ARE SECURE, OTHERWISE THIS WILL INTRODUCE A POTENTIAL SECURITY THREAT (CODE INJECTION)
#else:
#Alert ("Error: " + query.query_tree.canonical_form + " has no command function.");
if (False == commandIsResolved):
#Bad command. Just remove for now... could alternatively keep it and run as a keyword
query_tree.pop(position) #.splice(position,1)
position += 1
return query_tree
def cmd_popularity(query, position):
if (len(query['query_tree']) -1 > position):
query['query_tree'][position] = {"type":"solr", "query": '+{!func v="mul(if(stars_i,stars_i,0),20)"}'}
return True
else:
return False
def cmd_location_distance(query, position):
#TODO: Notes. Need a "multi-location-hopping" resolver in here. For example,
#hotels near bbq near haystack. This is a search for doctype=hotels, join with (doctype=restaurant AND bbq OR barbecue OR ...) filtered on distance. The first "near" requires pushing the second to a sub-query (join/graph) and then takes over as the actual location distance command.
if (len(query['query_tree']) -1 > position):
nextEntity = query['query_tree'][position + 1]
if (nextEntity['type'] == "city"):
query['query_tree'].pop(position + 1); #remove next element since we're inserting into command's position
query['query_tree'][position] = {"type":"solr",
"query": create_geo_filter(nextEntity['location_p'],
"location_p", 50)}
return True
elif 'coordinates_pt' in nextEntity:
query['query_tree'].pop(position + 1) #remove next element since we're inserting into command's position
query['query_tree'][position] = {"type":"solr",
"query": create_geo_filter(nextEntity['coordinates_pt'],
"coordinates_pt", 50) }
return True
elif nextEntity['type'] == "event":
#nextEntity doesn't have coordinates on it, so try traversing to find coordinates on a parent node (i.e. a venue)
query['query_tree'].pop(position + 1); #remove next element since we're inserting into command's position
quickHack = None
if (nextEntity['canonical_form'] == "activate conference"):
quickHack = "activate"
if (nextEntity['canonical_form'] == "haystack conference"):
quickHack = "haystack"
attemptedGraphLookup = find_location_coordinates(quickHack)
if ('response' in attemptedGraphLookup
and 'docs' in attemptedGraphLookup['response']
and len(attemptedGraphLookup['response']['docs']) > 0):
query['query_tree'][position] = \
{ "type":"solr",
"query": create_geo_filter(
attemptedGraphLookup['response']['docs'][0]['coordinates_s'],
"coordinates_pt", 50)
}
return True
return False
def create_geo_filter(coordinates, field, distanceInKM):
return "+{!geofilt d=" + str(distanceInKM) + " sfield=\"" + field + "\" pt=\"" + coordinates + "\"}"
def find_location_coordinates(keyword):
query = {
"params": {
"qf": "name_t",
"keywords": keyword
},
"query": "{!graph from=name_s to=venue_s returnOnlyLeaf=true}{!edismax v=$keywords}"
}
return json.loads(tag_places(query))
def get_category_and_term_vector_solr_response(keyword):
query = {
"params": {
"fore": keyword,
"back": "*:*",
"df": "text_t",
#"qf": "text_t",
#"defType": "edismax",
"echoParams": "none"
},
"query": "*:*",
"limit": 0,
"facet": {
"term_needing_vector": {
"type": "query",
"query": keyword,
"facet": {
"related_terms" : {
"type" : "terms",
"field" : "text_t",
"limit": 3,
"sort": { "r1": "desc" },
"facet" : {
"r1" : "relatedness($fore,$back)"
}
},
"doc_type" : {
"type" : "terms",
"field" : "doc_type",
"limit": 1,
"sort": { "r2": "desc" },
"facet" : {
"r2" : "relatedness($fore,$back)"
}
}
}
}
}
}
response = post_search(query)
#response.asyncRequestID = asyncRequestID; //used to guarantee order of processing
return json.loads(response)
def parse_category_and_term_vector_from_solr_response(solrResponse):
parsed = {}
relatedTermNodes = {}
if ('facets' in solrResponse and 'term_needing_vector' in solrResponse['facets']):
if ('doc_type' in solrResponse['facets']['term_needing_vector']
and 'buckets' in solrResponse['facets']['term_needing_vector']['doc_type']
and len(solrResponse['facets']['term_needing_vector']['doc_type']['buckets']) > 0 ):
parsed['category'] = solrResponse['facets']['term_needing_vector']['doc_type']['buckets'][0]['val'] #just top one for now
if ('related_terms' in solrResponse['facets']['term_needing_vector']
and 'buckets' in solrResponse['facets']['term_needing_vector']['related_terms']
and len(solrResponse['facets']['term_needing_vector']['related_terms']['buckets']) > 0 ): #at least one entry
relatedTermNodes = solrResponse['facets']['term_needing_vector']['related_terms']['buckets']
termVector = ""
for relatedTermNode in relatedTermNodes:
if (len(termVector) > 0): termVector += " "
termVector += relatedTermNode['val'] + "^" + "{:.4f}".format(relatedTermNode['r1']['relatedness'])
parsed['term_vector'] = termVector
#parsed.asyncRequestID = solrResponse.asyncRequestID; //used to guarantee order of processing
return parsed
import os, re
import io
def render_search_results(results, keywords_to_highlight):
file_path = os.path.dirname(os.path.abspath(__file__))
search_results_template_file = os.path.join(file_path, "search-results-template.html")
with open(search_results_template_file) as file:
file_content = file.read()
template_syntax = "<!-- BEGIN_TEMPLATE[^>]*-->(.*)<!-- END_TEMPLATE[^>]*-->"
header_template = re.sub(template_syntax, "", file_content, flags=re.S)
results_template_syntax = "<!-- BEGIN_TEMPLATE: SEARCH_RESULTS -->(.*)<!-- END_TEMPLATE: SEARCH_RESULTS[^>]*-->"
x = re.search(results_template_syntax, file_content, flags=re.S)
results_template = x.group(1)
separator_template_syntax = "<!-- BEGIN_TEMPLATE: SEPARATOR -->(.*)<!-- END_TEMPLATE: SEPARATOR[^>]*-->"
x = re.search(separator_template_syntax, file_content, flags=re.S)
separator_template = x.group(1)
rendered = ""
for result in results['response']['docs']:
#todo: add highlighting
rendered += results_template.replace("${NAME}", result['name_t'] if 'name_t' in result else "UNKNOWN") \
.replace("${CITY}", result['city_t'] + ", " + result['state_t'] if 'city_t' in result and 'state_t' in result else "UNKNOWN") \
.replace("${DESCRIPTION}", result['text_t'] if 'text_t' in result else "") \
.replace("${IMAGE_URL}", "/map?lat=" + str(result['latitude_d']) + "&lon=" + str(result['longitude_d'])) \
.replace("${STARS}", "★" * int(result['stars_i']) if 'stars_i' in result else "")
rendered += separator_template
if rendered == "":
rendered = "No Results for this query."
return rendered
class SemanticSearchHandler(http.server.SimpleHTTPRequestHandler):
"""The test example handler."""
def sendResponse(self, response):
try:
self.send_response(200)
self.end_headers()
self.wfile.write(bytes(json.dumps(response), 'utf-8'))
except Exception as ex:
self.send_error(500, ex)
def sendImageResponse(self, response):
try:
self.send_response(200)
self.end_headers()
self.wfile.write(bytes(response))
except Exception as ex:
self.send_error(500, ex)
def do_POST(self):
content_len = int(self.headers.get("Content-Length"), 0)
post_body = self.rfile.read(content_len)
if (self.path.startswith("/tag_query")):
self.sendResponse(tag_query(post_body))
elif self.path.startswith("/tag_places"):
self.sendResponse(tag_places(post_body))
elif self.path.startswith("/process_semantic_query"):
self.sendResponse(process_semantic_query(post_body))
elif self.path.startswith("/process_basic_query"):
self.sendResponse(process_basic_query(post_body))
elif self.path.startswith("/run_search"):
text = post_body.decode('UTF-8')
results = json.loads(run_search(text))
highlight_terms = post_body.decode('UTF-8').split(' ')
rendered_results = render_search_results(results, highlight_terms)
self.sendResponse(rendered_results)
def do_GET(self):
if self.path.startswith("/search") or self.path.startswith("/semantic-search"):
self.path = "/search.html"
http.server.SimpleHTTPRequestHandler.do_GET(self)
http.server.SimpleHTTPRequestHandler.do_GET(self)
elif self.path.startswith("/map"):
qsVars = parse_qs(urlparse(self.path).query)
if 'lat' in qsVars and 'lon' in qsVars:
lat = float(qsVars["lat"][0])
lon = float(qsVars["lon"][0])
zoom = int(qsVars['zoom'][0]) if 'zoom' in qsVars else 10
m = StaticMap(200, 200)
marker_outline = CircleMarker((lon, lat), 'white', 18)
marker = CircleMarker((lon, lat), '#0036FF', 12)
m.add_marker(marker_outline)
m.add_marker(marker)
image = m.render(zoom=zoom)
buf = io.BytesIO()
image.save(buf, format='JPEG')
self.sendImageResponse(buf.getvalue())
elif self.path.startswith("/healthcheck"):
self.send_response(200)
self.send_header('Access-Control-Allow-Private-Network', 'true')
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-type','image/png')
self.end_headers()
#Open the static file requested and send it
image = open("is-running.png", 'br')
self.wfile.write(image.read())
image.close()
def open_browser():
"" | def start_server():
"""Start the server."""
server_address = ("0.0.0.0", int(AIPS_WEBSERVER_PORT))
server = http.server.HTTPServer(server_address, SemanticSearchHandler)
server.serve_forever()
if __name__ == "__main__":
open_browser()
start_server() | "Start a browser after waiting for half a second."""
def _open_browser():
if AIPS_WEBSERVER_HOST == "localhost":
webbrowser.open(WEBSERVER_URL + '/%s' % FILE)
thread = threading.Timer(0.5, _open_browser)
thread.start()
| identifier_body |
start-webserver.py | import sys
sys.path.append('..')
from aips import *
import threading
import webbrowser
import http.server
import requests
import json
import urllib
from staticmap import StaticMap, CircleMarker
from urllib.parse import urlparse, parse_qs
import os
FILE = 'semantic-search'
#HOST = os.getenv('HOST') or 'localhost'
AIPS_WEBSERVER_PORT = os.getenv('WEBSERVER_PORT') or 2345
#SOLR_HOST = os.getenv('SOLR_HOST') or 'aips-solr'
#SOLR_PORT = os.getenv('SOLR_PORT') or 8983
#SOLR_HOST_URL = "http://" + SOLR_HOST + ":" + str(SOLR_PORT) + "/solr"
SOLR_URL = "http://localhost:8983/solr"
def query_solr(collection,query):
response = requests.post(SOLR_URL + '/' + collection + '/select',
{
"type": 'POST',
"data": json.puts(query),
"dataType": 'json',
"contentType": 'application/json'
});
return response
def tag_query(post_body):
return requests.post(SOLR_URL + '/entities/tag?json.nl=map&sort=popularity%20desc&matchText=true&echoParams=all&fl=id,type,canonical_form,name,country:countrycode_s,admin_area:admin_code_1_s,popularity,*_p,command_function', post_body).text
def tag_places(post_body):
x = json.dumps(post_body)
return requests.post(SOLR_URL + '/reviews/select', json=post_body).text
def post_search(post_body):
x = json.dumps(post_body)
return requests.post(SOLR_URL + '/reviews/select', json=post_body).text
def queryTreeToResolvedString(query_tree):
resolved_query = ""
for i in range(len(query_tree)):
if (len(resolved_query) > 0):
resolved_query += " "
resolved_query += query_tree[i]['query']
return resolved_query
def run_search(text):
#http://localhost:8983/solr/places/select?q=%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=%2234.9362399,-80.8379247%22}&fl=name_s,location_p,city_s,doc_type_s,state_s&debug=true&qf=text_t
#url = "http://localhost:8983/solr/places/select?q=%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=34.9362399,-80.8379247}&qf=text_t&defType=lucene"
#solrQuery = {"query": text, "params":{ "defType": "lucene", "qf": "name_t^100 text_t city_t^0.1 categories_t^0.01", "debug": "true"}}
#return requests.post('http://localhost:8983/solr/places/select', json=solrQuery).text
#solrQuery = {"query": "%2B{!edismax v=\'bbq^0.9191 ribs^0.6186 pork^0.5991'} %2B{!geofilt d=50 sfield=location_p pt=34.9362399,-80.8379247}", "params":{ "defType": "lucene", "qf": "name_t^100 text_t city_t^0.1 categories_t^0.01", "debug": "true"}}
#return requests.post('http://localhost:8983/solr/places/select', json=json.dumps(solrQuery)).text | #q = "%2B{!edismax%20v=%22bbq^0.9191%20ribs^0.6186%20pork^0.5991%22}%20%2B{!geofilt%20d=50%20sfield=location_p%20pt=34.9362399,-80.8379247}"
q = urllib.parse.quote(text)
print(q)
#q=text.replace("+", "%2B") #so it doesn't get interpreted as space
qf="text_t"
defType="lucene"
return requests.get(SOLR_URL + "/reviews/select?q=" + q + "&qf=" + qf + "&defType=" + defType).text
def process_basic_query(query_bytes):
text = query_bytes.decode('UTF-8')
response = {
"resolved_query": '+{!edismax mm=100% v="' + escapeQuotesInQuery(text) + '"}'
}
return response
def process_semantic_query(query_bytes):
text = query_bytes.decode('UTF-8')
data = tag_query(query_bytes)
tagged_response = json.loads(data)
#loop through all documents (entities) returned
doc_map={} # reset to empty
if (tagged_response['response'] and tagged_response['response']['docs']):
docs = tagged_response['response']['docs']
for doc in docs:
doc_map[doc['id']] = doc
#for (d=0; d<Object.keys(docs).length; d++) {
# let doc = docs[d];
# doc_map[doc.id]=doc;
#}
#sort doc_map by popularity so first most popular always wins
#def popularity_sort(doc_a, doc_b){
# return a.popularity - b.popularity;
#}
#//doc_map.sort(popularity_sort);
#}
query_tree = []
tagged_query = ""
transformed_query =""
if (tagged_response['tags'] is not None):
tags = tagged_response['tags']
#//var lastStart = 0;
lastEnd = 0
metaData = {}
for tag in tags:
#tag = tags[key]
matchText = tag['matchText']
doc_ids = tag['ids']
#pick top-ranked docid
best_doc_id = None
for doc_id in doc_ids:
if (best_doc_id):
if (doc_map[doc_id]['popularity'] > doc_map[best_doc_id]['popularity']):
best_doc_id = doc_id
else:
best_doc_id = doc_id
best_doc = doc_map[best_doc_id]
#store the unknown text as keywords
nextText = text[lastEnd:tag['startOffset']].strip()
if (len(nextText) > 0): #not whitespace
query_tree.append({ "type":"keyword", "known":False, "surface_form":nextText, "canonical_form":nextText })
tagged_query += " " + nextText
transformed_query += " " + "{ type:keyword, known: false, surface_form: \"" + nextText + "\"}"
# store the known entity as entity
query_tree.append(best_doc) #this is wrong. Need the query tree to have _all_
# interpretations available and then loop through them to resolve. TODO = fix this.
tagged_query += " {" + matchText + "}"
#//transformed_query += " {type: " + best_doc.type + ", canonical_form: \"" + best_doc.canonical_form + "\"}";
transformed_query += json.dumps(best_doc)
lastEnd = tag['endOffset']
if (lastEnd < len(text)):
finalText = text[lastEnd:len(text)].strip()
if (len(finalText) > 0):
query_tree.append({ "type":"keyword", "known":False, "surface_form":finalText, "canonical_form":finalText })
tagged_query += " " + finalText
transformed_query += " " + "{ type:keyword, known: false, surface_form: \"" + finalText + "\"}"
#finalquery = {"query_tree": query_tree}
#let query = {query_tree: query_tree}; //so we can pass byref
final_query = resolveQuery(query_tree)
#if (query != null){ //short circuit if new request has been issued
resolved_query = queryTreeToResolvedString(query_tree)
#UI.updateResolvedQuery(resolved_query)
#}
response = {
"tagged_query": tagged_query,
"transformed_query": transformed_query,
"resolved_query": resolved_query,
"tagger_data": tagged_response
}
return response
def resolveQuery(query_tree):
query_tree = processCommands(query_tree)
# Now process everything that is not yet resolved
for position in range(len(query_tree)):
item = query_tree[position];
if (item["type"] != "solr"): #already resolved
if (item["type"] == "keyword"):
#TODO: this currently looks up ALL unknown keywords in the SKG, which isn't very smart
#need to switch to looking up meaningful phrases in next pass. This is mostly for testing
#at the moment, so putting up with the noise temporarily.
categoryAndTermVector = None
#TODO: figure out way (probably timestamp-based) to guarantee processing in order given current async nature
solrResponse = get_category_and_term_vector_solr_response(item["surface_form"])
categoryAndTermVector = parse_category_and_term_vector_from_solr_response(solrResponse)
#if (latestAsyncRequestID != categoryAndTermVector.asyncRequestID){
# return null;
#}
queryString = ""
if ("term_vector" in categoryAndTermVector):
queryString = categoryAndTermVector["term_vector"]
if ("category" in categoryAndTermVector):
if (len(queryString) > 0):
queryString += " "
queryString += "+doc_type:\"" + categoryAndTermVector["category"] + "\""
if (len(queryString) == 0):
queryString = item["surface_form"] #just keep the input as a keyword
query_tree[position] = { "type":"solr", "query": "+{!edismax v=\"" + escapeQuotesInQuery(queryString) + "\"}" }
elif (item["type"] == "color"):
solrQuery = "+colors_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
elif (item["type"] == "known_item" or item["type"] == "city" or item["type"] == "event"):
solrQuery = "+name_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
elif (item["type"] == "brand"):
solrQuery = "+brand_s:\"" + item["canonical_form"] + "\""
query_tree[position] = {"type":"solr", "query": solrQuery}
else:
query_tree[position] = {"type":"solr", "query": "+{!edismax v=\"" + escapeQuotesInQuery(item["surface_form"]) + "\"}"}
return query_tree
def escapeQuotesInQuery(query):
return query.replace('"', '\\"')
def processCommands(query_tree):
position = 0
while position < len(query_tree):
item = query_tree[position]
# process commands. For now, going left to right and then sorting by priority when ambiguous commands occur;
# consider other weighting options later.
if (item['type'] == "command"):
commandIsResolved = False
command = item['command_function']
if (command):
query = {"query_tree": query_tree} #pass by-ref
commandIsResolved = eval(item['command_function']); #Careful... there is code in the docs that is being eval'd.
#MUST ENSURE THESE DOCS ARE SECURE, OTHERWISE THIS WILL INTRODUCE A POTENTIAL SECURITY THREAT (CODE INJECTION)
#else:
#Alert ("Error: " + query.query_tree.canonical_form + " has no command function.");
if (False == commandIsResolved):
#Bad command. Just remove for now... could alternatively keep it and run as a keyword
query_tree.pop(position) #.splice(position,1)
position += 1
return query_tree
def cmd_popularity(query, position):
if (len(query['query_tree']) -1 > position):
query['query_tree'][position] = {"type":"solr", "query": '+{!func v="mul(if(stars_i,stars_i,0),20)"}'}
return True
else:
return False
def cmd_location_distance(query, position):
#TODO: Notes. Need a "multi-location-hopping" resolver in here. For example,
#hotels near bbq near haystack. This is a search for doctype=hotels, join with (doctype=restaurant AND bbq OR barbecue OR ...) filtered on distance. The first "near" requires pushing the second to a sub-query (join/graph) and then takes over as the actual location distance command.
if (len(query['query_tree']) -1 > position):
nextEntity = query['query_tree'][position + 1]
if (nextEntity['type'] == "city"):
query['query_tree'].pop(position + 1); #remove next element since we're inserting into command's position
query['query_tree'][position] = {"type":"solr",
"query": create_geo_filter(nextEntity['location_p'],
"location_p", 50)}
return True
elif 'coordinates_pt' in nextEntity:
query['query_tree'].pop(position + 1) #remove next element since we're inserting into command's position
query['query_tree'][position] = {"type":"solr",
"query": create_geo_filter(nextEntity['coordinates_pt'],
"coordinates_pt", 50) }
return True
elif nextEntity['type'] == "event":
#nextEntity doesn't have coordinates on it, so try traversing to find coordinates on a parent node (i.e. a venue)
query['query_tree'].pop(position + 1); #remove next element since we're inserting into command's position
quickHack = None
if (nextEntity['canonical_form'] == "activate conference"):
quickHack = "activate"
if (nextEntity['canonical_form'] == "haystack conference"):
quickHack = "haystack"
attemptedGraphLookup = find_location_coordinates(quickHack)
if ('response' in attemptedGraphLookup
and 'docs' in attemptedGraphLookup['response']
and len(attemptedGraphLookup['response']['docs']) > 0):
query['query_tree'][position] = \
{ "type":"solr",
"query": create_geo_filter(
attemptedGraphLookup['response']['docs'][0]['coordinates_s'],
"coordinates_pt", 50)
}
return True
return False
def create_geo_filter(coordinates, field, distanceInKM):
return "+{!geofilt d=" + str(distanceInKM) + " sfield=\"" + field + "\" pt=\"" + coordinates + "\"}"
def find_location_coordinates(keyword):
query = {
"params": {
"qf": "name_t",
"keywords": keyword
},
"query": "{!graph from=name_s to=venue_s returnOnlyLeaf=true}{!edismax v=$keywords}"
}
return json.loads(tag_places(query))
def get_category_and_term_vector_solr_response(keyword):
query = {
"params": {
"fore": keyword,
"back": "*:*",
"df": "text_t",
#"qf": "text_t",
#"defType": "edismax",
"echoParams": "none"
},
"query": "*:*",
"limit": 0,
"facet": {
"term_needing_vector": {
"type": "query",
"query": keyword,
"facet": {
"related_terms" : {
"type" : "terms",
"field" : "text_t",
"limit": 3,
"sort": { "r1": "desc" },
"facet" : {
"r1" : "relatedness($fore,$back)"
}
},
"doc_type" : {
"type" : "terms",
"field" : "doc_type",
"limit": 1,
"sort": { "r2": "desc" },
"facet" : {
"r2" : "relatedness($fore,$back)"
}
}
}
}
}
}
response = post_search(query)
#response.asyncRequestID = asyncRequestID; //used to guarantee order of processing
return json.loads(response)
def parse_category_and_term_vector_from_solr_response(solrResponse):
parsed = {}
relatedTermNodes = {}
if ('facets' in solrResponse and 'term_needing_vector' in solrResponse['facets']):
if ('doc_type' in solrResponse['facets']['term_needing_vector']
and 'buckets' in solrResponse['facets']['term_needing_vector']['doc_type']
and len(solrResponse['facets']['term_needing_vector']['doc_type']['buckets']) > 0 ):
parsed['category'] = solrResponse['facets']['term_needing_vector']['doc_type']['buckets'][0]['val'] #just top one for now
if ('related_terms' in solrResponse['facets']['term_needing_vector']
and 'buckets' in solrResponse['facets']['term_needing_vector']['related_terms']
and len(solrResponse['facets']['term_needing_vector']['related_terms']['buckets']) > 0 ): #at least one entry
relatedTermNodes = solrResponse['facets']['term_needing_vector']['related_terms']['buckets']
termVector = ""
for relatedTermNode in relatedTermNodes:
if (len(termVector) > 0): termVector += " "
termVector += relatedTermNode['val'] + "^" + "{:.4f}".format(relatedTermNode['r1']['relatedness'])
parsed['term_vector'] = termVector
#parsed.asyncRequestID = solrResponse.asyncRequestID; //used to guarantee order of processing
return parsed
import os, re
import io
def render_search_results(results, keywords_to_highlight):
file_path = os.path.dirname(os.path.abspath(__file__))
search_results_template_file = os.path.join(file_path, "search-results-template.html")
with open(search_results_template_file) as file:
file_content = file.read()
template_syntax = "<!-- BEGIN_TEMPLATE[^>]*-->(.*)<!-- END_TEMPLATE[^>]*-->"
header_template = re.sub(template_syntax, "", file_content, flags=re.S)
results_template_syntax = "<!-- BEGIN_TEMPLATE: SEARCH_RESULTS -->(.*)<!-- END_TEMPLATE: SEARCH_RESULTS[^>]*-->"
x = re.search(results_template_syntax, file_content, flags=re.S)
results_template = x.group(1)
separator_template_syntax = "<!-- BEGIN_TEMPLATE: SEPARATOR -->(.*)<!-- END_TEMPLATE: SEPARATOR[^>]*-->"
x = re.search(separator_template_syntax, file_content, flags=re.S)
separator_template = x.group(1)
rendered = ""
for result in results['response']['docs']:
#todo: add highlighting
rendered += results_template.replace("${NAME}", result['name_t'] if 'name_t' in result else "UNKNOWN") \
.replace("${CITY}", result['city_t'] + ", " + result['state_t'] if 'city_t' in result and 'state_t' in result else "UNKNOWN") \
.replace("${DESCRIPTION}", result['text_t'] if 'text_t' in result else "") \
.replace("${IMAGE_URL}", "/map?lat=" + str(result['latitude_d']) + "&lon=" + str(result['longitude_d'])) \
.replace("${STARS}", "★" * int(result['stars_i']) if 'stars_i' in result else "")
rendered += separator_template
if rendered == "":
rendered = "No Results for this query."
return rendered
class SemanticSearchHandler(http.server.SimpleHTTPRequestHandler):
"""The test example handler."""
def sendResponse(self, response):
try:
self.send_response(200)
self.end_headers()
self.wfile.write(bytes(json.dumps(response), 'utf-8'))
except Exception as ex:
self.send_error(500, ex)
def sendImageResponse(self, response):
try:
self.send_response(200)
self.end_headers()
self.wfile.write(bytes(response))
except Exception as ex:
self.send_error(500, ex)
def do_POST(self):
content_len = int(self.headers.get("Content-Length"), 0)
post_body = self.rfile.read(content_len)
if (self.path.startswith("/tag_query")):
self.sendResponse(tag_query(post_body))
elif self.path.startswith("/tag_places"):
self.sendResponse(tag_places(post_body))
elif self.path.startswith("/process_semantic_query"):
self.sendResponse(process_semantic_query(post_body))
elif self.path.startswith("/process_basic_query"):
self.sendResponse(process_basic_query(post_body))
elif self.path.startswith("/run_search"):
text = post_body.decode('UTF-8')
results = json.loads(run_search(text))
highlight_terms = post_body.decode('UTF-8').split(' ')
rendered_results = render_search_results(results, highlight_terms)
self.sendResponse(rendered_results)
def do_GET(self):
if self.path.startswith("/search") or self.path.startswith("/semantic-search"):
self.path = "/search.html"
http.server.SimpleHTTPRequestHandler.do_GET(self)
http.server.SimpleHTTPRequestHandler.do_GET(self)
elif self.path.startswith("/map"):
qsVars = parse_qs(urlparse(self.path).query)
if 'lat' in qsVars and 'lon' in qsVars:
lat = float(qsVars["lat"][0])
lon = float(qsVars["lon"][0])
zoom = int(qsVars['zoom'][0]) if 'zoom' in qsVars else 10
m = StaticMap(200, 200)
marker_outline = CircleMarker((lon, lat), 'white', 18)
marker = CircleMarker((lon, lat), '#0036FF', 12)
m.add_marker(marker_outline)
m.add_marker(marker)
image = m.render(zoom=zoom)
buf = io.BytesIO()
image.save(buf, format='JPEG')
self.sendImageResponse(buf.getvalue())
elif self.path.startswith("/healthcheck"):
self.send_response(200)
self.send_header('Access-Control-Allow-Private-Network', 'true')
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-type','image/png')
self.end_headers()
#Open the static file requested and send it
image = open("is-running.png", 'br')
self.wfile.write(image.read())
image.close()
def open_browser():
"""Start a browser after waiting for half a second."""
def _open_browser():
if AIPS_WEBSERVER_HOST == "localhost":
webbrowser.open(WEBSERVER_URL + '/%s' % FILE)
thread = threading.Timer(0.5, _open_browser)
thread.start()
def start_server():
"""Start the server."""
server_address = ("0.0.0.0", int(AIPS_WEBSERVER_PORT))
server = http.server.HTTPServer(server_address, SemanticSearchHandler)
server.serve_forever()
if __name__ == "__main__":
open_browser()
start_server() | random_line_split | |
stream_animation.rs | use super::stream_layer::*;
use super::stream_animation_core::*;
use crate::traits::*;
use crate::storage::*;
use crate::storage::file_properties::*;
use crate::storage::layer_properties::*;
use ::desync::*;
use flo_stream::*;
use itertools::*;
use futures::prelude::*;
use futures::task::{Poll};
use futures::stream;
use futures::stream::{BoxStream};
use std::sync::*;
use std::ops::{Range};
use std::time::{Duration};
use std::collections::{HashMap};
///
/// Animation that sends its updates to a storage stream
///
pub struct StreamAnimation {
/// The core, where the actual work is done
core: Arc<Desync<StreamAnimationCore>>,
/// The publisher for the edits to this animation
edit_publisher: Publisher<Arc<Vec<AnimationEdit>>>,
/// Available synchronous requests
idle_sync_requests: Desync<Vec<Desync<Option<Vec<StorageResponse>>>>>,
}
impl StreamAnimation {
///
/// Creates a new stream animation. The result is the animation implementation and the
/// stream of requests to be sent to the storage layer
///
pub fn new<ConnectStream: FnOnce(BoxStream<'static, Vec<StorageCommand>>) -> BoxStream<'static, Vec<StorageResponse>>>(connect_stream: ConnectStream) -> StreamAnimation {
// Create the storage requests. When the storage layer is running behind, we'll buffer up to 10 of these
let mut requests = Publisher::new(10);
let commands = requests.subscribe().boxed();
let storage_responses = connect_stream(commands);
let mut edit_publisher = Publisher::new(10);
let storage_connection = StorageConnection::new(requests, storage_responses);
// The core is used to actually execute the requests
let core = StreamAnimationCore {
storage_connection: storage_connection,
next_element_id: None,
cached_layers: HashMap::new(),
cached_keyframe: None,
brush_defn: None,
brush_props: None,
path_brush_defn: None,
path_brush_props: None,
retired_edit_senders: vec![],
};
let core = Arc::new(Desync::new(core));
// Anything published to the editor is piped into the core
pipe_in(Arc::clone(&core), edit_publisher.subscribe(), |core, edits: Arc<Vec<AnimationEdit>>| {
async move {
// Edits require some pre-processing: assign the IDs, perform undo actions and write to the log (note that undo edits are performed before serialization)
let mut edits = core.assign_ids_to_edits(&*edits).await;
core.process_undo_edits(&mut edits).await;
core.serialize_edits_to_log(&edits).await;
// Perform the edits to retire them
let retired = core.perform_edits(edits).await;
// Clean up the edit publishers, in case any aren't being listened to any more
core.retired_edit_senders.retain(|sender| sender.count_subscribers() > 0);
// Send the edits as retired
for retired_sender in core.retired_edit_senders.iter_mut() {
retired_sender.publish(retired.clone()).await;
}
}.boxed()
});
// Build the animation
StreamAnimation {
core: core,
idle_sync_requests: Desync::new(vec![]),
edit_publisher: edit_publisher
}
}
///
/// Performs an asynchronous request on a storage layer for this animation
///
pub (super) fn request_async<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> impl Future<Output=Option<Vec<StorageResponse>>> {
request_core_async(&self.core, request.into_iter().collect())
}
///
/// Performs a synchronous request on the storage layer for this animation
///
/// Synchronous requests are fairly slow, so should be avoided in inner loops
///
pub (super) fn request_sync<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> Option<Vec<StorageResponse>> {
request_core_sync(Arc::clone(&self.core), request.into_iter().collect())
}
///
/// Waits for any pending edits on this animation to complete
///
pub (super) fn wait_for_edits(&self) {
// Force a desync to wait for the when_empty future to complete
let when_empty = self.edit_publisher.republish().when_empty();
// Create a desync and wait for the 'when_empty' signal to show up (indicating all the edits have been sent to the core)
let wait_for_edits = Desync::new(());
let _ = wait_for_edits.future_desync(move |_| async move { when_empty.await; }.boxed());
// Synchronise after the future has completed
wait_for_edits.sync(|_| { });
// Synchronise with the animation core so that all the edits are performed
self.core.sync(|_| { });
}
///
/// Retrieves the current file properties for the animation
///
fn file_properties(&self) -> FileProperties {
// Retrieve the properties from storage (and update the version we have stored if there is one)
let mut response = self.request_sync(vec![StorageCommand::ReadAnimationProperties]).unwrap_or_else(|| vec![]);
let properties;
match response.pop() {
Some(StorageResponse::NotFound) => {
// File properties are not set
properties = FileProperties::default();
}
Some(StorageResponse::AnimationProperties(props)) => {
// Deserialize the file properties
properties = FileProperties::deserialize(&mut props.chars()).expect("Could not parse file properties");
}
unknown => panic!("Unexpected response {:?} while reading file properties", unknown)
}
properties
}
}
impl Animation for StreamAnimation {
///
/// Retrieves the frame size of this animation
///
fn size(&self) -> (f64, f64) {
self.wait_for_edits();
self.file_properties().size
}
///
/// Retrieves the length of this animation
///
fn duration(&self) -> Duration {
self.wait_for_edits();
self.file_properties().duration
}
///
/// Retrieves the duration of a single frame
///
fn frame_length(&self) -> Duration {
self.wait_for_edits();
self.file_properties().frame_length
}
///
/// Retrieves the IDs of the layers in this object
///
fn get_layer_ids(&self) -> Vec<u64> {
self.wait_for_edits();
let layer_responses = self.request_sync(vec![StorageCommand::ReadLayers]).unwrap_or_else(|| vec![]);
layer_responses
.into_iter()
.flat_map(|response| {
match response {
StorageResponse::LayerProperties(id, properties) => Some((id, LayerProperties::deserialize(&mut properties.chars())?)),
_ => None
}
})
.sorted_by(|(id_a, layer_a), (id_b, layer_b)| {
if layer_a.ordering == layer_b.ordering {
id_a.cmp(&id_b)
} else {
layer_a.ordering.cmp(&layer_b.ordering)
}
})
.map(|(id, _props)| id)
.collect()
}
///
/// Retrieves the layer with the specified ID from this animation
///
fn get_layer_with_id(&self, layer_id: u64) -> Option<Arc<dyn Layer>> {
self.wait_for_edits();
// Read the properties for the specified layer
let layer_properties = self.request_sync(vec![StorageCommand::ReadLayerProperties(layer_id)]);
if let Some(StorageResponse::LayerProperties(_, serialized)) = layer_properties.and_then(|mut props| props.pop()) {
if let Some(layer_properties) = LayerProperties::deserialize(&mut serialized.chars()) {
// Found the layer
Some(Arc::new(StreamLayer::new(Arc::clone(&self.core), layer_id, layer_properties)))
} else {
// Can't deserialize the layer properties
None
}
} else {
// Layer does not exist
None
}
}
///
/// Retrieves the total number of edits that have been performed on this animation
///
fn get_num_edits(&self) -> usize {
self.wait_for_edits();
let mut response = self.request_sync(vec![StorageCommand::ReadEditLogLength]).unwrap_or_else(|| vec![]);
match response.pop() {
Some(StorageResponse::NumberOfEdits(num_edits)) => num_edits,
_ => panic!("Unexpected response while reading number of edits")
}
}
///
/// Reads from the edit log for this animation
///
fn read_edit_log<'a>(&'a self, range: Range<usize>) -> BoxStream<'a, AnimationEdit> {
self.wait_for_edits();
// Clamp the range of edits to the maximum number of edits
let max_edit = self.get_num_edits();
let range = if range.end > max_edit {
range.start..max_edit
} else {
range
};
// Generate a stream to read from the edit log as we go
let per_request = 20;
let mut remaining = range;
let mut fetched = vec![];
let mut next_response = None;
stream::poll_fn(move |context| {
loop {
if remaining.len() != 0 && fetched.len() == 0 && next_response.is_none() {
// Fetch up to per_request items for each request
let num_to_fetch = remaining.len();
let num_to_fetch = if num_to_fetch > per_request { per_request } else { num_to_fetch };
let fetch_range = (remaining.start)..(remaining.start + num_to_fetch);
// Start polling for the next batch
next_response = Some(self.request_async(vec![StorageCommand::ReadEdits(fetch_range)]));
remaining = (remaining.start+num_to_fetch)..(remaining.end);
}
if let Some(next) = fetched.pop() {
// Just returning the batch we've already fetched
return Poll::Ready(Some(next));
} else if let Some(mut waiting) = next_response.take() {
// Try to retrieve the next item from the batch
let poll_response = waiting.poll_unpin(context);
match poll_response {
Poll::Pending => {
// Keep waiting for the response
next_response = Some(waiting);
return Poll::Pending
},
Poll::Ready(response) => {
// Load the edits into the fetched array
let mut response = response.unwrap_or(vec![]);
while let Some(response) = response.pop() {
// Ignore everything that's not an edit (we have no way to do error handling here)
if let StorageResponse::Edit(_num, serialized_edit) = response {
// Store edits that deserialize successfully on the fetched list
if let Some(edit) = AnimationEdit::deserialize(&mut serialized_edit.chars()) {
fetched.push(edit)
}
}
}
}
}
} else if remaining.len() == 0 {
// Reached the end of the stream
return Poll::Ready(None);
}
}
}).fuse().boxed()
}
}
impl EditableAnimation for StreamAnimation {
///
/// Assigns a new unique ID for creating a new motion
///
/// This ID will not have been used so far and will not be used again, and can be used as the ID for the MotionElement vector element.
///
fn | (&self) -> ElementId {
// Create a queue to run the 'assign element ID' future on
let core = Arc::clone(&self.core);
// Perform the request and retrieve the result
core.future_desync(|core| core.assign_element_id(ElementId::Unassigned).boxed())
.sync().unwrap()
}
///
/// Retrieves a sink that can be used to send edits for this animation
///
/// Edits are supplied as groups (stored in a vec) so that it's possible to ensure that
/// a set of related edits are performed atomically
///
fn edit(&self) -> Publisher<Arc<Vec<AnimationEdit>>> {
self.edit_publisher.republish()
}
///
/// Sends a set of edits straight to this animation
///
/// (Note that these are not always published to the publisher)
///
fn perform_edits(&self, edits: Vec<AnimationEdit>) {
// Get a publisher to send the edits to (this editor does send its edits to the publisher)
let mut publisher = self.edit_publisher.republish();
// Get an idle sync request desync
// We use desync instead of the futures executor as the executor will panic if we are called from within another future
// (desync provides a way around this problem)
let sync_request = self.idle_sync_requests.sync(|reqs| {
let next_request = reqs.pop();
if let Some(next_request) = next_request {
next_request
} else {
let req = Desync::new(None);
req
}
});
// Queue a request
sync_request.future_desync(move |_| {
async move {
// Publish the edits
publisher.publish(Arc::new(edits)).await;
}.boxed()
}).sync().ok();
// Return the sync_request to the pool
self.idle_sync_requests.desync(move |reqs| { reqs.push(sync_request) });
// Wait for the edits to complete
self.wait_for_edits();
}
///
/// Returns a stream of edits as they are being retired (ie, the edits that are now visible on the animation)
///
fn retired_edits(&self) -> BoxStream<'static, RetiredEdit> {
// Create a channel to send edits through
let mut sender = Publisher::new(10);
let receiver = sender.subscribe();
// Add to the list in the core
self.core.sync(move |core| {
core.retired_edit_senders.push(sender);
});
// Box up the receiver to create the result
receiver.boxed()
}
///
/// Flushes any caches this might have (forces reload from data storage)
///
fn flush_caches(&self) {
self.core.desync(|core| {
core.cached_keyframe = None;
});
}
}
| assign_element_id | identifier_name |
stream_animation.rs | use super::stream_layer::*;
use super::stream_animation_core::*;
use crate::traits::*;
use crate::storage::*;
use crate::storage::file_properties::*;
use crate::storage::layer_properties::*;
use ::desync::*;
use flo_stream::*;
use itertools::*;
use futures::prelude::*;
use futures::task::{Poll};
use futures::stream;
use futures::stream::{BoxStream};
use std::sync::*;
use std::ops::{Range};
use std::time::{Duration};
use std::collections::{HashMap};
///
/// Animation that sends its updates to a storage stream
///
pub struct StreamAnimation {
/// The core, where the actual work is done
core: Arc<Desync<StreamAnimationCore>>,
/// The publisher for the edits to this animation
edit_publisher: Publisher<Arc<Vec<AnimationEdit>>>,
/// Available synchronous requests
idle_sync_requests: Desync<Vec<Desync<Option<Vec<StorageResponse>>>>>,
}
impl StreamAnimation {
///
/// Creates a new stream animation. The result is the animation implementation and the
/// stream of requests to be sent to the storage layer
///
pub fn new<ConnectStream: FnOnce(BoxStream<'static, Vec<StorageCommand>>) -> BoxStream<'static, Vec<StorageResponse>>>(connect_stream: ConnectStream) -> StreamAnimation {
// Create the storage requests. When the storage layer is running behind, we'll buffer up to 10 of these
let mut requests = Publisher::new(10);
let commands = requests.subscribe().boxed();
let storage_responses = connect_stream(commands);
let mut edit_publisher = Publisher::new(10);
let storage_connection = StorageConnection::new(requests, storage_responses);
// The core is used to actually execute the requests
let core = StreamAnimationCore {
storage_connection: storage_connection,
next_element_id: None,
cached_layers: HashMap::new(),
cached_keyframe: None,
brush_defn: None,
brush_props: None,
path_brush_defn: None,
path_brush_props: None,
retired_edit_senders: vec![],
};
let core = Arc::new(Desync::new(core));
// Anything published to the editor is piped into the core
pipe_in(Arc::clone(&core), edit_publisher.subscribe(), |core, edits: Arc<Vec<AnimationEdit>>| {
async move {
// Edits require some pre-processing: assign the IDs, perform undo actions and write to the log (note that undo edits are performed before serialization)
let mut edits = core.assign_ids_to_edits(&*edits).await;
core.process_undo_edits(&mut edits).await;
core.serialize_edits_to_log(&edits).await;
// Perform the edits to retire them
let retired = core.perform_edits(edits).await;
// Clean up the edit publishers, in case any aren't being listened to any more
core.retired_edit_senders.retain(|sender| sender.count_subscribers() > 0);
// Send the edits as retired
for retired_sender in core.retired_edit_senders.iter_mut() {
retired_sender.publish(retired.clone()).await;
}
}.boxed()
});
// Build the animation
StreamAnimation {
core: core,
idle_sync_requests: Desync::new(vec![]),
edit_publisher: edit_publisher
}
}
///
/// Performs an asynchronous request on a storage layer for this animation
///
pub (super) fn request_async<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> impl Future<Output=Option<Vec<StorageResponse>>> {
request_core_async(&self.core, request.into_iter().collect())
}
///
/// Performs a synchronous request on the storage layer for this animation
///
/// Synchronous requests are fairly slow, so should be avoided in inner loops
///
pub (super) fn request_sync<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> Option<Vec<StorageResponse>> {
request_core_sync(Arc::clone(&self.core), request.into_iter().collect())
}
///
/// Waits for any pending edits on this animation to complete
///
pub (super) fn wait_for_edits(&self) {
// Force a desync to wait for the when_empty future to complete
let when_empty = self.edit_publisher.republish().when_empty();
// Create a desync and wait for the 'when_empty' signal to show up (indicating all the edits have been sent to the core)
let wait_for_edits = Desync::new(());
let _ = wait_for_edits.future_desync(move |_| async move { when_empty.await; }.boxed());
// Synchronise after the future has completed
wait_for_edits.sync(|_| { });
// Synchronise with the animation core so that all the edits are performed
self.core.sync(|_| { });
}
///
/// Retrieves the current file properties for the animation
///
fn file_properties(&self) -> FileProperties {
// Retrieve the properties from storage (and update the version we have stored if there is one)
let mut response = self.request_sync(vec![StorageCommand::ReadAnimationProperties]).unwrap_or_else(|| vec![]);
let properties;
match response.pop() {
Some(StorageResponse::NotFound) => {
// File properties are not set
properties = FileProperties::default();
}
Some(StorageResponse::AnimationProperties(props)) => {
// Deserialize the file properties
properties = FileProperties::deserialize(&mut props.chars()).expect("Could not parse file properties");
}
unknown => panic!("Unexpected response {:?} while reading file properties", unknown)
}
properties
}
}
impl Animation for StreamAnimation {
///
/// Retrieves the frame size of this animation
///
fn size(&self) -> (f64, f64) {
self.wait_for_edits();
self.file_properties().size
}
///
/// Retrieves the length of this animation
///
fn duration(&self) -> Duration {
self.wait_for_edits();
self.file_properties().duration
}
///
/// Retrieves the duration of a single frame
///
fn frame_length(&self) -> Duration {
self.wait_for_edits();
self.file_properties().frame_length
}
///
/// Retrieves the IDs of the layers in this object
///
fn get_layer_ids(&self) -> Vec<u64> {
self.wait_for_edits();
let layer_responses = self.request_sync(vec![StorageCommand::ReadLayers]).unwrap_or_else(|| vec![]);
layer_responses
.into_iter()
.flat_map(|response| {
match response {
StorageResponse::LayerProperties(id, properties) => Some((id, LayerProperties::deserialize(&mut properties.chars())?)),
_ => None
}
})
.sorted_by(|(id_a, layer_a), (id_b, layer_b)| {
if layer_a.ordering == layer_b.ordering {
id_a.cmp(&id_b)
} else {
layer_a.ordering.cmp(&layer_b.ordering)
}
})
.map(|(id, _props)| id)
.collect()
}
///
/// Retrieves the layer with the specified ID from this animation
///
fn get_layer_with_id(&self, layer_id: u64) -> Option<Arc<dyn Layer>> {
self.wait_for_edits();
// Read the properties for the specified layer
let layer_properties = self.request_sync(vec![StorageCommand::ReadLayerProperties(layer_id)]);
if let Some(StorageResponse::LayerProperties(_, serialized)) = layer_properties.and_then(|mut props| props.pop()) {
if let Some(layer_properties) = LayerProperties::deserialize(&mut serialized.chars()) {
// Found the layer
Some(Arc::new(StreamLayer::new(Arc::clone(&self.core), layer_id, layer_properties)))
} else {
// Can't deserialize the layer properties
None
}
} else {
// Layer does not exist
None
}
}
///
/// Retrieves the total number of edits that have been performed on this animation
///
fn get_num_edits(&self) -> usize {
self.wait_for_edits();
let mut response = self.request_sync(vec![StorageCommand::ReadEditLogLength]).unwrap_or_else(|| vec![]);
match response.pop() {
Some(StorageResponse::NumberOfEdits(num_edits)) => num_edits,
_ => panic!("Unexpected response while reading number of edits")
}
}
///
/// Reads from the edit log for this animation
///
fn read_edit_log<'a>(&'a self, range: Range<usize>) -> BoxStream<'a, AnimationEdit> {
self.wait_for_edits();
// Clamp the range of edits to the maximum number of edits
let max_edit = self.get_num_edits();
let range = if range.end > max_edit {
range.start..max_edit
} else {
range
};
// Generate a stream to read from the edit log as we go
let per_request = 20;
let mut remaining = range;
let mut fetched = vec![];
let mut next_response = None;
stream::poll_fn(move |context| {
loop {
if remaining.len() != 0 && fetched.len() == 0 && next_response.is_none() |
if let Some(next) = fetched.pop() {
// Just returning the batch we've already fetched
return Poll::Ready(Some(next));
} else if let Some(mut waiting) = next_response.take() {
// Try to retrieve the next item from the batch
let poll_response = waiting.poll_unpin(context);
match poll_response {
Poll::Pending => {
// Keep waiting for the response
next_response = Some(waiting);
return Poll::Pending
},
Poll::Ready(response) => {
// Load the edits into the fetched array
let mut response = response.unwrap_or(vec![]);
while let Some(response) = response.pop() {
// Ignore everything that's not an edit (we have no way to do error handling here)
if let StorageResponse::Edit(_num, serialized_edit) = response {
// Store edits that deserialize successfully on the fetched list
if let Some(edit) = AnimationEdit::deserialize(&mut serialized_edit.chars()) {
fetched.push(edit)
}
}
}
}
}
} else if remaining.len() == 0 {
// Reached the end of the stream
return Poll::Ready(None);
}
}
}).fuse().boxed()
}
}
impl EditableAnimation for StreamAnimation {
///
/// Assigns a new unique ID for creating a new motion
///
/// This ID will not have been used so far and will not be used again, and can be used as the ID for the MotionElement vector element.
///
fn assign_element_id(&self) -> ElementId {
// Create a queue to run the 'assign element ID' future on
let core = Arc::clone(&self.core);
// Perform the request and retrieve the result
core.future_desync(|core| core.assign_element_id(ElementId::Unassigned).boxed())
.sync().unwrap()
}
///
/// Retrieves a sink that can be used to send edits for this animation
///
/// Edits are supplied as groups (stored in a vec) so that it's possible to ensure that
/// a set of related edits are performed atomically
///
fn edit(&self) -> Publisher<Arc<Vec<AnimationEdit>>> {
self.edit_publisher.republish()
}
///
/// Sends a set of edits straight to this animation
///
/// (Note that these are not always published to the publisher)
///
fn perform_edits(&self, edits: Vec<AnimationEdit>) {
// Get a publisher to send the edits to (this editor does send its edits to the publisher)
let mut publisher = self.edit_publisher.republish();
// Get an idle sync request desync
// We use desync instead of the futures executor as the executor will panic if we are called from within another future
// (desync provides a way around this problem)
let sync_request = self.idle_sync_requests.sync(|reqs| {
let next_request = reqs.pop();
if let Some(next_request) = next_request {
next_request
} else {
let req = Desync::new(None);
req
}
});
// Queue a request
sync_request.future_desync(move |_| {
async move {
// Publish the edits
publisher.publish(Arc::new(edits)).await;
}.boxed()
}).sync().ok();
// Return the sync_request to the pool
self.idle_sync_requests.desync(move |reqs| { reqs.push(sync_request) });
// Wait for the edits to complete
self.wait_for_edits();
}
///
/// Returns a stream of edits as they are being retired (ie, the edits that are now visible on the animation)
///
fn retired_edits(&self) -> BoxStream<'static, RetiredEdit> {
// Create a channel to send edits through
let mut sender = Publisher::new(10);
let receiver = sender.subscribe();
// Add to the list in the core
self.core.sync(move |core| {
core.retired_edit_senders.push(sender);
});
// Box up the receiver to create the result
receiver.boxed()
}
///
/// Flushes any caches this might have (forces reload from data storage)
///
fn flush_caches(&self) {
self.core.desync(|core| {
core.cached_keyframe = None;
});
}
}
| {
// Fetch up to per_request items for each request
let num_to_fetch = remaining.len();
let num_to_fetch = if num_to_fetch > per_request { per_request } else { num_to_fetch };
let fetch_range = (remaining.start)..(remaining.start + num_to_fetch);
// Start polling for the next batch
next_response = Some(self.request_async(vec![StorageCommand::ReadEdits(fetch_range)]));
remaining = (remaining.start+num_to_fetch)..(remaining.end);
} | conditional_block |
stream_animation.rs | use super::stream_layer::*;
use super::stream_animation_core::*;
use crate::traits::*;
use crate::storage::*;
use crate::storage::file_properties::*;
use crate::storage::layer_properties::*;
use ::desync::*;
use flo_stream::*;
use itertools::*;
use futures::prelude::*;
use futures::task::{Poll};
use futures::stream;
use futures::stream::{BoxStream};
use std::sync::*;
use std::ops::{Range};
use std::time::{Duration};
use std::collections::{HashMap};
///
/// Animation that sends its updates to a storage stream
///
pub struct StreamAnimation {
/// The core, where the actual work is done
core: Arc<Desync<StreamAnimationCore>>,
/// The publisher for the edits to this animation
edit_publisher: Publisher<Arc<Vec<AnimationEdit>>>,
/// Available synchronous requests
idle_sync_requests: Desync<Vec<Desync<Option<Vec<StorageResponse>>>>>,
}
impl StreamAnimation {
///
/// Creates a new stream animation. The result is the animation implementation and the
/// stream of requests to be sent to the storage layer
///
pub fn new<ConnectStream: FnOnce(BoxStream<'static, Vec<StorageCommand>>) -> BoxStream<'static, Vec<StorageResponse>>>(connect_stream: ConnectStream) -> StreamAnimation {
// Create the storage requests. When the storage layer is running behind, we'll buffer up to 10 of these
let mut requests = Publisher::new(10);
let commands = requests.subscribe().boxed();
let storage_responses = connect_stream(commands);
let mut edit_publisher = Publisher::new(10);
let storage_connection = StorageConnection::new(requests, storage_responses);
// The core is used to actually execute the requests
let core = StreamAnimationCore {
storage_connection: storage_connection,
next_element_id: None,
cached_layers: HashMap::new(),
cached_keyframe: None,
brush_defn: None,
brush_props: None,
path_brush_defn: None,
path_brush_props: None,
retired_edit_senders: vec![],
};
let core = Arc::new(Desync::new(core));
// Anything published to the editor is piped into the core
pipe_in(Arc::clone(&core), edit_publisher.subscribe(), |core, edits: Arc<Vec<AnimationEdit>>| {
async move {
// Edits require some pre-processing: assign the IDs, perform undo actions and write to the log (note that undo edits are performed before serialization)
let mut edits = core.assign_ids_to_edits(&*edits).await;
core.process_undo_edits(&mut edits).await;
core.serialize_edits_to_log(&edits).await;
// Perform the edits to retire them
let retired = core.perform_edits(edits).await;
// Clean up the edit publishers, in case any aren't being listened to any more
core.retired_edit_senders.retain(|sender| sender.count_subscribers() > 0);
// Send the edits as retired
for retired_sender in core.retired_edit_senders.iter_mut() {
retired_sender.publish(retired.clone()).await;
}
}.boxed()
});
// Build the animation
StreamAnimation {
core: core,
idle_sync_requests: Desync::new(vec![]),
edit_publisher: edit_publisher
}
}
///
/// Performs an asynchronous request on a storage layer for this animation
///
pub (super) fn request_async<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> impl Future<Output=Option<Vec<StorageResponse>>> {
request_core_async(&self.core, request.into_iter().collect())
}
///
/// Performs a synchronous request on the storage layer for this animation
///
/// Synchronous requests are fairly slow, so should be avoided in inner loops
///
pub (super) fn request_sync<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> Option<Vec<StorageResponse>> {
request_core_sync(Arc::clone(&self.core), request.into_iter().collect())
}
///
/// Waits for any pending edits on this animation to complete
///
pub (super) fn wait_for_edits(&self) {
// Force a desync to wait for the when_empty future to complete
let when_empty = self.edit_publisher.republish().when_empty();
// Create a desync and wait for the 'when_empty' signal to show up (indicating all the edits have been sent to the core)
let wait_for_edits = Desync::new(());
let _ = wait_for_edits.future_desync(move |_| async move { when_empty.await; }.boxed());
// Synchronise after the future has completed
wait_for_edits.sync(|_| { });
// Synchronise with the animation core so that all the edits are performed
self.core.sync(|_| { });
}
///
/// Retrieves the current file properties for the animation
///
fn file_properties(&self) -> FileProperties {
// Retrieve the properties from storage (and update the version we have stored if there is one)
let mut response = self.request_sync(vec![StorageCommand::ReadAnimationProperties]).unwrap_or_else(|| vec![]);
let properties;
match response.pop() {
Some(StorageResponse::NotFound) => {
// File properties are not set
properties = FileProperties::default();
}
Some(StorageResponse::AnimationProperties(props)) => {
// Deserialize the file properties
properties = FileProperties::deserialize(&mut props.chars()).expect("Could not parse file properties");
}
unknown => panic!("Unexpected response {:?} while reading file properties", unknown)
}
properties
}
}
impl Animation for StreamAnimation {
///
/// Retrieves the frame size of this animation
///
fn size(&self) -> (f64, f64) {
self.wait_for_edits();
self.file_properties().size
}
///
/// Retrieves the length of this animation
///
fn duration(&self) -> Duration {
self.wait_for_edits();
self.file_properties().duration
}
///
/// Retrieves the duration of a single frame
///
fn frame_length(&self) -> Duration {
self.wait_for_edits(); |
///
/// Retrieves the IDs of the layers in this object
///
fn get_layer_ids(&self) -> Vec<u64> {
self.wait_for_edits();
let layer_responses = self.request_sync(vec![StorageCommand::ReadLayers]).unwrap_or_else(|| vec![]);
layer_responses
.into_iter()
.flat_map(|response| {
match response {
StorageResponse::LayerProperties(id, properties) => Some((id, LayerProperties::deserialize(&mut properties.chars())?)),
_ => None
}
})
.sorted_by(|(id_a, layer_a), (id_b, layer_b)| {
if layer_a.ordering == layer_b.ordering {
id_a.cmp(&id_b)
} else {
layer_a.ordering.cmp(&layer_b.ordering)
}
})
.map(|(id, _props)| id)
.collect()
}
///
/// Retrieves the layer with the specified ID from this animation
///
fn get_layer_with_id(&self, layer_id: u64) -> Option<Arc<dyn Layer>> {
self.wait_for_edits();
// Read the properties for the specified layer
let layer_properties = self.request_sync(vec![StorageCommand::ReadLayerProperties(layer_id)]);
if let Some(StorageResponse::LayerProperties(_, serialized)) = layer_properties.and_then(|mut props| props.pop()) {
if let Some(layer_properties) = LayerProperties::deserialize(&mut serialized.chars()) {
// Found the layer
Some(Arc::new(StreamLayer::new(Arc::clone(&self.core), layer_id, layer_properties)))
} else {
// Can't deserialize the layer properties
None
}
} else {
// Layer does not exist
None
}
}
///
/// Retrieves the total number of edits that have been performed on this animation
///
fn get_num_edits(&self) -> usize {
self.wait_for_edits();
let mut response = self.request_sync(vec![StorageCommand::ReadEditLogLength]).unwrap_or_else(|| vec![]);
match response.pop() {
Some(StorageResponse::NumberOfEdits(num_edits)) => num_edits,
_ => panic!("Unexpected response while reading number of edits")
}
}
///
/// Reads from the edit log for this animation
///
fn read_edit_log<'a>(&'a self, range: Range<usize>) -> BoxStream<'a, AnimationEdit> {
self.wait_for_edits();
// Clamp the range of edits to the maximum number of edits
let max_edit = self.get_num_edits();
let range = if range.end > max_edit {
range.start..max_edit
} else {
range
};
// Generate a stream to read from the edit log as we go
let per_request = 20;
let mut remaining = range;
let mut fetched = vec![];
let mut next_response = None;
stream::poll_fn(move |context| {
loop {
if remaining.len() != 0 && fetched.len() == 0 && next_response.is_none() {
// Fetch up to per_request items for each request
let num_to_fetch = remaining.len();
let num_to_fetch = if num_to_fetch > per_request { per_request } else { num_to_fetch };
let fetch_range = (remaining.start)..(remaining.start + num_to_fetch);
// Start polling for the next batch
next_response = Some(self.request_async(vec![StorageCommand::ReadEdits(fetch_range)]));
remaining = (remaining.start+num_to_fetch)..(remaining.end);
}
if let Some(next) = fetched.pop() {
// Just returning the batch we've already fetched
return Poll::Ready(Some(next));
} else if let Some(mut waiting) = next_response.take() {
// Try to retrieve the next item from the batch
let poll_response = waiting.poll_unpin(context);
match poll_response {
Poll::Pending => {
// Keep waiting for the response
next_response = Some(waiting);
return Poll::Pending
},
Poll::Ready(response) => {
// Load the edits into the fetched array
let mut response = response.unwrap_or(vec![]);
while let Some(response) = response.pop() {
// Ignore everything that's not an edit (we have no way to do error handling here)
if let StorageResponse::Edit(_num, serialized_edit) = response {
// Store edits that deserialize successfully on the fetched list
if let Some(edit) = AnimationEdit::deserialize(&mut serialized_edit.chars()) {
fetched.push(edit)
}
}
}
}
}
} else if remaining.len() == 0 {
// Reached the end of the stream
return Poll::Ready(None);
}
}
}).fuse().boxed()
}
}
impl EditableAnimation for StreamAnimation {
///
/// Assigns a new unique ID for creating a new motion
///
/// This ID will not have been used so far and will not be used again, and can be used as the ID for the MotionElement vector element.
///
fn assign_element_id(&self) -> ElementId {
// Create a queue to run the 'assign element ID' future on
let core = Arc::clone(&self.core);
// Perform the request and retrieve the result
core.future_desync(|core| core.assign_element_id(ElementId::Unassigned).boxed())
.sync().unwrap()
}
///
/// Retrieves a sink that can be used to send edits for this animation
///
/// Edits are supplied as groups (stored in a vec) so that it's possible to ensure that
/// a set of related edits are performed atomically
///
fn edit(&self) -> Publisher<Arc<Vec<AnimationEdit>>> {
self.edit_publisher.republish()
}
///
/// Sends a set of edits straight to this animation
///
/// (Note that these are not always published to the publisher)
///
fn perform_edits(&self, edits: Vec<AnimationEdit>) {
// Get a publisher to send the edits to (this editor does send its edits to the publisher)
let mut publisher = self.edit_publisher.republish();
// Get an idle sync request desync
// We use desync instead of the futures executor as the executor will panic if we are called from within another future
// (desync provides a way around this problem)
let sync_request = self.idle_sync_requests.sync(|reqs| {
let next_request = reqs.pop();
if let Some(next_request) = next_request {
next_request
} else {
let req = Desync::new(None);
req
}
});
// Queue a request
sync_request.future_desync(move |_| {
async move {
// Publish the edits
publisher.publish(Arc::new(edits)).await;
}.boxed()
}).sync().ok();
// Return the sync_request to the pool
self.idle_sync_requests.desync(move |reqs| { reqs.push(sync_request) });
// Wait for the edits to complete
self.wait_for_edits();
}
///
/// Returns a stream of edits as they are being retired (ie, the edits that are now visible on the animation)
///
fn retired_edits(&self) -> BoxStream<'static, RetiredEdit> {
// Create a channel to send edits through
let mut sender = Publisher::new(10);
let receiver = sender.subscribe();
// Add to the list in the core
self.core.sync(move |core| {
core.retired_edit_senders.push(sender);
});
// Box up the receiver to create the result
receiver.boxed()
}
///
/// Flushes any caches this might have (forces reload from data storage)
///
fn flush_caches(&self) {
self.core.desync(|core| {
core.cached_keyframe = None;
});
}
} | self.file_properties().frame_length
} | random_line_split |
stream_animation.rs | use super::stream_layer::*;
use super::stream_animation_core::*;
use crate::traits::*;
use crate::storage::*;
use crate::storage::file_properties::*;
use crate::storage::layer_properties::*;
use ::desync::*;
use flo_stream::*;
use itertools::*;
use futures::prelude::*;
use futures::task::{Poll};
use futures::stream;
use futures::stream::{BoxStream};
use std::sync::*;
use std::ops::{Range};
use std::time::{Duration};
use std::collections::{HashMap};
///
/// Animation that sends its updates to a storage stream
///
pub struct StreamAnimation {
/// The core, where the actual work is done
core: Arc<Desync<StreamAnimationCore>>,
/// The publisher for the edits to this animation
edit_publisher: Publisher<Arc<Vec<AnimationEdit>>>,
/// Available synchronous requests
idle_sync_requests: Desync<Vec<Desync<Option<Vec<StorageResponse>>>>>,
}
impl StreamAnimation {
///
/// Creates a new stream animation. The result is the animation implementation and the
/// stream of requests to be sent to the storage layer
///
pub fn new<ConnectStream: FnOnce(BoxStream<'static, Vec<StorageCommand>>) -> BoxStream<'static, Vec<StorageResponse>>>(connect_stream: ConnectStream) -> StreamAnimation |
///
/// Performs an asynchronous request on a storage layer for this animation
///
pub (super) fn request_async<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> impl Future<Output=Option<Vec<StorageResponse>>> {
request_core_async(&self.core, request.into_iter().collect())
}
///
/// Performs a synchronous request on the storage layer for this animation
///
/// Synchronous requests are fairly slow, so should be avoided in inner loops
///
pub (super) fn request_sync<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> Option<Vec<StorageResponse>> {
request_core_sync(Arc::clone(&self.core), request.into_iter().collect())
}
///
/// Waits for any pending edits on this animation to complete
///
pub (super) fn wait_for_edits(&self) {
// Force a desync to wait for the when_empty future to complete
let when_empty = self.edit_publisher.republish().when_empty();
// Create a desync and wait for the 'when_empty' signal to show up (indicating all the edits have been sent to the core)
let wait_for_edits = Desync::new(());
let _ = wait_for_edits.future_desync(move |_| async move { when_empty.await; }.boxed());
// Synchronise after the future has completed
wait_for_edits.sync(|_| { });
// Synchronise with the animation core so that all the edits are performed
self.core.sync(|_| { });
}
///
/// Retrieves the current file properties for the animation
///
fn file_properties(&self) -> FileProperties {
// Retrieve the properties from storage (and update the version we have stored if there is one)
let mut response = self.request_sync(vec![StorageCommand::ReadAnimationProperties]).unwrap_or_else(|| vec![]);
let properties;
match response.pop() {
Some(StorageResponse::NotFound) => {
// File properties are not set
properties = FileProperties::default();
}
Some(StorageResponse::AnimationProperties(props)) => {
// Deserialize the file properties
properties = FileProperties::deserialize(&mut props.chars()).expect("Could not parse file properties");
}
unknown => panic!("Unexpected response {:?} while reading file properties", unknown)
}
properties
}
}
impl Animation for StreamAnimation {
///
/// Retrieves the frame size of this animation
///
fn size(&self) -> (f64, f64) {
self.wait_for_edits();
self.file_properties().size
}
///
/// Retrieves the length of this animation
///
fn duration(&self) -> Duration {
self.wait_for_edits();
self.file_properties().duration
}
///
/// Retrieves the duration of a single frame
///
fn frame_length(&self) -> Duration {
self.wait_for_edits();
self.file_properties().frame_length
}
///
/// Retrieves the IDs of the layers in this object
///
fn get_layer_ids(&self) -> Vec<u64> {
self.wait_for_edits();
let layer_responses = self.request_sync(vec![StorageCommand::ReadLayers]).unwrap_or_else(|| vec![]);
layer_responses
.into_iter()
.flat_map(|response| {
match response {
StorageResponse::LayerProperties(id, properties) => Some((id, LayerProperties::deserialize(&mut properties.chars())?)),
_ => None
}
})
.sorted_by(|(id_a, layer_a), (id_b, layer_b)| {
if layer_a.ordering == layer_b.ordering {
id_a.cmp(&id_b)
} else {
layer_a.ordering.cmp(&layer_b.ordering)
}
})
.map(|(id, _props)| id)
.collect()
}
///
/// Retrieves the layer with the specified ID from this animation
///
fn get_layer_with_id(&self, layer_id: u64) -> Option<Arc<dyn Layer>> {
self.wait_for_edits();
// Read the properties for the specified layer
let layer_properties = self.request_sync(vec![StorageCommand::ReadLayerProperties(layer_id)]);
if let Some(StorageResponse::LayerProperties(_, serialized)) = layer_properties.and_then(|mut props| props.pop()) {
if let Some(layer_properties) = LayerProperties::deserialize(&mut serialized.chars()) {
// Found the layer
Some(Arc::new(StreamLayer::new(Arc::clone(&self.core), layer_id, layer_properties)))
} else {
// Can't deserialize the layer properties
None
}
} else {
// Layer does not exist
None
}
}
///
/// Retrieves the total number of edits that have been performed on this animation
///
fn get_num_edits(&self) -> usize {
self.wait_for_edits();
let mut response = self.request_sync(vec![StorageCommand::ReadEditLogLength]).unwrap_or_else(|| vec![]);
match response.pop() {
Some(StorageResponse::NumberOfEdits(num_edits)) => num_edits,
_ => panic!("Unexpected response while reading number of edits")
}
}
///
/// Reads from the edit log for this animation
///
fn read_edit_log<'a>(&'a self, range: Range<usize>) -> BoxStream<'a, AnimationEdit> {
self.wait_for_edits();
// Clamp the range of edits to the maximum number of edits
let max_edit = self.get_num_edits();
let range = if range.end > max_edit {
range.start..max_edit
} else {
range
};
// Generate a stream to read from the edit log as we go
let per_request = 20;
let mut remaining = range;
let mut fetched = vec![];
let mut next_response = None;
stream::poll_fn(move |context| {
loop {
if remaining.len() != 0 && fetched.len() == 0 && next_response.is_none() {
// Fetch up to per_request items for each request
let num_to_fetch = remaining.len();
let num_to_fetch = if num_to_fetch > per_request { per_request } else { num_to_fetch };
let fetch_range = (remaining.start)..(remaining.start + num_to_fetch);
// Start polling for the next batch
next_response = Some(self.request_async(vec![StorageCommand::ReadEdits(fetch_range)]));
remaining = (remaining.start+num_to_fetch)..(remaining.end);
}
if let Some(next) = fetched.pop() {
// Just returning the batch we've already fetched
return Poll::Ready(Some(next));
} else if let Some(mut waiting) = next_response.take() {
// Try to retrieve the next item from the batch
let poll_response = waiting.poll_unpin(context);
match poll_response {
Poll::Pending => {
// Keep waiting for the response
next_response = Some(waiting);
return Poll::Pending
},
Poll::Ready(response) => {
// Load the edits into the fetched array
let mut response = response.unwrap_or(vec![]);
while let Some(response) = response.pop() {
// Ignore everything that's not an edit (we have no way to do error handling here)
if let StorageResponse::Edit(_num, serialized_edit) = response {
// Store edits that deserialize successfully on the fetched list
if let Some(edit) = AnimationEdit::deserialize(&mut serialized_edit.chars()) {
fetched.push(edit)
}
}
}
}
}
} else if remaining.len() == 0 {
// Reached the end of the stream
return Poll::Ready(None);
}
}
}).fuse().boxed()
}
}
impl EditableAnimation for StreamAnimation {
///
/// Assigns a new unique ID for creating a new motion
///
/// This ID will not have been used so far and will not be used again, and can be used as the ID for the MotionElement vector element.
///
fn assign_element_id(&self) -> ElementId {
// Create a queue to run the 'assign element ID' future on
let core = Arc::clone(&self.core);
// Perform the request and retrieve the result
core.future_desync(|core| core.assign_element_id(ElementId::Unassigned).boxed())
.sync().unwrap()
}
///
/// Retrieves a sink that can be used to send edits for this animation
///
/// Edits are supplied as groups (stored in a vec) so that it's possible to ensure that
/// a set of related edits are performed atomically
///
fn edit(&self) -> Publisher<Arc<Vec<AnimationEdit>>> {
self.edit_publisher.republish()
}
///
/// Sends a set of edits straight to this animation
///
/// (Note that these are not always published to the publisher)
///
fn perform_edits(&self, edits: Vec<AnimationEdit>) {
// Get a publisher to send the edits to (this editor does send its edits to the publisher)
let mut publisher = self.edit_publisher.republish();
// Get an idle sync request desync
// We use desync instead of the futures executor as the executor will panic if we are called from within another future
// (desync provides a way around this problem)
let sync_request = self.idle_sync_requests.sync(|reqs| {
let next_request = reqs.pop();
if let Some(next_request) = next_request {
next_request
} else {
let req = Desync::new(None);
req
}
});
// Queue a request
sync_request.future_desync(move |_| {
async move {
// Publish the edits
publisher.publish(Arc::new(edits)).await;
}.boxed()
}).sync().ok();
// Return the sync_request to the pool
self.idle_sync_requests.desync(move |reqs| { reqs.push(sync_request) });
// Wait for the edits to complete
self.wait_for_edits();
}
///
/// Returns a stream of edits as they are being retired (ie, the edits that are now visible on the animation)
///
fn retired_edits(&self) -> BoxStream<'static, RetiredEdit> {
// Create a channel to send edits through
let mut sender = Publisher::new(10);
let receiver = sender.subscribe();
// Add to the list in the core
self.core.sync(move |core| {
core.retired_edit_senders.push(sender);
});
// Box up the receiver to create the result
receiver.boxed()
}
///
/// Flushes any caches this might have (forces reload from data storage)
///
fn flush_caches(&self) {
self.core.desync(|core| {
core.cached_keyframe = None;
});
}
}
| {
// Create the storage requests. When the storage layer is running behind, we'll buffer up to 10 of these
let mut requests = Publisher::new(10);
let commands = requests.subscribe().boxed();
let storage_responses = connect_stream(commands);
let mut edit_publisher = Publisher::new(10);
let storage_connection = StorageConnection::new(requests, storage_responses);
// The core is used to actually execute the requests
let core = StreamAnimationCore {
storage_connection: storage_connection,
next_element_id: None,
cached_layers: HashMap::new(),
cached_keyframe: None,
brush_defn: None,
brush_props: None,
path_brush_defn: None,
path_brush_props: None,
retired_edit_senders: vec![],
};
let core = Arc::new(Desync::new(core));
// Anything published to the editor is piped into the core
pipe_in(Arc::clone(&core), edit_publisher.subscribe(), |core, edits: Arc<Vec<AnimationEdit>>| {
async move {
// Edits require some pre-processing: assign the IDs, perform undo actions and write to the log (note that undo edits are performed before serialization)
let mut edits = core.assign_ids_to_edits(&*edits).await;
core.process_undo_edits(&mut edits).await;
core.serialize_edits_to_log(&edits).await;
// Perform the edits to retire them
let retired = core.perform_edits(edits).await;
// Clean up the edit publishers, in case any aren't being listened to any more
core.retired_edit_senders.retain(|sender| sender.count_subscribers() > 0);
// Send the edits as retired
for retired_sender in core.retired_edit_senders.iter_mut() {
retired_sender.publish(retired.clone()).await;
}
}.boxed()
});
// Build the animation
StreamAnimation {
core: core,
idle_sync_requests: Desync::new(vec![]),
edit_publisher: edit_publisher
}
} | identifier_body |
config.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Runtime configuration, via [`ConfigOptions`]
use crate::{DataFusionError, Result};
use std::any::Any;
use std::collections::{BTreeMap, HashMap};
use std::fmt::Display;
/// A macro that wraps a configuration struct and automatically derives
/// [`Default`] and [`ConfigField`] for it, allowing it to be used
/// in the [`ConfigOptions`] configuration tree
///
/// For example,
///
/// ```ignore
/// config_namespace! {
/// /// Amazing config
/// pub struct MyConfig {
/// /// Field 1 doc
/// field1: String, default = "".to_string()
///
/// /// Field 2 doc
/// field2: usize, default = 232
///
/// /// Field 3 doc
/// field3: Option<usize>, default = None
/// }
///}
/// ```
///
/// Will generate
///
/// ```ignore
/// /// Amazing config
/// #[derive(Debug, Clone)]
/// #[non_exhaustive]
/// pub struct MyConfig {
/// /// Field 1 doc
/// field1: String,
/// /// Field 2 doc
/// field2: usize,
/// /// Field 3 doc
/// field3: Option<usize>,
/// }
/// impl ConfigField for MyConfig {
/// fn set(&mut self, key: &str, value: &str) -> Result<()> {
/// let (key, rem) = key.split_once('.').unwrap_or((key, ""));
/// match key {
/// "field1" => self.field1.set(rem, value),
/// "field2" => self.field2.set(rem, value),
/// "field3" => self.field3.set(rem, value),
/// _ => Err(DataFusionError::Internal(format!(
/// "Config value \"{}\" not found on MyConfig",
/// key
/// ))),
/// }
/// }
///
/// fn visit<V: Visit>(&self, v: &mut V, key_prefix: &str, _description: &'static str) {
/// let key = format!("{}.field1", key_prefix);
/// let desc = "Field 1 doc";
/// self.field1.visit(v, key.as_str(), desc);
/// let key = format!("{}.field2", key_prefix);
/// let desc = "Field 2 doc";
/// self.field2.visit(v, key.as_str(), desc);
/// let key = format!("{}.field3", key_prefix);
/// let desc = "Field 3 doc";
/// self.field3.visit(v, key.as_str(), desc);
/// }
/// }
///
/// impl Default for MyConfig {
/// fn default() -> Self {
/// Self {
/// field1: "".to_string(),
/// field2: 232,
/// field3: None,
/// }
/// }
/// }
/// ```
///
/// NB: Misplaced commas may result in nonsensical errors
///
macro_rules! config_namespace {
(
$(#[doc = $struct_d:tt])*
$vis:vis struct $struct_name:ident {
$(
$(#[doc = $d:tt])*
$field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
)*$(,)*
}
) => {
$(#[doc = $struct_d])*
#[derive(Debug, Clone)]
#[non_exhaustive]
$vis struct $struct_name{
$(
$(#[doc = $d])*
$field_vis $field_name : $field_type,
)*
}
impl ConfigField for $struct_name {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
$(
stringify!($field_name) => self.$field_name.set(rem, value),
)*
_ => Err(DataFusionError::Internal(
format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
))
}
}
fn visit<V: Visit>(&self, v: &mut V, key_prefix: &str, _description: &'static str) {
$(
let key = format!(concat!("{}.", stringify!($field_name)), key_prefix);
let desc = concat!($($d),*).trim();
self.$field_name.visit(v, key.as_str(), desc);
)*
}
}
impl Default for $struct_name {
fn default() -> Self {
Self {
$($field_name: $default),*
}
}
}
}
}
config_namespace! {
/// Options related to catalog and directory scanning
pub struct CatalogOptions {
/// Whether the default catalog and schema should be created automatically.
pub create_default_catalog_and_schema: bool, default = true
/// The default catalog name - this impacts what SQL queries use if not specified
pub default_catalog: String, default = "datafusion".to_string()
/// The default schema name - this impacts what SQL queries use if not specified
pub default_schema: String, default = "public".to_string()
/// Should DataFusion provide access to `information_schema`
/// virtual tables for displaying schema information
pub information_schema: bool, default = false
/// Location scanned to load tables for `default` schema
pub location: Option<String>, default = None
/// Type of `TableProvider` to use when loading `default` schema
pub format: Option<String>, default = None
/// If the file has a header
pub has_header: bool, default = false
}
}
config_namespace! {
/// Options related to SQL parser
pub struct SqlParserOptions {
/// When set to true, SQL parser will parse float as decimal type
pub parse_float_as_decimal: bool, default = false
/// When set to true, SQL parser will normalize ident (convert ident to lowercase when not quoted)
pub enable_ident_normalization: bool, default = true
/// Configure the SQL dialect used by DataFusion's parser; supported values include: Generic,
/// MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, MsSQL, ClickHouse, BigQuery, and Ansi.
pub dialect: String, default = "generic".to_string()
}
}
config_namespace! {
/// Options related to query execution
pub struct ExecutionOptions {
/// Default batch size while creating new batches, it's especially useful for
/// buffer-in-memory batches since creating tiny batches would result in too much
/// metadata memory consumption
pub batch_size: usize, default = 8192
/// When set to true, record batches will be examined between each operator and
/// small batches will be coalesced into larger batches. This is helpful when there
/// are highly selective filters or joins that could produce tiny output batches. The
/// target batch size is determined by the configuration setting
pub coalesce_batches: bool, default = true
/// Should DataFusion collect statistics after listing files
pub collect_statistics: bool, default = false
/// Number of partitions for query execution. Increasing partitions can increase
/// concurrency.
///
/// Defaults to the number of CPU cores on the system
pub target_partitions: usize, default = num_cpus::get()
/// The default time zone
///
/// Some functions, e.g. `EXTRACT(HOUR from SOME_TIME)`, shift the underlying datetime
/// according to this time zone, and then extract the hour
pub time_zone: Option<String>, default = Some("+00:00".into())
/// Parquet options
pub parquet: ParquetOptions, default = Default::default()
/// Aggregate options
pub aggregate: AggregateOptions, default = Default::default()
/// Fan-out during initial physical planning.
///
/// This is mostly use to plan `UNION` children in parallel.
///
/// Defaults to the number of CPU cores on the system
pub planning_concurrency: usize, default = num_cpus::get()
}
}
config_namespace! {
/// Options related to reading of parquet files
pub struct ParquetOptions {
/// If true, reads the Parquet data page level metadata (the
/// Page Index), if present, to reduce the I/O and number of
/// rows decoded.
pub enable_page_index: bool, default = true
/// If true, the parquet reader attempts to skip entire row groups based
/// on the predicate in the query and the metadata (min/max values) stored in
/// the parquet file
pub pruning: bool, default = true
/// If true, the parquet reader skip the optional embedded metadata that may be in
/// the file Schema. This setting can help avoid schema conflicts when querying
/// multiple parquet files with schemas containing compatible types but different metadata
pub skip_metadata: bool, default = true
/// If specified, the parquet reader will try and fetch the last `size_hint`
/// bytes of the parquet file optimistically. If not specified, two reads are required:
/// One read to fetch the 8-byte parquet footer and
/// another to fetch the metadata length encoded in the footer
pub metadata_size_hint: Option<usize>, default = None
/// If true, filter expressions are be applied during the parquet decoding operation to
/// reduce the number of rows decoded
pub pushdown_filters: bool, default = false
/// If true, filter expressions evaluated during the parquet decoding operation
/// will be reordered heuristically to minimize the cost of evaluation. If false,
/// the filters are applied in the same order as written in the query
pub reorder_filters: bool, default = false
}
}
config_namespace! {
/// Options related to aggregate execution
pub struct AggregateOptions {
/// Specifies the threshold for using `ScalarValue`s to update
/// accumulators during high-cardinality aggregations for each input batch.
///
/// The aggregation is considered high-cardinality if the number of affected groups
/// is greater than or equal to `batch_size / scalar_update_factor`. In such cases,
/// `ScalarValue`s are utilized for updating accumulators, rather than the default
/// batch-slice approach. This can lead to performance improvements.
///
/// By adjusting the `scalar_update_factor`, you can balance the trade-off between
/// more efficient accumulator updates and the number of groups affected.
pub scalar_update_factor: usize, default = 10
}
}
config_namespace! {
/// Options related to query optimization
pub struct OptimizerOptions {
/// When set to true, the physical plan optimizer will try to add round robin
/// repartitioning to increase parallelism to leverage more CPU cores
pub enable_round_robin_repartition: bool, default = true
/// When set to true, the optimizer will insert filters before a join between
/// a nullable and non-nullable column to filter out nulls on the nullable side. This
/// filter can add additional overhead when the file format does not fully support
/// predicate push down.
pub filter_null_join_keys: bool, default = false
/// Should DataFusion repartition data using the aggregate keys to execute aggregates
/// in parallel using the provided `target_partitions` level
pub repartition_aggregations: bool, default = true
/// Minimum total files size in bytes to perform file scan repartitioning.
pub repartition_file_min_size: usize, default = 10 * 1024 * 1024
/// Should DataFusion repartition data using the join keys to execute joins in parallel
/// using the provided `target_partitions` level
pub repartition_joins: bool, default = true
/// Should DataFusion allow symmetric hash joins for unbounded data sources even when
/// its inputs do not have any ordering or filtering If the flag is not enabled,
/// the SymmetricHashJoin operator will be unable to prune its internal buffers,
/// resulting in certain join types - such as Full, Left, LeftAnti, LeftSemi, Right,
/// RightAnti, and RightSemi - being produced only at the end of the execution.
/// This is not typical in stream processing. Additionally, without proper design for
/// long runner execution, all types of joins may encounter out-of-memory errors.
pub allow_symmetric_joins_without_pruning: bool, default = true
/// When set to `true`, file groups will be repartitioned to achieve maximum parallelism.
/// Currently Parquet and CSV formats are supported.
///
/// If set to `true`, all files will be repartitioned evenly (i.e., a single large file
/// might be partitioned into smaller chunks) for parallel scanning.
/// If set to `false`, different files will be read in parallel, but repartitioning won't
/// happen within a single file.
pub repartition_file_scans: bool, default = true
/// Should DataFusion repartition data using the partitions keys to execute window
/// functions in parallel using the provided `target_partitions` level
pub repartition_windows: bool, default = true
/// Should DataFusion execute sorts in a per-partition fashion and merge
/// afterwards instead of coalescing first and sorting globally.
/// With this flag is enabled, plans in the form below
///
/// ```text
/// "SortExec: [a@0 ASC]",
/// " CoalescePartitionsExec",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
/// would turn into the plan below which performs better in multithreaded environments
///
/// ```text
/// "SortPreservingMergeExec: [a@0 ASC]",
/// " SortExec: [a@0 ASC]",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
pub repartition_sorts: bool, default = true
/// When set to true, the logical plan optimizer will produce warning
/// messages if any optimization rules produce errors and then proceed to the next
/// rule. When set to false, any rules that produce errors will cause the query to fail
pub skip_failed_rules: bool, default = false
/// Number of times that the optimizer will attempt to optimize the plan
pub max_passes: usize, default = 3
/// When set to true, the physical plan optimizer will run a top down
/// process to reorder the join keys
pub top_down_join_key_reordering: bool, default = true
/// When set to true, the physical plan optimizer will prefer HashJoin over SortMergeJoin.
/// HashJoin can work more efficiently than SortMergeJoin but consumes more memory
pub prefer_hash_join: bool, default = true
/// The maximum estimated size in bytes for one input side of a HashJoin
/// will be collected into a single partition
pub hash_join_single_partition_threshold: usize, default = 1024 * 1024
}
}
config_namespace! {
/// Options controlling explain output
pub struct ExplainOptions {
/// When set to true, the explain statement will only print logical plans
pub logical_plan_only: bool, default = false
/// When set to true, the explain statement will only print physical plans
pub physical_plan_only: bool, default = false
}
}
/// A key value pair, with a corresponding description
#[derive(Debug)]
pub struct ConfigEntry {
/// A unique string to identify this config value
pub key: String,
/// The value if any
pub value: Option<String>,
/// A description of this configuration entry
pub description: &'static str,
}
/// Configuration options struct, able to store both built-in configuration and custom options
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct ConfigOptions {
/// Catalog options
pub catalog: CatalogOptions,
/// Execution options
pub execution: ExecutionOptions,
/// Optimizer options
pub optimizer: OptimizerOptions,
/// SQL parser options
pub sql_parser: SqlParserOptions,
/// Explain options
pub explain: ExplainOptions,
/// Optional extensions registered using [`Extensions::insert`]
pub extensions: Extensions,
}
impl ConfigField for ConfigOptions {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
// Extensions are handled in the public `ConfigOptions::set`
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
"catalog" => self.catalog.set(rem, value),
"execution" => self.execution.set(rem, value),
"optimizer" => self.optimizer.set(rem, value),
"explain" => self.explain.set(rem, value),
"sql_parser" => self.sql_parser.set(rem, value),
_ => Err(DataFusionError::Internal(format!(
"Config value \"{key}\" not found on ConfigOptions"
))),
}
}
fn visit<V: Visit>(&self, v: &mut V, _key_prefix: &str, _description: &'static str) {
self.catalog.visit(v, "datafusion.catalog", "");
self.execution.visit(v, "datafusion.execution", "");
self.optimizer.visit(v, "datafusion.optimizer", "");
self.explain.visit(v, "datafusion.explain", "");
self.sql_parser.visit(v, "datafusion.sql_parser", "");
}
}
impl ConfigOptions {
/// Creates a new [`ConfigOptions`] with default values
pub fn new() -> Self {
Self::default()
}
/// Set extensions to provided value
pub fn with_extensions(mut self, extensions: Extensions) -> Self {
self.extensions = extensions;
self
}
/// Set a configuration option
pub fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (prefix, key) = key.split_once('.').ok_or_else(|| {
DataFusionError::External(
format!("could not find config namespace for key \"{key}\"",).into(),
)
})?;
if prefix == "datafusion" {
return ConfigField::set(self, key, value);
}
let e = self.extensions.0.get_mut(prefix);
let e = e.ok_or_else(|| {
DataFusionError::External(
format!("Could not find config namespace \"{prefix}\"",).into(),
)
})?;
e.0.set(key, value)
}
/// Create new ConfigOptions struct, taking values from
/// environment variables where possible.
///
/// For example, setting `DATAFUSION_EXECUTION_BATCH_SIZE` will
/// control `datafusion.execution.batch_size`.
pub fn from_env() -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
// Extract the names of all fields and then look up the corresponding
// environment variables. This isn't hugely efficient but avoids
// ambiguity between `a.b` and `a_b` which would both correspond
// to an environment variable of `A_B`
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
let env = key.to_uppercase().replace('.', "_");
if let Some(var) = std::env::var_os(env) {
ret.set(&key, var.to_string_lossy().as_ref())?;
}
}
Ok(ret)
}
/// Create new ConfigOptions struct, taking values from a string hash map.
///
/// Only the built-in configurations will be extracted from the hash map
/// and other key value pairs will be ignored.
pub fn from_string_hash_map(settings: HashMap<String, String>) -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
if let Some(var) = settings.get(&key) {
ret.set(&key, var)?;
}
}
Ok(ret)
}
/// Returns the [`ConfigEntry`] stored within this [`ConfigOptions`]
pub fn entries(&self) -> Vec<ConfigEntry> {
struct Visitor(Vec<ConfigEntry>);
impl Visit for Visitor {
fn some<V: Display>(
&mut self,
key: &str,
value: V,
description: &'static str,
) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: Some(value.to_string()),
description,
})
}
fn none(&mut self, key: &str, description: &'static str) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: None,
description,
})
}
}
let mut v = Visitor(vec![]);
self.visit(&mut v, "datafusion", "");
v.0.extend(self.extensions.0.values().flat_map(|e| e.0.entries()));
v.0
}
/// Generate documentation that can be included in the user guide
pub fn generate_config_markdown() -> String {
use std::fmt::Write as _;
let mut s = Self::default();
// Normalize for display
s.execution.target_partitions = 0;
s.execution.planning_concurrency = 0;
let mut docs = "| key | default | description |\n".to_string();
docs += "|-----|---------|-------------|\n";
let mut entries = s.entries();
entries.sort_unstable_by(|a, b| a.key.cmp(&b.key));
for entry in s.entries() {
let _ = writeln!(
&mut docs,
"| {} | {} | {} |",
entry.key,
entry.value.as_deref().unwrap_or("NULL"),
entry.description
);
}
docs
}
}
/// [`ConfigExtension`] provides a mechanism to store third-party configuration within DataFusion
///
/// Unfortunately associated constants are not currently object-safe, and so this
/// extends the object-safe [`ExtensionOptions`]
pub trait ConfigExtension: ExtensionOptions {
/// Configuration namespace prefix to use
///
/// All values under this will be prefixed with `$PREFIX + "."`
const PREFIX: &'static str;
}
/// An object-safe API for storing arbitrary configuration
pub trait ExtensionOptions: Send + Sync + std::fmt::Debug + 'static {
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any(&self) -> &dyn Any;
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Return a deep clone of this [`ExtensionOptions`]
///
/// It is important this does not share mutable state to avoid consistency issues
/// with configuration changing whilst queries are executing
fn cloned(&self) -> Box<dyn ExtensionOptions>;
/// Set the given `key`, `value` pair
fn set(&mut self, key: &str, value: &str) -> Result<()>;
/// Returns the [`ConfigEntry`] stored in this [`ExtensionOptions`]
fn entries(&self) -> Vec<ConfigEntry>;
}
/// A type-safe container for [`ConfigExtension`]
#[derive(Debug, Default, Clone)]
pub struct Extensions(BTreeMap<&'static str, ExtensionBox>);
impl Extensions {
/// Create a new, empty [`Extensions`]
pub fn new() -> Self {
Self(BTreeMap::new())
}
/// Registers a [`ConfigExtension`] with this [`ConfigOptions`]
pub fn insert<T: ConfigExtension>(&mut self, extension: T) {
assert_ne!(T::PREFIX, "datafusion");
let e = ExtensionBox(Box::new(extension));
self.0.insert(T::PREFIX, e);
}
/// Retrieves the extension of the given type if any
pub fn get<T: ConfigExtension>(&self) -> Option<&T> |
/// Retrieves the extension of the given type if any
pub fn get_mut<T: ConfigExtension>(&mut self) -> Option<&mut T> {
let e = self.0.get_mut(T::PREFIX)?;
e.0.as_any_mut().downcast_mut()
}
}
#[derive(Debug)]
struct ExtensionBox(Box<dyn ExtensionOptions>);
impl Clone for ExtensionBox {
fn clone(&self) -> Self {
Self(self.0.cloned())
}
}
/// A trait implemented by `config_namespace` and for field types that provides
/// the ability to walk and mutate the configuration tree
trait ConfigField {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str);
fn set(&mut self, key: &str, value: &str) -> Result<()>;
}
impl<F: ConfigField + Default> ConfigField for Option<F> {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str) {
match self {
Some(s) => s.visit(v, key, description),
None => v.none(key, description),
}
}
fn set(&mut self, key: &str, value: &str) -> Result<()> {
self.get_or_insert_with(Default::default).set(key, value)
}
}
macro_rules! config_field {
($t:ty) => {
impl ConfigField for $t {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str) {
v.some(key, self, description)
}
fn set(&mut self, _: &str, value: &str) -> Result<()> {
*self = value.parse().map_err(|e| {
DataFusionError::Context(
format!(concat!("Error parsing {} as ", stringify!($t),), value),
Box::new(DataFusionError::External(Box::new(e))),
)
})?;
Ok(())
}
}
};
}
config_field!(String);
config_field!(bool);
config_field!(usize);
/// An implementation trait used to recursively walk configuration
trait Visit {
fn some<V: Display>(&mut self, key: &str, value: V, description: &'static str);
fn none(&mut self, key: &str, description: &'static str);
}
/// Convenience macro to create [`ExtensionsOptions`].
///
/// The created structure implements the following traits:
///
/// - [`Clone`]
/// - [`Debug`]
/// - [`Default`]
/// - [`ExtensionOptions`]
///
/// # Usage
/// The syntax is:
///
/// ```text
/// extensions_options! {
/// /// Struct docs (optional).
/// [<vis>] struct <StructName> {
/// /// Field docs (optional)
/// [<vis>] <field_name>: <field_type>, default = <default_value>
///
/// ... more fields
/// }
/// }
/// ```
///
/// The placeholders are:
/// - `[<vis>]`: Optional visibility modifier like `pub` or `pub(crate)`.
/// - `<StructName>`: Struct name like `MyStruct`.
/// - `<field_name>`: Field name like `my_field`.
/// - `<field_type>`: Field type like `u8`.
/// - `<default_value>`: Default value matching the field type like `42`.
///
/// # Example
/// ```
/// use datafusion_common::extensions_options;
///
/// extensions_options! {
/// /// My own config options.
/// pub struct MyConfig {
/// /// Should "foo" be replaced by "bar"?
/// pub foo_to_bar: bool, default = true
///
/// /// How many "baz" should be created?
/// pub baz_count: usize, default = 1337
/// }
/// }
/// ```
///
///
/// [`Debug`]: std::fmt::Debug
/// [`ExtensionsOptions`]: crate::config::ExtensionOptions
#[macro_export]
macro_rules! extensions_options {
(
$(#[doc = $struct_d:tt])*
$vis:vis struct $struct_name:ident {
$(
$(#[doc = $d:tt])*
$field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
)*$(,)*
}
) => {
$(#[doc = $struct_d])*
#[derive(Debug, Clone)]
#[non_exhaustive]
$vis struct $struct_name{
$(
$(#[doc = $d])*
$field_vis $field_name : $field_type,
)*
}
impl Default for $struct_name {
fn default() -> Self {
Self {
$($field_name: $default),*
}
}
}
impl $crate::config::ExtensionOptions for $struct_name {
fn as_any(&self) -> &dyn ::std::any::Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn ::std::any::Any {
self
}
fn cloned(&self) -> Box<dyn $crate::config::ExtensionOptions> {
Box::new(self.clone())
}
fn set(&mut self, key: &str, value: &str) -> $crate::Result<()> {
match key {
$(
stringify!($field_name) => {
self.$field_name = value.parse().map_err(|e| {
$crate::DataFusionError::Context(
format!(concat!("Error parsing {} as ", stringify!($t),), value),
Box::new($crate::DataFusionError::External(Box::new(e))),
)
})?;
Ok(())
}
)*
_ => Err($crate::DataFusionError::Internal(
format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
))
}
}
fn entries(&self) -> Vec<$crate::config::ConfigEntry> {
vec![
$(
$crate::config::ConfigEntry {
key: stringify!($field_name).to_owned(),
value: (self.$field_name != $default).then(|| self.$field_name.to_string()),
description: concat!($($d),*).trim(),
},
)*
]
}
}
}
}
| {
self.0.get(T::PREFIX)?.0.as_any().downcast_ref()
} | identifier_body |
config.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Runtime configuration, via [`ConfigOptions`]
use crate::{DataFusionError, Result};
use std::any::Any;
use std::collections::{BTreeMap, HashMap};
use std::fmt::Display;
/// A macro that wraps a configuration struct and automatically derives
/// [`Default`] and [`ConfigField`] for it, allowing it to be used
/// in the [`ConfigOptions`] configuration tree
///
/// For example,
///
/// ```ignore
/// config_namespace! {
/// /// Amazing config
/// pub struct MyConfig {
/// /// Field 1 doc
/// field1: String, default = "".to_string()
///
/// /// Field 2 doc
/// field2: usize, default = 232
///
/// /// Field 3 doc
/// field3: Option<usize>, default = None
/// }
///}
/// ```
///
/// Will generate
///
/// ```ignore
/// /// Amazing config
/// #[derive(Debug, Clone)]
/// #[non_exhaustive]
/// pub struct MyConfig {
/// /// Field 1 doc
/// field1: String,
/// /// Field 2 doc
/// field2: usize,
/// /// Field 3 doc
/// field3: Option<usize>,
/// }
/// impl ConfigField for MyConfig {
/// fn set(&mut self, key: &str, value: &str) -> Result<()> {
/// let (key, rem) = key.split_once('.').unwrap_or((key, ""));
/// match key {
/// "field1" => self.field1.set(rem, value),
/// "field2" => self.field2.set(rem, value),
/// "field3" => self.field3.set(rem, value),
/// _ => Err(DataFusionError::Internal(format!(
/// "Config value \"{}\" not found on MyConfig",
/// key
/// ))),
/// }
/// }
///
/// fn visit<V: Visit>(&self, v: &mut V, key_prefix: &str, _description: &'static str) {
/// let key = format!("{}.field1", key_prefix);
/// let desc = "Field 1 doc";
/// self.field1.visit(v, key.as_str(), desc);
/// let key = format!("{}.field2", key_prefix);
/// let desc = "Field 2 doc";
/// self.field2.visit(v, key.as_str(), desc);
/// let key = format!("{}.field3", key_prefix);
/// let desc = "Field 3 doc";
/// self.field3.visit(v, key.as_str(), desc);
/// }
/// }
///
/// impl Default for MyConfig {
/// fn default() -> Self {
/// Self {
/// field1: "".to_string(),
/// field2: 232,
/// field3: None,
/// }
/// }
/// }
/// ```
///
/// NB: Misplaced commas may result in nonsensical errors
///
macro_rules! config_namespace {
(
$(#[doc = $struct_d:tt])*
$vis:vis struct $struct_name:ident {
$(
$(#[doc = $d:tt])*
$field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
)*$(,)*
}
) => {
$(#[doc = $struct_d])*
#[derive(Debug, Clone)]
#[non_exhaustive]
$vis struct $struct_name{
$(
$(#[doc = $d])*
$field_vis $field_name : $field_type,
)*
}
impl ConfigField for $struct_name {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
$(
stringify!($field_name) => self.$field_name.set(rem, value),
)*
_ => Err(DataFusionError::Internal(
format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
))
}
}
fn visit<V: Visit>(&self, v: &mut V, key_prefix: &str, _description: &'static str) {
$(
let key = format!(concat!("{}.", stringify!($field_name)), key_prefix);
let desc = concat!($($d),*).trim();
self.$field_name.visit(v, key.as_str(), desc);
)*
}
}
impl Default for $struct_name {
fn default() -> Self {
Self {
$($field_name: $default),*
}
}
}
}
}
config_namespace! {
/// Options related to catalog and directory scanning
pub struct CatalogOptions {
/// Whether the default catalog and schema should be created automatically.
pub create_default_catalog_and_schema: bool, default = true
/// The default catalog name - this impacts what SQL queries use if not specified
pub default_catalog: String, default = "datafusion".to_string()
/// The default schema name - this impacts what SQL queries use if not specified
pub default_schema: String, default = "public".to_string()
/// Should DataFusion provide access to `information_schema`
/// virtual tables for displaying schema information
pub information_schema: bool, default = false
/// Location scanned to load tables for `default` schema
pub location: Option<String>, default = None
/// Type of `TableProvider` to use when loading `default` schema
pub format: Option<String>, default = None
/// If the file has a header
pub has_header: bool, default = false
}
}
config_namespace! {
/// Options related to SQL parser
pub struct SqlParserOptions {
/// When set to true, SQL parser will parse float as decimal type
pub parse_float_as_decimal: bool, default = false
/// When set to true, SQL parser will normalize ident (convert ident to lowercase when not quoted)
pub enable_ident_normalization: bool, default = true
/// Configure the SQL dialect used by DataFusion's parser; supported values include: Generic,
/// MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, MsSQL, ClickHouse, BigQuery, and Ansi.
pub dialect: String, default = "generic".to_string()
}
}
config_namespace! {
/// Options related to query execution
pub struct ExecutionOptions {
/// Default batch size while creating new batches, it's especially useful for
/// buffer-in-memory batches since creating tiny batches would result in too much
/// metadata memory consumption
pub batch_size: usize, default = 8192
/// When set to true, record batches will be examined between each operator and
/// small batches will be coalesced into larger batches. This is helpful when there
/// are highly selective filters or joins that could produce tiny output batches. The
/// target batch size is determined by the configuration setting
pub coalesce_batches: bool, default = true
/// Should DataFusion collect statistics after listing files
pub collect_statistics: bool, default = false
/// Number of partitions for query execution. Increasing partitions can increase
/// concurrency.
///
/// Defaults to the number of CPU cores on the system
pub target_partitions: usize, default = num_cpus::get()
/// The default time zone
///
/// Some functions, e.g. `EXTRACT(HOUR from SOME_TIME)`, shift the underlying datetime
/// according to this time zone, and then extract the hour
pub time_zone: Option<String>, default = Some("+00:00".into())
/// Parquet options
pub parquet: ParquetOptions, default = Default::default()
/// Aggregate options
pub aggregate: AggregateOptions, default = Default::default()
/// Fan-out during initial physical planning.
///
/// This is mostly use to plan `UNION` children in parallel.
///
/// Defaults to the number of CPU cores on the system
pub planning_concurrency: usize, default = num_cpus::get()
}
}
config_namespace! {
/// Options related to reading of parquet files
pub struct ParquetOptions {
/// If true, reads the Parquet data page level metadata (the
/// Page Index), if present, to reduce the I/O and number of
/// rows decoded.
pub enable_page_index: bool, default = true
/// If true, the parquet reader attempts to skip entire row groups based
/// on the predicate in the query and the metadata (min/max values) stored in
/// the parquet file
pub pruning: bool, default = true
/// If true, the parquet reader skip the optional embedded metadata that may be in
/// the file Schema. This setting can help avoid schema conflicts when querying
/// multiple parquet files with schemas containing compatible types but different metadata
pub skip_metadata: bool, default = true
/// If specified, the parquet reader will try and fetch the last `size_hint`
/// bytes of the parquet file optimistically. If not specified, two reads are required:
/// One read to fetch the 8-byte parquet footer and
/// another to fetch the metadata length encoded in the footer
pub metadata_size_hint: Option<usize>, default = None
/// If true, filter expressions are be applied during the parquet decoding operation to
/// reduce the number of rows decoded
pub pushdown_filters: bool, default = false
/// If true, filter expressions evaluated during the parquet decoding operation
/// will be reordered heuristically to minimize the cost of evaluation. If false,
/// the filters are applied in the same order as written in the query
pub reorder_filters: bool, default = false
}
}
config_namespace! {
/// Options related to aggregate execution
pub struct AggregateOptions {
/// Specifies the threshold for using `ScalarValue`s to update
/// accumulators during high-cardinality aggregations for each input batch.
///
/// The aggregation is considered high-cardinality if the number of affected groups
/// is greater than or equal to `batch_size / scalar_update_factor`. In such cases,
/// `ScalarValue`s are utilized for updating accumulators, rather than the default
/// batch-slice approach. This can lead to performance improvements.
///
/// By adjusting the `scalar_update_factor`, you can balance the trade-off between
/// more efficient accumulator updates and the number of groups affected.
pub scalar_update_factor: usize, default = 10
}
}
config_namespace! {
/// Options related to query optimization
pub struct OptimizerOptions {
/// When set to true, the physical plan optimizer will try to add round robin
/// repartitioning to increase parallelism to leverage more CPU cores
pub enable_round_robin_repartition: bool, default = true
/// When set to true, the optimizer will insert filters before a join between
/// a nullable and non-nullable column to filter out nulls on the nullable side. This
/// filter can add additional overhead when the file format does not fully support
/// predicate push down.
pub filter_null_join_keys: bool, default = false
/// Should DataFusion repartition data using the aggregate keys to execute aggregates
/// in parallel using the provided `target_partitions` level
pub repartition_aggregations: bool, default = true
/// Minimum total files size in bytes to perform file scan repartitioning.
pub repartition_file_min_size: usize, default = 10 * 1024 * 1024
/// Should DataFusion repartition data using the join keys to execute joins in parallel
/// using the provided `target_partitions` level
pub repartition_joins: bool, default = true
/// Should DataFusion allow symmetric hash joins for unbounded data sources even when
/// its inputs do not have any ordering or filtering If the flag is not enabled,
/// the SymmetricHashJoin operator will be unable to prune its internal buffers,
/// resulting in certain join types - such as Full, Left, LeftAnti, LeftSemi, Right,
/// RightAnti, and RightSemi - being produced only at the end of the execution.
/// This is not typical in stream processing. Additionally, without proper design for
/// long runner execution, all types of joins may encounter out-of-memory errors.
pub allow_symmetric_joins_without_pruning: bool, default = true
/// When set to `true`, file groups will be repartitioned to achieve maximum parallelism.
/// Currently Parquet and CSV formats are supported.
///
/// If set to `true`, all files will be repartitioned evenly (i.e., a single large file
/// might be partitioned into smaller chunks) for parallel scanning.
/// If set to `false`, different files will be read in parallel, but repartitioning won't
/// happen within a single file.
pub repartition_file_scans: bool, default = true
/// Should DataFusion repartition data using the partitions keys to execute window
/// functions in parallel using the provided `target_partitions` level
pub repartition_windows: bool, default = true
/// Should DataFusion execute sorts in a per-partition fashion and merge
/// afterwards instead of coalescing first and sorting globally.
/// With this flag is enabled, plans in the form below
///
/// ```text
/// "SortExec: [a@0 ASC]",
/// " CoalescePartitionsExec",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
/// would turn into the plan below which performs better in multithreaded environments
///
/// ```text
/// "SortPreservingMergeExec: [a@0 ASC]",
/// " SortExec: [a@0 ASC]",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
pub repartition_sorts: bool, default = true
/// When set to true, the logical plan optimizer will produce warning
/// messages if any optimization rules produce errors and then proceed to the next
/// rule. When set to false, any rules that produce errors will cause the query to fail
pub skip_failed_rules: bool, default = false
/// Number of times that the optimizer will attempt to optimize the plan
pub max_passes: usize, default = 3
/// When set to true, the physical plan optimizer will run a top down
/// process to reorder the join keys
pub top_down_join_key_reordering: bool, default = true
/// When set to true, the physical plan optimizer will prefer HashJoin over SortMergeJoin.
/// HashJoin can work more efficiently than SortMergeJoin but consumes more memory
pub prefer_hash_join: bool, default = true
/// The maximum estimated size in bytes for one input side of a HashJoin
/// will be collected into a single partition
pub hash_join_single_partition_threshold: usize, default = 1024 * 1024
}
}
config_namespace! {
/// Options controlling explain output
pub struct ExplainOptions {
/// When set to true, the explain statement will only print logical plans
pub logical_plan_only: bool, default = false
/// When set to true, the explain statement will only print physical plans
pub physical_plan_only: bool, default = false
}
}
/// A key value pair, with a corresponding description
#[derive(Debug)]
pub struct ConfigEntry {
/// A unique string to identify this config value
pub key: String,
/// The value if any
pub value: Option<String>,
/// A description of this configuration entry
pub description: &'static str,
}
/// Configuration options struct, able to store both built-in configuration and custom options
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct ConfigOptions {
/// Catalog options
pub catalog: CatalogOptions,
/// Execution options
pub execution: ExecutionOptions,
/// Optimizer options
pub optimizer: OptimizerOptions,
/// SQL parser options
pub sql_parser: SqlParserOptions,
/// Explain options
pub explain: ExplainOptions,
/// Optional extensions registered using [`Extensions::insert`]
pub extensions: Extensions,
}
impl ConfigField for ConfigOptions {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
// Extensions are handled in the public `ConfigOptions::set`
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
"catalog" => self.catalog.set(rem, value),
"execution" => self.execution.set(rem, value),
"optimizer" => self.optimizer.set(rem, value),
"explain" => self.explain.set(rem, value),
"sql_parser" => self.sql_parser.set(rem, value),
_ => Err(DataFusionError::Internal(format!(
"Config value \"{key}\" not found on ConfigOptions"
))),
}
}
fn visit<V: Visit>(&self, v: &mut V, _key_prefix: &str, _description: &'static str) {
self.catalog.visit(v, "datafusion.catalog", "");
self.execution.visit(v, "datafusion.execution", "");
self.optimizer.visit(v, "datafusion.optimizer", "");
self.explain.visit(v, "datafusion.explain", "");
self.sql_parser.visit(v, "datafusion.sql_parser", "");
}
}
impl ConfigOptions {
/// Creates a new [`ConfigOptions`] with default values
pub fn new() -> Self {
Self::default()
}
/// Set extensions to provided value
pub fn with_extensions(mut self, extensions: Extensions) -> Self {
self.extensions = extensions;
self
}
/// Set a configuration option
pub fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (prefix, key) = key.split_once('.').ok_or_else(|| {
DataFusionError::External(
format!("could not find config namespace for key \"{key}\"",).into(),
)
})?;
if prefix == "datafusion" {
return ConfigField::set(self, key, value);
}
let e = self.extensions.0.get_mut(prefix);
let e = e.ok_or_else(|| {
DataFusionError::External(
format!("Could not find config namespace \"{prefix}\"",).into(),
)
})?;
e.0.set(key, value)
}
/// Create new ConfigOptions struct, taking values from
/// environment variables where possible.
///
/// For example, setting `DATAFUSION_EXECUTION_BATCH_SIZE` will
/// control `datafusion.execution.batch_size`.
pub fn from_env() -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
// Extract the names of all fields and then look up the corresponding
// environment variables. This isn't hugely efficient but avoids
// ambiguity between `a.b` and `a_b` which would both correspond
// to an environment variable of `A_B`
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
let env = key.to_uppercase().replace('.', "_");
if let Some(var) = std::env::var_os(env) {
ret.set(&key, var.to_string_lossy().as_ref())?;
}
}
Ok(ret)
}
/// Create new ConfigOptions struct, taking values from a string hash map.
///
/// Only the built-in configurations will be extracted from the hash map
/// and other key value pairs will be ignored.
pub fn from_string_hash_map(settings: HashMap<String, String>) -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
if let Some(var) = settings.get(&key) {
ret.set(&key, var)?;
}
}
Ok(ret)
}
/// Returns the [`ConfigEntry`] stored within this [`ConfigOptions`]
pub fn entries(&self) -> Vec<ConfigEntry> {
struct Visitor(Vec<ConfigEntry>);
impl Visit for Visitor {
fn some<V: Display>(
&mut self,
key: &str,
value: V,
description: &'static str,
) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: Some(value.to_string()),
description,
})
}
fn none(&mut self, key: &str, description: &'static str) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: None,
description,
})
}
}
let mut v = Visitor(vec![]);
self.visit(&mut v, "datafusion", "");
v.0.extend(self.extensions.0.values().flat_map(|e| e.0.entries()));
v.0
}
/// Generate documentation that can be included in the user guide
pub fn generate_config_markdown() -> String {
use std::fmt::Write as _;
let mut s = Self::default();
// Normalize for display
s.execution.target_partitions = 0;
s.execution.planning_concurrency = 0;
let mut docs = "| key | default | description |\n".to_string();
docs += "|-----|---------|-------------|\n";
let mut entries = s.entries();
entries.sort_unstable_by(|a, b| a.key.cmp(&b.key));
for entry in s.entries() {
let _ = writeln!(
&mut docs,
"| {} | {} | {} |",
entry.key,
entry.value.as_deref().unwrap_or("NULL"),
entry.description
);
}
docs
}
}
/// [`ConfigExtension`] provides a mechanism to store third-party configuration within DataFusion
///
/// Unfortunately associated constants are not currently object-safe, and so this
/// extends the object-safe [`ExtensionOptions`]
pub trait ConfigExtension: ExtensionOptions {
/// Configuration namespace prefix to use
///
/// All values under this will be prefixed with `$PREFIX + "."`
const PREFIX: &'static str;
}
/// An object-safe API for storing arbitrary configuration
pub trait ExtensionOptions: Send + Sync + std::fmt::Debug + 'static {
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any(&self) -> &dyn Any;
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Return a deep clone of this [`ExtensionOptions`]
///
/// It is important this does not share mutable state to avoid consistency issues
/// with configuration changing whilst queries are executing
fn cloned(&self) -> Box<dyn ExtensionOptions>;
/// Set the given `key`, `value` pair
fn set(&mut self, key: &str, value: &str) -> Result<()>;
/// Returns the [`ConfigEntry`] stored in this [`ExtensionOptions`]
fn entries(&self) -> Vec<ConfigEntry>;
}
/// A type-safe container for [`ConfigExtension`]
#[derive(Debug, Default, Clone)]
pub struct Extensions(BTreeMap<&'static str, ExtensionBox>);
impl Extensions {
/// Create a new, empty [`Extensions`]
pub fn new() -> Self {
Self(BTreeMap::new())
}
/// Registers a [`ConfigExtension`] with this [`ConfigOptions`]
pub fn insert<T: ConfigExtension>(&mut self, extension: T) {
assert_ne!(T::PREFIX, "datafusion");
let e = ExtensionBox(Box::new(extension));
self.0.insert(T::PREFIX, e);
}
/// Retrieves the extension of the given type if any
pub fn get<T: ConfigExtension>(&self) -> Option<&T> {
self.0.get(T::PREFIX)?.0.as_any().downcast_ref()
}
/// Retrieves the extension of the given type if any
pub fn get_mut<T: ConfigExtension>(&mut self) -> Option<&mut T> {
let e = self.0.get_mut(T::PREFIX)?;
e.0.as_any_mut().downcast_mut()
}
}
#[derive(Debug)]
struct ExtensionBox(Box<dyn ExtensionOptions>);
impl Clone for ExtensionBox {
fn clone(&self) -> Self {
Self(self.0.cloned())
}
}
/// A trait implemented by `config_namespace` and for field types that provides
/// the ability to walk and mutate the configuration tree
trait ConfigField {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str);
fn set(&mut self, key: &str, value: &str) -> Result<()>;
}
impl<F: ConfigField + Default> ConfigField for Option<F> {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str) {
match self {
Some(s) => s.visit(v, key, description),
None => v.none(key, description),
}
}
fn set(&mut self, key: &str, value: &str) -> Result<()> {
self.get_or_insert_with(Default::default).set(key, value)
}
}
macro_rules! config_field {
($t:ty) => {
impl ConfigField for $t {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str) {
v.some(key, self, description)
}
fn set(&mut self, _: &str, value: &str) -> Result<()> {
*self = value.parse().map_err(|e| {
DataFusionError::Context(
format!(concat!("Error parsing {} as ", stringify!($t),), value),
Box::new(DataFusionError::External(Box::new(e))),
)
})?;
Ok(())
}
}
};
}
config_field!(String);
config_field!(bool);
config_field!(usize);
/// An implementation trait used to recursively walk configuration
trait Visit {
fn some<V: Display>(&mut self, key: &str, value: V, description: &'static str);
fn none(&mut self, key: &str, description: &'static str);
}
/// Convenience macro to create [`ExtensionsOptions`].
///
/// The created structure implements the following traits:
///
/// - [`Clone`]
/// - [`Debug`]
/// - [`Default`]
/// - [`ExtensionOptions`]
///
/// # Usage
/// The syntax is:
///
/// ```text
/// extensions_options! {
/// /// Struct docs (optional).
/// [<vis>] struct <StructName> {
/// /// Field docs (optional)
/// [<vis>] <field_name>: <field_type>, default = <default_value>
///
/// ... more fields
/// }
/// }
/// ```
///
/// The placeholders are:
/// - `[<vis>]`: Optional visibility modifier like `pub` or `pub(crate)`.
/// - `<StructName>`: Struct name like `MyStruct`.
/// - `<field_name>`: Field name like `my_field`.
/// - `<field_type>`: Field type like `u8`.
/// - `<default_value>`: Default value matching the field type like `42`.
///
/// # Example
/// ```
/// use datafusion_common::extensions_options;
///
/// extensions_options! {
/// /// My own config options.
/// pub struct MyConfig {
/// /// Should "foo" be replaced by "bar"?
/// pub foo_to_bar: bool, default = true
///
/// /// How many "baz" should be created?
/// pub baz_count: usize, default = 1337
/// }
/// }
/// ```
///
///
/// [`Debug`]: std::fmt::Debug
/// [`ExtensionsOptions`]: crate::config::ExtensionOptions
#[macro_export]
macro_rules! extensions_options {
(
$(#[doc = $struct_d:tt])*
$vis:vis struct $struct_name:ident {
$(
$(#[doc = $d:tt])*
$field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
)*$(,)*
}
) => {
$(#[doc = $struct_d])*
#[derive(Debug, Clone)]
#[non_exhaustive]
$vis struct $struct_name{
$(
$(#[doc = $d])*
$field_vis $field_name : $field_type,
)*
}
impl Default for $struct_name {
fn default() -> Self {
Self {
$($field_name: $default),*
}
}
}
impl $crate::config::ExtensionOptions for $struct_name {
fn as_any(&self) -> &dyn ::std::any::Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn ::std::any::Any {
self
}
fn cloned(&self) -> Box<dyn $crate::config::ExtensionOptions> {
Box::new(self.clone())
}
fn set(&mut self, key: &str, value: &str) -> $crate::Result<()> {
match key {
$(
stringify!($field_name) => {
self.$field_name = value.parse().map_err(|e| {
$crate::DataFusionError::Context(
format!(concat!("Error parsing {} as ", stringify!($t),), value),
Box::new($crate::DataFusionError::External(Box::new(e))),
)
})?;
Ok(())
} | }
fn entries(&self) -> Vec<$crate::config::ConfigEntry> {
vec![
$(
$crate::config::ConfigEntry {
key: stringify!($field_name).to_owned(),
value: (self.$field_name != $default).then(|| self.$field_name.to_string()),
description: concat!($($d),*).trim(),
},
)*
]
}
}
}
} | )*
_ => Err($crate::DataFusionError::Internal(
format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
))
} | random_line_split |
config.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Runtime configuration, via [`ConfigOptions`]
use crate::{DataFusionError, Result};
use std::any::Any;
use std::collections::{BTreeMap, HashMap};
use std::fmt::Display;
/// A macro that wraps a configuration struct and automatically derives
/// [`Default`] and [`ConfigField`] for it, allowing it to be used
/// in the [`ConfigOptions`] configuration tree
///
/// For example,
///
/// ```ignore
/// config_namespace! {
/// /// Amazing config
/// pub struct MyConfig {
/// /// Field 1 doc
/// field1: String, default = "".to_string()
///
/// /// Field 2 doc
/// field2: usize, default = 232
///
/// /// Field 3 doc
/// field3: Option<usize>, default = None
/// }
///}
/// ```
///
/// Will generate
///
/// ```ignore
/// /// Amazing config
/// #[derive(Debug, Clone)]
/// #[non_exhaustive]
/// pub struct MyConfig {
/// /// Field 1 doc
/// field1: String,
/// /// Field 2 doc
/// field2: usize,
/// /// Field 3 doc
/// field3: Option<usize>,
/// }
/// impl ConfigField for MyConfig {
/// fn set(&mut self, key: &str, value: &str) -> Result<()> {
/// let (key, rem) = key.split_once('.').unwrap_or((key, ""));
/// match key {
/// "field1" => self.field1.set(rem, value),
/// "field2" => self.field2.set(rem, value),
/// "field3" => self.field3.set(rem, value),
/// _ => Err(DataFusionError::Internal(format!(
/// "Config value \"{}\" not found on MyConfig",
/// key
/// ))),
/// }
/// }
///
/// fn visit<V: Visit>(&self, v: &mut V, key_prefix: &str, _description: &'static str) {
/// let key = format!("{}.field1", key_prefix);
/// let desc = "Field 1 doc";
/// self.field1.visit(v, key.as_str(), desc);
/// let key = format!("{}.field2", key_prefix);
/// let desc = "Field 2 doc";
/// self.field2.visit(v, key.as_str(), desc);
/// let key = format!("{}.field3", key_prefix);
/// let desc = "Field 3 doc";
/// self.field3.visit(v, key.as_str(), desc);
/// }
/// }
///
/// impl Default for MyConfig {
/// fn default() -> Self {
/// Self {
/// field1: "".to_string(),
/// field2: 232,
/// field3: None,
/// }
/// }
/// }
/// ```
///
/// NB: Misplaced commas may result in nonsensical errors
///
macro_rules! config_namespace {
(
$(#[doc = $struct_d:tt])*
$vis:vis struct $struct_name:ident {
$(
$(#[doc = $d:tt])*
$field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
)*$(,)*
}
) => {
$(#[doc = $struct_d])*
#[derive(Debug, Clone)]
#[non_exhaustive]
$vis struct $struct_name{
$(
$(#[doc = $d])*
$field_vis $field_name : $field_type,
)*
}
impl ConfigField for $struct_name {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
$(
stringify!($field_name) => self.$field_name.set(rem, value),
)*
_ => Err(DataFusionError::Internal(
format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
))
}
}
fn visit<V: Visit>(&self, v: &mut V, key_prefix: &str, _description: &'static str) {
$(
let key = format!(concat!("{}.", stringify!($field_name)), key_prefix);
let desc = concat!($($d),*).trim();
self.$field_name.visit(v, key.as_str(), desc);
)*
}
}
impl Default for $struct_name {
fn default() -> Self {
Self {
$($field_name: $default),*
}
}
}
}
}
config_namespace! {
/// Options related to catalog and directory scanning
pub struct CatalogOptions {
/// Whether the default catalog and schema should be created automatically.
pub create_default_catalog_and_schema: bool, default = true
/// The default catalog name - this impacts what SQL queries use if not specified
pub default_catalog: String, default = "datafusion".to_string()
/// The default schema name - this impacts what SQL queries use if not specified
pub default_schema: String, default = "public".to_string()
/// Should DataFusion provide access to `information_schema`
/// virtual tables for displaying schema information
pub information_schema: bool, default = false
/// Location scanned to load tables for `default` schema
pub location: Option<String>, default = None
/// Type of `TableProvider` to use when loading `default` schema
pub format: Option<String>, default = None
/// If the file has a header
pub has_header: bool, default = false
}
}
config_namespace! {
/// Options related to SQL parser
pub struct SqlParserOptions {
/// When set to true, SQL parser will parse float as decimal type
pub parse_float_as_decimal: bool, default = false
/// When set to true, SQL parser will normalize ident (convert ident to lowercase when not quoted)
pub enable_ident_normalization: bool, default = true
/// Configure the SQL dialect used by DataFusion's parser; supported values include: Generic,
/// MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, MsSQL, ClickHouse, BigQuery, and Ansi.
pub dialect: String, default = "generic".to_string()
}
}
config_namespace! {
/// Options related to query execution
pub struct ExecutionOptions {
/// Default batch size while creating new batches, it's especially useful for
/// buffer-in-memory batches since creating tiny batches would result in too much
/// metadata memory consumption
pub batch_size: usize, default = 8192
/// When set to true, record batches will be examined between each operator and
/// small batches will be coalesced into larger batches. This is helpful when there
/// are highly selective filters or joins that could produce tiny output batches. The
/// target batch size is determined by the configuration setting
pub coalesce_batches: bool, default = true
/// Should DataFusion collect statistics after listing files
pub collect_statistics: bool, default = false
/// Number of partitions for query execution. Increasing partitions can increase
/// concurrency.
///
/// Defaults to the number of CPU cores on the system
pub target_partitions: usize, default = num_cpus::get()
/// The default time zone
///
/// Some functions, e.g. `EXTRACT(HOUR from SOME_TIME)`, shift the underlying datetime
/// according to this time zone, and then extract the hour
pub time_zone: Option<String>, default = Some("+00:00".into())
/// Parquet options
pub parquet: ParquetOptions, default = Default::default()
/// Aggregate options
pub aggregate: AggregateOptions, default = Default::default()
/// Fan-out during initial physical planning.
///
/// This is mostly use to plan `UNION` children in parallel.
///
/// Defaults to the number of CPU cores on the system
pub planning_concurrency: usize, default = num_cpus::get()
}
}
config_namespace! {
/// Options related to reading of parquet files
pub struct ParquetOptions {
/// If true, reads the Parquet data page level metadata (the
/// Page Index), if present, to reduce the I/O and number of
/// rows decoded.
pub enable_page_index: bool, default = true
/// If true, the parquet reader attempts to skip entire row groups based
/// on the predicate in the query and the metadata (min/max values) stored in
/// the parquet file
pub pruning: bool, default = true
/// If true, the parquet reader skip the optional embedded metadata that may be in
/// the file Schema. This setting can help avoid schema conflicts when querying
/// multiple parquet files with schemas containing compatible types but different metadata
pub skip_metadata: bool, default = true
/// If specified, the parquet reader will try and fetch the last `size_hint`
/// bytes of the parquet file optimistically. If not specified, two reads are required:
/// One read to fetch the 8-byte parquet footer and
/// another to fetch the metadata length encoded in the footer
pub metadata_size_hint: Option<usize>, default = None
/// If true, filter expressions are be applied during the parquet decoding operation to
/// reduce the number of rows decoded
pub pushdown_filters: bool, default = false
/// If true, filter expressions evaluated during the parquet decoding operation
/// will be reordered heuristically to minimize the cost of evaluation. If false,
/// the filters are applied in the same order as written in the query
pub reorder_filters: bool, default = false
}
}
config_namespace! {
/// Options related to aggregate execution
pub struct AggregateOptions {
/// Specifies the threshold for using `ScalarValue`s to update
/// accumulators during high-cardinality aggregations for each input batch.
///
/// The aggregation is considered high-cardinality if the number of affected groups
/// is greater than or equal to `batch_size / scalar_update_factor`. In such cases,
/// `ScalarValue`s are utilized for updating accumulators, rather than the default
/// batch-slice approach. This can lead to performance improvements.
///
/// By adjusting the `scalar_update_factor`, you can balance the trade-off between
/// more efficient accumulator updates and the number of groups affected.
pub scalar_update_factor: usize, default = 10
}
}
config_namespace! {
// Visited under the `datafusion.optimizer` prefix (see `ConfigOptions::visit`).
// The `///` doc comments below become runtime description strings, so only `//`
// comments are added in this documentation pass.
/// Options related to query optimization
pub struct OptimizerOptions {
/// When set to true, the physical plan optimizer will try to add round robin
/// repartitioning to increase parallelism to leverage more CPU cores
pub enable_round_robin_repartition: bool, default = true
/// When set to true, the optimizer will insert filters before a join between
/// a nullable and non-nullable column to filter out nulls on the nullable side. This
/// filter can add additional overhead when the file format does not fully support
/// predicate push down.
pub filter_null_join_keys: bool, default = false
/// Should DataFusion repartition data using the aggregate keys to execute aggregates
/// in parallel using the provided `target_partitions` level
pub repartition_aggregations: bool, default = true
/// Minimum total files size in bytes to perform file scan repartitioning.
pub repartition_file_min_size: usize, default = 10 * 1024 * 1024
/// Should DataFusion repartition data using the join keys to execute joins in parallel
/// using the provided `target_partitions` level
pub repartition_joins: bool, default = true
/// Should DataFusion allow symmetric hash joins for unbounded data sources even when
/// its inputs do not have any ordering or filtering If the flag is not enabled,
/// the SymmetricHashJoin operator will be unable to prune its internal buffers,
/// resulting in certain join types - such as Full, Left, LeftAnti, LeftSemi, Right,
/// RightAnti, and RightSemi - being produced only at the end of the execution.
/// This is not typical in stream processing. Additionally, without proper design for
/// long runner execution, all types of joins may encounter out-of-memory errors.
pub allow_symmetric_joins_without_pruning: bool, default = true
/// When set to `true`, file groups will be repartitioned to achieve maximum parallelism.
/// Currently Parquet and CSV formats are supported.
///
/// If set to `true`, all files will be repartitioned evenly (i.e., a single large file
/// might be partitioned into smaller chunks) for parallel scanning.
/// If set to `false`, different files will be read in parallel, but repartitioning won't
/// happen within a single file.
pub repartition_file_scans: bool, default = true
/// Should DataFusion repartition data using the partitions keys to execute window
/// functions in parallel using the provided `target_partitions` level
pub repartition_windows: bool, default = true
/// Should DataFusion execute sorts in a per-partition fashion and merge
/// afterwards instead of coalescing first and sorting globally.
/// With this flag is enabled, plans in the form below
///
/// ```text
/// "SortExec: [a@0 ASC]",
/// " CoalescePartitionsExec",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
/// would turn into the plan below which performs better in multithreaded environments
///
/// ```text
/// "SortPreservingMergeExec: [a@0 ASC]",
/// " SortExec: [a@0 ASC]",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
pub repartition_sorts: bool, default = true
/// When set to true, the logical plan optimizer will produce warning
/// messages if any optimization rules produce errors and then proceed to the next
/// rule. When set to false, any rules that produce errors will cause the query to fail
pub skip_failed_rules: bool, default = false
/// Number of times that the optimizer will attempt to optimize the plan
pub max_passes: usize, default = 3
/// When set to true, the physical plan optimizer will run a top down
/// process to reorder the join keys
pub top_down_join_key_reordering: bool, default = true
/// When set to true, the physical plan optimizer will prefer HashJoin over SortMergeJoin.
/// HashJoin can work more efficiently than SortMergeJoin but consumes more memory
pub prefer_hash_join: bool, default = true
/// The maximum estimated size in bytes for one input side of a HashJoin
/// will be collected into a single partition
pub hash_join_single_partition_threshold: usize, default = 1024 * 1024
}
}
config_namespace! {
// Both flags default to false, i.e. EXPLAIN prints logical and physical plans.
// Doc comments are runtime description strings; only `//` comments added.
/// Options controlling explain output
pub struct ExplainOptions {
/// When set to true, the explain statement will only print logical plans
pub logical_plan_only: bool, default = false
/// When set to true, the explain statement will only print physical plans
pub physical_plan_only: bool, default = false
}
}
/// A key value pair, with a corresponding description
#[derive(Debug)]
pub struct ConfigEntry {
/// A unique string to identify this config value
// e.g. "datafusion.execution.batch_size" — the fully-qualified, dot-separated key.
pub key: String,
/// The value if any
// `None` means unset (rendered as "NULL" by `generate_config_markdown`).
pub value: Option<String>,
/// A description of this configuration entry
// Sourced from the `///` doc comments captured by the `config_namespace!` macro.
pub description: &'static str,
}
/// Configuration options struct, able to store both built-in configuration and custom options
// `#[non_exhaustive]` lets new namespaces be added without a breaking change;
// construct via `ConfigOptions::new()` / `default()` rather than a struct literal.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct ConfigOptions {
/// Catalog options
pub catalog: CatalogOptions,
/// Execution options
pub execution: ExecutionOptions,
/// Optimizer options
pub optimizer: OptimizerOptions,
/// SQL parser options
pub sql_parser: SqlParserOptions,
/// Explain options
pub explain: ExplainOptions,
/// Optional extensions registered using [`Extensions::insert`]
// Third-party namespaces; keyed by their `ConfigExtension::PREFIX`.
pub extensions: Extensions,
}
// Internal tree-walking implementation: dispatches one path segment at a time
// into the matching built-in namespace. Extensions are intentionally NOT handled
// here (see the public `ConfigOptions::set`, which strips the extension prefix).
impl ConfigField for ConfigOptions {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
// Extensions are handled in the public `ConfigOptions::set`
// Split "namespace.rest"; a key with no '.' leaves `rem` empty and the
// nested namespace reports the error.
let (key, rem) = key.split_once('.').unwrap_or((key, ""))
;
match key {
"catalog" => self.catalog.set(rem, value),
"execution" => self.execution.set(rem, value),
"optimizer" => self.optimizer.set(rem, value),
"explain" => self.explain.set(rem, value),
"sql_parser" => self.sql_parser.set(rem, value),
_ => Err(DataFusionError::Internal(format!(
"Config value \"{key}\" not found on ConfigOptions"
))),
}
}
fn visit<V: Visit>(&self, v: &mut V, _key_prefix: &str, _description: &'static str) {
// NOTE: the visit order below fixes the order of `entries()` output;
// do not reorder without checking consumers.
self.catalog.visit(v, "datafusion.catalog", "");
self.execution.visit(v, "datafusion.execution", "");
self.optimizer.visit(v, "datafusion.optimizer", "");
self.explain.visit(v, "datafusion.explain", "");
self.sql_parser.visit(v, "datafusion.sql_parser", "");
}
}
impl ConfigOptions {
    /// Creates a new [`ConfigOptions`] with default values
    pub fn new() -> Self {
        Self::default()
    }

    /// Set extensions to provided value
    pub fn with_extensions(mut self, extensions: Extensions) -> Self {
        self.extensions = extensions;
        self
    }

    /// Set a configuration option
    ///
    /// Keys have the shape `"<namespace>.<rest>"`. The `"datafusion"` namespace
    /// routes to the built-in options; any other prefix is looked up among the
    /// registered [`Extensions`].
    pub fn set(&mut self, key: &str, value: &str) -> Result<()> {
        let (prefix, key) = key.split_once('.').ok_or_else(|| {
            DataFusionError::External(
                format!("could not find config namespace for key \"{key}\"",).into(),
            )
        })?;
        if prefix == "datafusion" {
            return ConfigField::set(self, key, value);
        }
        let e = self.extensions.0.get_mut(prefix);
        let e = e.ok_or_else(|| {
            DataFusionError::External(
                format!("Could not find config namespace \"{prefix}\"",).into(),
            )
        })?;
        e.0.set(key, value)
    }

    /// Create new ConfigOptions struct, taking values from
    /// environment variables where possible.
    ///
    /// For example, setting `DATAFUSION_EXECUTION_BATCH_SIZE` will
    /// control `datafusion.execution.batch_size`.
    pub fn from_env() -> Result<Self> {
        // Collects every key the config tree exposes, set or unset.
        struct Visitor(Vec<String>);
        impl Visit for Visitor {
            fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
                self.0.push(key.to_string())
            }
            fn none(&mut self, key: &str, _: &'static str) {
                self.0.push(key.to_string())
            }
        }
        // Extract the names of all fields and then look up the corresponding
        // environment variables. This isn't hugely efficient but avoids
        // ambiguity between `a.b` and `a_b` which would both correspond
        // to an environment variable of `A_B`
        let mut keys = Visitor(vec![]);
        let mut ret = Self::default();
        ret.visit(&mut keys, "datafusion", "");
        for key in keys.0 {
            let env = key.to_uppercase().replace('.', "_");
            if let Some(var) = std::env::var_os(env) {
                ret.set(&key, var.to_string_lossy().as_ref())?;
            }
        }
        Ok(ret)
    }

    /// Create new ConfigOptions struct, taking values from a string hash map.
    ///
    /// Only the built-in configurations will be extracted from the hash map
    /// and other key value pairs will be ignored.
    pub fn from_string_hash_map(settings: HashMap<String, String>) -> Result<Self> {
        struct Visitor(Vec<String>);
        impl Visit for Visitor {
            fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
                self.0.push(key.to_string())
            }
            fn none(&mut self, key: &str, _: &'static str) {
                self.0.push(key.to_string())
            }
        }
        let mut keys = Visitor(vec![]);
        let mut ret = Self::default();
        ret.visit(&mut keys, "datafusion", "");
        for key in keys.0 {
            if let Some(var) = settings.get(&key) {
                ret.set(&key, var)?;
            }
        }
        Ok(ret)
    }

    /// Returns the [`ConfigEntry`] stored within this [`ConfigOptions`]
    pub fn entries(&self) -> Vec<ConfigEntry> {
        struct Visitor(Vec<ConfigEntry>);
        impl Visit for Visitor {
            fn some<V: Display>(
                &mut self,
                key: &str,
                value: V,
                description: &'static str,
            ) {
                self.0.push(ConfigEntry {
                    key: key.to_string(),
                    value: Some(value.to_string()),
                    description,
                })
            }
            // FIX: the method name was missing; it must be `none` to satisfy
            // the `Visit` trait (matching the visitors in `from_env` and
            // `from_string_hash_map`).
            fn none(&mut self, key: &str, description: &'static str) {
                self.0.push(ConfigEntry {
                    key: key.to_string(),
                    value: None,
                    description,
                })
            }
        }
        let mut v = Visitor(vec![]);
        self.visit(&mut v, "datafusion", "");
        // Append extension entries after the built-ins.
        v.0.extend(self.extensions.0.values().flat_map(|e| e.0.entries()));
        v.0
    }

    /// Generate documentation that can be included in the user guide
    pub fn generate_config_markdown() -> String {
        use std::fmt::Write as _;
        let mut s = Self::default();
        // Normalize for display
        s.execution.target_partitions = 0;
        s.execution.planning_concurrency = 0;
        let mut docs = "| key | default | description |\n".to_string();
        docs += "|-----|---------|-------------|\n";
        let mut entries = s.entries();
        entries.sort_unstable_by(|a, b| a.key.cmp(&b.key));
        // FIX: iterate the sorted `entries`; previously the code sorted the
        // list and then looped over a fresh, unsorted `s.entries()`, so the
        // sort had no effect on the emitted table.
        for entry in entries {
            let _ = writeln!(
                &mut docs,
                "| {} | {} | {} |",
                entry.key,
                entry.value.as_deref().unwrap_or("NULL"),
                entry.description
            );
        }
        docs
    }
}
/// [`ConfigExtension`] provides a mechanism to store third-party configuration within DataFusion
///
/// Unfortunately associated constants are not currently object-safe, and so this
/// extends the object-safe [`ExtensionOptions`]
pub trait ConfigExtension: ExtensionOptions {
/// Configuration namespace prefix to use
///
/// All values under this will be prefixed with `$PREFIX + "."`
// Must not be "datafusion" — `Extensions::insert` asserts against that prefix.
const PREFIX: &'static str;
}
/// An object-safe API for storing arbitrary configuration
// Object safety is required because extensions are stored as
// `Box<dyn ExtensionOptions>` inside `ExtensionBox`.
pub trait ExtensionOptions: Send + Sync + std::fmt::Debug + 'static {
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any(&self) -> &dyn Any;
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Return a deep clone of this [`ExtensionOptions`]
///
/// It is important this does not share mutable state to avoid consistency issues
/// with configuration changing whilst queries are executing
fn cloned(&self) -> Box<dyn ExtensionOptions>;
/// Set the given `key`, `value` pair
fn set(&mut self, key: &str, value: &str) -> Result<()>;
/// Returns the [`ConfigEntry`] stored in this [`ExtensionOptions`]
fn entries(&self) -> Vec<ConfigEntry>;
}
/// A type-safe container for [`ConfigExtension`]
// BTreeMap keyed by the extension's static PREFIX, giving deterministic
// iteration order for `entries()`.
#[derive(Debug, Default, Clone)]
pub struct Extensions(BTreeMap<&'static str, ExtensionBox>);
impl Extensions {
/// Create a new, empty [`Extensions`]
pub fn new() -> Self {
Self(BTreeMap::new())
}
/// Registers a [`ConfigExtension`] with this [`ConfigOptions`]
// Panics if the extension claims the reserved "datafusion" prefix.
// Re-inserting the same prefix silently replaces the previous extension.
pub fn insert<T: ConfigExtension>(&mut self, extension: T) {
assert_ne!(T::PREFIX, "datafusion");
let e = ExtensionBox(Box::new(extension));
self.0.insert(T::PREFIX, e);
}
/// Retrieves the extension of the given type if any
// Returns None if the prefix is unregistered OR registered to a different type.
pub fn get<T: ConfigExtension>(&self) -> Option<&T> {
self.0.get(T::PREFIX)?.0.as_any().downcast_ref()
}
/// Retrieves the extension of the given type if any
pub fn get_mut<T: ConfigExtension>(&mut self) -> Option<&mut T> {
let e = self.0.get_mut(T::PREFIX)?;
e.0.as_any_mut().downcast_mut()
}
}
// Newtype wrapper so `Clone` can be implemented for the boxed trait object
// via `ExtensionOptions::cloned` (Box<dyn Trait> is not Clone by itself).
#[derive(Debug)]
struct ExtensionBox(Box<dyn ExtensionOptions>);
impl Clone for ExtensionBox {
fn clone(&self) -> Self {
Self(self.0.cloned())
}
}
/// A trait implemented by `config_namespace` and for field types that provides
/// the ability to walk and mutate the configuration tree
// `visit` reports the field (and nested fields) to a `Visit`or; `set` parses
// `value` and stores it at the path named by `key`.
trait ConfigField {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str);
fn set(&mut self, key: &str, value: &str) -> Result<()>;
}
// Optional fields: `None` is reported via `Visit::none`; setting a value on a
// `None` field first materialises the default before delegating.
impl<F: ConfigField + Default> ConfigField for Option<F> {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str) {
match self {
Some(s) => s.visit(v, key, description),
None => v.none(key, description),
}
}
fn set(&mut self, key: &str, value: &str) -> Result<()> {
self.get_or_insert_with(Default::default).set(key, value)
}
}
// Implements `ConfigField` for a leaf scalar type: `visit` always reports the
// value as present, and `set` ignores any remaining key path and parses the
// string via `FromStr`, wrapping parse failures in a contextual error.
macro_rules! config_field {
($t:ty) => {
impl ConfigField for $t {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str) {
v.some(key, self, description)
}
fn set(&mut self, _: &str, value: &str) -> Result<()> {
*self = value.parse().map_err(|e| {
DataFusionError::Context(
format!(concat!("Error parsing {} as ", stringify!($t),), value),
Box::new(DataFusionError::External(Box::new(e))),
)
})?;
Ok(())
}
}
};
}
// Leaf types that can appear in a config namespace.
config_field!(String);
config_field!(bool);
config_field!(usize);
/// An implementation trait used to recursively walk configuration
// `some` is called for fields with a value, `none` for unset Option fields.
trait Visit {
fn some<V: Display>(&mut self, key: &str, value: V, description: &'static str);
fn none(&mut self, key: &str, description: &'static str);
}
/// Convenience macro to create [`ExtensionsOptions`].
///
/// The created structure implements the following traits:
///
/// - [`Clone`]
/// - [`Debug`]
/// - [`Default`]
/// - [`ExtensionOptions`]
///
/// # Usage
/// The syntax is:
///
/// ```text
/// extensions_options! {
///     /// Struct docs (optional).
///     [<vis>] struct <StructName> {
///         /// Field docs (optional)
///         [<vis>] <field_name>: <field_type>, default = <default_value>
///
///         ... more fields
///     }
/// }
/// ```
///
/// The placeholders are:
/// - `[<vis>]`: Optional visibility modifier like `pub` or `pub(crate)`.
/// - `<StructName>`: Struct name like `MyStruct`.
/// - `<field_name>`: Field name like `my_field`.
/// - `<field_type>`: Field type like `u8`.
/// - `<default_value>`: Default value matching the field type like `42`.
///
/// # Example
/// ```
/// use datafusion_common::extensions_options;
///
/// extensions_options! {
///     /// My own config options.
///     pub struct MyConfig {
///         /// Should "foo" be replaced by "bar"?
///         pub foo_to_bar: bool, default = true
///
///         /// How many "baz" should be created?
///         pub baz_count: usize, default = 1337
///     }
/// }
/// ```
///
///
/// [`Debug`]: std::fmt::Debug
/// [`ExtensionsOptions`]: crate::config::ExtensionOptions
#[macro_export]
macro_rules! extensions_options {
    (
        $(#[doc = $struct_d:tt])*
        $vis:vis struct $struct_name:ident {
            $(
                $(#[doc = $d:tt])*
                $field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
            )*$(,)*
        }
    ) => {
        $(#[doc = $struct_d])*
        #[derive(Debug, Clone)]
        #[non_exhaustive]
        $vis struct $struct_name{
            $(
                $(#[doc = $d])*
                $field_vis $field_name : $field_type,
            )*
        }
        impl Default for $struct_name {
            fn default() -> Self {
                Self {
                    $($field_name: $default),*
                }
            }
        }
        impl $crate::config::ExtensionOptions for $struct_name {
            fn as_any(&self) -> &dyn ::std::any::Any {
                self
            }
            fn as_any_mut(&mut self) -> &mut dyn ::std::any::Any {
                self
            }
            fn cloned(&self) -> Box<dyn $crate::config::ExtensionOptions> {
                Box::new(self.clone())
            }
            fn set(&mut self, key: &str, value: &str) -> $crate::Result<()> {
                match key {
                    $(
                        stringify!($field_name) => {
                            self.$field_name = value.parse().map_err(|e| {
                                // FIX: the error message referenced `$t`, a
                                // metavariable this macro does not bind (copied
                                // from `config_field!`); `macro_rules!` rejects
                                // unknown metavariables, so use `$field_type`.
                                $crate::DataFusionError::Context(
                                    format!(concat!("Error parsing {} as ", stringify!($field_type),), value),
                                    Box::new($crate::DataFusionError::External(Box::new(e))),
                                )
                            })?;
                            Ok(())
                        }
                    )*
                    _ => Err($crate::DataFusionError::Internal(
                        format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
                    ))
                }
            }
            fn entries(&self) -> Vec<$crate::config::ConfigEntry> {
                vec![
                    $(
                        $crate::config::ConfigEntry {
                            // Only report a value when it differs from the default.
                            key: stringify!($field_name).to_owned(),
                            value: (self.$field_name != $default).then(|| self.$field_name.to_string()),
                            description: concat!($($d),*).trim(),
                        },
                    )*
                ]
            }
        }
    }
}
| none | identifier_name |
hdm.py | # HDM: Holographic Declarative Memory
# A module for Python ACT-R
# written by M. A. Kelly
# except for the parts written by Terry C. Stewart
# Based on original research and the memory models
# BEAGLE (Jones & Mewhort, 2007) and DSHM (Rutledge-Taylor, Kelly, West, & Pyke, 2014)
#
# To use HDM:
# from ccm.lib.actr import *
# from ccm.lib.actr.hdm import *
# ...
# retrieval=Buffer()
# memory=HDM(retrieval)
#
# HDM has some unique parameters that DM does not have:
# N is the dimensionality of the vectors.
# Defaults to a generous 512 dimensions.
# As few as 64 and as many as 2048 have been used in the literature
# depending on the amount of noise or clarity desired.
# verbose defaults to False
# set to True if you want to see what HDM is doing in detail
# HDM also has some parameters that DM has and that are still important:
# buffer is the buffer used to output chunks retrieved from HDM
# latency is F in Fe^-a, where a is the activation calculated as
# a = ln(cosine^2 / (1 - cosine^2))
#
# HDM has three important functions to call:
# add(chunk): adds a chunk to memory
# request(chunk):
# 1. Given a chunk with exactly one unknown value '?',
# request finds the best value to fill '?'
# which it returns
# Reaction time is a function of cosine (similarity of chunk to memory)
#
# 2. Given a chunk with no unknown values,
# resonance will return the chunk if it is familiar
# or fail to return the chunk if it is unfamiliar
# i.e., has a cosine less than threshold
# Reaction time is a function of cosine (similarity of chunk to memory)
# get_activation(chunk):
# Computes the coherence of a chunk, which used in request type 2.
# Returns a mean cosine.
from __future__ import generators
import ccm
import math
import numpy
import copy
__all__=['HDM']
from ccm.lib.actr.buffer import Chunk,Buffer
# add for hdm
from ccm.lib.actr.dm import Memory
from ccm.pattern import Pattern
from ccm.lib.hrr import HRR
class HDM(Memory):
# buffer is the buffer that the retrieved chunk is placed in
# N is the vector dimensionality
# recommended dimensionality in the range of 512 to 2048, defaults to 512
# a smaller dimensionality than 512 can be used to introduce additional noise
# threshold is the lowest log odds activation allowed for a response
# this value is converted to a cosine similarity
# if no memory vector has a similarity to the query greater than threshold, the retrieval fails
# maximum time is the most time the memory system is allowed to take
# latency is used to calculate reaction time
# reaction time = latency * e^(-cosine)
# Note that using this equation, given a cosine of 0, the reaction time = latency
# Bigger latencies result in longer reaction times
# verbose defaults to FALSE.
# If TRUE, verbose turns on print statements giving details about what HDM is doing.
# forgetting controls the forgetting rate due to retroactive inhibition
# range [0 to 1]
# 1 = no forgetting
# 0 = no remembering
# When updating memory:
# memory vector = forgetting * memory vector + new information vector
# noise controls the amount of noise added to memory per time step
# Gaussian noise is added to all memory vectors
# whenever Request or Add is called
# When adding noise:
# memory vector = memory vector + noise * time since last update * noise vector
# Noise ranges from [0 ... ]
# where 0 is no noise
# and more is more noise
def __init__(self,buffer,latency=0.05,threshold=-4.6,maximum_time=10.0,finst_size=4,finst_time=3.0, N=512, verbose=False, forgetting=1.0, noise=0.0):
    """Create a Holographic Declarative Memory attached to `buffer`.

    N: vector dimensionality; threshold: minimum log-odds activation,
    converted to a cosine below; forgetting/noise: decay and drift rates
    described in the class header comment.
    """
    Memory.__init__(self,buffer)
    self._buffer=buffer
    self.N = N
    self.verbose = verbose
    # Environment vectors: random Gaussian vector per known symbol.
    # '?' is the query placeholder used by request/resonance.
    self.env={'?': HRR(N=self.N)}
    self.placeholder = self.env['?']
    # Memory vectors, keyed by value name; updated by updateMemory.
    self.mem={}
    # Slot permutations; ')' doubles as the order-encoding permutation
    # used by getUOG (stored as self.left).
    self.slots={')': numpy.random.permutation(self.N)}
    self.left=self.slots[')']
    self.error=False
    self.busy=False
    self.adaptors=[]
    self.latency=latency
    # Threshold is supplied in log odds but comparisons use cosines.
    self.threshold=self.logodds_to_cosine(threshold)
    self.maximum_time=maximum_time
    self.partials=[]
    # Finst tracks recently reported values for require_new requests.
    self.finst=Finst(self,size=finst_size,time=finst_time)
    self._request_count=0
    self.inhibited=[] # list of inhibited values
    self.forgetting=forgetting
    self.noise=noise
    # Timestamp of the last noise injection (see addNoise).
    self.lastUpdate = 0.0
def clear(self):
    # Wipe all memory vectors; environment vectors and slot permutations
    # are deliberately kept so known symbols remain encodable.
    self.mem.clear()
def add(self, chunk, record=None, **keys):
    """Encode `chunk` and merge it into holographic memory.

    Chunks containing ':' are treated as slot:value pairs; otherwise they
    are treated as an ordered list of values.  `record` and `keys` are
    accepted for interface compatibility and unused here.
    """
    # Clear any stale error flag left over from a previous request.
    if self.error:
        self.error = False
    # Simulate drift: inject time-weighted noise before storing.
    if self.noise != 0:
        self.addNoise()
    # Normalise to a string and substitute any ?bound values.
    normalized = self.assignValues(self.chunk2str(chunk))
    # Dispatch on chunk format: slot:value pairs vs. plain value list.
    if ':' in normalized:
        self.addWithSlots(normalized)
    else:
        self.addJustValues(normalized)
# Inject Gaussian drift into every memory vector, scaled by the model time
# elapsed since the previous injection, then stamp the update time.
def addNoise(self):
    elapsed = self.now() - self.lastUpdate
    scale = self.noise * elapsed
    for key in self.mem:
        self.mem[key] = self.mem[key] + scale * HRR(N=self.N)
    self.lastUpdate = self.now()
def addWithSlots(self, chunk):
    """Add a chunk of slot:value pairs to memory.

    For each value in the chunk, builds the open-n-gram vector of the rest
    of the chunk (with that value replaced by '?') and folds it into that
    value's memory vector.
    """
    # convert chunk to a list of (slot,value) pairs
    chunkList = self.chunk2list(chunk)
    # define random Gaussian vectors and random permutations for any undefined values and slots
    self.defineVectors(chunkList)
    # update the memory vectors with the information from the chunk
    for p in range(0, len(chunkList)):
        # create a copy of chunkList with p's value replaced by '?'
        # (the slot is left as is)
        query = copy.deepcopy(chunkList)
        query[p][1] = '?'
        # FIX: removed stray debug print statements that dumped the value
        # and query to stdout on every add.
        # compute chunk vector and fold it into the value's memory vector
        chunkVector = self.getUOGwithSlots(query)
        self.updateMemory(chunkList[p][1], chunkVector)
# Add a chunk that is a plain ordered list of values (no slots) to memory.
# Each value's memory vector absorbs the open-n-gram encoding of the chunk
# with that value masked out by the '?' placeholder.
def addJustValues(self, chunk):
    values = chunk.split()
    # Ensure every value has an environment vector.
    self.defineVectors(values)
    for idx, value in enumerate(values):
        probe = copy.deepcopy(values)
        probe[idx] = '?'
        self.updateMemory(value, self.getUOG(probe))
# function for constructing a vector that represents chunkList
# where chunkList is a list of values without slots
# and p is the location of ? in chunkList
# returns chunk, an HRR representing all unconstrained open grams in chunkList
# that include the ? at p.
# When slots are not used, the permutation "left" is used to preserve order
def getUOG(self, chunkList):
    numOfItems = len(chunkList)
    # Running open-gram accumulator and running sum of item vectors.
    chunk = HRR(data=numpy.zeros(self.N))
    sum = HRR(data=numpy.zeros(self.N))
    p = numOfItems # initially, this will be set to index of ? when ? is found
    for i in range (0,numOfItems):
        # get the vector for the value i
        value = chunkList[i]
        # set p as the location of the placeholder ?
        if value == '?':
            p = i
        # if value starts with ! then negate the environment vector
        if value.startswith('!'):
            valVec = -1 * self.env[value[1:]]
        # otherwise use the environment vector as is
        else:
            valVec = self.env[value]
        # compute the chunk vector
        # The recurrence below builds all open grams incrementally:
        # (chunk + sum) is permuted "left" to mark it as the left operand,
        # then bound to the current item by circular convolution.
        if i == 0:
            sum = valVec
        elif (i > 0) and (i < p):
            leftOperand = chunk + sum
            leftOperand = leftOperand.permute(self.left)
            chunk = chunk + leftOperand.convolve(valVec)
            sum = sum + valVec
        elif i == p: # force all skip grams to include item p
            # NOTE: chunk is reassigned (not accumulated) here, discarding
            # grams that exclude p; sum restarts at the placeholder vector.
            leftOperand = chunk + sum
            leftOperand = leftOperand.permute(self.left)
            chunk = leftOperand.convolve(valVec)
            sum = valVec
        else: # i > p, i > 0
            leftOperand = chunk + sum
            leftOperand = leftOperand.permute(self.left)
            chunk = chunk + leftOperand.convolve(valVec)
    return chunk
# function for constructing a vector that represents chunkList
# where chunkList is a list of values WITH slots as permutations
# returns chunk, an HRR representing all unconstrained open grams in chunkList
# that include the ?
def getUOGwithSlots(self, chunkList):
    numOfItems = len(chunkList)
    chunk = HRR(data=numpy.zeros(self.N))
    sum = HRR(data=numpy.zeros(self.N))
    #sumStr = ''
    #chunkStr = ''
    p = numOfItems # initially, this will be set to index of ? when ? is found
    for i in range (0,numOfItems):
        # get the vector for the slot value pair at i
        slotvalue = chunkList[i]
        slot = slotvalue[0]
        value = slotvalue[1]
        # set p as the location of the placeholder ?
        if value == '?':
            p = i
        # if value starts with ! then negate the environment vector
        if value.startswith('!'):
            valVec = -1 * self.env[value[1:]]
        # otherwise use the environment vector as is
        else:
            valVec = self.env[value]
        # permute the environment vector by the slot
        # (the slot permutation replaces the order-encoding "left"
        # permutation used in getUOG — presumably because the slot already
        # carries role information; TODO confirm against the model spec)
        valVec = valVec.permute(self.slots[slot])
        #slotvalueStr = slot+':'+value
        # compute the chunk vector
        # Same open-gram recurrence as getUOG, without the left permutation.
        if i == 0:
            sum = valVec
            #sumStr = slotvalueStr
        elif (i > 0) and (i < p):
            leftOperand = chunk + sum
            chunk = chunk + leftOperand.convolve(valVec)
            #chunkStr = chunkStr + ' + ' + slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
            sum = sum + valVec
            #sumStr = sumStr + ' + ' + slotvalueStr
        elif i == p: # force all skip grams to include item p
            leftOperand = chunk + sum
            chunk = leftOperand.convolve(valVec)
            #chunkStr = slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
            sum = valVec
            #sumStr = slotvalueStr
        else: # i > p, i > 0
            leftOperand = chunk + sum
            chunk = chunk + leftOperand.convolve(valVec)
            #chunkStr = chunkStr + ' + ' + slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
    return chunk #, chunkStr
# Fold an encoded chunk vector into the memory vector for `value`.
# A leading '!' on the value means "store the negation" (inhibitory trace);
# the bang is stripped before using the value as a memory key.
# Existing traces decay by self.forgetting before the new trace is added.
def updateMemory(self, value, chunking):
    if value.startswith('!'):
        key, delta = value[1:], -1 * chunking
    else:
        key, delta = value, chunking
    if key in self.mem:
        self.mem[key] = self.forgetting * self.mem[key] + delta
    else:
        self.mem[key] = delta
# Public entry point for memory retrieval.  A chunk containing '?' is a
# recall query (requestValue); a fully specified chunk is a familiarity
# judgement (resonance).
def request(self, chunk, require_new=False):
    self.busy = True
    if self.error:
        self.error = False
    self._request_count += 1
    # Simulate drift before probing memory.
    if self.noise != 0:
        self.addNoise()
    # Fresh query: drop inhibitions accumulated by the previous request.
    self.inhibited = []
    # Normalise to a string, substitute ?bound values, collect !inhibitions.
    query = self.assignValues(self.chunk2str(chunk))
    if '?' in query:
        self.requestValue(query, require_new)
    else:
        self.resonance(query)
def requestValue(self,chunk,require_new=False):
    """Fill the single '?' in `chunk` with the best-matching memory.

    Builds a query vector from the rest of the chunk, compares it against
    every memory vector by cosine, and recalls the chunk with '?' replaced
    by the winner; fails if nothing beats self.threshold.  Values in
    self.inhibited are skipped, as are values already in the finst when
    require_new is true.
    """
    # check if chunk has slots by checking for colons (which separate slots from values)
    if ':' in chunk:
        queryVec = self.queryWithSlots(chunk)
    else:
        queryVec = self.queryJustValues(chunk)
    # Best-so-far starts at the threshold, so only above-threshold
    # memories can win.  NOTE(review): the sentinel string 'none' would
    # collide with a real memory value named 'none' — confirm harmless.
    highestCosine = self.threshold
    bestMatch = 'none'
    if self.verbose:
        print('Query is: ' + chunk)
        print('inhibited values: ' + str(self.inhibited))
        print('Finst contains: ' + str(self.finst.obj))
    # find the best match to the query vector in memory
    for mem,memVec in self.mem.items():
        # skip inhibited values
        if mem not in self.inhibited:
            # skip previously reported values if require_new is true
            if (not require_new) or (not self.finst.contains(mem)):
                thisCosine = memVec.compare(queryVec)
                if self.verbose:
                    print(mem, thisCosine)
                if thisCosine > highestCosine:
                    highestCosine = thisCosine
                    bestMatch = mem
    if bestMatch == 'none':
        if self.verbose:
            print('No matches found above threshold of cosine =', self.threshold)
        self.fail(self._request_count)
    else:
        # replace the placeholder '?' with the retrieved memory 'bestMatch'
        chunk = chunk.replace('?',bestMatch)
        if self.verbose:
            print('Best match is ' + bestMatch)
            print('with a cosine of ' + str(highestCosine))
            print('output chunk = ' + chunk)
        # Recall carries the cosine as the chunk's activation, and the
        # reported value is remembered in the finst.
        chunkObj = Chunk(chunk)
        chunkObj.activation = highestCosine
        self.finst.add(bestMatch)
        self.recall(chunkObj,matches=[],request_number=self._request_count)
# Familiarity judgement: recalls the chunk itself when its coherence (mean
# cosine over leave-one-out queries) exceeds the threshold, fails otherwise.
def resonance(self, chunk):
    if '?' in chunk:
        print('chunk is ' + chunk)
        raise Exception("Use the resonance function when the chunk has no '?'. If there is a '?' use request instead")
    coherence = self.get_activation(chunk)
    if self.verbose:
        print('The coherence is ' + str(coherence))
    if coherence <= self.threshold:
        self.fail(self._request_count)
        return
    result = Chunk(chunk)
    result.activation = coherence
    self.recall(result, matches=[], request_number=self._request_count)
# compute the coherence / activation of a chunk
# called by resonance
# called by request when no ? values are present
# if logodds=True, then convert from mean cosine to logodds and return logodds
def get_activation(self, chunk, logodds=False):
    """Return the chunk's coherence: the mean cosine between each value's
    memory vector and a query built from the rest of the chunk.

    FIX: removed the unused `occurrences` dict (declared "in case of
    repeats" but never read or written) and stray C-style semicolons.
    """
    # if this function has been called directly (not via request), the
    # chunk has not yet been normalised
    if not self.busy:
        # convert chunk to string (if it isn't already a string)
        chunk = self.chunk2str(chunk)
        # assign any unassigned values and load inhibited values into self.inhibited
        chunk = self.assignValues(chunk)
        # add noise to memory
        if self.noise != 0:
            self.addNoise()
    # running sum of cosines and count of values probed so far
    # (numOfValues doubles as the index of the value being replaced)
    sumOfCosines = 0
    numOfValues = 0
    # perform a leave-one-out query for each value in the chunk
    for slotvalue in chunk.split():
        # rebuild the chunk with this value replaced by '?'
        query = chunk.split()
        query.pop(numOfValues)
        # slot:value chunks and plain value chunks are queried differently
        if ':' in slotvalue:
            slot, value = slotvalue.split(':')
            query.insert(numOfValues, slot + ':?')
            queryVec = self.queryWithSlots(' '.join(query))
        else:
            value = slotvalue
            query.insert(numOfValues, '?')
            queryVec = self.queryJustValues(' '.join(query))
        numOfValues += 1
        # compare the query vector against the value's memory vector
        self.defineVectors([value])
        sumOfCosines += self.mem[value].compare(queryVec)
    # NOTE(review): an empty chunk would divide by zero here, as in the
    # original — callers are assumed to pass non-empty chunks.
    coherence = sumOfCosines / numOfValues
    if logodds:
        return self.cosine_to_logodds(coherence)
    return coherence
# Build the query vector for a slot:value chunk: the superposition of all
# open n-grams of the slot-permuted value vectors that include the single
# '?' placeholder.
def queryWithSlots(self, chunk):
    pairs = self.chunk2list(chunk)
    # Make sure every slot and value has a vector/permutation defined.
    self.defineVectors(pairs)
    return self.getUOGwithSlots(pairs)
# Build the query vector for a slot-less chunk: the superposition of all
# open n-grams of the value vectors that include the single '?' placeholder.
def queryJustValues(self, chunk):
    values = chunk.split()
    # Make sure every value has an environment vector defined.
    self.defineVectors(values)
    return self.getUOG(values)
# Normalise a Buffer, Chunk, or plain string down to a chunk string.
# Strings pass through unchanged.
def chunk2str(self, chunk):
    # A Buffer wraps a chunk description; lift it into a Chunk first.
    if isinstance(chunk, Buffer):
        chunk = Chunk(chunk.chunk)
    if isinstance(chunk, Chunk):
        return str(chunk)
    return chunk
# chunk2list converts a chunk into a list of (slot,value) pairs
def chunk2list(self,chunk):
if ':' in chunk:
return [item.split(':') for item in chunk.split()]
else:
raise Exception("Wrong chunk format!")
return None
# assignValues checks for unassigned values, i.e., '?stuff'
# returns chunk as a string
def assignValues(self,chunk):
# convert chunk to str (if it isn't already)
chunk = self.chunk2str(chunk)
# replace instances of ?stuff with corresponding stuff
bound=None
if hasattr(self,'sch'):
bound=getattr(self.sch,'bound',None)
# split the chunkStr where there are spaces to get the list of attributes
attributes = chunk.split()
# find ?values that need to be substituted
chunkList = []
for attribute in attributes:
# this function needs to handle both chunks that are lists of slot:value pairs
# and chunks that are ordered lists of values
if ':' in attribute:
|
else:
value = attribute
slot = ''
# sometimes we want to specify things not to select
# for example, condiment:?unknown!mustard
# means find a condiment that isn't mustard
if value.startswith('?') and value != '?':
first = True
for subvalue in value.split('!'):
# we know the first value starts with ?, so let's substitute
if first:
first = False;
#check to see if it's not just a ? by itself
if subvalue == '?':
value = '?'
else:
try:
# take "?value" without the "?"
key = subvalue[1:]
# look it up in the "bound dictionary" and substitute
value = bound[key]
# if "value" in "?value" is undefined, replace with "?"
except:
value = '?'
# the following values all start with ! meaning things we don't want to retrieve
else:
if subvalue.startswith('?'):
# but some of them may start with ? indicating we need to substitute
try:
# take "?value" without the "?"
key = subvalue[1:]
# look it up in the "bound dictionary" and add to inhibited values list
subvalue = bound[key]
# if "value" in "?value" is undefined, raise exception
except:
print(chunk)
print('Error with subvalue: ' + subvalue + ' in chunk: ' + chunk)
raise Exception('Values beginning with ! are understood in this context as indicating values to be inhibited. The specified !value is undefined')
# add subvalue to inhibition list
self.inhibited.append(subvalue)
# add the value to the chunkList
chunkList.append(slot+value)
# convert chunkList into a string delimited by spaces
return ' '.join(chunkList)
#get environment vector for a given value
def get(self,value):
if value not in self.env:
self.env[value] = HRR(N=self.N)
self.mem[value] = HRR(data=numpy.zeros(self.N))
return self.env[value].copy()
#set environment vector for a given value to a specified vector
def set(self,value,vector):
try: # assume vector is an HRR object
newVec = vector.copy()
newVec.normalize()
self.env[value] = newVec
except: # assume vector is a list of numbers
vector = [float(i) for i in vector]
self.env[value] = HRR(data=vector)
self.env[value].normalize()
# check to see if it's in memory already, if not, define its memory as a vector of zeros
if value not in self.mem:
self.mem[value] = HRR(data=numpy.zeros(self.N))
# generate Gaussian vectors and random permutations for values & slots without
# chunkList is a list of attributes, each attribute is a string
def defineVectors(self,chunkList):
for attribute in chunkList:
# check to see if there is a slot, or if it's just a value without a slot
if isinstance(attribute,list):
slot,value = attribute
# if it's a new slot, create a new random permutation
if slot not in self.slots.keys():
self.slots[slot] = numpy.random.permutation(self.N)
else:
value = attribute
# if it starts with ! (i.e., not) just ignore that for now
if value.startswith('!'):
value = value[1:]
# if it's a new value, create a new random vector
if value not in self.env:
self.env[value] = HRR(N=self.N)
self.mem[value] = HRR(data=numpy.zeros(self.N))#self.env[value]
def fail(self,request_number):
if self.threshold == None:
time=self.maximum_time
else:
logodds = self.cosine_to_logodds(self.threshold)
time=self.latency*math.exp(-logodds)
if time>self.maximum_time: time=self.maximum_time
yield time
if request_number!=self._request_count: return
self.error=True
self._buffer.clear()
self.busy=False
def recall(self,chunk,matches,request_number):
logodds = self.cosine_to_logodds(chunk.activation)
time=self.latency*math.exp(-logodds)
if time>self.maximum_time: time=self.maximum_time
yield time
if request_number!=self._request_count: return
self._buffer.set(chunk)
for a in self.adaptors: a.recalled(chunk)
self.busy=False
# Converts vector cosine (which approximates root probability)
# to a log odds ratio (which is what ACT-R activation estimates)
def cosine_to_logodds(self,cosine):
if cosine > 0.999:
cosine = 0.999
return math.log(cosine**2 / (1 - cosine**2))
# Converts log odds ratio or ACT-R activation
# to a root probability (which the cosine approximates)
def logodds_to_cosine(self,logodds):
return math.sqrt(numpy.exp(logodds) / (numpy.exp(logodds) + 1))
class Finst:
def __init__(self,parent,size=4,time=3.0):
self.parent=parent
self.size=size
self.time=time
self.obj=[]
def contains(self,o):
return o in self.obj
def add(self,o):
if self.size==0: return
self.obj.append(o)
if len(self.obj)>self.size:
self.remove(self.obj[0])
self.parent.sch.add(self.remove,args=[o],delay=self.time)
def remove(self,o):
if o in self.obj: self.obj.remove(o)
| slot,value = attribute.split(':')
slot = slot + ':' | conditional_block |
hdm.py | # HDM: Holographic Declarative Memory
# A module for Python ACT-R
# written by M. A. Kelly
# except for the parts written by Terry C. Stewart
# Based on original research and the memory models
# BEAGLE (Jones & Mewhort, 2007) and DSHM (Rutledge-Taylor, Kelly, West, & Pyke, 2014)
#
# To use HDM:
# from ccm.lib.actr import *
# from ccm.lib.actr.hdm import *
# ...
# retrieval=Buffer()
# memory=HDM(retrieval)
#
# HDM has some unique parameters that DM does not have:
# N is the dimensionality of the vectors.
# Defaults to a generous 512 dimensions.
# As few as 64 and as many as 2048 have been used in the literature
# depending on the amount of noise or clarity desired.
# verbose defaults to False
# set to True if you want to see what HDM is doing in detail
# HDM also has some parameters that DM has and that are still important:
# buffer is the buffer used to output chunks retrieved from HDM
# latency is F in Fe^-a, where a is the activation calculated as
# a = ln(cosine^2 / (1 - cosine^2))
#
# HDM has three important functions to call:
# add(chunk): adds a chunk to memory
# request(chunk):
# 1. Given a chunk with exactly one unknown value '?',
# request finds the best value to fill '?'
# which it returns
# Reaction time is a function of cosine (similarity of chunk to memory)
#
# 2. Given a chunk with no unknown values,
# resonance will return the chunk if it is familiar
# or fail to return the chunk if it is unfamiliar
# i.e., has a cosine less than threshold
# Reaction time is a function of cosine (similarity of chunk to memory)
# get_activation(chunk):
# Computes the coherence of a chunk, which used in request type 2.
# Returns a mean cosine.
from __future__ import generators
import ccm
import math
import numpy
import copy
__all__=['HDM']
from ccm.lib.actr.buffer import Chunk,Buffer
# add for hdm
from ccm.lib.actr.dm import Memory
from ccm.pattern import Pattern
from ccm.lib.hrr import HRR
class HDM(Memory):
# buffer is the buffer that the retrieved chunk is placed in
# N is the vector dimensionality
# recommended dimensionality in the range of 512 to 2048, defaults to 512
# a smaller dimensionality than 512 can be used to introduce additional noise
# threshold is the lowest log odds activation allowed for a response
# this value is converted to a cosine similarity
# if no memory vector has a similarity to the query greater than threshold, the retrieval fails
# maximum time is the most time the memory system is allowed to take
# latency is used to calculate reaction time
# reaction time = latency * e^(-cosine)
# Note that using this equation, given a cosine of 0, the reaction time = latency
# Bigger latencies result in longer reaction times
# verbose defaults to FALSE.
# If TRUE, verbose turns on print statements giving details about what HDM is doing.
# forgetting controls the forgetting rate due to retroactive inhibition
# range [0 to 1]
# 1 = no forgetting
# 0 = no remembering
# When updating memory:
# memory vector = forgetting * memory vector + new information vector
# noise controls the amount of noise added to memory per time step
# Gaussian noise is added to all memory vectors
# whenever Request or Add is called
# When adding noise:
# memory vector = memory vector + noise * time since last update * noise vector
# Noise ranges from [0 ... ]
# where 0 is no noise
# and more is more noise
def __init__(self,buffer,latency=0.05,threshold=-4.6,maximum_time=10.0,finst_size=4,finst_time=3.0, N=512, verbose=False, forgetting=1.0, noise=0.0):
Memory.__init__(self,buffer)
self._buffer=buffer
self.N = N
self.verbose = verbose
self.env={'?': HRR(N=self.N)}
self.placeholder = self.env['?']
self.mem={}
self.slots={')': numpy.random.permutation(self.N)}
self.left=self.slots[')']
self.error=False
self.busy=False
self.adaptors=[]
self.latency=latency
self.threshold=self.logodds_to_cosine(threshold)
self.maximum_time=maximum_time
self.partials=[]
self.finst=Finst(self,size=finst_size,time=finst_time)
self._request_count=0
self.inhibited=[] # list of inhibited values
self.forgetting=forgetting
self.noise=noise
self.lastUpdate = 0.0
def clear(self):
self.mem.clear()
def add(self,chunk,record=None,**keys):
# if error flag is true, set to false for production system
if self.error: self.error=False
# add noise to memory
if (self.noise != 0):
self.addNoise()
# convert chunk to string (if it isn't already a string)
chunk = self.chunk2str(chunk)
# assign any unassigned values in chunk
chunk = self.assignValues(chunk)
# check if chunk has slots by checking for colons (which separate slots from values)
if ':' in chunk:
# call addWithSlots to add a chunk with slot:value pairs to memory
self.addWithSlots(chunk)
else:
# call addJustValues to add a chunk with values and no slots to memory
self.addJustValues(chunk)
# function for adding noise over time to memory
def addNoise(self):
# weight by time difference
diff = self.now() - self.lastUpdate
for value in self.mem.keys():
noiseVector = HRR(N=self.N)
self.mem[value] = self.mem[value] + (self.noise * diff * noiseVector)
self.lastUpdate = self.now()
def addWithSlots(self,chunk):
# convert chunk to a list of (slot,value) pairs
chunkList = self.chunk2list(chunk)
# define random Gaussian vectors and random permutations for any undefined values and slots
self.defineVectors(chunkList)
# update the memory vectors with the information from the chunk
for p in range(0,len(chunkList)):
# create a copy of chunkList
query = copy.deepcopy(chunkList)
# replace p's value with ? in query, but leave slot as is
query[p][1] = '?'
print(chunkList[p][1])
print(query)
# compute chunk vector
chunkVector = self.getUOGwithSlots(query)
# update memory
self.updateMemory(chunkList[p][1],chunkVector)
# add a chunk to memory
# when the chunk is just a list of values
# without slots
def | (self,chunk):
# convert chunk to a list of values
chunkList = chunk.split()
# define random Gaussian vectors for any undefined values
self.defineVectors(chunkList)
# update the memory vectors with the information from the chunk
for p in range(0,len(chunkList)):
# create a copy of chunkList
query = copy.deepcopy(chunkList)
# replace p with ? in query
query[p] = '?'
# compute chunk vector
chunkVector = self.getUOG(query)
# update memory
self.updateMemory(chunkList[p],chunkVector)
# function for constructing a vector that represents chunkList
# where chunkList is a list of values without slots
# and p is the location of ? in chunkList
# returns chunk, an HRR representing all unconstrained open grams in chunkList
# that include the ? at p.
# When slots are not used, the permutation "left" is used to preserve order
def getUOG(self, chunkList):
numOfItems = len(chunkList)
chunk = HRR(data=numpy.zeros(self.N))
sum = HRR(data=numpy.zeros(self.N))
p = numOfItems # initially, this will be set to index of ? when ? is found
for i in range (0,numOfItems):
# get the vector for the value i
value = chunkList[i]
# set p as the location of the placeholder ?
if value == '?':
p = i
# if value starts with ! then negate the environment vector
if value.startswith('!'):
valVec = -1 * self.env[value[1:]]
# otherwise use the environment vector as is
else:
valVec = self.env[value]
# compute the chunk vector
if i == 0:
sum = valVec
elif (i > 0) and (i < p):
leftOperand = chunk + sum
leftOperand = leftOperand.permute(self.left)
chunk = chunk + leftOperand.convolve(valVec)
sum = sum + valVec
elif i == p: # force all skip grams to include item p
leftOperand = chunk + sum
leftOperand = leftOperand.permute(self.left)
chunk = leftOperand.convolve(valVec)
sum = valVec
else: # i > p, i > 0
leftOperand = chunk + sum
leftOperand = leftOperand.permute(self.left)
chunk = chunk + leftOperand.convolve(valVec)
return chunk
# function for constructing a vector that represents chunkList
# where chunkList is a list of values WITH slots as permutations
# returns chunk, an HRR representing all unconstrained open grams in chunkList
# that include the ?
def getUOGwithSlots(self, chunkList):
numOfItems = len(chunkList)
chunk = HRR(data=numpy.zeros(self.N))
sum = HRR(data=numpy.zeros(self.N))
#sumStr = ''
#chunkStr = ''
p = numOfItems # initially, this will be set to index of ? when ? is found
for i in range (0,numOfItems):
# get the vector for the slot value pair at i
slotvalue = chunkList[i]
slot = slotvalue[0]
value = slotvalue[1]
# set p as the location of the placeholder ?
if value == '?':
p = i
# if value starts with ! then negate the environment vector
if value.startswith('!'):
valVec = -1 * self.env[value[1:]]
# otherwise use the environment vector as is
else:
valVec = self.env[value]
# permute the environment vector by the slot
valVec = valVec.permute(self.slots[slot])
#slotvalueStr = slot+':'+value
# compute the chunk vector
if i == 0:
sum = valVec
#sumStr = slotvalueStr
elif (i > 0) and (i < p):
leftOperand = chunk + sum
chunk = chunk + leftOperand.convolve(valVec)
#chunkStr = chunkStr + ' + ' + slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
sum = sum + valVec
#sumStr = sumStr + ' + ' + slotvalueStr
elif i == p: # force all skip grams to include item p
leftOperand = chunk + sum
chunk = leftOperand.convolve(valVec)
#chunkStr = slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
sum = valVec
#sumStr = slotvalueStr
else: # i > p, i > 0
leftOperand = chunk + sum
chunk = chunk + leftOperand.convolve(valVec)
#chunkStr = chunkStr + ' + ' + slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
return chunk #, chunkStr
# for updating a memory vector for value with chunk
def updateMemory(self,value,chunking):
if value.startswith('!'):
if value[1:] not in self.mem:
self.mem[value[1:]] = -1*chunking
else:
self.mem[value[1:]] = self.forgetting * self.mem[value[1:]] - chunking
else:
if value not in self.mem:
self.mem[value] = chunking
else:
self.mem[value] = self.forgetting * self.mem[value] + chunking
# default request function, call this
def request(self,chunk,require_new=False):
self.busy=True
if self.error: self.error=False
self._request_count+=1
# add noise to memory
if (self.noise != 0):
self.addNoise()
# clear list of inhibited values from previous queries
self.inhibited = []
# convert chunk to string (if it isn't already a string)
chunk = self.chunk2str(chunk)
# assign any unassigned values in chunk string and load inhibited values into self.inhibited
chunk = self.assignValues(chunk)
if '?' in chunk:
self.requestValue(chunk,require_new)
else:
self.resonance(chunk)
def requestValue(self,chunk,require_new=False):
# check if chunk has slots by checking for colons (which separate slots from values)
if ':' in chunk:
queryVec = self.queryWithSlots(chunk)
else:
queryVec = self.queryJustValues(chunk)
highestCosine = self.threshold
bestMatch = 'none'
if self.verbose:
print('Query is: ' + chunk)
print('inhibited values: ' + str(self.inhibited))
print('Finst contains: ' + str(self.finst.obj))
# find the best match to the query vector in memory
for mem,memVec in self.mem.items():
# skip inhibited values
if mem not in self.inhibited:
# skip previously reported values if require_new is true
if (not require_new) or (not self.finst.contains(mem)):
thisCosine = memVec.compare(queryVec)
if self.verbose:
print(mem, thisCosine)
if thisCosine > highestCosine:
highestCosine = thisCosine
bestMatch = mem
if bestMatch == 'none':
if self.verbose:
print('No matches found above threshold of cosine =', self.threshold)
self.fail(self._request_count)
else:
# replace the placeholder '?' with the retrieved memory 'bestMatch'
chunk = chunk.replace('?',bestMatch)
if self.verbose:
print('Best match is ' + bestMatch)
print('with a cosine of ' + str(highestCosine))
print('output chunk = ' + chunk)
chunkObj = Chunk(chunk)
chunkObj.activation = highestCosine
self.finst.add(bestMatch)
self.recall(chunkObj,matches=[],request_number=self._request_count)
# performs multiple queries to determine the "coherence" of the chunk
def resonance(self,chunk):
if '?' in chunk:
print('chunk is ' + chunk)
raise Exception("Use the resonance function when the chunk has no '?'. If there is a '?' use request instead")
coherence = self.get_activation(chunk)
if self.verbose:
print('The coherence is ' + str(coherence))
if coherence <= self.threshold:
self.fail(self._request_count)
else:
chunkObj = Chunk(chunk)
chunkObj.activation = coherence
self.recall(chunkObj,matches=[],request_number=self._request_count)
# compute the coherence / activation of a chunk
# called by resonance
# called by request when no ? values are present
# if logodds=True, the convert from mean cosine to logodds and return logodds
def get_activation(self,chunk,logodds=False):
# if this function has been called directly, we need to convert
if not self.busy:
# convert chunk to string (if it isn't already a string)
chunk = self.chunk2str(chunk)
# assign any unassigned values in chunk string and load inhibited values into self.inhibited
chunk = self.assignValues(chunk)
# add noise to memory
if (self.noise != 0):
self.addNoise()
# keep track of the number of occurrences of a particular value in case of repeats
occurrences = {}
# keep a running sum of the cosines and a count of the values in the chunk
sumOfCosines = 0;
numOfValues = 0;
# perform a query for each value in chunk
for slotvalue in chunk.split():
# create a query by removing the value and replacing it with '?'
query = chunk.split() # turn chunk into list
query.pop(numOfValues) # remove this list item
# check if chunk has slots by checking for colons (which separate slots from values)
if ':' in slotvalue:
slot,value = slotvalue.split(':')
query.insert(numOfValues, slot+':?') # replace value with ?
query = ' '.join(query) # convert query to a string
queryVec = self.queryWithSlots(query)
else:
value = slotvalue
query.insert(numOfValues, '?') # replace value with ?
query = ' '.join(query) # convert query to a string
queryVec = self.queryJustValues(query)
numOfValues = numOfValues + 1;
# find the match between the query vector and the value's memory vector
self.defineVectors([value])
match = self.mem[value].compare(queryVec)
sumOfCosines = sumOfCosines + match
coherence = sumOfCosines / numOfValues
if logodds:
return self.cosine_to_logodds(coherence)
else:
return coherence
# create a query vector for a chunk consisting of slot:value pairs
# the query vector consists of the open n-grams of the slot:value pairs
# only open n-grams that contain ? are included
# the query vector must have one and only one query item "?"
def queryWithSlots(self,chunk):
# convert chunk to a list of (slot,value) pairs
chunkList = self.chunk2list(chunk)
# define random Gaussian vectors and random permutations for any undefined values and slots
self.defineVectors(chunkList)
# construct the query vector
queryVec = self.getUOGwithSlots(chunkList)
return queryVec
# create a query vector for a chunk consisting of slot:value pairs
# the query vector consists of the open n-grams of the values
# only n-grams that contain ? are included
# the query vector must have one and only one query item "?"
def queryJustValues(self,chunk):
# convert chunk to a list of values
chunkList = chunk.split()
# define random Gaussian vectors for any undefined values
self.defineVectors(chunkList)
# get all combinations ranging from pairs of slot-value pairs to sets
queryVec = self.getUOG(chunkList)
return queryVec
# chunk2str converts a chunk into a string
# or if it is already a string, chunk2str just returns the string unmodified
def chunk2str(self,chunk):
# if the chunk is a Buffer object, extract the Chunk object from inside it, then turn the Chunk into a string
if isinstance(chunk,Buffer):
chunk = Chunk(chunk.chunk)
# if the chunk is a Chunk object, turn the Chunk into a string
if isinstance(chunk,Chunk):
chunk = str(chunk)
return chunk
# chunk2list converts a chunk into a list of (slot,value) pairs
def chunk2list(self,chunk):
if ':' in chunk:
return [item.split(':') for item in chunk.split()]
else:
raise Exception("Wrong chunk format!")
return None
# assignValues checks for unassigned values, i.e., '?stuff'
# returns chunk as a string
def assignValues(self,chunk):
# convert chunk to str (if it isn't already)
chunk = self.chunk2str(chunk)
# replace instances of ?stuff with corresponding stuff
bound=None
if hasattr(self,'sch'):
bound=getattr(self.sch,'bound',None)
# split the chunkStr where there are spaces to get the list of attributes
attributes = chunk.split()
# find ?values that need to be substituted
chunkList = []
for attribute in attributes:
# this function needs to handle both chunks that are lists of slot:value pairs
# and chunks that are ordered lists of values
if ':' in attribute:
slot,value = attribute.split(':')
slot = slot + ':'
else:
value = attribute
slot = ''
# sometimes we want to specify things not to select
# for example, condiment:?unknown!mustard
# means find a condiment that isn't mustard
if value.startswith('?') and value != '?':
first = True
for subvalue in value.split('!'):
# we know the first value starts with ?, so let's substitute
if first:
first = False;
#check to see if it's not just a ? by itself
if subvalue == '?':
value = '?'
else:
try:
# take "?value" without the "?"
key = subvalue[1:]
# look it up in the "bound dictionary" and substitute
value = bound[key]
# if "value" in "?value" is undefined, replace with "?"
except:
value = '?'
# the following values all start with ! meaning things we don't want to retrieve
else:
if subvalue.startswith('?'):
# but some of them may start with ? indicating we need to substitute
try:
# take "?value" without the "?"
key = subvalue[1:]
# look it up in the "bound dictionary" and add to inhibited values list
subvalue = bound[key]
# if "value" in "?value" is undefined, raise exception
except:
print(chunk)
print('Error with subvalue: ' + subvalue + ' in chunk: ' + chunk)
raise Exception('Values beginning with ! are understood in this context as indicating values to be inhibited. The specified !value is undefined')
# add subvalue to inhibition list
self.inhibited.append(subvalue)
# add the value to the chunkList
chunkList.append(slot+value)
# convert chunkList into a string delimited by spaces
return ' '.join(chunkList)
#get environment vector for a given value
def get(self,value):
if value not in self.env:
self.env[value] = HRR(N=self.N)
self.mem[value] = HRR(data=numpy.zeros(self.N))
return self.env[value].copy()
#set environment vector for a given value to a specified vector
def set(self,value,vector):
try: # assume vector is an HRR object
newVec = vector.copy()
newVec.normalize()
self.env[value] = newVec
except: # assume vector is a list of numbers
vector = [float(i) for i in vector]
self.env[value] = HRR(data=vector)
self.env[value].normalize()
# check to see if it's in memory already, if not, define its memory as a vector of zeros
if value not in self.mem:
self.mem[value] = HRR(data=numpy.zeros(self.N))
# generate Gaussian vectors and random permutations for values & slots without
# chunkList is a list of attributes, each attribute is a string
def defineVectors(self,chunkList):
for attribute in chunkList:
# check to see if there is a slot, or if it's just a value without a slot
if isinstance(attribute,list):
slot,value = attribute
# if it's a new slot, create a new random permutation
if slot not in self.slots.keys():
self.slots[slot] = numpy.random.permutation(self.N)
else:
value = attribute
# if it starts with ! (i.e., not) just ignore that for now
if value.startswith('!'):
value = value[1:]
# if it's a new value, create a new random vector
if value not in self.env:
self.env[value] = HRR(N=self.N)
self.mem[value] = HRR(data=numpy.zeros(self.N))#self.env[value]
def fail(self,request_number):
if self.threshold == None:
time=self.maximum_time
else:
logodds = self.cosine_to_logodds(self.threshold)
time=self.latency*math.exp(-logodds)
if time>self.maximum_time: time=self.maximum_time
yield time
if request_number!=self._request_count: return
self.error=True
self._buffer.clear()
self.busy=False
def recall(self,chunk,matches,request_number):
logodds = self.cosine_to_logodds(chunk.activation)
time=self.latency*math.exp(-logodds)
if time>self.maximum_time: time=self.maximum_time
yield time
if request_number!=self._request_count: return
self._buffer.set(chunk)
for a in self.adaptors: a.recalled(chunk)
self.busy=False
# Converts vector cosine (which approximates root probability)
# to a log odds ratio (which is what ACT-R activation estimates)
def cosine_to_logodds(self,cosine):
if cosine > 0.999:
cosine = 0.999
return math.log(cosine**2 / (1 - cosine**2))
# Converts log odds ratio or ACT-R activation
# to a root probability (which the cosine approximates)
def logodds_to_cosine(self,logodds):
return math.sqrt(numpy.exp(logodds) / (numpy.exp(logodds) + 1))
class Finst:
def __init__(self,parent,size=4,time=3.0):
self.parent=parent
self.size=size
self.time=time
self.obj=[]
def contains(self,o):
return o in self.obj
def add(self,o):
if self.size==0: return
self.obj.append(o)
if len(self.obj)>self.size:
self.remove(self.obj[0])
self.parent.sch.add(self.remove,args=[o],delay=self.time)
def remove(self,o):
if o in self.obj: self.obj.remove(o)
| addJustValues | identifier_name |
hdm.py | # HDM: Holographic Declarative Memory
# A module for Python ACT-R
# written by M. A. Kelly
# except for the parts written by Terry C. Stewart
# Based on original research and the memory models
# BEAGLE (Jones & Mewhort, 2007) and DSHM (Rutledge-Taylor, Kelly, West, & Pyke, 2014)
#
# To use HDM:
# from ccm.lib.actr import *
# from ccm.lib.actr.hdm import *
# ...
# retrieval=Buffer()
# memory=HDM(retrieval)
#
# HDM has some unique parameters that DM does not have:
# N is the dimensionality of the vectors.
# Defaults to a generous 512 dimensions.
# As few as 64 and as many as 2048 have been used in the literature
# depending on the amount of noise or clarity desired.
# verbose defaults to False
# set to True if you want to see what HDM is doing in detail
# HDM also has some parameters that DM has and that are still important:
# buffer is the buffer used to output chunks retrieved from HDM
# latency is F in Fe^-a, where a is the activation calculated as
# a = ln(cosine^2 / (1 - cosine^2))
#
# HDM has three important functions to call:
# add(chunk): adds a chunk to memory
# request(chunk):
# 1. Given a chunk with exactly one unknown value '?',
# request finds the best value to fill '?'
# which it returns
# Reaction time is a function of cosine (similarity of chunk to memory)
#
# 2. Given a chunk with no unknown values,
# resonance will return the chunk if it is familiar
# or fail to return the chunk if it is unfamiliar
# i.e., has a cosine less than threshold
# Reaction time is a function of cosine (similarity of chunk to memory)
# get_activation(chunk):
# Computes the coherence of a chunk, which used in request type 2.
# Returns a mean cosine.
from __future__ import generators
import ccm
import math
import numpy
import copy
__all__=['HDM']
from ccm.lib.actr.buffer import Chunk,Buffer
# add for hdm
from ccm.lib.actr.dm import Memory
from ccm.pattern import Pattern
from ccm.lib.hrr import HRR
class HDM(Memory):
# buffer is the buffer that the retrieved chunk is placed in
# N is the vector dimensionality
# recommended dimensionality in the range of 512 to 2048, defaults to 512
# a smaller dimensionality than 512 can be used to introduce additional noise
# threshold is the lowest log odds activation allowed for a response
# this value is converted to a cosine similarity
# if no memory vector has a similarity to the query greater than threshold, the retrieval fails
# maximum time is the most time the memory system is allowed to take
# latency is used to calculate reaction time
# reaction time = latency * e^(-cosine)
# Note that using this equation, given a cosine of 0, the reaction time = latency
# Bigger latencies result in longer reaction times
# verbose defaults to FALSE.
# If TRUE, verbose turns on print statements giving details about what HDM is doing.
# forgetting controls the forgetting rate due to retroactive inhibition
# range [0 to 1]
# 1 = no forgetting
# 0 = no remembering
# When updating memory:
# memory vector = forgetting * memory vector + new information vector
# noise controls the amount of noise added to memory per time step
# Gaussian noise is added to all memory vectors
# whenever Request or Add is called
# When adding noise:
# memory vector = memory vector + noise * time since last update * noise vector
# Noise ranges from [0 ... ]
# where 0 is no noise
# and more is more noise
def __init__(self,buffer,latency=0.05,threshold=-4.6,maximum_time=10.0,finst_size=4,finst_time=3.0, N=512, verbose=False, forgetting=1.0, noise=0.0):
Memory.__init__(self,buffer)
self._buffer=buffer
self.N = N
self.verbose = verbose
self.env={'?': HRR(N=self.N)}
self.placeholder = self.env['?']
self.mem={}
self.slots={')': numpy.random.permutation(self.N)}
self.left=self.slots[')']
self.error=False
self.busy=False
self.adaptors=[]
self.latency=latency
self.threshold=self.logodds_to_cosine(threshold)
self.maximum_time=maximum_time
self.partials=[]
self.finst=Finst(self,size=finst_size,time=finst_time)
self._request_count=0
self.inhibited=[] # list of inhibited values
self.forgetting=forgetting
self.noise=noise
self.lastUpdate = 0.0
def clear(self):
self.mem.clear()
def add(self,chunk,record=None,**keys):
# if error flag is true, set to false for production system
if self.error: self.error=False
# add noise to memory
if (self.noise != 0):
self.addNoise()
# convert chunk to string (if it isn't already a string)
chunk = self.chunk2str(chunk)
# assign any unassigned values in chunk
chunk = self.assignValues(chunk)
# check if chunk has slots by checking for colons (which separate slots from values)
if ':' in chunk:
# call addWithSlots to add a chunk with slot:value pairs to memory
self.addWithSlots(chunk)
else:
# call addJustValues to add a chunk with values and no slots to memory
self.addJustValues(chunk)
# function for adding noise over time to memory
def addNoise(self):
# weight by time difference
diff = self.now() - self.lastUpdate
for value in self.mem.keys():
noiseVector = HRR(N=self.N)
self.mem[value] = self.mem[value] + (self.noise * diff * noiseVector)
self.lastUpdate = self.now()
def addWithSlots(self,chunk):
# convert chunk to a list of (slot,value) pairs
chunkList = self.chunk2list(chunk)
# define random Gaussian vectors and random permutations for any undefined values and slots
self.defineVectors(chunkList)
# update the memory vectors with the information from the chunk
for p in range(0,len(chunkList)):
# create a copy of chunkList
query = copy.deepcopy(chunkList)
# replace p's value with ? in query, but leave slot as is
query[p][1] = '?'
print(chunkList[p][1])
print(query)
# compute chunk vector
chunkVector = self.getUOGwithSlots(query)
# update memory
self.updateMemory(chunkList[p][1],chunkVector)
# add a chunk to memory
# when the chunk is just a list of values
# without slots
def addJustValues(self,chunk):
# convert chunk to a list of values
chunkList = chunk.split()
# define random Gaussian vectors for any undefined values
self.defineVectors(chunkList)
# update the memory vectors with the information from the chunk
for p in range(0,len(chunkList)):
# create a copy of chunkList
query = copy.deepcopy(chunkList)
# replace p with ? in query
query[p] = '?'
# compute chunk vector
chunkVector = self.getUOG(query)
# update memory
self.updateMemory(chunkList[p],chunkVector)
# function for constructing a vector that represents chunkList
# where chunkList is a list of values without slots
# and p is the location of ? in chunkList
# returns chunk, an HRR representing all unconstrained open grams in chunkList
# that include the ? at p.
# When slots are not used, the permutation "left" is used to preserve order
def getUOG(self, chunkList):
numOfItems = len(chunkList)
chunk = HRR(data=numpy.zeros(self.N))
sum = HRR(data=numpy.zeros(self.N))
p = numOfItems # initially, this will be set to index of ? when ? is found
for i in range (0,numOfItems):
# get the vector for the value i
value = chunkList[i]
# set p as the location of the placeholder ?
if value == '?':
p = i
# if value starts with ! then negate the environment vector
if value.startswith('!'):
valVec = -1 * self.env[value[1:]]
# otherwise use the environment vector as is
else:
valVec = self.env[value]
# compute the chunk vector
if i == 0:
sum = valVec
elif (i > 0) and (i < p):
leftOperand = chunk + sum
leftOperand = leftOperand.permute(self.left)
chunk = chunk + leftOperand.convolve(valVec)
sum = sum + valVec
elif i == p: # force all skip grams to include item p
leftOperand = chunk + sum
leftOperand = leftOperand.permute(self.left)
chunk = leftOperand.convolve(valVec)
sum = valVec
else: # i > p, i > 0
leftOperand = chunk + sum
leftOperand = leftOperand.permute(self.left)
chunk = chunk + leftOperand.convolve(valVec)
return chunk
# function for constructing a vector that represents chunkList
# where chunkList is a list of values WITH slots as permutations
# returns chunk, an HRR representing all unconstrained open grams in chunkList
# that include the ?
def getUOGwithSlots(self, chunkList):
numOfItems = len(chunkList)
chunk = HRR(data=numpy.zeros(self.N))
sum = HRR(data=numpy.zeros(self.N))
#sumStr = ''
#chunkStr = ''
p = numOfItems # initially, this will be set to index of ? when ? is found
for i in range (0,numOfItems):
# get the vector for the slot value pair at i
slotvalue = chunkList[i]
slot = slotvalue[0]
value = slotvalue[1]
# set p as the location of the placeholder ?
if value == '?':
p = i
# if value starts with ! then negate the environment vector
if value.startswith('!'):
valVec = -1 * self.env[value[1:]]
# otherwise use the environment vector as is
else:
valVec = self.env[value]
# permute the environment vector by the slot
valVec = valVec.permute(self.slots[slot])
#slotvalueStr = slot+':'+value
# compute the chunk vector
if i == 0:
sum = valVec
#sumStr = slotvalueStr
elif (i > 0) and (i < p):
leftOperand = chunk + sum
chunk = chunk + leftOperand.convolve(valVec)
#chunkStr = chunkStr + ' + ' + slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
sum = sum + valVec
#sumStr = sumStr + ' + ' + slotvalueStr
elif i == p: # force all skip grams to include item p
leftOperand = chunk + sum
chunk = leftOperand.convolve(valVec)
#chunkStr = slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
sum = valVec
#sumStr = slotvalueStr
else: # i > p, i > 0
leftOperand = chunk + sum
chunk = chunk + leftOperand.convolve(valVec)
#chunkStr = chunkStr + ' + ' + slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
return chunk #, chunkStr
# for updating a memory vector for value with chunk
def updateMemory(self,value,chunking):
if value.startswith('!'):
if value[1:] not in self.mem:
self.mem[value[1:]] = -1*chunking
else:
self.mem[value[1:]] = self.forgetting * self.mem[value[1:]] - chunking
else:
if value not in self.mem:
self.mem[value] = chunking
else:
self.mem[value] = self.forgetting * self.mem[value] + chunking
# default request function, call this
def request(self,chunk,require_new=False):
self.busy=True
if self.error: self.error=False
self._request_count+=1
# add noise to memory
if (self.noise != 0):
self.addNoise()
# clear list of inhibited values from previous queries
self.inhibited = []
# convert chunk to string (if it isn't already a string)
chunk = self.chunk2str(chunk)
# assign any unassigned values in chunk string and load inhibited values into self.inhibited
chunk = self.assignValues(chunk)
if '?' in chunk:
self.requestValue(chunk,require_new)
else:
self.resonance(chunk)
def requestValue(self,chunk,require_new=False):
# check if chunk has slots by checking for colons (which separate slots from values)
if ':' in chunk:
queryVec = self.queryWithSlots(chunk)
else:
queryVec = self.queryJustValues(chunk)
highestCosine = self.threshold
bestMatch = 'none'
if self.verbose:
print('Query is: ' + chunk)
print('inhibited values: ' + str(self.inhibited))
print('Finst contains: ' + str(self.finst.obj))
# find the best match to the query vector in memory
for mem,memVec in self.mem.items():
# skip inhibited values
if mem not in self.inhibited:
# skip previously reported values if require_new is true
if (not require_new) or (not self.finst.contains(mem)):
thisCosine = memVec.compare(queryVec)
if self.verbose:
print(mem, thisCosine)
if thisCosine > highestCosine:
highestCosine = thisCosine
bestMatch = mem
if bestMatch == 'none':
if self.verbose:
print('No matches found above threshold of cosine =', self.threshold)
self.fail(self._request_count)
else:
# replace the placeholder '?' with the retrieved memory 'bestMatch'
chunk = chunk.replace('?',bestMatch)
if self.verbose:
print('Best match is ' + bestMatch)
print('with a cosine of ' + str(highestCosine))
print('output chunk = ' + chunk)
chunkObj = Chunk(chunk)
chunkObj.activation = highestCosine
self.finst.add(bestMatch)
self.recall(chunkObj,matches=[],request_number=self._request_count)
# performs multiple queries to determine the "coherence" of the chunk
def resonance(self,chunk):
if '?' in chunk:
print('chunk is ' + chunk)
raise Exception("Use the resonance function when the chunk has no '?'. If there is a '?' use request instead")
coherence = self.get_activation(chunk)
if self.verbose:
print('The coherence is ' + str(coherence))
if coherence <= self.threshold:
self.fail(self._request_count)
else:
chunkObj = Chunk(chunk)
chunkObj.activation = coherence
self.recall(chunkObj,matches=[],request_number=self._request_count)
# compute the coherence / activation of a chunk
# called by resonance
# called by request when no ? values are present
# if logodds=True, the convert from mean cosine to logodds and return logodds
def get_activation(self,chunk,logodds=False):
# if this function has been called directly, we need to convert
if not self.busy:
# convert chunk to string (if it isn't already a string)
chunk = self.chunk2str(chunk)
# assign any unassigned values in chunk string and load inhibited values into self.inhibited
chunk = self.assignValues(chunk)
# add noise to memory
if (self.noise != 0):
self.addNoise()
# keep track of the number of occurrences of a particular value in case of repeats
occurrences = {}
# keep a running sum of the cosines and a count of the values in the chunk
sumOfCosines = 0;
numOfValues = 0;
# perform a query for each value in chunk
for slotvalue in chunk.split():
# create a query by removing the value and replacing it with '?'
query = chunk.split() # turn chunk into list
query.pop(numOfValues) # remove this list item
# check if chunk has slots by checking for colons (which separate slots from values)
if ':' in slotvalue:
slot,value = slotvalue.split(':')
query.insert(numOfValues, slot+':?') # replace value with ?
query = ' '.join(query) # convert query to a string
queryVec = self.queryWithSlots(query)
else:
value = slotvalue
query.insert(numOfValues, '?') # replace value with ?
query = ' '.join(query) # convert query to a string
queryVec = self.queryJustValues(query)
numOfValues = numOfValues + 1;
# find the match between the query vector and the value's memory vector
self.defineVectors([value])
match = self.mem[value].compare(queryVec)
sumOfCosines = sumOfCosines + match
coherence = sumOfCosines / numOfValues
if logodds:
return self.cosine_to_logodds(coherence)
else:
return coherence
# create a query vector for a chunk consisting of slot:value pairs
# the query vector consists of the open n-grams of the slot:value pairs
# only open n-grams that contain ? are included
# the query vector must have one and only one query item "?"
def queryWithSlots(self,chunk):
# convert chunk to a list of (slot,value) pairs
chunkList = self.chunk2list(chunk)
# define random Gaussian vectors and random permutations for any undefined values and slots
self.defineVectors(chunkList)
# construct the query vector
queryVec = self.getUOGwithSlots(chunkList)
return queryVec
# create a query vector for a chunk consisting of slot:value pairs
# the query vector consists of the open n-grams of the values
# only n-grams that contain ? are included
# the query vector must have one and only one query item "?"
def queryJustValues(self,chunk):
# convert chunk to a list of values
chunkList = chunk.split()
# define random Gaussian vectors for any undefined values
self.defineVectors(chunkList)
# get all combinations ranging from pairs of slot-value pairs to sets
queryVec = self.getUOG(chunkList)
return queryVec
# chunk2str converts a chunk into a string
# or if it is already a string, chunk2str just returns the string unmodified
def chunk2str(self,chunk):
# if the chunk is a Buffer object, extract the Chunk object from inside it, then turn the Chunk into a string
if isinstance(chunk,Buffer):
chunk = Chunk(chunk.chunk)
# if the chunk is a Chunk object, turn the Chunk into a string
if isinstance(chunk,Chunk):
chunk = str(chunk)
return chunk
# chunk2list converts a chunk into a list of (slot,value) pairs
def chunk2list(self,chunk):
if ':' in chunk:
return [item.split(':') for item in chunk.split()]
else:
raise Exception("Wrong chunk format!")
return None
# assignValues checks for unassigned values, i.e., '?stuff'
# returns chunk as a string
def assignValues(self,chunk):
# convert chunk to str (if it isn't already)
chunk = self.chunk2str(chunk)
# replace instances of ?stuff with corresponding stuff
bound=None
if hasattr(self,'sch'):
bound=getattr(self.sch,'bound',None)
# split the chunkStr where there are spaces to get the list of attributes
attributes = chunk.split()
# find ?values that need to be substituted
chunkList = []
for attribute in attributes:
# this function needs to handle both chunks that are lists of slot:value pairs
# and chunks that are ordered lists of values
if ':' in attribute:
slot,value = attribute.split(':')
slot = slot + ':'
else:
value = attribute
slot = ''
# sometimes we want to specify things not to select
# for example, condiment:?unknown!mustard
# means find a condiment that isn't mustard
if value.startswith('?') and value != '?':
first = True
for subvalue in value.split('!'):
# we know the first value starts with ?, so let's substitute
if first:
first = False;
#check to see if it's not just a ? by itself
if subvalue == '?':
value = '?'
else:
try:
# take "?value" without the "?"
key = subvalue[1:]
# look it up in the "bound dictionary" and substitute
value = bound[key]
# if "value" in "?value" is undefined, replace with "?"
except:
value = '?'
# the following values all start with ! meaning things we don't want to retrieve
else:
if subvalue.startswith('?'):
# but some of them may start with ? indicating we need to substitute
try:
# take "?value" without the "?"
key = subvalue[1:]
# look it up in the "bound dictionary" and add to inhibited values list
subvalue = bound[key]
# if "value" in "?value" is undefined, raise exception
except:
print(chunk)
print('Error with subvalue: ' + subvalue + ' in chunk: ' + chunk)
raise Exception('Values beginning with ! are understood in this context as indicating values to be inhibited. The specified !value is undefined')
# add subvalue to inhibition list
self.inhibited.append(subvalue)
# add the value to the chunkList
chunkList.append(slot+value)
# convert chunkList into a string delimited by spaces
return ' '.join(chunkList)
#get environment vector for a given value
def get(self,value):
if value not in self.env:
self.env[value] = HRR(N=self.N)
self.mem[value] = HRR(data=numpy.zeros(self.N))
return self.env[value].copy()
#set environment vector for a given value to a specified vector
def set(self,value,vector):
try: # assume vector is an HRR object
newVec = vector.copy()
newVec.normalize()
self.env[value] = newVec
except: # assume vector is a list of numbers
vector = [float(i) for i in vector]
self.env[value] = HRR(data=vector)
self.env[value].normalize()
# check to see if it's in memory already, if not, define its memory as a vector of zeros
if value not in self.mem:
self.mem[value] = HRR(data=numpy.zeros(self.N))
# generate Gaussian vectors and random permutations for values & slots without
# chunkList is a list of attributes, each attribute is a string
def defineVectors(self,chunkList):
for attribute in chunkList:
# check to see if there is a slot, or if it's just a value without a slot
if isinstance(attribute,list):
slot,value = attribute
# if it's a new slot, create a new random permutation
if slot not in self.slots.keys():
self.slots[slot] = numpy.random.permutation(self.N)
else:
value = attribute
# if it starts with ! (i.e., not) just ignore that for now
if value.startswith('!'):
value = value[1:]
# if it's a new value, create a new random vector
if value not in self.env:
self.env[value] = HRR(N=self.N)
self.mem[value] = HRR(data=numpy.zeros(self.N))#self.env[value]
def fail(self,request_number):
|
def recall(self,chunk,matches,request_number):
logodds = self.cosine_to_logodds(chunk.activation)
time=self.latency*math.exp(-logodds)
if time>self.maximum_time: time=self.maximum_time
yield time
if request_number!=self._request_count: return
self._buffer.set(chunk)
for a in self.adaptors: a.recalled(chunk)
self.busy=False
# Converts vector cosine (which approximates root probability)
# to a log odds ratio (which is what ACT-R activation estimates)
def cosine_to_logodds(self,cosine):
if cosine > 0.999:
cosine = 0.999
return math.log(cosine**2 / (1 - cosine**2))
# Converts log odds ratio or ACT-R activation
# to a root probability (which the cosine approximates)
def logodds_to_cosine(self,logodds):
return math.sqrt(numpy.exp(logodds) / (numpy.exp(logodds) + 1))
class Finst:
def __init__(self,parent,size=4,time=3.0):
self.parent=parent
self.size=size
self.time=time
self.obj=[]
def contains(self,o):
return o in self.obj
def add(self,o):
if self.size==0: return
self.obj.append(o)
if len(self.obj)>self.size:
self.remove(self.obj[0])
self.parent.sch.add(self.remove,args=[o],delay=self.time)
def remove(self,o):
if o in self.obj: self.obj.remove(o)
| if self.threshold == None:
time=self.maximum_time
else:
logodds = self.cosine_to_logodds(self.threshold)
time=self.latency*math.exp(-logodds)
if time>self.maximum_time: time=self.maximum_time
yield time
if request_number!=self._request_count: return
self.error=True
self._buffer.clear()
self.busy=False | identifier_body |
hdm.py | # HDM: Holographic Declarative Memory
# A module for Python ACT-R
# written by M. A. Kelly
# except for the parts written by Terry C. Stewart
# Based on original research and the memory models
# BEAGLE (Jones & Mewhort, 2007) and DSHM (Rutledge-Taylor, Kelly, West, & Pyke, 2014)
#
# To use HDM:
# from ccm.lib.actr import *
# from ccm.lib.actr.hdm import *
# ...
# retrieval=Buffer()
# memory=HDM(retrieval)
#
# HDM has some unique parameters that DM does not have:
# N is the dimensionality of the vectors.
# Defaults to a generous 512 dimensions.
# As few as 64 and as many as 2048 have been used in the literature
# depending on the amount of noise or clarity desired.
# verbose defaults to False
# set to True if you want to see what HDM is doing in detail
# HDM also has some parameters that DM has and that are still important:
# buffer is the buffer used to output chunks retrieved from HDM
# latency is F in Fe^-a, where a is the activation calculated as
# a = ln(cosine^2 / (1 - cosine^2))
#
# HDM has three important functions to call:
# add(chunk): adds a chunk to memory
# request(chunk):
# 1. Given a chunk with exactly one unknown value '?',
# request finds the best value to fill '?'
# which it returns
# Reaction time is a function of cosine (similarity of chunk to memory)
#
# 2. Given a chunk with no unknown values,
# resonance will return the chunk if it is familiar
# or fail to return the chunk if it is unfamiliar
# i.e., has a cosine less than threshold
# Reaction time is a function of cosine (similarity of chunk to memory)
# get_activation(chunk):
# Computes the coherence of a chunk, which used in request type 2.
# Returns a mean cosine.
from __future__ import generators
import ccm
import math
import numpy
import copy
__all__=['HDM']
from ccm.lib.actr.buffer import Chunk,Buffer
# add for hdm
from ccm.lib.actr.dm import Memory
from ccm.pattern import Pattern
from ccm.lib.hrr import HRR
class HDM(Memory):
# buffer is the buffer that the retrieved chunk is placed in
# N is the vector dimensionality
# recommended dimensionality in the range of 512 to 2048, defaults to 512
# a smaller dimensionality than 512 can be used to introduce additional noise
# threshold is the lowest log odds activation allowed for a response
# this value is converted to a cosine similarity
# if no memory vector has a similarity to the query greater than threshold, the retrieval fails
# maximum time is the most time the memory system is allowed to take
# latency is used to calculate reaction time
# reaction time = latency * e^(-cosine)
# Note that using this equation, given a cosine of 0, the reaction time = latency
# Bigger latencies result in longer reaction times
# verbose defaults to FALSE.
# If TRUE, verbose turns on print statements giving details about what HDM is doing.
# forgetting controls the forgetting rate due to retroactive inhibition
# range [0 to 1]
# 1 = no forgetting
# 0 = no remembering
# When updating memory:
# memory vector = forgetting * memory vector + new information vector
# noise controls the amount of noise added to memory per time step
# Gaussian noise is added to all memory vectors
# whenever Request or Add is called
# When adding noise:
# memory vector = memory vector + noise * time since last update * noise vector
# Noise ranges from [0 ... ]
# where 0 is no noise
# and more is more noise
def __init__(self,buffer,latency=0.05,threshold=-4.6,maximum_time=10.0,finst_size=4,finst_time=3.0, N=512, verbose=False, forgetting=1.0, noise=0.0):
Memory.__init__(self,buffer)
self._buffer=buffer
self.N = N
self.verbose = verbose
self.env={'?': HRR(N=self.N)}
self.placeholder = self.env['?']
self.mem={}
self.slots={')': numpy.random.permutation(self.N)}
self.left=self.slots[')']
self.error=False
self.busy=False
self.adaptors=[]
self.latency=latency
self.threshold=self.logodds_to_cosine(threshold)
self.maximum_time=maximum_time
self.partials=[]
self.finst=Finst(self,size=finst_size,time=finst_time)
self._request_count=0
self.inhibited=[] # list of inhibited values
self.forgetting=forgetting
self.noise=noise
self.lastUpdate = 0.0
def clear(self):
self.mem.clear()
def add(self,chunk,record=None,**keys):
# if error flag is true, set to false for production system
if self.error: self.error=False
# add noise to memory
if (self.noise != 0):
self.addNoise()
# convert chunk to string (if it isn't already a string)
chunk = self.chunk2str(chunk)
# assign any unassigned values in chunk
chunk = self.assignValues(chunk)
# check if chunk has slots by checking for colons (which separate slots from values)
if ':' in chunk:
# call addWithSlots to add a chunk with slot:value pairs to memory
self.addWithSlots(chunk)
else:
# call addJustValues to add a chunk with values and no slots to memory
self.addJustValues(chunk)
# function for adding noise over time to memory
def addNoise(self):
# weight by time difference
diff = self.now() - self.lastUpdate
for value in self.mem.keys():
noiseVector = HRR(N=self.N)
self.mem[value] = self.mem[value] + (self.noise * diff * noiseVector)
self.lastUpdate = self.now()
def addWithSlots(self,chunk):
# convert chunk to a list of (slot,value) pairs
chunkList = self.chunk2list(chunk)
# define random Gaussian vectors and random permutations for any undefined values and slots
self.defineVectors(chunkList)
# update the memory vectors with the information from the chunk
| query[p][1] = '?'
print(chunkList[p][1])
print(query)
# compute chunk vector
chunkVector = self.getUOGwithSlots(query)
# update memory
self.updateMemory(chunkList[p][1],chunkVector)
# add a chunk to memory
# when the chunk is just a list of values
# without slots
def addJustValues(self,chunk):
# convert chunk to a list of values
chunkList = chunk.split()
# define random Gaussian vectors for any undefined values
self.defineVectors(chunkList)
# update the memory vectors with the information from the chunk
for p in range(0,len(chunkList)):
# create a copy of chunkList
query = copy.deepcopy(chunkList)
# replace p with ? in query
query[p] = '?'
# compute chunk vector
chunkVector = self.getUOG(query)
# update memory
self.updateMemory(chunkList[p],chunkVector)
# function for constructing a vector that represents chunkList
# where chunkList is a list of values without slots
# and p is the location of ? in chunkList
# returns chunk, an HRR representing all unconstrained open grams in chunkList
# that include the ? at p.
# When slots are not used, the permutation "left" is used to preserve order
def getUOG(self, chunkList):
numOfItems = len(chunkList)
chunk = HRR(data=numpy.zeros(self.N))
sum = HRR(data=numpy.zeros(self.N))
p = numOfItems # initially, this will be set to index of ? when ? is found
for i in range (0,numOfItems):
# get the vector for the value i
value = chunkList[i]
# set p as the location of the placeholder ?
if value == '?':
p = i
# if value starts with ! then negate the environment vector
if value.startswith('!'):
valVec = -1 * self.env[value[1:]]
# otherwise use the environment vector as is
else:
valVec = self.env[value]
# compute the chunk vector
if i == 0:
sum = valVec
elif (i > 0) and (i < p):
leftOperand = chunk + sum
leftOperand = leftOperand.permute(self.left)
chunk = chunk + leftOperand.convolve(valVec)
sum = sum + valVec
elif i == p: # force all skip grams to include item p
leftOperand = chunk + sum
leftOperand = leftOperand.permute(self.left)
chunk = leftOperand.convolve(valVec)
sum = valVec
else: # i > p, i > 0
leftOperand = chunk + sum
leftOperand = leftOperand.permute(self.left)
chunk = chunk + leftOperand.convolve(valVec)
return chunk
# function for constructing a vector that represents chunkList
# where chunkList is a list of values WITH slots as permutations
# returns chunk, an HRR representing all unconstrained open grams in chunkList
# that include the ?
def getUOGwithSlots(self, chunkList):
numOfItems = len(chunkList)
chunk = HRR(data=numpy.zeros(self.N))
sum = HRR(data=numpy.zeros(self.N))
#sumStr = ''
#chunkStr = ''
p = numOfItems # initially, this will be set to index of ? when ? is found
for i in range (0,numOfItems):
# get the vector for the slot value pair at i
slotvalue = chunkList[i]
slot = slotvalue[0]
value = slotvalue[1]
# set p as the location of the placeholder ?
if value == '?':
p = i
# if value starts with ! then negate the environment vector
if value.startswith('!'):
valVec = -1 * self.env[value[1:]]
# otherwise use the environment vector as is
else:
valVec = self.env[value]
# permute the environment vector by the slot
valVec = valVec.permute(self.slots[slot])
#slotvalueStr = slot+':'+value
# compute the chunk vector
if i == 0:
sum = valVec
#sumStr = slotvalueStr
elif (i > 0) and (i < p):
leftOperand = chunk + sum
chunk = chunk + leftOperand.convolve(valVec)
#chunkStr = chunkStr + ' + ' + slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
sum = sum + valVec
#sumStr = sumStr + ' + ' + slotvalueStr
elif i == p: # force all skip grams to include item p
leftOperand = chunk + sum
chunk = leftOperand.convolve(valVec)
#chunkStr = slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
sum = valVec
#sumStr = slotvalueStr
else: # i > p, i > 0
leftOperand = chunk + sum
chunk = chunk + leftOperand.convolve(valVec)
#chunkStr = chunkStr + ' + ' + slotvalueStr + ' * (' + chunkStr + ' + ' + sumStr + ')'
return chunk #, chunkStr
# for updating a memory vector for value with chunk
def updateMemory(self,value,chunking):
if value.startswith('!'):
if value[1:] not in self.mem:
self.mem[value[1:]] = -1*chunking
else:
self.mem[value[1:]] = self.forgetting * self.mem[value[1:]] - chunking
else:
if value not in self.mem:
self.mem[value] = chunking
else:
self.mem[value] = self.forgetting * self.mem[value] + chunking
# default request function, call this
def request(self,chunk,require_new=False):
self.busy=True
if self.error: self.error=False
self._request_count+=1
# add noise to memory
if (self.noise != 0):
self.addNoise()
# clear list of inhibited values from previous queries
self.inhibited = []
# convert chunk to string (if it isn't already a string)
chunk = self.chunk2str(chunk)
# assign any unassigned values in chunk string and load inhibited values into self.inhibited
chunk = self.assignValues(chunk)
if '?' in chunk:
self.requestValue(chunk,require_new)
else:
self.resonance(chunk)
def requestValue(self,chunk,require_new=False):
# check if chunk has slots by checking for colons (which separate slots from values)
if ':' in chunk:
queryVec = self.queryWithSlots(chunk)
else:
queryVec = self.queryJustValues(chunk)
highestCosine = self.threshold
bestMatch = 'none'
if self.verbose:
print('Query is: ' + chunk)
print('inhibited values: ' + str(self.inhibited))
print('Finst contains: ' + str(self.finst.obj))
# find the best match to the query vector in memory
for mem,memVec in self.mem.items():
# skip inhibited values
if mem not in self.inhibited:
# skip previously reported values if require_new is true
if (not require_new) or (not self.finst.contains(mem)):
thisCosine = memVec.compare(queryVec)
if self.verbose:
print(mem, thisCosine)
if thisCosine > highestCosine:
highestCosine = thisCosine
bestMatch = mem
if bestMatch == 'none':
if self.verbose:
print('No matches found above threshold of cosine =', self.threshold)
self.fail(self._request_count)
else:
# replace the placeholder '?' with the retrieved memory 'bestMatch'
chunk = chunk.replace('?',bestMatch)
if self.verbose:
print('Best match is ' + bestMatch)
print('with a cosine of ' + str(highestCosine))
print('output chunk = ' + chunk)
chunkObj = Chunk(chunk)
chunkObj.activation = highestCosine
self.finst.add(bestMatch)
self.recall(chunkObj,matches=[],request_number=self._request_count)
# performs multiple queries to determine the "coherence" of the chunk
def resonance(self,chunk):
if '?' in chunk:
print('chunk is ' + chunk)
raise Exception("Use the resonance function when the chunk has no '?'. If there is a '?' use request instead")
coherence = self.get_activation(chunk)
if self.verbose:
print('The coherence is ' + str(coherence))
if coherence <= self.threshold:
self.fail(self._request_count)
else:
chunkObj = Chunk(chunk)
chunkObj.activation = coherence
self.recall(chunkObj,matches=[],request_number=self._request_count)
# compute the coherence / activation of a chunk
# called by resonance
# called by request when no ? values are present
# if logodds=True, the convert from mean cosine to logodds and return logodds
def get_activation(self,chunk,logodds=False):
# if this function has been called directly, we need to convert
if not self.busy:
# convert chunk to string (if it isn't already a string)
chunk = self.chunk2str(chunk)
# assign any unassigned values in chunk string and load inhibited values into self.inhibited
chunk = self.assignValues(chunk)
# add noise to memory
if (self.noise != 0):
self.addNoise()
# keep track of the number of occurrences of a particular value in case of repeats
occurrences = {}
# keep a running sum of the cosines and a count of the values in the chunk
sumOfCosines = 0;
numOfValues = 0;
# perform a query for each value in chunk
for slotvalue in chunk.split():
# create a query by removing the value and replacing it with '?'
query = chunk.split() # turn chunk into list
query.pop(numOfValues) # remove this list item
# check if chunk has slots by checking for colons (which separate slots from values)
if ':' in slotvalue:
slot,value = slotvalue.split(':')
query.insert(numOfValues, slot+':?') # replace value with ?
query = ' '.join(query) # convert query to a string
queryVec = self.queryWithSlots(query)
else:
value = slotvalue
query.insert(numOfValues, '?') # replace value with ?
query = ' '.join(query) # convert query to a string
queryVec = self.queryJustValues(query)
numOfValues = numOfValues + 1;
# find the match between the query vector and the value's memory vector
self.defineVectors([value])
match = self.mem[value].compare(queryVec)
sumOfCosines = sumOfCosines + match
coherence = sumOfCosines / numOfValues
if logodds:
return self.cosine_to_logodds(coherence)
else:
return coherence
# create a query vector for a chunk consisting of slot:value pairs
# the query vector consists of the open n-grams of the slot:value pairs
# only open n-grams that contain ? are included
# the query vector must have one and only one query item "?"
def queryWithSlots(self,chunk):
# convert chunk to a list of (slot,value) pairs
chunkList = self.chunk2list(chunk)
# define random Gaussian vectors and random permutations for any undefined values and slots
self.defineVectors(chunkList)
# construct the query vector
queryVec = self.getUOGwithSlots(chunkList)
return queryVec
# create a query vector for a chunk consisting of slot:value pairs
# the query vector consists of the open n-grams of the values
# only n-grams that contain ? are included
# the query vector must have one and only one query item "?"
def queryJustValues(self,chunk):
# convert chunk to a list of values
chunkList = chunk.split()
# define random Gaussian vectors for any undefined values
self.defineVectors(chunkList)
# get all combinations ranging from pairs of slot-value pairs to sets
queryVec = self.getUOG(chunkList)
return queryVec
# chunk2str converts a chunk into a string
# or if it is already a string, chunk2str just returns the string unmodified
def chunk2str(self,chunk):
# if the chunk is a Buffer object, extract the Chunk object from inside it, then turn the Chunk into a string
if isinstance(chunk,Buffer):
chunk = Chunk(chunk.chunk)
# if the chunk is a Chunk object, turn the Chunk into a string
if isinstance(chunk,Chunk):
chunk = str(chunk)
return chunk
# chunk2list converts a chunk into a list of (slot,value) pairs
def chunk2list(self,chunk):
if ':' in chunk:
return [item.split(':') for item in chunk.split()]
else:
raise Exception("Wrong chunk format!")
return None
# assignValues checks for unassigned values, i.e., '?stuff'
# returns chunk as a string
def assignValues(self,chunk):
# convert chunk to str (if it isn't already)
chunk = self.chunk2str(chunk)
# replace instances of ?stuff with corresponding stuff
bound=None
if hasattr(self,'sch'):
bound=getattr(self.sch,'bound',None)
# split the chunkStr where there are spaces to get the list of attributes
attributes = chunk.split()
# find ?values that need to be substituted
chunkList = []
for attribute in attributes:
# this function needs to handle both chunks that are lists of slot:value pairs
# and chunks that are ordered lists of values
if ':' in attribute:
slot,value = attribute.split(':')
slot = slot + ':'
else:
value = attribute
slot = ''
# sometimes we want to specify things not to select
# for example, condiment:?unknown!mustard
# means find a condiment that isn't mustard
if value.startswith('?') and value != '?':
first = True
for subvalue in value.split('!'):
# we know the first value starts with ?, so let's substitute
if first:
first = False;
#check to see if it's not just a ? by itself
if subvalue == '?':
value = '?'
else:
try:
# take "?value" without the "?"
key = subvalue[1:]
# look it up in the "bound dictionary" and substitute
value = bound[key]
# if "value" in "?value" is undefined, replace with "?"
except:
value = '?'
# the following values all start with ! meaning things we don't want to retrieve
else:
if subvalue.startswith('?'):
# but some of them may start with ? indicating we need to substitute
try:
# take "?value" without the "?"
key = subvalue[1:]
# look it up in the "bound dictionary" and add to inhibited values list
subvalue = bound[key]
# if "value" in "?value" is undefined, raise exception
except:
print(chunk)
print('Error with subvalue: ' + subvalue + ' in chunk: ' + chunk)
raise Exception('Values beginning with ! are understood in this context as indicating values to be inhibited. The specified !value is undefined')
# add subvalue to inhibition list
self.inhibited.append(subvalue)
# add the value to the chunkList
chunkList.append(slot+value)
# convert chunkList into a string delimited by spaces
return ' '.join(chunkList)
#get environment vector for a given value
def get(self,value):
if value not in self.env:
self.env[value] = HRR(N=self.N)
self.mem[value] = HRR(data=numpy.zeros(self.N))
return self.env[value].copy()
#set environment vector for a given value to a specified vector
def set(self,value,vector):
try: # assume vector is an HRR object
newVec = vector.copy()
newVec.normalize()
self.env[value] = newVec
except: # assume vector is a list of numbers
vector = [float(i) for i in vector]
self.env[value] = HRR(data=vector)
self.env[value].normalize()
# check to see if it's in memory already, if not, define its memory as a vector of zeros
if value not in self.mem:
self.mem[value] = HRR(data=numpy.zeros(self.N))
# generate Gaussian vectors and random permutations for values & slots without
# chunkList is a list of attributes, each attribute is a string
def defineVectors(self,chunkList):
for attribute in chunkList:
# check to see if there is a slot, or if it's just a value without a slot
if isinstance(attribute,list):
slot,value = attribute
# if it's a new slot, create a new random permutation
if slot not in self.slots.keys():
self.slots[slot] = numpy.random.permutation(self.N)
else:
value = attribute
# if it starts with ! (i.e., not) just ignore that for now
if value.startswith('!'):
value = value[1:]
# if it's a new value, create a new random vector
if value not in self.env:
self.env[value] = HRR(N=self.N)
self.mem[value] = HRR(data=numpy.zeros(self.N))#self.env[value]
def fail(self,request_number):
if self.threshold == None:
time=self.maximum_time
else:
logodds = self.cosine_to_logodds(self.threshold)
time=self.latency*math.exp(-logodds)
if time>self.maximum_time: time=self.maximum_time
yield time
if request_number!=self._request_count: return
self.error=True
self._buffer.clear()
self.busy=False
def recall(self,chunk,matches,request_number):
logodds = self.cosine_to_logodds(chunk.activation)
time=self.latency*math.exp(-logodds)
if time>self.maximum_time: time=self.maximum_time
yield time
if request_number!=self._request_count: return
self._buffer.set(chunk)
for a in self.adaptors: a.recalled(chunk)
self.busy=False
# Converts vector cosine (which approximates root probability)
# to a log odds ratio (which is what ACT-R activation estimates)
def cosine_to_logodds(self,cosine):
if cosine > 0.999:
cosine = 0.999
return math.log(cosine**2 / (1 - cosine**2))
# Converts log odds ratio or ACT-R activation
# to a root probability (which the cosine approximates)
def logodds_to_cosine(self,logodds):
return math.sqrt(numpy.exp(logodds) / (numpy.exp(logodds) + 1))
class Finst:
def __init__(self,parent,size=4,time=3.0):
self.parent=parent
self.size=size
self.time=time
self.obj=[]
def contains(self,o):
return o in self.obj
def add(self,o):
if self.size==0: return
self.obj.append(o)
if len(self.obj)>self.size:
self.remove(self.obj[0])
self.parent.sch.add(self.remove,args=[o],delay=self.time)
def remove(self,o):
if o in self.obj: self.obj.remove(o) | for p in range(0,len(chunkList)):
# create a copy of chunkList
query = copy.deepcopy(chunkList)
# replace p's value with ? in query, but leave slot as is
| random_line_split |
glium_backend.rs | //! Glium-based backend for the Vitral GUI library.
#![deny(missing_docs)]
use euclid::{Point2D, Size2D};
use glium::glutin::dpi::{LogicalPosition, LogicalSize};
use glium::glutin::{self, Event, WindowEvent};
use glium::index::PrimitiveType;
use glium::{self, Surface};
use std::error::Error;
use std::fmt::Debug;
use std::hash::Hash;
use {
AtlasCache, CanvasZoom, Color, Core, ImageBuffer, Keycode, MouseButton, TextureIndex, Vertex,
};
/// Default texture type used by the backend.
type GliumTexture = glium::texture::SrgbTexture2d;
/// Glium-rendering backend for Vitral.
pub struct Backend<V> {
display: glium::Display,
events: glutin::EventsLoop,
program: glium::Program,
textures: Vec<GliumTexture>,
keypress: Vec<KeyEvent>,
canvas: Canvas,
zoom: CanvasZoom,
window_size: Size2D<u32>,
phantom: ::std::marker::PhantomData<V>,
}
impl<V: glium::Vertex + Vertex> Backend<V> {
/// Create a new Glium backend for Vitral.
///
/// The backend requires an user-supplied vertex type as a type parameter and a shader program
/// to render data of that type as argument to the constructor.
pub fn new(
display: glium::Display,
events: glutin::EventsLoop,
program: glium::Program,
width: u32,
height: u32,
) -> Backend<V> {
let (w, h) = get_size(&display);
let canvas = Canvas::new(&display, width, height);
Backend {
display,
events,
program,
textures: Vec::new(),
keypress: Vec::new(),
canvas,
zoom: CanvasZoom::PixelPerfect,
window_size: Size2D::new(w, h),
phantom: ::std::marker::PhantomData,
}
}
/// Open a Glium window and start a backend for it.
///
/// The custom shader must support a uniform named `tex` for texture data.
pub fn start<'a, S, P>(
width: u32,
height: u32,
title: S,
shader: P,
) -> Result<Backend<V>, Box<Error>>
where
S: Into<String>,
P: Into<glium::program::ProgramCreationInput<'a>>,
{
let events = glutin::EventsLoop::new();
let window = glutin::WindowBuilder::new().with_title(title);
let context = glutin::ContextBuilder::new()
.with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2)));
let display = glium::Display::new(window, context, &events)?;
let program = glium::Program::new(&display, shader.into())?;
{
// Start the window as a good fit on the primary monitor.
// Don't make it a completely fullscreen window, that might put the window title bar
// outside the screen.
const BUFFER: u32 = 8;
let monitor_size = display
.gl_window()
.window()
.get_primary_monitor()
.get_dimensions();
let monitor_size = Size2D::new(monitor_size.width as u32, monitor_size.height as u32);
let mut dim = Size2D::new(width, height);
while dim.width + width <= monitor_size.width - BUFFER
&& dim.height + height <= monitor_size.height - BUFFER
{
dim.width += width;
dim.height += height;
}
display
.gl_window()
.set_inner_size(LogicalSize::new(dim.width as f64, dim.height as f64));
display.gl_window().set_position(LogicalPosition::new(
(monitor_size.width - dim.width) as f64 / 2.0,
(monitor_size.height - dim.height) as f64 / 2.0,
));
}
Ok(Backend::new(display, events, program, width, height))
}
/// Return the pixel resolution of the backend.
///
/// Note that this is the logical size which will stay the same even when the
/// desktop window is resized.
pub fn canvas_size(&self) -> Size2D<u32> { self.canvas.size }
/// Return the current number of textures.
pub fn texture_count(&self) -> usize { self.textures.len() }
/// Make a new empty internal texture.
///
/// The new `TextureIndex` must equal the value `self.texture_count()` would have returned
/// just before calling this.
pub fn make_empty_texture(&mut self, width: u32, height: u32) -> TextureIndex {
let tex = glium::texture::SrgbTexture2d::empty(&self.display, width, height).unwrap();
self.textures.push(tex);
self.textures.len() - 1
}
/// Rewrite an internal texture.
pub fn write_to_texture(&mut self, img: &ImageBuffer, texture: TextureIndex) {
assert!(
texture < self.textures.len(),
"Trying to write nonexistent texture"
);
let rect = glium::Rect {
left: 0,
bottom: 0,
width: img.size.width,
height: img.size.height,
};
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels.clone(),
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
self.textures[texture].write(rect, raw);
}
/// Make a new internal texture using image data.
pub fn make_texture(&mut self, img: ImageBuffer) -> TextureIndex {
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels,
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
let tex = glium::texture::SrgbTexture2d::new(&self.display, raw).unwrap();
self.textures.push(tex);
self.textures.len() - 1
}
/// Update or construct textures based on changes in atlas cache.
pub fn sync_with_atlas_cache<T: Eq + Hash + Clone + Debug>(
&mut self,
atlas_cache: &mut AtlasCache<T>,
) {
for a in atlas_cache.atlases_mut() {
let idx = a.texture();
// If there are sheets in the atlas that don't have corresponding textures yet,
// construct those now.
while idx >= self.texture_count() {
self.make_empty_texture(a.size().width, a.size().height);
}
// Write the updated texture atlas to internal texture.
a.update_texture(|buf, idx| self.write_to_texture(buf, idx));
}
}
fn process_events(&mut self, core: &mut Core<V>) -> bool {
self.keypress.clear();
// polling and handling the events received by the window
let mut event_list = Vec::new();
self.events.poll_events(|event| event_list.push(event));
for e in event_list {
match e {
Event::WindowEvent {
ref event,
window_id,
}
if window_id == self.display.gl_window().id() =>
{
match event {
&WindowEvent::CloseRequested => return false,
&WindowEvent::CursorMoved { position, .. } => {
let position =
position.to_physical(self.display.gl_window().get_hidpi_factor());
let pos = self.zoom.screen_to_canvas(
self.window_size,
self.canvas.size(),
Point2D::new(position.x as f32, position.y as f32),
);
core.input_mouse_move(pos.x as i32, pos.y as i32);
}
&WindowEvent::MouseInput { state, button, .. } => core.input_mouse_button(
match button {
glutin::MouseButton::Left => MouseButton::Left,
glutin::MouseButton::Right => MouseButton::Right,
_ => MouseButton::Middle,
},
state == glutin::ElementState::Pressed,
),
&WindowEvent::ReceivedCharacter(c) => core.input_char(c),
&WindowEvent::KeyboardInput {
input:
glutin::KeyboardInput {
state,
scancode,
virtual_keycode,
..
},
..
} => {
self.keypress.push(KeyEvent {
state,
scancode: scancode as u8,
virtual_keycode,
});
let is_down = state == glutin::ElementState::Pressed;
use glium::glutin::VirtualKeyCode::*;
if let Some(vk) = match virtual_keycode {
Some(Tab) => Some(Keycode::Tab),
Some(LShift) | Some(RShift) => Some(Keycode::Shift),
Some(LControl) | Some(RControl) => Some(Keycode::Ctrl),
Some(NumpadEnter) | Some(Return) => Some(Keycode::Enter),
Some(Back) => Some(Keycode::Backspace),
Some(Delete) => Some(Keycode::Del),
Some(Numpad8) | Some(Up) => Some(Keycode::Up),
Some(Numpad2) | Some(Down) => Some(Keycode::Down),
Some(Numpad4) | Some(Left) => Some(Keycode::Left),
Some(Numpad6) | Some(Right) => Some(Keycode::Right),
_ => None,
} {
core.input_key_state(vk, is_down);
}
}
_ => (),
}
}
// Events in other windows, ignore
Event::WindowEvent { .. } => {}
Event::Awakened => {
// TODO: Suspend/awaken behavior
}
Event::DeviceEvent { .. } => {}
Event::Suspended(_) => {}
}
}
true
}
/// Return the next keypress event if there is one.
pub fn poll_key(&mut self) -> Option<KeyEvent> { self.keypress.pop() }
fn render(&mut self, core: &mut Core<V>) {
let mut target = self.canvas.get_framebuffer_target(&self.display);
target.clear_color(0.0, 0.0, 0.0, 0.0);
let (w, h) = target.get_dimensions();
for batch in core.end_frame() {
// building the uniforms
let uniforms = uniform! {
matrix: [
[2.0 / w as f32, 0.0, 0.0, -1.0],
[0.0, -2.0 / h as f32, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0f32]
],
tex: glium::uniforms::Sampler::new(&self.textures[batch.texture])
.magnify_filter(glium::uniforms::MagnifySamplerFilter::Nearest),
};
let vertex_buffer =
{ glium::VertexBuffer::new(&self.display, &batch.vertices).unwrap() };
// building the index buffer
let index_buffer = glium::IndexBuffer::new(
&self.display,
PrimitiveType::TrianglesList,
&batch.triangle_indices,
).unwrap();
let params = glium::draw_parameters::DrawParameters {
scissor: batch.clip.map(|clip| glium::Rect {
left: clip.origin.x as u32,
bottom: h - (clip.origin.y + clip.size.height) as u32,
width: clip.size.width as u32,
height: clip.size.height as u32,
}),
blend: glium::Blend::alpha_blending(),
..Default::default()
};
target
.draw(
&vertex_buffer,
&index_buffer,
&self.program,
&uniforms,
¶ms,
)
.unwrap();
}
}
fn update_window_size(&mut self) {
let (w, h) = get_size(&self.display);
self.window_size = Size2D::new(w, h);
}
/// Display the backend and read input events.
pub fn update(&mut self, core: &mut Core<V>) -> bool {
self.update_window_size();
self.render(core);
self.canvas.draw(&self.display, self.zoom);
self.process_events(core)
}
/// Return an image for the current contents of the screen.
pub fn screenshot(&self) -> ImageBuffer { self.canvas.screenshot() }
}
/// Type for key events not handled by Vitral.
#[derive(Debug)]
pub struct KeyEvent {
/// Was the key pressed or released
pub state: glutin::ElementState,
/// Layout-dependent keycode
pub virtual_keycode: Option<glutin::VirtualKeyCode>,
/// Keyboard layout independent hardware scancode for the key
pub scancode: u8,
}
/// Shader program for the `DefaultVertex` type
pub const DEFAULT_SHADER: glium::program::SourceCode = glium::program::SourceCode {
vertex_shader: "
#version 150 core
uniform mat4 matrix;
in vec2 pos;
in vec4 color;
in vec2 tex_coord;
out vec4 v_color;
out vec2 v_tex_coord;
void main() {
gl_Position = vec4(pos, 0.0, 1.0) * matrix;
v_color = color;
v_tex_coord = tex_coord;
}",
fragment_shader: "
#version 150 core
uniform sampler2D tex;
in vec4 v_color;
in vec2 v_tex_coord;
out vec4 f_color;
void main() {
vec4 tex_color = texture(tex, v_tex_coord);
// Discard fully transparent pixels to keep them from
// writing into the depth buffer.
if (tex_color.a == 0.0) discard;
f_color = v_color * tex_color;
}",
tessellation_control_shader: None,
tessellation_evaluation_shader: None,
geometry_shader: None,
};
/// A regular vertex that implements exactly the fields used by Vitral.
#[derive(Copy, Clone)]
pub struct DefaultVertex {
/// 2D position
pub pos: [f32; 2],
/// Texture coordinates
pub tex_coord: [f32; 2],
/// RGBA color
pub color: Color,
}
implement_vertex!(DefaultVertex, pos, tex_coord, color);
impl Vertex for DefaultVertex {
fn new(pos: Point2D<f32>, tex_coord: Point2D<f32>, color: Color) -> Self {
DefaultVertex {
pos: [pos.x, pos.y],
tex_coord: [tex_coord.x, tex_coord.y],
color,
}
}
}
/// A deferred rendering buffer for pixel-perfect display.
struct Canvas {
size: Size2D<u32>,
buffer: glium::texture::SrgbTexture2d,
depth_buffer: glium::framebuffer::DepthRenderBuffer,
shader: glium::Program,
}
impl Canvas {
pub fn new(display: &glium::Display, width: u32, height: u32) -> Canvas {
let shader = program!(
display,
150 => {
vertex: "
#version 150 core
in vec2 pos;
in vec2 tex_coord;
out vec2 v_tex_coord;
void main() {
v_tex_coord = tex_coord;
gl_Position = vec4(pos, 0.0, 1.0);
}",
fragment: "
#version 150 core
uniform sampler2D tex;
in vec2 v_tex_coord;
out vec4 f_color;
void main() {
vec4 tex_color = texture(tex, v_tex_coord);
tex_color.a = 1.0;
f_color = tex_color;
}"})
.unwrap();
let buffer = glium::texture::SrgbTexture2d::empty(display, width, height).unwrap();
let depth_buffer = glium::framebuffer::DepthRenderBuffer::new(
display,
glium::texture::DepthFormat::F32,
width,
height,
).unwrap();
Canvas {
size: Size2D::new(width, height),
buffer,
depth_buffer,
shader,
}
}
/// Get the render target to the pixel-perfect framebuffer.
pub fn get_framebuffer_target(
&mut self,
display: &glium::Display,
) -> glium::framebuffer::SimpleFrameBuffer {
glium::framebuffer::SimpleFrameBuffer::with_depth_buffer(
display,
&self.buffer,
&self.depth_buffer,
).unwrap()
}
pub fn draw(&mut self, display: &glium::Display, zoom: CanvasZoom) {
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 0.0);
let (w, h) = get_size(display);
// Build the geometry for the on-screen rectangle.
let s_rect = zoom.fit_canvas(Size2D::new(w, h), self.size);
let (sx, sy) = (s_rect.origin.x, s_rect.origin.y);
let (sw, sh) = (s_rect.size.width, s_rect.size.height);
// XXX: This could use glium::Surface::blit_whole_color_to instead of
// the handmade blitting, but that was buggy on Windows around
// 2015-03.
let vertices = {
#[derive(Copy, Clone)]
struct BlitVertex {
pos: [f32; 2],
tex_coord: [f32; 2],
}
implement_vertex!(BlitVertex, pos, tex_coord);
glium::VertexBuffer::new(
display,
&[
BlitVertex {
pos: [sx, sy],
tex_coord: [0.0, 0.0],
},
BlitVertex {
pos: [sx + sw, sy],
tex_coord: [1.0, 0.0],
},
BlitVertex {
pos: [sx + sw, sy + sh],
tex_coord: [1.0, 1.0],
},
BlitVertex {
pos: [sx, sy + sh],
tex_coord: [0.0, 1.0],
},
],
).unwrap()
};
let indices = glium::IndexBuffer::new(
display,
glium::index::PrimitiveType::TrianglesList,
&[0u16, 1, 2, 0, 2, 3],
).unwrap();
// Set up the rest of the draw parameters.
let mut params: glium::DrawParameters = Default::default();
// Set an explicit viewport to apply the custom resolution that fixes
// pixel perfect rounding errors.
params.viewport = Some(glium::Rect {
left: 0,
bottom: 0,
width: w,
height: h,
});
// TODO: Option to use smooth filter & non-pixel-perfect scaling
let mag_filter = glium::uniforms::MagnifySamplerFilter::Nearest;
let uniforms = glium::uniforms::UniformsStorage::new(
"tex",
glium::uniforms::Sampler(
&self.buffer,
glium::uniforms::SamplerBehavior {
magnify_filter: mag_filter,
minify_filter: glium::uniforms::MinifySamplerFilter::Linear,
..Default::default()
},
),
);
// Draw the graphics buffer to the window.
target
.draw(&vertices, &indices, &self.shader, &uniforms, ¶ms)
.unwrap();
target.finish().unwrap();
}
pub fn | (&self) -> Size2D<u32> { self.size }
pub fn screenshot(&self) -> ImageBuffer {
let image: glium::texture::RawImage2d<u8> = self.buffer.read();
ImageBuffer::from_fn(image.width, image.height, |x, y| {
let i = (x * 4 + (image.height - y - 1) * image.width * 4) as usize;
image.data[i] as u32
+ ((image.data[i + 1] as u32) << 8)
+ ((image.data[i + 2] as u32) << 16)
+ ((image.data[i + 3] as u32) << 24)
})
}
}
fn get_size(display: &glium::Display) -> (u32, u32) {
let size = display
.gl_window()
.get_inner_size()
.unwrap_or(LogicalSize::new(800.0, 600.0))
.to_physical(display.gl_window().get_hidpi_factor());
(size.width as u32, size.height as u32)
}
| size | identifier_name |
glium_backend.rs | //! Glium-based backend for the Vitral GUI library.
#![deny(missing_docs)]
use euclid::{Point2D, Size2D};
use glium::glutin::dpi::{LogicalPosition, LogicalSize};
use glium::glutin::{self, Event, WindowEvent};
use glium::index::PrimitiveType;
use glium::{self, Surface};
use std::error::Error;
use std::fmt::Debug;
use std::hash::Hash;
use {
AtlasCache, CanvasZoom, Color, Core, ImageBuffer, Keycode, MouseButton, TextureIndex, Vertex,
};
/// Default texture type used by the backend.
type GliumTexture = glium::texture::SrgbTexture2d;
/// Glium-rendering backend for Vitral.
pub struct Backend<V> {
display: glium::Display,
events: glutin::EventsLoop,
program: glium::Program,
textures: Vec<GliumTexture>,
keypress: Vec<KeyEvent>,
canvas: Canvas,
zoom: CanvasZoom,
window_size: Size2D<u32>,
phantom: ::std::marker::PhantomData<V>,
}
impl<V: glium::Vertex + Vertex> Backend<V> {
/// Create a new Glium backend for Vitral.
///
/// The backend requires an user-supplied vertex type as a type parameter and a shader program
/// to render data of that type as argument to the constructor.
pub fn new(
display: glium::Display,
events: glutin::EventsLoop,
program: glium::Program,
width: u32,
height: u32,
) -> Backend<V> {
let (w, h) = get_size(&display);
let canvas = Canvas::new(&display, width, height);
Backend {
display,
events,
program,
textures: Vec::new(),
keypress: Vec::new(),
canvas,
zoom: CanvasZoom::PixelPerfect,
window_size: Size2D::new(w, h),
phantom: ::std::marker::PhantomData,
}
}
/// Open a Glium window and start a backend for it.
///
/// The custom shader must support a uniform named `tex` for texture data.
pub fn start<'a, S, P>(
width: u32,
height: u32,
title: S,
shader: P,
) -> Result<Backend<V>, Box<Error>>
where
S: Into<String>,
P: Into<glium::program::ProgramCreationInput<'a>>,
{
let events = glutin::EventsLoop::new();
let window = glutin::WindowBuilder::new().with_title(title);
let context = glutin::ContextBuilder::new()
.with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2)));
let display = glium::Display::new(window, context, &events)?;
let program = glium::Program::new(&display, shader.into())?;
{
// Start the window as a good fit on the primary monitor.
// Don't make it a completely fullscreen window, that might put the window title bar
// outside the screen.
const BUFFER: u32 = 8;
let monitor_size = display
.gl_window()
.window()
.get_primary_monitor()
.get_dimensions();
let monitor_size = Size2D::new(monitor_size.width as u32, monitor_size.height as u32);
let mut dim = Size2D::new(width, height);
while dim.width + width <= monitor_size.width - BUFFER
&& dim.height + height <= monitor_size.height - BUFFER
{
dim.width += width;
dim.height += height;
}
display
.gl_window()
.set_inner_size(LogicalSize::new(dim.width as f64, dim.height as f64));
display.gl_window().set_position(LogicalPosition::new(
(monitor_size.width - dim.width) as f64 / 2.0,
(monitor_size.height - dim.height) as f64 / 2.0,
));
}
Ok(Backend::new(display, events, program, width, height))
}
/// Return the pixel resolution of the backend.
///
/// Note that this is the logical size which will stay the same even when the
/// desktop window is resized.
pub fn canvas_size(&self) -> Size2D<u32> { self.canvas.size }
/// Return the current number of textures.
pub fn texture_count(&self) -> usize { self.textures.len() }
/// Make a new empty internal texture.
///
/// The new `TextureIndex` must equal the value `self.texture_count()` would have returned
/// just before calling this.
pub fn make_empty_texture(&mut self, width: u32, height: u32) -> TextureIndex {
let tex = glium::texture::SrgbTexture2d::empty(&self.display, width, height).unwrap();
self.textures.push(tex);
self.textures.len() - 1
}
/// Rewrite an internal texture.
pub fn write_to_texture(&mut self, img: &ImageBuffer, texture: TextureIndex) {
assert!(
texture < self.textures.len(),
"Trying to write nonexistent texture"
);
let rect = glium::Rect {
left: 0,
bottom: 0,
width: img.size.width,
height: img.size.height,
};
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels.clone(),
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
self.textures[texture].write(rect, raw);
}
/// Make a new internal texture using image data.
pub fn make_texture(&mut self, img: ImageBuffer) -> TextureIndex {
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels,
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
let tex = glium::texture::SrgbTexture2d::new(&self.display, raw).unwrap();
self.textures.push(tex);
self.textures.len() - 1
}
/// Update or construct textures based on changes in atlas cache.
pub fn sync_with_atlas_cache<T: Eq + Hash + Clone + Debug>(
&mut self,
atlas_cache: &mut AtlasCache<T>,
) {
for a in atlas_cache.atlases_mut() {
let idx = a.texture();
// If there are sheets in the atlas that don't have corresponding textures yet,
// construct those now.
while idx >= self.texture_count() {
self.make_empty_texture(a.size().width, a.size().height);
}
// Write the updated texture atlas to internal texture.
a.update_texture(|buf, idx| self.write_to_texture(buf, idx));
}
}
fn process_events(&mut self, core: &mut Core<V>) -> bool {
self.keypress.clear();
// polling and handling the events received by the window
let mut event_list = Vec::new();
self.events.poll_events(|event| event_list.push(event));
for e in event_list {
match e {
Event::WindowEvent {
ref event,
window_id,
}
if window_id == self.display.gl_window().id() =>
{
match event {
&WindowEvent::CloseRequested => return false,
&WindowEvent::CursorMoved { position, .. } => {
let position =
position.to_physical(self.display.gl_window().get_hidpi_factor());
let pos = self.zoom.screen_to_canvas(
self.window_size,
self.canvas.size(),
Point2D::new(position.x as f32, position.y as f32),
);
core.input_mouse_move(pos.x as i32, pos.y as i32);
}
&WindowEvent::MouseInput { state, button, .. } => core.input_mouse_button(
match button {
glutin::MouseButton::Left => MouseButton::Left,
glutin::MouseButton::Right => MouseButton::Right,
_ => MouseButton::Middle,
},
state == glutin::ElementState::Pressed,
),
&WindowEvent::ReceivedCharacter(c) => core.input_char(c),
&WindowEvent::KeyboardInput {
input:
glutin::KeyboardInput {
state,
scancode,
virtual_keycode,
..
},
..
} => {
self.keypress.push(KeyEvent {
state,
scancode: scancode as u8,
virtual_keycode,
});
let is_down = state == glutin::ElementState::Pressed;
use glium::glutin::VirtualKeyCode::*;
if let Some(vk) = match virtual_keycode {
Some(Tab) => Some(Keycode::Tab),
Some(LShift) | Some(RShift) => Some(Keycode::Shift),
Some(LControl) | Some(RControl) => Some(Keycode::Ctrl),
Some(NumpadEnter) | Some(Return) => Some(Keycode::Enter),
Some(Back) => Some(Keycode::Backspace),
Some(Delete) => Some(Keycode::Del),
Some(Numpad8) | Some(Up) => Some(Keycode::Up),
Some(Numpad2) | Some(Down) => Some(Keycode::Down),
Some(Numpad4) | Some(Left) => Some(Keycode::Left),
Some(Numpad6) | Some(Right) => Some(Keycode::Right),
_ => None,
} {
core.input_key_state(vk, is_down);
}
}
_ => (),
}
}
// Events in other windows, ignore
Event::WindowEvent { .. } => {}
Event::Awakened => {
// TODO: Suspend/awaken behavior
}
Event::DeviceEvent { .. } => {}
Event::Suspended(_) => {}
}
}
true
}
/// Return the next keypress event if there is one.
pub fn poll_key(&mut self) -> Option<KeyEvent> { self.keypress.pop() }
fn render(&mut self, core: &mut Core<V>) {
let mut target = self.canvas.get_framebuffer_target(&self.display);
target.clear_color(0.0, 0.0, 0.0, 0.0);
let (w, h) = target.get_dimensions();
for batch in core.end_frame() {
// building the uniforms
let uniforms = uniform! {
matrix: [
[2.0 / w as f32, 0.0, 0.0, -1.0],
[0.0, -2.0 / h as f32, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0f32]
],
tex: glium::uniforms::Sampler::new(&self.textures[batch.texture])
.magnify_filter(glium::uniforms::MagnifySamplerFilter::Nearest),
};
let vertex_buffer =
{ glium::VertexBuffer::new(&self.display, &batch.vertices).unwrap() };
// building the index buffer
let index_buffer = glium::IndexBuffer::new(
&self.display,
PrimitiveType::TrianglesList,
&batch.triangle_indices,
).unwrap();
let params = glium::draw_parameters::DrawParameters {
scissor: batch.clip.map(|clip| glium::Rect {
left: clip.origin.x as u32,
bottom: h - (clip.origin.y + clip.size.height) as u32,
width: clip.size.width as u32,
height: clip.size.height as u32,
}),
blend: glium::Blend::alpha_blending(),
..Default::default()
};
target
.draw(
&vertex_buffer,
&index_buffer,
&self.program,
&uniforms,
¶ms,
)
.unwrap();
}
}
fn update_window_size(&mut self) {
let (w, h) = get_size(&self.display);
self.window_size = Size2D::new(w, h);
}
/// Display the backend and read input events.
pub fn update(&mut self, core: &mut Core<V>) -> bool {
self.update_window_size();
self.render(core);
self.canvas.draw(&self.display, self.zoom);
self.process_events(core)
}
/// Return an image for the current contents of the screen.
pub fn screenshot(&self) -> ImageBuffer { self.canvas.screenshot() }
}
/// Type for key events not handled by Vitral.
#[derive(Debug)]
pub struct KeyEvent {
/// Was the key pressed or released
pub state: glutin::ElementState,
/// Layout-dependent keycode
pub virtual_keycode: Option<glutin::VirtualKeyCode>,
/// Keyboard layout independent hardware scancode for the key
pub scancode: u8,
}
/// Shader program for the `DefaultVertex` type
pub const DEFAULT_SHADER: glium::program::SourceCode = glium::program::SourceCode {
vertex_shader: "
#version 150 core
uniform mat4 matrix;
in vec2 pos;
in vec4 color;
in vec2 tex_coord;
out vec4 v_color;
out vec2 v_tex_coord;
void main() {
gl_Position = vec4(pos, 0.0, 1.0) * matrix;
v_color = color;
v_tex_coord = tex_coord;
}",
fragment_shader: "
#version 150 core
uniform sampler2D tex;
in vec4 v_color;
in vec2 v_tex_coord;
out vec4 f_color;
void main() {
vec4 tex_color = texture(tex, v_tex_coord);
// Discard fully transparent pixels to keep them from
// writing into the depth buffer.
if (tex_color.a == 0.0) discard;
f_color = v_color * tex_color;
}",
tessellation_control_shader: None,
tessellation_evaluation_shader: None,
geometry_shader: None,
};
/// A regular vertex that implements exactly the fields used by Vitral.
#[derive(Copy, Clone)]
pub struct DefaultVertex {
/// 2D position
pub pos: [f32; 2],
/// Texture coordinates
pub tex_coord: [f32; 2],
/// RGBA color
pub color: Color,
}
implement_vertex!(DefaultVertex, pos, tex_coord, color);
impl Vertex for DefaultVertex {
fn new(pos: Point2D<f32>, tex_coord: Point2D<f32>, color: Color) -> Self {
DefaultVertex {
pos: [pos.x, pos.y],
tex_coord: [tex_coord.x, tex_coord.y],
color,
}
}
}
/// A deferred rendering buffer for pixel-perfect display.
struct Canvas {
size: Size2D<u32>,
buffer: glium::texture::SrgbTexture2d,
depth_buffer: glium::framebuffer::DepthRenderBuffer,
shader: glium::Program,
}
impl Canvas {
pub fn new(display: &glium::Display, width: u32, height: u32) -> Canvas {
let shader = program!(
display,
150 => {
vertex: "
#version 150 core
in vec2 pos;
in vec2 tex_coord;
out vec2 v_tex_coord;
void main() {
v_tex_coord = tex_coord;
gl_Position = vec4(pos, 0.0, 1.0);
}",
fragment: "
#version 150 core
uniform sampler2D tex;
in vec2 v_tex_coord;
out vec4 f_color;
void main() {
vec4 tex_color = texture(tex, v_tex_coord);
tex_color.a = 1.0;
f_color = tex_color;
}"})
.unwrap();
let buffer = glium::texture::SrgbTexture2d::empty(display, width, height).unwrap();
let depth_buffer = glium::framebuffer::DepthRenderBuffer::new(
display,
glium::texture::DepthFormat::F32,
width,
height,
).unwrap();
Canvas {
size: Size2D::new(width, height),
buffer,
depth_buffer,
shader,
}
}
/// Get the render target to the pixel-perfect framebuffer.
pub fn get_framebuffer_target(
&mut self,
display: &glium::Display,
) -> glium::framebuffer::SimpleFrameBuffer {
glium::framebuffer::SimpleFrameBuffer::with_depth_buffer(
display,
&self.buffer,
&self.depth_buffer,
).unwrap()
}
pub fn draw(&mut self, display: &glium::Display, zoom: CanvasZoom) {
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 0.0);
let (w, h) = get_size(display);
// Build the geometry for the on-screen rectangle.
let s_rect = zoom.fit_canvas(Size2D::new(w, h), self.size);
let (sx, sy) = (s_rect.origin.x, s_rect.origin.y);
let (sw, sh) = (s_rect.size.width, s_rect.size.height);
// XXX: This could use glium::Surface::blit_whole_color_to instead of
// the handmade blitting, but that was buggy on Windows around
// 2015-03.
let vertices = {
#[derive(Copy, Clone)]
struct BlitVertex {
pos: [f32; 2],
tex_coord: [f32; 2],
}
implement_vertex!(BlitVertex, pos, tex_coord);
glium::VertexBuffer::new(
display,
&[
BlitVertex {
pos: [sx, sy],
tex_coord: [0.0, 0.0],
},
BlitVertex { | BlitVertex {
pos: [sx + sw, sy + sh],
tex_coord: [1.0, 1.0],
},
BlitVertex {
pos: [sx, sy + sh],
tex_coord: [0.0, 1.0],
},
],
).unwrap()
};
let indices = glium::IndexBuffer::new(
display,
glium::index::PrimitiveType::TrianglesList,
&[0u16, 1, 2, 0, 2, 3],
).unwrap();
// Set up the rest of the draw parameters.
let mut params: glium::DrawParameters = Default::default();
// Set an explicit viewport to apply the custom resolution that fixes
// pixel perfect rounding errors.
params.viewport = Some(glium::Rect {
left: 0,
bottom: 0,
width: w,
height: h,
});
// TODO: Option to use smooth filter & non-pixel-perfect scaling
let mag_filter = glium::uniforms::MagnifySamplerFilter::Nearest;
let uniforms = glium::uniforms::UniformsStorage::new(
"tex",
glium::uniforms::Sampler(
&self.buffer,
glium::uniforms::SamplerBehavior {
magnify_filter: mag_filter,
minify_filter: glium::uniforms::MinifySamplerFilter::Linear,
..Default::default()
},
),
);
// Draw the graphics buffer to the window.
target
.draw(&vertices, &indices, &self.shader, &uniforms, ¶ms)
.unwrap();
target.finish().unwrap();
}
pub fn size(&self) -> Size2D<u32> { self.size }
pub fn screenshot(&self) -> ImageBuffer {
let image: glium::texture::RawImage2d<u8> = self.buffer.read();
ImageBuffer::from_fn(image.width, image.height, |x, y| {
let i = (x * 4 + (image.height - y - 1) * image.width * 4) as usize;
image.data[i] as u32
+ ((image.data[i + 1] as u32) << 8)
+ ((image.data[i + 2] as u32) << 16)
+ ((image.data[i + 3] as u32) << 24)
})
}
}
fn get_size(display: &glium::Display) -> (u32, u32) {
let size = display
.gl_window()
.get_inner_size()
.unwrap_or(LogicalSize::new(800.0, 600.0))
.to_physical(display.gl_window().get_hidpi_factor());
(size.width as u32, size.height as u32)
} | pos: [sx + sw, sy],
tex_coord: [1.0, 0.0],
}, | random_line_split |
glium_backend.rs | //! Glium-based backend for the Vitral GUI library.
#![deny(missing_docs)]
use euclid::{Point2D, Size2D};
use glium::glutin::dpi::{LogicalPosition, LogicalSize};
use glium::glutin::{self, Event, WindowEvent};
use glium::index::PrimitiveType;
use glium::{self, Surface};
use std::error::Error;
use std::fmt::Debug;
use std::hash::Hash;
use {
AtlasCache, CanvasZoom, Color, Core, ImageBuffer, Keycode, MouseButton, TextureIndex, Vertex,
};
/// Default texture type used by the backend.
type GliumTexture = glium::texture::SrgbTexture2d;
/// Glium-rendering backend for Vitral.
pub struct Backend<V> {
display: glium::Display,
events: glutin::EventsLoop,
program: glium::Program,
textures: Vec<GliumTexture>,
keypress: Vec<KeyEvent>,
canvas: Canvas,
zoom: CanvasZoom,
window_size: Size2D<u32>,
phantom: ::std::marker::PhantomData<V>,
}
impl<V: glium::Vertex + Vertex> Backend<V> {
/// Create a new Glium backend for Vitral.
///
/// The backend requires an user-supplied vertex type as a type parameter and a shader program
/// to render data of that type as argument to the constructor.
pub fn new(
display: glium::Display,
events: glutin::EventsLoop,
program: glium::Program,
width: u32,
height: u32,
) -> Backend<V> {
let (w, h) = get_size(&display);
let canvas = Canvas::new(&display, width, height);
Backend {
display,
events,
program,
textures: Vec::new(),
keypress: Vec::new(),
canvas,
zoom: CanvasZoom::PixelPerfect,
window_size: Size2D::new(w, h),
phantom: ::std::marker::PhantomData,
}
}
/// Open a Glium window and start a backend for it.
///
/// The custom shader must support a uniform named `tex` for texture data.
pub fn start<'a, S, P>(
width: u32,
height: u32,
title: S,
shader: P,
) -> Result<Backend<V>, Box<Error>>
where
S: Into<String>,
P: Into<glium::program::ProgramCreationInput<'a>>,
{
let events = glutin::EventsLoop::new();
let window = glutin::WindowBuilder::new().with_title(title);
let context = glutin::ContextBuilder::new()
.with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2)));
let display = glium::Display::new(window, context, &events)?;
let program = glium::Program::new(&display, shader.into())?;
{
// Start the window as a good fit on the primary monitor.
// Don't make it a completely fullscreen window, that might put the window title bar
// outside the screen.
const BUFFER: u32 = 8;
let monitor_size = display
.gl_window()
.window()
.get_primary_monitor()
.get_dimensions();
let monitor_size = Size2D::new(monitor_size.width as u32, monitor_size.height as u32);
let mut dim = Size2D::new(width, height);
while dim.width + width <= monitor_size.width - BUFFER
&& dim.height + height <= monitor_size.height - BUFFER
{
dim.width += width;
dim.height += height;
}
display
.gl_window()
.set_inner_size(LogicalSize::new(dim.width as f64, dim.height as f64));
display.gl_window().set_position(LogicalPosition::new(
(monitor_size.width - dim.width) as f64 / 2.0,
(monitor_size.height - dim.height) as f64 / 2.0,
));
}
Ok(Backend::new(display, events, program, width, height))
}
/// Return the pixel resolution of the backend.
///
/// Note that this is the logical size which will stay the same even when the
/// desktop window is resized.
pub fn canvas_size(&self) -> Size2D<u32> { self.canvas.size }
/// Return the current number of textures.
pub fn texture_count(&self) -> usize { self.textures.len() }
/// Make a new empty internal texture.
///
/// The new `TextureIndex` must equal the value `self.texture_count()` would have returned
/// just before calling this.
pub fn make_empty_texture(&mut self, width: u32, height: u32) -> TextureIndex {
let tex = glium::texture::SrgbTexture2d::empty(&self.display, width, height).unwrap();
self.textures.push(tex);
self.textures.len() - 1
}
/// Rewrite an internal texture.
pub fn write_to_texture(&mut self, img: &ImageBuffer, texture: TextureIndex) |
/// Make a new internal texture using image data.
pub fn make_texture(&mut self, img: ImageBuffer) -> TextureIndex {
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels,
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
let tex = glium::texture::SrgbTexture2d::new(&self.display, raw).unwrap();
self.textures.push(tex);
self.textures.len() - 1
}
/// Update or construct textures based on changes in atlas cache.
pub fn sync_with_atlas_cache<T: Eq + Hash + Clone + Debug>(
&mut self,
atlas_cache: &mut AtlasCache<T>,
) {
for a in atlas_cache.atlases_mut() {
let idx = a.texture();
// If there are sheets in the atlas that don't have corresponding textures yet,
// construct those now.
while idx >= self.texture_count() {
self.make_empty_texture(a.size().width, a.size().height);
}
// Write the updated texture atlas to internal texture.
a.update_texture(|buf, idx| self.write_to_texture(buf, idx));
}
}
fn process_events(&mut self, core: &mut Core<V>) -> bool {
self.keypress.clear();
// polling and handling the events received by the window
let mut event_list = Vec::new();
self.events.poll_events(|event| event_list.push(event));
for e in event_list {
match e {
Event::WindowEvent {
ref event,
window_id,
}
if window_id == self.display.gl_window().id() =>
{
match event {
&WindowEvent::CloseRequested => return false,
&WindowEvent::CursorMoved { position, .. } => {
let position =
position.to_physical(self.display.gl_window().get_hidpi_factor());
let pos = self.zoom.screen_to_canvas(
self.window_size,
self.canvas.size(),
Point2D::new(position.x as f32, position.y as f32),
);
core.input_mouse_move(pos.x as i32, pos.y as i32);
}
&WindowEvent::MouseInput { state, button, .. } => core.input_mouse_button(
match button {
glutin::MouseButton::Left => MouseButton::Left,
glutin::MouseButton::Right => MouseButton::Right,
_ => MouseButton::Middle,
},
state == glutin::ElementState::Pressed,
),
&WindowEvent::ReceivedCharacter(c) => core.input_char(c),
&WindowEvent::KeyboardInput {
input:
glutin::KeyboardInput {
state,
scancode,
virtual_keycode,
..
},
..
} => {
self.keypress.push(KeyEvent {
state,
scancode: scancode as u8,
virtual_keycode,
});
let is_down = state == glutin::ElementState::Pressed;
use glium::glutin::VirtualKeyCode::*;
if let Some(vk) = match virtual_keycode {
Some(Tab) => Some(Keycode::Tab),
Some(LShift) | Some(RShift) => Some(Keycode::Shift),
Some(LControl) | Some(RControl) => Some(Keycode::Ctrl),
Some(NumpadEnter) | Some(Return) => Some(Keycode::Enter),
Some(Back) => Some(Keycode::Backspace),
Some(Delete) => Some(Keycode::Del),
Some(Numpad8) | Some(Up) => Some(Keycode::Up),
Some(Numpad2) | Some(Down) => Some(Keycode::Down),
Some(Numpad4) | Some(Left) => Some(Keycode::Left),
Some(Numpad6) | Some(Right) => Some(Keycode::Right),
_ => None,
} {
core.input_key_state(vk, is_down);
}
}
_ => (),
}
}
// Events in other windows, ignore
Event::WindowEvent { .. } => {}
Event::Awakened => {
// TODO: Suspend/awaken behavior
}
Event::DeviceEvent { .. } => {}
Event::Suspended(_) => {}
}
}
true
}
/// Return the next keypress event if there is one.
pub fn poll_key(&mut self) -> Option<KeyEvent> { self.keypress.pop() }
fn render(&mut self, core: &mut Core<V>) {
let mut target = self.canvas.get_framebuffer_target(&self.display);
target.clear_color(0.0, 0.0, 0.0, 0.0);
let (w, h) = target.get_dimensions();
for batch in core.end_frame() {
// building the uniforms
let uniforms = uniform! {
matrix: [
[2.0 / w as f32, 0.0, 0.0, -1.0],
[0.0, -2.0 / h as f32, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0f32]
],
tex: glium::uniforms::Sampler::new(&self.textures[batch.texture])
.magnify_filter(glium::uniforms::MagnifySamplerFilter::Nearest),
};
let vertex_buffer =
{ glium::VertexBuffer::new(&self.display, &batch.vertices).unwrap() };
// building the index buffer
let index_buffer = glium::IndexBuffer::new(
&self.display,
PrimitiveType::TrianglesList,
&batch.triangle_indices,
).unwrap();
let params = glium::draw_parameters::DrawParameters {
scissor: batch.clip.map(|clip| glium::Rect {
left: clip.origin.x as u32,
bottom: h - (clip.origin.y + clip.size.height) as u32,
width: clip.size.width as u32,
height: clip.size.height as u32,
}),
blend: glium::Blend::alpha_blending(),
..Default::default()
};
target
.draw(
&vertex_buffer,
&index_buffer,
&self.program,
&uniforms,
¶ms,
)
.unwrap();
}
}
fn update_window_size(&mut self) {
let (w, h) = get_size(&self.display);
self.window_size = Size2D::new(w, h);
}
/// Display the backend and read input events.
pub fn update(&mut self, core: &mut Core<V>) -> bool {
self.update_window_size();
self.render(core);
self.canvas.draw(&self.display, self.zoom);
self.process_events(core)
}
/// Return an image for the current contents of the screen.
pub fn screenshot(&self) -> ImageBuffer { self.canvas.screenshot() }
}
/// Type for key events not handled by Vitral.
#[derive(Debug)]
pub struct KeyEvent {
/// Was the key pressed or released
pub state: glutin::ElementState,
/// Layout-dependent keycode
pub virtual_keycode: Option<glutin::VirtualKeyCode>,
/// Keyboard layout independent hardware scancode for the key
pub scancode: u8,
}
/// Shader program for the `DefaultVertex` type
pub const DEFAULT_SHADER: glium::program::SourceCode = glium::program::SourceCode {
vertex_shader: "
#version 150 core
uniform mat4 matrix;
in vec2 pos;
in vec4 color;
in vec2 tex_coord;
out vec4 v_color;
out vec2 v_tex_coord;
void main() {
gl_Position = vec4(pos, 0.0, 1.0) * matrix;
v_color = color;
v_tex_coord = tex_coord;
}",
fragment_shader: "
#version 150 core
uniform sampler2D tex;
in vec4 v_color;
in vec2 v_tex_coord;
out vec4 f_color;
void main() {
vec4 tex_color = texture(tex, v_tex_coord);
// Discard fully transparent pixels to keep them from
// writing into the depth buffer.
if (tex_color.a == 0.0) discard;
f_color = v_color * tex_color;
}",
tessellation_control_shader: None,
tessellation_evaluation_shader: None,
geometry_shader: None,
};
/// A regular vertex that implements exactly the fields used by Vitral.
#[derive(Copy, Clone)]
pub struct DefaultVertex {
/// 2D position
pub pos: [f32; 2],
/// Texture coordinates
pub tex_coord: [f32; 2],
/// RGBA color
pub color: Color,
}
implement_vertex!(DefaultVertex, pos, tex_coord, color);
impl Vertex for DefaultVertex {
fn new(pos: Point2D<f32>, tex_coord: Point2D<f32>, color: Color) -> Self {
DefaultVertex {
pos: [pos.x, pos.y],
tex_coord: [tex_coord.x, tex_coord.y],
color,
}
}
}
/// A deferred rendering buffer for pixel-perfect display.
struct Canvas {
size: Size2D<u32>,
buffer: glium::texture::SrgbTexture2d,
depth_buffer: glium::framebuffer::DepthRenderBuffer,
shader: glium::Program,
}
impl Canvas {
pub fn new(display: &glium::Display, width: u32, height: u32) -> Canvas {
let shader = program!(
display,
150 => {
vertex: "
#version 150 core
in vec2 pos;
in vec2 tex_coord;
out vec2 v_tex_coord;
void main() {
v_tex_coord = tex_coord;
gl_Position = vec4(pos, 0.0, 1.0);
}",
fragment: "
#version 150 core
uniform sampler2D tex;
in vec2 v_tex_coord;
out vec4 f_color;
void main() {
vec4 tex_color = texture(tex, v_tex_coord);
tex_color.a = 1.0;
f_color = tex_color;
}"})
.unwrap();
let buffer = glium::texture::SrgbTexture2d::empty(display, width, height).unwrap();
let depth_buffer = glium::framebuffer::DepthRenderBuffer::new(
display,
glium::texture::DepthFormat::F32,
width,
height,
).unwrap();
Canvas {
size: Size2D::new(width, height),
buffer,
depth_buffer,
shader,
}
}
/// Get the render target to the pixel-perfect framebuffer.
pub fn get_framebuffer_target(
&mut self,
display: &glium::Display,
) -> glium::framebuffer::SimpleFrameBuffer {
glium::framebuffer::SimpleFrameBuffer::with_depth_buffer(
display,
&self.buffer,
&self.depth_buffer,
).unwrap()
}
pub fn draw(&mut self, display: &glium::Display, zoom: CanvasZoom) {
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 0.0);
let (w, h) = get_size(display);
// Build the geometry for the on-screen rectangle.
let s_rect = zoom.fit_canvas(Size2D::new(w, h), self.size);
let (sx, sy) = (s_rect.origin.x, s_rect.origin.y);
let (sw, sh) = (s_rect.size.width, s_rect.size.height);
// XXX: This could use glium::Surface::blit_whole_color_to instead of
// the handmade blitting, but that was buggy on Windows around
// 2015-03.
let vertices = {
#[derive(Copy, Clone)]
struct BlitVertex {
pos: [f32; 2],
tex_coord: [f32; 2],
}
implement_vertex!(BlitVertex, pos, tex_coord);
glium::VertexBuffer::new(
display,
&[
BlitVertex {
pos: [sx, sy],
tex_coord: [0.0, 0.0],
},
BlitVertex {
pos: [sx + sw, sy],
tex_coord: [1.0, 0.0],
},
BlitVertex {
pos: [sx + sw, sy + sh],
tex_coord: [1.0, 1.0],
},
BlitVertex {
pos: [sx, sy + sh],
tex_coord: [0.0, 1.0],
},
],
).unwrap()
};
let indices = glium::IndexBuffer::new(
display,
glium::index::PrimitiveType::TrianglesList,
&[0u16, 1, 2, 0, 2, 3],
).unwrap();
// Set up the rest of the draw parameters.
let mut params: glium::DrawParameters = Default::default();
// Set an explicit viewport to apply the custom resolution that fixes
// pixel perfect rounding errors.
params.viewport = Some(glium::Rect {
left: 0,
bottom: 0,
width: w,
height: h,
});
// TODO: Option to use smooth filter & non-pixel-perfect scaling
let mag_filter = glium::uniforms::MagnifySamplerFilter::Nearest;
let uniforms = glium::uniforms::UniformsStorage::new(
"tex",
glium::uniforms::Sampler(
&self.buffer,
glium::uniforms::SamplerBehavior {
magnify_filter: mag_filter,
minify_filter: glium::uniforms::MinifySamplerFilter::Linear,
..Default::default()
},
),
);
// Draw the graphics buffer to the window.
target
.draw(&vertices, &indices, &self.shader, &uniforms, ¶ms)
.unwrap();
target.finish().unwrap();
}
pub fn size(&self) -> Size2D<u32> { self.size }
pub fn screenshot(&self) -> ImageBuffer {
let image: glium::texture::RawImage2d<u8> = self.buffer.read();
ImageBuffer::from_fn(image.width, image.height, |x, y| {
let i = (x * 4 + (image.height - y - 1) * image.width * 4) as usize;
image.data[i] as u32
+ ((image.data[i + 1] as u32) << 8)
+ ((image.data[i + 2] as u32) << 16)
+ ((image.data[i + 3] as u32) << 24)
})
}
}
fn get_size(display: &glium::Display) -> (u32, u32) {
let size = display
.gl_window()
.get_inner_size()
.unwrap_or(LogicalSize::new(800.0, 600.0))
.to_physical(display.gl_window().get_hidpi_factor());
(size.width as u32, size.height as u32)
}
| {
assert!(
texture < self.textures.len(),
"Trying to write nonexistent texture"
);
let rect = glium::Rect {
left: 0,
bottom: 0,
width: img.size.width,
height: img.size.height,
};
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels.clone(),
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
self.textures[texture].write(rect, raw);
} | identifier_body |
project_util.go | // Copyright 2018 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package projutil
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
homedir "github.com/mitchellh/go-homedir"
"github.com/rogpeppe/go-internal/modfile"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
kbutil "github.com/operator-framework/operator-sdk/internal/util/kubebuilder"
)
const (
GoPathEnv = "GOPATH"
GoFlagsEnv = "GOFLAGS"
GoModEnv = "GO111MODULE"
SrcDir = "src"
fsep = string(filepath.Separator)
mainFile = "main.go"
managerMainFile = "cmd" + fsep + "manager" + fsep + mainFile
buildDockerfile = "build" + fsep + "Dockerfile"
rolesDir = "roles"
requirementsFile = "requirements.yml"
moleculeDir = "molecule"
helmChartsDir = "helm-charts"
goModFile = "go.mod"
defaultPermission = 0644
noticeColor = "\033[1;36m%s\033[0m"
)
// OperatorType - the type of operator
type OperatorType = string
const (
// OperatorTypeGo - golang type of operator.
OperatorTypeGo OperatorType = "go"
// OperatorTypeAnsible - ansible type of operator.
OperatorTypeAnsible OperatorType = "ansible"
// OperatorTypeHelm - helm type of operator.
OperatorTypeHelm OperatorType = "helm"
// OperatorTypeUnknown - unknown type of operator.
OperatorTypeUnknown OperatorType = "unknown"
)
type ErrUnknownOperatorType struct {
Type string
}
func (e ErrUnknownOperatorType) Error() string {
if e.Type == "" {
return "unknown operator type"
}
return fmt.Sprintf(`unknown operator type "%v"`, e.Type)
}
// MustInProjectRoot checks if the current dir is the project root, and exits
// if not.
func MustInProjectRoot() {
if err := CheckProjectRoot(); err != nil {
log.Fatal(err)
}
}
// CheckProjectRoot checks if the current dir is the project root, and returns
// an error if not.
// "build/Dockerfile" may not be present in all projects
// todo: scaffold Project file for Ansible and Helm with the type information
func CheckProjectRoot() error {
if kbutil.HasProjectFile() {
return nil
}
// todo(camilamacedo86): remove the following check when we no longer support the legacy scaffold layout
// If the current directory has a "build/Dockerfile", then it is safe to say
// we are at the project root.
if _, err := os.Stat(buildDockerfile); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("must run command in project root dir: project structure requires %s",
buildDockerfile)
}
return fmt.Errorf("error while checking if current directory is the project root: %v", err)
}
return nil
}
func CheckGoProjectCmd(cmd *cobra.Command) error |
func MustGetwd() string {
wd, err := os.Getwd()
if err != nil {
log.Fatalf("Failed to get working directory: (%v)", err)
}
return wd
}
func getHomeDir() (string, error) {
hd, err := homedir.Dir()
if err != nil {
return "", err
}
return homedir.Expand(hd)
}
// TODO(hasbro17): If this function is called in the subdir of
// a module project it will fail to parse go.mod and return
// the correct import path.
// This needs to be fixed to return the pkg import path for any subdir
// in order for `generate csv` to correctly form pkg imports
// for API pkg paths that are not relative to the root dir.
// This might not be fixable since there is no good way to
// get the project root from inside the subdir of a module project.
//
// GetGoPkg returns the current directory's import path by parsing it from
// wd if this project's repository path is rooted under $GOPATH/src, or
// from go.mod the project uses Go modules to manage dependencies.
// If the project has a go.mod then wd must be the project root.
//
// Example: "github.com/example-inc/app-operator"
func GetGoPkg() string {
// Default to reading from go.mod, as it should usually have the (correct)
// package path, and no further processing need be done on it if so.
if _, err := os.Stat(goModFile); err != nil && !os.IsNotExist(err) {
log.Fatalf("Failed to read go.mod: %v", err)
} else if err == nil {
b, err := ioutil.ReadFile(goModFile)
if err != nil {
log.Fatalf("Read go.mod: %v", err)
}
mf, err := modfile.Parse(goModFile, b, nil)
if err != nil {
log.Fatalf("Parse go.mod: %v", err)
}
if mf.Module != nil && mf.Module.Mod.Path != "" {
return mf.Module.Mod.Path
}
}
// Then try parsing package path from $GOPATH (set env or default).
goPath, ok := os.LookupEnv(GoPathEnv)
if !ok || goPath == "" {
hd, err := getHomeDir()
if err != nil {
log.Fatal(err)
}
goPath = filepath.Join(hd, "go", "src")
} else {
// MustSetWdGopath is necessary here because the user has set GOPATH,
// which could be a path list.
goPath = MustSetWdGopath(goPath)
}
if !strings.HasPrefix(MustGetwd(), goPath) {
log.Fatal("Could not determine project repository path: $GOPATH not set, wd in default $HOME/go/src," +
" or wd does not contain a go.mod")
}
return parseGoPkg(goPath)
}
func parseGoPkg(gopath string) string {
goSrc := filepath.Join(gopath, SrcDir)
wd := MustGetwd()
pathedPkg := strings.Replace(wd, goSrc, "", 1)
// Make sure package only contains the "/" separator and no others, and
// trim any leading/trailing "/".
return strings.Trim(filepath.ToSlash(pathedPkg), "/")
}
// GetOperatorType returns type of operator is in cwd.
// This function should be called after verifying the user is in project root.
func GetOperatorType() OperatorType {
switch {
case IsOperatorGo():
return OperatorTypeGo
case IsOperatorAnsible():
return OperatorTypeAnsible
case IsOperatorHelm():
return OperatorTypeHelm
}
return OperatorTypeUnknown
}
func IsOperatorGo() bool {
// todo: in the future we should check the plugin prefix to ensure the operator type
// for now, we can assume that any project with the kubebuilder layout is Go Type
if kbutil.HasProjectFile() {
return true
}
// todo: remove the following code when the legacy layout is no longer supported
// we can check it using the Project File
_, err := os.Stat(managerMainFile)
if err == nil || os.IsExist(err) {
return true
}
// Aware of an alternative location for main.go.
_, err = os.Stat(mainFile)
return err == nil || os.IsExist(err)
}
func IsOperatorAnsible() bool {
stat, err := os.Stat(rolesDir)
if (err == nil && stat.IsDir()) || os.IsExist(err) {
return true
}
stat, err = os.Stat(moleculeDir)
if (err == nil && stat.IsDir()) || os.IsExist(err) {
return true
}
_, err = os.Stat(requirementsFile)
return err == nil || os.IsExist(err)
}
func IsOperatorHelm() bool {
stat, err := os.Stat(helmChartsDir)
return (err == nil && stat.IsDir()) || os.IsExist(err)
}
// MustGetGopath gets GOPATH and ensures it is set and non-empty. If GOPATH
// is not set or empty, MustGetGopath exits.
func MustGetGopath() string {
gopath, ok := os.LookupEnv(GoPathEnv)
if !ok || len(gopath) == 0 {
log.Fatal("GOPATH env not set")
}
return gopath
}
// MustSetWdGopath sets GOPATH to the first element of the path list in
// currentGopath that prefixes the wd, then returns the set path.
// If GOPATH cannot be set, MustSetWdGopath exits.
func MustSetWdGopath(currentGopath string) string {
var (
newGopath string
cwdInGopath bool
wd = MustGetwd()
)
for _, newGopath = range filepath.SplitList(currentGopath) {
if strings.HasPrefix(filepath.Dir(wd), newGopath) {
cwdInGopath = true
break
}
}
if !cwdInGopath {
log.Fatalf("Project not in $GOPATH")
}
if err := os.Setenv(GoPathEnv, newGopath); err != nil {
log.Fatal(err)
}
return newGopath
}
var flagRe = regexp.MustCompile("(.* )?-v(.* )?")
// SetGoVerbose sets GOFLAGS="${GOFLAGS} -v" if GOFLAGS does not
// already contain "-v" to make "go" command output verbose.
func SetGoVerbose() error {
gf, ok := os.LookupEnv(GoFlagsEnv)
if !ok || len(gf) == 0 {
return os.Setenv(GoFlagsEnv, "-v")
}
if !flagRe.MatchString(gf) {
return os.Setenv(GoFlagsEnv, gf+" -v")
}
return nil
}
// CheckRepo ensures dependency manager type and repo are being used in combination
// correctly, as different dependency managers have different Go environment
// requirements.
func CheckRepo(repo string) error {
inGopathSrc, err := WdInGoPathSrc()
if err != nil {
return err
}
if !inGopathSrc && repo == "" {
return fmt.Errorf(`flag --repo must be set if the working directory is not in $GOPATH/src.
See "operator-sdk new -h"`)
}
return nil
}
// CheckGoModules ensures that go modules are enabled.
func CheckGoModules() error {
goModOn, err := GoModOn()
if err != nil {
return err
}
if !goModOn {
return fmt.Errorf(`using go modules requires GO111MODULE="on", "auto", or unset.` +
` More info: https://sdk.operatorframework.io/docs/golang/quickstart/#a-note-on-dependency-management`)
}
return nil
}
// PrintDeprecationWarning prints a colored warning wrapping msg to the terminal.
func PrintDeprecationWarning(msg string) {
fmt.Printf(noticeColor, "[Deprecation Notice] "+msg+". Refer to the version upgrade guide "+
"for more information: https://sdk.operatorframework.io/docs/migration/version-upgrade-guide/\n\n")
}
// RewriteFileContents adds the provided content before the last occurrence of the word label
// and rewrites the file with the new content.
func RewriteFileContents(filename, instruction, content string) error {
text, err := ioutil.ReadFile(filename)
if err != nil {
return fmt.Errorf("error in getting contents from the file, %v", err)
}
existingContent := string(text)
modifiedContent, err := appendContent(existingContent, instruction, content)
if err != nil {
return err
}
err = ioutil.WriteFile(filename, []byte(modifiedContent), defaultPermission)
if err != nil {
return fmt.Errorf("error writing modified contents to file, %v", err)
}
return nil
}
func appendContent(fileContents, instruction, content string) (string, error) {
labelIndex := strings.LastIndex(fileContents, instruction)
if labelIndex == -1 {
return "", fmt.Errorf("instruction not present previously in dockerfile")
}
separationIndex := strings.Index(fileContents[labelIndex:], "\n")
if separationIndex == -1 {
return "", fmt.Errorf("no new line at the end of dockerfile command %s", fileContents[labelIndex:])
}
index := labelIndex + separationIndex + 1
newContent := fileContents[:index] + content + fileContents[index:]
return newContent, nil
}
| {
if IsOperatorGo() {
return nil
}
return fmt.Errorf("'%s' can only be run for Go operators; %s or %s do not exist",
cmd.CommandPath(), managerMainFile, mainFile)
} | identifier_body |
project_util.go | // Copyright 2018 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package projutil
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
homedir "github.com/mitchellh/go-homedir"
"github.com/rogpeppe/go-internal/modfile"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
kbutil "github.com/operator-framework/operator-sdk/internal/util/kubebuilder"
)
const (
GoPathEnv = "GOPATH"
GoFlagsEnv = "GOFLAGS"
GoModEnv = "GO111MODULE"
SrcDir = "src"
fsep = string(filepath.Separator)
mainFile = "main.go"
managerMainFile = "cmd" + fsep + "manager" + fsep + mainFile
buildDockerfile = "build" + fsep + "Dockerfile"
rolesDir = "roles"
requirementsFile = "requirements.yml"
moleculeDir = "molecule"
helmChartsDir = "helm-charts"
goModFile = "go.mod"
defaultPermission = 0644
noticeColor = "\033[1;36m%s\033[0m"
)
// OperatorType - the type of operator
type OperatorType = string
const (
// OperatorTypeGo - golang type of operator.
OperatorTypeGo OperatorType = "go"
// OperatorTypeAnsible - ansible type of operator.
OperatorTypeAnsible OperatorType = "ansible"
// OperatorTypeHelm - helm type of operator.
OperatorTypeHelm OperatorType = "helm"
// OperatorTypeUnknown - unknown type of operator.
OperatorTypeUnknown OperatorType = "unknown"
)
type ErrUnknownOperatorType struct {
Type string
}
func (e ErrUnknownOperatorType) Error() string {
if e.Type == "" {
return "unknown operator type"
}
return fmt.Sprintf(`unknown operator type "%v"`, e.Type)
}
// MustInProjectRoot checks if the current dir is the project root, and exits
// if not.
func MustInProjectRoot() {
if err := CheckProjectRoot(); err != nil {
log.Fatal(err)
}
}
// CheckProjectRoot checks if the current dir is the project root, and returns
// an error if not.
// "build/Dockerfile" may not be present in all projects
// todo: scaffold Project file for Ansible and Helm with the type information
func CheckProjectRoot() error {
if kbutil.HasProjectFile() {
return nil
}
// todo(camilamacedo86): remove the following check when we no longer support the legacy scaffold layout
// If the current directory has a "build/Dockerfile", then it is safe to say
// we are at the project root.
if _, err := os.Stat(buildDockerfile); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("must run command in project root dir: project structure requires %s",
buildDockerfile)
}
return fmt.Errorf("error while checking if current directory is the project root: %v", err)
}
return nil
}
func CheckGoProjectCmd(cmd *cobra.Command) error {
if IsOperatorGo() {
return nil
}
return fmt.Errorf("'%s' can only be run for Go operators; %s or %s do not exist",
cmd.CommandPath(), managerMainFile, mainFile)
}
func MustGetwd() string {
wd, err := os.Getwd()
if err != nil {
log.Fatalf("Failed to get working directory: (%v)", err)
}
return wd
}
func getHomeDir() (string, error) {
hd, err := homedir.Dir()
if err != nil {
return "", err
}
return homedir.Expand(hd)
}
// TODO(hasbro17): If this function is called in the subdir of
// a module project it will fail to parse go.mod and return
// the correct import path.
// This needs to be fixed to return the pkg import path for any subdir
// in order for `generate csv` to correctly form pkg imports
// for API pkg paths that are not relative to the root dir.
// This might not be fixable since there is no good way to
// get the project root from inside the subdir of a module project.
//
// GetGoPkg returns the current directory's import path by parsing it from
// wd if this project's repository path is rooted under $GOPATH/src, or
// from go.mod the project uses Go modules to manage dependencies.
// If the project has a go.mod then wd must be the project root.
//
// Example: "github.com/example-inc/app-operator"
func GetGoPkg() string {
// Default to reading from go.mod, as it should usually have the (correct)
// package path, and no further processing need be done on it if so.
if _, err := os.Stat(goModFile); err != nil && !os.IsNotExist(err) {
log.Fatalf("Failed to read go.mod: %v", err)
} else if err == nil {
b, err := ioutil.ReadFile(goModFile)
if err != nil {
log.Fatalf("Read go.mod: %v", err)
}
mf, err := modfile.Parse(goModFile, b, nil)
if err != nil {
log.Fatalf("Parse go.mod: %v", err)
}
if mf.Module != nil && mf.Module.Mod.Path != "" {
return mf.Module.Mod.Path
}
}
// Then try parsing package path from $GOPATH (set env or default).
goPath, ok := os.LookupEnv(GoPathEnv)
if !ok || goPath == "" {
hd, err := getHomeDir()
if err != nil {
log.Fatal(err)
}
goPath = filepath.Join(hd, "go", "src")
} else {
// MustSetWdGopath is necessary here because the user has set GOPATH,
// which could be a path list.
goPath = MustSetWdGopath(goPath)
}
if !strings.HasPrefix(MustGetwd(), goPath) {
log.Fatal("Could not determine project repository path: $GOPATH not set, wd in default $HOME/go/src," +
" or wd does not contain a go.mod")
}
return parseGoPkg(goPath)
}
func parseGoPkg(gopath string) string {
goSrc := filepath.Join(gopath, SrcDir)
wd := MustGetwd()
pathedPkg := strings.Replace(wd, goSrc, "", 1)
// Make sure package only contains the "/" separator and no others, and
// trim any leading/trailing "/".
return strings.Trim(filepath.ToSlash(pathedPkg), "/")
}
// GetOperatorType returns type of operator is in cwd.
// This function should be called after verifying the user is in project root.
func GetOperatorType() OperatorType {
switch {
case IsOperatorGo():
return OperatorTypeGo
case IsOperatorAnsible():
return OperatorTypeAnsible
case IsOperatorHelm():
return OperatorTypeHelm
}
return OperatorTypeUnknown
}
func IsOperatorGo() bool {
// todo: in the future we should check the plugin prefix to ensure the operator type
// for now, we can assume that any project with the kubebuilder layout is Go Type
if kbutil.HasProjectFile() {
return true
}
// todo: remove the following code when the legacy layout is no longer supported
// we can check it using the Project File
_, err := os.Stat(managerMainFile)
if err == nil || os.IsExist(err) {
return true
}
// Aware of an alternative location for main.go.
_, err = os.Stat(mainFile)
return err == nil || os.IsExist(err)
}
func IsOperatorAnsible() bool {
stat, err := os.Stat(rolesDir)
if (err == nil && stat.IsDir()) || os.IsExist(err) {
return true
}
stat, err = os.Stat(moleculeDir)
if (err == nil && stat.IsDir()) || os.IsExist(err) {
return true
}
_, err = os.Stat(requirementsFile)
return err == nil || os.IsExist(err)
}
func IsOperatorHelm() bool {
stat, err := os.Stat(helmChartsDir)
return (err == nil && stat.IsDir()) || os.IsExist(err)
}
// MustGetGopath gets GOPATH and ensures it is set and non-empty. If GOPATH
// is not set or empty, MustGetGopath exits.
func MustGetGopath() string {
gopath, ok := os.LookupEnv(GoPathEnv)
if !ok || len(gopath) == 0 {
log.Fatal("GOPATH env not set")
}
return gopath
}
// MustSetWdGopath sets GOPATH to the first element of the path list in
// currentGopath that prefixes the wd, then returns the set path. | newGopath string
cwdInGopath bool
wd = MustGetwd()
)
for _, newGopath = range filepath.SplitList(currentGopath) {
if strings.HasPrefix(filepath.Dir(wd), newGopath) {
cwdInGopath = true
break
}
}
if !cwdInGopath {
log.Fatalf("Project not in $GOPATH")
}
if err := os.Setenv(GoPathEnv, newGopath); err != nil {
log.Fatal(err)
}
return newGopath
}
var flagRe = regexp.MustCompile("(.* )?-v(.* )?")
// SetGoVerbose sets GOFLAGS="${GOFLAGS} -v" if GOFLAGS does not
// already contain "-v" to make "go" command output verbose.
func SetGoVerbose() error {
gf, ok := os.LookupEnv(GoFlagsEnv)
if !ok || len(gf) == 0 {
return os.Setenv(GoFlagsEnv, "-v")
}
if !flagRe.MatchString(gf) {
return os.Setenv(GoFlagsEnv, gf+" -v")
}
return nil
}
// CheckRepo ensures dependency manager type and repo are being used in combination
// correctly, as different dependency managers have different Go environment
// requirements.
func CheckRepo(repo string) error {
inGopathSrc, err := WdInGoPathSrc()
if err != nil {
return err
}
if !inGopathSrc && repo == "" {
return fmt.Errorf(`flag --repo must be set if the working directory is not in $GOPATH/src.
See "operator-sdk new -h"`)
}
return nil
}
// CheckGoModules ensures that go modules are enabled.
func CheckGoModules() error {
goModOn, err := GoModOn()
if err != nil {
return err
}
if !goModOn {
return fmt.Errorf(`using go modules requires GO111MODULE="on", "auto", or unset.` +
` More info: https://sdk.operatorframework.io/docs/golang/quickstart/#a-note-on-dependency-management`)
}
return nil
}
// PrintDeprecationWarning prints a colored warning wrapping msg to the terminal.
func PrintDeprecationWarning(msg string) {
fmt.Printf(noticeColor, "[Deprecation Notice] "+msg+". Refer to the version upgrade guide "+
"for more information: https://sdk.operatorframework.io/docs/migration/version-upgrade-guide/\n\n")
}
// RewriteFileContents adds the provided content before the last occurrence of the word label
// and rewrites the file with the new content.
func RewriteFileContents(filename, instruction, content string) error {
text, err := ioutil.ReadFile(filename)
if err != nil {
return fmt.Errorf("error in getting contents from the file, %v", err)
}
existingContent := string(text)
modifiedContent, err := appendContent(existingContent, instruction, content)
if err != nil {
return err
}
err = ioutil.WriteFile(filename, []byte(modifiedContent), defaultPermission)
if err != nil {
return fmt.Errorf("error writing modified contents to file, %v", err)
}
return nil
}
func appendContent(fileContents, instruction, content string) (string, error) {
labelIndex := strings.LastIndex(fileContents, instruction)
if labelIndex == -1 {
return "", fmt.Errorf("instruction not present previously in dockerfile")
}
separationIndex := strings.Index(fileContents[labelIndex:], "\n")
if separationIndex == -1 {
return "", fmt.Errorf("no new line at the end of dockerfile command %s", fileContents[labelIndex:])
}
index := labelIndex + separationIndex + 1
newContent := fileContents[:index] + content + fileContents[index:]
return newContent, nil
} | // If GOPATH cannot be set, MustSetWdGopath exits.
func MustSetWdGopath(currentGopath string) string {
var ( | random_line_split |
project_util.go | // Copyright 2018 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package projutil
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
homedir "github.com/mitchellh/go-homedir"
"github.com/rogpeppe/go-internal/modfile"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
kbutil "github.com/operator-framework/operator-sdk/internal/util/kubebuilder"
)
const (
GoPathEnv = "GOPATH"
GoFlagsEnv = "GOFLAGS"
GoModEnv = "GO111MODULE"
SrcDir = "src"
fsep = string(filepath.Separator)
mainFile = "main.go"
managerMainFile = "cmd" + fsep + "manager" + fsep + mainFile
buildDockerfile = "build" + fsep + "Dockerfile"
rolesDir = "roles"
requirementsFile = "requirements.yml"
moleculeDir = "molecule"
helmChartsDir = "helm-charts"
goModFile = "go.mod"
defaultPermission = 0644
noticeColor = "\033[1;36m%s\033[0m"
)
// OperatorType - the type of operator
type OperatorType = string
const (
// OperatorTypeGo - golang type of operator.
OperatorTypeGo OperatorType = "go"
// OperatorTypeAnsible - ansible type of operator.
OperatorTypeAnsible OperatorType = "ansible"
// OperatorTypeHelm - helm type of operator.
OperatorTypeHelm OperatorType = "helm"
// OperatorTypeUnknown - unknown type of operator.
OperatorTypeUnknown OperatorType = "unknown"
)
type ErrUnknownOperatorType struct {
Type string
}
func (e ErrUnknownOperatorType) Error() string {
if e.Type == "" {
return "unknown operator type"
}
return fmt.Sprintf(`unknown operator type "%v"`, e.Type)
}
// MustInProjectRoot checks if the current dir is the project root, and exits
// if not.
func MustInProjectRoot() {
if err := CheckProjectRoot(); err != nil {
log.Fatal(err)
}
}
// CheckProjectRoot checks if the current dir is the project root, and returns
// an error if not.
// "build/Dockerfile" may not be present in all projects
// todo: scaffold Project file for Ansible and Helm with the type information
func CheckProjectRoot() error {
if kbutil.HasProjectFile() {
return nil
}
// todo(camilamacedo86): remove the following check when we no longer support the legacy scaffold layout
// If the current directory has a "build/Dockerfile", then it is safe to say
// we are at the project root.
if _, err := os.Stat(buildDockerfile); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("must run command in project root dir: project structure requires %s",
buildDockerfile)
}
return fmt.Errorf("error while checking if current directory is the project root: %v", err)
}
return nil
}
func CheckGoProjectCmd(cmd *cobra.Command) error {
if IsOperatorGo() {
return nil
}
return fmt.Errorf("'%s' can only be run for Go operators; %s or %s do not exist",
cmd.CommandPath(), managerMainFile, mainFile)
}
func MustGetwd() string {
wd, err := os.Getwd()
if err != nil {
log.Fatalf("Failed to get working directory: (%v)", err)
}
return wd
}
func getHomeDir() (string, error) {
hd, err := homedir.Dir()
if err != nil {
return "", err
}
return homedir.Expand(hd)
}
// TODO(hasbro17): If this function is called in the subdir of
// a module project it will fail to parse go.mod and return
// the correct import path.
// This needs to be fixed to return the pkg import path for any subdir
// in order for `generate csv` to correctly form pkg imports
// for API pkg paths that are not relative to the root dir.
// This might not be fixable since there is no good way to
// get the project root from inside the subdir of a module project.
//
// GetGoPkg returns the current directory's import path by parsing it from
// wd if this project's repository path is rooted under $GOPATH/src, or
// from go.mod the project uses Go modules to manage dependencies.
// If the project has a go.mod then wd must be the project root.
//
// Example: "github.com/example-inc/app-operator"
func GetGoPkg() string {
// Default to reading from go.mod, as it should usually have the (correct)
// package path, and no further processing need be done on it if so.
if _, err := os.Stat(goModFile); err != nil && !os.IsNotExist(err) {
log.Fatalf("Failed to read go.mod: %v", err)
} else if err == nil {
b, err := ioutil.ReadFile(goModFile)
if err != nil {
log.Fatalf("Read go.mod: %v", err)
}
mf, err := modfile.Parse(goModFile, b, nil)
if err != nil {
log.Fatalf("Parse go.mod: %v", err)
}
if mf.Module != nil && mf.Module.Mod.Path != "" {
return mf.Module.Mod.Path
}
}
// Then try parsing package path from $GOPATH (set env or default).
goPath, ok := os.LookupEnv(GoPathEnv)
if !ok || goPath == "" {
hd, err := getHomeDir()
if err != nil {
log.Fatal(err)
}
goPath = filepath.Join(hd, "go", "src")
} else {
// MustSetWdGopath is necessary here because the user has set GOPATH,
// which could be a path list.
goPath = MustSetWdGopath(goPath)
}
if !strings.HasPrefix(MustGetwd(), goPath) {
log.Fatal("Could not determine project repository path: $GOPATH not set, wd in default $HOME/go/src," +
" or wd does not contain a go.mod")
}
return parseGoPkg(goPath)
}
func parseGoPkg(gopath string) string {
goSrc := filepath.Join(gopath, SrcDir)
wd := MustGetwd()
pathedPkg := strings.Replace(wd, goSrc, "", 1)
// Make sure package only contains the "/" separator and no others, and
// trim any leading/trailing "/".
return strings.Trim(filepath.ToSlash(pathedPkg), "/")
}
// GetOperatorType returns type of operator is in cwd.
// This function should be called after verifying the user is in project root.
func GetOperatorType() OperatorType {
switch {
case IsOperatorGo():
return OperatorTypeGo
case IsOperatorAnsible():
return OperatorTypeAnsible
case IsOperatorHelm():
return OperatorTypeHelm
}
return OperatorTypeUnknown
}
func IsOperatorGo() bool {
// todo: in the future we should check the plugin prefix to ensure the operator type
// for now, we can assume that any project with the kubebuilder layout is Go Type
if kbutil.HasProjectFile() {
return true
}
// todo: remove the following code when the legacy layout is no longer supported
// we can check it using the Project File
_, err := os.Stat(managerMainFile)
if err == nil || os.IsExist(err) {
return true
}
// Aware of an alternative location for main.go.
_, err = os.Stat(mainFile)
return err == nil || os.IsExist(err)
}
func IsOperatorAnsible() bool {
stat, err := os.Stat(rolesDir)
if (err == nil && stat.IsDir()) || os.IsExist(err) {
return true
}
stat, err = os.Stat(moleculeDir)
if (err == nil && stat.IsDir()) || os.IsExist(err) {
return true
}
_, err = os.Stat(requirementsFile)
return err == nil || os.IsExist(err)
}
func IsOperatorHelm() bool {
stat, err := os.Stat(helmChartsDir)
return (err == nil && stat.IsDir()) || os.IsExist(err)
}
// MustGetGopath gets GOPATH and ensures it is set and non-empty. If GOPATH
// is not set or empty, MustGetGopath exits.
func MustGetGopath() string {
gopath, ok := os.LookupEnv(GoPathEnv)
if !ok || len(gopath) == 0 {
log.Fatal("GOPATH env not set")
}
return gopath
}
// MustSetWdGopath sets GOPATH to the first element of the path list in
// currentGopath that prefixes the wd, then returns the set path.
// If GOPATH cannot be set, MustSetWdGopath exits.
func MustSetWdGopath(currentGopath string) string {
var (
newGopath string
cwdInGopath bool
wd = MustGetwd()
)
for _, newGopath = range filepath.SplitList(currentGopath) {
if strings.HasPrefix(filepath.Dir(wd), newGopath) {
cwdInGopath = true
break
}
}
if !cwdInGopath {
log.Fatalf("Project not in $GOPATH")
}
if err := os.Setenv(GoPathEnv, newGopath); err != nil {
log.Fatal(err)
}
return newGopath
}
var flagRe = regexp.MustCompile("(.* )?-v(.* )?")
// SetGoVerbose sets GOFLAGS="${GOFLAGS} -v" if GOFLAGS does not
// already contain "-v" to make "go" command output verbose.
func SetGoVerbose() error {
gf, ok := os.LookupEnv(GoFlagsEnv)
if !ok || len(gf) == 0 {
return os.Setenv(GoFlagsEnv, "-v")
}
if !flagRe.MatchString(gf) {
return os.Setenv(GoFlagsEnv, gf+" -v")
}
return nil
}
// CheckRepo ensures dependency manager type and repo are being used in combination
// correctly, as different dependency managers have different Go environment
// requirements.
func CheckRepo(repo string) error {
inGopathSrc, err := WdInGoPathSrc()
if err != nil {
return err
}
if !inGopathSrc && repo == "" {
return fmt.Errorf(`flag --repo must be set if the working directory is not in $GOPATH/src.
See "operator-sdk new -h"`)
}
return nil
}
// CheckGoModules ensures that go modules are enabled.
func CheckGoModules() error {
goModOn, err := GoModOn()
if err != nil {
return err
}
if !goModOn {
return fmt.Errorf(`using go modules requires GO111MODULE="on", "auto", or unset.` +
` More info: https://sdk.operatorframework.io/docs/golang/quickstart/#a-note-on-dependency-management`)
}
return nil
}
// PrintDeprecationWarning prints a colored warning wrapping msg to the terminal.
func PrintDeprecationWarning(msg string) {
fmt.Printf(noticeColor, "[Deprecation Notice] "+msg+". Refer to the version upgrade guide "+
"for more information: https://sdk.operatorframework.io/docs/migration/version-upgrade-guide/\n\n")
}
// RewriteFileContents adds the provided content before the last occurrence of the word label
// and rewrites the file with the new content.
func RewriteFileContents(filename, instruction, content string) error {
text, err := ioutil.ReadFile(filename)
if err != nil {
return fmt.Errorf("error in getting contents from the file, %v", err)
}
existingContent := string(text)
modifiedContent, err := appendContent(existingContent, instruction, content)
if err != nil {
return err
}
err = ioutil.WriteFile(filename, []byte(modifiedContent), defaultPermission)
if err != nil |
return nil
}
func appendContent(fileContents, instruction, content string) (string, error) {
labelIndex := strings.LastIndex(fileContents, instruction)
if labelIndex == -1 {
return "", fmt.Errorf("instruction not present previously in dockerfile")
}
separationIndex := strings.Index(fileContents[labelIndex:], "\n")
if separationIndex == -1 {
return "", fmt.Errorf("no new line at the end of dockerfile command %s", fileContents[labelIndex:])
}
index := labelIndex + separationIndex + 1
newContent := fileContents[:index] + content + fileContents[index:]
return newContent, nil
}
| {
return fmt.Errorf("error writing modified contents to file, %v", err)
} | conditional_block |
project_util.go | // Copyright 2018 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package projutil
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
homedir "github.com/mitchellh/go-homedir"
"github.com/rogpeppe/go-internal/modfile"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
kbutil "github.com/operator-framework/operator-sdk/internal/util/kubebuilder"
)
const (
GoPathEnv = "GOPATH"
GoFlagsEnv = "GOFLAGS"
GoModEnv = "GO111MODULE"
SrcDir = "src"
fsep = string(filepath.Separator)
mainFile = "main.go"
managerMainFile = "cmd" + fsep + "manager" + fsep + mainFile
buildDockerfile = "build" + fsep + "Dockerfile"
rolesDir = "roles"
requirementsFile = "requirements.yml"
moleculeDir = "molecule"
helmChartsDir = "helm-charts"
goModFile = "go.mod"
defaultPermission = 0644
noticeColor = "\033[1;36m%s\033[0m"
)
// OperatorType - the type of operator
type OperatorType = string
const (
// OperatorTypeGo - golang type of operator.
OperatorTypeGo OperatorType = "go"
// OperatorTypeAnsible - ansible type of operator.
OperatorTypeAnsible OperatorType = "ansible"
// OperatorTypeHelm - helm type of operator.
OperatorTypeHelm OperatorType = "helm"
// OperatorTypeUnknown - unknown type of operator.
OperatorTypeUnknown OperatorType = "unknown"
)
type ErrUnknownOperatorType struct {
Type string
}
func (e ErrUnknownOperatorType) Error() string {
if e.Type == "" {
return "unknown operator type"
}
return fmt.Sprintf(`unknown operator type "%v"`, e.Type)
}
// MustInProjectRoot checks if the current dir is the project root, and exits
// if not.
func MustInProjectRoot() {
if err := CheckProjectRoot(); err != nil {
log.Fatal(err)
}
}
// CheckProjectRoot checks if the current dir is the project root, and returns
// an error if not.
// "build/Dockerfile" may not be present in all projects
// todo: scaffold Project file for Ansible and Helm with the type information
func CheckProjectRoot() error {
if kbutil.HasProjectFile() {
return nil
}
// todo(camilamacedo86): remove the following check when we no longer support the legacy scaffold layout
// If the current directory has a "build/Dockerfile", then it is safe to say
// we are at the project root.
if _, err := os.Stat(buildDockerfile); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("must run command in project root dir: project structure requires %s",
buildDockerfile)
}
return fmt.Errorf("error while checking if current directory is the project root: %v", err)
}
return nil
}
func CheckGoProjectCmd(cmd *cobra.Command) error {
if IsOperatorGo() {
return nil
}
return fmt.Errorf("'%s' can only be run for Go operators; %s or %s do not exist",
cmd.CommandPath(), managerMainFile, mainFile)
}
func MustGetwd() string {
wd, err := os.Getwd()
if err != nil {
log.Fatalf("Failed to get working directory: (%v)", err)
}
return wd
}
func | () (string, error) {
hd, err := homedir.Dir()
if err != nil {
return "", err
}
return homedir.Expand(hd)
}
// TODO(hasbro17): If this function is called in the subdir of
// a module project it will fail to parse go.mod and return
// the correct import path.
// This needs to be fixed to return the pkg import path for any subdir
// in order for `generate csv` to correctly form pkg imports
// for API pkg paths that are not relative to the root dir.
// This might not be fixable since there is no good way to
// get the project root from inside the subdir of a module project.
//
// GetGoPkg returns the current directory's import path by parsing it from
// wd if this project's repository path is rooted under $GOPATH/src, or
// from go.mod the project uses Go modules to manage dependencies.
// If the project has a go.mod then wd must be the project root.
//
// Example: "github.com/example-inc/app-operator"
func GetGoPkg() string {
// Default to reading from go.mod, as it should usually have the (correct)
// package path, and no further processing need be done on it if so.
if _, err := os.Stat(goModFile); err != nil && !os.IsNotExist(err) {
log.Fatalf("Failed to read go.mod: %v", err)
} else if err == nil {
b, err := ioutil.ReadFile(goModFile)
if err != nil {
log.Fatalf("Read go.mod: %v", err)
}
mf, err := modfile.Parse(goModFile, b, nil)
if err != nil {
log.Fatalf("Parse go.mod: %v", err)
}
if mf.Module != nil && mf.Module.Mod.Path != "" {
return mf.Module.Mod.Path
}
}
// Then try parsing package path from $GOPATH (set env or default).
goPath, ok := os.LookupEnv(GoPathEnv)
if !ok || goPath == "" {
hd, err := getHomeDir()
if err != nil {
log.Fatal(err)
}
goPath = filepath.Join(hd, "go", "src")
} else {
// MustSetWdGopath is necessary here because the user has set GOPATH,
// which could be a path list.
goPath = MustSetWdGopath(goPath)
}
if !strings.HasPrefix(MustGetwd(), goPath) {
log.Fatal("Could not determine project repository path: $GOPATH not set, wd in default $HOME/go/src," +
" or wd does not contain a go.mod")
}
return parseGoPkg(goPath)
}
func parseGoPkg(gopath string) string {
goSrc := filepath.Join(gopath, SrcDir)
wd := MustGetwd()
pathedPkg := strings.Replace(wd, goSrc, "", 1)
// Make sure package only contains the "/" separator and no others, and
// trim any leading/trailing "/".
return strings.Trim(filepath.ToSlash(pathedPkg), "/")
}
// GetOperatorType returns type of operator is in cwd.
// This function should be called after verifying the user is in project root.
func GetOperatorType() OperatorType {
switch {
case IsOperatorGo():
return OperatorTypeGo
case IsOperatorAnsible():
return OperatorTypeAnsible
case IsOperatorHelm():
return OperatorTypeHelm
}
return OperatorTypeUnknown
}
func IsOperatorGo() bool {
// todo: in the future we should check the plugin prefix to ensure the operator type
// for now, we can assume that any project with the kubebuilder layout is Go Type
if kbutil.HasProjectFile() {
return true
}
// todo: remove the following code when the legacy layout is no longer supported
// we can check it using the Project File
_, err := os.Stat(managerMainFile)
if err == nil || os.IsExist(err) {
return true
}
// Aware of an alternative location for main.go.
_, err = os.Stat(mainFile)
return err == nil || os.IsExist(err)
}
func IsOperatorAnsible() bool {
stat, err := os.Stat(rolesDir)
if (err == nil && stat.IsDir()) || os.IsExist(err) {
return true
}
stat, err = os.Stat(moleculeDir)
if (err == nil && stat.IsDir()) || os.IsExist(err) {
return true
}
_, err = os.Stat(requirementsFile)
return err == nil || os.IsExist(err)
}
func IsOperatorHelm() bool {
stat, err := os.Stat(helmChartsDir)
return (err == nil && stat.IsDir()) || os.IsExist(err)
}
// MustGetGopath gets GOPATH and ensures it is set and non-empty. If GOPATH
// is not set or empty, MustGetGopath exits.
func MustGetGopath() string {
gopath, ok := os.LookupEnv(GoPathEnv)
if !ok || len(gopath) == 0 {
log.Fatal("GOPATH env not set")
}
return gopath
}
// MustSetWdGopath sets GOPATH to the first element of the path list in
// currentGopath that prefixes the wd, then returns the set path.
// If GOPATH cannot be set, MustSetWdGopath exits.
func MustSetWdGopath(currentGopath string) string {
var (
newGopath string
cwdInGopath bool
wd = MustGetwd()
)
for _, newGopath = range filepath.SplitList(currentGopath) {
if strings.HasPrefix(filepath.Dir(wd), newGopath) {
cwdInGopath = true
break
}
}
if !cwdInGopath {
log.Fatalf("Project not in $GOPATH")
}
if err := os.Setenv(GoPathEnv, newGopath); err != nil {
log.Fatal(err)
}
return newGopath
}
var flagRe = regexp.MustCompile("(.* )?-v(.* )?")
// SetGoVerbose sets GOFLAGS="${GOFLAGS} -v" if GOFLAGS does not
// already contain "-v" to make "go" command output verbose.
func SetGoVerbose() error {
gf, ok := os.LookupEnv(GoFlagsEnv)
if !ok || len(gf) == 0 {
return os.Setenv(GoFlagsEnv, "-v")
}
if !flagRe.MatchString(gf) {
return os.Setenv(GoFlagsEnv, gf+" -v")
}
return nil
}
// CheckRepo ensures dependency manager type and repo are being used in combination
// correctly, as different dependency managers have different Go environment
// requirements.
func CheckRepo(repo string) error {
inGopathSrc, err := WdInGoPathSrc()
if err != nil {
return err
}
if !inGopathSrc && repo == "" {
return fmt.Errorf(`flag --repo must be set if the working directory is not in $GOPATH/src.
See "operator-sdk new -h"`)
}
return nil
}
// CheckGoModules ensures that go modules are enabled.
func CheckGoModules() error {
goModOn, err := GoModOn()
if err != nil {
return err
}
if !goModOn {
return fmt.Errorf(`using go modules requires GO111MODULE="on", "auto", or unset.` +
` More info: https://sdk.operatorframework.io/docs/golang/quickstart/#a-note-on-dependency-management`)
}
return nil
}
// PrintDeprecationWarning prints a colored warning wrapping msg to the terminal.
func PrintDeprecationWarning(msg string) {
fmt.Printf(noticeColor, "[Deprecation Notice] "+msg+". Refer to the version upgrade guide "+
"for more information: https://sdk.operatorframework.io/docs/migration/version-upgrade-guide/\n\n")
}
// RewriteFileContents adds the provided content before the last occurrence of the word label
// and rewrites the file with the new content.
func RewriteFileContents(filename, instruction, content string) error {
text, err := ioutil.ReadFile(filename)
if err != nil {
return fmt.Errorf("error in getting contents from the file, %v", err)
}
existingContent := string(text)
modifiedContent, err := appendContent(existingContent, instruction, content)
if err != nil {
return err
}
err = ioutil.WriteFile(filename, []byte(modifiedContent), defaultPermission)
if err != nil {
return fmt.Errorf("error writing modified contents to file, %v", err)
}
return nil
}
func appendContent(fileContents, instruction, content string) (string, error) {
labelIndex := strings.LastIndex(fileContents, instruction)
if labelIndex == -1 {
return "", fmt.Errorf("instruction not present previously in dockerfile")
}
separationIndex := strings.Index(fileContents[labelIndex:], "\n")
if separationIndex == -1 {
return "", fmt.Errorf("no new line at the end of dockerfile command %s", fileContents[labelIndex:])
}
index := labelIndex + separationIndex + 1
newContent := fileContents[:index] + content + fileContents[index:]
return newContent, nil
}
| getHomeDir | identifier_name |
sdk-ui.ts | import { Component, ChangeDetectorRef } from '@angular/core';
import {
normalizeURL,
AlertController,
Platform,
ActionSheetController,
NavController,
LoadingController
} from 'ionic-angular';
import { Camera } from '@ionic-native/camera';
import ScanbotSdk, { Page, MrzScannerConfiguration, BarcodeScannerConfiguration } from 'cordova-plugin-scanbot-sdk'
import SdkInitializer, { IMAGE_QUALITY } from '../../services/sdk-initializer';
import { PageFilterPage } from "./filter";
const SBSDK = ScanbotSdk.promisify();
@Component({
selector: 'page-sdk-ui',
templateUrl: 'sdk-ui.html'
})
export class SdkUiPage {
public pages: Page[] = [];
public selectedPage: Page;
constructor(
private changeDetector: ChangeDetectorRef,
private alertCtrl: AlertController,
sdkInitializer: SdkInitializer,
private camera: Camera,
private platform: Platform,
private actionSheetCtrl: ActionSheetController,
private navCtrl: NavController,
private loadingCtrl: LoadingController
) {
sdkInitializer.onInitialize(err => {
if (err) {
//console.log(JSON.stringify(err));
this.showAlert(err.message);
} else {
// ...
}
});
}
private createLoading(message: string) {
return this.loadingCtrl.create({
content: message
});
}
public async pickImageFromGallery() {
let options = {
quality: IMAGE_QUALITY,
destinationType: this.camera.DestinationType.FILE_URI,
sourceType: this.camera.PictureSourceType.PHOTOLIBRARY
};
const originalImageFileUri: string = await this.camera.getPicture(options);
if (!(await this.checkLicense())) { return; }
let loading = this.createLoading('Auto-detecting and cropping...');
try {
loading.present();
// First create a new page with the selected original image file:
const createResult = await SBSDK.createPage({originalImageFileUri});
// and then run auto document detection and cropping on this new page:
const docResult = await SBSDK.detectDocumentOnPage({page: createResult.page});
this.updatePage(docResult.page);
} finally {
loading.dismiss();
}
}
public async startCameraUi() {
if (!(await this.checkLicense())) { return; }
const result = await SBSDK.UI.startDocumentScanner({
uiConfigs: {
// Customize colors, text resources, behavior, etc..
cameraPreviewMode: 'FIT_IN',
orientationLockMode: 'PORTRAIT',
pageCounterButtonTitle: '%d Page(s)'
//...
}
});
if (result.status == 'CANCELED') {
return;
}
this.pages = this.pages.concat(result.pages);
this.selectedPage = this.pages[this.pages.length - 1];
this.changeDetector.detectChanges();
}
public async startCroppingUi() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
const result = await SBSDK.UI.startCroppingScreen({
page: this.selectedPage,
uiConfigs: {
// Customize colors, text resources, behavior, etc..
orientationLockMode: 'PORTRAIT',
polygonColor: '#0000ff'
//...
}
});
if (result.status == 'CANCELED') {
return;
}
this.updatePage(result.page);
}
public async rotatePage(times: number) {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
let loading = this.createLoading('Rotating Page...');
try {
loading.present();
const result = await SBSDK.rotatePage({page: this.selectedPage, times});
this.updatePage(result.page);
} finally {
loading.dismiss();
}
}
public async performOcr() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Performing OCR ...');
try {
loading.present();
const result = await SBSDK.performOcr({
images: this.pages.map(p => p.documentImageFileUri),
languages: ['en'],
outputFormat: 'FULL_OCR_RESULT',
});
this.showAlert(result.plainText, "OCR result");
} finally {
loading.dismiss();
}
}
public async createPdf() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Creating PDF ...');
try {
loading.present();
const result = await SBSDK.createPdf({images: this.pages.map(p => p.documentImageFileUri), pageSize: 'FIXED_A4'});
this.showAlert(result.pdfFileUri, "PDF created");
} finally {
loading.dismiss();
}
}
public async writeTiff() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Creating TIFF ...');
try {
loading.present();
const result = await SBSDK.writeTiff({images: this.pages.map(p => p.documentImageFileUri), oneBitEncoded: true});
this.showAlert(result.tiffFileUri, "TIFF created");
} finally {
loading.dismiss();
}
}
public async startMrzScanner() {
if (!(await this.checkLicense())) { return; }
let config: MrzScannerConfiguration = {
// Customize colors, text resources, etc..
finderTextHint: 'Please hold your phone over the 2- or 3-line MRZ code at the front of your passport.'
};
if (this.platform.is('ios')) {
let widthPx = window.screen.width;
config.finderWidth = widthPx * 0.9;
config.finderHeight = widthPx * 0.18;
}
const result = await SBSDK.UI.startMrzScanner({uiConfigs: config});
if (result.status == 'OK') {
const fields = result.mrzResult.fields.map(f => `<div>${f.name}: ${f.value} (${f.confidence.toFixed(2)})</div>`);
this.showAlert(fields.join(''), 'MRZ Result');
}
}
public async startBarcodeScannerUi() {
if (!(await this.checkLicense())) { return; }
let config: BarcodeScannerConfiguration = {
finderTextHint: 'Please align the barcode or QR code in the frame above to scan it.'
};
const result = await SBSDK.UI.startBarcodeScanner({uiConfigs: config});
if (result.status == 'OK') {
this.showAlert(result.barcodeResult.textValue, `Barcode: ${result.barcodeResult.barcodeFormat}`);
}
}
public async removePage() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
await SBSDK.removePage({page: this.selectedPage});
let pageIndexToRemove = null;
this.pages.forEach((p, index) => {
if (this.selectedPage.pageId === p.pageId) {
pageIndexToRemove = index;
}
});
this.pages.splice(pageIndexToRemove, 1);
this.selectedPage = null;
this.changeDetector.detectChanges();
}
public async cleanup() |
public normalizeImageFileUri(imageFileUri: string) {
// normalizeURL - see https://ionicframework.com/docs/wkwebview/
return normalizeURL(imageFileUri);
}
public onImagePreviewTapped(page: Page) {
this.selectedPage = page;
this.changeDetector.detectChanges();
}
private updatePage(page: Page) {
let replaced = false;
for (let i = 0; i < this.pages.length; ++i) {
if (this.pages[i].pageId == page.pageId) {
this.pages[i] = page;
replaced = true;
break;
}
}
if (!replaced) {
this.pages.push(page);
}
this.selectedPage = page;
this.changeDetector.detectChanges();
}
private async checkLicense() {
const result = await SBSDK.isLicenseValid();
if (result.isLicenseValid == true) {
// OK - trial session, valid trial license or valid production license.
return true;
}
this.showAlert("Scanbot SDK (trial) license has expired!");
return false;
}
private checkSelectedPage() {
if (this.selectedPage && this.selectedPage.documentImageFileUri) {
return true;
} else {
this.showAlert(this.selectedPage ? "The selected page has not yet been cropped. Crop it and try again."
: "No page selected. Please snap an image via Document Scanner or select one from the phone's gallery.");
return false;
}
}
private checkAllPagesHaveDocuments() {
if (this.pages.length == 0) {
this.showAlert("Please snap some images via Document Scanner or select from the phone's gallery.");
return false;
}
let every = true;
this.pages.forEach(p => {
if (!p.documentImageFileUri) {
every = false;
}
});
if (!every) {
this.showAlert("Some pages have not yet been cropped. Crop all uncropped pages and try again.");
return false;
}
return true;
}
private showAlert(message: string, title: string = "Alert") {
const prompt = this.alertCtrl.create({
title,
message,
buttons: [
{
text: 'OK',
}
]
});
prompt.present();
}
public async presentPageEditActionsSheet() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
const actionSheet = this.actionSheetCtrl.create({
title: 'Edit selected Page',
buttons: [
{
text: 'Crop/Rotate (Cropping UI)',
icon: 'ios-crop',
handler: () => {
this.startCroppingUi();
}
},
{
text: 'Apply Image Filter',
icon: 'contrast',
handler: () => {
this.openPageFilterPage(this.selectedPage);
}
},
{
text: 'Rotate Clockwise',
icon: 'ios-redo',
handler: () => {
this.rotatePage(-1);
}
},
{
text: 'Rotate Counter-Clockwise',
icon: 'ios-undo',
handler: () => {
this.rotatePage(1);
}
},
{
text: 'Delete Page',
icon: 'trash',
handler: () => {
this.removePage();
}
},
{
text: 'Cancel',
icon: 'close',
role: 'cancel',
handler: () => {
console.log('Cancel clicked');
}
}
]
});
actionSheet.present();
}
private openPageFilterPage(page: Page) {
if (!this.checkSelectedPage()) return;
new Promise<{page: Page}>((resolve, reject) => {
this.navCtrl.push(PageFilterPage, {page: page, resolve: resolve});
}).then(data => {
this.updatePage(data.page);
});
}
}
| {
await SBSDK.cleanup();
this.pages = [];
this.selectedPage = null;
this.changeDetector.detectChanges();
} | identifier_body |
sdk-ui.ts | import { Component, ChangeDetectorRef } from '@angular/core';
import {
normalizeURL,
AlertController,
Platform,
ActionSheetController,
NavController,
LoadingController
} from 'ionic-angular';
import { Camera } from '@ionic-native/camera';
import ScanbotSdk, { Page, MrzScannerConfiguration, BarcodeScannerConfiguration } from 'cordova-plugin-scanbot-sdk'
import SdkInitializer, { IMAGE_QUALITY } from '../../services/sdk-initializer';
import { PageFilterPage } from "./filter";
const SBSDK = ScanbotSdk.promisify();
@Component({
selector: 'page-sdk-ui',
templateUrl: 'sdk-ui.html'
})
export class SdkUiPage {
public pages: Page[] = [];
public selectedPage: Page;
constructor(
private changeDetector: ChangeDetectorRef,
private alertCtrl: AlertController,
sdkInitializer: SdkInitializer,
private camera: Camera,
private platform: Platform,
private actionSheetCtrl: ActionSheetController,
private navCtrl: NavController,
private loadingCtrl: LoadingController
) {
sdkInitializer.onInitialize(err => {
if (err) {
//console.log(JSON.stringify(err));
this.showAlert(err.message);
} else {
// ...
}
});
}
private createLoading(message: string) {
return this.loadingCtrl.create({
content: message
});
}
public async pickImageFromGallery() {
let options = {
quality: IMAGE_QUALITY,
destinationType: this.camera.DestinationType.FILE_URI,
sourceType: this.camera.PictureSourceType.PHOTOLIBRARY
};
const originalImageFileUri: string = await this.camera.getPicture(options);
if (!(await this.checkLicense())) { return; }
let loading = this.createLoading('Auto-detecting and cropping...');
try {
loading.present();
// First create a new page with the selected original image file:
const createResult = await SBSDK.createPage({originalImageFileUri});
// and then run auto document detection and cropping on this new page:
const docResult = await SBSDK.detectDocumentOnPage({page: createResult.page});
this.updatePage(docResult.page);
} finally {
loading.dismiss();
}
}
public async startCameraUi() {
if (!(await this.checkLicense())) { return; }
const result = await SBSDK.UI.startDocumentScanner({
uiConfigs: {
// Customize colors, text resources, behavior, etc..
cameraPreviewMode: 'FIT_IN',
orientationLockMode: 'PORTRAIT',
pageCounterButtonTitle: '%d Page(s)'
//...
}
});
if (result.status == 'CANCELED') {
return;
}
this.pages = this.pages.concat(result.pages);
this.selectedPage = this.pages[this.pages.length - 1];
this.changeDetector.detectChanges();
}
public async startCroppingUi() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
const result = await SBSDK.UI.startCroppingScreen({
page: this.selectedPage,
uiConfigs: {
// Customize colors, text resources, behavior, etc..
orientationLockMode: 'PORTRAIT',
polygonColor: '#0000ff'
//...
}
});
if (result.status == 'CANCELED') {
return;
}
this.updatePage(result.page);
}
public async rotatePage(times: number) {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
let loading = this.createLoading('Rotating Page...');
try {
loading.present();
const result = await SBSDK.rotatePage({page: this.selectedPage, times});
this.updatePage(result.page);
} finally {
loading.dismiss();
}
}
public async performOcr() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Performing OCR ...');
try {
loading.present();
const result = await SBSDK.performOcr({
images: this.pages.map(p => p.documentImageFileUri),
languages: ['en'],
outputFormat: 'FULL_OCR_RESULT',
});
this.showAlert(result.plainText, "OCR result");
} finally {
loading.dismiss();
}
}
public async createPdf() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Creating PDF ...');
try {
loading.present();
const result = await SBSDK.createPdf({images: this.pages.map(p => p.documentImageFileUri), pageSize: 'FIXED_A4'});
this.showAlert(result.pdfFileUri, "PDF created");
} finally {
loading.dismiss();
}
}
public async writeTiff() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Creating TIFF ...');
try {
loading.present();
const result = await SBSDK.writeTiff({images: this.pages.map(p => p.documentImageFileUri), oneBitEncoded: true});
this.showAlert(result.tiffFileUri, "TIFF created");
} finally {
loading.dismiss();
}
}
public async startMrzScanner() {
if (!(await this.checkLicense())) { return; }
let config: MrzScannerConfiguration = {
// Customize colors, text resources, etc..
finderTextHint: 'Please hold your phone over the 2- or 3-line MRZ code at the front of your passport.'
};
if (this.platform.is('ios')) {
let widthPx = window.screen.width;
config.finderWidth = widthPx * 0.9;
config.finderHeight = widthPx * 0.18;
}
const result = await SBSDK.UI.startMrzScanner({uiConfigs: config});
if (result.status == 'OK') {
const fields = result.mrzResult.fields.map(f => `<div>${f.name}: ${f.value} (${f.confidence.toFixed(2)})</div>`);
this.showAlert(fields.join(''), 'MRZ Result');
}
}
public async startBarcodeScannerUi() {
if (!(await this.checkLicense())) { return; }
let config: BarcodeScannerConfiguration = {
finderTextHint: 'Please align the barcode or QR code in the frame above to scan it.'
};
const result = await SBSDK.UI.startBarcodeScanner({uiConfigs: config});
if (result.status == 'OK') {
this.showAlert(result.barcodeResult.textValue, `Barcode: ${result.barcodeResult.barcodeFormat}`);
}
}
public async removePage() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
await SBSDK.removePage({page: this.selectedPage});
let pageIndexToRemove = null;
this.pages.forEach((p, index) => {
if (this.selectedPage.pageId === p.pageId) {
pageIndexToRemove = index;
}
});
this.pages.splice(pageIndexToRemove, 1);
this.selectedPage = null;
this.changeDetector.detectChanges();
}
public async cleanup() {
await SBSDK.cleanup();
this.pages = [];
this.selectedPage = null;
this.changeDetector.detectChanges();
}
public normalizeImageFileUri(imageFileUri: string) {
// normalizeURL - see https://ionicframework.com/docs/wkwebview/
return normalizeURL(imageFileUri);
}
public onImagePreviewTapped(page: Page) {
this.selectedPage = page;
this.changeDetector.detectChanges();
}
private updatePage(page: Page) {
let replaced = false;
for (let i = 0; i < this.pages.length; ++i) {
if (this.pages[i].pageId == page.pageId) {
this.pages[i] = page;
replaced = true;
break;
}
}
if (!replaced) {
this.pages.push(page);
}
this.selectedPage = page;
this.changeDetector.detectChanges();
}
private async checkLicense() {
const result = await SBSDK.isLicenseValid();
if (result.isLicenseValid == true) {
// OK - trial session, valid trial license or valid production license.
return true;
}
this.showAlert("Scanbot SDK (trial) license has expired!");
return false;
}
private checkSelectedPage() {
if (this.selectedPage && this.selectedPage.documentImageFileUri) {
return true;
} else {
this.showAlert(this.selectedPage ? "The selected page has not yet been cropped. Crop it and try again."
: "No page selected. Please snap an image via Document Scanner or select one from the phone's gallery.");
return false;
}
}
private checkAllPagesHaveDocuments() {
if (this.pages.length == 0) {
this.showAlert("Please snap some images via Document Scanner or select from the phone's gallery.");
return false;
}
let every = true;
this.pages.forEach(p => {
if (!p.documentImageFileUri) {
every = false;
}
});
if (!every) {
this.showAlert("Some pages have not yet been cropped. Crop all uncropped pages and try again.");
return false;
}
return true;
}
private showAlert(message: string, title: string = "Alert") {
const prompt = this.alertCtrl.create({
title,
message,
buttons: [
{
text: 'OK',
}
]
});
prompt.present();
}
public async | () {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
const actionSheet = this.actionSheetCtrl.create({
title: 'Edit selected Page',
buttons: [
{
text: 'Crop/Rotate (Cropping UI)',
icon: 'ios-crop',
handler: () => {
this.startCroppingUi();
}
},
{
text: 'Apply Image Filter',
icon: 'contrast',
handler: () => {
this.openPageFilterPage(this.selectedPage);
}
},
{
text: 'Rotate Clockwise',
icon: 'ios-redo',
handler: () => {
this.rotatePage(-1);
}
},
{
text: 'Rotate Counter-Clockwise',
icon: 'ios-undo',
handler: () => {
this.rotatePage(1);
}
},
{
text: 'Delete Page',
icon: 'trash',
handler: () => {
this.removePage();
}
},
{
text: 'Cancel',
icon: 'close',
role: 'cancel',
handler: () => {
console.log('Cancel clicked');
}
}
]
});
actionSheet.present();
}
private openPageFilterPage(page: Page) {
if (!this.checkSelectedPage()) return;
new Promise<{page: Page}>((resolve, reject) => {
this.navCtrl.push(PageFilterPage, {page: page, resolve: resolve});
}).then(data => {
this.updatePage(data.page);
});
}
}
| presentPageEditActionsSheet | identifier_name |
sdk-ui.ts | import { Component, ChangeDetectorRef } from '@angular/core';
import {
normalizeURL,
AlertController,
Platform,
ActionSheetController,
NavController,
LoadingController
} from 'ionic-angular';
import { Camera } from '@ionic-native/camera';
import ScanbotSdk, { Page, MrzScannerConfiguration, BarcodeScannerConfiguration } from 'cordova-plugin-scanbot-sdk'
import SdkInitializer, { IMAGE_QUALITY } from '../../services/sdk-initializer';
import { PageFilterPage } from "./filter";
const SBSDK = ScanbotSdk.promisify();
@Component({
selector: 'page-sdk-ui',
templateUrl: 'sdk-ui.html'
})
export class SdkUiPage {
public pages: Page[] = [];
public selectedPage: Page;
constructor(
private changeDetector: ChangeDetectorRef,
private alertCtrl: AlertController,
sdkInitializer: SdkInitializer,
private camera: Camera,
private platform: Platform,
private actionSheetCtrl: ActionSheetController,
private navCtrl: NavController,
private loadingCtrl: LoadingController
) {
sdkInitializer.onInitialize(err => {
if (err) {
//console.log(JSON.stringify(err));
this.showAlert(err.message);
} else {
// ...
}
});
}
private createLoading(message: string) { | });
}
public async pickImageFromGallery() {
let options = {
quality: IMAGE_QUALITY,
destinationType: this.camera.DestinationType.FILE_URI,
sourceType: this.camera.PictureSourceType.PHOTOLIBRARY
};
const originalImageFileUri: string = await this.camera.getPicture(options);
if (!(await this.checkLicense())) { return; }
let loading = this.createLoading('Auto-detecting and cropping...');
try {
loading.present();
// First create a new page with the selected original image file:
const createResult = await SBSDK.createPage({originalImageFileUri});
// and then run auto document detection and cropping on this new page:
const docResult = await SBSDK.detectDocumentOnPage({page: createResult.page});
this.updatePage(docResult.page);
} finally {
loading.dismiss();
}
}
public async startCameraUi() {
if (!(await this.checkLicense())) { return; }
const result = await SBSDK.UI.startDocumentScanner({
uiConfigs: {
// Customize colors, text resources, behavior, etc..
cameraPreviewMode: 'FIT_IN',
orientationLockMode: 'PORTRAIT',
pageCounterButtonTitle: '%d Page(s)'
//...
}
});
if (result.status == 'CANCELED') {
return;
}
this.pages = this.pages.concat(result.pages);
this.selectedPage = this.pages[this.pages.length - 1];
this.changeDetector.detectChanges();
}
public async startCroppingUi() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
const result = await SBSDK.UI.startCroppingScreen({
page: this.selectedPage,
uiConfigs: {
// Customize colors, text resources, behavior, etc..
orientationLockMode: 'PORTRAIT',
polygonColor: '#0000ff'
//...
}
});
if (result.status == 'CANCELED') {
return;
}
this.updatePage(result.page);
}
public async rotatePage(times: number) {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
let loading = this.createLoading('Rotating Page...');
try {
loading.present();
const result = await SBSDK.rotatePage({page: this.selectedPage, times});
this.updatePage(result.page);
} finally {
loading.dismiss();
}
}
public async performOcr() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Performing OCR ...');
try {
loading.present();
const result = await SBSDK.performOcr({
images: this.pages.map(p => p.documentImageFileUri),
languages: ['en'],
outputFormat: 'FULL_OCR_RESULT',
});
this.showAlert(result.plainText, "OCR result");
} finally {
loading.dismiss();
}
}
public async createPdf() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Creating PDF ...');
try {
loading.present();
const result = await SBSDK.createPdf({images: this.pages.map(p => p.documentImageFileUri), pageSize: 'FIXED_A4'});
this.showAlert(result.pdfFileUri, "PDF created");
} finally {
loading.dismiss();
}
}
public async writeTiff() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Creating TIFF ...');
try {
loading.present();
const result = await SBSDK.writeTiff({images: this.pages.map(p => p.documentImageFileUri), oneBitEncoded: true});
this.showAlert(result.tiffFileUri, "TIFF created");
} finally {
loading.dismiss();
}
}
public async startMrzScanner() {
if (!(await this.checkLicense())) { return; }
let config: MrzScannerConfiguration = {
// Customize colors, text resources, etc..
finderTextHint: 'Please hold your phone over the 2- or 3-line MRZ code at the front of your passport.'
};
if (this.platform.is('ios')) {
let widthPx = window.screen.width;
config.finderWidth = widthPx * 0.9;
config.finderHeight = widthPx * 0.18;
}
const result = await SBSDK.UI.startMrzScanner({uiConfigs: config});
if (result.status == 'OK') {
const fields = result.mrzResult.fields.map(f => `<div>${f.name}: ${f.value} (${f.confidence.toFixed(2)})</div>`);
this.showAlert(fields.join(''), 'MRZ Result');
}
}
public async startBarcodeScannerUi() {
if (!(await this.checkLicense())) { return; }
let config: BarcodeScannerConfiguration = {
finderTextHint: 'Please align the barcode or QR code in the frame above to scan it.'
};
const result = await SBSDK.UI.startBarcodeScanner({uiConfigs: config});
if (result.status == 'OK') {
this.showAlert(result.barcodeResult.textValue, `Barcode: ${result.barcodeResult.barcodeFormat}`);
}
}
public async removePage() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
await SBSDK.removePage({page: this.selectedPage});
let pageIndexToRemove = null;
this.pages.forEach((p, index) => {
if (this.selectedPage.pageId === p.pageId) {
pageIndexToRemove = index;
}
});
this.pages.splice(pageIndexToRemove, 1);
this.selectedPage = null;
this.changeDetector.detectChanges();
}
public async cleanup() {
await SBSDK.cleanup();
this.pages = [];
this.selectedPage = null;
this.changeDetector.detectChanges();
}
public normalizeImageFileUri(imageFileUri: string) {
// normalizeURL - see https://ionicframework.com/docs/wkwebview/
return normalizeURL(imageFileUri);
}
public onImagePreviewTapped(page: Page) {
this.selectedPage = page;
this.changeDetector.detectChanges();
}
private updatePage(page: Page) {
let replaced = false;
for (let i = 0; i < this.pages.length; ++i) {
if (this.pages[i].pageId == page.pageId) {
this.pages[i] = page;
replaced = true;
break;
}
}
if (!replaced) {
this.pages.push(page);
}
this.selectedPage = page;
this.changeDetector.detectChanges();
}
private async checkLicense() {
const result = await SBSDK.isLicenseValid();
if (result.isLicenseValid == true) {
// OK - trial session, valid trial license or valid production license.
return true;
}
this.showAlert("Scanbot SDK (trial) license has expired!");
return false;
}
private checkSelectedPage() {
if (this.selectedPage && this.selectedPage.documentImageFileUri) {
return true;
} else {
this.showAlert(this.selectedPage ? "The selected page has not yet been cropped. Crop it and try again."
: "No page selected. Please snap an image via Document Scanner or select one from the phone's gallery.");
return false;
}
}
private checkAllPagesHaveDocuments() {
if (this.pages.length == 0) {
this.showAlert("Please snap some images via Document Scanner or select from the phone's gallery.");
return false;
}
let every = true;
this.pages.forEach(p => {
if (!p.documentImageFileUri) {
every = false;
}
});
if (!every) {
this.showAlert("Some pages have not yet been cropped. Crop all uncropped pages and try again.");
return false;
}
return true;
}
private showAlert(message: string, title: string = "Alert") {
const prompt = this.alertCtrl.create({
title,
message,
buttons: [
{
text: 'OK',
}
]
});
prompt.present();
}
public async presentPageEditActionsSheet() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
const actionSheet = this.actionSheetCtrl.create({
title: 'Edit selected Page',
buttons: [
{
text: 'Crop/Rotate (Cropping UI)',
icon: 'ios-crop',
handler: () => {
this.startCroppingUi();
}
},
{
text: 'Apply Image Filter',
icon: 'contrast',
handler: () => {
this.openPageFilterPage(this.selectedPage);
}
},
{
text: 'Rotate Clockwise',
icon: 'ios-redo',
handler: () => {
this.rotatePage(-1);
}
},
{
text: 'Rotate Counter-Clockwise',
icon: 'ios-undo',
handler: () => {
this.rotatePage(1);
}
},
{
text: 'Delete Page',
icon: 'trash',
handler: () => {
this.removePage();
}
},
{
text: 'Cancel',
icon: 'close',
role: 'cancel',
handler: () => {
console.log('Cancel clicked');
}
}
]
});
actionSheet.present();
}
private openPageFilterPage(page: Page) {
if (!this.checkSelectedPage()) return;
new Promise<{page: Page}>((resolve, reject) => {
this.navCtrl.push(PageFilterPage, {page: page, resolve: resolve});
}).then(data => {
this.updatePage(data.page);
});
}
} | return this.loadingCtrl.create({
content: message | random_line_split |
sdk-ui.ts | import { Component, ChangeDetectorRef } from '@angular/core';
import {
normalizeURL,
AlertController,
Platform,
ActionSheetController,
NavController,
LoadingController
} from 'ionic-angular';
import { Camera } from '@ionic-native/camera';
import ScanbotSdk, { Page, MrzScannerConfiguration, BarcodeScannerConfiguration } from 'cordova-plugin-scanbot-sdk'
import SdkInitializer, { IMAGE_QUALITY } from '../../services/sdk-initializer';
import { PageFilterPage } from "./filter";
const SBSDK = ScanbotSdk.promisify();
@Component({
selector: 'page-sdk-ui',
templateUrl: 'sdk-ui.html'
})
export class SdkUiPage {
public pages: Page[] = [];
public selectedPage: Page;
constructor(
private changeDetector: ChangeDetectorRef,
private alertCtrl: AlertController,
sdkInitializer: SdkInitializer,
private camera: Camera,
private platform: Platform,
private actionSheetCtrl: ActionSheetController,
private navCtrl: NavController,
private loadingCtrl: LoadingController
) {
sdkInitializer.onInitialize(err => {
if (err) {
//console.log(JSON.stringify(err));
this.showAlert(err.message);
} else {
// ...
}
});
}
private createLoading(message: string) {
return this.loadingCtrl.create({
content: message
});
}
public async pickImageFromGallery() {
let options = {
quality: IMAGE_QUALITY,
destinationType: this.camera.DestinationType.FILE_URI,
sourceType: this.camera.PictureSourceType.PHOTOLIBRARY
};
const originalImageFileUri: string = await this.camera.getPicture(options);
if (!(await this.checkLicense())) { return; }
let loading = this.createLoading('Auto-detecting and cropping...');
try {
loading.present();
// First create a new page with the selected original image file:
const createResult = await SBSDK.createPage({originalImageFileUri});
// and then run auto document detection and cropping on this new page:
const docResult = await SBSDK.detectDocumentOnPage({page: createResult.page});
this.updatePage(docResult.page);
} finally {
loading.dismiss();
}
}
public async startCameraUi() {
if (!(await this.checkLicense())) { return; }
const result = await SBSDK.UI.startDocumentScanner({
uiConfigs: {
// Customize colors, text resources, behavior, etc..
cameraPreviewMode: 'FIT_IN',
orientationLockMode: 'PORTRAIT',
pageCounterButtonTitle: '%d Page(s)'
//...
}
});
if (result.status == 'CANCELED') {
return;
}
this.pages = this.pages.concat(result.pages);
this.selectedPage = this.pages[this.pages.length - 1];
this.changeDetector.detectChanges();
}
public async startCroppingUi() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
const result = await SBSDK.UI.startCroppingScreen({
page: this.selectedPage,
uiConfigs: {
// Customize colors, text resources, behavior, etc..
orientationLockMode: 'PORTRAIT',
polygonColor: '#0000ff'
//...
}
});
if (result.status == 'CANCELED') {
return;
}
this.updatePage(result.page);
}
public async rotatePage(times: number) {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
let loading = this.createLoading('Rotating Page...');
try {
loading.present();
const result = await SBSDK.rotatePage({page: this.selectedPage, times});
this.updatePage(result.page);
} finally {
loading.dismiss();
}
}
public async performOcr() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Performing OCR ...');
try {
loading.present();
const result = await SBSDK.performOcr({
images: this.pages.map(p => p.documentImageFileUri),
languages: ['en'],
outputFormat: 'FULL_OCR_RESULT',
});
this.showAlert(result.plainText, "OCR result");
} finally {
loading.dismiss();
}
}
public async createPdf() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Creating PDF ...');
try {
loading.present();
const result = await SBSDK.createPdf({images: this.pages.map(p => p.documentImageFileUri), pageSize: 'FIXED_A4'});
this.showAlert(result.pdfFileUri, "PDF created");
} finally {
loading.dismiss();
}
}
public async writeTiff() {
if (!(await this.checkLicense())) { return; }
if (!this.checkAllPagesHaveDocuments()) { return; }
let loading = this.createLoading('Creating TIFF ...');
try {
loading.present();
const result = await SBSDK.writeTiff({images: this.pages.map(p => p.documentImageFileUri), oneBitEncoded: true});
this.showAlert(result.tiffFileUri, "TIFF created");
} finally {
loading.dismiss();
}
}
public async startMrzScanner() {
if (!(await this.checkLicense())) { return; }
let config: MrzScannerConfiguration = {
// Customize colors, text resources, etc..
finderTextHint: 'Please hold your phone over the 2- or 3-line MRZ code at the front of your passport.'
};
if (this.platform.is('ios')) {
let widthPx = window.screen.width;
config.finderWidth = widthPx * 0.9;
config.finderHeight = widthPx * 0.18;
}
const result = await SBSDK.UI.startMrzScanner({uiConfigs: config});
if (result.status == 'OK') {
const fields = result.mrzResult.fields.map(f => `<div>${f.name}: ${f.value} (${f.confidence.toFixed(2)})</div>`);
this.showAlert(fields.join(''), 'MRZ Result');
}
}
public async startBarcodeScannerUi() {
if (!(await this.checkLicense())) { return; }
let config: BarcodeScannerConfiguration = {
finderTextHint: 'Please align the barcode or QR code in the frame above to scan it.'
};
const result = await SBSDK.UI.startBarcodeScanner({uiConfigs: config});
if (result.status == 'OK') {
this.showAlert(result.barcodeResult.textValue, `Barcode: ${result.barcodeResult.barcodeFormat}`);
}
}
public async removePage() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) |
await SBSDK.removePage({page: this.selectedPage});
let pageIndexToRemove = null;
this.pages.forEach((p, index) => {
if (this.selectedPage.pageId === p.pageId) {
pageIndexToRemove = index;
}
});
this.pages.splice(pageIndexToRemove, 1);
this.selectedPage = null;
this.changeDetector.detectChanges();
}
public async cleanup() {
await SBSDK.cleanup();
this.pages = [];
this.selectedPage = null;
this.changeDetector.detectChanges();
}
public normalizeImageFileUri(imageFileUri: string) {
// normalizeURL - see https://ionicframework.com/docs/wkwebview/
return normalizeURL(imageFileUri);
}
public onImagePreviewTapped(page: Page) {
this.selectedPage = page;
this.changeDetector.detectChanges();
}
private updatePage(page: Page) {
let replaced = false;
for (let i = 0; i < this.pages.length; ++i) {
if (this.pages[i].pageId == page.pageId) {
this.pages[i] = page;
replaced = true;
break;
}
}
if (!replaced) {
this.pages.push(page);
}
this.selectedPage = page;
this.changeDetector.detectChanges();
}
private async checkLicense() {
const result = await SBSDK.isLicenseValid();
if (result.isLicenseValid == true) {
// OK - trial session, valid trial license or valid production license.
return true;
}
this.showAlert("Scanbot SDK (trial) license has expired!");
return false;
}
private checkSelectedPage() {
if (this.selectedPage && this.selectedPage.documentImageFileUri) {
return true;
} else {
this.showAlert(this.selectedPage ? "The selected page has not yet been cropped. Crop it and try again."
: "No page selected. Please snap an image via Document Scanner or select one from the phone's gallery.");
return false;
}
}
private checkAllPagesHaveDocuments() {
if (this.pages.length == 0) {
this.showAlert("Please snap some images via Document Scanner or select from the phone's gallery.");
return false;
}
let every = true;
this.pages.forEach(p => {
if (!p.documentImageFileUri) {
every = false;
}
});
if (!every) {
this.showAlert("Some pages have not yet been cropped. Crop all uncropped pages and try again.");
return false;
}
return true;
}
private showAlert(message: string, title: string = "Alert") {
const prompt = this.alertCtrl.create({
title,
message,
buttons: [
{
text: 'OK',
}
]
});
prompt.present();
}
public async presentPageEditActionsSheet() {
if (!(await this.checkLicense())) { return; }
if (!this.checkSelectedPage()) { return; }
const actionSheet = this.actionSheetCtrl.create({
title: 'Edit selected Page',
buttons: [
{
text: 'Crop/Rotate (Cropping UI)',
icon: 'ios-crop',
handler: () => {
this.startCroppingUi();
}
},
{
text: 'Apply Image Filter',
icon: 'contrast',
handler: () => {
this.openPageFilterPage(this.selectedPage);
}
},
{
text: 'Rotate Clockwise',
icon: 'ios-redo',
handler: () => {
this.rotatePage(-1);
}
},
{
text: 'Rotate Counter-Clockwise',
icon: 'ios-undo',
handler: () => {
this.rotatePage(1);
}
},
{
text: 'Delete Page',
icon: 'trash',
handler: () => {
this.removePage();
}
},
{
text: 'Cancel',
icon: 'close',
role: 'cancel',
handler: () => {
console.log('Cancel clicked');
}
}
]
});
actionSheet.present();
}
private openPageFilterPage(page: Page) {
if (!this.checkSelectedPage()) return;
new Promise<{page: Page}>((resolve, reject) => {
this.navCtrl.push(PageFilterPage, {page: page, resolve: resolve});
}).then(data => {
this.updatePage(data.page);
});
}
}
| { return; } | conditional_block |
main.go | package main
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/signal"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/Shopify/sarama"
"github.com/araddon/dateparse"
"github.com/riferrei/srclient"
flag "github.com/spf13/pflag"
)
const VERSION = "kafkabat v0.1.0"
func main() {
log.SetOutput(os.Stderr)
log.SetFlags(log.Lmicroseconds)
flags := Flags{}
flag.CommandLine.SortFlags = false
flag.Usage = usage
flag.StringVarP(&flags.brokers, "broker", "b", "localhost:9092", "Kafka broker bootstrap servers, separated by comma")
flag.StringVarP(&flags.topic, "topic", "t", "", "Kafka topic name")
flag.Int32VarP(&flags.partition, "partition", "p", -1, "partition, -1 means all")
flag.StringVarP(&flags.offset, "offset", "o", "begin", "offset to start consuming, possile values: begin, end, positive integer, timestamp")
flag.Uint64VarP(&flags.count, "count", "c", 0, "maximum count of messages to consume, 0 means no limit")
flag.StringVarP(&flags.registry, "registry", "r", "http://localhost:8081", "schema regisry URL")
flag.StringVarP(&flags.keySchema, "key-schema", "K", "", "key schema, can be numeric ID, file path or AVRO schema definition")
flag.StringVarP(&flags.valueSchema, "value-schema", "V", "", "value schema, can be numeric ID, file path or AVRO schema definition")
flag.StringVar(&flags.kafkaVersion, "kafka-version", "2.3.0", "Kafka server version")
flag.BoolVarP(&flags.follow, "follow", "f", false, "continue consuming when reach partition end")
flag.BoolVarP(&flags.list, "list", "l", false, "list partition meta information")
flag.BoolVarP(&flags.help, "help", "h", false, "show this help")
flag.BoolVarP(&flags.version, "version", "v", false, "show version")
flag.Parse()
if flags.version {
fmt.Println(VERSION)
return
}
if flags.help {
usage()
os.Exit(1)
}
if flags.list {
listTopic(&flags)
return
}
if flags.topic == "" {
fmt.Fprintln(os.Stderr, "ERROR: `topic` isn't specified!")
usage()
os.Exit(1)
}
if flag.NArg() > 0 {
runProducer(&flags)
} else {
runConsumer(&flags)
}
}
type Flags struct {
brokers string
topic string
partition int32
offset string
count uint64
registry string
keySchema string
valueSchema string
kafkaVersion string
list bool
follow bool
help bool
version bool
}
func usage() {
fmt.Fprint(os.Stderr, `Usage:
Produce:
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--key-schema=SCHEMA] [--value-schema=SCHEMA] key value
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--key-schema=SCHEMA] [--value-schema=SCHEMA] file
If "file" is "-", read line-by-line JSON objects from stdin, the object must contain keys "key" and "value".
Consume:
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--offset=OFFSET] [--count=N] [--follow]
List:
kafkabat KAFKA_OPTS [--topic=TOPIC] --list
KAFKA_OPTS:
[--broker=BROKERS] [--kafka-version=VERSION]
REGISTRY_OPTS
[--registry=URL]
`)
flag.PrintDefaults()
}
func listTopic(flags *Flags) {
var err error
config := sarama.NewConfig()
config.Consumer.Return.Errors = true
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create create:", err)
}
defer client.Close()
topics, err := client.Topics()
if err != nil {
log.Fatalln("failed to get topics:", err)
}
for _, topic := range topics {
if flags.topic != "" && flags.topic != topic {
continue
}
partitions, err := client.Partitions(topic)
if err != nil {
log.Fatalf("failed to list partitions for topic %s: %s\n", topic, err)
}
for _, partition := range partitions {
if flags.partition >= 0 && flags.partition != partition {
continue
}
minOffset, err := client.GetOffset(topic, partition, sarama.OffsetOldest)
if err != nil {
log.Fatalf("failed to get oldest offset for topic %s partition %d: %s\n", topic, partition, err)
}
maxOffset, err := client.GetOffset(topic, partition, sarama.OffsetNewest)
if err != nil {
log.Fatalf("failed to get newest offset for topic %s partition %d: %s\n", topic, partition, err)
}
if minOffset == maxOffset {
fmt.Printf("topic=%s partition=%d minOffset=%d maxOffset=%d\n", topic, partition, minOffset, maxOffset)
} else {
minMsg, err := getMessageByOffset(client, topic, partition, minOffset)
if err != nil {
log.Fatalf("failed to get first message for topic %s partition %d: %s\n", topic, partition, err)
}
// due to holes in segment, it's not reliable to obtain previous message
//maxMsg, err := getMessageByOffset(client, topic, partition, maxOffset-1)
//if err != nil {
// log.Fatalf("failed to get last message for topic %s partition %d: %s\n", topic, partition, err)
//}
fmt.Printf("topic=%s partition=%d minOffset=%d maxOffset=%d minTime=%s\n",
topic, partition, minMsg.Offset, maxOffset, minMsg.Timestamp.Format(time.RFC3339))
}
}
}
}
func runProducer(flags *Flags) {
registry := srclient.CreateSchemaRegistryClient(flags.registry)
keySchema, err := createSchema(registry, flags.keySchema, flags.topic, true)
if err != nil {
log.Fatalln("invalid key schema:", err)
}
valueSchema, err := createSchema(registry, flags.valueSchema, flags.topic, false)
if err != nil {
log.Fatalln("invalid value schema:", err)
}
config := sarama.NewConfig()
config.Producer.Return.Errors = true
config.Producer.Return.Successes = true
config.Producer.Retry.Max = 5
config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Idempotent = true
config.Net.MaxOpenRequests = 1
if flags.partition >= 0 {
config.Producer.Partitioner = sarama.NewManualPartitioner
}
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create client:", err)
}
defer client.Close()
producer, err := sarama.NewSyncProducerFromClient(client)
if err != nil {
log.Fatalln("failed to create producer:", err)
}
defer producer.Close()
done := false
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
go func() {
signal := <-signals
done = true
log.Println("got signal:", signal)
}()
successes := 0
if flag.NArg() > 1 {
argKey, argValue := flag.Arg(0), flag.Arg(1)
key, err := str2Avro(keySchema, argKey)
if err != nil {
log.Fatalf("failed to encode key `%s`: %s", argKey, err)
}
value, err := str2Avro(valueSchema, argValue)
if err != nil {
log.Fatalf("failed to encode value `%s`: %s", argValue, err)
}
sendMessage(producer, flags.topic, flags.partition, key, value, &successes)
} else {
filename := flag.Arg(0)
f := os.Stdin
if filename != "-" {
if f, err = os.Open(filename); err != nil {
log.Fatalf("failed to open %s: %s", filename, err)
}
}
jsonDecoder := json.NewDecoder(f)
for {
var m map[string]interface{}
if err := jsonDecoder.Decode(&m); err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
jsonKey, jsonValue, ok := getKeyValueFromMap(m)
if !ok {
continue
}
key, err := json2Avro(keySchema, jsonKey)
if err != nil {
log.Fatalf("failed to encode key `%v`: %s", jsonKey, err)
}
value, err := json2Avro(valueSchema, jsonValue)
if err != nil {
log.Fatalf("failed to encode value `%v`: %s", jsonValue, err)
}
if done || !sendMessage(producer, flags.topic, flags.partition, key, value, &successes) {
break
}
}
}
}
func runConsumer(flags *Flags) {
var (
err error
count uint64
offset int64
offsetIsTime bool
)
if flags.offset == "begin" {
offset = sarama.OffsetOldest
} else if flags.offset == "end" {
offset = sarama.OffsetNewest
} else if t, err := dateparse.ParseLocal(flags.offset); err == nil {
offsetIsTime = true
offset = t.UnixNano() / 1e6
} else if offset, err = strconv.ParseInt(flags.offset, 10, 64); err != nil || offset < 0 {
log.Fatalln("`offset` must be `begin`, `end`, positive integer or timestamp")
}
registry := srclient.CreateSchemaRegistryClient(flags.registry)
keySchema, _ := registry.GetLatestSchema(flags.topic, true)
valueSchema, _ := registry.GetLatestSchema(flags.topic, false)
hasKeySchema := keySchema != nil
hasValueSchema := valueSchema != nil
config := sarama.NewConfig()
config.Consumer.Return.Errors = true
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create create:", err)
}
defer client.Close()
consumer, err := sarama.NewConsumerFromClient(client)
if err != nil {
log.Fatalln("failed to create consumer:", err)
}
defer consumer.Close()
partitions, err := client.Partitions(flags.topic)
if err != nil {
log.Fatalf("failed to list partitions for topic %s: %s\n", flags.topic, err)
}
wg := sync.WaitGroup{}
lock := sync.Mutex{}
for _, partition := range partitions {
if flags.partition >= 0 && flags.partition != partition {
continue
}
newestOffset, err := client.GetOffset(flags.topic, partition, sarama.OffsetNewest)
if err != nil {
log.Fatalf("failed to get newest offset for topic %s parition %d: %s\n", flags.topic, partition, err)
}
startOffset := offset
if offsetIsTime {
startOffset, err = client.GetOffset(flags.topic, partition, startOffset)
if err != nil {
log.Fatalf("failed to get offset for topic %s partition %d since %s: %s\n", flags.topic, partition, flags.offset, err)
}
}
if startOffset == sarama.OffsetNewest || startOffset >= newestOffset {
if !flags.follow {
continue
} else {
startOffset = newestOffset
}
}
wg.Add(1)
go func(partition int32, newestOffset int64, startOffset int64) {
defer wg.Done()
partitionConsumer, err := consumer.ConsumePartition(flags.topic, partition, startOffset)
if err != nil {
log.Printf("failed to consume partition %d for topic %s: %s\n", partition, flags.topic, err)
return
}
defer partitionConsumer.Close()
for {
select {
case msg := <-partitionConsumer.Messages():
var key, value interface{}
if hasKeySchema {
key, err = decode(registry, msg.Key)
if err != nil {
log.Printf("failed to decode message key, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
} else {
key = string(msg.Key)
}
if hasValueSchema {
value, err = decode(registry, msg.Value)
if err != nil {
log.Printf("failed to decode message value, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
} else {
value = string(msg.Value)
}
m := map[string]interface{}{
"topic": flags.topic,
"partition": partition,
"offset": msg.Offset,
"timestamp": msg.Timestamp,
"key": key,
"value": value,
}
s, err := json.Marshal(m)
if err != nil {
log.Printf("failed to serialize to JSON, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
if flags.count > 0 && atomic.AddUint64(&count, 1) > flags.count {
return
}
lock.Lock()
fmt.Println(string(s))
lock.Unlock()
if !flags.follow && msg.Offset >= newestOffset-1 {
return
}
case <-time.After(2 * time.Second):
if !flags.follow || (flags.count > 0 && atomic.LoadUint64(&count) >= flags.count) {
return
}
case err := <-partitionConsumer.Errors():
log.Printf("failed to consume partition %d for topic %s: %s\n", partition, flags.topic, err)
return
}
}
}(partition, newestOffset, startOffset)
}
wg.Wait()
}
func createSchema(registry *srclient.SchemaRegistryClient, schema string, topic string, isKey bool) (*srclient.Schema, error) {
if schema == "" {
s, err := registry.GetLatestSchema(topic, isKey)
if err != nil && strings.HasPrefix(err.Error(), "404 Not Found") {
return nil, nil
}
return s, err
}
if _, err := os.Stat(schema); os.IsNotExist(err) {
if id, err := strconv.Atoi(schema); err == nil {
return registry.GetSchema(id)
}
return registry.CreateSchema(topic, schema, isKey)
}
content, err := ioutil.ReadFile(schema)
if err != nil {
return nil, err
}
return registry.CreateSchema(topic, string(content), isKey)
}
func | (schema *srclient.Schema, datum interface{}) (sarama.ByteEncoder, error) {
buffer := make([]byte, 5, 256)
buffer[0] = 0
binary.BigEndian.PutUint32(buffer[1:5], uint32(schema.ID()))
bytes, err := schema.Codec().BinaryFromNative(buffer, datum)
if err != nil {
return nil, err
}
return sarama.ByteEncoder(bytes), nil
}
func str2Avro(schema *srclient.Schema, s string) (sarama.Encoder, error) {
if schema == nil {
return sarama.StringEncoder(s), nil
}
var obj interface{}
if err := json.Unmarshal([]byte(s), &obj); err != nil {
return nil, err
}
return encode(schema, obj)
}
func json2Avro(schema *srclient.Schema, obj interface{}) (sarama.Encoder, error) {
if schema != nil {
return encode(schema, obj)
}
if s, ok := obj.(string); ok {
return sarama.StringEncoder(s), nil
}
bytes, err := json.Marshal(obj)
if err != nil {
return nil, err
}
return sarama.ByteEncoder(bytes), nil
}
func sendMessage(producer sarama.SyncProducer, topic string, partition int32, key sarama.Encoder, value sarama.Encoder, successes *int) bool {
msg := sarama.ProducerMessage{Topic: topic, Partition: partition, Key: key, Value: value, Timestamp: time.Now()}
partition, offset, err := producer.SendMessage(&msg)
s, _ := json.Marshal(msg)
if err != nil {
log.Printf("failed to send, err=%s, msg=%s\n", err.Error(), s)
return false
}
*successes++
log.Printf("[%d] partition=%d, offset=%d, msg=%s\n", *successes, partition, offset, s)
return true
}
func getFieldFromMap(m map[string]interface{}, k1 string, k2 string) (interface{}, bool) {
var (
value interface{}
ok bool
)
if value, ok = m[k1]; !ok {
if value, ok = m[k2]; !ok {
log.Printf("no `%s` or `%s` field found in object %s\n", k1, k2, m)
return nil, false
}
}
if value == nil {
log.Printf("skip null %s in object %s\n", k2, m)
return nil, false
}
return value, true
}
func getKeyValueFromMap(m map[string]interface{}) (interface{}, interface{}, bool) {
key, ok1 := getFieldFromMap(m, "Key", "key")
value, ok2 := getFieldFromMap(m, "Value", "value")
return key, value, ok1 && ok2
}
func getMessageByOffset(client sarama.Client, topic string, partition int32, offset int64) (*sarama.ConsumerMessage, error) {
consumer, err := sarama.NewConsumerFromClient(client)
if err != nil {
return nil, err
}
defer consumer.Close()
partitionConsumer, err := consumer.ConsumePartition(topic, partition, offset)
if err != nil {
return nil, err
}
defer partitionConsumer.Close()
select {
case msg := <-partitionConsumer.Messages():
return msg, nil
case err := <-partitionConsumer.Errors():
return nil, err
}
}
func decode(registry *srclient.SchemaRegistryClient, msg []byte) (interface{}, error) {
if msg == nil || len(msg) < 6 {
return nil, errors.New("invalid message")
}
schemaID := binary.BigEndian.Uint32(msg[1:5])
schema, err := registry.GetSchema(int(schemaID))
if err != nil {
return nil, err
}
datum, _, err := schema.Codec().NativeFromBinary(msg[5:])
return datum, err
}
| encode | identifier_name |
main.go | package main
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/signal"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/Shopify/sarama"
"github.com/araddon/dateparse"
"github.com/riferrei/srclient"
flag "github.com/spf13/pflag"
)
const VERSION = "kafkabat v0.1.0"
func main() {
log.SetOutput(os.Stderr)
log.SetFlags(log.Lmicroseconds)
flags := Flags{}
flag.CommandLine.SortFlags = false
flag.Usage = usage
flag.StringVarP(&flags.brokers, "broker", "b", "localhost:9092", "Kafka broker bootstrap servers, separated by comma")
flag.StringVarP(&flags.topic, "topic", "t", "", "Kafka topic name")
flag.Int32VarP(&flags.partition, "partition", "p", -1, "partition, -1 means all")
flag.StringVarP(&flags.offset, "offset", "o", "begin", "offset to start consuming, possile values: begin, end, positive integer, timestamp")
flag.Uint64VarP(&flags.count, "count", "c", 0, "maximum count of messages to consume, 0 means no limit")
flag.StringVarP(&flags.registry, "registry", "r", "http://localhost:8081", "schema regisry URL")
flag.StringVarP(&flags.keySchema, "key-schema", "K", "", "key schema, can be numeric ID, file path or AVRO schema definition")
flag.StringVarP(&flags.valueSchema, "value-schema", "V", "", "value schema, can be numeric ID, file path or AVRO schema definition")
flag.StringVar(&flags.kafkaVersion, "kafka-version", "2.3.0", "Kafka server version")
flag.BoolVarP(&flags.follow, "follow", "f", false, "continue consuming when reach partition end")
flag.BoolVarP(&flags.list, "list", "l", false, "list partition meta information")
flag.BoolVarP(&flags.help, "help", "h", false, "show this help")
flag.BoolVarP(&flags.version, "version", "v", false, "show version")
flag.Parse()
if flags.version {
fmt.Println(VERSION)
return
}
if flags.help {
usage()
os.Exit(1)
}
if flags.list {
listTopic(&flags)
return
}
if flags.topic == "" {
fmt.Fprintln(os.Stderr, "ERROR: `topic` isn't specified!")
usage()
os.Exit(1)
}
if flag.NArg() > 0 {
runProducer(&flags)
} else {
runConsumer(&flags)
}
}
type Flags struct {
brokers string
topic string
partition int32
offset string
count uint64
registry string
keySchema string
valueSchema string
kafkaVersion string
list bool
follow bool
help bool
version bool
}
func usage() {
fmt.Fprint(os.Stderr, `Usage:
Produce:
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--key-schema=SCHEMA] [--value-schema=SCHEMA] key value
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--key-schema=SCHEMA] [--value-schema=SCHEMA] file
If "file" is "-", read line-by-line JSON objects from stdin, the object must contain keys "key" and "value".
Consume:
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--offset=OFFSET] [--count=N] [--follow]
List:
kafkabat KAFKA_OPTS [--topic=TOPIC] --list
KAFKA_OPTS:
[--broker=BROKERS] [--kafka-version=VERSION]
REGISTRY_OPTS
[--registry=URL]
`)
flag.PrintDefaults()
}
func listTopic(flags *Flags) {
var err error
config := sarama.NewConfig()
config.Consumer.Return.Errors = true
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create create:", err)
}
defer client.Close()
topics, err := client.Topics()
if err != nil {
log.Fatalln("failed to get topics:", err)
}
for _, topic := range topics {
if flags.topic != "" && flags.topic != topic {
continue
}
partitions, err := client.Partitions(topic)
if err != nil {
log.Fatalf("failed to list partitions for topic %s: %s\n", topic, err)
}
for _, partition := range partitions {
if flags.partition >= 0 && flags.partition != partition {
continue
}
minOffset, err := client.GetOffset(topic, partition, sarama.OffsetOldest)
if err != nil {
log.Fatalf("failed to get oldest offset for topic %s partition %d: %s\n", topic, partition, err)
}
maxOffset, err := client.GetOffset(topic, partition, sarama.OffsetNewest)
if err != nil {
log.Fatalf("failed to get newest offset for topic %s partition %d: %s\n", topic, partition, err)
}
if minOffset == maxOffset {
fmt.Printf("topic=%s partition=%d minOffset=%d maxOffset=%d\n", topic, partition, minOffset, maxOffset)
} else {
minMsg, err := getMessageByOffset(client, topic, partition, minOffset)
if err != nil {
log.Fatalf("failed to get first message for topic %s partition %d: %s\n", topic, partition, err)
}
// due to holes in segment, it's not reliable to obtain previous message
//maxMsg, err := getMessageByOffset(client, topic, partition, maxOffset-1)
//if err != nil {
// log.Fatalf("failed to get last message for topic %s partition %d: %s\n", topic, partition, err)
//}
fmt.Printf("topic=%s partition=%d minOffset=%d maxOffset=%d minTime=%s\n",
topic, partition, minMsg.Offset, maxOffset, minMsg.Timestamp.Format(time.RFC3339))
}
}
}
}
func runProducer(flags *Flags) {
registry := srclient.CreateSchemaRegistryClient(flags.registry)
keySchema, err := createSchema(registry, flags.keySchema, flags.topic, true)
if err != nil {
log.Fatalln("invalid key schema:", err)
}
valueSchema, err := createSchema(registry, flags.valueSchema, flags.topic, false)
if err != nil {
log.Fatalln("invalid value schema:", err)
}
config := sarama.NewConfig()
config.Producer.Return.Errors = true
config.Producer.Return.Successes = true
config.Producer.Retry.Max = 5
config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Idempotent = true
config.Net.MaxOpenRequests = 1
if flags.partition >= 0 {
config.Producer.Partitioner = sarama.NewManualPartitioner
}
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create client:", err)
}
defer client.Close()
producer, err := sarama.NewSyncProducerFromClient(client)
if err != nil {
log.Fatalln("failed to create producer:", err)
}
defer producer.Close()
done := false
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
go func() {
signal := <-signals
done = true
log.Println("got signal:", signal)
}()
successes := 0
if flag.NArg() > 1 {
argKey, argValue := flag.Arg(0), flag.Arg(1)
key, err := str2Avro(keySchema, argKey)
if err != nil {
log.Fatalf("failed to encode key `%s`: %s", argKey, err)
}
value, err := str2Avro(valueSchema, argValue)
if err != nil {
log.Fatalf("failed to encode value `%s`: %s", argValue, err)
}
sendMessage(producer, flags.topic, flags.partition, key, value, &successes)
} else {
filename := flag.Arg(0)
f := os.Stdin
if filename != "-" {
if f, err = os.Open(filename); err != nil {
log.Fatalf("failed to open %s: %s", filename, err)
}
}
jsonDecoder := json.NewDecoder(f)
for {
var m map[string]interface{}
if err := jsonDecoder.Decode(&m); err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
jsonKey, jsonValue, ok := getKeyValueFromMap(m)
if !ok {
continue
}
key, err := json2Avro(keySchema, jsonKey)
if err != nil {
log.Fatalf("failed to encode key `%v`: %s", jsonKey, err)
}
value, err := json2Avro(valueSchema, jsonValue)
if err != nil {
log.Fatalf("failed to encode value `%v`: %s", jsonValue, err)
}
if done || !sendMessage(producer, flags.topic, flags.partition, key, value, &successes) {
break
}
}
}
}
func runConsumer(flags *Flags) {
var (
err error
count uint64
offset int64
offsetIsTime bool
)
if flags.offset == "begin" {
offset = sarama.OffsetOldest
} else if flags.offset == "end" {
offset = sarama.OffsetNewest
} else if t, err := dateparse.ParseLocal(flags.offset); err == nil {
offsetIsTime = true
offset = t.UnixNano() / 1e6
} else if offset, err = strconv.ParseInt(flags.offset, 10, 64); err != nil || offset < 0 {
log.Fatalln("`offset` must be `begin`, `end`, positive integer or timestamp")
}
registry := srclient.CreateSchemaRegistryClient(flags.registry)
keySchema, _ := registry.GetLatestSchema(flags.topic, true)
valueSchema, _ := registry.GetLatestSchema(flags.topic, false)
hasKeySchema := keySchema != nil
hasValueSchema := valueSchema != nil
config := sarama.NewConfig()
config.Consumer.Return.Errors = true
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create create:", err)
}
defer client.Close()
consumer, err := sarama.NewConsumerFromClient(client)
if err != nil {
log.Fatalln("failed to create consumer:", err)
}
defer consumer.Close()
partitions, err := client.Partitions(flags.topic)
if err != nil {
log.Fatalf("failed to list partitions for topic %s: %s\n", flags.topic, err)
}
wg := sync.WaitGroup{}
lock := sync.Mutex{}
for _, partition := range partitions {
if flags.partition >= 0 && flags.partition != partition {
continue
}
newestOffset, err := client.GetOffset(flags.topic, partition, sarama.OffsetNewest)
if err != nil {
log.Fatalf("failed to get newest offset for topic %s parition %d: %s\n", flags.topic, partition, err)
}
startOffset := offset
if offsetIsTime {
startOffset, err = client.GetOffset(flags.topic, partition, startOffset)
if err != nil {
log.Fatalf("failed to get offset for topic %s partition %d since %s: %s\n", flags.topic, partition, flags.offset, err)
}
}
if startOffset == sarama.OffsetNewest || startOffset >= newestOffset {
if !flags.follow {
continue
} else {
startOffset = newestOffset
}
}
wg.Add(1)
go func(partition int32, newestOffset int64, startOffset int64) {
defer wg.Done()
partitionConsumer, err := consumer.ConsumePartition(flags.topic, partition, startOffset)
if err != nil {
log.Printf("failed to consume partition %d for topic %s: %s\n", partition, flags.topic, err)
return
}
defer partitionConsumer.Close()
for {
select {
case msg := <-partitionConsumer.Messages():
var key, value interface{}
if hasKeySchema {
key, err = decode(registry, msg.Key)
if err != nil {
log.Printf("failed to decode message key, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
} else {
key = string(msg.Key)
}
if hasValueSchema {
value, err = decode(registry, msg.Value)
if err != nil {
log.Printf("failed to decode message value, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
} else {
value = string(msg.Value)
}
m := map[string]interface{}{
"topic": flags.topic,
"partition": partition,
"offset": msg.Offset,
"timestamp": msg.Timestamp,
"key": key,
"value": value,
}
s, err := json.Marshal(m)
if err != nil {
log.Printf("failed to serialize to JSON, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
if flags.count > 0 && atomic.AddUint64(&count, 1) > flags.count {
return
}
lock.Lock()
fmt.Println(string(s))
lock.Unlock()
if !flags.follow && msg.Offset >= newestOffset-1 {
return
}
case <-time.After(2 * time.Second):
if !flags.follow || (flags.count > 0 && atomic.LoadUint64(&count) >= flags.count) {
return
}
case err := <-partitionConsumer.Errors():
log.Printf("failed to consume partition %d for topic %s: %s\n", partition, flags.topic, err)
return
}
}
}(partition, newestOffset, startOffset)
}
wg.Wait()
}
func createSchema(registry *srclient.SchemaRegistryClient, schema string, topic string, isKey bool) (*srclient.Schema, error) {
if schema == "" {
s, err := registry.GetLatestSchema(topic, isKey)
if err != nil && strings.HasPrefix(err.Error(), "404 Not Found") {
return nil, nil
}
return s, err
}
if _, err := os.Stat(schema); os.IsNotExist(err) {
if id, err := strconv.Atoi(schema); err == nil {
return registry.GetSchema(id)
}
return registry.CreateSchema(topic, schema, isKey)
}
content, err := ioutil.ReadFile(schema)
if err != nil {
return nil, err
}
return registry.CreateSchema(topic, string(content), isKey)
}
func encode(schema *srclient.Schema, datum interface{}) (sarama.ByteEncoder, error) {
buffer := make([]byte, 5, 256)
buffer[0] = 0
binary.BigEndian.PutUint32(buffer[1:5], uint32(schema.ID()))
bytes, err := schema.Codec().BinaryFromNative(buffer, datum)
if err != nil {
return nil, err
}
return sarama.ByteEncoder(bytes), nil
}
func str2Avro(schema *srclient.Schema, s string) (sarama.Encoder, error) {
if schema == nil {
return sarama.StringEncoder(s), nil
}
var obj interface{}
if err := json.Unmarshal([]byte(s), &obj); err != nil {
return nil, err
}
return encode(schema, obj)
}
func json2Avro(schema *srclient.Schema, obj interface{}) (sarama.Encoder, error) {
if schema != nil {
return encode(schema, obj)
}
if s, ok := obj.(string); ok {
return sarama.StringEncoder(s), nil
}
bytes, err := json.Marshal(obj)
if err != nil {
return nil, err
}
return sarama.ByteEncoder(bytes), nil
}
func sendMessage(producer sarama.SyncProducer, topic string, partition int32, key sarama.Encoder, value sarama.Encoder, successes *int) bool {
msg := sarama.ProducerMessage{Topic: topic, Partition: partition, Key: key, Value: value, Timestamp: time.Now()}
partition, offset, err := producer.SendMessage(&msg)
s, _ := json.Marshal(msg)
if err != nil {
log.Printf("failed to send, err=%s, msg=%s\n", err.Error(), s)
return false
}
*successes++
log.Printf("[%d] partition=%d, offset=%d, msg=%s\n", *successes, partition, offset, s)
return true
}
func getFieldFromMap(m map[string]interface{}, k1 string, k2 string) (interface{}, bool) {
var (
value interface{}
ok bool
)
if value, ok = m[k1]; !ok {
if value, ok = m[k2]; !ok {
log.Printf("no `%s` or `%s` field found in object %s\n", k1, k2, m)
return nil, false
}
}
if value == nil {
log.Printf("skip null %s in object %s\n", k2, m)
return nil, false
}
return value, true
}
func getKeyValueFromMap(m map[string]interface{}) (interface{}, interface{}, bool) {
key, ok1 := getFieldFromMap(m, "Key", "key")
value, ok2 := getFieldFromMap(m, "Value", "value")
return key, value, ok1 && ok2
}
func getMessageByOffset(client sarama.Client, topic string, partition int32, offset int64) (*sarama.ConsumerMessage, error) |
func decode(registry *srclient.SchemaRegistryClient, msg []byte) (interface{}, error) {
if msg == nil || len(msg) < 6 {
return nil, errors.New("invalid message")
}
schemaID := binary.BigEndian.Uint32(msg[1:5])
schema, err := registry.GetSchema(int(schemaID))
if err != nil {
return nil, err
}
datum, _, err := schema.Codec().NativeFromBinary(msg[5:])
return datum, err
}
| {
consumer, err := sarama.NewConsumerFromClient(client)
if err != nil {
return nil, err
}
defer consumer.Close()
partitionConsumer, err := consumer.ConsumePartition(topic, partition, offset)
if err != nil {
return nil, err
}
defer partitionConsumer.Close()
select {
case msg := <-partitionConsumer.Messages():
return msg, nil
case err := <-partitionConsumer.Errors():
return nil, err
}
} | identifier_body |
main.go | package main
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/signal"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/Shopify/sarama"
"github.com/araddon/dateparse"
"github.com/riferrei/srclient"
flag "github.com/spf13/pflag"
)
const VERSION = "kafkabat v0.1.0"
func main() {
log.SetOutput(os.Stderr)
log.SetFlags(log.Lmicroseconds)
flags := Flags{}
flag.CommandLine.SortFlags = false
flag.Usage = usage
flag.StringVarP(&flags.brokers, "broker", "b", "localhost:9092", "Kafka broker bootstrap servers, separated by comma")
flag.StringVarP(&flags.topic, "topic", "t", "", "Kafka topic name")
flag.Int32VarP(&flags.partition, "partition", "p", -1, "partition, -1 means all")
flag.StringVarP(&flags.offset, "offset", "o", "begin", "offset to start consuming, possile values: begin, end, positive integer, timestamp")
flag.Uint64VarP(&flags.count, "count", "c", 0, "maximum count of messages to consume, 0 means no limit")
flag.StringVarP(&flags.registry, "registry", "r", "http://localhost:8081", "schema regisry URL")
flag.StringVarP(&flags.keySchema, "key-schema", "K", "", "key schema, can be numeric ID, file path or AVRO schema definition")
flag.StringVarP(&flags.valueSchema, "value-schema", "V", "", "value schema, can be numeric ID, file path or AVRO schema definition")
flag.StringVar(&flags.kafkaVersion, "kafka-version", "2.3.0", "Kafka server version")
flag.BoolVarP(&flags.follow, "follow", "f", false, "continue consuming when reach partition end")
flag.BoolVarP(&flags.list, "list", "l", false, "list partition meta information")
flag.BoolVarP(&flags.help, "help", "h", false, "show this help")
flag.BoolVarP(&flags.version, "version", "v", false, "show version")
flag.Parse()
if flags.version {
fmt.Println(VERSION)
return
}
if flags.help |
if flags.list {
listTopic(&flags)
return
}
if flags.topic == "" {
fmt.Fprintln(os.Stderr, "ERROR: `topic` isn't specified!")
usage()
os.Exit(1)
}
if flag.NArg() > 0 {
runProducer(&flags)
} else {
runConsumer(&flags)
}
}
type Flags struct {
brokers string
topic string
partition int32
offset string
count uint64
registry string
keySchema string
valueSchema string
kafkaVersion string
list bool
follow bool
help bool
version bool
}
func usage() {
fmt.Fprint(os.Stderr, `Usage:
Produce:
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--key-schema=SCHEMA] [--value-schema=SCHEMA] key value
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--key-schema=SCHEMA] [--value-schema=SCHEMA] file
If "file" is "-", read line-by-line JSON objects from stdin, the object must contain keys "key" and "value".
Consume:
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--offset=OFFSET] [--count=N] [--follow]
List:
kafkabat KAFKA_OPTS [--topic=TOPIC] --list
KAFKA_OPTS:
[--broker=BROKERS] [--kafka-version=VERSION]
REGISTRY_OPTS
[--registry=URL]
`)
flag.PrintDefaults()
}
func listTopic(flags *Flags) {
var err error
config := sarama.NewConfig()
config.Consumer.Return.Errors = true
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create create:", err)
}
defer client.Close()
topics, err := client.Topics()
if err != nil {
log.Fatalln("failed to get topics:", err)
}
for _, topic := range topics {
if flags.topic != "" && flags.topic != topic {
continue
}
partitions, err := client.Partitions(topic)
if err != nil {
log.Fatalf("failed to list partitions for topic %s: %s\n", topic, err)
}
for _, partition := range partitions {
if flags.partition >= 0 && flags.partition != partition {
continue
}
minOffset, err := client.GetOffset(topic, partition, sarama.OffsetOldest)
if err != nil {
log.Fatalf("failed to get oldest offset for topic %s partition %d: %s\n", topic, partition, err)
}
maxOffset, err := client.GetOffset(topic, partition, sarama.OffsetNewest)
if err != nil {
log.Fatalf("failed to get newest offset for topic %s partition %d: %s\n", topic, partition, err)
}
if minOffset == maxOffset {
fmt.Printf("topic=%s partition=%d minOffset=%d maxOffset=%d\n", topic, partition, minOffset, maxOffset)
} else {
minMsg, err := getMessageByOffset(client, topic, partition, minOffset)
if err != nil {
log.Fatalf("failed to get first message for topic %s partition %d: %s\n", topic, partition, err)
}
// due to holes in segment, it's not reliable to obtain previous message
//maxMsg, err := getMessageByOffset(client, topic, partition, maxOffset-1)
//if err != nil {
// log.Fatalf("failed to get last message for topic %s partition %d: %s\n", topic, partition, err)
//}
fmt.Printf("topic=%s partition=%d minOffset=%d maxOffset=%d minTime=%s\n",
topic, partition, minMsg.Offset, maxOffset, minMsg.Timestamp.Format(time.RFC3339))
}
}
}
}
func runProducer(flags *Flags) {
registry := srclient.CreateSchemaRegistryClient(flags.registry)
keySchema, err := createSchema(registry, flags.keySchema, flags.topic, true)
if err != nil {
log.Fatalln("invalid key schema:", err)
}
valueSchema, err := createSchema(registry, flags.valueSchema, flags.topic, false)
if err != nil {
log.Fatalln("invalid value schema:", err)
}
config := sarama.NewConfig()
config.Producer.Return.Errors = true
config.Producer.Return.Successes = true
config.Producer.Retry.Max = 5
config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Idempotent = true
config.Net.MaxOpenRequests = 1
if flags.partition >= 0 {
config.Producer.Partitioner = sarama.NewManualPartitioner
}
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create client:", err)
}
defer client.Close()
producer, err := sarama.NewSyncProducerFromClient(client)
if err != nil {
log.Fatalln("failed to create producer:", err)
}
defer producer.Close()
done := false
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
go func() {
signal := <-signals
done = true
log.Println("got signal:", signal)
}()
successes := 0
if flag.NArg() > 1 {
argKey, argValue := flag.Arg(0), flag.Arg(1)
key, err := str2Avro(keySchema, argKey)
if err != nil {
log.Fatalf("failed to encode key `%s`: %s", argKey, err)
}
value, err := str2Avro(valueSchema, argValue)
if err != nil {
log.Fatalf("failed to encode value `%s`: %s", argValue, err)
}
sendMessage(producer, flags.topic, flags.partition, key, value, &successes)
} else {
filename := flag.Arg(0)
f := os.Stdin
if filename != "-" {
if f, err = os.Open(filename); err != nil {
log.Fatalf("failed to open %s: %s", filename, err)
}
}
jsonDecoder := json.NewDecoder(f)
for {
var m map[string]interface{}
if err := jsonDecoder.Decode(&m); err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
jsonKey, jsonValue, ok := getKeyValueFromMap(m)
if !ok {
continue
}
key, err := json2Avro(keySchema, jsonKey)
if err != nil {
log.Fatalf("failed to encode key `%v`: %s", jsonKey, err)
}
value, err := json2Avro(valueSchema, jsonValue)
if err != nil {
log.Fatalf("failed to encode value `%v`: %s", jsonValue, err)
}
if done || !sendMessage(producer, flags.topic, flags.partition, key, value, &successes) {
break
}
}
}
}
func runConsumer(flags *Flags) {
var (
err error
count uint64
offset int64
offsetIsTime bool
)
if flags.offset == "begin" {
offset = sarama.OffsetOldest
} else if flags.offset == "end" {
offset = sarama.OffsetNewest
} else if t, err := dateparse.ParseLocal(flags.offset); err == nil {
offsetIsTime = true
offset = t.UnixNano() / 1e6
} else if offset, err = strconv.ParseInt(flags.offset, 10, 64); err != nil || offset < 0 {
log.Fatalln("`offset` must be `begin`, `end`, positive integer or timestamp")
}
registry := srclient.CreateSchemaRegistryClient(flags.registry)
keySchema, _ := registry.GetLatestSchema(flags.topic, true)
valueSchema, _ := registry.GetLatestSchema(flags.topic, false)
hasKeySchema := keySchema != nil
hasValueSchema := valueSchema != nil
config := sarama.NewConfig()
config.Consumer.Return.Errors = true
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create create:", err)
}
defer client.Close()
consumer, err := sarama.NewConsumerFromClient(client)
if err != nil {
log.Fatalln("failed to create consumer:", err)
}
defer consumer.Close()
partitions, err := client.Partitions(flags.topic)
if err != nil {
log.Fatalf("failed to list partitions for topic %s: %s\n", flags.topic, err)
}
wg := sync.WaitGroup{}
lock := sync.Mutex{}
for _, partition := range partitions {
if flags.partition >= 0 && flags.partition != partition {
continue
}
newestOffset, err := client.GetOffset(flags.topic, partition, sarama.OffsetNewest)
if err != nil {
log.Fatalf("failed to get newest offset for topic %s parition %d: %s\n", flags.topic, partition, err)
}
startOffset := offset
if offsetIsTime {
startOffset, err = client.GetOffset(flags.topic, partition, startOffset)
if err != nil {
log.Fatalf("failed to get offset for topic %s partition %d since %s: %s\n", flags.topic, partition, flags.offset, err)
}
}
if startOffset == sarama.OffsetNewest || startOffset >= newestOffset {
if !flags.follow {
continue
} else {
startOffset = newestOffset
}
}
wg.Add(1)
go func(partition int32, newestOffset int64, startOffset int64) {
defer wg.Done()
partitionConsumer, err := consumer.ConsumePartition(flags.topic, partition, startOffset)
if err != nil {
log.Printf("failed to consume partition %d for topic %s: %s\n", partition, flags.topic, err)
return
}
defer partitionConsumer.Close()
for {
select {
case msg := <-partitionConsumer.Messages():
var key, value interface{}
if hasKeySchema {
key, err = decode(registry, msg.Key)
if err != nil {
log.Printf("failed to decode message key, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
} else {
key = string(msg.Key)
}
if hasValueSchema {
value, err = decode(registry, msg.Value)
if err != nil {
log.Printf("failed to decode message value, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
} else {
value = string(msg.Value)
}
m := map[string]interface{}{
"topic": flags.topic,
"partition": partition,
"offset": msg.Offset,
"timestamp": msg.Timestamp,
"key": key,
"value": value,
}
s, err := json.Marshal(m)
if err != nil {
log.Printf("failed to serialize to JSON, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
if flags.count > 0 && atomic.AddUint64(&count, 1) > flags.count {
return
}
lock.Lock()
fmt.Println(string(s))
lock.Unlock()
if !flags.follow && msg.Offset >= newestOffset-1 {
return
}
case <-time.After(2 * time.Second):
if !flags.follow || (flags.count > 0 && atomic.LoadUint64(&count) >= flags.count) {
return
}
case err := <-partitionConsumer.Errors():
log.Printf("failed to consume partition %d for topic %s: %s\n", partition, flags.topic, err)
return
}
}
}(partition, newestOffset, startOffset)
}
wg.Wait()
}
func createSchema(registry *srclient.SchemaRegistryClient, schema string, topic string, isKey bool) (*srclient.Schema, error) {
if schema == "" {
s, err := registry.GetLatestSchema(topic, isKey)
if err != nil && strings.HasPrefix(err.Error(), "404 Not Found") {
return nil, nil
}
return s, err
}
if _, err := os.Stat(schema); os.IsNotExist(err) {
if id, err := strconv.Atoi(schema); err == nil {
return registry.GetSchema(id)
}
return registry.CreateSchema(topic, schema, isKey)
}
content, err := ioutil.ReadFile(schema)
if err != nil {
return nil, err
}
return registry.CreateSchema(topic, string(content), isKey)
}
func encode(schema *srclient.Schema, datum interface{}) (sarama.ByteEncoder, error) {
buffer := make([]byte, 5, 256)
buffer[0] = 0
binary.BigEndian.PutUint32(buffer[1:5], uint32(schema.ID()))
bytes, err := schema.Codec().BinaryFromNative(buffer, datum)
if err != nil {
return nil, err
}
return sarama.ByteEncoder(bytes), nil
}
func str2Avro(schema *srclient.Schema, s string) (sarama.Encoder, error) {
if schema == nil {
return sarama.StringEncoder(s), nil
}
var obj interface{}
if err := json.Unmarshal([]byte(s), &obj); err != nil {
return nil, err
}
return encode(schema, obj)
}
func json2Avro(schema *srclient.Schema, obj interface{}) (sarama.Encoder, error) {
if schema != nil {
return encode(schema, obj)
}
if s, ok := obj.(string); ok {
return sarama.StringEncoder(s), nil
}
bytes, err := json.Marshal(obj)
if err != nil {
return nil, err
}
return sarama.ByteEncoder(bytes), nil
}
func sendMessage(producer sarama.SyncProducer, topic string, partition int32, key sarama.Encoder, value sarama.Encoder, successes *int) bool {
msg := sarama.ProducerMessage{Topic: topic, Partition: partition, Key: key, Value: value, Timestamp: time.Now()}
partition, offset, err := producer.SendMessage(&msg)
s, _ := json.Marshal(msg)
if err != nil {
log.Printf("failed to send, err=%s, msg=%s\n", err.Error(), s)
return false
}
*successes++
log.Printf("[%d] partition=%d, offset=%d, msg=%s\n", *successes, partition, offset, s)
return true
}
func getFieldFromMap(m map[string]interface{}, k1 string, k2 string) (interface{}, bool) {
var (
value interface{}
ok bool
)
if value, ok = m[k1]; !ok {
if value, ok = m[k2]; !ok {
log.Printf("no `%s` or `%s` field found in object %s\n", k1, k2, m)
return nil, false
}
}
if value == nil {
log.Printf("skip null %s in object %s\n", k2, m)
return nil, false
}
return value, true
}
func getKeyValueFromMap(m map[string]interface{}) (interface{}, interface{}, bool) {
key, ok1 := getFieldFromMap(m, "Key", "key")
value, ok2 := getFieldFromMap(m, "Value", "value")
return key, value, ok1 && ok2
}
func getMessageByOffset(client sarama.Client, topic string, partition int32, offset int64) (*sarama.ConsumerMessage, error) {
consumer, err := sarama.NewConsumerFromClient(client)
if err != nil {
return nil, err
}
defer consumer.Close()
partitionConsumer, err := consumer.ConsumePartition(topic, partition, offset)
if err != nil {
return nil, err
}
defer partitionConsumer.Close()
select {
case msg := <-partitionConsumer.Messages():
return msg, nil
case err := <-partitionConsumer.Errors():
return nil, err
}
}
func decode(registry *srclient.SchemaRegistryClient, msg []byte) (interface{}, error) {
if msg == nil || len(msg) < 6 {
return nil, errors.New("invalid message")
}
schemaID := binary.BigEndian.Uint32(msg[1:5])
schema, err := registry.GetSchema(int(schemaID))
if err != nil {
return nil, err
}
datum, _, err := schema.Codec().NativeFromBinary(msg[5:])
return datum, err
}
| {
usage()
os.Exit(1)
} | conditional_block |
main.go | package main
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/signal"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/Shopify/sarama"
"github.com/araddon/dateparse"
"github.com/riferrei/srclient"
flag "github.com/spf13/pflag"
)
const VERSION = "kafkabat v0.1.0"
func main() {
log.SetOutput(os.Stderr)
log.SetFlags(log.Lmicroseconds)
flags := Flags{}
flag.CommandLine.SortFlags = false
flag.Usage = usage
flag.StringVarP(&flags.brokers, "broker", "b", "localhost:9092", "Kafka broker bootstrap servers, separated by comma")
flag.StringVarP(&flags.topic, "topic", "t", "", "Kafka topic name")
flag.Int32VarP(&flags.partition, "partition", "p", -1, "partition, -1 means all")
flag.StringVarP(&flags.offset, "offset", "o", "begin", "offset to start consuming, possile values: begin, end, positive integer, timestamp")
flag.Uint64VarP(&flags.count, "count", "c", 0, "maximum count of messages to consume, 0 means no limit")
flag.StringVarP(&flags.registry, "registry", "r", "http://localhost:8081", "schema regisry URL")
flag.StringVarP(&flags.keySchema, "key-schema", "K", "", "key schema, can be numeric ID, file path or AVRO schema definition")
flag.StringVarP(&flags.valueSchema, "value-schema", "V", "", "value schema, can be numeric ID, file path or AVRO schema definition")
flag.StringVar(&flags.kafkaVersion, "kafka-version", "2.3.0", "Kafka server version")
flag.BoolVarP(&flags.follow, "follow", "f", false, "continue consuming when reach partition end")
flag.BoolVarP(&flags.list, "list", "l", false, "list partition meta information")
flag.BoolVarP(&flags.help, "help", "h", false, "show this help")
flag.BoolVarP(&flags.version, "version", "v", false, "show version")
flag.Parse()
if flags.version {
fmt.Println(VERSION)
return
}
if flags.help {
usage()
os.Exit(1)
}
if flags.list {
listTopic(&flags)
return
}
if flags.topic == "" {
fmt.Fprintln(os.Stderr, "ERROR: `topic` isn't specified!")
usage()
os.Exit(1)
}
if flag.NArg() > 0 {
runProducer(&flags)
} else {
runConsumer(&flags)
}
}
type Flags struct {
brokers string
topic string
partition int32
offset string
count uint64
registry string
keySchema string
valueSchema string
kafkaVersion string
list bool
follow bool
help bool
version bool
}
func usage() {
fmt.Fprint(os.Stderr, `Usage:
Produce:
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--key-schema=SCHEMA] [--value-schema=SCHEMA] key value
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--key-schema=SCHEMA] [--value-schema=SCHEMA] file
If "file" is "-", read line-by-line JSON objects from stdin, the object must contain keys "key" and "value".
Consume:
kafkabat KAFKA_OPTS REGISTRY_OPTS --topic=TOPIC [--partition=N] [--offset=OFFSET] [--count=N] [--follow]
List:
kafkabat KAFKA_OPTS [--topic=TOPIC] --list
KAFKA_OPTS:
[--broker=BROKERS] [--kafka-version=VERSION]
REGISTRY_OPTS
[--registry=URL]
`)
flag.PrintDefaults()
}
func listTopic(flags *Flags) {
var err error
config := sarama.NewConfig()
config.Consumer.Return.Errors = true
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create create:", err)
}
defer client.Close()
topics, err := client.Topics()
if err != nil {
log.Fatalln("failed to get topics:", err)
}
for _, topic := range topics {
if flags.topic != "" && flags.topic != topic {
continue
}
partitions, err := client.Partitions(topic)
if err != nil {
log.Fatalf("failed to list partitions for topic %s: %s\n", topic, err)
}
for _, partition := range partitions {
if flags.partition >= 0 && flags.partition != partition {
continue
}
minOffset, err := client.GetOffset(topic, partition, sarama.OffsetOldest)
if err != nil {
log.Fatalf("failed to get oldest offset for topic %s partition %d: %s\n", topic, partition, err)
}
maxOffset, err := client.GetOffset(topic, partition, sarama.OffsetNewest)
if err != nil {
log.Fatalf("failed to get newest offset for topic %s partition %d: %s\n", topic, partition, err)
}
if minOffset == maxOffset {
fmt.Printf("topic=%s partition=%d minOffset=%d maxOffset=%d\n", topic, partition, minOffset, maxOffset)
} else {
minMsg, err := getMessageByOffset(client, topic, partition, minOffset)
if err != nil {
log.Fatalf("failed to get first message for topic %s partition %d: %s\n", topic, partition, err)
}
// due to holes in segment, it's not reliable to obtain previous message
//maxMsg, err := getMessageByOffset(client, topic, partition, maxOffset-1)
//if err != nil {
// log.Fatalf("failed to get last message for topic %s partition %d: %s\n", topic, partition, err)
//}
fmt.Printf("topic=%s partition=%d minOffset=%d maxOffset=%d minTime=%s\n",
topic, partition, minMsg.Offset, maxOffset, minMsg.Timestamp.Format(time.RFC3339))
}
}
}
}
func runProducer(flags *Flags) {
registry := srclient.CreateSchemaRegistryClient(flags.registry)
keySchema, err := createSchema(registry, flags.keySchema, flags.topic, true)
if err != nil {
log.Fatalln("invalid key schema:", err)
}
valueSchema, err := createSchema(registry, flags.valueSchema, flags.topic, false)
if err != nil {
log.Fatalln("invalid value schema:", err)
}
config := sarama.NewConfig()
config.Producer.Return.Errors = true
config.Producer.Return.Successes = true
config.Producer.Retry.Max = 5
config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Idempotent = true
config.Net.MaxOpenRequests = 1
if flags.partition >= 0 {
config.Producer.Partitioner = sarama.NewManualPartitioner
}
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create client:", err)
}
defer client.Close()
producer, err := sarama.NewSyncProducerFromClient(client)
if err != nil {
log.Fatalln("failed to create producer:", err)
}
defer producer.Close()
done := false
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
go func() {
signal := <-signals
done = true
log.Println("got signal:", signal)
}()
successes := 0
if flag.NArg() > 1 {
argKey, argValue := flag.Arg(0), flag.Arg(1)
key, err := str2Avro(keySchema, argKey)
if err != nil {
log.Fatalf("failed to encode key `%s`: %s", argKey, err)
}
value, err := str2Avro(valueSchema, argValue)
if err != nil {
log.Fatalf("failed to encode value `%s`: %s", argValue, err)
}
sendMessage(producer, flags.topic, flags.partition, key, value, &successes)
} else {
filename := flag.Arg(0)
f := os.Stdin
if filename != "-" {
if f, err = os.Open(filename); err != nil {
log.Fatalf("failed to open %s: %s", filename, err)
}
}
jsonDecoder := json.NewDecoder(f)
for {
var m map[string]interface{}
if err := jsonDecoder.Decode(&m); err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
jsonKey, jsonValue, ok := getKeyValueFromMap(m)
if !ok {
continue
}
key, err := json2Avro(keySchema, jsonKey)
if err != nil {
log.Fatalf("failed to encode key `%v`: %s", jsonKey, err)
}
value, err := json2Avro(valueSchema, jsonValue)
if err != nil {
log.Fatalf("failed to encode value `%v`: %s", jsonValue, err)
}
if done || !sendMessage(producer, flags.topic, flags.partition, key, value, &successes) {
break
}
}
}
}
func runConsumer(flags *Flags) {
var (
err error
count uint64
offset int64
offsetIsTime bool
)
if flags.offset == "begin" {
offset = sarama.OffsetOldest
} else if flags.offset == "end" {
offset = sarama.OffsetNewest
} else if t, err := dateparse.ParseLocal(flags.offset); err == nil {
offsetIsTime = true
offset = t.UnixNano() / 1e6
} else if offset, err = strconv.ParseInt(flags.offset, 10, 64); err != nil || offset < 0 {
log.Fatalln("`offset` must be `begin`, `end`, positive integer or timestamp")
}
registry := srclient.CreateSchemaRegistryClient(flags.registry)
keySchema, _ := registry.GetLatestSchema(flags.topic, true)
valueSchema, _ := registry.GetLatestSchema(flags.topic, false)
hasKeySchema := keySchema != nil
hasValueSchema := valueSchema != nil
config := sarama.NewConfig()
config.Consumer.Return.Errors = true
if config.Version, err = sarama.ParseKafkaVersion(flags.kafkaVersion); err != nil {
log.Fatalln("invalid kafka version")
}
|
consumer, err := sarama.NewConsumerFromClient(client)
if err != nil {
log.Fatalln("failed to create consumer:", err)
}
defer consumer.Close()
partitions, err := client.Partitions(flags.topic)
if err != nil {
log.Fatalf("failed to list partitions for topic %s: %s\n", flags.topic, err)
}
wg := sync.WaitGroup{}
lock := sync.Mutex{}
for _, partition := range partitions {
if flags.partition >= 0 && flags.partition != partition {
continue
}
newestOffset, err := client.GetOffset(flags.topic, partition, sarama.OffsetNewest)
if err != nil {
log.Fatalf("failed to get newest offset for topic %s parition %d: %s\n", flags.topic, partition, err)
}
startOffset := offset
if offsetIsTime {
startOffset, err = client.GetOffset(flags.topic, partition, startOffset)
if err != nil {
log.Fatalf("failed to get offset for topic %s partition %d since %s: %s\n", flags.topic, partition, flags.offset, err)
}
}
if startOffset == sarama.OffsetNewest || startOffset >= newestOffset {
if !flags.follow {
continue
} else {
startOffset = newestOffset
}
}
wg.Add(1)
go func(partition int32, newestOffset int64, startOffset int64) {
defer wg.Done()
partitionConsumer, err := consumer.ConsumePartition(flags.topic, partition, startOffset)
if err != nil {
log.Printf("failed to consume partition %d for topic %s: %s\n", partition, flags.topic, err)
return
}
defer partitionConsumer.Close()
for {
select {
case msg := <-partitionConsumer.Messages():
var key, value interface{}
if hasKeySchema {
key, err = decode(registry, msg.Key)
if err != nil {
log.Printf("failed to decode message key, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
} else {
key = string(msg.Key)
}
if hasValueSchema {
value, err = decode(registry, msg.Value)
if err != nil {
log.Printf("failed to decode message value, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
} else {
value = string(msg.Value)
}
m := map[string]interface{}{
"topic": flags.topic,
"partition": partition,
"offset": msg.Offset,
"timestamp": msg.Timestamp,
"key": key,
"value": value,
}
s, err := json.Marshal(m)
if err != nil {
log.Printf("failed to serialize to JSON, topic=%s partition=%d offset=%d: %s\n", flags.topic, partition, msg.Offset, err)
break
}
if flags.count > 0 && atomic.AddUint64(&count, 1) > flags.count {
return
}
lock.Lock()
fmt.Println(string(s))
lock.Unlock()
if !flags.follow && msg.Offset >= newestOffset-1 {
return
}
case <-time.After(2 * time.Second):
if !flags.follow || (flags.count > 0 && atomic.LoadUint64(&count) >= flags.count) {
return
}
case err := <-partitionConsumer.Errors():
log.Printf("failed to consume partition %d for topic %s: %s\n", partition, flags.topic, err)
return
}
}
}(partition, newestOffset, startOffset)
}
wg.Wait()
}
func createSchema(registry *srclient.SchemaRegistryClient, schema string, topic string, isKey bool) (*srclient.Schema, error) {
if schema == "" {
s, err := registry.GetLatestSchema(topic, isKey)
if err != nil && strings.HasPrefix(err.Error(), "404 Not Found") {
return nil, nil
}
return s, err
}
if _, err := os.Stat(schema); os.IsNotExist(err) {
if id, err := strconv.Atoi(schema); err == nil {
return registry.GetSchema(id)
}
return registry.CreateSchema(topic, schema, isKey)
}
content, err := ioutil.ReadFile(schema)
if err != nil {
return nil, err
}
return registry.CreateSchema(topic, string(content), isKey)
}
func encode(schema *srclient.Schema, datum interface{}) (sarama.ByteEncoder, error) {
buffer := make([]byte, 5, 256)
buffer[0] = 0
binary.BigEndian.PutUint32(buffer[1:5], uint32(schema.ID()))
bytes, err := schema.Codec().BinaryFromNative(buffer, datum)
if err != nil {
return nil, err
}
return sarama.ByteEncoder(bytes), nil
}
func str2Avro(schema *srclient.Schema, s string) (sarama.Encoder, error) {
if schema == nil {
return sarama.StringEncoder(s), nil
}
var obj interface{}
if err := json.Unmarshal([]byte(s), &obj); err != nil {
return nil, err
}
return encode(schema, obj)
}
func json2Avro(schema *srclient.Schema, obj interface{}) (sarama.Encoder, error) {
if schema != nil {
return encode(schema, obj)
}
if s, ok := obj.(string); ok {
return sarama.StringEncoder(s), nil
}
bytes, err := json.Marshal(obj)
if err != nil {
return nil, err
}
return sarama.ByteEncoder(bytes), nil
}
func sendMessage(producer sarama.SyncProducer, topic string, partition int32, key sarama.Encoder, value sarama.Encoder, successes *int) bool {
msg := sarama.ProducerMessage{Topic: topic, Partition: partition, Key: key, Value: value, Timestamp: time.Now()}
partition, offset, err := producer.SendMessage(&msg)
s, _ := json.Marshal(msg)
if err != nil {
log.Printf("failed to send, err=%s, msg=%s\n", err.Error(), s)
return false
}
*successes++
log.Printf("[%d] partition=%d, offset=%d, msg=%s\n", *successes, partition, offset, s)
return true
}
func getFieldFromMap(m map[string]interface{}, k1 string, k2 string) (interface{}, bool) {
var (
value interface{}
ok bool
)
if value, ok = m[k1]; !ok {
if value, ok = m[k2]; !ok {
log.Printf("no `%s` or `%s` field found in object %s\n", k1, k2, m)
return nil, false
}
}
if value == nil {
log.Printf("skip null %s in object %s\n", k2, m)
return nil, false
}
return value, true
}
func getKeyValueFromMap(m map[string]interface{}) (interface{}, interface{}, bool) {
key, ok1 := getFieldFromMap(m, "Key", "key")
value, ok2 := getFieldFromMap(m, "Value", "value")
return key, value, ok1 && ok2
}
func getMessageByOffset(client sarama.Client, topic string, partition int32, offset int64) (*sarama.ConsumerMessage, error) {
consumer, err := sarama.NewConsumerFromClient(client)
if err != nil {
return nil, err
}
defer consumer.Close()
partitionConsumer, err := consumer.ConsumePartition(topic, partition, offset)
if err != nil {
return nil, err
}
defer partitionConsumer.Close()
select {
case msg := <-partitionConsumer.Messages():
return msg, nil
case err := <-partitionConsumer.Errors():
return nil, err
}
}
func decode(registry *srclient.SchemaRegistryClient, msg []byte) (interface{}, error) {
if msg == nil || len(msg) < 6 {
return nil, errors.New("invalid message")
}
schemaID := binary.BigEndian.Uint32(msg[1:5])
schema, err := registry.GetSchema(int(schemaID))
if err != nil {
return nil, err
}
datum, _, err := schema.Codec().NativeFromBinary(msg[5:])
return datum, err
} | client, err := sarama.NewClient(strings.Split(flags.brokers, ","), config)
if err != nil {
log.Fatalln("failed to create create:", err)
}
defer client.Close() | random_line_split |
server.rs | #[feature(struct_variant)];
#[feature(macro_rules)];
use std::io::net::ip::{Ipv4Addr, SocketAddr};
use std::io::net::udp::{UdpSocket, UdpStream};
use std::io::timer;
use udptransport::UdpTransport;
use transport::RaftRpcTransport;
use rpc::{ServerId, LogEntry, AppendEntries, AppendEntriesResponse, RequestVote, RequestVoteResponse, RaftRpc};
use rpc::{AppendEntriesRpc, AppendEntriesResponseRpc, RequestVoteRpc, RequestVoteResponseRpc};
use std::os;
use std::vec;
use std::rand;
#[path="./rust-osc/osc.rs"]
mod osc;
mod udptransport;
mod transport;
mod rpc;
enum ServerType {
Follower,
Candidate,
Leader
}
struct RaftServer {
currentTerm: int,
votedFor: Option<ServerId>,
log: ~[LogEntry],
commitIndex: int,
lastApplied: int,
serverType: ServerType,
electionTimeout: int,
receivedVotes: int,
// Leader state:
// for each server, index of the next log entry to send to that
// server, initialized to last log index + 1
nextIndex: ~[int],
// for each server, index of highest log entry known to be
// replicated on server, initialized to 0
matchIndex: ~[int],
// current set of servers
servers: ~[ServerId],
// serverId corresponding to self
serverId: ServerId,
// transport layer to send RPC's over
transport: ~RaftRpcTransport
}
impl RaftServer {
fn new(transport: ~RaftRpcTransport, serverId: ServerId, servers: ~[ServerId]) -> RaftServer {
return RaftServer {
currentTerm: 0,
votedFor: None,
log: ~[],
commitIndex: 0,
lastApplied: 0,
electionTimeout: 0,
receivedVotes: 0,
serverType: Follower,
nextIndex: vec::with_capacity(servers.len()),
matchIndex: vec::with_capacity(servers.len()),
servers: servers,
serverId: serverId,
transport: transport
}
}
fn run(&mut self) {
loop {
match self.serverType {
Candidate => self.candidateStep(),
Follower => self.followerStep(),
Leader => self.leaderStep()
}
}
}
// Act as a candidate
// if votes received from a majority of servers become leader
// if appendentries received from new leader convert to follower
// if election times out try again
fn candidateStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.candidateRespond(rpc),
None => {}
}
if self.receivedVotes > (self.servers.len()/2) as int {
self.convertToLeader();
}
}
// Respond as a candidate to a given RPC
// RequestVoteResponse with success means we get a vote :D
// AppendEntries with term >= our term means we lost T_T
fn candidateRespond(&mut self, rpc: RaftRpc) {
match rpc {
RequestVoteResponse(rvr) => self.candidateRequestVoteResponse(rvr),
AppendEntries(ae) => self.candidateAppendEntries(ae),
_ => {}
};
}
fn candidateRequestVoteResponse(&mut self, rpc: RequestVoteResponseRpc) {
if rpc.voteGranted {
// TODO check to see if the server already voted for us this cycle
self.receivedVotes += 1; | if rpc.term >= self.currentTerm {
// we lost the election... D:
self.convertToFollower();
}
// pretend we didn't hear them whether or not they won, the resend will occur anyway
}
// Update the server when it is a Follower
// Paper:
// Respond to RPCs from candidates and leaders
// If election timeout elapses without receiving AppendEntries RPC
// or granting vote to candidate: convert to candidate
fn followerStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.followerRespond(rpc),
None => {}
}
self.electionTimeout -= 1;
if self.electionTimeout < 0 {
self.convertToCandidate();
}
}
// Respond to an incoming RPC as a follower
fn followerRespond(&mut self, rpc: RaftRpc) {
let response = match rpc {
AppendEntries(ref ae) => Some(self.followerAppendEntries(ae)),
RequestVote(ref rv) => Some(self.followerRequestVote(rv)),
_ => None
};
match response {
// send response to original rpc sender
Some(responseRpc) => self.transport.sendRpc(rpc.sender(), &responseRpc),
None => {}
}
}
// As a follower, handle an appendEntries RPC
fn followerAppendEntries(&mut self, rpc: &AppendEntriesRpc) -> RaftRpc {
let fail = AppendEntriesResponse(AppendEntriesResponseRpc{sender: self.serverId, term: self.currentTerm, success: false, logIndex: 0});
if rpc.term < self.currentTerm {
return fail;
}
// If log doesn't contain an entry with matching term return false
if rpc.prevLogIndex < self.log.len() as int {
if self.log[rpc.prevLogIndex].term != rpc.prevLogTerm {
return fail;
}
} else {
return fail;
}
// 3. If an existing entry conflicts with a new one delete the
// existing entry and all that follow it
let startLogIndex = rpc.prevLogIndex+1;
for logOffset in range(0, rpc.entries.len()) {
let logIndex = startLogIndex + logOffset as int;
let entry = rpc.entries[logOffset].clone();
if logIndex < self.log.len() as int {
if self.log[logIndex].term != entry.term {
// delete it and all following
self.log.truncate(logIndex as uint);
self.log.push(entry);
}
} else {
self.log.push(entry);
}
}
return AppendEntriesResponse(AppendEntriesResponseRpc {
sender: self.serverId, term: self.currentTerm, success: true,
logIndex: (self.log.len() - 1) as int
});
}
// As a follower handle a requestVote rpc
// From paper:
// 1. Reply false if term < currentTerm
// 2. If votedFor is null or candidateId and candidate's log is
// at least as up-to-date as receiver's log, grant vote
fn followerRequestVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
let fail = RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: false});
if rpc.term < self.currentTerm {
return fail;
}
// if we haven't voted for anything or we voted for candidate
match self.votedFor {
None => {
return self.followerVote(rpc);
},
Some(id) if rpc.candidateId == id => {
return self.followerVote(rpc);
}
_ => {
return fail;
}
}
}
fn followerVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
// if the candidate's log is at least as up-to-date as ours vote for them
let mut voteGranted = false;
let lastLogIndex = (self.log.len() - 1) as int;
if self.log.len() == 0 || (rpc.lastLogIndex >= lastLogIndex &&
rpc.lastLogTerm >= self.log[lastLogIndex].term) {
self.votedFor = Some(rpc.candidateId);
voteGranted = true
}
return RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: voteGranted});
}
// Update as a leader
// Paper:
// If last log index > nextIndex for a follower send AppendEntries RPC with log entries starting at nextIndex
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
// If there exists an N such that N > commitIndex, a majority of matchIndex >= N and log[N].term == currentTerm
// set commitIndex = N
fn leaderStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.leaderRespond(rpc),
None => {}
}
}
fn leaderRespond(&mut self, rpc: RaftRpc) {
match rpc {
AppendEntriesResponse(aer) => self.leaderAppendEntriesResponse(aer),
_ => {}
}
}
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
fn leaderAppendEntriesResponse(&mut self, rpc: AppendEntriesResponseRpc) {
let followerIndex = self.getServerIndex(rpc.sender);
if rpc.success {
self.nextIndex[followerIndex] = rpc.logIndex;
self.matchIndex[followerIndex] = rpc.logIndex;
} else {
if self.nextIndex[followerIndex] > 0 {
self.nextIndex[followerIndex] -= 1;
}
}
}
// Become a candidate (start election)
fn convertToCandidate(&mut self) {
self.serverType = Candidate;
self.currentTerm += 1;
self.receivedVotes = 1; // vote for self
self.setNewTimeout();
// RequestVote {sender: ServerId, term: int, candidateId: ServerId, lastLogIndex: int, lastLogTerm: int},
let lastLogIndex = (self.log.len() - 1) as int;
let requestVote = RequestVote(RequestVoteRpc {
sender: self.serverId, term: self.currentTerm,
candidateId: self.serverId, lastLogIndex: lastLogIndex,
lastLogTerm: self.log[lastLogIndex].term
});
// Broadcast requestVote to all servers
self.broadcastRpc(requestVote);
}
fn convertToFollower(&mut self) {
self.serverType = Follower;
self.setNewTimeout();
}
fn convertToLeader(&mut self) {
self.serverType = Leader;
self.broadcastHeartbeat();
}
fn broadcastRpc(&mut self, rpc: RaftRpc) {
for &serverId in self.servers.iter() {
if serverId == self.serverId {
continue;
}
self.transport.sendRpc(serverId, &rpc);
}
}
fn broadcastHeartbeat(&mut self) {
// Send an empty appendEntries to all servers
// AppendEntries {sender: ServerId, term: int, leaderId: ServerId, prevLogIndex: int, entries: ~[LogEntry], leaderCommitIndex: int},
let lastLogIndex = (self.log.len() - 1) as int;
let appendEntries = AppendEntries(AppendEntriesRpc {
sender: self.serverId, term: self.currentTerm, leaderId: self.serverId,
prevLogIndex: lastLogIndex, prevLogTerm: self.log[lastLogIndex].term,
entries: ~[], leaderCommitIndex: lastLogIndex
});
self.broadcastRpc(appendEntries);
}
fn getServerIndex(&self, serverId: ServerId) -> int {
for i in range(0,self.servers.len() as int) {
if self.servers[i] == serverId {
return i;
}
}
return -1;
}
fn setNewTimeout(&mut self) {
let val:int = rand::random();
self.electionTimeout = val % 500 + 500;
}
}
fn getLocalAddr(port: u16) -> SocketAddr {
return SocketAddr {ip: Ipv4Addr(127,0,0,1), port: port};
}
fn main() {
let args = os::args();
if args.len() != 2 {
fail!("usage: {} <port>", args[0]);
}
let port = match from_str::<int>(args[1]) {
Some(val) => val,
None => fail!("usage: {} <port:int>", args[0])
};
let udpTransport = UdpTransport::new();
// config with 5 servers
let servers = vec::from_fn(5, |idx| getLocalAddr(9000 + idx as u16));
let mut server = RaftServer::new(~udpTransport, getLocalAddr(port as u16), servers);
server.run();
} | }
}
fn candidateAppendEntries(&mut self, rpc: AppendEntriesRpc) { | random_line_split |
server.rs | #[feature(struct_variant)];
#[feature(macro_rules)];
use std::io::net::ip::{Ipv4Addr, SocketAddr};
use std::io::net::udp::{UdpSocket, UdpStream};
use std::io::timer;
use udptransport::UdpTransport;
use transport::RaftRpcTransport;
use rpc::{ServerId, LogEntry, AppendEntries, AppendEntriesResponse, RequestVote, RequestVoteResponse, RaftRpc};
use rpc::{AppendEntriesRpc, AppendEntriesResponseRpc, RequestVoteRpc, RequestVoteResponseRpc};
use std::os;
use std::vec;
use std::rand;
#[path="./rust-osc/osc.rs"]
mod osc;
mod udptransport;
mod transport;
mod rpc;
enum ServerType {
Follower,
Candidate,
Leader
}
struct RaftServer {
currentTerm: int,
votedFor: Option<ServerId>,
log: ~[LogEntry],
commitIndex: int,
lastApplied: int,
serverType: ServerType,
electionTimeout: int,
receivedVotes: int,
// Leader state:
// for each server, index of the next log entry to send to that
// server, initialized to last log index + 1
nextIndex: ~[int],
// for each server, index of highest log entry known to be
// replicated on server, initialized to 0
matchIndex: ~[int],
// current set of servers
servers: ~[ServerId],
// serverId corresponding to self
serverId: ServerId,
// transport layer to send RPC's over
transport: ~RaftRpcTransport
}
impl RaftServer {
fn new(transport: ~RaftRpcTransport, serverId: ServerId, servers: ~[ServerId]) -> RaftServer {
return RaftServer {
currentTerm: 0,
votedFor: None,
log: ~[],
commitIndex: 0,
lastApplied: 0,
electionTimeout: 0,
receivedVotes: 0,
serverType: Follower,
nextIndex: vec::with_capacity(servers.len()),
matchIndex: vec::with_capacity(servers.len()),
servers: servers,
serverId: serverId,
transport: transport
}
}
fn run(&mut self) {
loop {
match self.serverType {
Candidate => self.candidateStep(),
Follower => self.followerStep(),
Leader => self.leaderStep()
}
}
}
// Act as a candidate
// if votes received from a majority of servers become leader
// if appendentries received from new leader convert to follower
// if election times out try again
fn candidateStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.candidateRespond(rpc),
None => {}
}
if self.receivedVotes > (self.servers.len()/2) as int {
self.convertToLeader();
}
}
// Respond as a candidate to a given RPC
// RequestVoteResponse with success means we get a vote :D
// AppendEntries with term >= our term means we lost T_T
fn candidateRespond(&mut self, rpc: RaftRpc) {
match rpc {
RequestVoteResponse(rvr) => self.candidateRequestVoteResponse(rvr),
AppendEntries(ae) => self.candidateAppendEntries(ae),
_ => {}
};
}
fn candidateRequestVoteResponse(&mut self, rpc: RequestVoteResponseRpc) {
if rpc.voteGranted {
// TODO check to see if the server already voted for us this cycle
self.receivedVotes += 1;
}
}
fn candidateAppendEntries(&mut self, rpc: AppendEntriesRpc) {
if rpc.term >= self.currentTerm {
// we lost the election... D:
self.convertToFollower();
}
// pretend we didn't hear them whether or not they won, the resend will occur anyway
}
// Update the server when it is a Follower
// Paper:
// Respond to RPCs from candidates and leaders
// If election timeout elapses without receiving AppendEntries RPC
// or granting vote to candidate: convert to candidate
fn followerStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.followerRespond(rpc),
None => {}
}
self.electionTimeout -= 1;
if self.electionTimeout < 0 {
self.convertToCandidate();
}
}
// Respond to an incoming RPC as a follower
fn followerRespond(&mut self, rpc: RaftRpc) {
let response = match rpc {
AppendEntries(ref ae) => Some(self.followerAppendEntries(ae)),
RequestVote(ref rv) => Some(self.followerRequestVote(rv)),
_ => None
};
match response {
// send response to original rpc sender
Some(responseRpc) => self.transport.sendRpc(rpc.sender(), &responseRpc),
None => {}
}
}
// As a follower, handle an appendEntries RPC
fn followerAppendEntries(&mut self, rpc: &AppendEntriesRpc) -> RaftRpc {
let fail = AppendEntriesResponse(AppendEntriesResponseRpc{sender: self.serverId, term: self.currentTerm, success: false, logIndex: 0});
if rpc.term < self.currentTerm {
return fail;
}
// If log doesn't contain an entry with matching term return false
if rpc.prevLogIndex < self.log.len() as int {
if self.log[rpc.prevLogIndex].term != rpc.prevLogTerm {
return fail;
}
} else |
// 3. If an existing entry conflicts with a new one delete the
// existing entry and all that follow it
let startLogIndex = rpc.prevLogIndex+1;
for logOffset in range(0, rpc.entries.len()) {
let logIndex = startLogIndex + logOffset as int;
let entry = rpc.entries[logOffset].clone();
if logIndex < self.log.len() as int {
if self.log[logIndex].term != entry.term {
// delete it and all following
self.log.truncate(logIndex as uint);
self.log.push(entry);
}
} else {
self.log.push(entry);
}
}
return AppendEntriesResponse(AppendEntriesResponseRpc {
sender: self.serverId, term: self.currentTerm, success: true,
logIndex: (self.log.len() - 1) as int
});
}
// As a follower handle a requestVote rpc
// From paper:
// 1. Reply false if term < currentTerm
// 2. If votedFor is null or candidateId and candidate's log is
// at least as up-to-date as receiver's log, grant vote
fn followerRequestVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
let fail = RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: false});
if rpc.term < self.currentTerm {
return fail;
}
// if we haven't voted for anything or we voted for candidate
match self.votedFor {
None => {
return self.followerVote(rpc);
},
Some(id) if rpc.candidateId == id => {
return self.followerVote(rpc);
}
_ => {
return fail;
}
}
}
fn followerVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
// if the candidate's log is at least as up-to-date as ours vote for them
let mut voteGranted = false;
let lastLogIndex = (self.log.len() - 1) as int;
if self.log.len() == 0 || (rpc.lastLogIndex >= lastLogIndex &&
rpc.lastLogTerm >= self.log[lastLogIndex].term) {
self.votedFor = Some(rpc.candidateId);
voteGranted = true
}
return RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: voteGranted});
}
// Update as a leader
// Paper:
// If last log index > nextIndex for a follower send AppendEntries RPC with log entries starting at nextIndex
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
// If there exists an N such that N > commitIndex, a majority of matchIndex >= N and log[N].term == currentTerm
// set commitIndex = N
fn leaderStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.leaderRespond(rpc),
None => {}
}
}
fn leaderRespond(&mut self, rpc: RaftRpc) {
match rpc {
AppendEntriesResponse(aer) => self.leaderAppendEntriesResponse(aer),
_ => {}
}
}
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
fn leaderAppendEntriesResponse(&mut self, rpc: AppendEntriesResponseRpc) {
let followerIndex = self.getServerIndex(rpc.sender);
if rpc.success {
self.nextIndex[followerIndex] = rpc.logIndex;
self.matchIndex[followerIndex] = rpc.logIndex;
} else {
if self.nextIndex[followerIndex] > 0 {
self.nextIndex[followerIndex] -= 1;
}
}
}
// Become a candidate (start election)
fn convertToCandidate(&mut self) {
self.serverType = Candidate;
self.currentTerm += 1;
self.receivedVotes = 1; // vote for self
self.setNewTimeout();
// RequestVote {sender: ServerId, term: int, candidateId: ServerId, lastLogIndex: int, lastLogTerm: int},
let lastLogIndex = (self.log.len() - 1) as int;
let requestVote = RequestVote(RequestVoteRpc {
sender: self.serverId, term: self.currentTerm,
candidateId: self.serverId, lastLogIndex: lastLogIndex,
lastLogTerm: self.log[lastLogIndex].term
});
// Broadcast requestVote to all servers
self.broadcastRpc(requestVote);
}
fn convertToFollower(&mut self) {
self.serverType = Follower;
self.setNewTimeout();
}
fn convertToLeader(&mut self) {
self.serverType = Leader;
self.broadcastHeartbeat();
}
fn broadcastRpc(&mut self, rpc: RaftRpc) {
for &serverId in self.servers.iter() {
if serverId == self.serverId {
continue;
}
self.transport.sendRpc(serverId, &rpc);
}
}
fn broadcastHeartbeat(&mut self) {
// Send an empty appendEntries to all servers
// AppendEntries {sender: ServerId, term: int, leaderId: ServerId, prevLogIndex: int, entries: ~[LogEntry], leaderCommitIndex: int},
let lastLogIndex = (self.log.len() - 1) as int;
let appendEntries = AppendEntries(AppendEntriesRpc {
sender: self.serverId, term: self.currentTerm, leaderId: self.serverId,
prevLogIndex: lastLogIndex, prevLogTerm: self.log[lastLogIndex].term,
entries: ~[], leaderCommitIndex: lastLogIndex
});
self.broadcastRpc(appendEntries);
}
fn getServerIndex(&self, serverId: ServerId) -> int {
for i in range(0,self.servers.len() as int) {
if self.servers[i] == serverId {
return i;
}
}
return -1;
}
fn setNewTimeout(&mut self) {
let val:int = rand::random();
self.electionTimeout = val % 500 + 500;
}
}
fn getLocalAddr(port: u16) -> SocketAddr {
return SocketAddr {ip: Ipv4Addr(127,0,0,1), port: port};
}
fn main() {
let args = os::args();
if args.len() != 2 {
fail!("usage: {} <port>", args[0]);
}
let port = match from_str::<int>(args[1]) {
Some(val) => val,
None => fail!("usage: {} <port:int>", args[0])
};
let udpTransport = UdpTransport::new();
// config with 5 servers
let servers = vec::from_fn(5, |idx| getLocalAddr(9000 + idx as u16));
let mut server = RaftServer::new(~udpTransport, getLocalAddr(port as u16), servers);
server.run();
}
| {
return fail;
} | conditional_block |
server.rs | #[feature(struct_variant)];
#[feature(macro_rules)];
use std::io::net::ip::{Ipv4Addr, SocketAddr};
use std::io::net::udp::{UdpSocket, UdpStream};
use std::io::timer;
use udptransport::UdpTransport;
use transport::RaftRpcTransport;
use rpc::{ServerId, LogEntry, AppendEntries, AppendEntriesResponse, RequestVote, RequestVoteResponse, RaftRpc};
use rpc::{AppendEntriesRpc, AppendEntriesResponseRpc, RequestVoteRpc, RequestVoteResponseRpc};
use std::os;
use std::vec;
use std::rand;
#[path="./rust-osc/osc.rs"]
mod osc;
mod udptransport;
mod transport;
mod rpc;
enum ServerType {
Follower,
Candidate,
Leader
}
struct RaftServer {
currentTerm: int,
votedFor: Option<ServerId>,
log: ~[LogEntry],
commitIndex: int,
lastApplied: int,
serverType: ServerType,
electionTimeout: int,
receivedVotes: int,
// Leader state:
// for each server, index of the next log entry to send to that
// server, initialized to last log index + 1
nextIndex: ~[int],
// for each server, index of highest log entry known to be
// replicated on server, initialized to 0
matchIndex: ~[int],
// current set of servers
servers: ~[ServerId],
// serverId corresponding to self
serverId: ServerId,
// transport layer to send RPC's over
transport: ~RaftRpcTransport
}
impl RaftServer {
fn new(transport: ~RaftRpcTransport, serverId: ServerId, servers: ~[ServerId]) -> RaftServer {
return RaftServer {
currentTerm: 0,
votedFor: None,
log: ~[],
commitIndex: 0,
lastApplied: 0,
electionTimeout: 0,
receivedVotes: 0,
serverType: Follower,
nextIndex: vec::with_capacity(servers.len()),
matchIndex: vec::with_capacity(servers.len()),
servers: servers,
serverId: serverId,
transport: transport
}
}
fn run(&mut self) {
loop {
match self.serverType {
Candidate => self.candidateStep(),
Follower => self.followerStep(),
Leader => self.leaderStep()
}
}
}
// Act as a candidate
// if votes received from a majority of servers become leader
// if appendentries received from new leader convert to follower
// if election times out try again
fn candidateStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.candidateRespond(rpc),
None => {}
}
if self.receivedVotes > (self.servers.len()/2) as int {
self.convertToLeader();
}
}
// Respond as a candidate to a given RPC
// RequestVoteResponse with success means we get a vote :D
// AppendEntries with term >= our term means we lost T_T
fn candidateRespond(&mut self, rpc: RaftRpc) {
match rpc {
RequestVoteResponse(rvr) => self.candidateRequestVoteResponse(rvr),
AppendEntries(ae) => self.candidateAppendEntries(ae),
_ => {}
};
}
fn candidateRequestVoteResponse(&mut self, rpc: RequestVoteResponseRpc) {
if rpc.voteGranted {
// TODO check to see if the server already voted for us this cycle
self.receivedVotes += 1;
}
}
fn candidateAppendEntries(&mut self, rpc: AppendEntriesRpc) {
if rpc.term >= self.currentTerm {
// we lost the election... D:
self.convertToFollower();
}
// pretend we didn't hear them whether or not they won, the resend will occur anyway
}
// Update the server when it is a Follower
// Paper:
// Respond to RPCs from candidates and leaders
// If election timeout elapses without receiving AppendEntries RPC
// or granting vote to candidate: convert to candidate
fn followerStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.followerRespond(rpc),
None => {}
}
self.electionTimeout -= 1;
if self.electionTimeout < 0 {
self.convertToCandidate();
}
}
// Respond to an incoming RPC as a follower
fn followerRespond(&mut self, rpc: RaftRpc) {
let response = match rpc {
AppendEntries(ref ae) => Some(self.followerAppendEntries(ae)),
RequestVote(ref rv) => Some(self.followerRequestVote(rv)),
_ => None
};
match response {
// send response to original rpc sender
Some(responseRpc) => self.transport.sendRpc(rpc.sender(), &responseRpc),
None => {}
}
}
// As a follower, handle an appendEntries RPC
fn followerAppendEntries(&mut self, rpc: &AppendEntriesRpc) -> RaftRpc {
let fail = AppendEntriesResponse(AppendEntriesResponseRpc{sender: self.serverId, term: self.currentTerm, success: false, logIndex: 0});
if rpc.term < self.currentTerm {
return fail;
}
// If log doesn't contain an entry with matching term return false
if rpc.prevLogIndex < self.log.len() as int {
if self.log[rpc.prevLogIndex].term != rpc.prevLogTerm {
return fail;
}
} else {
return fail;
}
// 3. If an existing entry conflicts with a new one delete the
// existing entry and all that follow it
let startLogIndex = rpc.prevLogIndex+1;
for logOffset in range(0, rpc.entries.len()) {
let logIndex = startLogIndex + logOffset as int;
let entry = rpc.entries[logOffset].clone();
if logIndex < self.log.len() as int {
if self.log[logIndex].term != entry.term {
// delete it and all following
self.log.truncate(logIndex as uint);
self.log.push(entry);
}
} else {
self.log.push(entry);
}
}
return AppendEntriesResponse(AppendEntriesResponseRpc {
sender: self.serverId, term: self.currentTerm, success: true,
logIndex: (self.log.len() - 1) as int
});
}
// As a follower handle a requestVote rpc
// From paper:
// 1. Reply false if term < currentTerm
// 2. If votedFor is null or candidateId and candidate's log is
// at least as up-to-date as receiver's log, grant vote
fn followerRequestVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc |
fn followerVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
// if the candidate's log is at least as up-to-date as ours vote for them
let mut voteGranted = false;
let lastLogIndex = (self.log.len() - 1) as int;
if self.log.len() == 0 || (rpc.lastLogIndex >= lastLogIndex &&
rpc.lastLogTerm >= self.log[lastLogIndex].term) {
self.votedFor = Some(rpc.candidateId);
voteGranted = true
}
return RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: voteGranted});
}
// Update as a leader
// Paper:
// If last log index > nextIndex for a follower send AppendEntries RPC with log entries starting at nextIndex
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
// If there exists an N such that N > commitIndex, a majority of matchIndex >= N and log[N].term == currentTerm
// set commitIndex = N
fn leaderStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.leaderRespond(rpc),
None => {}
}
}
fn leaderRespond(&mut self, rpc: RaftRpc) {
match rpc {
AppendEntriesResponse(aer) => self.leaderAppendEntriesResponse(aer),
_ => {}
}
}
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
fn leaderAppendEntriesResponse(&mut self, rpc: AppendEntriesResponseRpc) {
let followerIndex = self.getServerIndex(rpc.sender);
if rpc.success {
self.nextIndex[followerIndex] = rpc.logIndex;
self.matchIndex[followerIndex] = rpc.logIndex;
} else {
if self.nextIndex[followerIndex] > 0 {
self.nextIndex[followerIndex] -= 1;
}
}
}
// Become a candidate (start election)
fn convertToCandidate(&mut self) {
self.serverType = Candidate;
self.currentTerm += 1;
self.receivedVotes = 1; // vote for self
self.setNewTimeout();
// RequestVote {sender: ServerId, term: int, candidateId: ServerId, lastLogIndex: int, lastLogTerm: int},
let lastLogIndex = (self.log.len() - 1) as int;
let requestVote = RequestVote(RequestVoteRpc {
sender: self.serverId, term: self.currentTerm,
candidateId: self.serverId, lastLogIndex: lastLogIndex,
lastLogTerm: self.log[lastLogIndex].term
});
// Broadcast requestVote to all servers
self.broadcastRpc(requestVote);
}
fn convertToFollower(&mut self) {
self.serverType = Follower;
self.setNewTimeout();
}
fn convertToLeader(&mut self) {
self.serverType = Leader;
self.broadcastHeartbeat();
}
fn broadcastRpc(&mut self, rpc: RaftRpc) {
for &serverId in self.servers.iter() {
if serverId == self.serverId {
continue;
}
self.transport.sendRpc(serverId, &rpc);
}
}
fn broadcastHeartbeat(&mut self) {
// Send an empty appendEntries to all servers
// AppendEntries {sender: ServerId, term: int, leaderId: ServerId, prevLogIndex: int, entries: ~[LogEntry], leaderCommitIndex: int},
let lastLogIndex = (self.log.len() - 1) as int;
let appendEntries = AppendEntries(AppendEntriesRpc {
sender: self.serverId, term: self.currentTerm, leaderId: self.serverId,
prevLogIndex: lastLogIndex, prevLogTerm: self.log[lastLogIndex].term,
entries: ~[], leaderCommitIndex: lastLogIndex
});
self.broadcastRpc(appendEntries);
}
fn getServerIndex(&self, serverId: ServerId) -> int {
for i in range(0,self.servers.len() as int) {
if self.servers[i] == serverId {
return i;
}
}
return -1;
}
fn setNewTimeout(&mut self) {
let val:int = rand::random();
self.electionTimeout = val % 500 + 500;
}
}
fn getLocalAddr(port: u16) -> SocketAddr {
return SocketAddr {ip: Ipv4Addr(127,0,0,1), port: port};
}
fn main() {
let args = os::args();
if args.len() != 2 {
fail!("usage: {} <port>", args[0]);
}
let port = match from_str::<int>(args[1]) {
Some(val) => val,
None => fail!("usage: {} <port:int>", args[0])
};
let udpTransport = UdpTransport::new();
// config with 5 servers
let servers = vec::from_fn(5, |idx| getLocalAddr(9000 + idx as u16));
let mut server = RaftServer::new(~udpTransport, getLocalAddr(port as u16), servers);
server.run();
}
| {
let fail = RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: false});
if rpc.term < self.currentTerm {
return fail;
}
// if we haven't voted for anything or we voted for candidate
match self.votedFor {
None => {
return self.followerVote(rpc);
},
Some(id) if rpc.candidateId == id => {
return self.followerVote(rpc);
}
_ => {
return fail;
}
}
} | identifier_body |
server.rs | #[feature(struct_variant)];
#[feature(macro_rules)];
use std::io::net::ip::{Ipv4Addr, SocketAddr};
use std::io::net::udp::{UdpSocket, UdpStream};
use std::io::timer;
use udptransport::UdpTransport;
use transport::RaftRpcTransport;
use rpc::{ServerId, LogEntry, AppendEntries, AppendEntriesResponse, RequestVote, RequestVoteResponse, RaftRpc};
use rpc::{AppendEntriesRpc, AppendEntriesResponseRpc, RequestVoteRpc, RequestVoteResponseRpc};
use std::os;
use std::vec;
use std::rand;
#[path="./rust-osc/osc.rs"]
mod osc;
mod udptransport;
mod transport;
mod rpc;
enum ServerType {
Follower,
Candidate,
Leader
}
struct | {
currentTerm: int,
votedFor: Option<ServerId>,
log: ~[LogEntry],
commitIndex: int,
lastApplied: int,
serverType: ServerType,
electionTimeout: int,
receivedVotes: int,
// Leader state:
// for each server, index of the next log entry to send to that
// server, initialized to last log index + 1
nextIndex: ~[int],
// for each server, index of highest log entry known to be
// replicated on server, initialized to 0
matchIndex: ~[int],
// current set of servers
servers: ~[ServerId],
// serverId corresponding to self
serverId: ServerId,
// transport layer to send RPC's over
transport: ~RaftRpcTransport
}
impl RaftServer {
fn new(transport: ~RaftRpcTransport, serverId: ServerId, servers: ~[ServerId]) -> RaftServer {
return RaftServer {
currentTerm: 0,
votedFor: None,
log: ~[],
commitIndex: 0,
lastApplied: 0,
electionTimeout: 0,
receivedVotes: 0,
serverType: Follower,
nextIndex: vec::with_capacity(servers.len()),
matchIndex: vec::with_capacity(servers.len()),
servers: servers,
serverId: serverId,
transport: transport
}
}
fn run(&mut self) {
loop {
match self.serverType {
Candidate => self.candidateStep(),
Follower => self.followerStep(),
Leader => self.leaderStep()
}
}
}
// Act as a candidate
// if votes received from a majority of servers become leader
// if appendentries received from new leader convert to follower
// if election times out try again
fn candidateStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.candidateRespond(rpc),
None => {}
}
if self.receivedVotes > (self.servers.len()/2) as int {
self.convertToLeader();
}
}
// Respond as a candidate to a given RPC
// RequestVoteResponse with success means we get a vote :D
// AppendEntries with term >= our term means we lost T_T
fn candidateRespond(&mut self, rpc: RaftRpc) {
match rpc {
RequestVoteResponse(rvr) => self.candidateRequestVoteResponse(rvr),
AppendEntries(ae) => self.candidateAppendEntries(ae),
_ => {}
};
}
fn candidateRequestVoteResponse(&mut self, rpc: RequestVoteResponseRpc) {
if rpc.voteGranted {
// TODO check to see if the server already voted for us this cycle
self.receivedVotes += 1;
}
}
fn candidateAppendEntries(&mut self, rpc: AppendEntriesRpc) {
if rpc.term >= self.currentTerm {
// we lost the election... D:
self.convertToFollower();
}
// pretend we didn't hear them whether or not they won, the resend will occur anyway
}
// Update the server when it is a Follower
// Paper:
// Respond to RPCs from candidates and leaders
// If election timeout elapses without receiving AppendEntries RPC
// or granting vote to candidate: convert to candidate
fn followerStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.followerRespond(rpc),
None => {}
}
self.electionTimeout -= 1;
if self.electionTimeout < 0 {
self.convertToCandidate();
}
}
// Respond to an incoming RPC as a follower
fn followerRespond(&mut self, rpc: RaftRpc) {
let response = match rpc {
AppendEntries(ref ae) => Some(self.followerAppendEntries(ae)),
RequestVote(ref rv) => Some(self.followerRequestVote(rv)),
_ => None
};
match response {
// send response to original rpc sender
Some(responseRpc) => self.transport.sendRpc(rpc.sender(), &responseRpc),
None => {}
}
}
// As a follower, handle an appendEntries RPC
fn followerAppendEntries(&mut self, rpc: &AppendEntriesRpc) -> RaftRpc {
let fail = AppendEntriesResponse(AppendEntriesResponseRpc{sender: self.serverId, term: self.currentTerm, success: false, logIndex: 0});
if rpc.term < self.currentTerm {
return fail;
}
// If log doesn't contain an entry with matching term return false
if rpc.prevLogIndex < self.log.len() as int {
if self.log[rpc.prevLogIndex].term != rpc.prevLogTerm {
return fail;
}
} else {
return fail;
}
// 3. If an existing entry conflicts with a new one delete the
// existing entry and all that follow it
let startLogIndex = rpc.prevLogIndex+1;
for logOffset in range(0, rpc.entries.len()) {
let logIndex = startLogIndex + logOffset as int;
let entry = rpc.entries[logOffset].clone();
if logIndex < self.log.len() as int {
if self.log[logIndex].term != entry.term {
// delete it and all following
self.log.truncate(logIndex as uint);
self.log.push(entry);
}
} else {
self.log.push(entry);
}
}
return AppendEntriesResponse(AppendEntriesResponseRpc {
sender: self.serverId, term: self.currentTerm, success: true,
logIndex: (self.log.len() - 1) as int
});
}
// As a follower handle a requestVote rpc
// From paper:
// 1. Reply false if term < currentTerm
// 2. If votedFor is null or candidateId and candidate's log is
// at least as up-to-date as receiver's log, grant vote
fn followerRequestVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
let fail = RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: false});
if rpc.term < self.currentTerm {
return fail;
}
// if we haven't voted for anything or we voted for candidate
match self.votedFor {
None => {
return self.followerVote(rpc);
},
Some(id) if rpc.candidateId == id => {
return self.followerVote(rpc);
}
_ => {
return fail;
}
}
}
fn followerVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
// if the candidate's log is at least as up-to-date as ours vote for them
let mut voteGranted = false;
let lastLogIndex = (self.log.len() - 1) as int;
if self.log.len() == 0 || (rpc.lastLogIndex >= lastLogIndex &&
rpc.lastLogTerm >= self.log[lastLogIndex].term) {
self.votedFor = Some(rpc.candidateId);
voteGranted = true
}
return RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: voteGranted});
}
// Update as a leader
// Paper:
// If last log index > nextIndex for a follower send AppendEntries RPC with log entries starting at nextIndex
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
// If there exists an N such that N > commitIndex, a majority of matchIndex >= N and log[N].term == currentTerm
// set commitIndex = N
fn leaderStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.leaderRespond(rpc),
None => {}
}
}
fn leaderRespond(&mut self, rpc: RaftRpc) {
match rpc {
AppendEntriesResponse(aer) => self.leaderAppendEntriesResponse(aer),
_ => {}
}
}
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
fn leaderAppendEntriesResponse(&mut self, rpc: AppendEntriesResponseRpc) {
let followerIndex = self.getServerIndex(rpc.sender);
if rpc.success {
self.nextIndex[followerIndex] = rpc.logIndex;
self.matchIndex[followerIndex] = rpc.logIndex;
} else {
if self.nextIndex[followerIndex] > 0 {
self.nextIndex[followerIndex] -= 1;
}
}
}
// Become a candidate (start election)
fn convertToCandidate(&mut self) {
self.serverType = Candidate;
self.currentTerm += 1;
self.receivedVotes = 1; // vote for self
self.setNewTimeout();
// RequestVote {sender: ServerId, term: int, candidateId: ServerId, lastLogIndex: int, lastLogTerm: int},
let lastLogIndex = (self.log.len() - 1) as int;
let requestVote = RequestVote(RequestVoteRpc {
sender: self.serverId, term: self.currentTerm,
candidateId: self.serverId, lastLogIndex: lastLogIndex,
lastLogTerm: self.log[lastLogIndex].term
});
// Broadcast requestVote to all servers
self.broadcastRpc(requestVote);
}
fn convertToFollower(&mut self) {
self.serverType = Follower;
self.setNewTimeout();
}
fn convertToLeader(&mut self) {
self.serverType = Leader;
self.broadcastHeartbeat();
}
fn broadcastRpc(&mut self, rpc: RaftRpc) {
for &serverId in self.servers.iter() {
if serverId == self.serverId {
continue;
}
self.transport.sendRpc(serverId, &rpc);
}
}
fn broadcastHeartbeat(&mut self) {
// Send an empty appendEntries to all servers
// AppendEntries {sender: ServerId, term: int, leaderId: ServerId, prevLogIndex: int, entries: ~[LogEntry], leaderCommitIndex: int},
let lastLogIndex = (self.log.len() - 1) as int;
let appendEntries = AppendEntries(AppendEntriesRpc {
sender: self.serverId, term: self.currentTerm, leaderId: self.serverId,
prevLogIndex: lastLogIndex, prevLogTerm: self.log[lastLogIndex].term,
entries: ~[], leaderCommitIndex: lastLogIndex
});
self.broadcastRpc(appendEntries);
}
fn getServerIndex(&self, serverId: ServerId) -> int {
for i in range(0,self.servers.len() as int) {
if self.servers[i] == serverId {
return i;
}
}
return -1;
}
fn setNewTimeout(&mut self) {
let val:int = rand::random();
self.electionTimeout = val % 500 + 500;
}
}
fn getLocalAddr(port: u16) -> SocketAddr {
return SocketAddr {ip: Ipv4Addr(127,0,0,1), port: port};
}
fn main() {
let args = os::args();
if args.len() != 2 {
fail!("usage: {} <port>", args[0]);
}
let port = match from_str::<int>(args[1]) {
Some(val) => val,
None => fail!("usage: {} <port:int>", args[0])
};
let udpTransport = UdpTransport::new();
// config with 5 servers
let servers = vec::from_fn(5, |idx| getLocalAddr(9000 + idx as u16));
let mut server = RaftServer::new(~udpTransport, getLocalAddr(port as u16), servers);
server.run();
}
| RaftServer | identifier_name |
WXmain.js | var startEvt, moveEvt, endEvt;
if ('ontouchstart' in window) {
startEvt = 'touchstart';
moveEvt = 'touchmove';
endEvt = 'touchend';
} else {
startEvt = 'mousedown';
moveEvt = 'mousemove';
endEvt = 'mouseup';
};
var Datas = document.getElementById("DeviceId").value;
/*************************Hamburger Btn******************************************/
var sideBarBtn = document.querySelector('.sideBarBtn');
sideBarBtn.state = 'closed';
var sidebarWrap = document.querySelector('.sidebarWrap');
var $sidebarWrap = $('.sidebarWrap');
var $Btn_lines = $('.Btn_line');
var mask = document.querySelector('.mask');
var sidebarWrapHeight = sidebarWrap.offsetHeight;
$sidebarWrap.css('display', 'none');
var BtnTimer = null;
sideBarBtn.addEventListener(startEvt, function (e) {
e.stopPropagation();
e.preventDefault();
loopCount = 0;
editor.setValue(Blockly.JavaScript.workspaceToCode(workspace));
BtnMove();
},false);
mask.addEventListener('click',function () {
sideBarBtn.state = 'opened';
BtnMove();
},false);
/***************************************Hamburger Btn animation********************************************/
function BtnMove() {
if (sideBarBtn.state == 'closed') {
$Btn_lines[0].classList.add('Btn_top_clockwise');
$Btn_lines[1].classList.add('Btn_mid_hide');
$Btn_lines[2].classList.add('Btn_bottom_anticlockwise');
sideBarBtn.state = 'opened';
contentMove('up', $sidebarWrap);
} else {
$Btn_lines.removeClass('Btn_top_clockwise Btn_mid_hide Btn_bottom_anticlockwise');
$Btn_lines[0].classList.add('Btn_top_anticlockwise');
$Btn_lines[1].classList.add('Btn_mid_show');
$Btn_lines[2].classList.add('Btn_bottom_clockwise');
BtnTimer = setTimeout(function () {
$Btn_lines.removeClass('Btn_top_anticlockwise Btn_mid_show Btn_bottom_clockwise');
}, 500);
sideBarBtn.state = 'closed';
contentMove('down', $sidebarWrap);
};
};
var contentWrap = document.querySelector(".contentWrap");
var sidebarContent = document.querySelectorAll(".sidebarContent");
var contentLogo = document.querySelector(".contentLogo");
var navList = document.querySelectorAll(".navList");
var contentIndex = 0;
var touchX
var disX;
contentWrap.addEventListener(startEvt, function (e) {
touchX = e.changedTouches[0].clientX;
},false);
contentWrap.addEventListener(moveEvt, function(e) {
disX = e.changedTouches[0].clientX - touchX;
},false);
contentWrap.addEventListener(endEvt, function () {
if (disX > 150) {
contentIndex--;
if (contentIndex < 0) contentIndex = sidebarContent.length - 1;
} else if (disX < -150) {
contentIndex++;
if (contentIndex > sidebarContent.length - 1) contentIndex = 0;
}
for (var i = 0; i < sidebarContent.length; i++) {
sidebarContent[i].style.display = "none";
navList[i].classList.remove('curNav');
}
sidebarContent[contentIndex].style.display = "block";
navList[contentIndex].classList.add('curNav');
contentLogo.style.backgroundPositionX = -100 * contentIndex + "px";
disX = 0;
},false);
function contentMove(direction, ele) {
if (direction == 'up') {
ele.css('display', 'block').animate({ 'bottom': 0 + 'px' }, 300);
mask.style.display = 'block';
} else {
ele.animate({ 'bottom': -sidebarWrapHeight + 'px' }, 300, function () {
ele.css('display', 'none');
mask.style.display = 'none';
});
};
};
/*******************************Hamburger Btn animation end***********************************/
/********************************save and share*************************************/
var saveFile = document.querySelector("#saveFile");
var shareBtn = document.querySelector(".shareBtn");
var saveBtn = document.querySelector(".saveBtn");
var saveError = document.querySelector(".saveError");
var fileWrap = document.querySelector(".fileWrap");
var fileUl = document.querySelector(".fileUl");
var filelist = document.querySelector(".filelist");
var clickGetFile = document.querySelector(".clickGetFile");
var deleteFile;
var shareFile;
var fileInfo;
var allSavedXml = [];
var allSavedTime = [];
/*******************************save blocks************************************/
saveBtn.addEventListener(startEvt, function(e) {
e.preventDefault();
}, false);
saveBtn.addEventListener(endEvt, save, false);
function save() {
var blocks = workspace.getAllBlocks();
var saveName = saveFile.value;
/* allow to save */
if (blocks.length !== 0 && saveName.length !== 0 && saveName.length < 10) {
var confirmtoSave = confirm("确定保存当前模块?");
if (confirmtoSave) {
saveError.innerHTML = "";
var Timestamp = new Date().getTime();
var xml = Blockly.Xml.workspaceToDom(workspace);
var xmlText = Blockly.Xml.domToText(xml);
xmlText = xmlText.replace(/"/g, "'");
if (fileWrap.style.display == "block") {
var clonedNode = document.querySelector(".filelist");
var newList = clonedNode.cloneNode(true);
newList.style.display = "block";
newList.querySelector(".fileName").innerHTML = saveName;
fileUl.insertBefore(newList, clonedNode);
allSavedXml.unshift(xmlText);
allSavedTime.unshift(Timestamp);
};
$.ajax({
url: "blocakly",
type: "POST",
data : {
xmlText : xmlText,
saveName : saveName,
Timestamp : Timestamp,
deviceId : Datas
},
success: function(result) {
document.querySelector("#saveFile").value = "";
},
error: function () {
alert("请检查您的网络");
}
});
console.log("保存的模块:" + "----" + xmlText);
console.log("保存的名字:" + "----" + saveName);
console.log("保存的时间戳:" + "----" + Timestamp);
document.querySelector("#saveFile").value = "";
}
} else if (blocks.length === 0) {
saveError.innerHTML = "还没添加模块";
} else if (saveName.length === 0 || saveName.length >= 10) {
saveError.innerHTML = "名字长度1-9";
}
delOrshareFile();
}
/**************************************share to friend********************************************/
/*******************************get blocks********************************/
clickGetFile.addEventListener(startEvt, function (e) {
e.preventDefault();
},false);
clickGetFile.addEventListener(endEvt, getBlocks, false);
function getBlocks () {
$.ajax({
url : "blocaklySelect/"+Datas,
type : "GET",
dataType : "json",
success : function (data) {
clickGetFile.style.display = "none";
fileWrap.style.display = "block";
var saveFileLen = data.length;
for (var i = 0; i < saveFileLen; i++) {
var newList = filelist.cloneNode(true);
newList.style.display = "block";
newList.querySelector(".fileName").innerHTML = data[i].saveName;
fileUl.insertBefore(newList, filelist);
allSavedXml.push(data[i].xmlText);
allSavedTime.push(data[i].Timestamp);
}
delOrshareFile();
},
error : function () {
clickGetFile.innerHTML = "你还没保存过模块";
}
});
};
/****************************************delete Or share File*****************************************/
function delOrshareFile () {
/*when delete the file get .filelist DOM again*/
deleteFile = document.querySelectorAll(".deleteFile");
shareFile = document.querySelectorAll(".shareFile");
fileInfo = document.querySelectorAll(".fileInfo");
for (var i = 0; i < deleteFile.length; i++) {
deleteFile[i].index = i;
deleteFile[i].onclick = function (e) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
var confirmDelete = confirm("确定要删除" + "(" + this.parentNode.parentNode.firstElementChild.firstElementChild.innerHTML + ")");
/*deleteTimestamp*/
if (confirmDelete) {
var deleteTimestamp = allSavedTime[this.index];
fileUl.removeChild(this.parentNode.parentNode);
allSavedXml.splice(this.index, 1);
allSavedTime.splice(this.index, 1);
console.log("删除" + "----" + deleteTimestamp);
$.ajax({
url: "blocakly",
type: "DELETE",
data : {
deviceId : Datas,
Timestamp : deleteTimestamp
},
success: function(result) {
},
error: function () {
alert("请检查您的网络");
}
});
}
}
};
for (var i = 0; i < shareFile.length; i++) {
shareFile[i].index = i;
shareFile[i].onclick = function (e, callback) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
/*shareXml*/
var shareXml = allSavedXml[this.index];
console.log("分享" + "----" + shareXml);
}
};
for (var i = 0; i < fileInfo.length; i++) {
fileInfo[i].index = i;
fileInfo[i].onclick = function (e) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
var loadXml = confirm("是否要将模块替换成" + "(" + this.firstElementChild.innerHTML + ")");
if (loadXml) {
workspace.clear();
Blockly.Xml.domToWorkspace(Blockly.Xml.textToDom(allSavedXml[this.index]), workspace);
| }
}
}
}
/***********************************save and share end**************************************/
| conditional_block | |
WXmain.js | var startEvt, moveEvt, endEvt;
if ('ontouchstart' in window) {
startEvt = 'touchstart';
moveEvt = 'touchmove';
endEvt = 'touchend';
} else {
startEvt = 'mousedown';
moveEvt = 'mousemove';
endEvt = 'mouseup';
};
var Datas = document.getElementById("DeviceId").value;
/*************************Hamburger Btn******************************************/
var sideBarBtn = document.querySelector('.sideBarBtn');
sideBarBtn.state = 'closed';
var sidebarWrap = document.querySelector('.sidebarWrap');
var $sidebarWrap = $('.sidebarWrap');
var $Btn_lines = $('.Btn_line');
var mask = document.querySelector('.mask');
var sidebarWrapHeight = sidebarWrap.offsetHeight;
$sidebarWrap.css('display', 'none');
var BtnTimer = null;
sideBarBtn.addEventListener(startEvt, function (e) {
e.stopPropagation();
e.preventDefault();
loopCount = 0;
editor.setValue(Blockly.JavaScript.workspaceToCode(workspace));
BtnMove();
},false);
mask.addEventListener('click',function () {
sideBarBtn.state = 'opened';
BtnMove();
},false);
/***************************************Hamburger Btn animation********************************************/
function BtnMove() {
if (sideBarBtn.state == 'closed') {
$Btn_lines[0].classList.add('Btn_top_clockwise');
$Btn_lines[1].classList.add('Btn_mid_hide');
$Btn_lines[2].classList.add('Btn_bottom_anticlockwise');
sideBarBtn.state = 'opened';
contentMove('up', $sidebarWrap);
} else {
$Btn_lines.removeClass('Btn_top_clockwise Btn_mid_hide Btn_bottom_anticlockwise');
$Btn_lines[0].classList.add('Btn_top_anticlockwise');
$Btn_lines[1].classList.add('Btn_mid_show');
$Btn_lines[2].classList.add('Btn_bottom_clockwise');
BtnTimer = setTimeout(function () {
$Btn_lines.removeClass('Btn_top_anticlockwise Btn_mid_show Btn_bottom_clockwise');
}, 500);
sideBarBtn.state = 'closed';
contentMove('down', $sidebarWrap);
};
};
var contentWrap = document.querySelector(".contentWrap");
var sidebarContent = document.querySelectorAll(".sidebarContent");
var contentLogo = document.querySelector(".contentLogo");
var navList = document.querySelectorAll(".navList");
var contentIndex = 0;
var touchX
var disX;
contentWrap.addEventListener(startEvt, function (e) {
touchX = e.changedTouches[0].clientX;
},false);
contentWrap.addEventListener(moveEvt, function(e) {
disX = e.changedTouches[0].clientX - touchX;
},false);
contentWrap.addEventListener(endEvt, function () {
if (disX > 150) {
contentIndex--;
if (contentIndex < 0) contentIndex = sidebarContent.length - 1;
} else if (disX < -150) {
contentIndex++;
if (contentIndex > sidebarContent.length - 1) contentIndex = 0;
}
for (var i = 0; i < sidebarContent.length; i++) {
sidebarContent[i].style.display = "none";
navList[i].classList.remove('curNav');
}
sidebarContent[contentIndex].style.display = "block";
navList[contentIndex].classList.add('curNav');
contentLogo.style.backgroundPositionX = -100 * contentIndex + "px";
disX = 0;
},false);
function contentMove(direction, ele) {
if (direction == 'up') {
ele.css('display', 'block').animate({ 'bottom': 0 + 'px' }, 300);
mask.style.display = 'block';
} else {
ele.animate({ 'bottom': -sidebarWrapHeight + 'px' }, 300, function () {
ele.css('display', 'none');
mask.style.display = 'none';
});
};
};
/*******************************Hamburger Btn animation end***********************************/
/********************************save and share*************************************/
var saveFile = document.querySelector("#saveFile");
var shareBtn = document.querySelector(".shareBtn");
var saveBtn = document.querySelector(".saveBtn");
var saveError = document.querySelector(".saveError");
var fileWrap = document.querySelector(".fileWrap");
var fileUl = document.querySelector(".fileUl");
var filelist = document.querySelector(".filelist");
var clickGetFile = document.querySelector(".clickGetFile");
var deleteFile;
var shareFile;
var fileInfo;
var allSavedXml = [];
var allSavedTime = [];
/*******************************save blocks************************************/
saveBtn.addEventListener(startEvt, function(e) {
e.preventDefault();
}, false);
saveBtn.addEventListener(endEvt, save, false);
function save() {
var blocks = workspace.getAllBlocks();
var saveName = saveFile.value;
/* allow to save */
if (blocks.length !== 0 && saveName.length !== 0 && saveName.length < 10) {
var confirmtoSave = confirm("确定保存当前模块?");
if (confirmtoSave) {
saveError.innerHTML = "";
var Timestamp = new Date().getTime();
var xml = Blockly.Xml.workspaceToDom(workspace);
var xmlText = Blockly.Xml.domToText(xml);
xmlText = xmlText.replace(/"/g, "'");
if (fileWrap.style.display == "block") {
var clonedNode = document.querySelector(".filelist");
var newList = clonedNode.cloneNode(true);
newList.style.display = "block";
newList.querySelector(".fileName").innerHTML = saveName;
fileUl.insertBefore(newList, clonedNode);
allSavedXml.unshift(xmlText);
allSavedTime.unshift(Timestamp);
};
$.ajax({
url: "blocakly",
type: "POST",
data : {
xmlText : xmlText,
saveName : saveName,
Timestamp : Timestamp,
deviceId : Datas
},
success: function(result) {
document.querySelector("#saveFile").value = "";
},
error: function () {
alert("请检查您的网络");
}
});
console.log("保存的模块:" + "----" + xmlText);
console.log("保存的名字:" + "----" + saveName);
console.log("保存的时间戳:" + "----" + Timestamp);
document.querySelector("#saveFile").value = "";
}
} else if (blocks.length === 0) {
saveError.innerHTML = "还没添加模块";
} else if (saveName.length === 0 || saveName.length >= 10) {
saveError.innerHTML = "名字长度1-9";
}
delOrshareFile();
}
/**************************************share to friend********************************************/
/*******************************get blocks********************************/
clickGetFile.addEventListener(startEvt, function (e) {
e.preventDefault();
},false);
clickGetFile.addEventListener(endEvt, getBlocks, false);
function getBlocks () {
$.ajax({
url : "blocaklySelect/"+Datas,
type : "GET", | pe : "json",
success : function (data) {
clickGetFile.style.display = "none";
fileWrap.style.display = "block";
var saveFileLen = data.length;
for (var i = 0; i < saveFileLen; i++) {
var newList = filelist.cloneNode(true);
newList.style.display = "block";
newList.querySelector(".fileName").innerHTML = data[i].saveName;
fileUl.insertBefore(newList, filelist);
allSavedXml.push(data[i].xmlText);
allSavedTime.push(data[i].Timestamp);
}
delOrshareFile();
},
error : function () {
clickGetFile.innerHTML = "你还没保存过模块";
}
});
};
/****************************************delete Or share File*****************************************/
function delOrshareFile () {
/*when delete the file get .filelist DOM again*/
deleteFile = document.querySelectorAll(".deleteFile");
shareFile = document.querySelectorAll(".shareFile");
fileInfo = document.querySelectorAll(".fileInfo");
for (var i = 0; i < deleteFile.length; i++) {
deleteFile[i].index = i;
deleteFile[i].onclick = function (e) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
var confirmDelete = confirm("确定要删除" + "(" + this.parentNode.parentNode.firstElementChild.firstElementChild.innerHTML + ")");
/*deleteTimestamp*/
if (confirmDelete) {
var deleteTimestamp = allSavedTime[this.index];
fileUl.removeChild(this.parentNode.parentNode);
allSavedXml.splice(this.index, 1);
allSavedTime.splice(this.index, 1);
console.log("删除" + "----" + deleteTimestamp);
$.ajax({
url: "blocakly",
type: "DELETE",
data : {
deviceId : Datas,
Timestamp : deleteTimestamp
},
success: function(result) {
},
error: function () {
alert("请检查您的网络");
}
});
}
}
};
for (var i = 0; i < shareFile.length; i++) {
shareFile[i].index = i;
shareFile[i].onclick = function (e, callback) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
/*shareXml*/
var shareXml = allSavedXml[this.index];
console.log("分享" + "----" + shareXml);
}
};
for (var i = 0; i < fileInfo.length; i++) {
fileInfo[i].index = i;
fileInfo[i].onclick = function (e) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
var loadXml = confirm("是否要将模块替换成" + "(" + this.firstElementChild.innerHTML + ")");
if (loadXml) {
workspace.clear();
Blockly.Xml.domToWorkspace(Blockly.Xml.textToDom(allSavedXml[this.index]), workspace);
}
}
}
}
/***********************************save and share end**************************************/
|
dataTy | identifier_name |
WXmain.js | var startEvt, moveEvt, endEvt;
if ('ontouchstart' in window) {
startEvt = 'touchstart';
moveEvt = 'touchmove';
endEvt = 'touchend';
} else {
startEvt = 'mousedown';
moveEvt = 'mousemove';
endEvt = 'mouseup';
};
var Datas = document.getElementById("DeviceId").value;
/*************************Hamburger Btn******************************************/
var sideBarBtn = document.querySelector('.sideBarBtn');
sideBarBtn.state = 'closed';
var sidebarWrap = document.querySelector('.sidebarWrap');
var $sidebarWrap = $('.sidebarWrap');
var $Btn_lines = $('.Btn_line');
var mask = document.querySelector('.mask');
var sidebarWrapHeight = sidebarWrap.offsetHeight;
$sidebarWrap.css('display', 'none');
var BtnTimer = null;
sideBarBtn.addEventListener(startEvt, function (e) {
e.stopPropagation();
e.preventDefault();
loopCount = 0;
editor.setValue(Blockly.JavaScript.workspaceToCode(workspace));
BtnMove();
},false);
mask.addEventListener('click',function () {
sideBarBtn.state = 'opened';
BtnMove();
},false);
/***************************************Hamburger Btn animation********************************************/
function BtnMove() | ;
var contentWrap = document.querySelector(".contentWrap");
var sidebarContent = document.querySelectorAll(".sidebarContent");
var contentLogo = document.querySelector(".contentLogo");
var navList = document.querySelectorAll(".navList");
var contentIndex = 0;
var touchX
var disX;
contentWrap.addEventListener(startEvt, function (e) {
touchX = e.changedTouches[0].clientX;
},false);
contentWrap.addEventListener(moveEvt, function(e) {
disX = e.changedTouches[0].clientX - touchX;
},false);
contentWrap.addEventListener(endEvt, function () {
if (disX > 150) {
contentIndex--;
if (contentIndex < 0) contentIndex = sidebarContent.length - 1;
} else if (disX < -150) {
contentIndex++;
if (contentIndex > sidebarContent.length - 1) contentIndex = 0;
}
for (var i = 0; i < sidebarContent.length; i++) {
sidebarContent[i].style.display = "none";
navList[i].classList.remove('curNav');
}
sidebarContent[contentIndex].style.display = "block";
navList[contentIndex].classList.add('curNav');
contentLogo.style.backgroundPositionX = -100 * contentIndex + "px";
disX = 0;
},false);
function contentMove(direction, ele) {
if (direction == 'up') {
ele.css('display', 'block').animate({ 'bottom': 0 + 'px' }, 300);
mask.style.display = 'block';
} else {
ele.animate({ 'bottom': -sidebarWrapHeight + 'px' }, 300, function () {
ele.css('display', 'none');
mask.style.display = 'none';
});
};
};
/*******************************Hamburger Btn animation end***********************************/
/********************************save and share*************************************/
var saveFile = document.querySelector("#saveFile");
var shareBtn = document.querySelector(".shareBtn");
var saveBtn = document.querySelector(".saveBtn");
var saveError = document.querySelector(".saveError");
var fileWrap = document.querySelector(".fileWrap");
var fileUl = document.querySelector(".fileUl");
var filelist = document.querySelector(".filelist");
var clickGetFile = document.querySelector(".clickGetFile");
var deleteFile;
var shareFile;
var fileInfo;
var allSavedXml = [];
var allSavedTime = [];
/*******************************save blocks************************************/
saveBtn.addEventListener(startEvt, function(e) {
e.preventDefault();
}, false);
saveBtn.addEventListener(endEvt, save, false);
function save() {
var blocks = workspace.getAllBlocks();
var saveName = saveFile.value;
/* allow to save */
if (blocks.length !== 0 && saveName.length !== 0 && saveName.length < 10) {
var confirmtoSave = confirm("确定保存当前模块?");
if (confirmtoSave) {
saveError.innerHTML = "";
var Timestamp = new Date().getTime();
var xml = Blockly.Xml.workspaceToDom(workspace);
var xmlText = Blockly.Xml.domToText(xml);
xmlText = xmlText.replace(/"/g, "'");
if (fileWrap.style.display == "block") {
var clonedNode = document.querySelector(".filelist");
var newList = clonedNode.cloneNode(true);
newList.style.display = "block";
newList.querySelector(".fileName").innerHTML = saveName;
fileUl.insertBefore(newList, clonedNode);
allSavedXml.unshift(xmlText);
allSavedTime.unshift(Timestamp);
};
$.ajax({
url: "blocakly",
type: "POST",
data : {
xmlText : xmlText,
saveName : saveName,
Timestamp : Timestamp,
deviceId : Datas
},
success: function(result) {
document.querySelector("#saveFile").value = "";
},
error: function () {
alert("请检查您的网络");
}
});
console.log("保存的模块:" + "----" + xmlText);
console.log("保存的名字:" + "----" + saveName);
console.log("保存的时间戳:" + "----" + Timestamp);
document.querySelector("#saveFile").value = "";
}
} else if (blocks.length === 0) {
saveError.innerHTML = "还没添加模块";
} else if (saveName.length === 0 || saveName.length >= 10) {
saveError.innerHTML = "名字长度1-9";
}
delOrshareFile();
}
/**************************************share to friend********************************************/
/*******************************get blocks********************************/
clickGetFile.addEventListener(startEvt, function (e) {
e.preventDefault();
},false);
clickGetFile.addEventListener(endEvt, getBlocks, false);
function getBlocks () {
$.ajax({
url : "blocaklySelect/"+Datas,
type : "GET",
dataType : "json",
success : function (data) {
clickGetFile.style.display = "none";
fileWrap.style.display = "block";
var saveFileLen = data.length;
for (var i = 0; i < saveFileLen; i++) {
var newList = filelist.cloneNode(true);
newList.style.display = "block";
newList.querySelector(".fileName").innerHTML = data[i].saveName;
fileUl.insertBefore(newList, filelist);
allSavedXml.push(data[i].xmlText);
allSavedTime.push(data[i].Timestamp);
}
delOrshareFile();
},
error : function () {
clickGetFile.innerHTML = "你还没保存过模块";
}
});
};
/****************************************delete Or share File*****************************************/
function delOrshareFile () {
/*when delete the file get .filelist DOM again*/
deleteFile = document.querySelectorAll(".deleteFile");
shareFile = document.querySelectorAll(".shareFile");
fileInfo = document.querySelectorAll(".fileInfo");
for (var i = 0; i < deleteFile.length; i++) {
deleteFile[i].index = i;
deleteFile[i].onclick = function (e) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
var confirmDelete = confirm("确定要删除" + "(" + this.parentNode.parentNode.firstElementChild.firstElementChild.innerHTML + ")");
/*deleteTimestamp*/
if (confirmDelete) {
var deleteTimestamp = allSavedTime[this.index];
fileUl.removeChild(this.parentNode.parentNode);
allSavedXml.splice(this.index, 1);
allSavedTime.splice(this.index, 1);
console.log("删除" + "----" + deleteTimestamp);
$.ajax({
url: "blocakly",
type: "DELETE",
data : {
deviceId : Datas,
Timestamp : deleteTimestamp
},
success: function(result) {
},
error: function () {
alert("请检查您的网络");
}
});
}
}
};
for (var i = 0; i < shareFile.length; i++) {
shareFile[i].index = i;
shareFile[i].onclick = function (e, callback) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
/*shareXml*/
var shareXml = allSavedXml[this.index];
console.log("分享" + "----" + shareXml);
}
};
for (var i = 0; i < fileInfo.length; i++) {
fileInfo[i].index = i;
fileInfo[i].onclick = function (e) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
var loadXml = confirm("是否要将模块替换成" + "(" + this.firstElementChild.innerHTML + ")");
if (loadXml) {
workspace.clear();
Blockly.Xml.domToWorkspace(Blockly.Xml.textToDom(allSavedXml[this.index]), workspace);
}
}
}
}
/***********************************save and share end**************************************/
| {
if (sideBarBtn.state == 'closed') {
$Btn_lines[0].classList.add('Btn_top_clockwise');
$Btn_lines[1].classList.add('Btn_mid_hide');
$Btn_lines[2].classList.add('Btn_bottom_anticlockwise');
sideBarBtn.state = 'opened';
contentMove('up', $sidebarWrap);
} else {
$Btn_lines.removeClass('Btn_top_clockwise Btn_mid_hide Btn_bottom_anticlockwise');
$Btn_lines[0].classList.add('Btn_top_anticlockwise');
$Btn_lines[1].classList.add('Btn_mid_show');
$Btn_lines[2].classList.add('Btn_bottom_clockwise');
BtnTimer = setTimeout(function () {
$Btn_lines.removeClass('Btn_top_anticlockwise Btn_mid_show Btn_bottom_clockwise');
}, 500);
sideBarBtn.state = 'closed';
contentMove('down', $sidebarWrap);
};
} | identifier_body |
WXmain.js | var startEvt, moveEvt, endEvt;
if ('ontouchstart' in window) {
startEvt = 'touchstart';
moveEvt = 'touchmove';
endEvt = 'touchend';
} else {
startEvt = 'mousedown';
moveEvt = 'mousemove';
endEvt = 'mouseup';
};
var Datas = document.getElementById("DeviceId").value;
/*************************Hamburger Btn******************************************/
var sideBarBtn = document.querySelector('.sideBarBtn');
sideBarBtn.state = 'closed';
var sidebarWrap = document.querySelector('.sidebarWrap');
var $sidebarWrap = $('.sidebarWrap');
var $Btn_lines = $('.Btn_line');
var mask = document.querySelector('.mask');
var sidebarWrapHeight = sidebarWrap.offsetHeight;
$sidebarWrap.css('display', 'none');
var BtnTimer = null;
sideBarBtn.addEventListener(startEvt, function (e) {
e.stopPropagation();
e.preventDefault();
loopCount = 0;
editor.setValue(Blockly.JavaScript.workspaceToCode(workspace));
BtnMove();
},false);
mask.addEventListener('click',function () {
sideBarBtn.state = 'opened';
BtnMove();
},false);
/***************************************Hamburger Btn animation********************************************/
function BtnMove() {
if (sideBarBtn.state == 'closed') {
$Btn_lines[0].classList.add('Btn_top_clockwise');
$Btn_lines[1].classList.add('Btn_mid_hide');
$Btn_lines[2].classList.add('Btn_bottom_anticlockwise');
sideBarBtn.state = 'opened';
contentMove('up', $sidebarWrap);
} else {
$Btn_lines.removeClass('Btn_top_clockwise Btn_mid_hide Btn_bottom_anticlockwise');
$Btn_lines[0].classList.add('Btn_top_anticlockwise');
$Btn_lines[1].classList.add('Btn_mid_show');
$Btn_lines[2].classList.add('Btn_bottom_clockwise');
BtnTimer = setTimeout(function () {
$Btn_lines.removeClass('Btn_top_anticlockwise Btn_mid_show Btn_bottom_clockwise');
}, 500);
sideBarBtn.state = 'closed';
contentMove('down', $sidebarWrap);
};
};
var contentWrap = document.querySelector(".contentWrap");
var sidebarContent = document.querySelectorAll(".sidebarContent");
var contentLogo = document.querySelector(".contentLogo");
var navList = document.querySelectorAll(".navList");
var contentIndex = 0;
var touchX
var disX;
contentWrap.addEventListener(startEvt, function (e) {
touchX = e.changedTouches[0].clientX;
},false);
contentWrap.addEventListener(moveEvt, function(e) {
disX = e.changedTouches[0].clientX - touchX;
},false);
contentWrap.addEventListener(endEvt, function () {
if (disX > 150) {
contentIndex--;
if (contentIndex < 0) contentIndex = sidebarContent.length - 1;
} else if (disX < -150) {
contentIndex++;
if (contentIndex > sidebarContent.length - 1) contentIndex = 0;
}
for (var i = 0; i < sidebarContent.length; i++) {
sidebarContent[i].style.display = "none";
navList[i].classList.remove('curNav');
}
sidebarContent[contentIndex].style.display = "block";
navList[contentIndex].classList.add('curNav');
contentLogo.style.backgroundPositionX = -100 * contentIndex + "px";
disX = 0;
},false);
function contentMove(direction, ele) {
if (direction == 'up') {
ele.css('display', 'block').animate({ 'bottom': 0 + 'px' }, 300);
mask.style.display = 'block';
} else {
ele.animate({ 'bottom': -sidebarWrapHeight + 'px' }, 300, function () {
ele.css('display', 'none');
mask.style.display = 'none';
});
};
};
/*******************************Hamburger Btn animation end***********************************/
/********************************save and share*************************************/
var saveFile = document.querySelector("#saveFile");
var shareBtn = document.querySelector(".shareBtn");
var saveBtn = document.querySelector(".saveBtn");
var saveError = document.querySelector(".saveError");
var fileWrap = document.querySelector(".fileWrap");
var fileUl = document.querySelector(".fileUl");
var filelist = document.querySelector(".filelist");
var clickGetFile = document.querySelector(".clickGetFile");
var deleteFile;
var shareFile;
var fileInfo;
var allSavedXml = [];
var allSavedTime = [];
/*******************************save blocks************************************/
saveBtn.addEventListener(startEvt, function(e) {
e.preventDefault();
}, false);
saveBtn.addEventListener(endEvt, save, false);
function save() {
var blocks = workspace.getAllBlocks();
var saveName = saveFile.value;
/* allow to save */
if (blocks.length !== 0 && saveName.length !== 0 && saveName.length < 10) {
var confirmtoSave = confirm("确定保存当前模块?");
if (confirmtoSave) {
saveError.innerHTML = "";
var Timestamp = new Date().getTime();
var xml = Blockly.Xml.workspaceToDom(workspace);
var xmlText = Blockly.Xml.domToText(xml);
xmlText = xmlText.replace(/"/g, "'");
if (fileWrap.style.display == "block") {
var clonedNode = document.querySelector(".filelist");
var newList = clonedNode.cloneNode(true);
newList.style.display = "block";
newList.querySelector(".fileName").innerHTML = saveName;
fileUl.insertBefore(newList, clonedNode);
allSavedXml.unshift(xmlText);
allSavedTime.unshift(Timestamp);
};
$.ajax({
url: "blocakly",
type: "POST",
data : {
xmlText : xmlText,
saveName : saveName,
Timestamp : Timestamp,
deviceId : Datas
},
success: function(result) {
document.querySelector("#saveFile").value = "";
},
error: function () {
alert("请检查您的网络");
}
});
console.log("保存的模块:" + "----" + xmlText);
console.log("保存的名字:" + "----" + saveName);
console.log("保存的时间戳:" + "----" + Timestamp);
document.querySelector("#saveFile").value = "";
}
} else if (blocks.length === 0) {
saveError.innerHTML = "还没添加模块";
} else if (saveName.length === 0 || saveName.length >= 10) {
saveError.innerHTML = "名字长度1-9";
}
delOrshareFile();
}
/**************************************share to friend********************************************/
/*******************************get blocks********************************/
clickGetFile.addEventListener(startEvt, function (e) {
e.preventDefault();
},false);
clickGetFile.addEventListener(endEvt, getBlocks, false);
function getBlocks () {
$.ajax({
url : "blocaklySelect/"+Datas,
type : "GET",
dataType : "json",
success : function (data) {
clickGetFile.style.display = "none";
fileWrap.style.display = "block";
var saveFileLen = data.length;
for (var i = 0; i < saveFileLen; i++) {
var newList = filelist.cloneNode(true);
newList.style.display = "block";
newList.querySelector(".fileName").innerHTML = data[i].saveName;
fileUl.insertBefore(newList, filelist);
allSavedXml.push(data[i].xmlText);
allSavedTime.push(data[i].Timestamp);
}
delOrshareFile();
},
error : function () {
clickGetFile.innerHTML = "你还没保存过模块";
}
});
};
/****************************************delete Or share File*****************************************/
function delOrshareFile () {
/*when delete the file get .filelist DOM again*/
deleteFile = document.querySelectorAll(".deleteFile");
shareFile = document.querySelectorAll(".shareFile");
fileInfo = document.querySelectorAll(".fileInfo");
| e.preventDefault();
e.stopPropagation();
delOrshareFile();
var confirmDelete = confirm("确定要删除" + "(" + this.parentNode.parentNode.firstElementChild.firstElementChild.innerHTML + ")");
/*deleteTimestamp*/
if (confirmDelete) {
var deleteTimestamp = allSavedTime[this.index];
fileUl.removeChild(this.parentNode.parentNode);
allSavedXml.splice(this.index, 1);
allSavedTime.splice(this.index, 1);
console.log("删除" + "----" + deleteTimestamp);
$.ajax({
url: "blocakly",
type: "DELETE",
data : {
deviceId : Datas,
Timestamp : deleteTimestamp
},
success: function(result) {
},
error: function () {
alert("请检查您的网络");
}
});
}
}
};
for (var i = 0; i < shareFile.length; i++) {
shareFile[i].index = i;
shareFile[i].onclick = function (e, callback) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
/*shareXml*/
var shareXml = allSavedXml[this.index];
console.log("分享" + "----" + shareXml);
}
};
for (var i = 0; i < fileInfo.length; i++) {
fileInfo[i].index = i;
fileInfo[i].onclick = function (e) {
e.preventDefault();
e.stopPropagation();
delOrshareFile();
var loadXml = confirm("是否要将模块替换成" + "(" + this.firstElementChild.innerHTML + ")");
if (loadXml) {
workspace.clear();
Blockly.Xml.domToWorkspace(Blockly.Xml.textToDom(allSavedXml[this.index]), workspace);
}
}
}
}
/***********************************save and share end**************************************/ | for (var i = 0; i < deleteFile.length; i++) {
deleteFile[i].index = i;
deleteFile[i].onclick = function (e) {
| random_line_split |
ether.service.ts | import { Inject, Injectable, Provider } from '@angular/core';
import { Signer, utils, providers, Wallet, ethers, Contract } from 'ethers';
import { PROVIDER } from './provider-injection-token';
import WalletConnect from '@walletconnect/client';
import QRCodeModal from '@walletconnect/qrcode-modal';
import {
LOCAL_STORAGE,
StorageService,
StorageTranscoders,
} from 'ngx-webstorage-service';
import { BehaviorSubject, of } from 'rxjs';
import { WEB3 } from './web3';
import Web3 from 'web3';
import { HttpClient } from '@angular/common/http';
import { catchError, map, tap } from 'rxjs/operators';
import { astroNFT } from '../models/astroNFT.enum';
import { environment } from 'src/environments/environment';
declare let require: any;
declare let window: any;
// const Web3 = require('web3');
const daiAbi = [
// Some details about the token
'function name() view returns (string)',
'function symbol() view returns (string)',
// Get the account balance
'function balanceOf(address) view returns (uint)',
// Send some of your tokens to someone else
'function transfer(address to, uint amount)',
// An event triggered whenever anyone transfers to someone else
'event Transfer(address indexed from, address indexed to, uint amount)',
'function getValue() view returns (string value)',
];
@Injectable({ providedIn: 'root' })
export class EtherService {
private wallet: Wallet;
// web3;
enable;
account;
astroAddress = '0xcbd55d4ffc43467142761a764763652b48b969ff';
secondTokenAddress = '0x62359ed7505efc61ff1d56fef82158ccaffa23d7';
powerUpAddress = '0xd8cd8cb7f468ef175bc01c48497d3f7fa27b4653';
connectedAddress = '';
astroAmount = 0;
secondTokenAmount = 0;
idAccount: string;
connectedAddress$ = new BehaviorSubject('');
hasLvl1 = false;
hasLvl2 = false;
hasLvl3 = false;
public connector;
constructor(
@Inject(PROVIDER) public provider: providers.BaseProvider,
@Inject(WEB3) private web3: Web3,
@Inject(LOCAL_STORAGE) private storage: StorageService,
private http: HttpClient
) {
// Create a connector
this.createConnector();
if (window.ethereum === undefined) {
// alert('Non-Ethereum browser detected. Install MetaMask');
} else {
if (typeof window.web3 !== 'undefined') {
// this.web3 = window.web3.currentProvider;
} else {
// this.web3 = new Web3.providers.HttpProvider('http://localhost:8545');
}
// console.log('transfer.service :: constructor :: window.ethereum');
// this.web3 = new Web3(window.ethereum);
// console.log('transfer.service :: constructor :: this.web3');
// console.log(this.web3);
window.ethereum.on('accountsChanged', async (res) => {
this.web3.eth.getAccounts(async (error, accounts) => {
// console.log(accounts[0], 'current account after account change');
const account = accounts && accounts[0];
this.idAccount = account;
await this.afterConnection(account);
});
});
}
this.connectedAddress$.subscribe(async (res) => {
if (!res) return;
this.idAccount = res;
// this.hasLvl1 = await this.getHasNFT(astroNFT.Lvl_1_NoobCanon, res.toLowerCase());
this.hasLvl2 = await this.getHasNFT(
astroNFT.Lvl_2_PipeCleaner,
res.toLowerCase()
);
this.hasLvl3 = await this.getHasNFT(
astroNFT.Lvl_3_BFG9001,
res.toLowerCase()
);
// console.log('levels', { lvl1: this.hasLvl1, lvl2: this.hasLvl2, lvl3: this.hasLvl3});
});
}
private createConnector() {
this.connector = new WalletConnect({
bridge: 'https://bridge.walletconnect.org',
qrcodeModal: QRCodeModal,
});
this.connector.on('connect', async (error, payload) => {
if (error) {
throw error;
}
QRCodeModal.close();
// Get provided accounts and chainId
const { accounts, chainId } = payload.s[0];
await this.afterConnection(accounts && accounts[0]);
});
this.connector.on('disconnect', (error, payload) => {
if (error) {
throw error;
}
this.afterConnection('');
});
this.connector.on('session_update', async (error, payload) => {
if (error) {
throw error;
}
const { accounts, chainId } = payload.params[0];
await this.afterConnection(accounts && accounts[0]);
});
if (this.connector.connected) {
this.afterConnection(this.connector.accounts[0]);
}
}
private async afterConnection(account: string) {
this.connectedAddress = account;
this.connectedAddress$.next(this.connectedAddress);
const balance = await this.getTokenBalance(account, this.astroAddress);
/* const balanceSecondToken = await this.getTokenBalance(
account,
this.secondTokenAddress
); */
this.astroAmount =
+balance > this.astroAmount ? +balance : this.astroAmount;
/* this.secondTokenAmount =
+balanceSecondToken > this.secondTokenAmount
? +balanceSecondToken
: this.secondTokenAmount; */
return Promise.resolve(true);
}
getAccountConnected(): string {
return this.idAccount ?? null;
}
getHasNFT(id: number, address: string): Promise<boolean> {
const url = `https://api-mainnet.rarible.com/ownerships/0xd8cd8cb7f468ef175bc01c48497d3f7fa27b4653%3A${id}%3A${address}`;
return this.http
.get<any>(url)
.pipe(
catchError((error) => {
if (error.error instanceof ErrorEvent) {
} else {
}
return of(false);
}),
map((r) => {
if (!r) {
return false;
} else {
return r.ownership.token === this.powerUpAddress;
}
})
)
.toPromise();
}
// 8-bit - 0
get noAstro() {
return this.astroAmount < 1;
}
// 8-bit - 1
get astroTier1() |
// 16-bit - 1000
get astroTier2() {
return (
(this.astroAmount >= 250 || this.secondTokenAmount >= 0.1) &&
this.astroAmount < 1000
);
}
// 32-bit - 1000
get astroTier3() {
return (
(this.astroAmount >= 1000 && this.astroAmount < 20000) ||
(this.hasLvl2 && !this.hasLvl3)
);
}
// 64-bit - 20000
get astroTier4() {
return this.astroAmount >= 20000 || this.hasLvl3 || !environment.production;
}
get hasTier1Access() {
return (
this.astroTier1 || this.astroTier2 || this.astroTier3 || this.astroTier4
);
}
get hasTier2Access() {
return this.astroTier2 || this.astroTier3 || this.astroTier4;
}
get hasTier3Access() {
return this.astroTier3 || this.astroTier4;
}
public async getBalances() {
this.astroAmount = await this.getTokenBalance('', this.astroAddress);
/* this.secondTokenAmount = await this.getTokenBalance(
'',
this.secondTokenAddress
); */
}
public async getTokenBalance(
account: string,
tokenAddress = this.astroAddress
): Promise<any> {
if (!account) {
account = await this.getAccount();
this.connectedAddress = account;
this.connectedAddress$.next(this.connectedAddress);
// console.log('transfer.service :: getUserBalance :: account');
// console.log(account);
}
const contract = new Contract(tokenAddress, daiAbi, this.provider);
const balance =
//20000;
(await contract.balanceOf(account)) / 10 ** 18;
return balance;
}
async enableMetaMaskAccount(): Promise<any> {
let enable = false;
await new Promise((resolve, reject) => {
enable = window.ethereum.enable();
});
return Promise.resolve(enable);
}
async enableWalletConnect(): Promise<any> {
if (!this.connector.connected) {
this.createConnector();
this.connector.createSession().then(() => {
// get uri for QR Code modal
const uri = this.connector.uri;
QRCodeModal.open(uri, () => {
QRCodeModal.close();
});
});
}
return Promise.resolve(true);
}
disconnectWalletConnect() {
this.connector.killSession();
}
private async getAccount(): Promise<any> {
// console.log('transfer.service :: getAccount :: start');
if (this.account == null) {
this.account = (await new Promise((resolve, reject) => {
// console.log('transfer.service :: getAccount :: eth');
// console.log(this.web3.eth);
this.web3.eth.getAccounts((err, retAccount) => {
/* console.log('transfer.service :: getAccount: retAccount');
console.log(retAccount); */
if (retAccount) {
this.account = retAccount[0];
resolve(this.account);
} else {
/* alert('transfer.service :: getAccount :: no accounts found.');
reject('No accounts found.'); */
}
if (err != null) {
/* alert('transfer.service :: getAccount :: error retrieving account');
reject('Error retrieving account'); */
}
});
})) as Promise<any>;
}
return Promise.resolve(this.account);
}
async getFrom(txId: string) {
const res = await this.web3.eth.getTransactionReceipt(txId);
return res.from;
}
public async getUserBalance(): Promise<any> {
const account = await this.getAccount();
// console.log('transfer.service :: getUserBalance :: account');
// console.log(account);
return new Promise((resolve, reject) => {
this.web3.eth.getBalance(account, function (err, balance) {
// console.log('transfer.service :: getUserBalance :: getBalance');
// console.log(balance);
if (!err) {
const retVal = {
account,
balance,
};
resolve(retVal);
} else {
reject({ account: 'error', balance: 0 });
}
});
}) as Promise<any>;
}
getInfo(): Contract {
return new ethers.Contract(this.astroAddress, daiAbi, this.provider);
}
}
| {
return this.astroAmount >= 1 && this.astroAmount < 250;
} | identifier_body |
ether.service.ts | import { Inject, Injectable, Provider } from '@angular/core';
import { Signer, utils, providers, Wallet, ethers, Contract } from 'ethers';
import { PROVIDER } from './provider-injection-token';
import WalletConnect from '@walletconnect/client';
import QRCodeModal from '@walletconnect/qrcode-modal';
import {
LOCAL_STORAGE,
StorageService,
StorageTranscoders,
} from 'ngx-webstorage-service';
import { BehaviorSubject, of } from 'rxjs';
import { WEB3 } from './web3';
import Web3 from 'web3';
import { HttpClient } from '@angular/common/http';
import { catchError, map, tap } from 'rxjs/operators';
import { astroNFT } from '../models/astroNFT.enum';
import { environment } from 'src/environments/environment';
declare let require: any;
declare let window: any;
// const Web3 = require('web3');
const daiAbi = [
// Some details about the token
'function name() view returns (string)',
'function symbol() view returns (string)',
// Get the account balance
'function balanceOf(address) view returns (uint)',
// Send some of your tokens to someone else
'function transfer(address to, uint amount)',
// An event triggered whenever anyone transfers to someone else
'event Transfer(address indexed from, address indexed to, uint amount)',
'function getValue() view returns (string value)',
];
@Injectable({ providedIn: 'root' })
export class EtherService {
private wallet: Wallet;
// web3;
enable;
account;
astroAddress = '0xcbd55d4ffc43467142761a764763652b48b969ff';
secondTokenAddress = '0x62359ed7505efc61ff1d56fef82158ccaffa23d7';
powerUpAddress = '0xd8cd8cb7f468ef175bc01c48497d3f7fa27b4653';
connectedAddress = '';
astroAmount = 0;
secondTokenAmount = 0;
idAccount: string;
connectedAddress$ = new BehaviorSubject('');
hasLvl1 = false;
hasLvl2 = false;
hasLvl3 = false;
public connector;
constructor(
@Inject(PROVIDER) public provider: providers.BaseProvider,
@Inject(WEB3) private web3: Web3,
@Inject(LOCAL_STORAGE) private storage: StorageService,
private http: HttpClient
) {
// Create a connector
this.createConnector();
if (window.ethereum === undefined) {
// alert('Non-Ethereum browser detected. Install MetaMask');
} else |
this.connectedAddress$.subscribe(async (res) => {
if (!res) return;
this.idAccount = res;
// this.hasLvl1 = await this.getHasNFT(astroNFT.Lvl_1_NoobCanon, res.toLowerCase());
this.hasLvl2 = await this.getHasNFT(
astroNFT.Lvl_2_PipeCleaner,
res.toLowerCase()
);
this.hasLvl3 = await this.getHasNFT(
astroNFT.Lvl_3_BFG9001,
res.toLowerCase()
);
// console.log('levels', { lvl1: this.hasLvl1, lvl2: this.hasLvl2, lvl3: this.hasLvl3});
});
}
private createConnector() {
this.connector = new WalletConnect({
bridge: 'https://bridge.walletconnect.org',
qrcodeModal: QRCodeModal,
});
this.connector.on('connect', async (error, payload) => {
if (error) {
throw error;
}
QRCodeModal.close();
// Get provided accounts and chainId
const { accounts, chainId } = payload.s[0];
await this.afterConnection(accounts && accounts[0]);
});
this.connector.on('disconnect', (error, payload) => {
if (error) {
throw error;
}
this.afterConnection('');
});
this.connector.on('session_update', async (error, payload) => {
if (error) {
throw error;
}
const { accounts, chainId } = payload.params[0];
await this.afterConnection(accounts && accounts[0]);
});
if (this.connector.connected) {
this.afterConnection(this.connector.accounts[0]);
}
}
private async afterConnection(account: string) {
this.connectedAddress = account;
this.connectedAddress$.next(this.connectedAddress);
const balance = await this.getTokenBalance(account, this.astroAddress);
/* const balanceSecondToken = await this.getTokenBalance(
account,
this.secondTokenAddress
); */
this.astroAmount =
+balance > this.astroAmount ? +balance : this.astroAmount;
/* this.secondTokenAmount =
+balanceSecondToken > this.secondTokenAmount
? +balanceSecondToken
: this.secondTokenAmount; */
return Promise.resolve(true);
}
getAccountConnected(): string {
return this.idAccount ?? null;
}
getHasNFT(id: number, address: string): Promise<boolean> {
const url = `https://api-mainnet.rarible.com/ownerships/0xd8cd8cb7f468ef175bc01c48497d3f7fa27b4653%3A${id}%3A${address}`;
return this.http
.get<any>(url)
.pipe(
catchError((error) => {
if (error.error instanceof ErrorEvent) {
} else {
}
return of(false);
}),
map((r) => {
if (!r) {
return false;
} else {
return r.ownership.token === this.powerUpAddress;
}
})
)
.toPromise();
}
// 8-bit - 0
get noAstro() {
return this.astroAmount < 1;
}
// 8-bit - 1
get astroTier1() {
return this.astroAmount >= 1 && this.astroAmount < 250;
}
// 16-bit - 1000
get astroTier2() {
return (
(this.astroAmount >= 250 || this.secondTokenAmount >= 0.1) &&
this.astroAmount < 1000
);
}
// 32-bit - 1000
get astroTier3() {
return (
(this.astroAmount >= 1000 && this.astroAmount < 20000) ||
(this.hasLvl2 && !this.hasLvl3)
);
}
// 64-bit - 20000
get astroTier4() {
return this.astroAmount >= 20000 || this.hasLvl3 || !environment.production;
}
get hasTier1Access() {
return (
this.astroTier1 || this.astroTier2 || this.astroTier3 || this.astroTier4
);
}
get hasTier2Access() {
return this.astroTier2 || this.astroTier3 || this.astroTier4;
}
get hasTier3Access() {
return this.astroTier3 || this.astroTier4;
}
public async getBalances() {
this.astroAmount = await this.getTokenBalance('', this.astroAddress);
/* this.secondTokenAmount = await this.getTokenBalance(
'',
this.secondTokenAddress
); */
}
public async getTokenBalance(
account: string,
tokenAddress = this.astroAddress
): Promise<any> {
if (!account) {
account = await this.getAccount();
this.connectedAddress = account;
this.connectedAddress$.next(this.connectedAddress);
// console.log('transfer.service :: getUserBalance :: account');
// console.log(account);
}
const contract = new Contract(tokenAddress, daiAbi, this.provider);
const balance =
//20000;
(await contract.balanceOf(account)) / 10 ** 18;
return balance;
}
async enableMetaMaskAccount(): Promise<any> {
let enable = false;
await new Promise((resolve, reject) => {
enable = window.ethereum.enable();
});
return Promise.resolve(enable);
}
async enableWalletConnect(): Promise<any> {
if (!this.connector.connected) {
this.createConnector();
this.connector.createSession().then(() => {
// get uri for QR Code modal
const uri = this.connector.uri;
QRCodeModal.open(uri, () => {
QRCodeModal.close();
});
});
}
return Promise.resolve(true);
}
disconnectWalletConnect() {
this.connector.killSession();
}
private async getAccount(): Promise<any> {
// console.log('transfer.service :: getAccount :: start');
if (this.account == null) {
this.account = (await new Promise((resolve, reject) => {
// console.log('transfer.service :: getAccount :: eth');
// console.log(this.web3.eth);
this.web3.eth.getAccounts((err, retAccount) => {
/* console.log('transfer.service :: getAccount: retAccount');
console.log(retAccount); */
if (retAccount) {
this.account = retAccount[0];
resolve(this.account);
} else {
/* alert('transfer.service :: getAccount :: no accounts found.');
reject('No accounts found.'); */
}
if (err != null) {
/* alert('transfer.service :: getAccount :: error retrieving account');
reject('Error retrieving account'); */
}
});
})) as Promise<any>;
}
return Promise.resolve(this.account);
}
async getFrom(txId: string) {
const res = await this.web3.eth.getTransactionReceipt(txId);
return res.from;
}
public async getUserBalance(): Promise<any> {
const account = await this.getAccount();
// console.log('transfer.service :: getUserBalance :: account');
// console.log(account);
return new Promise((resolve, reject) => {
this.web3.eth.getBalance(account, function (err, balance) {
// console.log('transfer.service :: getUserBalance :: getBalance');
// console.log(balance);
if (!err) {
const retVal = {
account,
balance,
};
resolve(retVal);
} else {
reject({ account: 'error', balance: 0 });
}
});
}) as Promise<any>;
}
getInfo(): Contract {
return new ethers.Contract(this.astroAddress, daiAbi, this.provider);
}
}
| {
if (typeof window.web3 !== 'undefined') {
// this.web3 = window.web3.currentProvider;
} else {
// this.web3 = new Web3.providers.HttpProvider('http://localhost:8545');
}
// console.log('transfer.service :: constructor :: window.ethereum');
// this.web3 = new Web3(window.ethereum);
// console.log('transfer.service :: constructor :: this.web3');
// console.log(this.web3);
window.ethereum.on('accountsChanged', async (res) => {
this.web3.eth.getAccounts(async (error, accounts) => {
// console.log(accounts[0], 'current account after account change');
const account = accounts && accounts[0];
this.idAccount = account;
await this.afterConnection(account);
});
});
} | conditional_block |
ether.service.ts | import { Inject, Injectable, Provider } from '@angular/core';
import { Signer, utils, providers, Wallet, ethers, Contract } from 'ethers';
import { PROVIDER } from './provider-injection-token';
import WalletConnect from '@walletconnect/client';
import QRCodeModal from '@walletconnect/qrcode-modal';
import {
LOCAL_STORAGE,
StorageService,
StorageTranscoders,
} from 'ngx-webstorage-service';
import { BehaviorSubject, of } from 'rxjs';
import { WEB3 } from './web3';
import Web3 from 'web3';
import { HttpClient } from '@angular/common/http';
import { catchError, map, tap } from 'rxjs/operators';
import { astroNFT } from '../models/astroNFT.enum';
import { environment } from 'src/environments/environment';
declare let require: any;
declare let window: any;
// const Web3 = require('web3');
const daiAbi = [
// Some details about the token
'function name() view returns (string)',
'function symbol() view returns (string)',
// Get the account balance
'function balanceOf(address) view returns (uint)',
// Send some of your tokens to someone else
'function transfer(address to, uint amount)',
// An event triggered whenever anyone transfers to someone else
'event Transfer(address indexed from, address indexed to, uint amount)',
'function getValue() view returns (string value)',
];
@Injectable({ providedIn: 'root' })
export class EtherService {
private wallet: Wallet;
// web3;
enable;
account;
astroAddress = '0xcbd55d4ffc43467142761a764763652b48b969ff';
secondTokenAddress = '0x62359ed7505efc61ff1d56fef82158ccaffa23d7';
powerUpAddress = '0xd8cd8cb7f468ef175bc01c48497d3f7fa27b4653';
connectedAddress = '';
astroAmount = 0;
secondTokenAmount = 0;
idAccount: string;
connectedAddress$ = new BehaviorSubject('');
hasLvl1 = false;
hasLvl2 = false;
hasLvl3 = false;
public connector;
constructor(
@Inject(PROVIDER) public provider: providers.BaseProvider,
@Inject(WEB3) private web3: Web3,
@Inject(LOCAL_STORAGE) private storage: StorageService,
private http: HttpClient
) {
// Create a connector
this.createConnector();
if (window.ethereum === undefined) {
// alert('Non-Ethereum browser detected. Install MetaMask');
} else {
if (typeof window.web3 !== 'undefined') {
// this.web3 = window.web3.currentProvider;
} else {
// this.web3 = new Web3.providers.HttpProvider('http://localhost:8545');
}
// console.log('transfer.service :: constructor :: window.ethereum');
// this.web3 = new Web3(window.ethereum);
// console.log('transfer.service :: constructor :: this.web3');
// console.log(this.web3);
window.ethereum.on('accountsChanged', async (res) => {
this.web3.eth.getAccounts(async (error, accounts) => {
// console.log(accounts[0], 'current account after account change');
const account = accounts && accounts[0];
this.idAccount = account;
await this.afterConnection(account);
});
});
}
this.connectedAddress$.subscribe(async (res) => {
if (!res) return;
this.idAccount = res;
// this.hasLvl1 = await this.getHasNFT(astroNFT.Lvl_1_NoobCanon, res.toLowerCase());
this.hasLvl2 = await this.getHasNFT(
astroNFT.Lvl_2_PipeCleaner,
res.toLowerCase()
);
this.hasLvl3 = await this.getHasNFT(
astroNFT.Lvl_3_BFG9001,
res.toLowerCase()
);
// console.log('levels', { lvl1: this.hasLvl1, lvl2: this.hasLvl2, lvl3: this.hasLvl3});
});
}
private createConnector() {
this.connector = new WalletConnect({
bridge: 'https://bridge.walletconnect.org',
qrcodeModal: QRCodeModal,
});
this.connector.on('connect', async (error, payload) => {
if (error) {
throw error;
}
QRCodeModal.close();
// Get provided accounts and chainId
const { accounts, chainId } = payload.s[0];
await this.afterConnection(accounts && accounts[0]);
});
this.connector.on('disconnect', (error, payload) => {
if (error) {
throw error;
}
this.afterConnection('');
});
this.connector.on('session_update', async (error, payload) => {
if (error) {
throw error;
}
const { accounts, chainId } = payload.params[0];
await this.afterConnection(accounts && accounts[0]);
});
if (this.connector.connected) {
this.afterConnection(this.connector.accounts[0]);
}
}
private async afterConnection(account: string) {
this.connectedAddress = account;
this.connectedAddress$.next(this.connectedAddress);
const balance = await this.getTokenBalance(account, this.astroAddress);
/* const balanceSecondToken = await this.getTokenBalance(
account,
this.secondTokenAddress
); */
this.astroAmount =
+balance > this.astroAmount ? +balance : this.astroAmount;
/* this.secondTokenAmount =
+balanceSecondToken > this.secondTokenAmount
? +balanceSecondToken
: this.secondTokenAmount; */
return Promise.resolve(true);
}
getAccountConnected(): string {
return this.idAccount ?? null;
}
getHasNFT(id: number, address: string): Promise<boolean> {
const url = `https://api-mainnet.rarible.com/ownerships/0xd8cd8cb7f468ef175bc01c48497d3f7fa27b4653%3A${id}%3A${address}`;
return this.http
.get<any>(url)
.pipe(
catchError((error) => {
if (error.error instanceof ErrorEvent) {
} else {
}
return of(false);
}),
map((r) => {
if (!r) {
return false;
} else {
return r.ownership.token === this.powerUpAddress;
}
})
)
.toPromise();
}
// 8-bit - 0
get noAstro() {
return this.astroAmount < 1;
}
// 8-bit - 1
get astroTier1() {
return this.astroAmount >= 1 && this.astroAmount < 250;
}
// 16-bit - 1000
get astroTier2() {
return (
(this.astroAmount >= 250 || this.secondTokenAmount >= 0.1) &&
this.astroAmount < 1000
);
}
// 32-bit - 1000
get astroTier3() {
return (
(this.astroAmount >= 1000 && this.astroAmount < 20000) ||
(this.hasLvl2 && !this.hasLvl3)
);
}
// 64-bit - 20000
get astroTier4() {
return this.astroAmount >= 20000 || this.hasLvl3 || !environment.production;
}
get hasTier1Access() {
return (
this.astroTier1 || this.astroTier2 || this.astroTier3 || this.astroTier4
);
}
get hasTier2Access() {
return this.astroTier2 || this.astroTier3 || this.astroTier4;
}
get hasTier3Access() {
return this.astroTier3 || this.astroTier4;
}
public async getBalances() {
this.astroAmount = await this.getTokenBalance('', this.astroAddress);
/* this.secondTokenAmount = await this.getTokenBalance(
'',
this.secondTokenAddress
); */
}
public async getTokenBalance(
account: string,
tokenAddress = this.astroAddress
): Promise<any> {
if (!account) {
account = await this.getAccount();
this.connectedAddress = account;
this.connectedAddress$.next(this.connectedAddress);
// console.log('transfer.service :: getUserBalance :: account');
// console.log(account);
}
const contract = new Contract(tokenAddress, daiAbi, this.provider);
const balance =
//20000;
(await contract.balanceOf(account)) / 10 ** 18;
return balance;
}
async enableMetaMaskAccount(): Promise<any> {
let enable = false;
await new Promise((resolve, reject) => {
enable = window.ethereum.enable();
});
return Promise.resolve(enable);
}
async enableWalletConnect(): Promise<any> {
if (!this.connector.connected) {
this.createConnector();
this.connector.createSession().then(() => {
// get uri for QR Code modal
const uri = this.connector.uri;
QRCodeModal.open(uri, () => {
QRCodeModal.close();
});
});
}
return Promise.resolve(true);
}
disconnectWalletConnect() {
this.connector.killSession();
}
private async | (): Promise<any> {
// console.log('transfer.service :: getAccount :: start');
if (this.account == null) {
this.account = (await new Promise((resolve, reject) => {
// console.log('transfer.service :: getAccount :: eth');
// console.log(this.web3.eth);
this.web3.eth.getAccounts((err, retAccount) => {
/* console.log('transfer.service :: getAccount: retAccount');
console.log(retAccount); */
if (retAccount) {
this.account = retAccount[0];
resolve(this.account);
} else {
/* alert('transfer.service :: getAccount :: no accounts found.');
reject('No accounts found.'); */
}
if (err != null) {
/* alert('transfer.service :: getAccount :: error retrieving account');
reject('Error retrieving account'); */
}
});
})) as Promise<any>;
}
return Promise.resolve(this.account);
}
async getFrom(txId: string) {
const res = await this.web3.eth.getTransactionReceipt(txId);
return res.from;
}
public async getUserBalance(): Promise<any> {
const account = await this.getAccount();
// console.log('transfer.service :: getUserBalance :: account');
// console.log(account);
return new Promise((resolve, reject) => {
this.web3.eth.getBalance(account, function (err, balance) {
// console.log('transfer.service :: getUserBalance :: getBalance');
// console.log(balance);
if (!err) {
const retVal = {
account,
balance,
};
resolve(retVal);
} else {
reject({ account: 'error', balance: 0 });
}
});
}) as Promise<any>;
}
getInfo(): Contract {
return new ethers.Contract(this.astroAddress, daiAbi, this.provider);
}
}
| getAccount | identifier_name |
ether.service.ts | import { Inject, Injectable, Provider } from '@angular/core';
import { Signer, utils, providers, Wallet, ethers, Contract } from 'ethers';
import { PROVIDER } from './provider-injection-token';
import WalletConnect from '@walletconnect/client';
import QRCodeModal from '@walletconnect/qrcode-modal';
import {
LOCAL_STORAGE,
StorageService,
StorageTranscoders,
} from 'ngx-webstorage-service';
import { BehaviorSubject, of } from 'rxjs';
import { WEB3 } from './web3';
import Web3 from 'web3';
import { HttpClient } from '@angular/common/http';
import { catchError, map, tap } from 'rxjs/operators';
import { astroNFT } from '../models/astroNFT.enum';
import { environment } from 'src/environments/environment';
declare let require: any;
declare let window: any;
// const Web3 = require('web3');
const daiAbi = [
// Some details about the token
'function name() view returns (string)',
'function symbol() view returns (string)',
// Get the account balance
'function balanceOf(address) view returns (uint)',
// Send some of your tokens to someone else
'function transfer(address to, uint amount)',
// An event triggered whenever anyone transfers to someone else
'event Transfer(address indexed from, address indexed to, uint amount)',
'function getValue() view returns (string value)',
];
@Injectable({ providedIn: 'root' })
export class EtherService { | astroAddress = '0xcbd55d4ffc43467142761a764763652b48b969ff';
secondTokenAddress = '0x62359ed7505efc61ff1d56fef82158ccaffa23d7';
powerUpAddress = '0xd8cd8cb7f468ef175bc01c48497d3f7fa27b4653';
connectedAddress = '';
astroAmount = 0;
secondTokenAmount = 0;
idAccount: string;
connectedAddress$ = new BehaviorSubject('');
hasLvl1 = false;
hasLvl2 = false;
hasLvl3 = false;
public connector;
constructor(
@Inject(PROVIDER) public provider: providers.BaseProvider,
@Inject(WEB3) private web3: Web3,
@Inject(LOCAL_STORAGE) private storage: StorageService,
private http: HttpClient
) {
// Create a connector
this.createConnector();
if (window.ethereum === undefined) {
// alert('Non-Ethereum browser detected. Install MetaMask');
} else {
if (typeof window.web3 !== 'undefined') {
// this.web3 = window.web3.currentProvider;
} else {
// this.web3 = new Web3.providers.HttpProvider('http://localhost:8545');
}
// console.log('transfer.service :: constructor :: window.ethereum');
// this.web3 = new Web3(window.ethereum);
// console.log('transfer.service :: constructor :: this.web3');
// console.log(this.web3);
window.ethereum.on('accountsChanged', async (res) => {
this.web3.eth.getAccounts(async (error, accounts) => {
// console.log(accounts[0], 'current account after account change');
const account = accounts && accounts[0];
this.idAccount = account;
await this.afterConnection(account);
});
});
}
this.connectedAddress$.subscribe(async (res) => {
if (!res) return;
this.idAccount = res;
// this.hasLvl1 = await this.getHasNFT(astroNFT.Lvl_1_NoobCanon, res.toLowerCase());
this.hasLvl2 = await this.getHasNFT(
astroNFT.Lvl_2_PipeCleaner,
res.toLowerCase()
);
this.hasLvl3 = await this.getHasNFT(
astroNFT.Lvl_3_BFG9001,
res.toLowerCase()
);
// console.log('levels', { lvl1: this.hasLvl1, lvl2: this.hasLvl2, lvl3: this.hasLvl3});
});
}
private createConnector() {
this.connector = new WalletConnect({
bridge: 'https://bridge.walletconnect.org',
qrcodeModal: QRCodeModal,
});
this.connector.on('connect', async (error, payload) => {
if (error) {
throw error;
}
QRCodeModal.close();
// Get provided accounts and chainId
const { accounts, chainId } = payload.s[0];
await this.afterConnection(accounts && accounts[0]);
});
this.connector.on('disconnect', (error, payload) => {
if (error) {
throw error;
}
this.afterConnection('');
});
this.connector.on('session_update', async (error, payload) => {
if (error) {
throw error;
}
const { accounts, chainId } = payload.params[0];
await this.afterConnection(accounts && accounts[0]);
});
if (this.connector.connected) {
this.afterConnection(this.connector.accounts[0]);
}
}
private async afterConnection(account: string) {
this.connectedAddress = account;
this.connectedAddress$.next(this.connectedAddress);
const balance = await this.getTokenBalance(account, this.astroAddress);
/* const balanceSecondToken = await this.getTokenBalance(
account,
this.secondTokenAddress
); */
this.astroAmount =
+balance > this.astroAmount ? +balance : this.astroAmount;
/* this.secondTokenAmount =
+balanceSecondToken > this.secondTokenAmount
? +balanceSecondToken
: this.secondTokenAmount; */
return Promise.resolve(true);
}
getAccountConnected(): string {
return this.idAccount ?? null;
}
getHasNFT(id: number, address: string): Promise<boolean> {
const url = `https://api-mainnet.rarible.com/ownerships/0xd8cd8cb7f468ef175bc01c48497d3f7fa27b4653%3A${id}%3A${address}`;
return this.http
.get<any>(url)
.pipe(
catchError((error) => {
if (error.error instanceof ErrorEvent) {
} else {
}
return of(false);
}),
map((r) => {
if (!r) {
return false;
} else {
return r.ownership.token === this.powerUpAddress;
}
})
)
.toPromise();
}
// 8-bit - 0
get noAstro() {
return this.astroAmount < 1;
}
// 8-bit - 1
get astroTier1() {
return this.astroAmount >= 1 && this.astroAmount < 250;
}
// 16-bit - 1000
get astroTier2() {
return (
(this.astroAmount >= 250 || this.secondTokenAmount >= 0.1) &&
this.astroAmount < 1000
);
}
// 32-bit - 1000
get astroTier3() {
return (
(this.astroAmount >= 1000 && this.astroAmount < 20000) ||
(this.hasLvl2 && !this.hasLvl3)
);
}
// 64-bit - 20000
get astroTier4() {
return this.astroAmount >= 20000 || this.hasLvl3 || !environment.production;
}
get hasTier1Access() {
return (
this.astroTier1 || this.astroTier2 || this.astroTier3 || this.astroTier4
);
}
get hasTier2Access() {
return this.astroTier2 || this.astroTier3 || this.astroTier4;
}
get hasTier3Access() {
return this.astroTier3 || this.astroTier4;
}
public async getBalances() {
this.astroAmount = await this.getTokenBalance('', this.astroAddress);
/* this.secondTokenAmount = await this.getTokenBalance(
'',
this.secondTokenAddress
); */
}
public async getTokenBalance(
account: string,
tokenAddress = this.astroAddress
): Promise<any> {
if (!account) {
account = await this.getAccount();
this.connectedAddress = account;
this.connectedAddress$.next(this.connectedAddress);
// console.log('transfer.service :: getUserBalance :: account');
// console.log(account);
}
const contract = new Contract(tokenAddress, daiAbi, this.provider);
const balance =
//20000;
(await contract.balanceOf(account)) / 10 ** 18;
return balance;
}
async enableMetaMaskAccount(): Promise<any> {
let enable = false;
await new Promise((resolve, reject) => {
enable = window.ethereum.enable();
});
return Promise.resolve(enable);
}
async enableWalletConnect(): Promise<any> {
if (!this.connector.connected) {
this.createConnector();
this.connector.createSession().then(() => {
// get uri for QR Code modal
const uri = this.connector.uri;
QRCodeModal.open(uri, () => {
QRCodeModal.close();
});
});
}
return Promise.resolve(true);
}
disconnectWalletConnect() {
this.connector.killSession();
}
private async getAccount(): Promise<any> {
// console.log('transfer.service :: getAccount :: start');
if (this.account == null) {
this.account = (await new Promise((resolve, reject) => {
// console.log('transfer.service :: getAccount :: eth');
// console.log(this.web3.eth);
this.web3.eth.getAccounts((err, retAccount) => {
/* console.log('transfer.service :: getAccount: retAccount');
console.log(retAccount); */
if (retAccount) {
this.account = retAccount[0];
resolve(this.account);
} else {
/* alert('transfer.service :: getAccount :: no accounts found.');
reject('No accounts found.'); */
}
if (err != null) {
/* alert('transfer.service :: getAccount :: error retrieving account');
reject('Error retrieving account'); */
}
});
})) as Promise<any>;
}
return Promise.resolve(this.account);
}
async getFrom(txId: string) {
const res = await this.web3.eth.getTransactionReceipt(txId);
return res.from;
}
public async getUserBalance(): Promise<any> {
const account = await this.getAccount();
// console.log('transfer.service :: getUserBalance :: account');
// console.log(account);
return new Promise((resolve, reject) => {
this.web3.eth.getBalance(account, function (err, balance) {
// console.log('transfer.service :: getUserBalance :: getBalance');
// console.log(balance);
if (!err) {
const retVal = {
account,
balance,
};
resolve(retVal);
} else {
reject({ account: 'error', balance: 0 });
}
});
}) as Promise<any>;
}
getInfo(): Contract {
return new ethers.Contract(this.astroAddress, daiAbi, this.provider);
}
} | private wallet: Wallet;
// web3;
enable;
account; | random_line_split |
app.py | import numpy as np
import yaml
import pickle
import os
from flask import Flask, request, jsonify, render_template, redirect, url_for, flash
from flask_mail import Mail, Message
from flask_wtf import FlaskForm
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, EqualTo
from wtforms.validators import InputRequired, Email, Length
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
app = Flask(__name__)
model = pickle.load(open('model_GB.pkl', 'rb'))
ENV = 'prod'
def get_config(fname):
'''
Creates connection to yaml file which holds the DB user and pass
'''
with open(fname) as f:
cfg = yaml.load(f, Loader=yaml.SafeLoader)
return cfg
if ENV == 'dev':
cfg = get_config('config.yml')
connection = cfg['connection'][ENV]
app.config['SECRET_KEY'] = connection['secret_key']
app.debug = True
app.config[connection['username']] = connection['password']
app.config['TESTING'] = False
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
app.config['MAIL__USE_SSL'] = False
app.config['MAIL_USERNAME'] = connection['mail_user']
app.config['MAIL_PASSWORD'] = connection['mail_pass']
app.config['MAIL_DEFAULT_SENDER'] = 'mail@syndicate.com'
app.config['MAIL_MAX_EMAILS'] = None
app.config['MAIL_ASCII_ATTACHMENTS'] = False
else:
app.debug = False
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
app.config['MAIL_SERVER'] = os.environ['MAIL_SERVER']
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = False
app.config['MAIL__USE_SSL'] = False
app.config['MAIL_USERNAME'] = os.environ['MAIL_USERNAME']
app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
mail = Mail(app)
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
def get_reset_token(self, expires_seconds = 1800):
s = Serializer(app.config['SECRET_KEY'], expires_seconds)
return s.dumps({'user_id' : self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
return user.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
def validate_username(self, username):
'''
Raises a validation error if a user tries to register using an existing username
'''
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class UpdateAccountForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
submit = SubmitField('Update')
def validate_username(self, username):
'''
Raises a validation error if a user tries to register using an existing username
'''
if username.data != current_user.username:
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
if email.data != current_user.email:
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
|
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators = [DataRequired()])
confirm_password = PasswordField('Confirm Password', validators = [DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
@app.route('/',methods=['GET', 'POST'])
def home():
return render_template('index.html')
@app.route('/error/')
def error():
return render_template('error.html')
@app.route('/login_error/')
def login_error():
return render_template('login_error.html')
@app.route('/login/',methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username = form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember = form.remember.data)
flash('Account Created For {}!'.format(form.username.data))
return redirect(url_for('model_page'))
else:
return redirect(url_for('login_error'))
return render_template('login.html', form=form)
@app.route('/signup/', methods = ['GET','POST'])
def signup():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegisterForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
new_user = User(username = form.username.data, email = form.email.data, password = hashed_password)
db.session.add(new_user)
db.session.commit()
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
else:
return render_template('signup.html', form = form, message= 'Username / Email Already Exists')
# return '<h1>' + form.email.data + ' ' + form.username.data + ' ' + form.password.data + '<h1>'
return render_template('signup.html', form = form)
@app.route('/logout/')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/learn_more/',methods=['GET', 'POST'])
def learn_more():
return render_template('learn_more.html')
@app.route('/email_sent/',methods=['GET', 'POST'])
def email_sent():
return render_template('email_sent.html')
@app.route('/account/',methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
return render_template('account.html', title = 'Account', form = form)
@app.route('/model_page/', methods = ['GET','POST'])
@login_required
def model_page():
return render_template('model_page.html')
def send_reset_email(user):
token = user.get_reset_token()
msg = Message(subject = 'Password Reset Request',
sender = 'noreply@syndicate.com',
recipients=[user.email])
msg.body = f''' To reset your password, visit the following link :
{url_for('reset_token', token = token, _external = True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route('/reset_password/',methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
flask('An email has been sent with instructions to resset your password', 'info')
return redirect(url_for('login'))
return render_template('reset_request.html', title = 'Rest Password', form = form)
@app.route('/reset_password/<token>',methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash('That is an invalid / expired token', 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
user.password = hashed_password
db.session.commit()
flash('Your password has been updated!', 'success')
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
return render_template('reset_token.html', title = 'Rest Password', form = form)
@app.route('/predict_model', methods=['GET', 'POST'])
def predict_model():
int_features = [int(x) for x in request.form.values()]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)
output = round(prediction[0], 2)
map_dict = {1 : 'DT Toronto', 3 : 'North York', 4 : 'Scarborough', 6 : 'Etobicoke'}
output = map_dict[output]
return render_template('model_page.html', prediction_text = 'The Crime Occurred in : {}'.format(output))
if __name__ == "__main__":
if ENV == 'prod':
app.run()
else:
app.run(debug=True)
| '''
Raises a validation error if a user tries to register using an existing email
'''
if email.data != current_user.email:
user = User.query.filter_by(email = email.data).first()
if user is None:
raise ValidationError('There is no accouunt with that email. You must register first.') | identifier_body |
app.py | import numpy as np
import yaml
import pickle
import os
from flask import Flask, request, jsonify, render_template, redirect, url_for, flash
from flask_mail import Mail, Message
from flask_wtf import FlaskForm
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, EqualTo
from wtforms.validators import InputRequired, Email, Length
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
app = Flask(__name__)
model = pickle.load(open('model_GB.pkl', 'rb'))
ENV = 'prod'
def get_config(fname):
'''
Creates connection to yaml file which holds the DB user and pass
'''
with open(fname) as f:
cfg = yaml.load(f, Loader=yaml.SafeLoader)
return cfg
if ENV == 'dev':
cfg = get_config('config.yml')
connection = cfg['connection'][ENV]
app.config['SECRET_KEY'] = connection['secret_key']
app.debug = True
app.config[connection['username']] = connection['password']
app.config['TESTING'] = False
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
app.config['MAIL__USE_SSL'] = False
app.config['MAIL_USERNAME'] = connection['mail_user']
app.config['MAIL_PASSWORD'] = connection['mail_pass']
app.config['MAIL_DEFAULT_SENDER'] = 'mail@syndicate.com'
app.config['MAIL_MAX_EMAILS'] = None
app.config['MAIL_ASCII_ATTACHMENTS'] = False
else:
app.debug = False
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
app.config['MAIL_SERVER'] = os.environ['MAIL_SERVER']
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = False
app.config['MAIL__USE_SSL'] = False
app.config['MAIL_USERNAME'] = os.environ['MAIL_USERNAME']
app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
mail = Mail(app)
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
def get_reset_token(self, expires_seconds = 1800):
s = Serializer(app.config['SECRET_KEY'], expires_seconds)
return s.dumps({'user_id' : self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
return user.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
def validate_username(self, username):
'''
Raises a validation error if a user tries to register using an existing username
'''
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class | (FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
submit = SubmitField('Update')
def validate_username(self, username):
'''
Raises a validation error if a user tries to register using an existing username
'''
if username.data != current_user.username:
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
if email.data != current_user.email:
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
if email.data != current_user.email:
user = User.query.filter_by(email = email.data).first()
if user is None:
raise ValidationError('There is no accouunt with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators = [DataRequired()])
confirm_password = PasswordField('Confirm Password', validators = [DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
@app.route('/',methods=['GET', 'POST'])
def home():
return render_template('index.html')
@app.route('/error/')
def error():
return render_template('error.html')
@app.route('/login_error/')
def login_error():
return render_template('login_error.html')
@app.route('/login/',methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username = form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember = form.remember.data)
flash('Account Created For {}!'.format(form.username.data))
return redirect(url_for('model_page'))
else:
return redirect(url_for('login_error'))
return render_template('login.html', form=form)
@app.route('/signup/', methods = ['GET','POST'])
def signup():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegisterForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
new_user = User(username = form.username.data, email = form.email.data, password = hashed_password)
db.session.add(new_user)
db.session.commit()
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
else:
return render_template('signup.html', form = form, message= 'Username / Email Already Exists')
# return '<h1>' + form.email.data + ' ' + form.username.data + ' ' + form.password.data + '<h1>'
return render_template('signup.html', form = form)
@app.route('/logout/')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/learn_more/',methods=['GET', 'POST'])
def learn_more():
return render_template('learn_more.html')
@app.route('/email_sent/',methods=['GET', 'POST'])
def email_sent():
return render_template('email_sent.html')
@app.route('/account/',methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
return render_template('account.html', title = 'Account', form = form)
@app.route('/model_page/', methods = ['GET','POST'])
@login_required
def model_page():
return render_template('model_page.html')
def send_reset_email(user):
token = user.get_reset_token()
msg = Message(subject = 'Password Reset Request',
sender = 'noreply@syndicate.com',
recipients=[user.email])
msg.body = f''' To reset your password, visit the following link :
{url_for('reset_token', token = token, _external = True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route('/reset_password/',methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
flask('An email has been sent with instructions to resset your password', 'info')
return redirect(url_for('login'))
return render_template('reset_request.html', title = 'Rest Password', form = form)
@app.route('/reset_password/<token>',methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash('That is an invalid / expired token', 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
user.password = hashed_password
db.session.commit()
flash('Your password has been updated!', 'success')
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
return render_template('reset_token.html', title = 'Rest Password', form = form)
@app.route('/predict_model', methods=['GET', 'POST'])
def predict_model():
int_features = [int(x) for x in request.form.values()]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)
output = round(prediction[0], 2)
map_dict = {1 : 'DT Toronto', 3 : 'North York', 4 : 'Scarborough', 6 : 'Etobicoke'}
output = map_dict[output]
return render_template('model_page.html', prediction_text = 'The Crime Occurred in : {}'.format(output))
if __name__ == "__main__":
if ENV == 'prod':
app.run()
else:
app.run(debug=True)
| UpdateAccountForm | identifier_name |
app.py | import numpy as np
import yaml
import pickle
import os
from flask import Flask, request, jsonify, render_template, redirect, url_for, flash
from flask_mail import Mail, Message
from flask_wtf import FlaskForm
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, EqualTo
from wtforms.validators import InputRequired, Email, Length
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
app = Flask(__name__)
model = pickle.load(open('model_GB.pkl', 'rb'))
ENV = 'prod'
def get_config(fname):
'''
Creates connection to yaml file which holds the DB user and pass
'''
with open(fname) as f:
cfg = yaml.load(f, Loader=yaml.SafeLoader)
return cfg
if ENV == 'dev':
cfg = get_config('config.yml')
connection = cfg['connection'][ENV]
app.config['SECRET_KEY'] = connection['secret_key']
app.debug = True
app.config[connection['username']] = connection['password']
app.config['TESTING'] = False
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
app.config['MAIL__USE_SSL'] = False
app.config['MAIL_USERNAME'] = connection['mail_user']
app.config['MAIL_PASSWORD'] = connection['mail_pass']
app.config['MAIL_DEFAULT_SENDER'] = 'mail@syndicate.com'
app.config['MAIL_MAX_EMAILS'] = None
app.config['MAIL_ASCII_ATTACHMENTS'] = False
else:
app.debug = False
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
app.config['MAIL_SERVER'] = os.environ['MAIL_SERVER']
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = False
app.config['MAIL__USE_SSL'] = False
app.config['MAIL_USERNAME'] = os.environ['MAIL_USERNAME']
app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
mail = Mail(app)
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
def get_reset_token(self, expires_seconds = 1800):
s = Serializer(app.config['SECRET_KEY'], expires_seconds)
return s.dumps({'user_id' : self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
return user.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
def validate_username(self, username):
'''
Raises a validation error if a user tries to register using an existing username
'''
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class UpdateAccountForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
submit = SubmitField('Update')
def validate_username(self, username):
'''
Raises a validation error if a user tries to register using an existing username
'''
if username.data != current_user.username:
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
if email.data != current_user.email:
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
if email.data != current_user.email:
user = User.query.filter_by(email = email.data).first()
if user is None:
raise ValidationError('There is no accouunt with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators = [DataRequired()])
confirm_password = PasswordField('Confirm Password', validators = [DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
@app.route('/',methods=['GET', 'POST'])
def home():
return render_template('index.html')
@app.route('/error/')
def error():
return render_template('error.html')
@app.route('/login_error/')
def login_error():
return render_template('login_error.html')
@app.route('/login/',methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username = form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember = form.remember.data)
flash('Account Created For {}!'.format(form.username.data))
return redirect(url_for('model_page'))
else:
return redirect(url_for('login_error'))
return render_template('login.html', form=form)
@app.route('/signup/', methods = ['GET','POST'])
def signup():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegisterForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
new_user = User(username = form.username.data, email = form.email.data, password = hashed_password)
db.session.add(new_user)
db.session.commit()
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
else:
return render_template('signup.html', form = form, message= 'Username / Email Already Exists')
# return '<h1>' + form.email.data + ' ' + form.username.data + ' ' + form.password.data + '<h1>'
return render_template('signup.html', form = form)
@app.route('/logout/')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/learn_more/',methods=['GET', 'POST'])
def learn_more():
return render_template('learn_more.html')
@app.route('/email_sent/',methods=['GET', 'POST'])
def email_sent():
return render_template('email_sent.html')
@app.route('/account/',methods=['GET', 'POST'])
@login_required
def account(): | db.session.commit()
flash('Your account has been updated', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
return render_template('account.html', title = 'Account', form = form)
@app.route('/model_page/', methods = ['GET','POST'])
@login_required
def model_page():
return render_template('model_page.html')
def send_reset_email(user):
token = user.get_reset_token()
msg = Message(subject = 'Password Reset Request',
sender = 'noreply@syndicate.com',
recipients=[user.email])
msg.body = f''' To reset your password, visit the following link :
{url_for('reset_token', token = token, _external = True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route('/reset_password/',methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
flask('An email has been sent with instructions to resset your password', 'info')
return redirect(url_for('login'))
return render_template('reset_request.html', title = 'Rest Password', form = form)
@app.route('/reset_password/<token>',methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash('That is an invalid / expired token', 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
user.password = hashed_password
db.session.commit()
flash('Your password has been updated!', 'success')
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
return render_template('reset_token.html', title = 'Rest Password', form = form)
@app.route('/predict_model', methods=['GET', 'POST'])
def predict_model():
int_features = [int(x) for x in request.form.values()]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)
output = round(prediction[0], 2)
map_dict = {1 : 'DT Toronto', 3 : 'North York', 4 : 'Scarborough', 6 : 'Etobicoke'}
output = map_dict[output]
return render_template('model_page.html', prediction_text = 'The Crime Occurred in : {}'.format(output))
if __name__ == "__main__":
if ENV == 'prod':
app.run()
else:
app.run(debug=True) | form = UpdateAccountForm()
if form.validate_on_submit():
current_user.username = form.username.data
current_user.email = form.email.data | random_line_split |
app.py | import numpy as np
import yaml
import pickle
import os
from flask import Flask, request, jsonify, render_template, redirect, url_for, flash
from flask_mail import Mail, Message
from flask_wtf import FlaskForm
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, EqualTo
from wtforms.validators import InputRequired, Email, Length
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
app = Flask(__name__)
model = pickle.load(open('model_GB.pkl', 'rb'))
ENV = 'prod'
def get_config(fname):
'''
Creates connection to yaml file which holds the DB user and pass
'''
with open(fname) as f:
cfg = yaml.load(f, Loader=yaml.SafeLoader)
return cfg
if ENV == 'dev':
cfg = get_config('config.yml')
connection = cfg['connection'][ENV]
app.config['SECRET_KEY'] = connection['secret_key']
app.debug = True
app.config[connection['username']] = connection['password']
app.config['TESTING'] = False
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
app.config['MAIL__USE_SSL'] = False
app.config['MAIL_USERNAME'] = connection['mail_user']
app.config['MAIL_PASSWORD'] = connection['mail_pass']
app.config['MAIL_DEFAULT_SENDER'] = 'mail@syndicate.com'
app.config['MAIL_MAX_EMAILS'] = None
app.config['MAIL_ASCII_ATTACHMENTS'] = False
else:
|
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
mail = Mail(app)
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
def get_reset_token(self, expires_seconds = 1800):
s = Serializer(app.config['SECRET_KEY'], expires_seconds)
return s.dumps({'user_id' : self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
return user.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
def validate_username(self, username):
'''
Raises a validation error if a user tries to register using an existing username
'''
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class UpdateAccountForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
submit = SubmitField('Update')
def validate_username(self, username):
'''
Raises a validation error if a user tries to register using an existing username
'''
if username.data != current_user.username:
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
if email.data != current_user.email:
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
if email.data != current_user.email:
user = User.query.filter_by(email = email.data).first()
if user is None:
raise ValidationError('There is no accouunt with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators = [DataRequired()])
confirm_password = PasswordField('Confirm Password', validators = [DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
@app.route('/',methods=['GET', 'POST'])
def home():
return render_template('index.html')
@app.route('/error/')
def error():
return render_template('error.html')
@app.route('/login_error/')
def login_error():
return render_template('login_error.html')
@app.route('/login/',methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username = form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember = form.remember.data)
flash('Account Created For {}!'.format(form.username.data))
return redirect(url_for('model_page'))
else:
return redirect(url_for('login_error'))
return render_template('login.html', form=form)
@app.route('/signup/', methods = ['GET','POST'])
def signup():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegisterForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
new_user = User(username = form.username.data, email = form.email.data, password = hashed_password)
db.session.add(new_user)
db.session.commit()
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
else:
return render_template('signup.html', form = form, message= 'Username / Email Already Exists')
# return '<h1>' + form.email.data + ' ' + form.username.data + ' ' + form.password.data + '<h1>'
return render_template('signup.html', form = form)
@app.route('/logout/')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/learn_more/',methods=['GET', 'POST'])
def learn_more():
return render_template('learn_more.html')
@app.route('/email_sent/',methods=['GET', 'POST'])
def email_sent():
return render_template('email_sent.html')
@app.route('/account/',methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
return render_template('account.html', title = 'Account', form = form)
@app.route('/model_page/', methods = ['GET','POST'])
@login_required
def model_page():
return render_template('model_page.html')
def send_reset_email(user):
token = user.get_reset_token()
msg = Message(subject = 'Password Reset Request',
sender = 'noreply@syndicate.com',
recipients=[user.email])
msg.body = f''' To reset your password, visit the following link :
{url_for('reset_token', token = token, _external = True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route('/reset_password/',methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
flask('An email has been sent with instructions to resset your password', 'info')
return redirect(url_for('login'))
return render_template('reset_request.html', title = 'Rest Password', form = form)
@app.route('/reset_password/<token>',methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash('That is an invalid / expired token', 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
user.password = hashed_password
db.session.commit()
flash('Your password has been updated!', 'success')
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
return render_template('reset_token.html', title = 'Rest Password', form = form)
@app.route('/predict_model', methods=['GET', 'POST'])
def predict_model():
int_features = [int(x) for x in request.form.values()]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)
output = round(prediction[0], 2)
map_dict = {1 : 'DT Toronto', 3 : 'North York', 4 : 'Scarborough', 6 : 'Etobicoke'}
output = map_dict[output]
return render_template('model_page.html', prediction_text = 'The Crime Occurred in : {}'.format(output))
if __name__ == "__main__":
if ENV == 'prod':
app.run()
else:
app.run(debug=True)
| app.debug = False
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
app.config['MAIL_SERVER'] = os.environ['MAIL_SERVER']
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = False
app.config['MAIL__USE_SSL'] = False
app.config['MAIL_USERNAME'] = os.environ['MAIL_USERNAME']
app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL'] | conditional_block |
main.go | package main
import (
"fmt"
"image"
"log"
"math/rand"
"sync/atomic"
"time"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil"
"github.com/BurntSushi/xgbutil/xevent"
"github.com/BurntSushi/xgbutil/xgraphics"
"github.com/BurntSushi/xgbutil/xwindow"
)
var DisplayWindow *xwindow.Window
var WindowImage *xgraphics.Image
var X *xgbutil.XUtil
type Instruction uint32
type XY [2]int
// Static address list (relative to a center pixel)
const (
PROGRAM_COUNTER uint32 = iota // 24 bit program counter
STACK_POINTER // 24 bit stack pointer
REGISTER_A // Register A
REGISTER_B // Register B
PROGRAM // Start of program
)
// Instructions (4 bits instruction, 20 bits data):
const (
NOP Instruction = iota
JUMP // Relative jump (Add data (signed) to PC)
COND // Conditional jump (If A is non-zero, perform relative jump)
SETA // Set A (Set A to data (unsigned))
ADDI // Add immediate (Add data (signed) to A)
PUSH // Push stack (Push A to stack)
POP // Pop stack (Pop from stack and store in A)
ADDS // Add stack (Pop from stack and add to A)
SWAP // Swap A and B
RAND // Add random value to A (A = rand % data, unless data == 0)
SHIFT // Shift A (Shift contents of A by data (signed))
LOCAL // Set A to the current program's x,y coordinates
RPUSH // Push remote (Push A to remote stack of x,y = B)
RSET // Write remote (Store A at x,y = B, r = data)
RGET // Read remote (Read A from x,y = B, r = data)
FORK // Fork (call go Exec(B))
)
func (i Instruction) String() string {
return []string{"NOP", "JUMP", "COND", "SETA", "ADDI", "PUSH", "POP", "ADDS", "SWAP", "RAND", "SHIFT", "LOCAL", "RPUSH", "RSET", "RGET", "FORK"}[i]
}
func WriteProgram(x, y uint32) {
r := PROGRAM
// Padding for static memory usage
WriteInstruction(x, y, &r, JUMP, 3)
mem1 := r
WriteInstruction(x, y, &r, NOP, 0)
mem2 := r
WriteInstruction(x, y, &r, NOP, 0)
{ // Store end of program in the static slot 1
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, RSET, int32(mem1))
}
start := r
{ // for i := 1024; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 1024)
loop := r
{ // Push random blue to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // for i := 2048; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 2048)
loop := r
{ // Push random green to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, SHIFT, 8)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // for i := 4096; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 4096)
loop := r
{ // Push random red to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, SHIFT, 16)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // Copy the current program
{ // Store current stack pointer in static slot 2
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, RSET, int32(mem2))
}
{ // Set stack pointer to end of program
WriteInstruction(x, y, &r, RGET, int32(mem1))
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
{ // Select random coordinates for new program and set stack pointer to end of remote program
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, SETA, 0)
WriteInstruction(x, y, &r, RAND, 1024)
WriteInstruction(x, y, &r, SHIFT, 12)
WriteInstruction(x, y, &r, RAND, 1024)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
{ // Pop from local stack, push to local stack in reverse order
WriteInstruction(x, y, &r, ADDI, -1)
loop := r
WriteInstruction(x, y, &r, ADDI, 1)
WriteInstruction(x, y, &r, POP, 0)
WriteInstruction(x, y, &r, RPUSH, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, ADDI, -2)
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // Fork remote program
WriteInstruction(x, y, &r, RGET, int32(mem1)) // Restore remote stack pointer
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, SETA, 0)
WriteInstruction(x, y, &r, RSET, int32(PROGRAM_COUNTER))
WriteInstruction(x, y, &r, FORK, 0)
}
{ // Restore local stack
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(mem2))
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
}
WriteInstruction(x, y, &r, JUMP, int32(start-r))
data, _ := Coords(x, y, PROGRAM_COUNTER)
Write24Bit(data, 0) // Initial program counter
data, _ = Coords(x, y, STACK_POINTER)
Write24Bit(data, r) // Set stack pointer to end of program
// fmt.Println("Program length:", r)
}
func main() {
var err error
X, err = xgbutil.NewConn()
if err != nil {
log.Fatal(err)
}
// Create a window
DisplayWindow, err = xwindow.Generate(X)
if err != nil {
log.Fatalf("Could not generate a new window X id: %s", err)
}
DisplayWindow.Create(X.RootWin(), 0, 0, 1024, 1024, xproto.CwBackPixel, 0)
DisplayWindow.Map()
WindowImage = xgraphics.New(X, image.Rect(0, 0, 1024, 1024))
err = WindowImage.XSurfaceSet(DisplayWindow.Id)
if err != nil {
log.Printf("Error while setting window surface to image %d: %s\n", DisplayWindow, err)
}
// Start a program in the center of the screen if there have been no recent forks
go func() {
for {
count := atomic.SwapInt64(&forkCount, 0)
if count < 1 {
fmt.Println("No forks in the last 10 seconds, spawning new program")
WriteProgram(512, 512)
go Exec(512, 512)
}
time.Sleep(10 * time.Second)
}
}()
drawTimer = time.Tick(16 * time.Millisecond)
printTimer = time.Tick(1 * time.Second)
xevent.Main(X)
}
var execCount int64 = 0
var forkCount int64 = 0
var fpsCount int64 = 0
var drawTimer <-chan time.Time
var printTimer <-chan time.Time
var execs [1024][1024]uint32
func Exec(x, y uint32) {
// Limit executions to 100 concurrent programs
count := atomic.AddInt64(&execCount, 1)
atomic.AddInt64(&forkCount, 1)
defer atomic.AddInt64(&execCount, -1)
if count > 100 {
return
}
// De-duplicate programs running at the same location
if int(x) >= WindowImage.Rect.Max.X || int(y) >= WindowImage.Rect.Max.Y {
return
}
old := atomic.SwapUint32(&execs[x][y], 1)
if old != 0 {
return
}
defer atomic.StoreUint32(&execs[x][y], 0)
// fmt.Println("Starting program at", x, y)
for {
// Read Program Counter
mem, ok := Coords(x, y, PROGRAM_COUNTER)
if !ok {
// fmt.Println("Program", x, y, "invalid")
return
}
PC := Read24Bit(mem)
// Read Stack Pointer
mem, ok = Coords(x, y, STACK_POINTER)
if !ok {
// fmt.Println("Program", x, y, "has invalid stack pointer")
return
}
S := Read24Bit(mem)
// Read Register A
mem, ok = Coords(x, y, REGISTER_A)
if !ok {
// fmt.Println("Program", x, y, "has invalid register A")
return
}
A := Read24Bit(mem)
// Read Register B
mem, ok = Coords(x, y, REGISTER_B)
if !ok {
// fmt.Println("Program", x, y, "has invalid register B")
return
}
B := Read24Bit(mem)
// Read instruction
mem, ok = Coords(x, y, PROGRAM+PC)
if !ok {
// fmt.Println("Program", x, y, "has invalid PC:", PC)
return
}
instrData := Read24Bit(mem)
instr := Instruction((instrData & 0xF00000) >> 20)
data := instrData & 0xFFFFF
// Evaluate instruction
// fmt.Printf("(%d, %d): PC(%4d) S(%4d) A(%10d) B(%10d) Instruction(%s)\tData(%d)\n", x, y, PC, S, A, B, instr, int32(Signed20Bit(data)))
switch instr {
case NOP: // NOP
case JUMP: // Relative jump
PC += Signed20Bit(data) - 1
case COND: // Conditional jump
if A != 0 {
PC += Signed20Bit(data) - 1
}
case SETA: // Set register A
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, data)
case ADDI: // Add immediate to A
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A+Signed20Bit(data))
case PUSH: // Push A to stack
mem, ok = Coords(x, y, S)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S)
return
}
Write24Bit(mem, A)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S+1)
case POP: // Pop stack into A
mem, ok = Coords(x, y, S-1)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S-1)
return
}
r := Read24Bit(mem)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S-1)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, r)
case ADDS: // Pop from stack and add to A
mem, ok = Coords(x, y, S-1)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S-1)
return
}
r := Read24Bit(mem)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S-1)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A+r)
case SWAP: // Swap registers A and B
mem, _ = Coords(x, y, REGISTER_B)
Write24Bit(mem, A)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, B)
case RAND: // Add random bits to A
mem, _ = Coords(x, y, REGISTER_A)
r := rand.Uint32()
if data != 0 {
r = r % data
}
Write24Bit(mem, A+r)
case SHIFT: // Shift A by data (signed)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A<<Signed20Bit(data))
case LOCAL: // Set A to x,y
mem, _ = Coords(x, y, REGISTER_A)
Write12Bit(mem, x, y)
case RPUSH: // Push to remote stack
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, STACK_POINTER)
if !ok {
// fmt.Println("Program", x2, y2, "has invalid stack pointer")
} else {
S2 := Read24Bit(mem)
mem, ok = Coords(x2, y2, S2)
if !ok {
// fmt.Println("Program", x2, y2, "has remote stack overflow:", S2)
} else {
Write24Bit(mem, A)
}
mem, _ = Coords(x2, y2, STACK_POINTER)
Write24Bit(mem, S2+1)
}
case RSET: // Write A into remote address
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, data)
if !ok {
// fmt.Println("Program", x, y, "has invalid remote write:", x2, y2, data)
} else {
Write24Bit(mem, A)
}
case RGET: // Read remote address into A
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, data)
if !ok {
// fmt.Println("Program", x, y, "has invalid remote read:", x2, y2, data)
} else {
r := Read24Bit(mem)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, r)
}
case FORK: // Call go Exec(B)
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
go Exec(x2, y2)
default:
fmt.Println("Program", x, y, "running unknown instruction:", instr)
return
}
// Advance program counter
mem, _ = Coords(x, y, PROGRAM_COUNTER)
Write24Bit(mem, PC+1)
if (atomic.AddUint32(&execs[x][y], 1) > 1000000) {
return
}
select {
case <-drawTimer:
atomic.AddInt64(&fpsCount, 1)
DrawImage(WindowImage)
case <-printTimer:
execs := atomic.LoadInt64(&execCount)
fps := atomic.SwapInt64(&fpsCount, 0)
fmt.Println("FPS:", fps, "Current executions:", execs)
default:
time.Sleep(100 * time.Nanosecond)
}
}
}
// Write an instruction to x,y,r
func WriteInstruction(x, y uint32, r *uint32, instr Instruction, data int32) {
mem, _ := Coords(x, y, *r)
instrData := (uint32(instr&0xF) << 20) | uint32(data&0xFFFFF)
Write24Bit(mem, instrData)
*r++
}
// Read a 24 bit pixel as 2x 12 bit uints
func Read12Bit(in []uint8) (outx, outy uint32) {
outx = uint32(in[0]) | (uint32(in[1]&0xF) << 8)
outy = (uint32(in[1]&0xF0) >> 4) | (uint32(in[2]) << 4)
return outx, outy
}
// Write 2x 12 bit uints as a 24 bit pixel
func Write12Bit(out []uint8, inx, iny uint32) {
out[0] = uint8(inx & 0xFF)
out[1] = uint8(((inx & 0xF00) >> 8) | ((iny & 0xF) << 4))
out[2] = uint8(((iny & 0xFF0) >> 4))
}
// Read a 24 bit pixel as a 24 bit uint
func Read24Bit(in []uint8) (out uint32) {
out = uint32(in[0]) | (uint32(in[1]) << 8) | (uint32(in[2]) << 16)
return out
}
// Write a 24 bit uint as a 24 bit pixel
func Write24Bit(out []uint8, in uint32) {
out[0] = uint8(in & 0xFF)
out[1] = uint8(((in & 0xFF00) >> 8))
out[2] = uint8(((in & 0xFF0000) >> 16))
}
// Convert a 2s complement 20-bit uint to a 32-bit uint
func Signed20Bit(in uint32) (out uint32) {
out = in
if (out & 0x80000) != 0 {
// Data is negative
out |= 0xFFF00000
}
return out
}
// Generate a roughly spiral shaped memory space.
// CoordOffset represents an XY offset relative to a center coordinate
// Larger indexes in CoordOffset will have a larger distance from center
var CoordOffset []XY
// Return the slice of pixel data for the psuedo-polar coordinates x,y,r
func Coords(x, y, r uint32) ([]uint8, bool) {
if r >= uint32(len(CoordOffset)) {
// log.Printf("Coord radius too large: %d\n", r)
return []uint8{0, 0, 0}, false
}
dx := CoordOffset[r][0]
dy := CoordOffset[r][1]
if int(x)+dx < 0 || int(x)+dx >= WindowImage.Rect.Max.X || int(y)+dy < 0 || int(y)+dy >= WindowImage.Rect.Max.Y {
return []uint8{0, 0, 0}, false
}
i := (int(x)+dx)*4 + (int(y)+dy)*WindowImage.Stride
return WindowImage.Pix[i : i+3], true
}
var DirOffset = [8]XY{
{0, -1},
{-1, -1},
{-1, 0},
{-1, 1},
{0, 1},
{1, 1},
{1, 0},
{1, -1},
}
func init() {
// Generate CoordOffset lookup table using a boundary fill algorithm
var area [2048][2048]bool
originx := 1024
originy := 1024
CoordOffset = []XY{{0, 0}, {0, 1}}
area[originx][originy] = true
area[originx][originy+1] = true
dx := 0
dy := 1
for {
var dir int
if dx >= 0 {
if dy > dx {
dir = 0
} else if dy >= 0 {
dir = 1
} else if dx > -dy {
dir = 2
} else {
dir = 3
}
} else {
if dy < dx {
dir = 4
} else if dy < 0 {
dir = 5
} else if -dx > dy {
dir = 6
} else {
dir = 7
}
}
for i := 0; i < 8; i++ {
ddx := DirOffset[(dir+i)%8][0]
ddy := DirOffset[(dir+i)%8][1]
if dx+ddx < -originx || dx+ddx >= originx || dy+ddy < -originy || dy+ddy >= originy {
return
}
if !area[originx+dx+ddx][originy+dy+ddy] {
dx += ddx
dy += ddy
CoordOffset = append(CoordOffset, XY{dx, dy})
area[originx+dx][originy+dy] = true
break
}
}
}
}
func | (img *xgraphics.Image) {
img.XDraw()
img.XPaint(DisplayWindow.Id)
}
| DrawImage | identifier_name |
main.go | package main
import (
"fmt"
"image"
"log"
"math/rand"
"sync/atomic"
"time"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil"
"github.com/BurntSushi/xgbutil/xevent"
"github.com/BurntSushi/xgbutil/xgraphics"
"github.com/BurntSushi/xgbutil/xwindow"
)
var DisplayWindow *xwindow.Window
var WindowImage *xgraphics.Image
var X *xgbutil.XUtil
type Instruction uint32
type XY [2]int
// Static address list (relative to a center pixel)
const (
PROGRAM_COUNTER uint32 = iota // 24 bit program counter
STACK_POINTER // 24 bit stack pointer
REGISTER_A // Register A
REGISTER_B // Register B
PROGRAM // Start of program
)
// Instructions (4 bits instruction, 20 bits data):
const (
NOP Instruction = iota
JUMP // Relative jump (Add data (signed) to PC)
COND // Conditional jump (If A is non-zero, perform relative jump)
SETA // Set A (Set A to data (unsigned))
ADDI // Add immediate (Add data (signed) to A)
PUSH // Push stack (Push A to stack)
POP // Pop stack (Pop from stack and store in A)
ADDS // Add stack (Pop from stack and add to A)
SWAP // Swap A and B
RAND // Add random value to A (A = rand % data, unless data == 0)
SHIFT // Shift A (Shift contents of A by data (signed))
LOCAL // Set A to the current program's x,y coordinates
RPUSH // Push remote (Push A to remote stack of x,y = B)
RSET // Write remote (Store A at x,y = B, r = data)
RGET // Read remote (Read A from x,y = B, r = data)
FORK // Fork (call go Exec(B))
)
func (i Instruction) String() string {
return []string{"NOP", "JUMP", "COND", "SETA", "ADDI", "PUSH", "POP", "ADDS", "SWAP", "RAND", "SHIFT", "LOCAL", "RPUSH", "RSET", "RGET", "FORK"}[i]
}
func WriteProgram(x, y uint32) {
r := PROGRAM
// Padding for static memory usage
WriteInstruction(x, y, &r, JUMP, 3)
mem1 := r
WriteInstruction(x, y, &r, NOP, 0)
mem2 := r
WriteInstruction(x, y, &r, NOP, 0)
{ // Store end of program in the static slot 1
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, RSET, int32(mem1))
}
start := r
{ // for i := 1024; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 1024)
loop := r
{ // Push random blue to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // for i := 2048; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 2048)
loop := r
{ // Push random green to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, SHIFT, 8)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // for i := 4096; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 4096)
loop := r
{ // Push random red to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, SHIFT, 16)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // Copy the current program
{ // Store current stack pointer in static slot 2
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, RSET, int32(mem2))
}
{ // Set stack pointer to end of program
WriteInstruction(x, y, &r, RGET, int32(mem1))
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
{ // Select random coordinates for new program and set stack pointer to end of remote program
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, SETA, 0)
WriteInstruction(x, y, &r, RAND, 1024)
WriteInstruction(x, y, &r, SHIFT, 12)
WriteInstruction(x, y, &r, RAND, 1024)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
{ // Pop from local stack, push to local stack in reverse order
WriteInstruction(x, y, &r, ADDI, -1)
loop := r
WriteInstruction(x, y, &r, ADDI, 1)
WriteInstruction(x, y, &r, POP, 0)
WriteInstruction(x, y, &r, RPUSH, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, ADDI, -2)
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // Fork remote program
WriteInstruction(x, y, &r, RGET, int32(mem1)) // Restore remote stack pointer
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, SETA, 0)
WriteInstruction(x, y, &r, RSET, int32(PROGRAM_COUNTER))
WriteInstruction(x, y, &r, FORK, 0)
}
{ // Restore local stack
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(mem2))
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
}
WriteInstruction(x, y, &r, JUMP, int32(start-r))
data, _ := Coords(x, y, PROGRAM_COUNTER)
Write24Bit(data, 0) // Initial program counter
data, _ = Coords(x, y, STACK_POINTER)
Write24Bit(data, r) // Set stack pointer to end of program
// fmt.Println("Program length:", r)
}
func main() {
var err error
X, err = xgbutil.NewConn()
if err != nil {
log.Fatal(err)
}
// Create a window
DisplayWindow, err = xwindow.Generate(X)
if err != nil {
log.Fatalf("Could not generate a new window X id: %s", err)
}
DisplayWindow.Create(X.RootWin(), 0, 0, 1024, 1024, xproto.CwBackPixel, 0)
DisplayWindow.Map()
WindowImage = xgraphics.New(X, image.Rect(0, 0, 1024, 1024))
err = WindowImage.XSurfaceSet(DisplayWindow.Id)
if err != nil {
log.Printf("Error while setting window surface to image %d: %s\n", DisplayWindow, err)
}
// Start a program in the center of the screen if there have been no recent forks
go func() {
for {
count := atomic.SwapInt64(&forkCount, 0)
if count < 1 {
fmt.Println("No forks in the last 10 seconds, spawning new program")
WriteProgram(512, 512)
go Exec(512, 512)
}
time.Sleep(10 * time.Second)
}
}()
drawTimer = time.Tick(16 * time.Millisecond)
printTimer = time.Tick(1 * time.Second)
xevent.Main(X)
}
var execCount int64 = 0
var forkCount int64 = 0
var fpsCount int64 = 0
var drawTimer <-chan time.Time
var printTimer <-chan time.Time
var execs [1024][1024]uint32
func Exec(x, y uint32) {
// Limit executions to 100 concurrent programs
count := atomic.AddInt64(&execCount, 1)
atomic.AddInt64(&forkCount, 1)
defer atomic.AddInt64(&execCount, -1)
if count > 100 {
return
}
// De-duplicate programs running at the same location
if int(x) >= WindowImage.Rect.Max.X || int(y) >= WindowImage.Rect.Max.Y {
return
}
old := atomic.SwapUint32(&execs[x][y], 1)
if old != 0 {
return
}
defer atomic.StoreUint32(&execs[x][y], 0)
// fmt.Println("Starting program at", x, y)
for {
// Read Program Counter
mem, ok := Coords(x, y, PROGRAM_COUNTER)
if !ok {
// fmt.Println("Program", x, y, "invalid")
return
}
PC := Read24Bit(mem)
// Read Stack Pointer
mem, ok = Coords(x, y, STACK_POINTER)
if !ok {
// fmt.Println("Program", x, y, "has invalid stack pointer")
return
}
S := Read24Bit(mem)
// Read Register A
mem, ok = Coords(x, y, REGISTER_A)
if !ok {
// fmt.Println("Program", x, y, "has invalid register A")
return
}
A := Read24Bit(mem)
// Read Register B
mem, ok = Coords(x, y, REGISTER_B)
if !ok {
// fmt.Println("Program", x, y, "has invalid register B")
return
}
B := Read24Bit(mem)
// Read instruction
mem, ok = Coords(x, y, PROGRAM+PC)
if !ok {
// fmt.Println("Program", x, y, "has invalid PC:", PC)
return
}
instrData := Read24Bit(mem)
instr := Instruction((instrData & 0xF00000) >> 20)
data := instrData & 0xFFFFF
// Evaluate instruction
// fmt.Printf("(%d, %d): PC(%4d) S(%4d) A(%10d) B(%10d) Instruction(%s)\tData(%d)\n", x, y, PC, S, A, B, instr, int32(Signed20Bit(data)))
switch instr {
case NOP: // NOP
case JUMP: // Relative jump
PC += Signed20Bit(data) - 1
case COND: // Conditional jump
if A != 0 {
PC += Signed20Bit(data) - 1
}
case SETA: // Set register A
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, data)
case ADDI: // Add immediate to A
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A+Signed20Bit(data))
case PUSH: // Push A to stack
mem, ok = Coords(x, y, S)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S)
return
}
Write24Bit(mem, A)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S+1)
case POP: // Pop stack into A
mem, ok = Coords(x, y, S-1)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S-1)
return
}
r := Read24Bit(mem)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S-1)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, r)
case ADDS: // Pop from stack and add to A
mem, ok = Coords(x, y, S-1)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S-1)
return
}
r := Read24Bit(mem)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S-1)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A+r)
case SWAP: // Swap registers A and B
mem, _ = Coords(x, y, REGISTER_B)
Write24Bit(mem, A)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, B)
case RAND: // Add random bits to A
mem, _ = Coords(x, y, REGISTER_A)
r := rand.Uint32()
if data != 0 {
r = r % data
}
Write24Bit(mem, A+r)
case SHIFT: // Shift A by data (signed)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A<<Signed20Bit(data))
case LOCAL: // Set A to x,y
mem, _ = Coords(x, y, REGISTER_A)
Write12Bit(mem, x, y)
case RPUSH: // Push to remote stack
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, STACK_POINTER)
if !ok {
// fmt.Println("Program", x2, y2, "has invalid stack pointer")
} else {
S2 := Read24Bit(mem)
mem, ok = Coords(x2, y2, S2)
if !ok {
// fmt.Println("Program", x2, y2, "has remote stack overflow:", S2)
} else {
Write24Bit(mem, A)
}
mem, _ = Coords(x2, y2, STACK_POINTER)
Write24Bit(mem, S2+1)
}
case RSET: // Write A into remote address
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, data)
if !ok {
// fmt.Println("Program", x, y, "has invalid remote write:", x2, y2, data)
} else {
Write24Bit(mem, A)
}
case RGET: // Read remote address into A
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, data)
if !ok {
// fmt.Println("Program", x, y, "has invalid remote read:", x2, y2, data)
} else {
r := Read24Bit(mem)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, r)
}
case FORK: // Call go Exec(B)
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
go Exec(x2, y2)
default:
fmt.Println("Program", x, y, "running unknown instruction:", instr)
return
}
// Advance program counter
mem, _ = Coords(x, y, PROGRAM_COUNTER)
Write24Bit(mem, PC+1)
if (atomic.AddUint32(&execs[x][y], 1) > 1000000) {
return
}
select {
case <-drawTimer:
atomic.AddInt64(&fpsCount, 1)
DrawImage(WindowImage)
case <-printTimer:
execs := atomic.LoadInt64(&execCount)
fps := atomic.SwapInt64(&fpsCount, 0)
fmt.Println("FPS:", fps, "Current executions:", execs)
default:
time.Sleep(100 * time.Nanosecond)
}
}
}
// Write an instruction to x,y,r
func WriteInstruction(x, y uint32, r *uint32, instr Instruction, data int32) {
mem, _ := Coords(x, y, *r)
instrData := (uint32(instr&0xF) << 20) | uint32(data&0xFFFFF)
Write24Bit(mem, instrData)
*r++
}
// Read a 24 bit pixel as 2x 12 bit uints
func Read12Bit(in []uint8) (outx, outy uint32) {
outx = uint32(in[0]) | (uint32(in[1]&0xF) << 8)
outy = (uint32(in[1]&0xF0) >> 4) | (uint32(in[2]) << 4)
return outx, outy
}
// Write 2x 12 bit uints as a 24 bit pixel
func Write12Bit(out []uint8, inx, iny uint32) {
out[0] = uint8(inx & 0xFF)
out[1] = uint8(((inx & 0xF00) >> 8) | ((iny & 0xF) << 4))
out[2] = uint8(((iny & 0xFF0) >> 4))
}
// Read a 24 bit pixel as a 24 bit uint
func Read24Bit(in []uint8) (out uint32) {
out = uint32(in[0]) | (uint32(in[1]) << 8) | (uint32(in[2]) << 16)
return out
}
// Write a 24 bit uint as a 24 bit pixel
func Write24Bit(out []uint8, in uint32) {
out[0] = uint8(in & 0xFF)
out[1] = uint8(((in & 0xFF00) >> 8))
out[2] = uint8(((in & 0xFF0000) >> 16))
}
// Convert a 2s complement 20-bit uint to a 32-bit uint
func Signed20Bit(in uint32) (out uint32) {
out = in
if (out & 0x80000) != 0 {
// Data is negative
out |= 0xFFF00000
}
return out
}
// Generate a roughly spiral shaped memory space.
// CoordOffset represents an XY offset relative to a center coordinate
// Larger indexes in CoordOffset will have a larger distance from center
var CoordOffset []XY
// Return the slice of pixel data for the psuedo-polar coordinates x,y,r
func Coords(x, y, r uint32) ([]uint8, bool) {
if r >= uint32(len(CoordOffset)) {
// log.Printf("Coord radius too large: %d\n", r)
return []uint8{0, 0, 0}, false
}
dx := CoordOffset[r][0]
dy := CoordOffset[r][1]
if int(x)+dx < 0 || int(x)+dx >= WindowImage.Rect.Max.X || int(y)+dy < 0 || int(y)+dy >= WindowImage.Rect.Max.Y {
return []uint8{0, 0, 0}, false
}
i := (int(x)+dx)*4 + (int(y)+dy)*WindowImage.Stride
return WindowImage.Pix[i : i+3], true
}
var DirOffset = [8]XY{
{0, -1},
{-1, -1},
{-1, 0},
{-1, 1},
{0, 1},
{1, 1},
{1, 0},
{1, -1},
}
func init() {
// Generate CoordOffset lookup table using a boundary fill algorithm
var area [2048][2048]bool
originx := 1024
originy := 1024
CoordOffset = []XY{{0, 0}, {0, 1}}
area[originx][originy] = true
area[originx][originy+1] = true
dx := 0
dy := 1
for {
var dir int
if dx >= 0 {
if dy > dx {
dir = 0
} else if dy >= 0 {
dir = 1
} else if dx > -dy {
dir = 2
} else {
dir = 3
}
} else {
if dy < dx {
dir = 4
} else if dy < 0 {
dir = 5
} else if -dx > dy {
dir = 6
} else {
dir = 7
}
}
for i := 0; i < 8; i++ {
ddx := DirOffset[(dir+i)%8][0]
ddy := DirOffset[(dir+i)%8][1]
if dx+ddx < -originx || dx+ddx >= originx || dy+ddy < -originy || dy+ddy >= originy {
return
}
if !area[originx+dx+ddx][originy+dy+ddy] {
dx += ddx
dy += ddy
CoordOffset = append(CoordOffset, XY{dx, dy})
area[originx+dx][originy+dy] = true
break
}
}
}
}
| func DrawImage(img *xgraphics.Image) {
img.XDraw()
img.XPaint(DisplayWindow.Id)
} | random_line_split | |
main.go | package main
import (
"fmt"
"image"
"log"
"math/rand"
"sync/atomic"
"time"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil"
"github.com/BurntSushi/xgbutil/xevent"
"github.com/BurntSushi/xgbutil/xgraphics"
"github.com/BurntSushi/xgbutil/xwindow"
)
var DisplayWindow *xwindow.Window
var WindowImage *xgraphics.Image
var X *xgbutil.XUtil
type Instruction uint32
type XY [2]int
// Static address list (relative to a center pixel)
const (
PROGRAM_COUNTER uint32 = iota // 24 bit program counter
STACK_POINTER // 24 bit stack pointer
REGISTER_A // Register A
REGISTER_B // Register B
PROGRAM // Start of program
)
// Instructions (4 bits instruction, 20 bits data):
const (
NOP Instruction = iota
JUMP // Relative jump (Add data (signed) to PC)
COND // Conditional jump (If A is non-zero, perform relative jump)
SETA // Set A (Set A to data (unsigned))
ADDI // Add immediate (Add data (signed) to A)
PUSH // Push stack (Push A to stack)
POP // Pop stack (Pop from stack and store in A)
ADDS // Add stack (Pop from stack and add to A)
SWAP // Swap A and B
RAND // Add random value to A (A = rand % data, unless data == 0)
SHIFT // Shift A (Shift contents of A by data (signed))
LOCAL // Set A to the current program's x,y coordinates
RPUSH // Push remote (Push A to remote stack of x,y = B)
RSET // Write remote (Store A at x,y = B, r = data)
RGET // Read remote (Read A from x,y = B, r = data)
FORK // Fork (call go Exec(B))
)
func (i Instruction) String() string {
return []string{"NOP", "JUMP", "COND", "SETA", "ADDI", "PUSH", "POP", "ADDS", "SWAP", "RAND", "SHIFT", "LOCAL", "RPUSH", "RSET", "RGET", "FORK"}[i]
}
func WriteProgram(x, y uint32) {
r := PROGRAM
// Padding for static memory usage
WriteInstruction(x, y, &r, JUMP, 3)
mem1 := r
WriteInstruction(x, y, &r, NOP, 0)
mem2 := r
WriteInstruction(x, y, &r, NOP, 0)
{ // Store end of program in the static slot 1
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, RSET, int32(mem1))
}
start := r
{ // for i := 1024; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 1024)
loop := r
{ // Push random blue to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // for i := 2048; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 2048)
loop := r
{ // Push random green to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, SHIFT, 8)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // for i := 4096; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 4096)
loop := r
{ // Push random red to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, SHIFT, 16)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // Copy the current program
{ // Store current stack pointer in static slot 2
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, RSET, int32(mem2))
}
{ // Set stack pointer to end of program
WriteInstruction(x, y, &r, RGET, int32(mem1))
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
{ // Select random coordinates for new program and set stack pointer to end of remote program
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, SETA, 0)
WriteInstruction(x, y, &r, RAND, 1024)
WriteInstruction(x, y, &r, SHIFT, 12)
WriteInstruction(x, y, &r, RAND, 1024)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
{ // Pop from local stack, push to local stack in reverse order
WriteInstruction(x, y, &r, ADDI, -1)
loop := r
WriteInstruction(x, y, &r, ADDI, 1)
WriteInstruction(x, y, &r, POP, 0)
WriteInstruction(x, y, &r, RPUSH, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, ADDI, -2)
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // Fork remote program
WriteInstruction(x, y, &r, RGET, int32(mem1)) // Restore remote stack pointer
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, SETA, 0)
WriteInstruction(x, y, &r, RSET, int32(PROGRAM_COUNTER))
WriteInstruction(x, y, &r, FORK, 0)
}
{ // Restore local stack
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(mem2))
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
}
WriteInstruction(x, y, &r, JUMP, int32(start-r))
data, _ := Coords(x, y, PROGRAM_COUNTER)
Write24Bit(data, 0) // Initial program counter
data, _ = Coords(x, y, STACK_POINTER)
Write24Bit(data, r) // Set stack pointer to end of program
// fmt.Println("Program length:", r)
}
func main() {
var err error
X, err = xgbutil.NewConn()
if err != nil |
// Create a window
DisplayWindow, err = xwindow.Generate(X)
if err != nil {
log.Fatalf("Could not generate a new window X id: %s", err)
}
DisplayWindow.Create(X.RootWin(), 0, 0, 1024, 1024, xproto.CwBackPixel, 0)
DisplayWindow.Map()
WindowImage = xgraphics.New(X, image.Rect(0, 0, 1024, 1024))
err = WindowImage.XSurfaceSet(DisplayWindow.Id)
if err != nil {
log.Printf("Error while setting window surface to image %d: %s\n", DisplayWindow, err)
}
// Start a program in the center of the screen if there have been no recent forks
go func() {
for {
count := atomic.SwapInt64(&forkCount, 0)
if count < 1 {
fmt.Println("No forks in the last 10 seconds, spawning new program")
WriteProgram(512, 512)
go Exec(512, 512)
}
time.Sleep(10 * time.Second)
}
}()
drawTimer = time.Tick(16 * time.Millisecond)
printTimer = time.Tick(1 * time.Second)
xevent.Main(X)
}
var execCount int64 = 0
var forkCount int64 = 0
var fpsCount int64 = 0
var drawTimer <-chan time.Time
var printTimer <-chan time.Time
var execs [1024][1024]uint32
func Exec(x, y uint32) {
// Limit executions to 100 concurrent programs
count := atomic.AddInt64(&execCount, 1)
atomic.AddInt64(&forkCount, 1)
defer atomic.AddInt64(&execCount, -1)
if count > 100 {
return
}
// De-duplicate programs running at the same location
if int(x) >= WindowImage.Rect.Max.X || int(y) >= WindowImage.Rect.Max.Y {
return
}
old := atomic.SwapUint32(&execs[x][y], 1)
if old != 0 {
return
}
defer atomic.StoreUint32(&execs[x][y], 0)
// fmt.Println("Starting program at", x, y)
for {
// Read Program Counter
mem, ok := Coords(x, y, PROGRAM_COUNTER)
if !ok {
// fmt.Println("Program", x, y, "invalid")
return
}
PC := Read24Bit(mem)
// Read Stack Pointer
mem, ok = Coords(x, y, STACK_POINTER)
if !ok {
// fmt.Println("Program", x, y, "has invalid stack pointer")
return
}
S := Read24Bit(mem)
// Read Register A
mem, ok = Coords(x, y, REGISTER_A)
if !ok {
// fmt.Println("Program", x, y, "has invalid register A")
return
}
A := Read24Bit(mem)
// Read Register B
mem, ok = Coords(x, y, REGISTER_B)
if !ok {
// fmt.Println("Program", x, y, "has invalid register B")
return
}
B := Read24Bit(mem)
// Read instruction
mem, ok = Coords(x, y, PROGRAM+PC)
if !ok {
// fmt.Println("Program", x, y, "has invalid PC:", PC)
return
}
instrData := Read24Bit(mem)
instr := Instruction((instrData & 0xF00000) >> 20)
data := instrData & 0xFFFFF
// Evaluate instruction
// fmt.Printf("(%d, %d): PC(%4d) S(%4d) A(%10d) B(%10d) Instruction(%s)\tData(%d)\n", x, y, PC, S, A, B, instr, int32(Signed20Bit(data)))
switch instr {
case NOP: // NOP
case JUMP: // Relative jump
PC += Signed20Bit(data) - 1
case COND: // Conditional jump
if A != 0 {
PC += Signed20Bit(data) - 1
}
case SETA: // Set register A
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, data)
case ADDI: // Add immediate to A
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A+Signed20Bit(data))
case PUSH: // Push A to stack
mem, ok = Coords(x, y, S)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S)
return
}
Write24Bit(mem, A)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S+1)
case POP: // Pop stack into A
mem, ok = Coords(x, y, S-1)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S-1)
return
}
r := Read24Bit(mem)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S-1)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, r)
case ADDS: // Pop from stack and add to A
mem, ok = Coords(x, y, S-1)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S-1)
return
}
r := Read24Bit(mem)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S-1)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A+r)
case SWAP: // Swap registers A and B
mem, _ = Coords(x, y, REGISTER_B)
Write24Bit(mem, A)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, B)
case RAND: // Add random bits to A
mem, _ = Coords(x, y, REGISTER_A)
r := rand.Uint32()
if data != 0 {
r = r % data
}
Write24Bit(mem, A+r)
case SHIFT: // Shift A by data (signed)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A<<Signed20Bit(data))
case LOCAL: // Set A to x,y
mem, _ = Coords(x, y, REGISTER_A)
Write12Bit(mem, x, y)
case RPUSH: // Push to remote stack
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, STACK_POINTER)
if !ok {
// fmt.Println("Program", x2, y2, "has invalid stack pointer")
} else {
S2 := Read24Bit(mem)
mem, ok = Coords(x2, y2, S2)
if !ok {
// fmt.Println("Program", x2, y2, "has remote stack overflow:", S2)
} else {
Write24Bit(mem, A)
}
mem, _ = Coords(x2, y2, STACK_POINTER)
Write24Bit(mem, S2+1)
}
case RSET: // Write A into remote address
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, data)
if !ok {
// fmt.Println("Program", x, y, "has invalid remote write:", x2, y2, data)
} else {
Write24Bit(mem, A)
}
case RGET: // Read remote address into A
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, data)
if !ok {
// fmt.Println("Program", x, y, "has invalid remote read:", x2, y2, data)
} else {
r := Read24Bit(mem)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, r)
}
case FORK: // Call go Exec(B)
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
go Exec(x2, y2)
default:
fmt.Println("Program", x, y, "running unknown instruction:", instr)
return
}
// Advance program counter
mem, _ = Coords(x, y, PROGRAM_COUNTER)
Write24Bit(mem, PC+1)
if (atomic.AddUint32(&execs[x][y], 1) > 1000000) {
return
}
select {
case <-drawTimer:
atomic.AddInt64(&fpsCount, 1)
DrawImage(WindowImage)
case <-printTimer:
execs := atomic.LoadInt64(&execCount)
fps := atomic.SwapInt64(&fpsCount, 0)
fmt.Println("FPS:", fps, "Current executions:", execs)
default:
time.Sleep(100 * time.Nanosecond)
}
}
}
// Write an instruction to x,y,r
func WriteInstruction(x, y uint32, r *uint32, instr Instruction, data int32) {
mem, _ := Coords(x, y, *r)
instrData := (uint32(instr&0xF) << 20) | uint32(data&0xFFFFF)
Write24Bit(mem, instrData)
*r++
}
// Read a 24 bit pixel as 2x 12 bit uints
func Read12Bit(in []uint8) (outx, outy uint32) {
outx = uint32(in[0]) | (uint32(in[1]&0xF) << 8)
outy = (uint32(in[1]&0xF0) >> 4) | (uint32(in[2]) << 4)
return outx, outy
}
// Write 2x 12 bit uints as a 24 bit pixel
func Write12Bit(out []uint8, inx, iny uint32) {
out[0] = uint8(inx & 0xFF)
out[1] = uint8(((inx & 0xF00) >> 8) | ((iny & 0xF) << 4))
out[2] = uint8(((iny & 0xFF0) >> 4))
}
// Read a 24 bit pixel as a 24 bit uint
func Read24Bit(in []uint8) (out uint32) {
out = uint32(in[0]) | (uint32(in[1]) << 8) | (uint32(in[2]) << 16)
return out
}
// Write a 24 bit uint as a 24 bit pixel
func Write24Bit(out []uint8, in uint32) {
out[0] = uint8(in & 0xFF)
out[1] = uint8(((in & 0xFF00) >> 8))
out[2] = uint8(((in & 0xFF0000) >> 16))
}
// Convert a 2s complement 20-bit uint to a 32-bit uint
func Signed20Bit(in uint32) (out uint32) {
out = in
if (out & 0x80000) != 0 {
// Data is negative
out |= 0xFFF00000
}
return out
}
// Generate a roughly spiral shaped memory space.
// CoordOffset represents an XY offset relative to a center coordinate
// Larger indexes in CoordOffset will have a larger distance from center
var CoordOffset []XY
// Return the slice of pixel data for the psuedo-polar coordinates x,y,r
func Coords(x, y, r uint32) ([]uint8, bool) {
if r >= uint32(len(CoordOffset)) {
// log.Printf("Coord radius too large: %d\n", r)
return []uint8{0, 0, 0}, false
}
dx := CoordOffset[r][0]
dy := CoordOffset[r][1]
if int(x)+dx < 0 || int(x)+dx >= WindowImage.Rect.Max.X || int(y)+dy < 0 || int(y)+dy >= WindowImage.Rect.Max.Y {
return []uint8{0, 0, 0}, false
}
i := (int(x)+dx)*4 + (int(y)+dy)*WindowImage.Stride
return WindowImage.Pix[i : i+3], true
}
var DirOffset = [8]XY{
{0, -1},
{-1, -1},
{-1, 0},
{-1, 1},
{0, 1},
{1, 1},
{1, 0},
{1, -1},
}
func init() {
// Generate CoordOffset lookup table using a boundary fill algorithm
var area [2048][2048]bool
originx := 1024
originy := 1024
CoordOffset = []XY{{0, 0}, {0, 1}}
area[originx][originy] = true
area[originx][originy+1] = true
dx := 0
dy := 1
for {
var dir int
if dx >= 0 {
if dy > dx {
dir = 0
} else if dy >= 0 {
dir = 1
} else if dx > -dy {
dir = 2
} else {
dir = 3
}
} else {
if dy < dx {
dir = 4
} else if dy < 0 {
dir = 5
} else if -dx > dy {
dir = 6
} else {
dir = 7
}
}
for i := 0; i < 8; i++ {
ddx := DirOffset[(dir+i)%8][0]
ddy := DirOffset[(dir+i)%8][1]
if dx+ddx < -originx || dx+ddx >= originx || dy+ddy < -originy || dy+ddy >= originy {
return
}
if !area[originx+dx+ddx][originy+dy+ddy] {
dx += ddx
dy += ddy
CoordOffset = append(CoordOffset, XY{dx, dy})
area[originx+dx][originy+dy] = true
break
}
}
}
}
func DrawImage(img *xgraphics.Image) {
img.XDraw()
img.XPaint(DisplayWindow.Id)
}
| {
log.Fatal(err)
} | conditional_block |
main.go | package main
import (
"fmt"
"image"
"log"
"math/rand"
"sync/atomic"
"time"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil"
"github.com/BurntSushi/xgbutil/xevent"
"github.com/BurntSushi/xgbutil/xgraphics"
"github.com/BurntSushi/xgbutil/xwindow"
)
var DisplayWindow *xwindow.Window
var WindowImage *xgraphics.Image
var X *xgbutil.XUtil
type Instruction uint32
type XY [2]int
// Static address list (relative to a center pixel)
const (
PROGRAM_COUNTER uint32 = iota // 24 bit program counter
STACK_POINTER // 24 bit stack pointer
REGISTER_A // Register A
REGISTER_B // Register B
PROGRAM // Start of program
)
// Instructions (4 bits instruction, 20 bits data):
const (
NOP Instruction = iota
JUMP // Relative jump (Add data (signed) to PC)
COND // Conditional jump (If A is non-zero, perform relative jump)
SETA // Set A (Set A to data (unsigned))
ADDI // Add immediate (Add data (signed) to A)
PUSH // Push stack (Push A to stack)
POP // Pop stack (Pop from stack and store in A)
ADDS // Add stack (Pop from stack and add to A)
SWAP // Swap A and B
RAND // Add random value to A (A = rand % data, unless data == 0)
SHIFT // Shift A (Shift contents of A by data (signed))
LOCAL // Set A to the current program's x,y coordinates
RPUSH // Push remote (Push A to remote stack of x,y = B)
RSET // Write remote (Store A at x,y = B, r = data)
RGET // Read remote (Read A from x,y = B, r = data)
FORK // Fork (call go Exec(B))
)
func (i Instruction) String() string {
return []string{"NOP", "JUMP", "COND", "SETA", "ADDI", "PUSH", "POP", "ADDS", "SWAP", "RAND", "SHIFT", "LOCAL", "RPUSH", "RSET", "RGET", "FORK"}[i]
}
func WriteProgram(x, y uint32) {
r := PROGRAM
// Padding for static memory usage
WriteInstruction(x, y, &r, JUMP, 3)
mem1 := r
WriteInstruction(x, y, &r, NOP, 0)
mem2 := r
WriteInstruction(x, y, &r, NOP, 0)
{ // Store end of program in the static slot 1
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, RSET, int32(mem1))
}
start := r
{ // for i := 1024; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 1024)
loop := r
{ // Push random blue to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // for i := 2048; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 2048)
loop := r
{ // Push random green to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, SHIFT, 8)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // for i := 4096; i != 0; i-- {
WriteInstruction(x, y, &r, SETA, 4096)
loop := r
{ // Push random red to stack
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RAND, 255)
WriteInstruction(x, y, &r, SHIFT, 16)
WriteInstruction(x, y, &r, PUSH, 0)
WriteInstruction(x, y, &r, SWAP, 0)
}
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // Copy the current program
{ // Store current stack pointer in static slot 2
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, RSET, int32(mem2))
}
{ // Set stack pointer to end of program
WriteInstruction(x, y, &r, RGET, int32(mem1))
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
{ // Select random coordinates for new program and set stack pointer to end of remote program
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, SETA, 0)
WriteInstruction(x, y, &r, RAND, 1024)
WriteInstruction(x, y, &r, SHIFT, 12)
WriteInstruction(x, y, &r, RAND, 1024)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
{ // Pop from local stack, push to local stack in reverse order
WriteInstruction(x, y, &r, ADDI, -1)
loop := r
WriteInstruction(x, y, &r, ADDI, 1)
WriteInstruction(x, y, &r, POP, 0)
WriteInstruction(x, y, &r, RPUSH, 0)
WriteInstruction(x, y, &r, RGET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, ADDI, -2)
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, ADDI, -1)
WriteInstruction(x, y, &r, COND, int32(loop-r))
}
{ // Fork remote program
WriteInstruction(x, y, &r, RGET, int32(mem1)) // Restore remote stack pointer
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
WriteInstruction(x, y, &r, SETA, 0)
WriteInstruction(x, y, &r, RSET, int32(PROGRAM_COUNTER))
WriteInstruction(x, y, &r, FORK, 0)
}
{ // Restore local stack
WriteInstruction(x, y, &r, LOCAL, 0)
WriteInstruction(x, y, &r, SWAP, 0)
WriteInstruction(x, y, &r, RGET, int32(mem2))
WriteInstruction(x, y, &r, RSET, int32(STACK_POINTER))
}
}
WriteInstruction(x, y, &r, JUMP, int32(start-r))
data, _ := Coords(x, y, PROGRAM_COUNTER)
Write24Bit(data, 0) // Initial program counter
data, _ = Coords(x, y, STACK_POINTER)
Write24Bit(data, r) // Set stack pointer to end of program
// fmt.Println("Program length:", r)
}
func main() {
var err error
X, err = xgbutil.NewConn()
if err != nil {
log.Fatal(err)
}
// Create a window
DisplayWindow, err = xwindow.Generate(X)
if err != nil {
log.Fatalf("Could not generate a new window X id: %s", err)
}
DisplayWindow.Create(X.RootWin(), 0, 0, 1024, 1024, xproto.CwBackPixel, 0)
DisplayWindow.Map()
WindowImage = xgraphics.New(X, image.Rect(0, 0, 1024, 1024))
err = WindowImage.XSurfaceSet(DisplayWindow.Id)
if err != nil {
log.Printf("Error while setting window surface to image %d: %s\n", DisplayWindow, err)
}
// Start a program in the center of the screen if there have been no recent forks
go func() {
for {
count := atomic.SwapInt64(&forkCount, 0)
if count < 1 {
fmt.Println("No forks in the last 10 seconds, spawning new program")
WriteProgram(512, 512)
go Exec(512, 512)
}
time.Sleep(10 * time.Second)
}
}()
drawTimer = time.Tick(16 * time.Millisecond)
printTimer = time.Tick(1 * time.Second)
xevent.Main(X)
}
var execCount int64 = 0
var forkCount int64 = 0
var fpsCount int64 = 0
var drawTimer <-chan time.Time
var printTimer <-chan time.Time
var execs [1024][1024]uint32
func Exec(x, y uint32) |
// Write an instruction to x,y,r
func WriteInstruction(x, y uint32, r *uint32, instr Instruction, data int32) {
mem, _ := Coords(x, y, *r)
instrData := (uint32(instr&0xF) << 20) | uint32(data&0xFFFFF)
Write24Bit(mem, instrData)
*r++
}
// Read a 24 bit pixel as 2x 12 bit uints
func Read12Bit(in []uint8) (outx, outy uint32) {
outx = uint32(in[0]) | (uint32(in[1]&0xF) << 8)
outy = (uint32(in[1]&0xF0) >> 4) | (uint32(in[2]) << 4)
return outx, outy
}
// Write 2x 12 bit uints as a 24 bit pixel
func Write12Bit(out []uint8, inx, iny uint32) {
out[0] = uint8(inx & 0xFF)
out[1] = uint8(((inx & 0xF00) >> 8) | ((iny & 0xF) << 4))
out[2] = uint8(((iny & 0xFF0) >> 4))
}
// Read a 24 bit pixel as a 24 bit uint
func Read24Bit(in []uint8) (out uint32) {
out = uint32(in[0]) | (uint32(in[1]) << 8) | (uint32(in[2]) << 16)
return out
}
// Write a 24 bit uint as a 24 bit pixel
func Write24Bit(out []uint8, in uint32) {
out[0] = uint8(in & 0xFF)
out[1] = uint8(((in & 0xFF00) >> 8))
out[2] = uint8(((in & 0xFF0000) >> 16))
}
// Convert a 2s complement 20-bit uint to a 32-bit uint
func Signed20Bit(in uint32) (out uint32) {
out = in
if (out & 0x80000) != 0 {
// Data is negative
out |= 0xFFF00000
}
return out
}
// Generate a roughly spiral shaped memory space.
// CoordOffset represents an XY offset relative to a center coordinate
// Larger indexes in CoordOffset will have a larger distance from center
var CoordOffset []XY
// Return the slice of pixel data for the psuedo-polar coordinates x,y,r
func Coords(x, y, r uint32) ([]uint8, bool) {
if r >= uint32(len(CoordOffset)) {
// log.Printf("Coord radius too large: %d\n", r)
return []uint8{0, 0, 0}, false
}
dx := CoordOffset[r][0]
dy := CoordOffset[r][1]
if int(x)+dx < 0 || int(x)+dx >= WindowImage.Rect.Max.X || int(y)+dy < 0 || int(y)+dy >= WindowImage.Rect.Max.Y {
return []uint8{0, 0, 0}, false
}
i := (int(x)+dx)*4 + (int(y)+dy)*WindowImage.Stride
return WindowImage.Pix[i : i+3], true
}
var DirOffset = [8]XY{
{0, -1},
{-1, -1},
{-1, 0},
{-1, 1},
{0, 1},
{1, 1},
{1, 0},
{1, -1},
}
func init() {
// Generate CoordOffset lookup table using a boundary fill algorithm
var area [2048][2048]bool
originx := 1024
originy := 1024
CoordOffset = []XY{{0, 0}, {0, 1}}
area[originx][originy] = true
area[originx][originy+1] = true
dx := 0
dy := 1
for {
var dir int
if dx >= 0 {
if dy > dx {
dir = 0
} else if dy >= 0 {
dir = 1
} else if dx > -dy {
dir = 2
} else {
dir = 3
}
} else {
if dy < dx {
dir = 4
} else if dy < 0 {
dir = 5
} else if -dx > dy {
dir = 6
} else {
dir = 7
}
}
for i := 0; i < 8; i++ {
ddx := DirOffset[(dir+i)%8][0]
ddy := DirOffset[(dir+i)%8][1]
if dx+ddx < -originx || dx+ddx >= originx || dy+ddy < -originy || dy+ddy >= originy {
return
}
if !area[originx+dx+ddx][originy+dy+ddy] {
dx += ddx
dy += ddy
CoordOffset = append(CoordOffset, XY{dx, dy})
area[originx+dx][originy+dy] = true
break
}
}
}
}
func DrawImage(img *xgraphics.Image) {
img.XDraw()
img.XPaint(DisplayWindow.Id)
}
| {
// Limit executions to 100 concurrent programs
count := atomic.AddInt64(&execCount, 1)
atomic.AddInt64(&forkCount, 1)
defer atomic.AddInt64(&execCount, -1)
if count > 100 {
return
}
// De-duplicate programs running at the same location
if int(x) >= WindowImage.Rect.Max.X || int(y) >= WindowImage.Rect.Max.Y {
return
}
old := atomic.SwapUint32(&execs[x][y], 1)
if old != 0 {
return
}
defer atomic.StoreUint32(&execs[x][y], 0)
// fmt.Println("Starting program at", x, y)
for {
// Read Program Counter
mem, ok := Coords(x, y, PROGRAM_COUNTER)
if !ok {
// fmt.Println("Program", x, y, "invalid")
return
}
PC := Read24Bit(mem)
// Read Stack Pointer
mem, ok = Coords(x, y, STACK_POINTER)
if !ok {
// fmt.Println("Program", x, y, "has invalid stack pointer")
return
}
S := Read24Bit(mem)
// Read Register A
mem, ok = Coords(x, y, REGISTER_A)
if !ok {
// fmt.Println("Program", x, y, "has invalid register A")
return
}
A := Read24Bit(mem)
// Read Register B
mem, ok = Coords(x, y, REGISTER_B)
if !ok {
// fmt.Println("Program", x, y, "has invalid register B")
return
}
B := Read24Bit(mem)
// Read instruction
mem, ok = Coords(x, y, PROGRAM+PC)
if !ok {
// fmt.Println("Program", x, y, "has invalid PC:", PC)
return
}
instrData := Read24Bit(mem)
instr := Instruction((instrData & 0xF00000) >> 20)
data := instrData & 0xFFFFF
// Evaluate instruction
// fmt.Printf("(%d, %d): PC(%4d) S(%4d) A(%10d) B(%10d) Instruction(%s)\tData(%d)\n", x, y, PC, S, A, B, instr, int32(Signed20Bit(data)))
switch instr {
case NOP: // NOP
case JUMP: // Relative jump
PC += Signed20Bit(data) - 1
case COND: // Conditional jump
if A != 0 {
PC += Signed20Bit(data) - 1
}
case SETA: // Set register A
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, data)
case ADDI: // Add immediate to A
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A+Signed20Bit(data))
case PUSH: // Push A to stack
mem, ok = Coords(x, y, S)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S)
return
}
Write24Bit(mem, A)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S+1)
case POP: // Pop stack into A
mem, ok = Coords(x, y, S-1)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S-1)
return
}
r := Read24Bit(mem)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S-1)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, r)
case ADDS: // Pop from stack and add to A
mem, ok = Coords(x, y, S-1)
if !ok {
// fmt.Println("Program", x, y, "has stack overflow:", S-1)
return
}
r := Read24Bit(mem)
mem, _ = Coords(x, y, STACK_POINTER)
Write24Bit(mem, S-1)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A+r)
case SWAP: // Swap registers A and B
mem, _ = Coords(x, y, REGISTER_B)
Write24Bit(mem, A)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, B)
case RAND: // Add random bits to A
mem, _ = Coords(x, y, REGISTER_A)
r := rand.Uint32()
if data != 0 {
r = r % data
}
Write24Bit(mem, A+r)
case SHIFT: // Shift A by data (signed)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, A<<Signed20Bit(data))
case LOCAL: // Set A to x,y
mem, _ = Coords(x, y, REGISTER_A)
Write12Bit(mem, x, y)
case RPUSH: // Push to remote stack
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, STACK_POINTER)
if !ok {
// fmt.Println("Program", x2, y2, "has invalid stack pointer")
} else {
S2 := Read24Bit(mem)
mem, ok = Coords(x2, y2, S2)
if !ok {
// fmt.Println("Program", x2, y2, "has remote stack overflow:", S2)
} else {
Write24Bit(mem, A)
}
mem, _ = Coords(x2, y2, STACK_POINTER)
Write24Bit(mem, S2+1)
}
case RSET: // Write A into remote address
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, data)
if !ok {
// fmt.Println("Program", x, y, "has invalid remote write:", x2, y2, data)
} else {
Write24Bit(mem, A)
}
case RGET: // Read remote address into A
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
mem, ok = Coords(x2, y2, data)
if !ok {
// fmt.Println("Program", x, y, "has invalid remote read:", x2, y2, data)
} else {
r := Read24Bit(mem)
mem, _ = Coords(x, y, REGISTER_A)
Write24Bit(mem, r)
}
case FORK: // Call go Exec(B)
mem, _ = Coords(x, y, REGISTER_B)
x2, y2 := Read12Bit(mem)
go Exec(x2, y2)
default:
fmt.Println("Program", x, y, "running unknown instruction:", instr)
return
}
// Advance program counter
mem, _ = Coords(x, y, PROGRAM_COUNTER)
Write24Bit(mem, PC+1)
if (atomic.AddUint32(&execs[x][y], 1) > 1000000) {
return
}
select {
case <-drawTimer:
atomic.AddInt64(&fpsCount, 1)
DrawImage(WindowImage)
case <-printTimer:
execs := atomic.LoadInt64(&execCount)
fps := atomic.SwapInt64(&fpsCount, 0)
fmt.Println("FPS:", fps, "Current executions:", execs)
default:
time.Sleep(100 * time.Nanosecond)
}
}
} | identifier_body |
product_structs.go | // Copyright 2013 The Changkong Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package product
const VersionNo = "20140725"
/* 卖家设置售后服务对象 */
type AfterSale struct {
AfterSaleId int `json:"after_sale_id"`
AfterSaleName string `json:"after_sale_name"`
AfterSalePath string `json:"after_sale_path"`
}
/* Item(商品)结构 */
type Item struct {
AfterSaleId int `json:"after_sale_id"`
ApproveStatus string `json:"approve_status"`
AuctionPoint int `json:"auction_point"`
AutoFill string `json:"auto_fill"`
Barcode string `json:"barcode"`
ChangeProp string `json:"change_prop"`
Cid int `json:"cid"`
CodPostageId int `json:"cod_postage_id"`
Created string `json:"created"`
CustomMadeTypeId string `json:"custom_made_type_id"`
DelistTime string `json:"delist_time"`
DeliveryTime *DeliveryTime `json:"delivery_time"`
Desc string `json:"desc"`
DescModuleInfo *DescModuleInfo `json:"desc_module_info"`
DescModules string `json:"desc_modules"`
DetailUrl string `json:"detail_url"`
EmsFee float64 `json:"ems_fee,string"`
ExpressFee float64 `json:"express_fee,string"`
Features string `json:"features"`
FoodSecurity *FoodSecurity `json:"food_security"`
FreightPayer string `json:"freight_payer"`
GlobalStockCountry string `json:"global_stock_country"`
GlobalStockType string `json:"global_stock_type"`
HasDiscount bool `json:"has_discount"`
HasInvoice bool `json:"has_invoice"`
HasShowcase bool `json:"has_showcase"`
HasWarranty bool `json:"has_warranty"`
Increment string `json:"increment"`
InnerShopAuctionTemplateId int `json:"inner_shop_auction_template_id"`
InputPids string `json:"input_pids"`
InputStr string `json:"input_str"`
Is3D bool `json:"is_3D"`
IsCspu bool `json:"is_cspu"`
IsEx bool `json:"is_ex"`
IsFenxiao int `json:"is_fenxiao"`
IsLightningConsignment bool `json:"is_lightning_consignment"`
IsOffline string `json:"is_offline"`
IsPrepay bool `json:"is_prepay"`
IsTaobao bool `json:"is_taobao"`
IsTiming bool `json:"is_timing"`
IsVirtual bool `json:"is_virtual"`
IsXinpin bool `json:"is_xinpin"`
ItemImgs struct {
ItemImg []*ItemImg `json:"item_img"`
} `json:"item_imgs"`
ItemSize string `json:"item_size"`
ItemWeight string `json:"item_weight"`
ListTime string `json:"list_time"`
LocalityLife *LocalityLife `json:"locality_life"`
Location *Location `json:"location"`
Modified string `json:"modified"`
MpicVideo *MpicVideo `json:"mpic_video"`
Newprepay string `json:"newprepay"`
Nick string `json:"nick"`
Num int `json:"num"`
NumIid int `json:"num_iid"`
OneStation bool `json:"one_station"`
OuterId string `json:"outer_id"`
OuterShopAuctionTemplateId int `json:"outer_shop_auction_template_id"`
PaimaiInfo *PaimaiInfo `json:"paimai_info"`
PicUrl string `json:"pic_url"`
PostFee float64 `json:"post_fee,string"`
PostageId int `json:"postage_id"`
Price float64 `json:"price,string"`
ProductId int `json:"product_id"`
PromotedService string `json:"promoted_service"`
PropImgs struct {
PropImg []*PropImg `json:"prop_img"`
} `json:"prop_imgs"`
PropertyAlias string `json:"property_alias"`
Props string `json:"props"`
PropsName string `json:"props_name"`
Score int `json:"score"`
SecondKill string `json:"second_kill"`
SecondResult bool `json:"second_result"`
SellPoint string `json:"sell_point"`
SellPromise bool `json:"sell_promise"`
SellerCids string `json:"seller_cids"`
Skus struct {
Sku []*Sku `json:"sku"`
} `json:"skus"`
SoldQuantity int `json:"sold_quantity"`
StuffStatus string `json:"stuff_status"`
SubStock int `json:"sub_stock"`
SubTitle string `json:"sub_title"`
TemplateId string `json:"template_id"`
Title string `json:"title"`
Type string `json:"type"`
ValidThru int `json:"valid_thru"`
VideoId int `json:"video_id"`
Videos struct {
Video []*Video `json:"video"`
} `json:"videos"`
Violation bool `json:"violation"`
WapDesc string `json:"wap_desc"`
WapDetailUrl string `json:"wap_detail_url"`
WirelessDesc string `json:"wireless_desc"`
WithHoldQuantity int `json:"with_hold_quantity"`
WwStatus bool `json:"ww_status"`
}
/* 发货时间数据结构 */
type DeliveryTime struct {
DeliveryTime string `json:"delivery_time"`
DeliveryTimeType string `json:"delivery_time_type"`
NeedDeliveryTime string `json:"need_delivery_time"`
}
/* 该数据结构保存宝贝描述对应的规范化信息 */
type DescModuleInfo struct {
AnchorModuleIds string `json:"anchor_module_ids"`
Type int `json:"type"`
}
/* 食品安全信息,包括:
生产许可证号、产品标准号、厂名、厂址等 */
type FoodSecurity struct {
Contact string `json:"contact"`
DesignCode string `json:"design_code"`
Factory string `json:"factory"`
FactorySite string `json:"factory_site"`
FoodAdditive string `json:"food_additive"`
HealthProductNo string `json:"health_product_no"`
Mix string `json:"mix"`
Period string `json:"period"`
PlanStorage string `json:"plan_storage"`
PrdLicenseNo string `json:"prd_license_no"`
ProductDateEnd string `json:"product_date_end"`
ProductDateStart string `json:"product_date_start"`
StockDateEnd string `json:"stock_date_end"`
StockDateStart string `json:"stock_date_start"`
Supplier string `json:"supplier"`
}
/* ItemImg结构 */
type ItemImg struct {
Created string `json:"created"`
Id int `json:"id"`
Position int `json:"position"`
Url string `json:"url"`
}
/* 本地生活垂直市场数据结构,修改宝贝时在参数empty_fields里设置locality_life可删除所有电子凭证信息 */
type LocalityLife struct {
ChooseLogis string `json:"choose_logis"`
Eticket string `json:"eticket"`
Expirydate string `json:"expirydate"`
Merchant string `json:"merchant"`
NetworkId string `json:"network_id"`
OnsaleAutoRefundRatio int `json:"onsale_auto_refund_ratio"`
RefundRatio int `json:"refund_ratio"`
Refundmafee string `json:"refundmafee"`
Verification string `json:"verification"`
}
/* 用户地址 */
type Location struct {
Address string `json:"address"`
City string `json:"city"`
Country string `json:"country"`
District string `json:"district"`
State string `json:"state"`
Zip string `json:"zip"`
}
/* 宝贝主图视频数据结构 */
type MpicVideo struct {
NumIid int `json:"num_iid"`
VideoDuaration int `json:"video_duaration"`
VideoId int `json:"video_id"`
VideoPic string `json:"video_pic"`
VideoStatus int `json:"video_status"`
}
/* 拍卖商品相关信息 */
type PaimaiInfo struct {
Deposit int `json:"deposit"`
Interval int `json:"interval"`
Mode int `json:"mode"`
Reserve float64 `json:"reserve,string"`
ValidHour int `json:"valid_hour"`
ValidMinute int `json:"valid_minute"`
}
/* 商品属性图片结构 */
type PropImg struct {
Created string `json:"created"`
Id int `json:"id"`
Position int `json:"position"`
Properties string `json:"properties"`
Url string `json:"url"`
}
/* Sku结构 */
type Sku struct {
Barcode string `json:"barcode"`
ChangeProp string `json:"change_prop"`
Created string `json:"created"`
Iid string `json:"iid"`
Modified string `json:"modified"`
NumIid int `json:"num_iid"`
OuterId string `json:"outer_id"`
Price string `json:"price"`
Properties string `json:"properties"`
PropertiesName string `json:"properties_name"`
Quantity int `json:"quantity"`
SkuDeliveryTime string `json:"sku_delivery_time"`
SkuId int `json:"sku_id"`
SkuSpecId int `json:"sku_spec_id"`
Status string `json:"status"`
WithHoldQuantity int `json:"with_hold_quantity"`
}
/* 商品视频关联记录 */
type Video struct {
Created string `json:"created"`
Id int `json:"id"`
Iid string `json:"iid"`
Modified string `json:"modified"`
NumIid int `json:"num_iid"`
Url string `json:"url"`
VideoId int `json:"video_id"`
}
/* 用于保存宝贝描述规范化模块信息 */
type IdsModule struct {
Id int `json:"id"`
Name string `json:"name"`
Type int `json:"type"`
}
/* 宝贝详情页面信息 */
type ItemTemplate struct {
ShopType int `json:"shop_type"`
TemplateId int `json:"template_id"`
TemplateName string `json:"template_name"`
}
/* 产品结构 */
type Product struct {
BarcodeStr string `json:"barcode_str"`
Binds string `json:"binds"`
BindsStr string `json:"binds_str"`
CatName string `json:"cat_name"`
Cid int `json:"cid"`
CollectNum int `json:"collect_num"`
CommodityId int `json:"commodity_id"`
Created string `json:"created"`
CspuFeature string `json:"cspu_feature"`
CustomerProps string `json:"customer_props"`
Desc string `json:"desc"`
IsSuiteEffective bool `json:"is_suite_effective"`
Level int `json:"level"`
Modified string `json:"modified"`
Name string `json:"name"`
OuterId string `json:"outer_id"`
PicPath string `json:"pic_path"`
PicUrl string `json:"pic_url"`
Price float64 `json:"price,string"`
ProductExtraInfos struct {
ProductExtraInfo []*ProductExtraInfo `json:"product_extra_info"`
} `json:"product_extra_infos"`
ProductId int `json:"product_id"`
ProductImgs struct {
ProductImg []*ProductImg `json:"product_img"`
} `json:"product_imgs"`
ProductPropImgs struct {
ProductPropImg []*ProductPropImg `json:"product_prop_img"`
} `json:"product_prop_imgs"`
PropertyAlias string `json:"property_alias"`
Props string `json:"props"`
PropsStr string `json:"props_str"`
RateNum int `json:"rate_num"`
SaleNum int `json:"sale_num"`
SaleProps string `json:"sale_props"`
SalePropsStr string `json:"sale_props_str"`
SellPt string `json:"sell_pt"`
ShopPrice string `json:"shop_price"`
StandardPrice string `json:"standard_price"`
Status int `json:"status"`
SuiteItemsStr string `json:"suite_items_str"`
TemplateId int `json:"template_id"` | VerticalMarket int `json:"vertical_market"`
}
/* 产品扩展信息 */
type ProductExtraInfo struct {
FieldKey string `json:"field_key"`
FieldName string `json:"field_name"`
FieldValue string `json:"field_value"`
ProductId int `json:"product_id"`
}
/* 产品图片 */
type ProductImg struct {
Created string `json:"created"`
Id int `json:"id"`
Modified string `json:"modified"`
Position int `json:"position"`
ProductId int `json:"product_id"`
Url string `json:"url"`
}
/* 产品属性图片 */
type ProductPropImg struct {
Created string `json:"created"`
Id int `json:"id"`
Modified string `json:"modified"`
Position int `json:"position"`
ProductId int `json:"product_id"`
Props string `json:"props"`
Url string `json:"url"`
}
/* 优惠信息对象 */
type PromotionDisplayTop struct {
PromotionInItem struct {
PromotionInItem []*PromotionInItem `json:"promotion_in_item"`
} `json:"promotion_in_item"`
PromotionInShop struct {
PromotionInShop []*PromotionInShop `json:"promotion_in_shop"`
} `json:"promotion_in_shop"`
}
/* 单品级优惠信息 */
type PromotionInItem struct {
Desc string `json:"desc"`
EndTime string `json:"end_time"`
ItemPromoPrice float64 `json:"item_promo_price,string"`
Name string `json:"name"`
OtherNeed string `json:"other_need"`
OtherSend string `json:"other_send"`
PromotionId string `json:"promotion_id"`
SkuIdList []string `json:"sku_id_list"`
SkuPriceList []float64 `json:"sku_price_list"`
StartTime string `json:"start_time"`
}
/* 店铺级优惠信息 */
type PromotionInShop struct {
Name string `json:"name"`
PromotionDetailDesc string `json:"promotion_detail_desc"`
PromotionId string `json:"promotion_id"`
}
/* 管控的类目以及品牌信息 */
type BrandCatControlInfo struct {
BrandCatControls struct {
BrandCatControl []*BrandCatControl `json:"brand_cat_control"`
} `json:"brand_cat_controls"`
}
/* 管控的品牌类目信息 */
type BrandCatControl struct {
BrandId int `json:"brand_id"`
BrandName string `json:"brand_name"`
CatId int `json:"cat_id"`
CatName string `json:"cat_name"`
CertifiedData string `json:"certified_data"`
}
/* 类目、品牌下的达尔文元数据 */
type BrandCatMetaData struct {
BrandId int `json:"brand_id"`
CatId int `json:"cat_id"`
CertifiedData string `json:"certified_data"`
IsDarwin bool `json:"is_darwin"`
}
/* 属性输入特征DO */
type PropertyInputDO struct {
IsAllowInput bool `json:"is_allow_input"`
IsRootAllowInput bool `json:"is_root_allow_input"`
IsSubProperty bool `json:"is_sub_property"`
PropertyId int `json:"property_id"`
}
/* 被管控的品牌和类目的所对应的销售属性 */
type CatBrandSaleProp struct {
BrandId int `json:"brand_id"`
CatId int `json:"cat_id"`
DefMarketPropValue int `json:"def_market_prop_value"`
IsNotSpec bool `json:"is_not_spec"`
PropertyId int `json:"property_id"`
}
/* 套装配置 */
type SuiteConfDO struct {
Inputs []int `json:"inputs"`
MaxSize int `json:"max_size"`
MaxTotalNumber int `json:"max_total_number"`
RootCatId int `json:"root_cat_id"`
SuiteCatId int `json:"suite_cat_id"`
}
/* 图书类目导入返回结果 */
type ProductBooks struct {
Author string `json:"author"`
BarCode string `json:"bar_code"`
BookName string `json:"book_name"`
CategoryId int `json:"category_id"`
Isbn string `json:"isbn"`
Price string `json:"price"`
}
/* ProductSpec(产品规格)结构。 */
type ProductSpec struct {
Barcode string `json:"barcode"`
BrandId int `json:"brand_id"`
CertifiedPics struct {
CertPicInfo []*CertPicInfo `json:"cert_pic_info"`
} `json:"certified_pics"`
CertifiedTxts struct {
CertTxtInfo []*CertTxtInfo `json:"cert_txt_info"`
} `json:"certified_txts"`
ChangeProp string `json:"change_prop"`
CustomePropsName string `json:"custome_props_name"`
LabelPrice int `json:"label_price"`
MarketTime string `json:"market_time"`
Number int `json:"number"`
PicUrl string `json:"pic_url"`
ProductCode string `json:"product_code"`
ProductId int `json:"product_id"`
SpecId int `json:"spec_id"`
SpecProps string `json:"spec_props"`
SpecPropsAlias string `json:"spec_props_alias"`
Status int `json:"status"`
}
/* 产品资质认证图片信息,包括认证类型以及图片url */
type CertPicInfo struct {
CertType int `json:"cert_type"`
PicUrl string `json:"pic_url"`
}
/* 产品资质认证文本信息,包括认证类型以及文本信息 */
type CertTxtInfo struct {
CertType int `json:"cert_type"`
Text string `json:"text"`
}
/* 产品规格审核信息 */
type Ticket struct {
AuditSellerId int `json:"audit_seller_id"`
CreateUserId int `json:"create_user_id"`
GmtCreate string `json:"gmt_create"`
GmtModified string `json:"gmt_modified"`
Memo string `json:"memo"`
Reason string `json:"reason"`
SpecId int `json:"spec_id"`
Status int `json:"status"`
}
/* SPU发布模板,定义了产品发布需要那些关键属性,绑定属性。 */
type SpuTemplateDO struct {
AffectProperties []int `json:"affect_properties"`
CategoryId int `json:"category_id"`
CommodityId int `json:"commodity_id"`
FilterProperties []int `json:"filter_properties"`
KeyProperties []int `json:"key_properties"`
PropFeatures string `json:"prop_features"`
PropNameStr string `json:"prop_name_str"`
TemplateId int `json:"template_id"`
} | Tsc string `json:"tsc"` | random_line_split |
server-utils.ts | import type { IncomingMessage } from 'http'
import type { Rewrite } from '../lib/load-custom-routes'
import type { RouteMatchFn } from '../shared/lib/router/utils/route-matcher'
import type { NextConfig } from './config'
import type { BaseNextRequest } from './base-http'
import type { ParsedUrlQuery } from 'querystring'
import { format as formatUrl, UrlWithParsedQuery, parse as parseUrl } from 'url'
import { normalizeLocalePath } from '../shared/lib/i18n/normalize-locale-path'
import { getPathMatch } from '../shared/lib/router/utils/path-match'
import { getNamedRouteRegex } from '../shared/lib/router/utils/route-regex'
import { getRouteMatcher } from '../shared/lib/router/utils/route-matcher'
import {
matchHas,
prepareDestination,
} from '../shared/lib/router/utils/prepare-destination'
import { removeTrailingSlash } from '../shared/lib/router/utils/remove-trailing-slash'
import { normalizeRscPath } from '../shared/lib/router/utils/app-paths'
import { NEXT_QUERY_PARAM_PREFIX } from '../lib/constants'
export function normalizeVercelUrl(
req: BaseNextRequest | IncomingMessage,
trustQuery: boolean,
paramKeys?: string[],
pageIsDynamic?: boolean,
defaultRouteRegex?: ReturnType<typeof getNamedRouteRegex> | undefined
) {
// make sure to normalize req.url on Vercel to strip dynamic params
// from the query which are added during routing
if (pageIsDynamic && trustQuery && defaultRouteRegex) {
const _parsedUrl = parseUrl(req.url!, true)
delete (_parsedUrl as any).search
for (const key of Object.keys(_parsedUrl.query)) {
if (
(key !== NEXT_QUERY_PARAM_PREFIX &&
key.startsWith(NEXT_QUERY_PARAM_PREFIX)) ||
(paramKeys || Object.keys(defaultRouteRegex.groups)).includes(key)
) {
delete _parsedUrl.query[key]
}
}
req.url = formatUrl(_parsedUrl)
}
}
export function interpolateDynamicPath(
pathname: string,
params: ParsedUrlQuery,
defaultRouteRegex?: ReturnType<typeof getNamedRouteRegex> | undefined
) |
export function getUtils({
page,
i18n,
basePath,
rewrites,
pageIsDynamic,
trailingSlash,
caseSensitive,
}: {
page: string
i18n?: NextConfig['i18n']
basePath: string
rewrites: {
fallback?: ReadonlyArray<Rewrite>
afterFiles?: ReadonlyArray<Rewrite>
beforeFiles?: ReadonlyArray<Rewrite>
}
pageIsDynamic: boolean
trailingSlash?: boolean
caseSensitive: boolean
}) {
let defaultRouteRegex: ReturnType<typeof getNamedRouteRegex> | undefined
let dynamicRouteMatcher: RouteMatchFn | undefined
let defaultRouteMatches: ParsedUrlQuery | undefined
if (pageIsDynamic) {
defaultRouteRegex = getNamedRouteRegex(page, false)
dynamicRouteMatcher = getRouteMatcher(defaultRouteRegex)
defaultRouteMatches = dynamicRouteMatcher(page) as ParsedUrlQuery
}
function handleRewrites(
req: BaseNextRequest | IncomingMessage,
parsedUrl: UrlWithParsedQuery
) {
const rewriteParams = {}
let fsPathname = parsedUrl.pathname
const matchesPage = () => {
const fsPathnameNoSlash = removeTrailingSlash(fsPathname || '')
return (
fsPathnameNoSlash === removeTrailingSlash(page) ||
dynamicRouteMatcher?.(fsPathnameNoSlash)
)
}
const checkRewrite = (rewrite: Rewrite): boolean => {
const matcher = getPathMatch(
rewrite.source + (trailingSlash ? '(/)?' : ''),
{
removeUnnamedParams: true,
strict: true,
sensitive: !!caseSensitive,
}
)
let params = matcher(parsedUrl.pathname)
if ((rewrite.has || rewrite.missing) && params) {
const hasParams = matchHas(
req,
parsedUrl.query,
rewrite.has,
rewrite.missing
)
if (hasParams) {
Object.assign(params, hasParams)
} else {
params = false
}
}
if (params) {
const { parsedDestination, destQuery } = prepareDestination({
appendParamsToQuery: true,
destination: rewrite.destination,
params: params,
query: parsedUrl.query,
})
// if the rewrite destination is external break rewrite chain
if (parsedDestination.protocol) {
return true
}
Object.assign(rewriteParams, destQuery, params)
Object.assign(parsedUrl.query, parsedDestination.query)
delete (parsedDestination as any).query
Object.assign(parsedUrl, parsedDestination)
fsPathname = parsedUrl.pathname
if (basePath) {
fsPathname =
fsPathname!.replace(new RegExp(`^${basePath}`), '') || '/'
}
if (i18n) {
const destLocalePathResult = normalizeLocalePath(
fsPathname!,
i18n.locales
)
fsPathname = destLocalePathResult.pathname
parsedUrl.query.nextInternalLocale =
destLocalePathResult.detectedLocale || params.nextInternalLocale
}
if (fsPathname === page) {
return true
}
if (pageIsDynamic && dynamicRouteMatcher) {
const dynamicParams = dynamicRouteMatcher(fsPathname)
if (dynamicParams) {
parsedUrl.query = {
...parsedUrl.query,
...dynamicParams,
}
return true
}
}
}
return false
}
for (const rewrite of rewrites.beforeFiles || []) {
checkRewrite(rewrite)
}
if (fsPathname !== page) {
let finished = false
for (const rewrite of rewrites.afterFiles || []) {
finished = checkRewrite(rewrite)
if (finished) break
}
if (!finished && !matchesPage()) {
for (const rewrite of rewrites.fallback || []) {
finished = checkRewrite(rewrite)
if (finished) break
}
}
}
return rewriteParams
}
function handleBasePath(
req: BaseNextRequest | IncomingMessage,
parsedUrl: UrlWithParsedQuery
) {
// always strip the basePath if configured since it is required
req.url = req.url!.replace(new RegExp(`^${basePath}`), '') || '/'
parsedUrl.pathname =
parsedUrl.pathname!.replace(new RegExp(`^${basePath}`), '') || '/'
}
function getParamsFromRouteMatches(
req: BaseNextRequest | IncomingMessage,
renderOpts?: any,
detectedLocale?: string
) {
return getRouteMatcher(
(function () {
const { groups, routeKeys } = defaultRouteRegex!
return {
re: {
// Simulate a RegExp match from the \`req.url\` input
exec: (str: string) => {
const obj = Object.fromEntries(new URLSearchParams(str))
const matchesHasLocale =
i18n && detectedLocale && obj['1'] === detectedLocale
for (const key of Object.keys(obj)) {
const value = obj[key]
if (
key !== NEXT_QUERY_PARAM_PREFIX &&
key.startsWith(NEXT_QUERY_PARAM_PREFIX)
) {
const normalizedKey = key.substring(
NEXT_QUERY_PARAM_PREFIX.length
)
obj[normalizedKey] = value
delete obj[key]
}
}
// favor named matches if available
const routeKeyNames = Object.keys(routeKeys || {})
const filterLocaleItem = (val: string | string[] | undefined) => {
if (i18n) {
// locale items can be included in route-matches
// for fallback SSG pages so ensure they are
// filtered
const isCatchAll = Array.isArray(val)
const _val = isCatchAll ? val[0] : val
if (
typeof _val === 'string' &&
i18n.locales.some((item) => {
if (item.toLowerCase() === _val.toLowerCase()) {
detectedLocale = item
renderOpts.locale = detectedLocale
return true
}
return false
})
) {
// remove the locale item from the match
if (isCatchAll) {
;(val as string[]).splice(0, 1)
}
// the value is only a locale item and
// shouldn't be added
return isCatchAll ? val.length === 0 : true
}
}
return false
}
if (routeKeyNames.every((name) => obj[name])) {
return routeKeyNames.reduce((prev, keyName) => {
const paramName = routeKeys?.[keyName]
if (paramName && !filterLocaleItem(obj[keyName])) {
prev[groups[paramName].pos] = obj[keyName]
}
return prev
}, {} as any)
}
return Object.keys(obj).reduce((prev, key) => {
if (!filterLocaleItem(obj[key])) {
let normalizedKey = key
if (matchesHasLocale) {
normalizedKey = parseInt(key, 10) - 1 + ''
}
return Object.assign(prev, {
[normalizedKey]: obj[key],
})
}
return prev
}, {})
},
},
groups,
}
})() as any
)(req.headers['x-now-route-matches'] as string) as ParsedUrlQuery
}
function normalizeDynamicRouteParams(
params: ParsedUrlQuery,
ignoreOptional?: boolean
) {
let hasValidParams = true
if (!defaultRouteRegex) return { params, hasValidParams: false }
params = Object.keys(defaultRouteRegex.groups).reduce((prev, key) => {
let value: string | string[] | undefined = params[key]
if (typeof value === 'string') {
value = normalizeRscPath(value, true)
}
if (Array.isArray(value)) {
value = value.map((val) => {
if (typeof val === 'string') {
val = normalizeRscPath(val, true)
}
return val
})
}
// if the value matches the default value we can't rely
// on the parsed params, this is used to signal if we need
// to parse x-now-route-matches or not
const defaultValue = defaultRouteMatches![key]
const isOptional = defaultRouteRegex!.groups[key].optional
const isDefaultValue = Array.isArray(defaultValue)
? defaultValue.some((defaultVal) => {
return Array.isArray(value)
? value.some((val) => val.includes(defaultVal))
: value?.includes(defaultVal)
})
: value?.includes(defaultValue as string)
if (
isDefaultValue ||
(typeof value === 'undefined' && !(isOptional && ignoreOptional))
) {
hasValidParams = false
}
// non-provided optional values should be undefined so normalize
// them to undefined
if (
isOptional &&
(!value ||
(Array.isArray(value) &&
value.length === 1 &&
// fallback optional catch-all SSG pages have
// [[...paramName]] for the root path on Vercel
(value[0] === 'index' || value[0] === `[[...${key}]]`)))
) {
value = undefined
delete params[key]
}
// query values from the proxy aren't already split into arrays
// so make sure to normalize catch-all values
if (
value &&
typeof value === 'string' &&
defaultRouteRegex!.groups[key].repeat
) {
value = value.split('/')
}
if (value) {
prev[key] = value
}
return prev
}, {} as ParsedUrlQuery)
return {
params,
hasValidParams,
}
}
return {
handleRewrites,
handleBasePath,
defaultRouteRegex,
dynamicRouteMatcher,
defaultRouteMatches,
getParamsFromRouteMatches,
normalizeDynamicRouteParams,
normalizeVercelUrl: (
req: BaseNextRequest | IncomingMessage,
trustQuery: boolean,
paramKeys?: string[]
) =>
normalizeVercelUrl(
req,
trustQuery,
paramKeys,
pageIsDynamic,
defaultRouteRegex
),
interpolateDynamicPath: (
pathname: string,
params: Record<string, undefined | string | string[]>
) => interpolateDynamicPath(pathname, params, defaultRouteRegex),
}
}
| {
if (!defaultRouteRegex) return pathname
for (const param of Object.keys(defaultRouteRegex.groups)) {
const { optional, repeat } = defaultRouteRegex.groups[param]
let builtParam = `[${repeat ? '...' : ''}${param}]`
if (optional) {
builtParam = `[${builtParam}]`
}
const paramIdx = pathname!.indexOf(builtParam)
if (paramIdx > -1) {
let paramValue: string
const value = params[param]
if (Array.isArray(value)) {
paramValue = value.map((v) => v && encodeURIComponent(v)).join('/')
} else if (value) {
paramValue = encodeURIComponent(value)
} else {
paramValue = ''
}
pathname =
pathname.slice(0, paramIdx) +
paramValue +
pathname.slice(paramIdx + builtParam.length)
}
}
return pathname
} | identifier_body |
server-utils.ts | import type { IncomingMessage } from 'http'
import type { Rewrite } from '../lib/load-custom-routes'
import type { RouteMatchFn } from '../shared/lib/router/utils/route-matcher'
import type { NextConfig } from './config'
import type { BaseNextRequest } from './base-http'
import type { ParsedUrlQuery } from 'querystring'
import { format as formatUrl, UrlWithParsedQuery, parse as parseUrl } from 'url'
import { normalizeLocalePath } from '../shared/lib/i18n/normalize-locale-path'
import { getPathMatch } from '../shared/lib/router/utils/path-match'
import { getNamedRouteRegex } from '../shared/lib/router/utils/route-regex'
import { getRouteMatcher } from '../shared/lib/router/utils/route-matcher'
import {
matchHas,
prepareDestination,
} from '../shared/lib/router/utils/prepare-destination'
import { removeTrailingSlash } from '../shared/lib/router/utils/remove-trailing-slash'
import { normalizeRscPath } from '../shared/lib/router/utils/app-paths'
import { NEXT_QUERY_PARAM_PREFIX } from '../lib/constants'
export function normalizeVercelUrl(
req: BaseNextRequest | IncomingMessage,
trustQuery: boolean,
paramKeys?: string[],
pageIsDynamic?: boolean,
defaultRouteRegex?: ReturnType<typeof getNamedRouteRegex> | undefined
) {
// make sure to normalize req.url on Vercel to strip dynamic params
// from the query which are added during routing
if (pageIsDynamic && trustQuery && defaultRouteRegex) {
const _parsedUrl = parseUrl(req.url!, true)
delete (_parsedUrl as any).search
for (const key of Object.keys(_parsedUrl.query)) {
if (
(key !== NEXT_QUERY_PARAM_PREFIX &&
key.startsWith(NEXT_QUERY_PARAM_PREFIX)) ||
(paramKeys || Object.keys(defaultRouteRegex.groups)).includes(key)
) {
delete _parsedUrl.query[key]
}
}
req.url = formatUrl(_parsedUrl)
}
}
export function interpolateDynamicPath(
pathname: string,
params: ParsedUrlQuery,
defaultRouteRegex?: ReturnType<typeof getNamedRouteRegex> | undefined
) {
if (!defaultRouteRegex) return pathname
for (const param of Object.keys(defaultRouteRegex.groups)) {
const { optional, repeat } = defaultRouteRegex.groups[param]
let builtParam = `[${repeat ? '...' : ''}${param}]`
if (optional) {
builtParam = `[${builtParam}]`
}
const paramIdx = pathname!.indexOf(builtParam)
if (paramIdx > -1) {
let paramValue: string
const value = params[param]
if (Array.isArray(value)) {
paramValue = value.map((v) => v && encodeURIComponent(v)).join('/')
} else if (value) {
paramValue = encodeURIComponent(value)
} else {
paramValue = ''
}
pathname =
pathname.slice(0, paramIdx) +
paramValue +
pathname.slice(paramIdx + builtParam.length)
}
}
return pathname
}
export function getUtils({
page,
i18n,
basePath,
rewrites,
pageIsDynamic,
trailingSlash,
caseSensitive,
}: {
page: string
i18n?: NextConfig['i18n']
basePath: string
rewrites: {
fallback?: ReadonlyArray<Rewrite>
afterFiles?: ReadonlyArray<Rewrite>
beforeFiles?: ReadonlyArray<Rewrite>
}
pageIsDynamic: boolean
trailingSlash?: boolean
caseSensitive: boolean
}) {
let defaultRouteRegex: ReturnType<typeof getNamedRouteRegex> | undefined
let dynamicRouteMatcher: RouteMatchFn | undefined
let defaultRouteMatches: ParsedUrlQuery | undefined
if (pageIsDynamic) {
defaultRouteRegex = getNamedRouteRegex(page, false)
dynamicRouteMatcher = getRouteMatcher(defaultRouteRegex)
defaultRouteMatches = dynamicRouteMatcher(page) as ParsedUrlQuery
}
function handleRewrites(
req: BaseNextRequest | IncomingMessage,
parsedUrl: UrlWithParsedQuery
) {
const rewriteParams = {}
let fsPathname = parsedUrl.pathname
const matchesPage = () => {
const fsPathnameNoSlash = removeTrailingSlash(fsPathname || '')
return (
fsPathnameNoSlash === removeTrailingSlash(page) ||
dynamicRouteMatcher?.(fsPathnameNoSlash)
)
}
const checkRewrite = (rewrite: Rewrite): boolean => {
const matcher = getPathMatch(
rewrite.source + (trailingSlash ? '(/)?' : ''),
{
removeUnnamedParams: true,
strict: true,
sensitive: !!caseSensitive,
}
)
let params = matcher(parsedUrl.pathname)
if ((rewrite.has || rewrite.missing) && params) {
const hasParams = matchHas(
req,
parsedUrl.query,
rewrite.has,
rewrite.missing
)
if (hasParams) {
Object.assign(params, hasParams)
} else {
params = false
}
}
if (params) {
const { parsedDestination, destQuery } = prepareDestination({
appendParamsToQuery: true,
destination: rewrite.destination,
params: params,
query: parsedUrl.query,
})
// if the rewrite destination is external break rewrite chain
if (parsedDestination.protocol) {
return true
}
Object.assign(rewriteParams, destQuery, params)
Object.assign(parsedUrl.query, parsedDestination.query)
delete (parsedDestination as any).query
Object.assign(parsedUrl, parsedDestination)
fsPathname = parsedUrl.pathname
if (basePath) {
fsPathname =
fsPathname!.replace(new RegExp(`^${basePath}`), '') || '/'
}
if (i18n) {
const destLocalePathResult = normalizeLocalePath(
fsPathname!,
i18n.locales
)
fsPathname = destLocalePathResult.pathname
parsedUrl.query.nextInternalLocale =
destLocalePathResult.detectedLocale || params.nextInternalLocale
}
if (fsPathname === page) {
return true
}
if (pageIsDynamic && dynamicRouteMatcher) {
const dynamicParams = dynamicRouteMatcher(fsPathname)
if (dynamicParams) {
parsedUrl.query = {
...parsedUrl.query,
...dynamicParams,
}
return true
}
}
}
return false
}
for (const rewrite of rewrites.beforeFiles || []) {
checkRewrite(rewrite)
}
if (fsPathname !== page) {
let finished = false
for (const rewrite of rewrites.afterFiles || []) {
finished = checkRewrite(rewrite)
if (finished) break
}
if (!finished && !matchesPage()) {
for (const rewrite of rewrites.fallback || []) {
finished = checkRewrite(rewrite)
if (finished) break
}
}
}
return rewriteParams
}
function handleBasePath(
req: BaseNextRequest | IncomingMessage,
parsedUrl: UrlWithParsedQuery
) {
// always strip the basePath if configured since it is required
req.url = req.url!.replace(new RegExp(`^${basePath}`), '') || '/'
parsedUrl.pathname =
parsedUrl.pathname!.replace(new RegExp(`^${basePath}`), '') || '/'
}
function | (
req: BaseNextRequest | IncomingMessage,
renderOpts?: any,
detectedLocale?: string
) {
return getRouteMatcher(
(function () {
const { groups, routeKeys } = defaultRouteRegex!
return {
re: {
// Simulate a RegExp match from the \`req.url\` input
exec: (str: string) => {
const obj = Object.fromEntries(new URLSearchParams(str))
const matchesHasLocale =
i18n && detectedLocale && obj['1'] === detectedLocale
for (const key of Object.keys(obj)) {
const value = obj[key]
if (
key !== NEXT_QUERY_PARAM_PREFIX &&
key.startsWith(NEXT_QUERY_PARAM_PREFIX)
) {
const normalizedKey = key.substring(
NEXT_QUERY_PARAM_PREFIX.length
)
obj[normalizedKey] = value
delete obj[key]
}
}
// favor named matches if available
const routeKeyNames = Object.keys(routeKeys || {})
const filterLocaleItem = (val: string | string[] | undefined) => {
if (i18n) {
// locale items can be included in route-matches
// for fallback SSG pages so ensure they are
// filtered
const isCatchAll = Array.isArray(val)
const _val = isCatchAll ? val[0] : val
if (
typeof _val === 'string' &&
i18n.locales.some((item) => {
if (item.toLowerCase() === _val.toLowerCase()) {
detectedLocale = item
renderOpts.locale = detectedLocale
return true
}
return false
})
) {
// remove the locale item from the match
if (isCatchAll) {
;(val as string[]).splice(0, 1)
}
// the value is only a locale item and
// shouldn't be added
return isCatchAll ? val.length === 0 : true
}
}
return false
}
if (routeKeyNames.every((name) => obj[name])) {
return routeKeyNames.reduce((prev, keyName) => {
const paramName = routeKeys?.[keyName]
if (paramName && !filterLocaleItem(obj[keyName])) {
prev[groups[paramName].pos] = obj[keyName]
}
return prev
}, {} as any)
}
return Object.keys(obj).reduce((prev, key) => {
if (!filterLocaleItem(obj[key])) {
let normalizedKey = key
if (matchesHasLocale) {
normalizedKey = parseInt(key, 10) - 1 + ''
}
return Object.assign(prev, {
[normalizedKey]: obj[key],
})
}
return prev
}, {})
},
},
groups,
}
})() as any
)(req.headers['x-now-route-matches'] as string) as ParsedUrlQuery
}
function normalizeDynamicRouteParams(
params: ParsedUrlQuery,
ignoreOptional?: boolean
) {
let hasValidParams = true
if (!defaultRouteRegex) return { params, hasValidParams: false }
params = Object.keys(defaultRouteRegex.groups).reduce((prev, key) => {
let value: string | string[] | undefined = params[key]
if (typeof value === 'string') {
value = normalizeRscPath(value, true)
}
if (Array.isArray(value)) {
value = value.map((val) => {
if (typeof val === 'string') {
val = normalizeRscPath(val, true)
}
return val
})
}
// if the value matches the default value we can't rely
// on the parsed params, this is used to signal if we need
// to parse x-now-route-matches or not
const defaultValue = defaultRouteMatches![key]
const isOptional = defaultRouteRegex!.groups[key].optional
const isDefaultValue = Array.isArray(defaultValue)
? defaultValue.some((defaultVal) => {
return Array.isArray(value)
? value.some((val) => val.includes(defaultVal))
: value?.includes(defaultVal)
})
: value?.includes(defaultValue as string)
if (
isDefaultValue ||
(typeof value === 'undefined' && !(isOptional && ignoreOptional))
) {
hasValidParams = false
}
// non-provided optional values should be undefined so normalize
// them to undefined
if (
isOptional &&
(!value ||
(Array.isArray(value) &&
value.length === 1 &&
// fallback optional catch-all SSG pages have
// [[...paramName]] for the root path on Vercel
(value[0] === 'index' || value[0] === `[[...${key}]]`)))
) {
value = undefined
delete params[key]
}
// query values from the proxy aren't already split into arrays
// so make sure to normalize catch-all values
if (
value &&
typeof value === 'string' &&
defaultRouteRegex!.groups[key].repeat
) {
value = value.split('/')
}
if (value) {
prev[key] = value
}
return prev
}, {} as ParsedUrlQuery)
return {
params,
hasValidParams,
}
}
return {
handleRewrites,
handleBasePath,
defaultRouteRegex,
dynamicRouteMatcher,
defaultRouteMatches,
getParamsFromRouteMatches,
normalizeDynamicRouteParams,
normalizeVercelUrl: (
req: BaseNextRequest | IncomingMessage,
trustQuery: boolean,
paramKeys?: string[]
) =>
normalizeVercelUrl(
req,
trustQuery,
paramKeys,
pageIsDynamic,
defaultRouteRegex
),
interpolateDynamicPath: (
pathname: string,
params: Record<string, undefined | string | string[]>
) => interpolateDynamicPath(pathname, params, defaultRouteRegex),
}
}
| getParamsFromRouteMatches | identifier_name |
server-utils.ts | import type { IncomingMessage } from 'http'
import type { Rewrite } from '../lib/load-custom-routes'
import type { RouteMatchFn } from '../shared/lib/router/utils/route-matcher'
import type { NextConfig } from './config'
import type { BaseNextRequest } from './base-http'
import type { ParsedUrlQuery } from 'querystring'
import { format as formatUrl, UrlWithParsedQuery, parse as parseUrl } from 'url'
import { normalizeLocalePath } from '../shared/lib/i18n/normalize-locale-path'
import { getPathMatch } from '../shared/lib/router/utils/path-match'
import { getNamedRouteRegex } from '../shared/lib/router/utils/route-regex'
import { getRouteMatcher } from '../shared/lib/router/utils/route-matcher'
import {
matchHas,
prepareDestination,
} from '../shared/lib/router/utils/prepare-destination'
import { removeTrailingSlash } from '../shared/lib/router/utils/remove-trailing-slash'
import { normalizeRscPath } from '../shared/lib/router/utils/app-paths'
import { NEXT_QUERY_PARAM_PREFIX } from '../lib/constants'
export function normalizeVercelUrl(
req: BaseNextRequest | IncomingMessage,
trustQuery: boolean,
paramKeys?: string[],
pageIsDynamic?: boolean,
defaultRouteRegex?: ReturnType<typeof getNamedRouteRegex> | undefined
) {
// make sure to normalize req.url on Vercel to strip dynamic params
// from the query which are added during routing
if (pageIsDynamic && trustQuery && defaultRouteRegex) {
const _parsedUrl = parseUrl(req.url!, true)
delete (_parsedUrl as any).search
for (const key of Object.keys(_parsedUrl.query)) {
if (
(key !== NEXT_QUERY_PARAM_PREFIX &&
key.startsWith(NEXT_QUERY_PARAM_PREFIX)) ||
(paramKeys || Object.keys(defaultRouteRegex.groups)).includes(key)
) {
delete _parsedUrl.query[key]
}
}
req.url = formatUrl(_parsedUrl)
}
}
export function interpolateDynamicPath(
pathname: string,
params: ParsedUrlQuery,
defaultRouteRegex?: ReturnType<typeof getNamedRouteRegex> | undefined
) {
if (!defaultRouteRegex) return pathname
for (const param of Object.keys(defaultRouteRegex.groups)) {
const { optional, repeat } = defaultRouteRegex.groups[param]
let builtParam = `[${repeat ? '...' : ''}${param}]`
if (optional) {
builtParam = `[${builtParam}]`
}
const paramIdx = pathname!.indexOf(builtParam)
if (paramIdx > -1) {
let paramValue: string
const value = params[param]
if (Array.isArray(value)) {
paramValue = value.map((v) => v && encodeURIComponent(v)).join('/')
} else if (value) {
paramValue = encodeURIComponent(value)
} else {
paramValue = ''
}
pathname =
pathname.slice(0, paramIdx) +
paramValue +
pathname.slice(paramIdx + builtParam.length)
}
}
return pathname
}
export function getUtils({
page,
i18n,
basePath,
rewrites,
pageIsDynamic,
trailingSlash,
caseSensitive,
}: {
page: string
i18n?: NextConfig['i18n']
basePath: string
rewrites: {
fallback?: ReadonlyArray<Rewrite>
afterFiles?: ReadonlyArray<Rewrite>
beforeFiles?: ReadonlyArray<Rewrite>
}
pageIsDynamic: boolean
trailingSlash?: boolean
caseSensitive: boolean
}) {
let defaultRouteRegex: ReturnType<typeof getNamedRouteRegex> | undefined
let dynamicRouteMatcher: RouteMatchFn | undefined
let defaultRouteMatches: ParsedUrlQuery | undefined
if (pageIsDynamic) {
defaultRouteRegex = getNamedRouteRegex(page, false)
dynamicRouteMatcher = getRouteMatcher(defaultRouteRegex)
defaultRouteMatches = dynamicRouteMatcher(page) as ParsedUrlQuery
}
function handleRewrites(
req: BaseNextRequest | IncomingMessage,
parsedUrl: UrlWithParsedQuery
) {
const rewriteParams = {}
let fsPathname = parsedUrl.pathname
const matchesPage = () => {
const fsPathnameNoSlash = removeTrailingSlash(fsPathname || '')
return (
fsPathnameNoSlash === removeTrailingSlash(page) ||
dynamicRouteMatcher?.(fsPathnameNoSlash)
)
}
const checkRewrite = (rewrite: Rewrite): boolean => {
const matcher = getPathMatch(
rewrite.source + (trailingSlash ? '(/)?' : ''),
{
removeUnnamedParams: true,
strict: true,
sensitive: !!caseSensitive,
}
)
let params = matcher(parsedUrl.pathname)
if ((rewrite.has || rewrite.missing) && params) {
const hasParams = matchHas(
req,
parsedUrl.query,
rewrite.has,
rewrite.missing
)
if (hasParams) {
Object.assign(params, hasParams)
} else {
params = false
}
}
if (params) {
const { parsedDestination, destQuery } = prepareDestination({
appendParamsToQuery: true,
destination: rewrite.destination,
params: params,
query: parsedUrl.query,
})
// if the rewrite destination is external break rewrite chain
if (parsedDestination.protocol) |
Object.assign(rewriteParams, destQuery, params)
Object.assign(parsedUrl.query, parsedDestination.query)
delete (parsedDestination as any).query
Object.assign(parsedUrl, parsedDestination)
fsPathname = parsedUrl.pathname
if (basePath) {
fsPathname =
fsPathname!.replace(new RegExp(`^${basePath}`), '') || '/'
}
if (i18n) {
const destLocalePathResult = normalizeLocalePath(
fsPathname!,
i18n.locales
)
fsPathname = destLocalePathResult.pathname
parsedUrl.query.nextInternalLocale =
destLocalePathResult.detectedLocale || params.nextInternalLocale
}
if (fsPathname === page) {
return true
}
if (pageIsDynamic && dynamicRouteMatcher) {
const dynamicParams = dynamicRouteMatcher(fsPathname)
if (dynamicParams) {
parsedUrl.query = {
...parsedUrl.query,
...dynamicParams,
}
return true
}
}
}
return false
}
for (const rewrite of rewrites.beforeFiles || []) {
checkRewrite(rewrite)
}
if (fsPathname !== page) {
let finished = false
for (const rewrite of rewrites.afterFiles || []) {
finished = checkRewrite(rewrite)
if (finished) break
}
if (!finished && !matchesPage()) {
for (const rewrite of rewrites.fallback || []) {
finished = checkRewrite(rewrite)
if (finished) break
}
}
}
return rewriteParams
}
function handleBasePath(
req: BaseNextRequest | IncomingMessage,
parsedUrl: UrlWithParsedQuery
) {
// always strip the basePath if configured since it is required
req.url = req.url!.replace(new RegExp(`^${basePath}`), '') || '/'
parsedUrl.pathname =
parsedUrl.pathname!.replace(new RegExp(`^${basePath}`), '') || '/'
}
function getParamsFromRouteMatches(
req: BaseNextRequest | IncomingMessage,
renderOpts?: any,
detectedLocale?: string
) {
return getRouteMatcher(
(function () {
const { groups, routeKeys } = defaultRouteRegex!
return {
re: {
// Simulate a RegExp match from the \`req.url\` input
exec: (str: string) => {
const obj = Object.fromEntries(new URLSearchParams(str))
const matchesHasLocale =
i18n && detectedLocale && obj['1'] === detectedLocale
for (const key of Object.keys(obj)) {
const value = obj[key]
if (
key !== NEXT_QUERY_PARAM_PREFIX &&
key.startsWith(NEXT_QUERY_PARAM_PREFIX)
) {
const normalizedKey = key.substring(
NEXT_QUERY_PARAM_PREFIX.length
)
obj[normalizedKey] = value
delete obj[key]
}
}
// favor named matches if available
const routeKeyNames = Object.keys(routeKeys || {})
const filterLocaleItem = (val: string | string[] | undefined) => {
if (i18n) {
// locale items can be included in route-matches
// for fallback SSG pages so ensure they are
// filtered
const isCatchAll = Array.isArray(val)
const _val = isCatchAll ? val[0] : val
if (
typeof _val === 'string' &&
i18n.locales.some((item) => {
if (item.toLowerCase() === _val.toLowerCase()) {
detectedLocale = item
renderOpts.locale = detectedLocale
return true
}
return false
})
) {
// remove the locale item from the match
if (isCatchAll) {
;(val as string[]).splice(0, 1)
}
// the value is only a locale item and
// shouldn't be added
return isCatchAll ? val.length === 0 : true
}
}
return false
}
if (routeKeyNames.every((name) => obj[name])) {
return routeKeyNames.reduce((prev, keyName) => {
const paramName = routeKeys?.[keyName]
if (paramName && !filterLocaleItem(obj[keyName])) {
prev[groups[paramName].pos] = obj[keyName]
}
return prev
}, {} as any)
}
return Object.keys(obj).reduce((prev, key) => {
if (!filterLocaleItem(obj[key])) {
let normalizedKey = key
if (matchesHasLocale) {
normalizedKey = parseInt(key, 10) - 1 + ''
}
return Object.assign(prev, {
[normalizedKey]: obj[key],
})
}
return prev
}, {})
},
},
groups,
}
})() as any
)(req.headers['x-now-route-matches'] as string) as ParsedUrlQuery
}
function normalizeDynamicRouteParams(
params: ParsedUrlQuery,
ignoreOptional?: boolean
) {
let hasValidParams = true
if (!defaultRouteRegex) return { params, hasValidParams: false }
params = Object.keys(defaultRouteRegex.groups).reduce((prev, key) => {
let value: string | string[] | undefined = params[key]
if (typeof value === 'string') {
value = normalizeRscPath(value, true)
}
if (Array.isArray(value)) {
value = value.map((val) => {
if (typeof val === 'string') {
val = normalizeRscPath(val, true)
}
return val
})
}
// if the value matches the default value we can't rely
// on the parsed params, this is used to signal if we need
// to parse x-now-route-matches or not
const defaultValue = defaultRouteMatches![key]
const isOptional = defaultRouteRegex!.groups[key].optional
const isDefaultValue = Array.isArray(defaultValue)
? defaultValue.some((defaultVal) => {
return Array.isArray(value)
? value.some((val) => val.includes(defaultVal))
: value?.includes(defaultVal)
})
: value?.includes(defaultValue as string)
if (
isDefaultValue ||
(typeof value === 'undefined' && !(isOptional && ignoreOptional))
) {
hasValidParams = false
}
// non-provided optional values should be undefined so normalize
// them to undefined
if (
isOptional &&
(!value ||
(Array.isArray(value) &&
value.length === 1 &&
// fallback optional catch-all SSG pages have
// [[...paramName]] for the root path on Vercel
(value[0] === 'index' || value[0] === `[[...${key}]]`)))
) {
value = undefined
delete params[key]
}
// query values from the proxy aren't already split into arrays
// so make sure to normalize catch-all values
if (
value &&
typeof value === 'string' &&
defaultRouteRegex!.groups[key].repeat
) {
value = value.split('/')
}
if (value) {
prev[key] = value
}
return prev
}, {} as ParsedUrlQuery)
return {
params,
hasValidParams,
}
}
return {
handleRewrites,
handleBasePath,
defaultRouteRegex,
dynamicRouteMatcher,
defaultRouteMatches,
getParamsFromRouteMatches,
normalizeDynamicRouteParams,
normalizeVercelUrl: (
req: BaseNextRequest | IncomingMessage,
trustQuery: boolean,
paramKeys?: string[]
) =>
normalizeVercelUrl(
req,
trustQuery,
paramKeys,
pageIsDynamic,
defaultRouteRegex
),
interpolateDynamicPath: (
pathname: string,
params: Record<string, undefined | string | string[]>
) => interpolateDynamicPath(pathname, params, defaultRouteRegex),
}
}
| {
return true
} | conditional_block |
server-utils.ts | import type { IncomingMessage } from 'http'
import type { Rewrite } from '../lib/load-custom-routes'
import type { RouteMatchFn } from '../shared/lib/router/utils/route-matcher'
import type { NextConfig } from './config'
import type { BaseNextRequest } from './base-http'
import type { ParsedUrlQuery } from 'querystring'
import { format as formatUrl, UrlWithParsedQuery, parse as parseUrl } from 'url'
import { normalizeLocalePath } from '../shared/lib/i18n/normalize-locale-path'
import { getPathMatch } from '../shared/lib/router/utils/path-match'
import { getNamedRouteRegex } from '../shared/lib/router/utils/route-regex'
import { getRouteMatcher } from '../shared/lib/router/utils/route-matcher'
import {
matchHas,
prepareDestination,
} from '../shared/lib/router/utils/prepare-destination'
import { removeTrailingSlash } from '../shared/lib/router/utils/remove-trailing-slash'
import { normalizeRscPath } from '../shared/lib/router/utils/app-paths'
import { NEXT_QUERY_PARAM_PREFIX } from '../lib/constants'
export function normalizeVercelUrl(
req: BaseNextRequest | IncomingMessage,
trustQuery: boolean,
paramKeys?: string[],
pageIsDynamic?: boolean,
defaultRouteRegex?: ReturnType<typeof getNamedRouteRegex> | undefined
) {
// make sure to normalize req.url on Vercel to strip dynamic params
// from the query which are added during routing
if (pageIsDynamic && trustQuery && defaultRouteRegex) {
const _parsedUrl = parseUrl(req.url!, true)
delete (_parsedUrl as any).search
for (const key of Object.keys(_parsedUrl.query)) {
if (
(key !== NEXT_QUERY_PARAM_PREFIX &&
key.startsWith(NEXT_QUERY_PARAM_PREFIX)) ||
(paramKeys || Object.keys(defaultRouteRegex.groups)).includes(key)
) {
delete _parsedUrl.query[key]
}
}
req.url = formatUrl(_parsedUrl)
}
}
export function interpolateDynamicPath(
pathname: string,
params: ParsedUrlQuery,
defaultRouteRegex?: ReturnType<typeof getNamedRouteRegex> | undefined
) {
if (!defaultRouteRegex) return pathname
for (const param of Object.keys(defaultRouteRegex.groups)) {
const { optional, repeat } = defaultRouteRegex.groups[param]
let builtParam = `[${repeat ? '...' : ''}${param}]`
if (optional) {
builtParam = `[${builtParam}]`
}
const paramIdx = pathname!.indexOf(builtParam)
if (paramIdx > -1) {
let paramValue: string
const value = params[param]
if (Array.isArray(value)) {
paramValue = value.map((v) => v && encodeURIComponent(v)).join('/')
} else if (value) {
paramValue = encodeURIComponent(value)
} else {
paramValue = ''
}
pathname =
pathname.slice(0, paramIdx) +
paramValue +
pathname.slice(paramIdx + builtParam.length)
}
}
return pathname
}
export function getUtils({
page,
i18n,
basePath,
rewrites,
pageIsDynamic,
trailingSlash,
caseSensitive,
}: {
page: string
i18n?: NextConfig['i18n']
basePath: string
rewrites: {
fallback?: ReadonlyArray<Rewrite>
afterFiles?: ReadonlyArray<Rewrite>
beforeFiles?: ReadonlyArray<Rewrite>
}
pageIsDynamic: boolean
trailingSlash?: boolean
caseSensitive: boolean
}) {
let defaultRouteRegex: ReturnType<typeof getNamedRouteRegex> | undefined
let dynamicRouteMatcher: RouteMatchFn | undefined
let defaultRouteMatches: ParsedUrlQuery | undefined
if (pageIsDynamic) {
defaultRouteRegex = getNamedRouteRegex(page, false)
dynamicRouteMatcher = getRouteMatcher(defaultRouteRegex)
defaultRouteMatches = dynamicRouteMatcher(page) as ParsedUrlQuery
}
function handleRewrites(
req: BaseNextRequest | IncomingMessage,
parsedUrl: UrlWithParsedQuery
) {
const rewriteParams = {}
let fsPathname = parsedUrl.pathname
const matchesPage = () => {
const fsPathnameNoSlash = removeTrailingSlash(fsPathname || '')
return (
fsPathnameNoSlash === removeTrailingSlash(page) ||
dynamicRouteMatcher?.(fsPathnameNoSlash)
)
}
const checkRewrite = (rewrite: Rewrite): boolean => {
const matcher = getPathMatch(
rewrite.source + (trailingSlash ? '(/)?' : ''),
{
removeUnnamedParams: true,
strict: true,
sensitive: !!caseSensitive,
}
)
let params = matcher(parsedUrl.pathname) | rewrite.has,
rewrite.missing
)
if (hasParams) {
Object.assign(params, hasParams)
} else {
params = false
}
}
if (params) {
const { parsedDestination, destQuery } = prepareDestination({
appendParamsToQuery: true,
destination: rewrite.destination,
params: params,
query: parsedUrl.query,
})
// if the rewrite destination is external break rewrite chain
if (parsedDestination.protocol) {
return true
}
Object.assign(rewriteParams, destQuery, params)
Object.assign(parsedUrl.query, parsedDestination.query)
delete (parsedDestination as any).query
Object.assign(parsedUrl, parsedDestination)
fsPathname = parsedUrl.pathname
if (basePath) {
fsPathname =
fsPathname!.replace(new RegExp(`^${basePath}`), '') || '/'
}
if (i18n) {
const destLocalePathResult = normalizeLocalePath(
fsPathname!,
i18n.locales
)
fsPathname = destLocalePathResult.pathname
parsedUrl.query.nextInternalLocale =
destLocalePathResult.detectedLocale || params.nextInternalLocale
}
if (fsPathname === page) {
return true
}
if (pageIsDynamic && dynamicRouteMatcher) {
const dynamicParams = dynamicRouteMatcher(fsPathname)
if (dynamicParams) {
parsedUrl.query = {
...parsedUrl.query,
...dynamicParams,
}
return true
}
}
}
return false
}
for (const rewrite of rewrites.beforeFiles || []) {
checkRewrite(rewrite)
}
if (fsPathname !== page) {
let finished = false
for (const rewrite of rewrites.afterFiles || []) {
finished = checkRewrite(rewrite)
if (finished) break
}
if (!finished && !matchesPage()) {
for (const rewrite of rewrites.fallback || []) {
finished = checkRewrite(rewrite)
if (finished) break
}
}
}
return rewriteParams
}
function handleBasePath(
req: BaseNextRequest | IncomingMessage,
parsedUrl: UrlWithParsedQuery
) {
// always strip the basePath if configured since it is required
req.url = req.url!.replace(new RegExp(`^${basePath}`), '') || '/'
parsedUrl.pathname =
parsedUrl.pathname!.replace(new RegExp(`^${basePath}`), '') || '/'
}
function getParamsFromRouteMatches(
req: BaseNextRequest | IncomingMessage,
renderOpts?: any,
detectedLocale?: string
) {
return getRouteMatcher(
(function () {
const { groups, routeKeys } = defaultRouteRegex!
return {
re: {
// Simulate a RegExp match from the \`req.url\` input
exec: (str: string) => {
const obj = Object.fromEntries(new URLSearchParams(str))
const matchesHasLocale =
i18n && detectedLocale && obj['1'] === detectedLocale
for (const key of Object.keys(obj)) {
const value = obj[key]
if (
key !== NEXT_QUERY_PARAM_PREFIX &&
key.startsWith(NEXT_QUERY_PARAM_PREFIX)
) {
const normalizedKey = key.substring(
NEXT_QUERY_PARAM_PREFIX.length
)
obj[normalizedKey] = value
delete obj[key]
}
}
// favor named matches if available
const routeKeyNames = Object.keys(routeKeys || {})
const filterLocaleItem = (val: string | string[] | undefined) => {
if (i18n) {
// locale items can be included in route-matches
// for fallback SSG pages so ensure they are
// filtered
const isCatchAll = Array.isArray(val)
const _val = isCatchAll ? val[0] : val
if (
typeof _val === 'string' &&
i18n.locales.some((item) => {
if (item.toLowerCase() === _val.toLowerCase()) {
detectedLocale = item
renderOpts.locale = detectedLocale
return true
}
return false
})
) {
// remove the locale item from the match
if (isCatchAll) {
;(val as string[]).splice(0, 1)
}
// the value is only a locale item and
// shouldn't be added
return isCatchAll ? val.length === 0 : true
}
}
return false
}
if (routeKeyNames.every((name) => obj[name])) {
return routeKeyNames.reduce((prev, keyName) => {
const paramName = routeKeys?.[keyName]
if (paramName && !filterLocaleItem(obj[keyName])) {
prev[groups[paramName].pos] = obj[keyName]
}
return prev
}, {} as any)
}
return Object.keys(obj).reduce((prev, key) => {
if (!filterLocaleItem(obj[key])) {
let normalizedKey = key
if (matchesHasLocale) {
normalizedKey = parseInt(key, 10) - 1 + ''
}
return Object.assign(prev, {
[normalizedKey]: obj[key],
})
}
return prev
}, {})
},
},
groups,
}
})() as any
)(req.headers['x-now-route-matches'] as string) as ParsedUrlQuery
}
function normalizeDynamicRouteParams(
params: ParsedUrlQuery,
ignoreOptional?: boolean
) {
let hasValidParams = true
if (!defaultRouteRegex) return { params, hasValidParams: false }
params = Object.keys(defaultRouteRegex.groups).reduce((prev, key) => {
let value: string | string[] | undefined = params[key]
if (typeof value === 'string') {
value = normalizeRscPath(value, true)
}
if (Array.isArray(value)) {
value = value.map((val) => {
if (typeof val === 'string') {
val = normalizeRscPath(val, true)
}
return val
})
}
// if the value matches the default value we can't rely
// on the parsed params, this is used to signal if we need
// to parse x-now-route-matches or not
const defaultValue = defaultRouteMatches![key]
const isOptional = defaultRouteRegex!.groups[key].optional
const isDefaultValue = Array.isArray(defaultValue)
? defaultValue.some((defaultVal) => {
return Array.isArray(value)
? value.some((val) => val.includes(defaultVal))
: value?.includes(defaultVal)
})
: value?.includes(defaultValue as string)
if (
isDefaultValue ||
(typeof value === 'undefined' && !(isOptional && ignoreOptional))
) {
hasValidParams = false
}
// non-provided optional values should be undefined so normalize
// them to undefined
if (
isOptional &&
(!value ||
(Array.isArray(value) &&
value.length === 1 &&
// fallback optional catch-all SSG pages have
// [[...paramName]] for the root path on Vercel
(value[0] === 'index' || value[0] === `[[...${key}]]`)))
) {
value = undefined
delete params[key]
}
// query values from the proxy aren't already split into arrays
// so make sure to normalize catch-all values
if (
value &&
typeof value === 'string' &&
defaultRouteRegex!.groups[key].repeat
) {
value = value.split('/')
}
if (value) {
prev[key] = value
}
return prev
}, {} as ParsedUrlQuery)
return {
params,
hasValidParams,
}
}
return {
handleRewrites,
handleBasePath,
defaultRouteRegex,
dynamicRouteMatcher,
defaultRouteMatches,
getParamsFromRouteMatches,
normalizeDynamicRouteParams,
normalizeVercelUrl: (
req: BaseNextRequest | IncomingMessage,
trustQuery: boolean,
paramKeys?: string[]
) =>
normalizeVercelUrl(
req,
trustQuery,
paramKeys,
pageIsDynamic,
defaultRouteRegex
),
interpolateDynamicPath: (
pathname: string,
params: Record<string, undefined | string | string[]>
) => interpolateDynamicPath(pathname, params, defaultRouteRegex),
}
} |
if ((rewrite.has || rewrite.missing) && params) {
const hasParams = matchHas(
req,
parsedUrl.query, | random_line_split |
main.rs | use git2::{Commit, Oid, Repository};
use mailmap::{Author, Mailmap};
use regex::{Regex, RegexBuilder};
use semver::Version;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::Mutex;
use std::{cmp, fmt, str};
use config::Config;
use reviewers::Reviewers;
mod config;
mod error;
mod reviewers;
mod site;
use error::ErrorContext;
trait ToAuthor {
fn from_sig(sig: git2::Signature<'_>) -> Author;
}
impl ToAuthor for Author {
fn from_sig(sig: git2::Signature<'_>) -> Author {
let name = sig.name().unwrap_or_else(|| panic!("no name for {}", sig));
let email = sig
.email()
.unwrap_or_else(|| panic!("no email for {}", sig));
Author {
name: name.to_string(),
email: email.to_string(),
}
}
}
#[derive(Clone)]
pub struct AuthorMap {
// author -> [commits]
map: HashMap<Author, HashSet<Oid>>,
}
impl AuthorMap {
fn new() -> Self {
AuthorMap {
map: HashMap::new(),
}
}
fn add(&mut self, author: Author, commit: Oid) {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.insert(commit);
}
fn iter(&self) -> impl Iterator<Item = (&Author, usize)> {
self.map.iter().map(|(k, v)| (k, v.len()))
}
fn extend(&mut self, other: Self) {
for (author, set) in other.map {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.extend(set);
}
}
#[must_use]
fn difference(&self, other: &AuthorMap) -> AuthorMap {
let mut new = AuthorMap::new();
new.map.reserve(self.map.len());
for (author, set) in self.map.iter() {
if let Some(other_set) = other.map.get(&author) {
let diff: HashSet<_> = set.difference(other_set).cloned().collect();
if !diff.is_empty() {
new.map.insert(author.clone(), diff);
}
} else |
}
new
}
}
fn git(args: &[&str]) -> Result<String, Box<dyn std::error::Error>> {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.stdout(Stdio::piped());
let out = cmd.spawn();
let mut out = match out {
Ok(v) => v,
Err(err) => {
panic!("Failed to spawn command `{:?}`: {:?}", cmd, err);
}
};
let status = out.wait().expect("waited");
if !status.success() {
eprintln!("failed to run `git {:?}`: {:?}", args, status);
return Err(std::io::Error::from(std::io::ErrorKind::Other).into());
}
let mut stdout = Vec::new();
out.stdout.unwrap().read_to_end(&mut stdout).unwrap();
Ok(String::from_utf8_lossy(&stdout).into_owned())
}
lazy_static::lazy_static! {
static ref UPDATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn update_repo(url: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut slug = url;
let prefix = "https://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "git://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "https://git.chromium.org/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let suffix = ".git";
if slug.ends_with(suffix) {
slug = &slug[..slug.len() - suffix.len()];
}
let path_s = format!("repos/{}", slug);
let path = PathBuf::from(&path_s);
if !UPDATED.lock().unwrap().insert(slug.to_string()) {
return Ok(path);
}
if path.exists() {
if should_update() {
// we know for sure the path_s does *not* contain .git as we strip it, so this is a safe
// temp directory
let tmp = format!("{}.git", path_s);
std::fs::rename(&path, &tmp)?;
git(&[
"clone",
"--bare",
"--dissociate",
"--reference",
&tmp,
&url,
&path_s,
])?;
std::fs::remove_dir_all(&tmp)?;
}
} else {
git(&["clone", "--bare", &url, &path_s])?;
}
Ok(path)
}
fn should_update() -> bool {
std::env::args_os().nth(1).unwrap_or_default() == "--refresh"
}
#[derive(Clone)]
pub struct VersionTag {
name: String,
version: Version,
raw_tag: String,
commit: Oid,
in_progress: bool,
}
impl fmt::Display for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
impl std::hash::Hash for VersionTag {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.version.hash(state);
}
}
impl cmp::Eq for VersionTag {}
impl cmp::PartialEq for VersionTag {
fn eq(&self, other: &Self) -> bool {
self.version == other.version
}
}
impl cmp::PartialOrd for VersionTag {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(&other))
}
}
impl cmp::Ord for VersionTag {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.version.cmp(&other.version)
}
}
impl fmt::Debug for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
fn get_versions(repo: &Repository) -> Result<Vec<VersionTag>, Box<dyn std::error::Error>> {
let tags = repo
.tag_names(None)?
.into_iter()
.filter_map(|v| v)
.map(|v| v.to_owned())
.collect::<Vec<_>>();
let mut versions = tags
.iter()
.filter_map(|tag| {
Version::parse(&tag)
.or_else(|_| Version::parse(&format!("{}.0", tag)))
.ok()
.map(|v| VersionTag {
name: format!("Rust {}", v),
version: v,
raw_tag: tag.clone(),
commit: repo
.revparse_single(&tag)
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: false,
})
})
.collect::<Vec<_>>();
versions.sort();
Ok(versions)
}
fn commit_coauthors(commit: &Commit) -> Vec<Author> {
let mut coauthors = vec![];
if let Some(msg) = commit.message_raw() {
lazy_static::lazy_static! {
static ref RE: Regex =
RegexBuilder::new(r"^Co-authored-by: (?P<name>.*) <(?P<email>.*)>")
.case_insensitive(true)
.build()
.unwrap();
}
for line in msg.lines().rev() {
if line.starts_with("Co-authored-by") {
if let Some(caps) = RE.captures(line) {
coauthors.push(Author {
name: caps["name"].to_string(),
email: caps["email"].to_string(),
});
}
}
}
}
coauthors
}
fn build_author_map(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
match build_author_map_(repo, reviewers, mailmap, from, to) {
Ok(o) => Ok(o),
Err(err) => Err(ErrorContext(
format!(
"build_author_map(repo={}, from={:?}, to={:?})",
repo.path().display(),
from,
to
),
err,
))?,
}
}
// Note this is not the bors merge commit of a rollup
fn is_rollup_commit(commit: &Commit) -> bool {
let summary = commit.summary().unwrap_or("");
summary.starts_with("Rollup merge of #")
}
fn parse_bors_reviewer(
reviewers: &Reviewers,
repo: &Repository,
commit: &Commit,
) -> Result<Option<Vec<Author>>, ErrorContext> {
if commit.author().name_bytes() != b"bors" || commit.committer().name_bytes() != b"bors" {
if commit.committer().name_bytes() != b"GitHub" || !is_rollup_commit(commit) {
return Ok(None);
}
}
// Skip non-merge commits
if commit.parents().count() == 1 {
return Ok(None);
}
let to_author = |list: &str| -> Result<Vec<Author>, ErrorContext> {
list.trim_end_matches('.')
.split(|c| c == ',' || c == '+')
.map(|r| r.trim_start_matches('@'))
.map(|r| r.trim_end_matches('`'))
.map(|r| r.trim())
.filter(|r| !r.is_empty())
.filter(|r| *r != "<try>")
.inspect(|r| {
if !r.chars().all(|c| {
c.is_alphabetic() || c.is_digit(10) || c == '-' || c == '_' || c == '='
}) {
eprintln!(
"warning: to_author for {} contained non-alphabetic characters: {:?}",
commit.id(),
r
);
}
})
.map(|r| {
reviewers.to_author(r).map_err(|e| {
ErrorContext(
format!("reviewer: {:?}, commit: {}", r, commit.id()),
e.into(),
)
})
})
.flat_map(|r| r.transpose())
.collect::<Result<Vec<_>, ErrorContext>>()
};
let message = commit.message().unwrap_or("");
let mut reviewers = if let Some(line) = message.lines().find(|l| l.contains(" r=")) {
let start = line.find("r=").unwrap() + 2;
let end = line[start..]
.find(' ')
.map(|pos| pos + start)
.unwrap_or(line.len());
to_author(&line[start..end])?
} else if let Some(line) = message.lines().find(|l| l.starts_with("Reviewed-by: ")) {
let line = &line["Reviewed-by: ".len()..];
to_author(&line)?
} else {
// old bors didn't include r=
if message != "automated merge\n" {
panic!(
"expected reviewer for bors merge commit {} in {:?}, message: {:?}",
commit.id(),
repo.path(),
message
);
}
return Ok(None);
};
reviewers.sort();
reviewers.dedup();
Ok(Some(reviewers))
}
fn build_author_map_(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let mut walker = repo.revwalk()?;
if repo.revparse_single(to).is_err() {
// If a commit is not found, try fetching it.
git(&[
"--git-dir",
repo.path().to_str().unwrap(),
"fetch",
"origin",
to,
])?;
}
if from == "" {
let to = repo.revparse_single(to)?.peel_to_commit()?.id();
walker.push(to)?;
} else {
walker.push_range(&format!("{}..{}", from, to))?;
}
let mut author_map = AuthorMap::new();
for oid in walker {
let oid = oid?;
let commit = repo.find_commit(oid)?;
let mut commit_authors = Vec::new();
if !is_rollup_commit(&commit) {
// We ignore the author of rollup-merge commits, and account for
// that author once by counting the reviewer of all bors merges. For
// rollups, we consider that this is the most relevant person, which
// is usually the case.
//
// Otherwise, a single rollup with N PRs attributes N commits to the author of the
// rollup, which isn't fair.
commit_authors.push(Author::from_sig(commit.author()));
}
match parse_bors_reviewer(&reviewers, &repo, &commit) {
Ok(Some(reviewers)) => commit_authors.extend(reviewers),
Ok(None) => {}
Err(ErrorContext(msg, e)) => {
if e.is::<reviewers::UnknownReviewer>() {
eprintln!("Unknown reviewer: {}", ErrorContext(msg, e));
} else {
return Err(ErrorContext(msg, e).into());
}
}
}
commit_authors.extend(commit_coauthors(&commit));
for author in commit_authors {
let author = mailmap.canonicalize(&author);
author_map.add(author, oid);
}
}
Ok(author_map)
}
fn mailmap_from_repo(repo: &git2::Repository) -> Result<Mailmap, Box<dyn std::error::Error>> {
let file = String::from_utf8(
repo.revparse_single("master")?
.peel_to_commit()?
.tree()?
.get_name(".mailmap")
.unwrap()
.to_object(&repo)?
.peel_to_blob()?
.content()
.into(),
)?;
Mailmap::from_string(file)
}
fn up_to_release(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
to: &VersionTag,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let to_commit = repo.find_commit(to.commit).map_err(|e| {
ErrorContext(
format!(
"find_commit: repo={}, commit={}",
repo.path().display(),
to.commit
),
Box::new(e),
)
})?;
let modules = get_submodules(&repo, &to_commit)?;
let mut author_map = build_author_map(&repo, &reviewers, &mailmap, "", &to.raw_tag)
.map_err(|e| ErrorContext(format!("Up to {}", to), e))?;
for module in &modules {
if let Ok(path) = update_repo(&module.repository) {
let subrepo = Repository::open(&path)?;
let submap = build_author_map(
&subrepo,
&reviewers,
&mailmap,
"",
&module.commit.to_string(),
)?;
author_map.extend(submap);
}
}
Ok(author_map)
}
fn generate_thanks() -> Result<BTreeMap<VersionTag, AuthorMap>, Box<dyn std::error::Error>> {
let path = update_repo("https://github.com/rust-lang/rust.git")?;
let repo = git2::Repository::open(&path)?;
let mailmap = mailmap_from_repo(&repo)?;
let reviewers = Reviewers::new()?;
let mut versions = get_versions(&repo)?;
let last_full_stable = versions
.iter()
.rfind(|v| v.raw_tag.ends_with(".0"))
.unwrap()
.version
.clone();
versions.push(VersionTag {
name: String::from("Beta"),
version: {
let mut last = last_full_stable.clone();
last.minor += 1;
last
},
raw_tag: String::from("beta"),
commit: repo
.revparse_single("beta")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
versions.push(VersionTag {
name: String::from("Master"),
version: {
// master is plus 1 minor versions off of beta, which we just pushed
let mut last = last_full_stable.clone();
last.minor += 2;
last
},
raw_tag: String::from("master"),
commit: repo
.revparse_single("master")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
let mut version_map = BTreeMap::new();
let mut cache = HashMap::new();
for (idx, version) in versions.iter().enumerate() {
let previous = if let Some(v) = idx.checked_sub(1).map(|idx| &versions[idx]) {
v
} else {
let author_map = build_author_map(&repo, &reviewers, &mailmap, "", &version.raw_tag)?;
version_map.insert(version.clone(), author_map);
continue;
};
eprintln!("Processing {:?} to {:?}", previous, version);
cache.insert(
version,
up_to_release(&repo, &reviewers, &mailmap, &version)?,
);
let previous = match cache.remove(&previous) {
Some(v) => v,
None => up_to_release(&repo, &reviewers, &mailmap, &previous)?,
};
let current = cache.get(&version).unwrap();
// Remove commits reachable from the previous release.
let only_current = current.difference(&previous);
version_map.insert(version.clone(), only_current);
}
Ok(version_map)
}
fn run() -> Result<(), Box<dyn std::error::Error>> {
let by_version = generate_thanks()?;
let mut all_time = by_version.values().next().unwrap().clone();
for map in by_version.values().skip(1) {
all_time.extend(map.clone());
}
site::render(by_version, all_time)?;
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("Error: {}", err);
let mut cur = &*err;
while let Some(cause) = cur.source() {
eprintln!("\tcaused by: {}", cause);
cur = cause;
}
std::mem::drop(cur);
std::process::exit(1);
}
}
#[derive(Debug)]
struct Submodule {
path: PathBuf,
commit: Oid,
// url
repository: String,
}
fn get_submodules(
repo: &Repository,
at: &Commit,
) -> Result<Vec<Submodule>, Box<dyn std::error::Error>> {
let submodule_cfg = modules_file(&repo, &at)?;
let submodule_cfg = Config::parse(&submodule_cfg)?;
let mut path_to_url = HashMap::new();
let entries = submodule_cfg.entries(None)?;
for entry in &entries {
let entry = entry?;
let name = entry.name().unwrap();
if name.ends_with(".path") {
let url = name.replace(".path", ".url");
let url = submodule_cfg.get_string(&url).unwrap();
path_to_url.insert(entry.value().unwrap().to_owned(), url);
}
}
let mut submodules = Vec::new();
let tree = at.tree()?;
for (path, url) in &path_to_url {
let path = Path::new(&path);
let entry = tree.get_path(&path);
// the submodule may not actually exist
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
assert_eq!(entry.kind().unwrap(), git2::ObjectType::Commit);
submodules.push(Submodule {
path: path.to_owned(),
commit: entry.id(),
repository: url.to_owned(),
});
}
submodules.retain(|s| {
let is_rust =
s.repository.contains("rust-lang") || s.repository.contains("rust-lang-nursery");
let exclude = vec![
"https://github.com/rust-lang/llvm.git",
"https://github.com/rust-lang/llvm-project.git",
"https://github.com/rust-lang/lld.git",
"https://github.com/rust-lang-nursery/clang.git",
"https://github.com/rust-lang-nursery/lldb.git",
"https://github.com/rust-lang/libuv.git",
"https://github.com/rust-lang/gyp.git",
"https://github.com/rust-lang/jemalloc.git",
"https://github.com/rust-lang/compiler-rt.git",
"https://github.com/rust-lang/hoedown.git",
];
is_rust
&& !exclude.contains(&s.repository.as_str())
&& !exclude.contains(&&*format!("{}.git", s.repository))
});
Ok(submodules)
}
fn modules_file(repo: &Repository, at: &Commit) -> Result<String, Box<dyn std::error::Error>> {
if let Some(modules) = at.tree()?.get_name(".gitmodules") {
Ok(String::from_utf8(
modules.to_object(&repo)?.peel_to_blob()?.content().into(),
)?)
} else {
return Ok(String::new());
}
}
| {
new.map.insert(author.clone(), set.clone());
} | conditional_block |
main.rs | use git2::{Commit, Oid, Repository};
use mailmap::{Author, Mailmap};
use regex::{Regex, RegexBuilder};
use semver::Version;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::Mutex;
use std::{cmp, fmt, str};
use config::Config;
use reviewers::Reviewers;
mod config;
mod error;
mod reviewers;
mod site;
use error::ErrorContext;
trait ToAuthor {
fn from_sig(sig: git2::Signature<'_>) -> Author;
}
impl ToAuthor for Author {
fn from_sig(sig: git2::Signature<'_>) -> Author {
let name = sig.name().unwrap_or_else(|| panic!("no name for {}", sig));
let email = sig
.email()
.unwrap_or_else(|| panic!("no email for {}", sig));
Author {
name: name.to_string(),
email: email.to_string(),
}
}
}
#[derive(Clone)]
pub struct AuthorMap {
// author -> [commits]
map: HashMap<Author, HashSet<Oid>>,
}
impl AuthorMap {
fn new() -> Self {
AuthorMap {
map: HashMap::new(),
}
}
fn add(&mut self, author: Author, commit: Oid) {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.insert(commit);
}
fn iter(&self) -> impl Iterator<Item = (&Author, usize)> {
self.map.iter().map(|(k, v)| (k, v.len()))
}
fn extend(&mut self, other: Self) {
for (author, set) in other.map {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.extend(set);
}
}
#[must_use]
fn difference(&self, other: &AuthorMap) -> AuthorMap {
let mut new = AuthorMap::new();
new.map.reserve(self.map.len());
for (author, set) in self.map.iter() {
if let Some(other_set) = other.map.get(&author) {
let diff: HashSet<_> = set.difference(other_set).cloned().collect();
if !diff.is_empty() {
new.map.insert(author.clone(), diff);
}
} else {
new.map.insert(author.clone(), set.clone());
}
}
new
}
}
fn git(args: &[&str]) -> Result<String, Box<dyn std::error::Error>> {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.stdout(Stdio::piped());
let out = cmd.spawn();
let mut out = match out {
Ok(v) => v,
Err(err) => {
panic!("Failed to spawn command `{:?}`: {:?}", cmd, err);
}
};
let status = out.wait().expect("waited");
if !status.success() {
eprintln!("failed to run `git {:?}`: {:?}", args, status);
return Err(std::io::Error::from(std::io::ErrorKind::Other).into());
}
let mut stdout = Vec::new();
out.stdout.unwrap().read_to_end(&mut stdout).unwrap();
Ok(String::from_utf8_lossy(&stdout).into_owned())
}
lazy_static::lazy_static! {
static ref UPDATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn update_repo(url: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut slug = url;
let prefix = "https://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "git://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "https://git.chromium.org/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let suffix = ".git";
if slug.ends_with(suffix) {
slug = &slug[..slug.len() - suffix.len()];
}
let path_s = format!("repos/{}", slug);
let path = PathBuf::from(&path_s);
if !UPDATED.lock().unwrap().insert(slug.to_string()) {
return Ok(path);
}
if path.exists() {
if should_update() {
// we know for sure the path_s does *not* contain .git as we strip it, so this is a safe
// temp directory
let tmp = format!("{}.git", path_s);
std::fs::rename(&path, &tmp)?;
git(&[
"clone",
"--bare",
"--dissociate",
"--reference",
&tmp,
&url,
&path_s,
])?;
std::fs::remove_dir_all(&tmp)?;
}
} else {
git(&["clone", "--bare", &url, &path_s])?;
}
Ok(path)
}
fn should_update() -> bool {
std::env::args_os().nth(1).unwrap_or_default() == "--refresh"
}
#[derive(Clone)]
pub struct VersionTag {
name: String,
version: Version,
raw_tag: String,
commit: Oid,
in_progress: bool,
}
impl fmt::Display for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
impl std::hash::Hash for VersionTag {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.version.hash(state);
}
}
impl cmp::Eq for VersionTag {}
impl cmp::PartialEq for VersionTag {
fn eq(&self, other: &Self) -> bool {
self.version == other.version
}
}
impl cmp::PartialOrd for VersionTag {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(&other))
}
}
impl cmp::Ord for VersionTag {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.version.cmp(&other.version)
}
}
impl fmt::Debug for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
fn get_versions(repo: &Repository) -> Result<Vec<VersionTag>, Box<dyn std::error::Error>> {
let tags = repo
.tag_names(None)?
.into_iter()
.filter_map(|v| v)
.map(|v| v.to_owned())
.collect::<Vec<_>>();
let mut versions = tags
.iter()
.filter_map(|tag| {
Version::parse(&tag)
.or_else(|_| Version::parse(&format!("{}.0", tag)))
.ok()
.map(|v| VersionTag {
name: format!("Rust {}", v),
version: v,
raw_tag: tag.clone(),
commit: repo
.revparse_single(&tag)
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: false,
})
})
.collect::<Vec<_>>();
versions.sort();
Ok(versions)
}
fn commit_coauthors(commit: &Commit) -> Vec<Author> {
let mut coauthors = vec![];
if let Some(msg) = commit.message_raw() {
lazy_static::lazy_static! {
static ref RE: Regex =
RegexBuilder::new(r"^Co-authored-by: (?P<name>.*) <(?P<email>.*)>")
.case_insensitive(true)
.build()
.unwrap();
}
for line in msg.lines().rev() {
if line.starts_with("Co-authored-by") {
if let Some(caps) = RE.captures(line) {
coauthors.push(Author {
name: caps["name"].to_string(),
email: caps["email"].to_string(),
});
}
}
}
}
coauthors
}
fn build_author_map(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
match build_author_map_(repo, reviewers, mailmap, from, to) {
Ok(o) => Ok(o),
Err(err) => Err(ErrorContext(
format!(
"build_author_map(repo={}, from={:?}, to={:?})",
repo.path().display(),
from,
to
),
err,
))?,
}
}
// Note this is not the bors merge commit of a rollup
fn is_rollup_commit(commit: &Commit) -> bool {
let summary = commit.summary().unwrap_or("");
summary.starts_with("Rollup merge of #")
}
fn parse_bors_reviewer(
reviewers: &Reviewers,
repo: &Repository,
commit: &Commit,
) -> Result<Option<Vec<Author>>, ErrorContext> {
if commit.author().name_bytes() != b"bors" || commit.committer().name_bytes() != b"bors" {
if commit.committer().name_bytes() != b"GitHub" || !is_rollup_commit(commit) {
return Ok(None);
}
}
// Skip non-merge commits
if commit.parents().count() == 1 {
return Ok(None);
}
let to_author = |list: &str| -> Result<Vec<Author>, ErrorContext> {
list.trim_end_matches('.')
.split(|c| c == ',' || c == '+')
.map(|r| r.trim_start_matches('@'))
.map(|r| r.trim_end_matches('`'))
.map(|r| r.trim())
.filter(|r| !r.is_empty())
.filter(|r| *r != "<try>")
.inspect(|r| {
if !r.chars().all(|c| {
c.is_alphabetic() || c.is_digit(10) || c == '-' || c == '_' || c == '='
}) {
eprintln!(
"warning: to_author for {} contained non-alphabetic characters: {:?}",
commit.id(),
r
);
}
})
.map(|r| {
reviewers.to_author(r).map_err(|e| {
ErrorContext(
format!("reviewer: {:?}, commit: {}", r, commit.id()),
e.into(),
)
})
})
.flat_map(|r| r.transpose())
.collect::<Result<Vec<_>, ErrorContext>>()
};
let message = commit.message().unwrap_or("");
let mut reviewers = if let Some(line) = message.lines().find(|l| l.contains(" r=")) {
let start = line.find("r=").unwrap() + 2;
let end = line[start..]
.find(' ')
.map(|pos| pos + start)
.unwrap_or(line.len());
to_author(&line[start..end])?
} else if let Some(line) = message.lines().find(|l| l.starts_with("Reviewed-by: ")) {
let line = &line["Reviewed-by: ".len()..];
to_author(&line)?
} else {
// old bors didn't include r=
if message != "automated merge\n" {
panic!(
"expected reviewer for bors merge commit {} in {:?}, message: {:?}",
commit.id(),
repo.path(),
message
);
}
return Ok(None);
};
reviewers.sort();
reviewers.dedup();
Ok(Some(reviewers))
}
fn build_author_map_(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let mut walker = repo.revwalk()?;
if repo.revparse_single(to).is_err() {
// If a commit is not found, try fetching it.
git(&[
"--git-dir",
repo.path().to_str().unwrap(),
"fetch",
"origin",
to,
])?;
}
if from == "" {
let to = repo.revparse_single(to)?.peel_to_commit()?.id();
walker.push(to)?;
} else {
walker.push_range(&format!("{}..{}", from, to))?;
}
let mut author_map = AuthorMap::new();
for oid in walker {
let oid = oid?;
let commit = repo.find_commit(oid)?;
let mut commit_authors = Vec::new();
if !is_rollup_commit(&commit) {
// We ignore the author of rollup-merge commits, and account for
// that author once by counting the reviewer of all bors merges. For
// rollups, we consider that this is the most relevant person, which
// is usually the case.
//
// Otherwise, a single rollup with N PRs attributes N commits to the author of the
// rollup, which isn't fair.
commit_authors.push(Author::from_sig(commit.author()));
}
match parse_bors_reviewer(&reviewers, &repo, &commit) {
Ok(Some(reviewers)) => commit_authors.extend(reviewers),
Ok(None) => {}
Err(ErrorContext(msg, e)) => {
if e.is::<reviewers::UnknownReviewer>() {
eprintln!("Unknown reviewer: {}", ErrorContext(msg, e));
} else {
return Err(ErrorContext(msg, e).into());
}
}
}
commit_authors.extend(commit_coauthors(&commit));
for author in commit_authors {
let author = mailmap.canonicalize(&author);
author_map.add(author, oid);
}
}
Ok(author_map)
}
fn mailmap_from_repo(repo: &git2::Repository) -> Result<Mailmap, Box<dyn std::error::Error>> {
let file = String::from_utf8(
repo.revparse_single("master")?
.peel_to_commit()?
.tree()?
.get_name(".mailmap")
.unwrap()
.to_object(&repo)?
.peel_to_blob()?
.content()
.into(),
)?;
Mailmap::from_string(file)
}
fn up_to_release(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
to: &VersionTag,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let to_commit = repo.find_commit(to.commit).map_err(|e| {
ErrorContext(
format!(
"find_commit: repo={}, commit={}",
repo.path().display(),
to.commit
),
Box::new(e),
)
})?;
let modules = get_submodules(&repo, &to_commit)?;
let mut author_map = build_author_map(&repo, &reviewers, &mailmap, "", &to.raw_tag)
.map_err(|e| ErrorContext(format!("Up to {}", to), e))?;
for module in &modules {
if let Ok(path) = update_repo(&module.repository) {
let subrepo = Repository::open(&path)?;
let submap = build_author_map(
&subrepo,
&reviewers,
&mailmap,
"",
&module.commit.to_string(),
)?;
author_map.extend(submap);
}
}
Ok(author_map)
}
fn generate_thanks() -> Result<BTreeMap<VersionTag, AuthorMap>, Box<dyn std::error::Error>> {
let path = update_repo("https://github.com/rust-lang/rust.git")?;
let repo = git2::Repository::open(&path)?;
let mailmap = mailmap_from_repo(&repo)?;
let reviewers = Reviewers::new()?;
let mut versions = get_versions(&repo)?;
let last_full_stable = versions
.iter()
.rfind(|v| v.raw_tag.ends_with(".0"))
.unwrap()
.version
.clone();
versions.push(VersionTag {
name: String::from("Beta"),
version: {
let mut last = last_full_stable.clone();
last.minor += 1;
last
},
raw_tag: String::from("beta"),
commit: repo
.revparse_single("beta")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
versions.push(VersionTag {
name: String::from("Master"),
version: {
// master is plus 1 minor versions off of beta, which we just pushed
let mut last = last_full_stable.clone();
last.minor += 2;
last
},
raw_tag: String::from("master"),
commit: repo
.revparse_single("master")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
let mut version_map = BTreeMap::new();
let mut cache = HashMap::new();
for (idx, version) in versions.iter().enumerate() {
let previous = if let Some(v) = idx.checked_sub(1).map(|idx| &versions[idx]) {
v
} else {
let author_map = build_author_map(&repo, &reviewers, &mailmap, "", &version.raw_tag)?;
version_map.insert(version.clone(), author_map);
continue;
};
eprintln!("Processing {:?} to {:?}", previous, version);
cache.insert(
version,
up_to_release(&repo, &reviewers, &mailmap, &version)?,
);
let previous = match cache.remove(&previous) {
Some(v) => v,
None => up_to_release(&repo, &reviewers, &mailmap, &previous)?,
};
let current = cache.get(&version).unwrap();
// Remove commits reachable from the previous release.
let only_current = current.difference(&previous);
version_map.insert(version.clone(), only_current);
}
Ok(version_map)
}
fn run() -> Result<(), Box<dyn std::error::Error>> {
let by_version = generate_thanks()?;
let mut all_time = by_version.values().next().unwrap().clone();
for map in by_version.values().skip(1) {
all_time.extend(map.clone());
}
site::render(by_version, all_time)?;
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("Error: {}", err);
let mut cur = &*err;
while let Some(cause) = cur.source() {
eprintln!("\tcaused by: {}", cause);
cur = cause;
}
std::mem::drop(cur);
std::process::exit(1);
}
}
#[derive(Debug)]
struct Submodule {
path: PathBuf,
commit: Oid,
// url
repository: String,
}
fn get_submodules(
repo: &Repository,
at: &Commit,
) -> Result<Vec<Submodule>, Box<dyn std::error::Error>> {
let submodule_cfg = modules_file(&repo, &at)?;
let submodule_cfg = Config::parse(&submodule_cfg)?;
let mut path_to_url = HashMap::new();
let entries = submodule_cfg.entries(None)?;
for entry in &entries {
let entry = entry?;
let name = entry.name().unwrap();
if name.ends_with(".path") {
let url = name.replace(".path", ".url");
let url = submodule_cfg.get_string(&url).unwrap();
path_to_url.insert(entry.value().unwrap().to_owned(), url);
}
}
let mut submodules = Vec::new();
let tree = at.tree()?;
for (path, url) in &path_to_url {
let path = Path::new(&path);
let entry = tree.get_path(&path);
// the submodule may not actually exist
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
assert_eq!(entry.kind().unwrap(), git2::ObjectType::Commit);
submodules.push(Submodule {
path: path.to_owned(),
commit: entry.id(),
repository: url.to_owned(),
});
}
submodules.retain(|s| {
let is_rust =
s.repository.contains("rust-lang") || s.repository.contains("rust-lang-nursery");
let exclude = vec![
"https://github.com/rust-lang/llvm.git",
"https://github.com/rust-lang/llvm-project.git",
"https://github.com/rust-lang/lld.git",
"https://github.com/rust-lang-nursery/clang.git",
"https://github.com/rust-lang-nursery/lldb.git",
"https://github.com/rust-lang/libuv.git",
"https://github.com/rust-lang/gyp.git",
"https://github.com/rust-lang/jemalloc.git",
"https://github.com/rust-lang/compiler-rt.git",
"https://github.com/rust-lang/hoedown.git",
];
is_rust
&& !exclude.contains(&s.repository.as_str())
&& !exclude.contains(&&*format!("{}.git", s.repository))
});
Ok(submodules)
}
fn | (repo: &Repository, at: &Commit) -> Result<String, Box<dyn std::error::Error>> {
if let Some(modules) = at.tree()?.get_name(".gitmodules") {
Ok(String::from_utf8(
modules.to_object(&repo)?.peel_to_blob()?.content().into(),
)?)
} else {
return Ok(String::new());
}
}
| modules_file | identifier_name |
main.rs | use git2::{Commit, Oid, Repository};
use mailmap::{Author, Mailmap};
use regex::{Regex, RegexBuilder};
use semver::Version;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::Mutex;
use std::{cmp, fmt, str};
use config::Config;
use reviewers::Reviewers;
mod config;
mod error;
mod reviewers;
mod site;
use error::ErrorContext;
trait ToAuthor {
fn from_sig(sig: git2::Signature<'_>) -> Author;
}
impl ToAuthor for Author {
fn from_sig(sig: git2::Signature<'_>) -> Author {
let name = sig.name().unwrap_or_else(|| panic!("no name for {}", sig));
let email = sig
.email()
.unwrap_or_else(|| panic!("no email for {}", sig));
Author {
name: name.to_string(),
email: email.to_string(),
}
}
}
#[derive(Clone)]
pub struct AuthorMap {
// author -> [commits]
map: HashMap<Author, HashSet<Oid>>,
}
impl AuthorMap {
fn new() -> Self {
AuthorMap {
map: HashMap::new(),
}
}
fn add(&mut self, author: Author, commit: Oid) { | .or_insert_with(HashSet::new)
.insert(commit);
}
fn iter(&self) -> impl Iterator<Item = (&Author, usize)> {
self.map.iter().map(|(k, v)| (k, v.len()))
}
fn extend(&mut self, other: Self) {
for (author, set) in other.map {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.extend(set);
}
}
#[must_use]
fn difference(&self, other: &AuthorMap) -> AuthorMap {
let mut new = AuthorMap::new();
new.map.reserve(self.map.len());
for (author, set) in self.map.iter() {
if let Some(other_set) = other.map.get(&author) {
let diff: HashSet<_> = set.difference(other_set).cloned().collect();
if !diff.is_empty() {
new.map.insert(author.clone(), diff);
}
} else {
new.map.insert(author.clone(), set.clone());
}
}
new
}
}
fn git(args: &[&str]) -> Result<String, Box<dyn std::error::Error>> {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.stdout(Stdio::piped());
let out = cmd.spawn();
let mut out = match out {
Ok(v) => v,
Err(err) => {
panic!("Failed to spawn command `{:?}`: {:?}", cmd, err);
}
};
let status = out.wait().expect("waited");
if !status.success() {
eprintln!("failed to run `git {:?}`: {:?}", args, status);
return Err(std::io::Error::from(std::io::ErrorKind::Other).into());
}
let mut stdout = Vec::new();
out.stdout.unwrap().read_to_end(&mut stdout).unwrap();
Ok(String::from_utf8_lossy(&stdout).into_owned())
}
lazy_static::lazy_static! {
static ref UPDATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn update_repo(url: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut slug = url;
let prefix = "https://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "git://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "https://git.chromium.org/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let suffix = ".git";
if slug.ends_with(suffix) {
slug = &slug[..slug.len() - suffix.len()];
}
let path_s = format!("repos/{}", slug);
let path = PathBuf::from(&path_s);
if !UPDATED.lock().unwrap().insert(slug.to_string()) {
return Ok(path);
}
if path.exists() {
if should_update() {
// we know for sure the path_s does *not* contain .git as we strip it, so this is a safe
// temp directory
let tmp = format!("{}.git", path_s);
std::fs::rename(&path, &tmp)?;
git(&[
"clone",
"--bare",
"--dissociate",
"--reference",
&tmp,
&url,
&path_s,
])?;
std::fs::remove_dir_all(&tmp)?;
}
} else {
git(&["clone", "--bare", &url, &path_s])?;
}
Ok(path)
}
fn should_update() -> bool {
std::env::args_os().nth(1).unwrap_or_default() == "--refresh"
}
#[derive(Clone)]
pub struct VersionTag {
name: String,
version: Version,
raw_tag: String,
commit: Oid,
in_progress: bool,
}
impl fmt::Display for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
impl std::hash::Hash for VersionTag {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.version.hash(state);
}
}
impl cmp::Eq for VersionTag {}
impl cmp::PartialEq for VersionTag {
fn eq(&self, other: &Self) -> bool {
self.version == other.version
}
}
impl cmp::PartialOrd for VersionTag {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(&other))
}
}
impl cmp::Ord for VersionTag {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.version.cmp(&other.version)
}
}
impl fmt::Debug for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
fn get_versions(repo: &Repository) -> Result<Vec<VersionTag>, Box<dyn std::error::Error>> {
let tags = repo
.tag_names(None)?
.into_iter()
.filter_map(|v| v)
.map(|v| v.to_owned())
.collect::<Vec<_>>();
let mut versions = tags
.iter()
.filter_map(|tag| {
Version::parse(&tag)
.or_else(|_| Version::parse(&format!("{}.0", tag)))
.ok()
.map(|v| VersionTag {
name: format!("Rust {}", v),
version: v,
raw_tag: tag.clone(),
commit: repo
.revparse_single(&tag)
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: false,
})
})
.collect::<Vec<_>>();
versions.sort();
Ok(versions)
}
fn commit_coauthors(commit: &Commit) -> Vec<Author> {
let mut coauthors = vec![];
if let Some(msg) = commit.message_raw() {
lazy_static::lazy_static! {
static ref RE: Regex =
RegexBuilder::new(r"^Co-authored-by: (?P<name>.*) <(?P<email>.*)>")
.case_insensitive(true)
.build()
.unwrap();
}
for line in msg.lines().rev() {
if line.starts_with("Co-authored-by") {
if let Some(caps) = RE.captures(line) {
coauthors.push(Author {
name: caps["name"].to_string(),
email: caps["email"].to_string(),
});
}
}
}
}
coauthors
}
fn build_author_map(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
match build_author_map_(repo, reviewers, mailmap, from, to) {
Ok(o) => Ok(o),
Err(err) => Err(ErrorContext(
format!(
"build_author_map(repo={}, from={:?}, to={:?})",
repo.path().display(),
from,
to
),
err,
))?,
}
}
// Note this is not the bors merge commit of a rollup
fn is_rollup_commit(commit: &Commit) -> bool {
let summary = commit.summary().unwrap_or("");
summary.starts_with("Rollup merge of #")
}
fn parse_bors_reviewer(
reviewers: &Reviewers,
repo: &Repository,
commit: &Commit,
) -> Result<Option<Vec<Author>>, ErrorContext> {
if commit.author().name_bytes() != b"bors" || commit.committer().name_bytes() != b"bors" {
if commit.committer().name_bytes() != b"GitHub" || !is_rollup_commit(commit) {
return Ok(None);
}
}
// Skip non-merge commits
if commit.parents().count() == 1 {
return Ok(None);
}
let to_author = |list: &str| -> Result<Vec<Author>, ErrorContext> {
list.trim_end_matches('.')
.split(|c| c == ',' || c == '+')
.map(|r| r.trim_start_matches('@'))
.map(|r| r.trim_end_matches('`'))
.map(|r| r.trim())
.filter(|r| !r.is_empty())
.filter(|r| *r != "<try>")
.inspect(|r| {
if !r.chars().all(|c| {
c.is_alphabetic() || c.is_digit(10) || c == '-' || c == '_' || c == '='
}) {
eprintln!(
"warning: to_author for {} contained non-alphabetic characters: {:?}",
commit.id(),
r
);
}
})
.map(|r| {
reviewers.to_author(r).map_err(|e| {
ErrorContext(
format!("reviewer: {:?}, commit: {}", r, commit.id()),
e.into(),
)
})
})
.flat_map(|r| r.transpose())
.collect::<Result<Vec<_>, ErrorContext>>()
};
let message = commit.message().unwrap_or("");
let mut reviewers = if let Some(line) = message.lines().find(|l| l.contains(" r=")) {
let start = line.find("r=").unwrap() + 2;
let end = line[start..]
.find(' ')
.map(|pos| pos + start)
.unwrap_or(line.len());
to_author(&line[start..end])?
} else if let Some(line) = message.lines().find(|l| l.starts_with("Reviewed-by: ")) {
let line = &line["Reviewed-by: ".len()..];
to_author(&line)?
} else {
// old bors didn't include r=
if message != "automated merge\n" {
panic!(
"expected reviewer for bors merge commit {} in {:?}, message: {:?}",
commit.id(),
repo.path(),
message
);
}
return Ok(None);
};
reviewers.sort();
reviewers.dedup();
Ok(Some(reviewers))
}
fn build_author_map_(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let mut walker = repo.revwalk()?;
if repo.revparse_single(to).is_err() {
// If a commit is not found, try fetching it.
git(&[
"--git-dir",
repo.path().to_str().unwrap(),
"fetch",
"origin",
to,
])?;
}
if from == "" {
let to = repo.revparse_single(to)?.peel_to_commit()?.id();
walker.push(to)?;
} else {
walker.push_range(&format!("{}..{}", from, to))?;
}
let mut author_map = AuthorMap::new();
for oid in walker {
let oid = oid?;
let commit = repo.find_commit(oid)?;
let mut commit_authors = Vec::new();
if !is_rollup_commit(&commit) {
// We ignore the author of rollup-merge commits, and account for
// that author once by counting the reviewer of all bors merges. For
// rollups, we consider that this is the most relevant person, which
// is usually the case.
//
// Otherwise, a single rollup with N PRs attributes N commits to the author of the
// rollup, which isn't fair.
commit_authors.push(Author::from_sig(commit.author()));
}
match parse_bors_reviewer(&reviewers, &repo, &commit) {
Ok(Some(reviewers)) => commit_authors.extend(reviewers),
Ok(None) => {}
Err(ErrorContext(msg, e)) => {
if e.is::<reviewers::UnknownReviewer>() {
eprintln!("Unknown reviewer: {}", ErrorContext(msg, e));
} else {
return Err(ErrorContext(msg, e).into());
}
}
}
commit_authors.extend(commit_coauthors(&commit));
for author in commit_authors {
let author = mailmap.canonicalize(&author);
author_map.add(author, oid);
}
}
Ok(author_map)
}
fn mailmap_from_repo(repo: &git2::Repository) -> Result<Mailmap, Box<dyn std::error::Error>> {
let file = String::from_utf8(
repo.revparse_single("master")?
.peel_to_commit()?
.tree()?
.get_name(".mailmap")
.unwrap()
.to_object(&repo)?
.peel_to_blob()?
.content()
.into(),
)?;
Mailmap::from_string(file)
}
fn up_to_release(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
to: &VersionTag,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let to_commit = repo.find_commit(to.commit).map_err(|e| {
ErrorContext(
format!(
"find_commit: repo={}, commit={}",
repo.path().display(),
to.commit
),
Box::new(e),
)
})?;
let modules = get_submodules(&repo, &to_commit)?;
let mut author_map = build_author_map(&repo, &reviewers, &mailmap, "", &to.raw_tag)
.map_err(|e| ErrorContext(format!("Up to {}", to), e))?;
for module in &modules {
if let Ok(path) = update_repo(&module.repository) {
let subrepo = Repository::open(&path)?;
let submap = build_author_map(
&subrepo,
&reviewers,
&mailmap,
"",
&module.commit.to_string(),
)?;
author_map.extend(submap);
}
}
Ok(author_map)
}
fn generate_thanks() -> Result<BTreeMap<VersionTag, AuthorMap>, Box<dyn std::error::Error>> {
let path = update_repo("https://github.com/rust-lang/rust.git")?;
let repo = git2::Repository::open(&path)?;
let mailmap = mailmap_from_repo(&repo)?;
let reviewers = Reviewers::new()?;
let mut versions = get_versions(&repo)?;
let last_full_stable = versions
.iter()
.rfind(|v| v.raw_tag.ends_with(".0"))
.unwrap()
.version
.clone();
versions.push(VersionTag {
name: String::from("Beta"),
version: {
let mut last = last_full_stable.clone();
last.minor += 1;
last
},
raw_tag: String::from("beta"),
commit: repo
.revparse_single("beta")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
versions.push(VersionTag {
name: String::from("Master"),
version: {
// master is plus 1 minor versions off of beta, which we just pushed
let mut last = last_full_stable.clone();
last.minor += 2;
last
},
raw_tag: String::from("master"),
commit: repo
.revparse_single("master")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
let mut version_map = BTreeMap::new();
let mut cache = HashMap::new();
for (idx, version) in versions.iter().enumerate() {
let previous = if let Some(v) = idx.checked_sub(1).map(|idx| &versions[idx]) {
v
} else {
let author_map = build_author_map(&repo, &reviewers, &mailmap, "", &version.raw_tag)?;
version_map.insert(version.clone(), author_map);
continue;
};
eprintln!("Processing {:?} to {:?}", previous, version);
cache.insert(
version,
up_to_release(&repo, &reviewers, &mailmap, &version)?,
);
let previous = match cache.remove(&previous) {
Some(v) => v,
None => up_to_release(&repo, &reviewers, &mailmap, &previous)?,
};
let current = cache.get(&version).unwrap();
// Remove commits reachable from the previous release.
let only_current = current.difference(&previous);
version_map.insert(version.clone(), only_current);
}
Ok(version_map)
}
fn run() -> Result<(), Box<dyn std::error::Error>> {
let by_version = generate_thanks()?;
let mut all_time = by_version.values().next().unwrap().clone();
for map in by_version.values().skip(1) {
all_time.extend(map.clone());
}
site::render(by_version, all_time)?;
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("Error: {}", err);
let mut cur = &*err;
while let Some(cause) = cur.source() {
eprintln!("\tcaused by: {}", cause);
cur = cause;
}
std::mem::drop(cur);
std::process::exit(1);
}
}
#[derive(Debug)]
struct Submodule {
path: PathBuf,
commit: Oid,
// url
repository: String,
}
fn get_submodules(
repo: &Repository,
at: &Commit,
) -> Result<Vec<Submodule>, Box<dyn std::error::Error>> {
let submodule_cfg = modules_file(&repo, &at)?;
let submodule_cfg = Config::parse(&submodule_cfg)?;
let mut path_to_url = HashMap::new();
let entries = submodule_cfg.entries(None)?;
for entry in &entries {
let entry = entry?;
let name = entry.name().unwrap();
if name.ends_with(".path") {
let url = name.replace(".path", ".url");
let url = submodule_cfg.get_string(&url).unwrap();
path_to_url.insert(entry.value().unwrap().to_owned(), url);
}
}
let mut submodules = Vec::new();
let tree = at.tree()?;
for (path, url) in &path_to_url {
let path = Path::new(&path);
let entry = tree.get_path(&path);
// the submodule may not actually exist
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
assert_eq!(entry.kind().unwrap(), git2::ObjectType::Commit);
submodules.push(Submodule {
path: path.to_owned(),
commit: entry.id(),
repository: url.to_owned(),
});
}
submodules.retain(|s| {
let is_rust =
s.repository.contains("rust-lang") || s.repository.contains("rust-lang-nursery");
let exclude = vec![
"https://github.com/rust-lang/llvm.git",
"https://github.com/rust-lang/llvm-project.git",
"https://github.com/rust-lang/lld.git",
"https://github.com/rust-lang-nursery/clang.git",
"https://github.com/rust-lang-nursery/lldb.git",
"https://github.com/rust-lang/libuv.git",
"https://github.com/rust-lang/gyp.git",
"https://github.com/rust-lang/jemalloc.git",
"https://github.com/rust-lang/compiler-rt.git",
"https://github.com/rust-lang/hoedown.git",
];
is_rust
&& !exclude.contains(&s.repository.as_str())
&& !exclude.contains(&&*format!("{}.git", s.repository))
});
Ok(submodules)
}
fn modules_file(repo: &Repository, at: &Commit) -> Result<String, Box<dyn std::error::Error>> {
if let Some(modules) = at.tree()?.get_name(".gitmodules") {
Ok(String::from_utf8(
modules.to_object(&repo)?.peel_to_blob()?.content().into(),
)?)
} else {
return Ok(String::new());
}
} | self.map
.entry(author) | random_line_split |
main.rs | use git2::{Commit, Oid, Repository};
use mailmap::{Author, Mailmap};
use regex::{Regex, RegexBuilder};
use semver::Version;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::Mutex;
use std::{cmp, fmt, str};
use config::Config;
use reviewers::Reviewers;
mod config;
mod error;
mod reviewers;
mod site;
use error::ErrorContext;
trait ToAuthor {
fn from_sig(sig: git2::Signature<'_>) -> Author;
}
impl ToAuthor for Author {
fn from_sig(sig: git2::Signature<'_>) -> Author {
let name = sig.name().unwrap_or_else(|| panic!("no name for {}", sig));
let email = sig
.email()
.unwrap_or_else(|| panic!("no email for {}", sig));
Author {
name: name.to_string(),
email: email.to_string(),
}
}
}
#[derive(Clone)]
pub struct AuthorMap {
// author -> [commits]
map: HashMap<Author, HashSet<Oid>>,
}
impl AuthorMap {
fn new() -> Self {
AuthorMap {
map: HashMap::new(),
}
}
fn add(&mut self, author: Author, commit: Oid) {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.insert(commit);
}
fn iter(&self) -> impl Iterator<Item = (&Author, usize)> {
self.map.iter().map(|(k, v)| (k, v.len()))
}
fn extend(&mut self, other: Self) {
for (author, set) in other.map {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.extend(set);
}
}
#[must_use]
fn difference(&self, other: &AuthorMap) -> AuthorMap {
let mut new = AuthorMap::new();
new.map.reserve(self.map.len());
for (author, set) in self.map.iter() {
if let Some(other_set) = other.map.get(&author) {
let diff: HashSet<_> = set.difference(other_set).cloned().collect();
if !diff.is_empty() {
new.map.insert(author.clone(), diff);
}
} else {
new.map.insert(author.clone(), set.clone());
}
}
new
}
}
fn git(args: &[&str]) -> Result<String, Box<dyn std::error::Error>> |
lazy_static::lazy_static! {
static ref UPDATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn update_repo(url: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut slug = url;
let prefix = "https://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "git://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "https://git.chromium.org/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let suffix = ".git";
if slug.ends_with(suffix) {
slug = &slug[..slug.len() - suffix.len()];
}
let path_s = format!("repos/{}", slug);
let path = PathBuf::from(&path_s);
if !UPDATED.lock().unwrap().insert(slug.to_string()) {
return Ok(path);
}
if path.exists() {
if should_update() {
// we know for sure the path_s does *not* contain .git as we strip it, so this is a safe
// temp directory
let tmp = format!("{}.git", path_s);
std::fs::rename(&path, &tmp)?;
git(&[
"clone",
"--bare",
"--dissociate",
"--reference",
&tmp,
&url,
&path_s,
])?;
std::fs::remove_dir_all(&tmp)?;
}
} else {
git(&["clone", "--bare", &url, &path_s])?;
}
Ok(path)
}
fn should_update() -> bool {
std::env::args_os().nth(1).unwrap_or_default() == "--refresh"
}
#[derive(Clone)]
pub struct VersionTag {
name: String,
version: Version,
raw_tag: String,
commit: Oid,
in_progress: bool,
}
impl fmt::Display for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
impl std::hash::Hash for VersionTag {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.version.hash(state);
}
}
impl cmp::Eq for VersionTag {}
impl cmp::PartialEq for VersionTag {
fn eq(&self, other: &Self) -> bool {
self.version == other.version
}
}
impl cmp::PartialOrd for VersionTag {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(&other))
}
}
impl cmp::Ord for VersionTag {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.version.cmp(&other.version)
}
}
impl fmt::Debug for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
fn get_versions(repo: &Repository) -> Result<Vec<VersionTag>, Box<dyn std::error::Error>> {
let tags = repo
.tag_names(None)?
.into_iter()
.filter_map(|v| v)
.map(|v| v.to_owned())
.collect::<Vec<_>>();
let mut versions = tags
.iter()
.filter_map(|tag| {
Version::parse(&tag)
.or_else(|_| Version::parse(&format!("{}.0", tag)))
.ok()
.map(|v| VersionTag {
name: format!("Rust {}", v),
version: v,
raw_tag: tag.clone(),
commit: repo
.revparse_single(&tag)
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: false,
})
})
.collect::<Vec<_>>();
versions.sort();
Ok(versions)
}
fn commit_coauthors(commit: &Commit) -> Vec<Author> {
let mut coauthors = vec![];
if let Some(msg) = commit.message_raw() {
lazy_static::lazy_static! {
static ref RE: Regex =
RegexBuilder::new(r"^Co-authored-by: (?P<name>.*) <(?P<email>.*)>")
.case_insensitive(true)
.build()
.unwrap();
}
for line in msg.lines().rev() {
if line.starts_with("Co-authored-by") {
if let Some(caps) = RE.captures(line) {
coauthors.push(Author {
name: caps["name"].to_string(),
email: caps["email"].to_string(),
});
}
}
}
}
coauthors
}
fn build_author_map(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
match build_author_map_(repo, reviewers, mailmap, from, to) {
Ok(o) => Ok(o),
Err(err) => Err(ErrorContext(
format!(
"build_author_map(repo={}, from={:?}, to={:?})",
repo.path().display(),
from,
to
),
err,
))?,
}
}
// Note this is not the bors merge commit of a rollup
fn is_rollup_commit(commit: &Commit) -> bool {
let summary = commit.summary().unwrap_or("");
summary.starts_with("Rollup merge of #")
}
fn parse_bors_reviewer(
reviewers: &Reviewers,
repo: &Repository,
commit: &Commit,
) -> Result<Option<Vec<Author>>, ErrorContext> {
if commit.author().name_bytes() != b"bors" || commit.committer().name_bytes() != b"bors" {
if commit.committer().name_bytes() != b"GitHub" || !is_rollup_commit(commit) {
return Ok(None);
}
}
// Skip non-merge commits
if commit.parents().count() == 1 {
return Ok(None);
}
let to_author = |list: &str| -> Result<Vec<Author>, ErrorContext> {
list.trim_end_matches('.')
.split(|c| c == ',' || c == '+')
.map(|r| r.trim_start_matches('@'))
.map(|r| r.trim_end_matches('`'))
.map(|r| r.trim())
.filter(|r| !r.is_empty())
.filter(|r| *r != "<try>")
.inspect(|r| {
if !r.chars().all(|c| {
c.is_alphabetic() || c.is_digit(10) || c == '-' || c == '_' || c == '='
}) {
eprintln!(
"warning: to_author for {} contained non-alphabetic characters: {:?}",
commit.id(),
r
);
}
})
.map(|r| {
reviewers.to_author(r).map_err(|e| {
ErrorContext(
format!("reviewer: {:?}, commit: {}", r, commit.id()),
e.into(),
)
})
})
.flat_map(|r| r.transpose())
.collect::<Result<Vec<_>, ErrorContext>>()
};
let message = commit.message().unwrap_or("");
let mut reviewers = if let Some(line) = message.lines().find(|l| l.contains(" r=")) {
let start = line.find("r=").unwrap() + 2;
let end = line[start..]
.find(' ')
.map(|pos| pos + start)
.unwrap_or(line.len());
to_author(&line[start..end])?
} else if let Some(line) = message.lines().find(|l| l.starts_with("Reviewed-by: ")) {
let line = &line["Reviewed-by: ".len()..];
to_author(&line)?
} else {
// old bors didn't include r=
if message != "automated merge\n" {
panic!(
"expected reviewer for bors merge commit {} in {:?}, message: {:?}",
commit.id(),
repo.path(),
message
);
}
return Ok(None);
};
reviewers.sort();
reviewers.dedup();
Ok(Some(reviewers))
}
fn build_author_map_(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let mut walker = repo.revwalk()?;
if repo.revparse_single(to).is_err() {
// If a commit is not found, try fetching it.
git(&[
"--git-dir",
repo.path().to_str().unwrap(),
"fetch",
"origin",
to,
])?;
}
if from == "" {
let to = repo.revparse_single(to)?.peel_to_commit()?.id();
walker.push(to)?;
} else {
walker.push_range(&format!("{}..{}", from, to))?;
}
let mut author_map = AuthorMap::new();
for oid in walker {
let oid = oid?;
let commit = repo.find_commit(oid)?;
let mut commit_authors = Vec::new();
if !is_rollup_commit(&commit) {
// We ignore the author of rollup-merge commits, and account for
// that author once by counting the reviewer of all bors merges. For
// rollups, we consider that this is the most relevant person, which
// is usually the case.
//
// Otherwise, a single rollup with N PRs attributes N commits to the author of the
// rollup, which isn't fair.
commit_authors.push(Author::from_sig(commit.author()));
}
match parse_bors_reviewer(&reviewers, &repo, &commit) {
Ok(Some(reviewers)) => commit_authors.extend(reviewers),
Ok(None) => {}
Err(ErrorContext(msg, e)) => {
if e.is::<reviewers::UnknownReviewer>() {
eprintln!("Unknown reviewer: {}", ErrorContext(msg, e));
} else {
return Err(ErrorContext(msg, e).into());
}
}
}
commit_authors.extend(commit_coauthors(&commit));
for author in commit_authors {
let author = mailmap.canonicalize(&author);
author_map.add(author, oid);
}
}
Ok(author_map)
}
fn mailmap_from_repo(repo: &git2::Repository) -> Result<Mailmap, Box<dyn std::error::Error>> {
let file = String::from_utf8(
repo.revparse_single("master")?
.peel_to_commit()?
.tree()?
.get_name(".mailmap")
.unwrap()
.to_object(&repo)?
.peel_to_blob()?
.content()
.into(),
)?;
Mailmap::from_string(file)
}
fn up_to_release(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
to: &VersionTag,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let to_commit = repo.find_commit(to.commit).map_err(|e| {
ErrorContext(
format!(
"find_commit: repo={}, commit={}",
repo.path().display(),
to.commit
),
Box::new(e),
)
})?;
let modules = get_submodules(&repo, &to_commit)?;
let mut author_map = build_author_map(&repo, &reviewers, &mailmap, "", &to.raw_tag)
.map_err(|e| ErrorContext(format!("Up to {}", to), e))?;
for module in &modules {
if let Ok(path) = update_repo(&module.repository) {
let subrepo = Repository::open(&path)?;
let submap = build_author_map(
&subrepo,
&reviewers,
&mailmap,
"",
&module.commit.to_string(),
)?;
author_map.extend(submap);
}
}
Ok(author_map)
}
fn generate_thanks() -> Result<BTreeMap<VersionTag, AuthorMap>, Box<dyn std::error::Error>> {
let path = update_repo("https://github.com/rust-lang/rust.git")?;
let repo = git2::Repository::open(&path)?;
let mailmap = mailmap_from_repo(&repo)?;
let reviewers = Reviewers::new()?;
let mut versions = get_versions(&repo)?;
let last_full_stable = versions
.iter()
.rfind(|v| v.raw_tag.ends_with(".0"))
.unwrap()
.version
.clone();
versions.push(VersionTag {
name: String::from("Beta"),
version: {
let mut last = last_full_stable.clone();
last.minor += 1;
last
},
raw_tag: String::from("beta"),
commit: repo
.revparse_single("beta")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
versions.push(VersionTag {
name: String::from("Master"),
version: {
// master is plus 1 minor versions off of beta, which we just pushed
let mut last = last_full_stable.clone();
last.minor += 2;
last
},
raw_tag: String::from("master"),
commit: repo
.revparse_single("master")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
let mut version_map = BTreeMap::new();
let mut cache = HashMap::new();
for (idx, version) in versions.iter().enumerate() {
let previous = if let Some(v) = idx.checked_sub(1).map(|idx| &versions[idx]) {
v
} else {
let author_map = build_author_map(&repo, &reviewers, &mailmap, "", &version.raw_tag)?;
version_map.insert(version.clone(), author_map);
continue;
};
eprintln!("Processing {:?} to {:?}", previous, version);
cache.insert(
version,
up_to_release(&repo, &reviewers, &mailmap, &version)?,
);
let previous = match cache.remove(&previous) {
Some(v) => v,
None => up_to_release(&repo, &reviewers, &mailmap, &previous)?,
};
let current = cache.get(&version).unwrap();
// Remove commits reachable from the previous release.
let only_current = current.difference(&previous);
version_map.insert(version.clone(), only_current);
}
Ok(version_map)
}
fn run() -> Result<(), Box<dyn std::error::Error>> {
let by_version = generate_thanks()?;
let mut all_time = by_version.values().next().unwrap().clone();
for map in by_version.values().skip(1) {
all_time.extend(map.clone());
}
site::render(by_version, all_time)?;
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("Error: {}", err);
let mut cur = &*err;
while let Some(cause) = cur.source() {
eprintln!("\tcaused by: {}", cause);
cur = cause;
}
std::mem::drop(cur);
std::process::exit(1);
}
}
#[derive(Debug)]
struct Submodule {
path: PathBuf,
commit: Oid,
// url
repository: String,
}
fn get_submodules(
repo: &Repository,
at: &Commit,
) -> Result<Vec<Submodule>, Box<dyn std::error::Error>> {
let submodule_cfg = modules_file(&repo, &at)?;
let submodule_cfg = Config::parse(&submodule_cfg)?;
let mut path_to_url = HashMap::new();
let entries = submodule_cfg.entries(None)?;
for entry in &entries {
let entry = entry?;
let name = entry.name().unwrap();
if name.ends_with(".path") {
let url = name.replace(".path", ".url");
let url = submodule_cfg.get_string(&url).unwrap();
path_to_url.insert(entry.value().unwrap().to_owned(), url);
}
}
let mut submodules = Vec::new();
let tree = at.tree()?;
for (path, url) in &path_to_url {
let path = Path::new(&path);
let entry = tree.get_path(&path);
// the submodule may not actually exist
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
assert_eq!(entry.kind().unwrap(), git2::ObjectType::Commit);
submodules.push(Submodule {
path: path.to_owned(),
commit: entry.id(),
repository: url.to_owned(),
});
}
submodules.retain(|s| {
let is_rust =
s.repository.contains("rust-lang") || s.repository.contains("rust-lang-nursery");
let exclude = vec![
"https://github.com/rust-lang/llvm.git",
"https://github.com/rust-lang/llvm-project.git",
"https://github.com/rust-lang/lld.git",
"https://github.com/rust-lang-nursery/clang.git",
"https://github.com/rust-lang-nursery/lldb.git",
"https://github.com/rust-lang/libuv.git",
"https://github.com/rust-lang/gyp.git",
"https://github.com/rust-lang/jemalloc.git",
"https://github.com/rust-lang/compiler-rt.git",
"https://github.com/rust-lang/hoedown.git",
];
is_rust
&& !exclude.contains(&s.repository.as_str())
&& !exclude.contains(&&*format!("{}.git", s.repository))
});
Ok(submodules)
}
fn modules_file(repo: &Repository, at: &Commit) -> Result<String, Box<dyn std::error::Error>> {
if let Some(modules) = at.tree()?.get_name(".gitmodules") {
Ok(String::from_utf8(
modules.to_object(&repo)?.peel_to_blob()?.content().into(),
)?)
} else {
return Ok(String::new());
}
}
| {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.stdout(Stdio::piped());
let out = cmd.spawn();
let mut out = match out {
Ok(v) => v,
Err(err) => {
panic!("Failed to spawn command `{:?}`: {:?}", cmd, err);
}
};
let status = out.wait().expect("waited");
if !status.success() {
eprintln!("failed to run `git {:?}`: {:?}", args, status);
return Err(std::io::Error::from(std::io::ErrorKind::Other).into());
}
let mut stdout = Vec::new();
out.stdout.unwrap().read_to_end(&mut stdout).unwrap();
Ok(String::from_utf8_lossy(&stdout).into_owned())
} | identifier_body |
graph_layers.rs | use std::cmp::max;
use std::path::{Path, PathBuf};
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::graph_links::{GraphLinks, GraphLinksMmap};
use crate::common::file_operations::{atomic_save_bin, read_bin, FileStorageError};
use crate::common::mmap_ops;
use crate::common::utils::rev_range;
use crate::entry::entry_point::OperationResult;
use crate::index::hnsw_index::entry_points::EntryPoints;
use crate::index::hnsw_index::graph_links::GraphLinksConverter;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::hnsw_index::search_context::SearchContext;
use crate::index::visited_pool::{VisitedList, VisitedPool};
use crate::types::PointOffsetType;
use crate::vector_storage::ScoredPointOffset;
pub type LinkContainer = Vec<PointOffsetType>;
pub type LinkContainerRef<'a> = &'a [PointOffsetType];
pub type LayersContainer = Vec<LinkContainer>;
pub const HNSW_GRAPH_FILE: &str = "graph.bin";
pub const HNSW_LINKS_FILE: &str = "links.bin";
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayersBackwardCompatibility {
pub(super) max_level: usize,
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
pub(super) links_layers: Vec<LayersContainer>,
pub(super) entry_points: EntryPoints,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayers<TGraphLinks: GraphLinks> {
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
#[serde(skip)]
pub(super) links: TGraphLinks,
pub(super) entry_points: EntryPoints,
#[serde(skip)]
pub(super) visited_pool: VisitedPool,
}
pub trait GraphLayersBase {
fn get_visited_list_from_pool(&self) -> VisitedList;
fn return_visited_list_to_pool(&self, visited_list: VisitedList);
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, f: F)
where
F: FnMut(PointOffsetType);
/// Get M based on current level
fn get_m(&self, level: usize) -> usize;
/// Greedy search for closest points within a single graph layer
fn _search_on_level(
&self,
searcher: &mut SearchContext,
level: usize,
visited_list: &mut VisitedList,
points_scorer: &mut FilteredScorer,
) {
let limit = self.get_m(level);
let mut points_ids: Vec<PointOffsetType> = Vec::with_capacity(2 * limit);
while let Some(candidate) = searcher.candidates.pop() {
if candidate.score < searcher.lower_bound() {
break;
}
points_ids.clear();
self.links_map(candidate.idx, level, |link| {
if !visited_list.check_and_update_visited(link) {
points_ids.push(link);
}
});
let scores = points_scorer.score_points(&mut points_ids, limit);
scores
.iter()
.copied()
.for_each(|score_point| searcher.process_candidate(score_point));
}
}
fn search_on_level(
&self,
level_entry: ScoredPointOffset,
level: usize,
ef: usize,
points_scorer: &mut FilteredScorer,
) -> FixedLengthPriorityQueue<ScoredPointOffset> {
let mut visited_list = self.get_visited_list_from_pool();
visited_list.check_and_update_visited(level_entry.idx);
let mut search_context = SearchContext::new(level_entry, ef);
self._search_on_level(&mut search_context, level, &mut visited_list, points_scorer);
self.return_visited_list_to_pool(visited_list);
search_context.nearest
}
/// Greedy searches for entry point of level `target_level`.
/// Beam size is 1.
fn search_entry(
&self,
entry_point: PointOffsetType,
top_level: usize,
target_level: usize,
points_scorer: &mut FilteredScorer,
) -> ScoredPointOffset {
let mut links: Vec<PointOffsetType> = Vec::with_capacity(2 * self.get_m(0));
let mut current_point = ScoredPointOffset {
idx: entry_point,
score: points_scorer.score_point(entry_point),
};
for level in rev_range(top_level, target_level) {
let limit = self.get_m(level);
let mut changed = true;
while changed {
changed = false;
links.clear();
self.links_map(current_point.idx, level, |link| {
links.push(link);
});
let scores = points_scorer.score_points(&mut links, limit);
scores.iter().copied().for_each(|score_point| {
if score_point.score > current_point.score {
changed = true;
current_point = score_point;
}
});
}
}
current_point
}
}
impl<TGraphLinks: GraphLinks> GraphLayersBase for GraphLayers<TGraphLinks> {
fn get_visited_list_from_pool(&self) -> VisitedList {
self.visited_pool.get(self.links.num_points())
}
fn return_visited_list_to_pool(&self, visited_list: VisitedList) {
self.visited_pool.return_back(visited_list);
}
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, mut f: F)
where
F: FnMut(PointOffsetType),
{
for link in self.links.links(point_id, level) {
f(*link);
}
}
fn get_m(&self, level: usize) -> usize {
if level == 0 {
self.m0
} else {
self.m
}
}
}
/// Object contains links between nodes for HNSW search
///
/// Assume all scores are similarities. Larger score = closer points
impl<TGraphLinks: GraphLinks> GraphLayers<TGraphLinks> {
pub fn point_level(&self, point_id: PointOffsetType) -> usize {
self.links.point_level(point_id)
}
pub fn search(
&self,
top: usize,
ef: usize,
mut points_scorer: FilteredScorer,
) -> Vec<ScoredPointOffset> {
let entry_point = match self
.entry_points
.get_entry_point(|point_id| points_scorer.check_vector(point_id))
{
None => return vec![],
Some(ep) => ep,
};
let zero_level_entry = self.search_entry(
entry_point.point_id,
entry_point.level,
0,
&mut points_scorer,
);
let nearest = self.search_on_level(zero_level_entry, 0, max(top, ef), &mut points_scorer);
nearest.into_iter().take(top).collect_vec()
}
pub fn get_path(path: &Path) -> PathBuf {
path.join(HNSW_GRAPH_FILE)
}
pub fn get_links_path(path: &Path) -> PathBuf {
path.join(HNSW_LINKS_FILE)
}
pub fn num_points(&self) -> usize {
self.links.num_points()
}
}
impl<TGraphLinks> GraphLayers<TGraphLinks>
where
TGraphLinks: GraphLinks,
{
pub fn load(graph_path: &Path, links_path: &Path) -> OperationResult<Self> {
let try_self: Result<Self, FileStorageError> = if links_path.exists() {
read_bin(graph_path)
} else {
Err(FileStorageError::generic(format!(
"Links file does not exists: {links_path:?}"
)))
};
match try_self {
Ok(mut slf) => {
let links = TGraphLinks::load_from_file(links_path)?;
slf.links = links;
Ok(slf)
}
Err(err) => {
let try_legacy: Result<GraphLayersBackwardCompatibility, _> = read_bin(graph_path);
if let Ok(legacy) = try_legacy {
log::debug!("Converting legacy graph to new format");
let mut converter = GraphLinksConverter::new(legacy.links_layers);
converter.save_as(links_path)?;
let links = TGraphLinks::from_converter(converter)?;
let slf = Self {
m: legacy.m,
m0: legacy.m0,
ef_construct: legacy.ef_construct,
links,
entry_points: legacy.entry_points,
visited_pool: VisitedPool::new(),
};
slf.save(graph_path)?;
Ok(slf)
} else {
Err(err)?
}
}
}
}
pub fn save(&self, path: &Path) -> OperationResult<()> |
}
impl GraphLayers<GraphLinksMmap> {
pub fn prefault_mmap_pages(&self, path: &Path) -> Option<mmap_ops::PrefaultMmapPages> {
self.links.prefault_mmap_pages(path)
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tempfile::Builder;
use super::*;
use crate::data_types::vectors::VectorElementType;
use crate::fixtures::index_fixtures::{
random_vector, FakeFilterContext, TestRawScorerProducer,
};
use crate::index::hnsw_index::graph_links::GraphLinksRam;
use crate::index::hnsw_index::tests::create_graph_layer_fixture;
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric};
fn search_in_graph<TGraphLinks: GraphLinks>(
query: &[VectorElementType],
top: usize,
vector_storage: &TestRawScorerProducer<CosineMetric>,
graph: &GraphLayers<TGraphLinks>,
) -> Vec<ScoredPointOffset> {
let fake_filter_context = FakeFilterContext {};
let raw_scorer = vector_storage.get_raw_scorer(query.to_owned());
let scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let ef = 16;
graph.search(top, ef, scorer)
}
const M: usize = 8;
#[test]
fn test_search_on_level() {
let dim = 8;
let m = 8;
let ef_construct = 32;
let entry_points_num = 10;
let num_vectors = 10;
let mut rng = StdRng::seed_from_u64(42);
let vector_holder =
TestRawScorerProducer::<DotProductMetric>::new(dim, num_vectors, &mut rng);
let mut graph_layers = GraphLayers {
m,
m0: 2 * m,
ef_construct,
links: GraphLinksRam::default(),
entry_points: EntryPoints::new(entry_points_num),
visited_pool: VisitedPool::new(),
};
let mut graph_links = vec![vec![Vec::new()]; num_vectors];
graph_links[0][0] = vec![1, 2, 3, 4, 5, 6];
graph_layers.links =
GraphLinksRam::from_converter(GraphLinksConverter::new(graph_links.clone())).unwrap();
let linking_idx: PointOffsetType = 7;
let fake_filter_context = FakeFilterContext {};
let added_vector = vector_holder.vectors.get(linking_idx).to_vec();
let raw_scorer = vector_holder.get_raw_scorer(added_vector);
let mut scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let nearest_on_level = graph_layers.search_on_level(
ScoredPointOffset {
idx: 0,
score: scorer.score_point(0),
},
0,
32,
&mut scorer,
);
assert_eq!(nearest_on_level.len(), graph_links[0][0].len() + 1);
for nearest in &nearest_on_level {
// eprintln!("nearest = {:#?}", nearest);
assert_eq!(
nearest.score,
scorer.score_internal(linking_idx, nearest.idx)
)
}
}
#[test]
fn test_save_and_load() {
let num_vectors = 100;
let dim = 8;
let top = 5;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("graph_dir").tempdir().unwrap();
let links_path = GraphLayers::<GraphLinksRam>::get_links_path(dir.path());
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
false,
&mut rng,
Some(&links_path),
);
let query = random_vector(&mut rng, dim);
let res1 = search_in_graph(&query, top, &vector_holder, &graph_layers);
let path = GraphLayers::<GraphLinksRam>::get_path(dir.path());
graph_layers.save(&path).unwrap();
let graph2 = GraphLayers::<GraphLinksRam>::load(&path, &links_path).unwrap();
let res2 = search_in_graph(&query, top, &vector_holder, &graph2);
assert_eq!(res1, res2)
}
#[test]
fn test_add_points() {
let num_vectors = 1000;
let dim = 8;
let mut rng = StdRng::seed_from_u64(42);
type M = CosineMetric;
let (vector_holder, graph_layers) =
create_graph_layer_fixture::<M, _>(num_vectors, M, dim, false, &mut rng, None);
let main_entry = graph_layers
.entry_points
.get_entry_point(|_x| true)
.expect("Expect entry point to exists");
assert!(main_entry.level > 0);
let num_levels = (0..num_vectors)
.map(|i| graph_layers.links.point_level(i as PointOffsetType))
.max()
.unwrap();
assert_eq!(main_entry.level, num_levels);
let total_links_0 = (0..num_vectors)
.map(|i| graph_layers.links.links(i as PointOffsetType, 0).len())
.sum::<usize>();
eprintln!("total_links_0 = {total_links_0:#?}");
eprintln!("num_vectors = {num_vectors:#?}");
assert!(total_links_0 > 0);
assert!(total_links_0 as f64 / num_vectors as f64 > M as f64);
let top = 5;
let query = random_vector(&mut rng, dim);
let processed_query = M::preprocess(&query).unwrap_or_else(|| query.clone());
let mut reference_top = FixedLengthPriorityQueue::new(top);
for idx in 0..vector_holder.vectors.len() as PointOffsetType {
let vec = &vector_holder.vectors.get(idx);
reference_top.push(ScoredPointOffset {
idx,
score: M::similarity(vec, &processed_query),
});
}
let graph_search = search_in_graph(&query, top, &vector_holder, &graph_layers);
assert_eq!(reference_top.into_vec(), graph_search);
}
#[test]
#[ignore]
fn test_draw_hnsw_graph() {
let dim = 2;
let num_vectors = 500;
let mut rng = StdRng::seed_from_u64(42);
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
true,
&mut rng,
None,
);
let graph_json = serde_json::to_string_pretty(&graph_layers).unwrap();
let vectors_json = serde_json::to_string_pretty(
&(0..vector_holder.vectors.len() as PointOffsetType)
.map(|point_id| vector_holder.vectors.get(point_id).to_vec())
.collect_vec(),
)
.unwrap();
let mut file = File::create("graph.json").unwrap();
file.write_all(
format!("{{ \"graph\": {graph_json}, \n \"vectors\": {vectors_json} }}").as_bytes(),
)
.unwrap();
}
}
| {
Ok(atomic_save_bin(path, self)?)
} | identifier_body |
graph_layers.rs | use std::cmp::max;
use std::path::{Path, PathBuf};
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::graph_links::{GraphLinks, GraphLinksMmap};
use crate::common::file_operations::{atomic_save_bin, read_bin, FileStorageError};
use crate::common::mmap_ops;
use crate::common::utils::rev_range;
use crate::entry::entry_point::OperationResult;
use crate::index::hnsw_index::entry_points::EntryPoints;
use crate::index::hnsw_index::graph_links::GraphLinksConverter;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::hnsw_index::search_context::SearchContext;
use crate::index::visited_pool::{VisitedList, VisitedPool};
use crate::types::PointOffsetType;
use crate::vector_storage::ScoredPointOffset;
pub type LinkContainer = Vec<PointOffsetType>;
pub type LinkContainerRef<'a> = &'a [PointOffsetType];
pub type LayersContainer = Vec<LinkContainer>;
pub const HNSW_GRAPH_FILE: &str = "graph.bin";
pub const HNSW_LINKS_FILE: &str = "links.bin";
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayersBackwardCompatibility {
pub(super) max_level: usize,
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
pub(super) links_layers: Vec<LayersContainer>,
pub(super) entry_points: EntryPoints,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayers<TGraphLinks: GraphLinks> {
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
#[serde(skip)]
pub(super) links: TGraphLinks,
pub(super) entry_points: EntryPoints,
#[serde(skip)]
pub(super) visited_pool: VisitedPool,
}
pub trait GraphLayersBase {
fn get_visited_list_from_pool(&self) -> VisitedList;
fn return_visited_list_to_pool(&self, visited_list: VisitedList);
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, f: F)
where
F: FnMut(PointOffsetType);
/// Get M based on current level
fn get_m(&self, level: usize) -> usize;
/// Greedy search for closest points within a single graph layer
fn _search_on_level(
&self,
searcher: &mut SearchContext,
level: usize,
visited_list: &mut VisitedList,
points_scorer: &mut FilteredScorer,
) {
let limit = self.get_m(level);
let mut points_ids: Vec<PointOffsetType> = Vec::with_capacity(2 * limit);
while let Some(candidate) = searcher.candidates.pop() {
if candidate.score < searcher.lower_bound() {
break;
}
points_ids.clear();
self.links_map(candidate.idx, level, |link| {
if !visited_list.check_and_update_visited(link) {
points_ids.push(link);
}
});
let scores = points_scorer.score_points(&mut points_ids, limit);
scores
.iter()
.copied()
.for_each(|score_point| searcher.process_candidate(score_point));
}
}
fn search_on_level(
&self,
level_entry: ScoredPointOffset,
level: usize,
ef: usize,
points_scorer: &mut FilteredScorer,
) -> FixedLengthPriorityQueue<ScoredPointOffset> {
let mut visited_list = self.get_visited_list_from_pool();
visited_list.check_and_update_visited(level_entry.idx);
let mut search_context = SearchContext::new(level_entry, ef);
self._search_on_level(&mut search_context, level, &mut visited_list, points_scorer);
self.return_visited_list_to_pool(visited_list);
search_context.nearest
}
/// Greedy searches for entry point of level `target_level`.
/// Beam size is 1.
fn search_entry(
&self,
entry_point: PointOffsetType,
top_level: usize,
target_level: usize,
points_scorer: &mut FilteredScorer,
) -> ScoredPointOffset {
let mut links: Vec<PointOffsetType> = Vec::with_capacity(2 * self.get_m(0));
let mut current_point = ScoredPointOffset {
idx: entry_point,
score: points_scorer.score_point(entry_point),
};
for level in rev_range(top_level, target_level) {
let limit = self.get_m(level);
let mut changed = true;
while changed {
changed = false;
links.clear();
self.links_map(current_point.idx, level, |link| {
links.push(link);
});
let scores = points_scorer.score_points(&mut links, limit);
scores.iter().copied().for_each(|score_point| {
if score_point.score > current_point.score {
changed = true;
current_point = score_point;
}
});
}
}
current_point
}
}
impl<TGraphLinks: GraphLinks> GraphLayersBase for GraphLayers<TGraphLinks> {
fn get_visited_list_from_pool(&self) -> VisitedList {
self.visited_pool.get(self.links.num_points())
}
fn return_visited_list_to_pool(&self, visited_list: VisitedList) {
self.visited_pool.return_back(visited_list);
}
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, mut f: F)
where
F: FnMut(PointOffsetType),
{
for link in self.links.links(point_id, level) {
f(*link);
}
}
fn get_m(&self, level: usize) -> usize {
if level == 0 {
self.m0
} else {
self.m
}
}
}
/// Object contains links between nodes for HNSW search
///
/// Assume all scores are similarities. Larger score = closer points
impl<TGraphLinks: GraphLinks> GraphLayers<TGraphLinks> {
pub fn | (&self, point_id: PointOffsetType) -> usize {
self.links.point_level(point_id)
}
pub fn search(
&self,
top: usize,
ef: usize,
mut points_scorer: FilteredScorer,
) -> Vec<ScoredPointOffset> {
let entry_point = match self
.entry_points
.get_entry_point(|point_id| points_scorer.check_vector(point_id))
{
None => return vec![],
Some(ep) => ep,
};
let zero_level_entry = self.search_entry(
entry_point.point_id,
entry_point.level,
0,
&mut points_scorer,
);
let nearest = self.search_on_level(zero_level_entry, 0, max(top, ef), &mut points_scorer);
nearest.into_iter().take(top).collect_vec()
}
pub fn get_path(path: &Path) -> PathBuf {
path.join(HNSW_GRAPH_FILE)
}
pub fn get_links_path(path: &Path) -> PathBuf {
path.join(HNSW_LINKS_FILE)
}
pub fn num_points(&self) -> usize {
self.links.num_points()
}
}
impl<TGraphLinks> GraphLayers<TGraphLinks>
where
TGraphLinks: GraphLinks,
{
pub fn load(graph_path: &Path, links_path: &Path) -> OperationResult<Self> {
let try_self: Result<Self, FileStorageError> = if links_path.exists() {
read_bin(graph_path)
} else {
Err(FileStorageError::generic(format!(
"Links file does not exists: {links_path:?}"
)))
};
match try_self {
Ok(mut slf) => {
let links = TGraphLinks::load_from_file(links_path)?;
slf.links = links;
Ok(slf)
}
Err(err) => {
let try_legacy: Result<GraphLayersBackwardCompatibility, _> = read_bin(graph_path);
if let Ok(legacy) = try_legacy {
log::debug!("Converting legacy graph to new format");
let mut converter = GraphLinksConverter::new(legacy.links_layers);
converter.save_as(links_path)?;
let links = TGraphLinks::from_converter(converter)?;
let slf = Self {
m: legacy.m,
m0: legacy.m0,
ef_construct: legacy.ef_construct,
links,
entry_points: legacy.entry_points,
visited_pool: VisitedPool::new(),
};
slf.save(graph_path)?;
Ok(slf)
} else {
Err(err)?
}
}
}
}
pub fn save(&self, path: &Path) -> OperationResult<()> {
Ok(atomic_save_bin(path, self)?)
}
}
impl GraphLayers<GraphLinksMmap> {
pub fn prefault_mmap_pages(&self, path: &Path) -> Option<mmap_ops::PrefaultMmapPages> {
self.links.prefault_mmap_pages(path)
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tempfile::Builder;
use super::*;
use crate::data_types::vectors::VectorElementType;
use crate::fixtures::index_fixtures::{
random_vector, FakeFilterContext, TestRawScorerProducer,
};
use crate::index::hnsw_index::graph_links::GraphLinksRam;
use crate::index::hnsw_index::tests::create_graph_layer_fixture;
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric};
fn search_in_graph<TGraphLinks: GraphLinks>(
query: &[VectorElementType],
top: usize,
vector_storage: &TestRawScorerProducer<CosineMetric>,
graph: &GraphLayers<TGraphLinks>,
) -> Vec<ScoredPointOffset> {
let fake_filter_context = FakeFilterContext {};
let raw_scorer = vector_storage.get_raw_scorer(query.to_owned());
let scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let ef = 16;
graph.search(top, ef, scorer)
}
const M: usize = 8;
#[test]
fn test_search_on_level() {
let dim = 8;
let m = 8;
let ef_construct = 32;
let entry_points_num = 10;
let num_vectors = 10;
let mut rng = StdRng::seed_from_u64(42);
let vector_holder =
TestRawScorerProducer::<DotProductMetric>::new(dim, num_vectors, &mut rng);
let mut graph_layers = GraphLayers {
m,
m0: 2 * m,
ef_construct,
links: GraphLinksRam::default(),
entry_points: EntryPoints::new(entry_points_num),
visited_pool: VisitedPool::new(),
};
let mut graph_links = vec![vec![Vec::new()]; num_vectors];
graph_links[0][0] = vec![1, 2, 3, 4, 5, 6];
graph_layers.links =
GraphLinksRam::from_converter(GraphLinksConverter::new(graph_links.clone())).unwrap();
let linking_idx: PointOffsetType = 7;
let fake_filter_context = FakeFilterContext {};
let added_vector = vector_holder.vectors.get(linking_idx).to_vec();
let raw_scorer = vector_holder.get_raw_scorer(added_vector);
let mut scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let nearest_on_level = graph_layers.search_on_level(
ScoredPointOffset {
idx: 0,
score: scorer.score_point(0),
},
0,
32,
&mut scorer,
);
assert_eq!(nearest_on_level.len(), graph_links[0][0].len() + 1);
for nearest in &nearest_on_level {
// eprintln!("nearest = {:#?}", nearest);
assert_eq!(
nearest.score,
scorer.score_internal(linking_idx, nearest.idx)
)
}
}
#[test]
fn test_save_and_load() {
let num_vectors = 100;
let dim = 8;
let top = 5;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("graph_dir").tempdir().unwrap();
let links_path = GraphLayers::<GraphLinksRam>::get_links_path(dir.path());
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
false,
&mut rng,
Some(&links_path),
);
let query = random_vector(&mut rng, dim);
let res1 = search_in_graph(&query, top, &vector_holder, &graph_layers);
let path = GraphLayers::<GraphLinksRam>::get_path(dir.path());
graph_layers.save(&path).unwrap();
let graph2 = GraphLayers::<GraphLinksRam>::load(&path, &links_path).unwrap();
let res2 = search_in_graph(&query, top, &vector_holder, &graph2);
assert_eq!(res1, res2)
}
#[test]
fn test_add_points() {
let num_vectors = 1000;
let dim = 8;
let mut rng = StdRng::seed_from_u64(42);
type M = CosineMetric;
let (vector_holder, graph_layers) =
create_graph_layer_fixture::<M, _>(num_vectors, M, dim, false, &mut rng, None);
let main_entry = graph_layers
.entry_points
.get_entry_point(|_x| true)
.expect("Expect entry point to exists");
assert!(main_entry.level > 0);
let num_levels = (0..num_vectors)
.map(|i| graph_layers.links.point_level(i as PointOffsetType))
.max()
.unwrap();
assert_eq!(main_entry.level, num_levels);
let total_links_0 = (0..num_vectors)
.map(|i| graph_layers.links.links(i as PointOffsetType, 0).len())
.sum::<usize>();
eprintln!("total_links_0 = {total_links_0:#?}");
eprintln!("num_vectors = {num_vectors:#?}");
assert!(total_links_0 > 0);
assert!(total_links_0 as f64 / num_vectors as f64 > M as f64);
let top = 5;
let query = random_vector(&mut rng, dim);
let processed_query = M::preprocess(&query).unwrap_or_else(|| query.clone());
let mut reference_top = FixedLengthPriorityQueue::new(top);
for idx in 0..vector_holder.vectors.len() as PointOffsetType {
let vec = &vector_holder.vectors.get(idx);
reference_top.push(ScoredPointOffset {
idx,
score: M::similarity(vec, &processed_query),
});
}
let graph_search = search_in_graph(&query, top, &vector_holder, &graph_layers);
assert_eq!(reference_top.into_vec(), graph_search);
}
#[test]
#[ignore]
fn test_draw_hnsw_graph() {
let dim = 2;
let num_vectors = 500;
let mut rng = StdRng::seed_from_u64(42);
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
true,
&mut rng,
None,
);
let graph_json = serde_json::to_string_pretty(&graph_layers).unwrap();
let vectors_json = serde_json::to_string_pretty(
&(0..vector_holder.vectors.len() as PointOffsetType)
.map(|point_id| vector_holder.vectors.get(point_id).to_vec())
.collect_vec(),
)
.unwrap();
let mut file = File::create("graph.json").unwrap();
file.write_all(
format!("{{ \"graph\": {graph_json}, \n \"vectors\": {vectors_json} }}").as_bytes(),
)
.unwrap();
}
}
| point_level | identifier_name |
graph_layers.rs | use std::cmp::max;
use std::path::{Path, PathBuf};
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::graph_links::{GraphLinks, GraphLinksMmap};
use crate::common::file_operations::{atomic_save_bin, read_bin, FileStorageError};
use crate::common::mmap_ops;
use crate::common::utils::rev_range;
use crate::entry::entry_point::OperationResult;
use crate::index::hnsw_index::entry_points::EntryPoints;
use crate::index::hnsw_index::graph_links::GraphLinksConverter;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::hnsw_index::search_context::SearchContext;
use crate::index::visited_pool::{VisitedList, VisitedPool};
use crate::types::PointOffsetType;
use crate::vector_storage::ScoredPointOffset;
pub type LinkContainer = Vec<PointOffsetType>;
pub type LinkContainerRef<'a> = &'a [PointOffsetType];
pub type LayersContainer = Vec<LinkContainer>;
pub const HNSW_GRAPH_FILE: &str = "graph.bin";
pub const HNSW_LINKS_FILE: &str = "links.bin";
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayersBackwardCompatibility {
pub(super) max_level: usize,
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
pub(super) links_layers: Vec<LayersContainer>,
pub(super) entry_points: EntryPoints,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayers<TGraphLinks: GraphLinks> {
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
#[serde(skip)]
pub(super) links: TGraphLinks,
pub(super) entry_points: EntryPoints,
#[serde(skip)]
pub(super) visited_pool: VisitedPool,
}
pub trait GraphLayersBase {
fn get_visited_list_from_pool(&self) -> VisitedList;
fn return_visited_list_to_pool(&self, visited_list: VisitedList);
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, f: F)
where
F: FnMut(PointOffsetType);
/// Get M based on current level
fn get_m(&self, level: usize) -> usize;
/// Greedy search for closest points within a single graph layer
fn _search_on_level(
&self,
searcher: &mut SearchContext,
level: usize,
visited_list: &mut VisitedList,
points_scorer: &mut FilteredScorer,
) {
let limit = self.get_m(level);
let mut points_ids: Vec<PointOffsetType> = Vec::with_capacity(2 * limit);
while let Some(candidate) = searcher.candidates.pop() {
if candidate.score < searcher.lower_bound() {
break;
}
points_ids.clear();
self.links_map(candidate.idx, level, |link| {
if !visited_list.check_and_update_visited(link) {
points_ids.push(link);
}
});
let scores = points_scorer.score_points(&mut points_ids, limit);
scores
.iter()
.copied()
.for_each(|score_point| searcher.process_candidate(score_point));
}
}
fn search_on_level(
&self,
level_entry: ScoredPointOffset,
level: usize,
ef: usize,
points_scorer: &mut FilteredScorer,
) -> FixedLengthPriorityQueue<ScoredPointOffset> {
let mut visited_list = self.get_visited_list_from_pool();
visited_list.check_and_update_visited(level_entry.idx);
let mut search_context = SearchContext::new(level_entry, ef);
self._search_on_level(&mut search_context, level, &mut visited_list, points_scorer);
self.return_visited_list_to_pool(visited_list);
search_context.nearest
}
/// Greedy searches for entry point of level `target_level`.
/// Beam size is 1.
fn search_entry(
&self,
entry_point: PointOffsetType,
top_level: usize,
target_level: usize,
points_scorer: &mut FilteredScorer,
) -> ScoredPointOffset {
let mut links: Vec<PointOffsetType> = Vec::with_capacity(2 * self.get_m(0));
let mut current_point = ScoredPointOffset {
idx: entry_point,
score: points_scorer.score_point(entry_point),
};
for level in rev_range(top_level, target_level) {
let limit = self.get_m(level);
let mut changed = true;
while changed {
changed = false;
links.clear();
self.links_map(current_point.idx, level, |link| {
links.push(link);
});
let scores = points_scorer.score_points(&mut links, limit);
scores.iter().copied().for_each(|score_point| {
if score_point.score > current_point.score {
changed = true;
current_point = score_point;
}
});
}
}
current_point
}
}
impl<TGraphLinks: GraphLinks> GraphLayersBase for GraphLayers<TGraphLinks> {
fn get_visited_list_from_pool(&self) -> VisitedList {
self.visited_pool.get(self.links.num_points())
}
fn return_visited_list_to_pool(&self, visited_list: VisitedList) {
self.visited_pool.return_back(visited_list);
}
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, mut f: F)
where
F: FnMut(PointOffsetType),
{
for link in self.links.links(point_id, level) {
f(*link);
}
}
fn get_m(&self, level: usize) -> usize {
if level == 0 {
self.m0
} else {
self.m
}
}
}
/// Object contains links between nodes for HNSW search
///
/// Assume all scores are similarities. Larger score = closer points
impl<TGraphLinks: GraphLinks> GraphLayers<TGraphLinks> {
pub fn point_level(&self, point_id: PointOffsetType) -> usize {
self.links.point_level(point_id)
}
pub fn search(
&self,
top: usize,
ef: usize,
mut points_scorer: FilteredScorer,
) -> Vec<ScoredPointOffset> {
let entry_point = match self
.entry_points
.get_entry_point(|point_id| points_scorer.check_vector(point_id))
{
None => return vec![],
Some(ep) => ep,
};
let zero_level_entry = self.search_entry(
entry_point.point_id,
entry_point.level,
0,
&mut points_scorer,
);
let nearest = self.search_on_level(zero_level_entry, 0, max(top, ef), &mut points_scorer);
nearest.into_iter().take(top).collect_vec()
}
pub fn get_path(path: &Path) -> PathBuf {
path.join(HNSW_GRAPH_FILE)
}
pub fn get_links_path(path: &Path) -> PathBuf {
path.join(HNSW_LINKS_FILE)
}
pub fn num_points(&self) -> usize {
self.links.num_points()
}
}
impl<TGraphLinks> GraphLayers<TGraphLinks>
where
TGraphLinks: GraphLinks,
{
pub fn load(graph_path: &Path, links_path: &Path) -> OperationResult<Self> {
let try_self: Result<Self, FileStorageError> = if links_path.exists() {
read_bin(graph_path)
} else {
Err(FileStorageError::generic(format!(
"Links file does not exists: {links_path:?}"
)))
};
match try_self {
Ok(mut slf) => {
let links = TGraphLinks::load_from_file(links_path)?;
slf.links = links;
Ok(slf)
}
Err(err) => |
}
}
pub fn save(&self, path: &Path) -> OperationResult<()> {
Ok(atomic_save_bin(path, self)?)
}
}
impl GraphLayers<GraphLinksMmap> {
pub fn prefault_mmap_pages(&self, path: &Path) -> Option<mmap_ops::PrefaultMmapPages> {
self.links.prefault_mmap_pages(path)
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tempfile::Builder;
use super::*;
use crate::data_types::vectors::VectorElementType;
use crate::fixtures::index_fixtures::{
random_vector, FakeFilterContext, TestRawScorerProducer,
};
use crate::index::hnsw_index::graph_links::GraphLinksRam;
use crate::index::hnsw_index::tests::create_graph_layer_fixture;
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric};
fn search_in_graph<TGraphLinks: GraphLinks>(
query: &[VectorElementType],
top: usize,
vector_storage: &TestRawScorerProducer<CosineMetric>,
graph: &GraphLayers<TGraphLinks>,
) -> Vec<ScoredPointOffset> {
let fake_filter_context = FakeFilterContext {};
let raw_scorer = vector_storage.get_raw_scorer(query.to_owned());
let scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let ef = 16;
graph.search(top, ef, scorer)
}
const M: usize = 8;
#[test]
fn test_search_on_level() {
let dim = 8;
let m = 8;
let ef_construct = 32;
let entry_points_num = 10;
let num_vectors = 10;
let mut rng = StdRng::seed_from_u64(42);
let vector_holder =
TestRawScorerProducer::<DotProductMetric>::new(dim, num_vectors, &mut rng);
let mut graph_layers = GraphLayers {
m,
m0: 2 * m,
ef_construct,
links: GraphLinksRam::default(),
entry_points: EntryPoints::new(entry_points_num),
visited_pool: VisitedPool::new(),
};
let mut graph_links = vec![vec![Vec::new()]; num_vectors];
graph_links[0][0] = vec![1, 2, 3, 4, 5, 6];
graph_layers.links =
GraphLinksRam::from_converter(GraphLinksConverter::new(graph_links.clone())).unwrap();
let linking_idx: PointOffsetType = 7;
let fake_filter_context = FakeFilterContext {};
let added_vector = vector_holder.vectors.get(linking_idx).to_vec();
let raw_scorer = vector_holder.get_raw_scorer(added_vector);
let mut scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let nearest_on_level = graph_layers.search_on_level(
ScoredPointOffset {
idx: 0,
score: scorer.score_point(0),
},
0,
32,
&mut scorer,
);
assert_eq!(nearest_on_level.len(), graph_links[0][0].len() + 1);
for nearest in &nearest_on_level {
// eprintln!("nearest = {:#?}", nearest);
assert_eq!(
nearest.score,
scorer.score_internal(linking_idx, nearest.idx)
)
}
}
#[test]
fn test_save_and_load() {
let num_vectors = 100;
let dim = 8;
let top = 5;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("graph_dir").tempdir().unwrap();
let links_path = GraphLayers::<GraphLinksRam>::get_links_path(dir.path());
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
false,
&mut rng,
Some(&links_path),
);
let query = random_vector(&mut rng, dim);
let res1 = search_in_graph(&query, top, &vector_holder, &graph_layers);
let path = GraphLayers::<GraphLinksRam>::get_path(dir.path());
graph_layers.save(&path).unwrap();
let graph2 = GraphLayers::<GraphLinksRam>::load(&path, &links_path).unwrap();
let res2 = search_in_graph(&query, top, &vector_holder, &graph2);
assert_eq!(res1, res2)
}
#[test]
fn test_add_points() {
let num_vectors = 1000;
let dim = 8;
let mut rng = StdRng::seed_from_u64(42);
type M = CosineMetric;
let (vector_holder, graph_layers) =
create_graph_layer_fixture::<M, _>(num_vectors, M, dim, false, &mut rng, None);
let main_entry = graph_layers
.entry_points
.get_entry_point(|_x| true)
.expect("Expect entry point to exists");
assert!(main_entry.level > 0);
let num_levels = (0..num_vectors)
.map(|i| graph_layers.links.point_level(i as PointOffsetType))
.max()
.unwrap();
assert_eq!(main_entry.level, num_levels);
let total_links_0 = (0..num_vectors)
.map(|i| graph_layers.links.links(i as PointOffsetType, 0).len())
.sum::<usize>();
eprintln!("total_links_0 = {total_links_0:#?}");
eprintln!("num_vectors = {num_vectors:#?}");
assert!(total_links_0 > 0);
assert!(total_links_0 as f64 / num_vectors as f64 > M as f64);
let top = 5;
let query = random_vector(&mut rng, dim);
let processed_query = M::preprocess(&query).unwrap_or_else(|| query.clone());
let mut reference_top = FixedLengthPriorityQueue::new(top);
for idx in 0..vector_holder.vectors.len() as PointOffsetType {
let vec = &vector_holder.vectors.get(idx);
reference_top.push(ScoredPointOffset {
idx,
score: M::similarity(vec, &processed_query),
});
}
let graph_search = search_in_graph(&query, top, &vector_holder, &graph_layers);
assert_eq!(reference_top.into_vec(), graph_search);
}
#[test]
#[ignore]
fn test_draw_hnsw_graph() {
let dim = 2;
let num_vectors = 500;
let mut rng = StdRng::seed_from_u64(42);
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
true,
&mut rng,
None,
);
let graph_json = serde_json::to_string_pretty(&graph_layers).unwrap();
let vectors_json = serde_json::to_string_pretty(
&(0..vector_holder.vectors.len() as PointOffsetType)
.map(|point_id| vector_holder.vectors.get(point_id).to_vec())
.collect_vec(),
)
.unwrap();
let mut file = File::create("graph.json").unwrap();
file.write_all(
format!("{{ \"graph\": {graph_json}, \n \"vectors\": {vectors_json} }}").as_bytes(),
)
.unwrap();
}
}
| {
let try_legacy: Result<GraphLayersBackwardCompatibility, _> = read_bin(graph_path);
if let Ok(legacy) = try_legacy {
log::debug!("Converting legacy graph to new format");
let mut converter = GraphLinksConverter::new(legacy.links_layers);
converter.save_as(links_path)?;
let links = TGraphLinks::from_converter(converter)?;
let slf = Self {
m: legacy.m,
m0: legacy.m0,
ef_construct: legacy.ef_construct,
links,
entry_points: legacy.entry_points,
visited_pool: VisitedPool::new(),
};
slf.save(graph_path)?;
Ok(slf)
} else {
Err(err)?
}
} | conditional_block |
graph_layers.rs | use std::cmp::max;
use std::path::{Path, PathBuf};
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::graph_links::{GraphLinks, GraphLinksMmap};
use crate::common::file_operations::{atomic_save_bin, read_bin, FileStorageError};
use crate::common::mmap_ops;
use crate::common::utils::rev_range;
use crate::entry::entry_point::OperationResult;
use crate::index::hnsw_index::entry_points::EntryPoints;
use crate::index::hnsw_index::graph_links::GraphLinksConverter;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::hnsw_index::search_context::SearchContext;
use crate::index::visited_pool::{VisitedList, VisitedPool};
use crate::types::PointOffsetType;
use crate::vector_storage::ScoredPointOffset;
pub type LinkContainer = Vec<PointOffsetType>;
pub type LinkContainerRef<'a> = &'a [PointOffsetType];
pub type LayersContainer = Vec<LinkContainer>;
pub const HNSW_GRAPH_FILE: &str = "graph.bin";
pub const HNSW_LINKS_FILE: &str = "links.bin";
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayersBackwardCompatibility {
pub(super) max_level: usize,
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
pub(super) links_layers: Vec<LayersContainer>,
pub(super) entry_points: EntryPoints,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayers<TGraphLinks: GraphLinks> {
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
#[serde(skip)]
pub(super) links: TGraphLinks,
pub(super) entry_points: EntryPoints,
#[serde(skip)]
pub(super) visited_pool: VisitedPool,
}
pub trait GraphLayersBase {
fn get_visited_list_from_pool(&self) -> VisitedList;
fn return_visited_list_to_pool(&self, visited_list: VisitedList);
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, f: F)
where
F: FnMut(PointOffsetType);
/// Get M based on current level
fn get_m(&self, level: usize) -> usize;
/// Greedy search for closest points within a single graph layer
fn _search_on_level(
&self,
searcher: &mut SearchContext,
level: usize,
visited_list: &mut VisitedList,
points_scorer: &mut FilteredScorer,
) {
let limit = self.get_m(level);
let mut points_ids: Vec<PointOffsetType> = Vec::with_capacity(2 * limit);
while let Some(candidate) = searcher.candidates.pop() {
if candidate.score < searcher.lower_bound() {
break;
}
points_ids.clear();
self.links_map(candidate.idx, level, |link| {
if !visited_list.check_and_update_visited(link) {
points_ids.push(link);
}
});
let scores = points_scorer.score_points(&mut points_ids, limit);
scores
.iter()
.copied()
.for_each(|score_point| searcher.process_candidate(score_point));
}
}
fn search_on_level(
&self,
level_entry: ScoredPointOffset,
level: usize,
ef: usize,
points_scorer: &mut FilteredScorer,
) -> FixedLengthPriorityQueue<ScoredPointOffset> {
let mut visited_list = self.get_visited_list_from_pool();
visited_list.check_and_update_visited(level_entry.idx);
let mut search_context = SearchContext::new(level_entry, ef);
self._search_on_level(&mut search_context, level, &mut visited_list, points_scorer);
self.return_visited_list_to_pool(visited_list);
search_context.nearest
}
/// Greedy searches for entry point of level `target_level`.
/// Beam size is 1.
fn search_entry(
&self,
entry_point: PointOffsetType,
top_level: usize,
target_level: usize,
points_scorer: &mut FilteredScorer,
) -> ScoredPointOffset {
let mut links: Vec<PointOffsetType> = Vec::with_capacity(2 * self.get_m(0));
let mut current_point = ScoredPointOffset {
idx: entry_point,
score: points_scorer.score_point(entry_point),
};
for level in rev_range(top_level, target_level) {
let limit = self.get_m(level);
let mut changed = true;
while changed {
changed = false;
links.clear();
self.links_map(current_point.idx, level, |link| {
links.push(link);
});
let scores = points_scorer.score_points(&mut links, limit);
scores.iter().copied().for_each(|score_point| {
if score_point.score > current_point.score {
changed = true;
current_point = score_point;
}
});
}
}
current_point
}
}
impl<TGraphLinks: GraphLinks> GraphLayersBase for GraphLayers<TGraphLinks> {
fn get_visited_list_from_pool(&self) -> VisitedList {
self.visited_pool.get(self.links.num_points())
}
fn return_visited_list_to_pool(&self, visited_list: VisitedList) {
self.visited_pool.return_back(visited_list);
}
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, mut f: F)
where
F: FnMut(PointOffsetType),
{
for link in self.links.links(point_id, level) {
f(*link);
}
}
fn get_m(&self, level: usize) -> usize {
if level == 0 {
self.m0
} else {
self.m
}
}
}
/// Object contains links between nodes for HNSW search
///
/// Assume all scores are similarities. Larger score = closer points
impl<TGraphLinks: GraphLinks> GraphLayers<TGraphLinks> {
pub fn point_level(&self, point_id: PointOffsetType) -> usize {
self.links.point_level(point_id)
}
pub fn search(
&self,
top: usize,
ef: usize,
mut points_scorer: FilteredScorer,
) -> Vec<ScoredPointOffset> {
let entry_point = match self
.entry_points
.get_entry_point(|point_id| points_scorer.check_vector(point_id))
{
None => return vec![],
Some(ep) => ep,
};
let zero_level_entry = self.search_entry(
entry_point.point_id,
entry_point.level,
0,
&mut points_scorer,
);
let nearest = self.search_on_level(zero_level_entry, 0, max(top, ef), &mut points_scorer);
nearest.into_iter().take(top).collect_vec()
}
pub fn get_path(path: &Path) -> PathBuf {
path.join(HNSW_GRAPH_FILE)
}
pub fn get_links_path(path: &Path) -> PathBuf {
path.join(HNSW_LINKS_FILE)
}
pub fn num_points(&self) -> usize {
self.links.num_points()
}
}
impl<TGraphLinks> GraphLayers<TGraphLinks>
where
TGraphLinks: GraphLinks,
{
pub fn load(graph_path: &Path, links_path: &Path) -> OperationResult<Self> {
let try_self: Result<Self, FileStorageError> = if links_path.exists() {
read_bin(graph_path)
} else {
Err(FileStorageError::generic(format!(
"Links file does not exists: {links_path:?}"
)))
};
match try_self {
Ok(mut slf) => {
let links = TGraphLinks::load_from_file(links_path)?;
slf.links = links;
Ok(slf)
}
Err(err) => {
let try_legacy: Result<GraphLayersBackwardCompatibility, _> = read_bin(graph_path);
if let Ok(legacy) = try_legacy {
log::debug!("Converting legacy graph to new format");
let mut converter = GraphLinksConverter::new(legacy.links_layers);
converter.save_as(links_path)?;
let links = TGraphLinks::from_converter(converter)?;
let slf = Self {
m: legacy.m,
m0: legacy.m0,
ef_construct: legacy.ef_construct,
links,
entry_points: legacy.entry_points,
visited_pool: VisitedPool::new(),
};
slf.save(graph_path)?;
Ok(slf)
} else {
Err(err)?
}
}
}
}
pub fn save(&self, path: &Path) -> OperationResult<()> {
Ok(atomic_save_bin(path, self)?)
}
}
impl GraphLayers<GraphLinksMmap> {
pub fn prefault_mmap_pages(&self, path: &Path) -> Option<mmap_ops::PrefaultMmapPages> {
self.links.prefault_mmap_pages(path)
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tempfile::Builder;
use super::*;
use crate::data_types::vectors::VectorElementType;
use crate::fixtures::index_fixtures::{
random_vector, FakeFilterContext, TestRawScorerProducer,
};
use crate::index::hnsw_index::graph_links::GraphLinksRam;
use crate::index::hnsw_index::tests::create_graph_layer_fixture;
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric};
fn search_in_graph<TGraphLinks: GraphLinks>(
query: &[VectorElementType], | vector_storage: &TestRawScorerProducer<CosineMetric>,
graph: &GraphLayers<TGraphLinks>,
) -> Vec<ScoredPointOffset> {
let fake_filter_context = FakeFilterContext {};
let raw_scorer = vector_storage.get_raw_scorer(query.to_owned());
let scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let ef = 16;
graph.search(top, ef, scorer)
}
const M: usize = 8;
#[test]
fn test_search_on_level() {
let dim = 8;
let m = 8;
let ef_construct = 32;
let entry_points_num = 10;
let num_vectors = 10;
let mut rng = StdRng::seed_from_u64(42);
let vector_holder =
TestRawScorerProducer::<DotProductMetric>::new(dim, num_vectors, &mut rng);
let mut graph_layers = GraphLayers {
m,
m0: 2 * m,
ef_construct,
links: GraphLinksRam::default(),
entry_points: EntryPoints::new(entry_points_num),
visited_pool: VisitedPool::new(),
};
let mut graph_links = vec![vec![Vec::new()]; num_vectors];
graph_links[0][0] = vec![1, 2, 3, 4, 5, 6];
graph_layers.links =
GraphLinksRam::from_converter(GraphLinksConverter::new(graph_links.clone())).unwrap();
let linking_idx: PointOffsetType = 7;
let fake_filter_context = FakeFilterContext {};
let added_vector = vector_holder.vectors.get(linking_idx).to_vec();
let raw_scorer = vector_holder.get_raw_scorer(added_vector);
let mut scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let nearest_on_level = graph_layers.search_on_level(
ScoredPointOffset {
idx: 0,
score: scorer.score_point(0),
},
0,
32,
&mut scorer,
);
assert_eq!(nearest_on_level.len(), graph_links[0][0].len() + 1);
for nearest in &nearest_on_level {
// eprintln!("nearest = {:#?}", nearest);
assert_eq!(
nearest.score,
scorer.score_internal(linking_idx, nearest.idx)
)
}
}
#[test]
fn test_save_and_load() {
let num_vectors = 100;
let dim = 8;
let top = 5;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("graph_dir").tempdir().unwrap();
let links_path = GraphLayers::<GraphLinksRam>::get_links_path(dir.path());
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
false,
&mut rng,
Some(&links_path),
);
let query = random_vector(&mut rng, dim);
let res1 = search_in_graph(&query, top, &vector_holder, &graph_layers);
let path = GraphLayers::<GraphLinksRam>::get_path(dir.path());
graph_layers.save(&path).unwrap();
let graph2 = GraphLayers::<GraphLinksRam>::load(&path, &links_path).unwrap();
let res2 = search_in_graph(&query, top, &vector_holder, &graph2);
assert_eq!(res1, res2)
}
#[test]
fn test_add_points() {
let num_vectors = 1000;
let dim = 8;
let mut rng = StdRng::seed_from_u64(42);
type M = CosineMetric;
let (vector_holder, graph_layers) =
create_graph_layer_fixture::<M, _>(num_vectors, M, dim, false, &mut rng, None);
let main_entry = graph_layers
.entry_points
.get_entry_point(|_x| true)
.expect("Expect entry point to exists");
assert!(main_entry.level > 0);
let num_levels = (0..num_vectors)
.map(|i| graph_layers.links.point_level(i as PointOffsetType))
.max()
.unwrap();
assert_eq!(main_entry.level, num_levels);
let total_links_0 = (0..num_vectors)
.map(|i| graph_layers.links.links(i as PointOffsetType, 0).len())
.sum::<usize>();
eprintln!("total_links_0 = {total_links_0:#?}");
eprintln!("num_vectors = {num_vectors:#?}");
assert!(total_links_0 > 0);
assert!(total_links_0 as f64 / num_vectors as f64 > M as f64);
let top = 5;
let query = random_vector(&mut rng, dim);
let processed_query = M::preprocess(&query).unwrap_or_else(|| query.clone());
let mut reference_top = FixedLengthPriorityQueue::new(top);
for idx in 0..vector_holder.vectors.len() as PointOffsetType {
let vec = &vector_holder.vectors.get(idx);
reference_top.push(ScoredPointOffset {
idx,
score: M::similarity(vec, &processed_query),
});
}
let graph_search = search_in_graph(&query, top, &vector_holder, &graph_layers);
assert_eq!(reference_top.into_vec(), graph_search);
}
#[test]
#[ignore]
fn test_draw_hnsw_graph() {
let dim = 2;
let num_vectors = 500;
let mut rng = StdRng::seed_from_u64(42);
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
true,
&mut rng,
None,
);
let graph_json = serde_json::to_string_pretty(&graph_layers).unwrap();
let vectors_json = serde_json::to_string_pretty(
&(0..vector_holder.vectors.len() as PointOffsetType)
.map(|point_id| vector_holder.vectors.get(point_id).to_vec())
.collect_vec(),
)
.unwrap();
let mut file = File::create("graph.json").unwrap();
file.write_all(
format!("{{ \"graph\": {graph_json}, \n \"vectors\": {vectors_json} }}").as_bytes(),
)
.unwrap();
}
} | top: usize, | random_line_split |
Modules.py | # written by: S. Ali Ahmadi
# last modified: 7/30/2018 - 11:25 PM
#
#
# These modules are written for the purpose of Pattern Recognition course syllabus.
# Some of the functions can be used for general purposes (i.e. read_image)
#
from osgeo import gdal
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
gdal.UseExceptions()
# ######################################################################################################################
def read_image(pathname, filename, normalize=False, stats=False):
"""
:param pathname: directory to the image.
:param filename: image name with its extension.
examples:
>>> pathname = './HSI_images/'
>>> filename = 'image1.tif'
:param normalize: if True, each band will be normalized between 0 & 1.
:param stats: if True, statistics of each band will be printed.
also the function will put out a text file containing statistics.
:return: read_image(), inputs the filename and directory of an
image and then puts out the image and its numpy array.
other options are also available to extract image info.
NOTE: be careful about the number of rows and columns.
"""
try:
data = gdal.Open(pathname + filename, gdal.GA_ReadOnly)
except RuntimeError:
print('Unable to open input file')
sys.exit(1)
data_array = data.ReadAsArray()
# Printing # of Bands, Columns, and Rows, respectively
print(' Rows: {row} \n'.format(row=data.RasterXSize),
'Columns: {cols} \n'.format(cols=data.RasterYSize),
'Bands: {bands} \n'.format(bands=data.RasterCount))
# Further information is available in the following link and other GDAL search results.
# https: // www.gdal.org / gdal_tutorial.html
# *****************************************************************************************************************
if stats:
# creating a stats file and writing the statistics in it.
# following lines are changing the output destination of the system
# from console to the text file.
# actually, we are writing directly in the file.
orig_stdout = sys.stdout
f = open(pathname + 'statistics.txt', 'w')
sys.stdout = f
print(' Rows: {row} \n'.format(row=data.RasterXSize),
'Columns: {cols} \n'.format(cols=data.RasterYSize),
'Bands: {bands}'.format(bands=data.RasterCount))
print("No Data Value = NDV \n")
for band in range(data.RasterCount):
band += 1
print("#", band, end="")
srcband = data.GetRasterBand(band)
if srcband is None:
continue
stats = srcband.GetStatistics(True, True)
if stats is None:
continue
print(" Scale", srcband.GetScale(), end="")
print(" NDV", srcband.GetNoDataValue())
print(" Min = %.3f, Max = %.3f, Mean = %.3f, Std = %.3f \n" %
(stats[0], stats[1], stats[2], stats[3]))
for band in range(data.RasterCount):
band += 1
srcband = data.GetRasterBand(band)
stats = srcband.GetStatistics(True, True)
print("%d, %.3f, %.3f, %.3f, %.3f" %
(band, stats[0], stats[1], stats[2], stats[3]))
sys.stdout = orig_stdout
f.close()
# *****************************************************************************************************************
if normalize:
temp = data_array.copy()/1000
data_array = np.zeros_like(temp, np.float32)
for i in range(temp.shape[0]):
# the shape axis is different due to the order in which GDAL reads data.
band = temp[i, :, :]
minimum = np.amin(band)
maximum = np.amax(band)
data_array[i, :, :] = np.divide((band - minimum), (maximum - minimum))
return data, data_array.T
# ######################################################################################################################
def array_to_raster(array, pathname, filename, src_file=None):
"""
:param array: The input array that is going to be written on the disk as
GDAL raster. The raster has parameters such as projection, geo-
transform, etc. These parameters are taken from the source file
or if not specified, are set to zero.
:param pathname: Folder to which the file is going to save.
:param filename: Name of the output raster with its format.
:param src_file: An optional file. If specified, the geographic information will
be taken from it; if not, the geo information will be set to default.
:return: function returns the output raster in GDAL format and writes it
to the disk.
"""
dst_filename = pathname + filename
rows = array.shape[0]
cols = array.shape[1]
if array.ndim == 3:
num_bands = array.shape[2]
else:
num_bands = 1
# *****************************************************************************************************************
if src_file:
geo_transform = src_file.GetGeoTransform()
projection = src_file.GetProjectionRef() # Projection
# Need a driver object. By default, we use GeoTIFF
driver = gdal.GetDriverByName('GTiff')
outfile = driver.Create(dst_filename, xsize=cols, ysize=rows,
bands=num_bands, eType=gdal.GDT_Float32)
outfile.SetGeoTransform(geo_transform)
outfile.SetProjection(projection)
if array.ndim == 3:
for b in range(num_bands):
outfile.GetRasterBand(b + 1).WriteArray(array[:, :, b].astype(np.float32))
else:
outfile.GetRasterBand(1).WriteArray(array.astype(np.float32))
else:
# Need a driver object. By default, we use GeoTIFF
driver = gdal.GetDriverByName('GTiff')
outfile = driver.Create(dst_filename, xsize=cols, ysize=rows,
bands=num_bands, eType=gdal.GDT_Float32)
if array.ndim == 3:
for b in range(num_bands):
outfile.GetRasterBand(b + 1).WriteArray(array[:, :, b].astype(np.float32))
else:
outfile.GetRasterBand(1).WriteArray(array.astype(np.float32))
return outfile
# ######################################################################################################################
def read_roi(pathname, filename, separate=False, percent=0.7):
"""
:param pathname: directory to the ROI image.
:param filename: image name with its extension.
:param separate: if True, it means that test/train ROI files are not separated
and they should be created from the original file; so the file will be
split into two files with a specified split percent.
:param percent: specifies the split percentage for test and train data.
:return: outputs the ROI image in uint8 type. Also the labels of the classes
are exported in the labels variable.
*** (it should be completed to return ROIs ready for machine learning)
*** (maybe in another function like, sort_roi)
"""
roi_ds = gdal.Open(pathname+filename, gdal.GA_ReadOnly)
roi = roi_ds.GetRasterBand(1).ReadAsArray().astype(np.uint8)
labels = np.unique(roi[roi > 0])
print('Train/Test data includes {n} classes: {classes}'
.format(n=labels.size, classes=labels))
n_samples = (roi > 0).sum()
print('It has {n} samples.'.format(n=n_samples))
# *****************************************************************************************************************
if separate:
train = np.zeros_like(roi)
test = np.zeros_like(roi)
for l in labels:
cls = roi == l # looping through classes
np.put(train, np.random.permutation(np.flatnonzero(cls))[0:int(len(np.flatnonzero(cls)) * percent)], l)
np.put(test, np.random.permutation(np.flatnonzero(cls))[int(len(np.flatnonzero(cls)) * percent)+1:], l)
array_to_raster(train, pathname, 'PySeparateTrain.tif', roi_ds)
array_to_raster(test, pathname, 'PySeparateTest.tif', roi_ds)
# printing train /test files information into text files.
orig_stdout = sys.stdout
f = open(pathname + 'GroundTruth.txt', 'w')
sys.stdout = f
print("---------number of samples information.")
unique_elements, counts_elements = np.unique(roi, return_counts=True)
print(unique_elements)
for l in range(len(labels)):
print("class %d has %d samples." % (l, counts_elements[l]))
sys.stdout = orig_stdout
f.close()
return roi.T, labels
# ######################################################################################################################
def spectrum_plot(data_array, roi, labels):
"""
:param data_array: the output array from read_image module which is
a numpy array. note that this array is transposed in previous
steps.
:param roi: the ROI image from read_roi module.
:param labels: output of the read_roi module which indicates the DN values
of the samples of each class from ROI image.
:return: the function does not return any specific value; but it shows the
a subplot containing spectral curves of all classes.
"""
plt.figure()
plt.rc('font', size=8)
plt.suptitle('spectral reflectance curve of training samples in each class',
fontsize=15, fontname={'serif'})
for c in range(labels.size):
x = data_array[roi == labels[c], :]
plt.subplot(3, 5, c + 1)
for b in range(0, x.shape[0], 5):
|
plt.show()
# ######################################################################################################################
def difference(time1, time2, channel=0, datype=float):
"""
:param time1: image of time 1, which is pre-phenomena;
:param time2: image of time 2, which is post-phenomena; sizes must
agree each other.
:param channel: the default value is 0, which means all the bands;
but the user can specify to perform the application only on
one specific channel (band) of the image.
:param datype: The default value for data type is considered to be
float; but user can change to any acceptable data type they want.
:return: the function computes the difference of two images for
two different times. The difference image and its size
are two outputs of the function.
"""
# checking for array sizes to be matched.
try:
np.shape(time1) == np.shape(time2)
except ValueError:
print('Input images are not the same size or /n does not have same number of bands.')
# changing data type to what the user wants to be.
if datype is float:
time1.astype(float)
time2.astype(float)
else:
time1.astype(datype)
time2.astype(datype)
numbands = np.shape(time1)[0]
# computing difference map from both images.
if channel is 0:
# default case is switched. function will use all the bands.
diff_image = np.zeros_like(time1)
for i in range(numbands):
diff_image[i, :, :] = time2[i, :, :] - time1[i, :, :]
else:
diff_image = time2[channel, :, :] - time1[channel, :, :]
print(np.shape(diff_image))
return diff_image
# ######################################################################################################################
def pca_transform(reshaped_array, n):
"""
:param reshaped_array: an array with the shape of (n_samples, n_features).
:param n: number of principle components that should remain after transformation.
:return: a new array obtained from PCA with the shape of (n_samples, n_components)
"""
pca = PCA(n_components=n)
new_array = pca.fit_transform(reshaped_array)
return new_array
# ######################################################################################################################
| plt.scatter(range(x.shape[1]), x[b, :], marker='.', color='k',
s=0.3, alpha=0.6)
plt.tick_params(axis='y', length=3.0, pad=1.0, labelsize=7)
plt.tick_params(axis='x', length=0, labelsize=0) | conditional_block |
Modules.py | # written by: S. Ali Ahmadi
# last modified: 7/30/2018 - 11:25 PM
#
#
# These modules are written for the purpose of Pattern Recognition course syllabus.
# Some of the functions can be used for general purposes (i.e. read_image)
#
from osgeo import gdal
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
gdal.UseExceptions()
# ######################################################################################################################
def read_image(pathname, filename, normalize=False, stats=False):
"""
:param pathname: directory to the image.
:param filename: image name with its extension.
examples:
>>> pathname = './HSI_images/'
>>> filename = 'image1.tif'
:param normalize: if True, each band will be normalized between 0 & 1.
:param stats: if True, statistics of each band will be printed.
also the function will put out a text file containing statistics.
:return: read_image(), inputs the filename and directory of an
image and then puts out the image and its numpy array.
other options are also available to extract image info.
NOTE: be careful about the number of rows and columns.
"""
try:
data = gdal.Open(pathname + filename, gdal.GA_ReadOnly)
except RuntimeError:
print('Unable to open input file')
sys.exit(1)
data_array = data.ReadAsArray()
# Printing # of Bands, Columns, and Rows, respectively
print(' Rows: {row} \n'.format(row=data.RasterXSize),
'Columns: {cols} \n'.format(cols=data.RasterYSize),
'Bands: {bands} \n'.format(bands=data.RasterCount))
# Further information is available in the following link and other GDAL search results.
# https: // www.gdal.org / gdal_tutorial.html
# *****************************************************************************************************************
if stats:
# creating a stats file and writing the statistics in it.
# following lines are changing the output destination of the system
# from console to the text file.
# actually, we are writing directly in the file.
orig_stdout = sys.stdout
f = open(pathname + 'statistics.txt', 'w')
sys.stdout = f
print(' Rows: {row} \n'.format(row=data.RasterXSize),
'Columns: {cols} \n'.format(cols=data.RasterYSize),
'Bands: {bands}'.format(bands=data.RasterCount))
print("No Data Value = NDV \n")
for band in range(data.RasterCount):
band += 1
print("#", band, end="")
srcband = data.GetRasterBand(band)
if srcband is None:
continue
stats = srcband.GetStatistics(True, True)
if stats is None:
continue
print(" Scale", srcband.GetScale(), end="")
print(" NDV", srcband.GetNoDataValue())
print(" Min = %.3f, Max = %.3f, Mean = %.3f, Std = %.3f \n" %
(stats[0], stats[1], stats[2], stats[3]))
for band in range(data.RasterCount):
band += 1
srcband = data.GetRasterBand(band)
stats = srcband.GetStatistics(True, True)
print("%d, %.3f, %.3f, %.3f, %.3f" %
(band, stats[0], stats[1], stats[2], stats[3]))
sys.stdout = orig_stdout
f.close()
# *****************************************************************************************************************
if normalize:
temp = data_array.copy()/1000
data_array = np.zeros_like(temp, np.float32)
for i in range(temp.shape[0]):
# the shape axis is different due to the order in which GDAL reads data.
band = temp[i, :, :]
minimum = np.amin(band)
maximum = np.amax(band)
data_array[i, :, :] = np.divide((band - minimum), (maximum - minimum))
return data, data_array.T
# ######################################################################################################################
def array_to_raster(array, pathname, filename, src_file=None):
"""
:param array: The input array that is going to be written on the disk as
GDAL raster. The raster has parameters such as projection, geo-
transform, etc. These parameters are taken from the source file
or if not specified, are set to zero.
:param pathname: Folder to which the file is going to save.
:param filename: Name of the output raster with its format.
:param src_file: An optional file. If specified, the geographic information will
be taken from it; if not, the geo information will be set to default.
:return: function returns the output raster in GDAL format and writes it
to the disk.
"""
dst_filename = pathname + filename
rows = array.shape[0]
cols = array.shape[1]
if array.ndim == 3:
num_bands = array.shape[2]
else:
num_bands = 1
# *****************************************************************************************************************
if src_file:
geo_transform = src_file.GetGeoTransform()
projection = src_file.GetProjectionRef() # Projection
# Need a driver object. By default, we use GeoTIFF
driver = gdal.GetDriverByName('GTiff')
outfile = driver.Create(dst_filename, xsize=cols, ysize=rows,
bands=num_bands, eType=gdal.GDT_Float32)
outfile.SetGeoTransform(geo_transform)
outfile.SetProjection(projection)
if array.ndim == 3:
for b in range(num_bands):
outfile.GetRasterBand(b + 1).WriteArray(array[:, :, b].astype(np.float32))
else:
outfile.GetRasterBand(1).WriteArray(array.astype(np.float32))
else:
# Need a driver object. By default, we use GeoTIFF
driver = gdal.GetDriverByName('GTiff')
outfile = driver.Create(dst_filename, xsize=cols, ysize=rows,
bands=num_bands, eType=gdal.GDT_Float32)
if array.ndim == 3:
for b in range(num_bands):
outfile.GetRasterBand(b + 1).WriteArray(array[:, :, b].astype(np.float32))
else:
outfile.GetRasterBand(1).WriteArray(array.astype(np.float32))
return outfile
# ######################################################################################################################
def read_roi(pathname, filename, separate=False, percent=0.7):
"""
:param pathname: directory to the ROI image.
:param filename: image name with its extension.
:param separate: if True, it means that test/train ROI files are not separated
and they should be created from the original file; so the file will be
split into two files with a specified split percent.
:param percent: specifies the split percentage for test and train data.
:return: outputs the ROI image in uint8 type. Also the labels of the classes
are exported in the labels variable.
*** (it should be completed to return ROIs ready for machine learning)
*** (maybe in another function like, sort_roi)
"""
roi_ds = gdal.Open(pathname+filename, gdal.GA_ReadOnly)
roi = roi_ds.GetRasterBand(1).ReadAsArray().astype(np.uint8)
labels = np.unique(roi[roi > 0])
print('Train/Test data includes {n} classes: {classes}'
.format(n=labels.size, classes=labels))
n_samples = (roi > 0).sum()
print('It has {n} samples.'.format(n=n_samples))
# *****************************************************************************************************************
if separate:
train = np.zeros_like(roi)
test = np.zeros_like(roi)
for l in labels:
cls = roi == l # looping through classes
np.put(train, np.random.permutation(np.flatnonzero(cls))[0:int(len(np.flatnonzero(cls)) * percent)], l)
np.put(test, np.random.permutation(np.flatnonzero(cls))[int(len(np.flatnonzero(cls)) * percent)+1:], l)
array_to_raster(train, pathname, 'PySeparateTrain.tif', roi_ds)
array_to_raster(test, pathname, 'PySeparateTest.tif', roi_ds)
# printing train /test files information into text files.
orig_stdout = sys.stdout
f = open(pathname + 'GroundTruth.txt', 'w')
sys.stdout = f
print("---------number of samples information.")
unique_elements, counts_elements = np.unique(roi, return_counts=True)
print(unique_elements)
for l in range(len(labels)):
print("class %d has %d samples." % (l, counts_elements[l]))
sys.stdout = orig_stdout
f.close()
return roi.T, labels
# ######################################################################################################################
def spectrum_plot(data_array, roi, labels):
"""
:param data_array: the output array from read_image module which is
a numpy array. note that this array is transposed in previous
steps.
:param roi: the ROI image from read_roi module.
:param labels: output of the read_roi module which indicates the DN values
of the samples of each class from ROI image.
:return: the function does not return any specific value; but it shows the
a subplot containing spectral curves of all classes.
"""
plt.figure()
plt.rc('font', size=8)
plt.suptitle('spectral reflectance curve of training samples in each class',
fontsize=15, fontname={'serif'})
for c in range(labels.size):
x = data_array[roi == labels[c], :]
plt.subplot(3, 5, c + 1)
for b in range(0, x.shape[0], 5):
plt.scatter(range(x.shape[1]), x[b, :], marker='.', color='k',
s=0.3, alpha=0.6)
plt.tick_params(axis='y', length=3.0, pad=1.0, labelsize=7)
plt.tick_params(axis='x', length=0, labelsize=0)
plt.show()
# ######################################################################################################################
def difference(time1, time2, channel=0, datype=float):
"""
:param time1: image of time 1, which is pre-phenomena;
:param time2: image of time 2, which is post-phenomena; sizes must
agree each other.
:param channel: the default value is 0, which means all the bands;
but the user can specify to perform the application only on
one specific channel (band) of the image.
:param datype: The default value for data type is considered to be
float; but user can change to any acceptable data type they want.
:return: the function computes the difference of two images for
two different times. The difference image and its size
are two outputs of the function.
"""
# checking for array sizes to be matched.
try:
np.shape(time1) == np.shape(time2)
except ValueError:
print('Input images are not the same size or /n does not have same number of bands.')
# changing data type to what the user wants to be.
if datype is float:
time1.astype(float)
time2.astype(float)
else:
time1.astype(datype)
time2.astype(datype)
numbands = np.shape(time1)[0]
# computing difference map from both images.
if channel is 0:
# default case is switched. function will use all the bands.
diff_image = np.zeros_like(time1)
for i in range(numbands):
diff_image[i, :, :] = time2[i, :, :] - time1[i, :, :]
else:
diff_image = time2[channel, :, :] - time1[channel, :, :]
print(np.shape(diff_image))
return diff_image
# ######################################################################################################################
def | (reshaped_array, n):
"""
:param reshaped_array: an array with the shape of (n_samples, n_features).
:param n: number of principle components that should remain after transformation.
:return: a new array obtained from PCA with the shape of (n_samples, n_components)
"""
pca = PCA(n_components=n)
new_array = pca.fit_transform(reshaped_array)
return new_array
# ######################################################################################################################
| pca_transform | identifier_name |
Modules.py | # written by: S. Ali Ahmadi
# last modified: 7/30/2018 - 11:25 PM
#
#
# These modules are written for the purpose of Pattern Recognition course syllabus.
# Some of the functions can be used for general purposes (i.e. read_image)
#
from osgeo import gdal
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
gdal.UseExceptions()
# ######################################################################################################################
def read_image(pathname, filename, normalize=False, stats=False):
"""
:param pathname: directory to the image.
:param filename: image name with its extension.
examples:
>>> pathname = './HSI_images/'
>>> filename = 'image1.tif'
:param normalize: if True, each band will be normalized between 0 & 1.
:param stats: if True, statistics of each band will be printed.
also the function will put out a text file containing statistics.
:return: read_image(), inputs the filename and directory of an
image and then puts out the image and its numpy array.
other options are also available to extract image info.
NOTE: be careful about the number of rows and columns.
"""
try:
data = gdal.Open(pathname + filename, gdal.GA_ReadOnly)
except RuntimeError:
print('Unable to open input file')
sys.exit(1)
data_array = data.ReadAsArray()
# Printing # of Bands, Columns, and Rows, respectively
print(' Rows: {row} \n'.format(row=data.RasterXSize),
'Columns: {cols} \n'.format(cols=data.RasterYSize),
'Bands: {bands} \n'.format(bands=data.RasterCount))
# Further information is available in the following link and other GDAL search results.
# https: // www.gdal.org / gdal_tutorial.html
# *****************************************************************************************************************
if stats:
# creating a stats file and writing the statistics in it.
# following lines are changing the output destination of the system
# from console to the text file.
# actually, we are writing directly in the file.
orig_stdout = sys.stdout
f = open(pathname + 'statistics.txt', 'w')
sys.stdout = f
print(' Rows: {row} \n'.format(row=data.RasterXSize),
'Columns: {cols} \n'.format(cols=data.RasterYSize),
'Bands: {bands}'.format(bands=data.RasterCount))
print("No Data Value = NDV \n")
for band in range(data.RasterCount):
band += 1
print("#", band, end="")
srcband = data.GetRasterBand(band)
if srcband is None:
continue
stats = srcband.GetStatistics(True, True)
if stats is None:
continue
print(" Scale", srcband.GetScale(), end="")
print(" NDV", srcband.GetNoDataValue())
print(" Min = %.3f, Max = %.3f, Mean = %.3f, Std = %.3f \n" %
(stats[0], stats[1], stats[2], stats[3]))
for band in range(data.RasterCount):
band += 1
srcband = data.GetRasterBand(band)
stats = srcband.GetStatistics(True, True)
print("%d, %.3f, %.3f, %.3f, %.3f" %
(band, stats[0], stats[1], stats[2], stats[3]))
sys.stdout = orig_stdout
f.close()
# *****************************************************************************************************************
if normalize:
temp = data_array.copy()/1000
data_array = np.zeros_like(temp, np.float32)
for i in range(temp.shape[0]):
# the shape axis is different due to the order in which GDAL reads data.
band = temp[i, :, :]
minimum = np.amin(band)
maximum = np.amax(band)
data_array[i, :, :] = np.divide((band - minimum), (maximum - minimum))
return data, data_array.T
# ######################################################################################################################
def array_to_raster(array, pathname, filename, src_file=None):
"""
:param array: The input array that is going to be written on the disk as
GDAL raster. The raster has parameters such as projection, geo-
transform, etc. These parameters are taken from the source file
or if not specified, are set to zero.
:param pathname: Folder to which the file is going to save.
:param filename: Name of the output raster with its format.
:param src_file: An optional file. If specified, the geographic information will
be taken from it; if not, the geo information will be set to default.
:return: function returns the output raster in GDAL format and writes it
to the disk.
"""
dst_filename = pathname + filename
rows = array.shape[0]
cols = array.shape[1]
if array.ndim == 3:
num_bands = array.shape[2]
else:
num_bands = 1
# ***************************************************************************************************************** | projection = src_file.GetProjectionRef() # Projection
# Need a driver object. By default, we use GeoTIFF
driver = gdal.GetDriverByName('GTiff')
outfile = driver.Create(dst_filename, xsize=cols, ysize=rows,
bands=num_bands, eType=gdal.GDT_Float32)
outfile.SetGeoTransform(geo_transform)
outfile.SetProjection(projection)
if array.ndim == 3:
for b in range(num_bands):
outfile.GetRasterBand(b + 1).WriteArray(array[:, :, b].astype(np.float32))
else:
outfile.GetRasterBand(1).WriteArray(array.astype(np.float32))
else:
# Need a driver object. By default, we use GeoTIFF
driver = gdal.GetDriverByName('GTiff')
outfile = driver.Create(dst_filename, xsize=cols, ysize=rows,
bands=num_bands, eType=gdal.GDT_Float32)
if array.ndim == 3:
for b in range(num_bands):
outfile.GetRasterBand(b + 1).WriteArray(array[:, :, b].astype(np.float32))
else:
outfile.GetRasterBand(1).WriteArray(array.astype(np.float32))
return outfile
# ######################################################################################################################
def read_roi(pathname, filename, separate=False, percent=0.7):
"""
:param pathname: directory to the ROI image.
:param filename: image name with its extension.
:param separate: if True, it means that test/train ROI files are not separated
and they should be created from the original file; so the file will be
split into two files with a specified split percent.
:param percent: specifies the split percentage for test and train data.
:return: outputs the ROI image in uint8 type. Also the labels of the classes
are exported in the labels variable.
*** (it should be completed to return ROIs ready for machine learning)
*** (maybe in another function like, sort_roi)
"""
roi_ds = gdal.Open(pathname+filename, gdal.GA_ReadOnly)
roi = roi_ds.GetRasterBand(1).ReadAsArray().astype(np.uint8)
labels = np.unique(roi[roi > 0])
print('Train/Test data includes {n} classes: {classes}'
.format(n=labels.size, classes=labels))
n_samples = (roi > 0).sum()
print('It has {n} samples.'.format(n=n_samples))
# *****************************************************************************************************************
if separate:
train = np.zeros_like(roi)
test = np.zeros_like(roi)
for l in labels:
cls = roi == l # looping through classes
np.put(train, np.random.permutation(np.flatnonzero(cls))[0:int(len(np.flatnonzero(cls)) * percent)], l)
np.put(test, np.random.permutation(np.flatnonzero(cls))[int(len(np.flatnonzero(cls)) * percent)+1:], l)
array_to_raster(train, pathname, 'PySeparateTrain.tif', roi_ds)
array_to_raster(test, pathname, 'PySeparateTest.tif', roi_ds)
# printing train /test files information into text files.
orig_stdout = sys.stdout
f = open(pathname + 'GroundTruth.txt', 'w')
sys.stdout = f
print("---------number of samples information.")
unique_elements, counts_elements = np.unique(roi, return_counts=True)
print(unique_elements)
for l in range(len(labels)):
print("class %d has %d samples." % (l, counts_elements[l]))
sys.stdout = orig_stdout
f.close()
return roi.T, labels
# ######################################################################################################################
def spectrum_plot(data_array, roi, labels):
"""
:param data_array: the output array from read_image module which is
a numpy array. note that this array is transposed in previous
steps.
:param roi: the ROI image from read_roi module.
:param labels: output of the read_roi module which indicates the DN values
of the samples of each class from ROI image.
:return: the function does not return any specific value; but it shows the
a subplot containing spectral curves of all classes.
"""
plt.figure()
plt.rc('font', size=8)
plt.suptitle('spectral reflectance curve of training samples in each class',
fontsize=15, fontname={'serif'})
for c in range(labels.size):
x = data_array[roi == labels[c], :]
plt.subplot(3, 5, c + 1)
for b in range(0, x.shape[0], 5):
plt.scatter(range(x.shape[1]), x[b, :], marker='.', color='k',
s=0.3, alpha=0.6)
plt.tick_params(axis='y', length=3.0, pad=1.0, labelsize=7)
plt.tick_params(axis='x', length=0, labelsize=0)
plt.show()
# ######################################################################################################################
def difference(time1, time2, channel=0, datype=float):
"""
:param time1: image of time 1, which is pre-phenomena;
:param time2: image of time 2, which is post-phenomena; sizes must
agree each other.
:param channel: the default value is 0, which means all the bands;
but the user can specify to perform the application only on
one specific channel (band) of the image.
:param datype: The default value for data type is considered to be
float; but user can change to any acceptable data type they want.
:return: the function computes the difference of two images for
two different times. The difference image and its size
are two outputs of the function.
"""
# checking for array sizes to be matched.
try:
np.shape(time1) == np.shape(time2)
except ValueError:
print('Input images are not the same size or /n does not have same number of bands.')
# changing data type to what the user wants to be.
if datype is float:
time1.astype(float)
time2.astype(float)
else:
time1.astype(datype)
time2.astype(datype)
numbands = np.shape(time1)[0]
# computing difference map from both images.
if channel is 0:
# default case is switched. function will use all the bands.
diff_image = np.zeros_like(time1)
for i in range(numbands):
diff_image[i, :, :] = time2[i, :, :] - time1[i, :, :]
else:
diff_image = time2[channel, :, :] - time1[channel, :, :]
print(np.shape(diff_image))
return diff_image
# ######################################################################################################################
def pca_transform(reshaped_array, n):
"""
:param reshaped_array: an array with the shape of (n_samples, n_features).
:param n: number of principle components that should remain after transformation.
:return: a new array obtained from PCA with the shape of (n_samples, n_components)
"""
pca = PCA(n_components=n)
new_array = pca.fit_transform(reshaped_array)
return new_array
# ###################################################################################################################### | if src_file:
geo_transform = src_file.GetGeoTransform() | random_line_split |
Modules.py | # written by: S. Ali Ahmadi
# last modified: 7/30/2018 - 11:25 PM
#
#
# These modules are written for the purpose of Pattern Recognition course syllabus.
# Some of the functions can be used for general purposes (i.e. read_image)
#
from osgeo import gdal
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
gdal.UseExceptions()
# ######################################################################################################################
def read_image(pathname, filename, normalize=False, stats=False):
"""
:param pathname: directory to the image.
:param filename: image name with its extension.
examples:
>>> pathname = './HSI_images/'
>>> filename = 'image1.tif'
:param normalize: if True, each band will be normalized between 0 & 1.
:param stats: if True, statistics of each band will be printed.
also the function will put out a text file containing statistics.
:return: read_image(), inputs the filename and directory of an
image and then puts out the image and its numpy array.
other options are also available to extract image info.
NOTE: be careful about the number of rows and columns.
"""
try:
data = gdal.Open(pathname + filename, gdal.GA_ReadOnly)
except RuntimeError:
print('Unable to open input file')
sys.exit(1)
data_array = data.ReadAsArray()
# Printing # of Bands, Columns, and Rows, respectively
print(' Rows: {row} \n'.format(row=data.RasterXSize),
'Columns: {cols} \n'.format(cols=data.RasterYSize),
'Bands: {bands} \n'.format(bands=data.RasterCount))
# Further information is available in the following link and other GDAL search results.
# https: // www.gdal.org / gdal_tutorial.html
# *****************************************************************************************************************
if stats:
# creating a stats file and writing the statistics in it.
# following lines are changing the output destination of the system
# from console to the text file.
# actually, we are writing directly in the file.
orig_stdout = sys.stdout
f = open(pathname + 'statistics.txt', 'w')
sys.stdout = f
print(' Rows: {row} \n'.format(row=data.RasterXSize),
'Columns: {cols} \n'.format(cols=data.RasterYSize),
'Bands: {bands}'.format(bands=data.RasterCount))
print("No Data Value = NDV \n")
for band in range(data.RasterCount):
band += 1
print("#", band, end="")
srcband = data.GetRasterBand(band)
if srcband is None:
continue
stats = srcband.GetStatistics(True, True)
if stats is None:
continue
print(" Scale", srcband.GetScale(), end="")
print(" NDV", srcband.GetNoDataValue())
print(" Min = %.3f, Max = %.3f, Mean = %.3f, Std = %.3f \n" %
(stats[0], stats[1], stats[2], stats[3]))
for band in range(data.RasterCount):
band += 1
srcband = data.GetRasterBand(band)
stats = srcband.GetStatistics(True, True)
print("%d, %.3f, %.3f, %.3f, %.3f" %
(band, stats[0], stats[1], stats[2], stats[3]))
sys.stdout = orig_stdout
f.close()
# *****************************************************************************************************************
if normalize:
temp = data_array.copy()/1000
data_array = np.zeros_like(temp, np.float32)
for i in range(temp.shape[0]):
# the shape axis is different due to the order in which GDAL reads data.
band = temp[i, :, :]
minimum = np.amin(band)
maximum = np.amax(band)
data_array[i, :, :] = np.divide((band - minimum), (maximum - minimum))
return data, data_array.T
# ######################################################################################################################
def array_to_raster(array, pathname, filename, src_file=None):
"""
:param array: The input array that is going to be written on the disk as
GDAL raster. The raster has parameters such as projection, geo-
transform, etc. These parameters are taken from the source file
or if not specified, are set to zero.
:param pathname: Folder to which the file is going to save.
:param filename: Name of the output raster with its format.
:param src_file: An optional file. If specified, the geographic information will
be taken from it; if not, the geo information will be set to default.
:return: function returns the output raster in GDAL format and writes it
to the disk.
"""
dst_filename = pathname + filename
rows = array.shape[0]
cols = array.shape[1]
if array.ndim == 3:
num_bands = array.shape[2]
else:
num_bands = 1
# *****************************************************************************************************************
if src_file:
geo_transform = src_file.GetGeoTransform()
projection = src_file.GetProjectionRef() # Projection
# Need a driver object. By default, we use GeoTIFF
driver = gdal.GetDriverByName('GTiff')
outfile = driver.Create(dst_filename, xsize=cols, ysize=rows,
bands=num_bands, eType=gdal.GDT_Float32)
outfile.SetGeoTransform(geo_transform)
outfile.SetProjection(projection)
if array.ndim == 3:
for b in range(num_bands):
outfile.GetRasterBand(b + 1).WriteArray(array[:, :, b].astype(np.float32))
else:
outfile.GetRasterBand(1).WriteArray(array.astype(np.float32))
else:
# Need a driver object. By default, we use GeoTIFF
driver = gdal.GetDriverByName('GTiff')
outfile = driver.Create(dst_filename, xsize=cols, ysize=rows,
bands=num_bands, eType=gdal.GDT_Float32)
if array.ndim == 3:
for b in range(num_bands):
outfile.GetRasterBand(b + 1).WriteArray(array[:, :, b].astype(np.float32))
else:
outfile.GetRasterBand(1).WriteArray(array.astype(np.float32))
return outfile
# ######################################################################################################################
def read_roi(pathname, filename, separate=False, percent=0.7):
"""
:param pathname: directory to the ROI image.
:param filename: image name with its extension.
:param separate: if True, it means that test/train ROI files are not separated
and they should be created from the original file; so the file will be
split into two files with a specified split percent.
:param percent: specifies the split percentage for test and train data.
:return: outputs the ROI image in uint8 type. Also the labels of the classes
are exported in the labels variable.
*** (it should be completed to return ROIs ready for machine learning)
*** (maybe in another function like, sort_roi)
"""
roi_ds = gdal.Open(pathname+filename, gdal.GA_ReadOnly)
roi = roi_ds.GetRasterBand(1).ReadAsArray().astype(np.uint8)
labels = np.unique(roi[roi > 0])
print('Train/Test data includes {n} classes: {classes}'
.format(n=labels.size, classes=labels))
n_samples = (roi > 0).sum()
print('It has {n} samples.'.format(n=n_samples))
# *****************************************************************************************************************
if separate:
train = np.zeros_like(roi)
test = np.zeros_like(roi)
for l in labels:
cls = roi == l # looping through classes
np.put(train, np.random.permutation(np.flatnonzero(cls))[0:int(len(np.flatnonzero(cls)) * percent)], l)
np.put(test, np.random.permutation(np.flatnonzero(cls))[int(len(np.flatnonzero(cls)) * percent)+1:], l)
array_to_raster(train, pathname, 'PySeparateTrain.tif', roi_ds)
array_to_raster(test, pathname, 'PySeparateTest.tif', roi_ds)
# printing train /test files information into text files.
orig_stdout = sys.stdout
f = open(pathname + 'GroundTruth.txt', 'w')
sys.stdout = f
print("---------number of samples information.")
unique_elements, counts_elements = np.unique(roi, return_counts=True)
print(unique_elements)
for l in range(len(labels)):
print("class %d has %d samples." % (l, counts_elements[l]))
sys.stdout = orig_stdout
f.close()
return roi.T, labels
# ######################################################################################################################
def spectrum_plot(data_array, roi, labels):
"""
:param data_array: the output array from read_image module which is
a numpy array. note that this array is transposed in previous
steps.
:param roi: the ROI image from read_roi module.
:param labels: output of the read_roi module which indicates the DN values
of the samples of each class from ROI image.
:return: the function does not return any specific value; but it shows the
a subplot containing spectral curves of all classes.
"""
plt.figure()
plt.rc('font', size=8)
plt.suptitle('spectral reflectance curve of training samples in each class',
fontsize=15, fontname={'serif'})
for c in range(labels.size):
x = data_array[roi == labels[c], :]
plt.subplot(3, 5, c + 1)
for b in range(0, x.shape[0], 5):
plt.scatter(range(x.shape[1]), x[b, :], marker='.', color='k',
s=0.3, alpha=0.6)
plt.tick_params(axis='y', length=3.0, pad=1.0, labelsize=7)
plt.tick_params(axis='x', length=0, labelsize=0)
plt.show()
# ######################################################################################################################
def difference(time1, time2, channel=0, datype=float):
|
# ######################################################################################################################
def pca_transform(reshaped_array, n):
"""
:param reshaped_array: an array with the shape of (n_samples, n_features).
:param n: number of principle components that should remain after transformation.
:return: a new array obtained from PCA with the shape of (n_samples, n_components)
"""
pca = PCA(n_components=n)
new_array = pca.fit_transform(reshaped_array)
return new_array
# ######################################################################################################################
| """
:param time1: image of time 1, which is pre-phenomena;
:param time2: image of time 2, which is post-phenomena; sizes must
agree each other.
:param channel: the default value is 0, which means all the bands;
but the user can specify to perform the application only on
one specific channel (band) of the image.
:param datype: The default value for data type is considered to be
float; but user can change to any acceptable data type they want.
:return: the function computes the difference of two images for
two different times. The difference image and its size
are two outputs of the function.
"""
# checking for array sizes to be matched.
try:
np.shape(time1) == np.shape(time2)
except ValueError:
print('Input images are not the same size or /n does not have same number of bands.')
# changing data type to what the user wants to be.
if datype is float:
time1.astype(float)
time2.astype(float)
else:
time1.astype(datype)
time2.astype(datype)
numbands = np.shape(time1)[0]
# computing difference map from both images.
if channel is 0:
# default case is switched. function will use all the bands.
diff_image = np.zeros_like(time1)
for i in range(numbands):
diff_image[i, :, :] = time2[i, :, :] - time1[i, :, :]
else:
diff_image = time2[channel, :, :] - time1[channel, :, :]
print(np.shape(diff_image))
return diff_image | identifier_body |
train_utils.py | import string
import random
import sys
import pickle
import pathlib
import time
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
import imgaug as ia
from imgaug import augmenters as iaa
# sys.path.append(pathlib.Path(__file__).parent)
from dataset import *
import metrics
def create_seq():
SEQ = iaa.Sequential([
iaa.OneOf([
iaa.Fliplr(0.5), # horizontal flips
iaa.Flipud(0.5), # vertically flips
iaa.Crop(percent=(0, 0.2)), # random crops
# Small gaussian blur with random sigma between 0 and 0.5.
# But we only blur about 50% of all images.
iaa.Sometimes(0.5,
iaa.GaussianBlur(sigma=(0, 0.5))
),
# Strengthen or weaken the contrast in each image.
iaa.ContrastNormalization((0.75, 1.5)),
# Add gaussian noise.
# For 50% of all images, we sample the noise once per pixel.
# For the other 50% of all images, we sample the noise per pixel AND
# channel. This can change the color (not only brightness) of the
# pixels.
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
# Make some images brighter and some darker.
# In 20% of all cases, we sample the multiplier once per channel,
# which can end up changing the color of the images.
iaa.Multiply((0.8, 1.2), per_channel=True),
# Apply affine transformations to each image.
# Scale/zoom them, translate/move them, rotate them and shear them.
iaa.Affine(
scale={"x": (1.0 - AUG_SCALE, 1.0 + AUG_SCALE), "y": (1.0 - AUG_SCALE, 1.0 + AUG_SCALE)},
# translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-180, 180),
shear=(-8, 8),
)
])], random_order=True)
return SEQ
def create_callbacks(dataset, name_weights, patience_lr=10, patience_es=150):
mcp_save = ModelCheckpoint('model/validate.weights.best.hdf5', save_best_only=True, monitor='val_loss')
# history = metrics.Histories(dataset)
# mcp_save = AllModelCheckpoint(name_weights)
# reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=patience_lr, verbose=1, min_delta=1e-4, mode='min')
# early_stopping = EarlyStopping(monitor='val_loss', patience=patience_es, verbose=1, mode='auto')
# return [early_stopping, mcp_save, reduce_lr_loss]
# return [f1metrics, early_stopping, mcp_save]
return [dataset, mcp_save]
def load_dataset(filename):
with open(filename, 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
train_dataset = pickle.load(f)
return train_dataset
def next_simple_dataset(dataset, batch_size: int, datatype):
""" Obtain a batch of training data
"""
while True:
x_batch = []
y_batch = []
for i in range(batch_size):
try:
x, y, data_unit, index = create_xy(dataset, datatype)
# x = normalize(x)
x_batch.append(x)
y_batch.append(y)
except StopIteration:
break
x_batch, y_batch = np.array(x_batch), np.array(y_batch)
if datatype != DataType.test:
x_batch = SEQ_CVXTZ.augment_images(x_batch).astype("float32")
x_batch = np.array([normalize(x) for x in x_batch])
# org_shape = x_batch.shape
# org_width = x_batch.shape[1]
# corner = int((org_width - ROI_IMAGE_SIZE) // 2)
# print(f"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}")
# x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]
# resized_x_batch = []
# for x in x_batch:
# img = Image.fromarray(np.uint8(x))
# img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)
# resized_x_batch.append(normalize(np.array(img)))
# print(f"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}")
# yield np.array(resized_x_batch), y_batch
yield np.array(x_batch), y_batch
| epochs=EPOCHS, ):
callbacks = create_callbacks(dataset, model_filename)
dataset.model = model
answers = [data_unit.answer for data_unit in dataset.data_list]
sample_num = len(answers)
# sample_num = len(answers)
train_num = int(sample_num * TRAIN_RATIO)
validate_num = int(sample_num * VALIDATE_RATIO)
steps_per_epoch = train_num // batch_size
# steps_per_epoch = 50
validation_steps = validate_num // batch_size
print(f"train_num:{train_num} validate_num:{validate_num} steps_per_epoch:{steps_per_epoch} validateion_steps:{validation_steps}")
model.fit_generator(generator=next_simple_dataset(dataset, batch_size, DataType.train),
epochs=epochs,
validation_data=next_simple_dataset(dataset, batch_size, DataType.validate),
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
callbacks=callbacks, verbose=1)
def create_sequential_cvxtz():
# https://www.kaggle.com/CVxTz/cnn-starter-nasnet-mobile-0-9709-lb
def sometimes(aug):
return iaa.Sometimes(0.5, aug)
seq = iaa.Sequential(
[
# apply the following augmenters to most images
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.2), # vertically flip 20% of all images
sometimes(iaa.Affine(
scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
# scale images to 80-120% of their size, individually per axis
translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, # translate by -20 to +20 percent (per axis)
rotate=(-10, 10), # rotate by -45 to +45 degrees
shear=(-5, 5), # shear by -16 to +16 degrees
order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
# execute 0 to 5 of the following (less important) augmenters per image
# don't execute all of them, as that would often be way too strong
iaa.SomeOf((0, 5),
[
sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),
# convert images into their superpixel representation
iaa.OneOf([
iaa.GaussianBlur((0, 1.0)), # blur images with a sigma between 0 and 3.0
iaa.AverageBlur(k=(3, 5)),
# blur image using local means with kernel sizes between 2 and 7
iaa.MedianBlur(k=(3, 5)),
# blur image using local medians with kernel sizes between 2 and 7
]),
iaa.Sharpen(alpha=(0, 1.0), lightness=(0.9, 1.1)), # sharpen images
iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
# search either for all edges or for directed edges,
# blend the result with the original image using a blobby mask
iaa.SimplexNoiseAlpha(iaa.OneOf([
iaa.EdgeDetect(alpha=(0.5, 1.0)),
iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
])),
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.01 * 255), per_channel=0.5),
# add gaussian noise to images
iaa.OneOf([
iaa.Dropout((0.01, 0.05), per_channel=0.5), # randomly remove up to 10% of the pixels
iaa.CoarseDropout((0.01, 0.03), size_percent=(0.01, 0.02), per_channel=0.2),
]),
iaa.Invert(0.01, per_channel=True), # invert color channels
iaa.Add((-2, 2), per_channel=0.5),
# change brightness of images (by -10 to 10 of original value)
iaa.AddToHueAndSaturation((-1, 1)), # change hue and saturation
# either change the brightness of the whole image (sometimes
# per channel) or change the brightness of subareas
iaa.OneOf([
iaa.Multiply((0.9, 1.1), per_channel=0.5),
iaa.FrequencyNoiseAlpha(
exponent=(-1, 0),
first=iaa.Multiply((0.9, 1.1), per_channel=True),
second=iaa.ContrastNormalization((0.9, 1.1))
)
]),
sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),
# move pixels locally around (with random strengths)
sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
# sometimes move parts of the image around
sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
],
random_order=True
)
],
random_order=True
)
return seq
SEQ_CVXTZ = create_sequential_cvxtz() |
def train_model(model: Model, dataset, model_filename: str,
batch_size=BATCH_SIZE, | random_line_split |
train_utils.py | import string
import random
import sys
import pickle
import pathlib
import time
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
import imgaug as ia
from imgaug import augmenters as iaa
# sys.path.append(pathlib.Path(__file__).parent)
from dataset import *
import metrics
def create_seq():
SEQ = iaa.Sequential([
iaa.OneOf([
iaa.Fliplr(0.5), # horizontal flips
iaa.Flipud(0.5), # vertically flips
iaa.Crop(percent=(0, 0.2)), # random crops
# Small gaussian blur with random sigma between 0 and 0.5.
# But we only blur about 50% of all images.
iaa.Sometimes(0.5,
iaa.GaussianBlur(sigma=(0, 0.5))
),
# Strengthen or weaken the contrast in each image.
iaa.ContrastNormalization((0.75, 1.5)),
# Add gaussian noise.
# For 50% of all images, we sample the noise once per pixel.
# For the other 50% of all images, we sample the noise per pixel AND
# channel. This can change the color (not only brightness) of the
# pixels.
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
# Make some images brighter and some darker.
# In 20% of all cases, we sample the multiplier once per channel,
# which can end up changing the color of the images.
iaa.Multiply((0.8, 1.2), per_channel=True),
# Apply affine transformations to each image.
# Scale/zoom them, translate/move them, rotate them and shear them.
iaa.Affine(
scale={"x": (1.0 - AUG_SCALE, 1.0 + AUG_SCALE), "y": (1.0 - AUG_SCALE, 1.0 + AUG_SCALE)},
# translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-180, 180),
shear=(-8, 8),
)
])], random_order=True)
return SEQ
def create_callbacks(dataset, name_weights, patience_lr=10, patience_es=150):
mcp_save = ModelCheckpoint('model/validate.weights.best.hdf5', save_best_only=True, monitor='val_loss')
# history = metrics.Histories(dataset)
# mcp_save = AllModelCheckpoint(name_weights)
# reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=patience_lr, verbose=1, min_delta=1e-4, mode='min')
# early_stopping = EarlyStopping(monitor='val_loss', patience=patience_es, verbose=1, mode='auto')
# return [early_stopping, mcp_save, reduce_lr_loss]
# return [f1metrics, early_stopping, mcp_save]
return [dataset, mcp_save]
def load_dataset(filename):
with open(filename, 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
train_dataset = pickle.load(f)
return train_dataset
def next_simple_dataset(dataset, batch_size: int, datatype):
""" Obtain a batch of training data
"""
while True:
x_batch = []
y_batch = []
for i in range(batch_size):
|
x_batch, y_batch = np.array(x_batch), np.array(y_batch)
if datatype != DataType.test:
x_batch = SEQ_CVXTZ.augment_images(x_batch).astype("float32")
x_batch = np.array([normalize(x) for x in x_batch])
# org_shape = x_batch.shape
# org_width = x_batch.shape[1]
# corner = int((org_width - ROI_IMAGE_SIZE) // 2)
# print(f"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}")
# x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]
# resized_x_batch = []
# for x in x_batch:
# img = Image.fromarray(np.uint8(x))
# img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)
# resized_x_batch.append(normalize(np.array(img)))
# print(f"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}")
# yield np.array(resized_x_batch), y_batch
yield np.array(x_batch), y_batch
def train_model(model: Model, dataset, model_filename: str,
batch_size=BATCH_SIZE,
epochs=EPOCHS, ):
callbacks = create_callbacks(dataset, model_filename)
dataset.model = model
answers = [data_unit.answer for data_unit in dataset.data_list]
sample_num = len(answers)
# sample_num = len(answers)
train_num = int(sample_num * TRAIN_RATIO)
validate_num = int(sample_num * VALIDATE_RATIO)
steps_per_epoch = train_num // batch_size
# steps_per_epoch = 50
validation_steps = validate_num // batch_size
print(f"train_num:{train_num} validate_num:{validate_num} steps_per_epoch:{steps_per_epoch} validateion_steps:{validation_steps}")
model.fit_generator(generator=next_simple_dataset(dataset, batch_size, DataType.train),
epochs=epochs,
validation_data=next_simple_dataset(dataset, batch_size, DataType.validate),
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
callbacks=callbacks, verbose=1)
def create_sequential_cvxtz():
# https://www.kaggle.com/CVxTz/cnn-starter-nasnet-mobile-0-9709-lb
def sometimes(aug):
return iaa.Sometimes(0.5, aug)
seq = iaa.Sequential(
[
# apply the following augmenters to most images
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.2), # vertically flip 20% of all images
sometimes(iaa.Affine(
scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
# scale images to 80-120% of their size, individually per axis
translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, # translate by -20 to +20 percent (per axis)
rotate=(-10, 10), # rotate by -45 to +45 degrees
shear=(-5, 5), # shear by -16 to +16 degrees
order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
# execute 0 to 5 of the following (less important) augmenters per image
# don't execute all of them, as that would often be way too strong
iaa.SomeOf((0, 5),
[
sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),
# convert images into their superpixel representation
iaa.OneOf([
iaa.GaussianBlur((0, 1.0)), # blur images with a sigma between 0 and 3.0
iaa.AverageBlur(k=(3, 5)),
# blur image using local means with kernel sizes between 2 and 7
iaa.MedianBlur(k=(3, 5)),
# blur image using local medians with kernel sizes between 2 and 7
]),
iaa.Sharpen(alpha=(0, 1.0), lightness=(0.9, 1.1)), # sharpen images
iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
# search either for all edges or for directed edges,
# blend the result with the original image using a blobby mask
iaa.SimplexNoiseAlpha(iaa.OneOf([
iaa.EdgeDetect(alpha=(0.5, 1.0)),
iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
])),
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.01 * 255), per_channel=0.5),
# add gaussian noise to images
iaa.OneOf([
iaa.Dropout((0.01, 0.05), per_channel=0.5), # randomly remove up to 10% of the pixels
iaa.CoarseDropout((0.01, 0.03), size_percent=(0.01, 0.02), per_channel=0.2),
]),
iaa.Invert(0.01, per_channel=True), # invert color channels
iaa.Add((-2, 2), per_channel=0.5),
# change brightness of images (by -10 to 10 of original value)
iaa.AddToHueAndSaturation((-1, 1)), # change hue and saturation
# either change the brightness of the whole image (sometimes
# per channel) or change the brightness of subareas
iaa.OneOf([
iaa.Multiply((0.9, 1.1), per_channel=0.5),
iaa.FrequencyNoiseAlpha(
exponent=(-1, 0),
first=iaa.Multiply((0.9, 1.1), per_channel=True),
second=iaa.ContrastNormalization((0.9, 1.1))
)
]),
sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),
# move pixels locally around (with random strengths)
sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
# sometimes move parts of the image around
sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
],
random_order=True
)
],
random_order=True
)
return seq
SEQ_CVXTZ = create_sequential_cvxtz()
| try:
x, y, data_unit, index = create_xy(dataset, datatype)
# x = normalize(x)
x_batch.append(x)
y_batch.append(y)
except StopIteration:
break | conditional_block |
train_utils.py | import string
import random
import sys
import pickle
import pathlib
import time
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
import imgaug as ia
from imgaug import augmenters as iaa
# sys.path.append(pathlib.Path(__file__).parent)
from dataset import *
import metrics
def create_seq():
SEQ = iaa.Sequential([
iaa.OneOf([
iaa.Fliplr(0.5), # horizontal flips
iaa.Flipud(0.5), # vertically flips
iaa.Crop(percent=(0, 0.2)), # random crops
# Small gaussian blur with random sigma between 0 and 0.5.
# But we only blur about 50% of all images.
iaa.Sometimes(0.5,
iaa.GaussianBlur(sigma=(0, 0.5))
),
# Strengthen or weaken the contrast in each image.
iaa.ContrastNormalization((0.75, 1.5)),
# Add gaussian noise.
# For 50% of all images, we sample the noise once per pixel.
# For the other 50% of all images, we sample the noise per pixel AND
# channel. This can change the color (not only brightness) of the
# pixels.
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
# Make some images brighter and some darker.
# In 20% of all cases, we sample the multiplier once per channel,
# which can end up changing the color of the images.
iaa.Multiply((0.8, 1.2), per_channel=True),
# Apply affine transformations to each image.
# Scale/zoom them, translate/move them, rotate them and shear them.
iaa.Affine(
scale={"x": (1.0 - AUG_SCALE, 1.0 + AUG_SCALE), "y": (1.0 - AUG_SCALE, 1.0 + AUG_SCALE)},
# translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-180, 180),
shear=(-8, 8),
)
])], random_order=True)
return SEQ
def create_callbacks(dataset, name_weights, patience_lr=10, patience_es=150):
mcp_save = ModelCheckpoint('model/validate.weights.best.hdf5', save_best_only=True, monitor='val_loss')
# history = metrics.Histories(dataset)
# mcp_save = AllModelCheckpoint(name_weights)
# reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=patience_lr, verbose=1, min_delta=1e-4, mode='min')
# early_stopping = EarlyStopping(monitor='val_loss', patience=patience_es, verbose=1, mode='auto')
# return [early_stopping, mcp_save, reduce_lr_loss]
# return [f1metrics, early_stopping, mcp_save]
return [dataset, mcp_save]
def load_dataset(filename):
with open(filename, 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
train_dataset = pickle.load(f)
return train_dataset
def next_simple_dataset(dataset, batch_size: int, datatype):
""" Obtain a batch of training data
"""
while True:
x_batch = []
y_batch = []
for i in range(batch_size):
try:
x, y, data_unit, index = create_xy(dataset, datatype)
# x = normalize(x)
x_batch.append(x)
y_batch.append(y)
except StopIteration:
break
x_batch, y_batch = np.array(x_batch), np.array(y_batch)
if datatype != DataType.test:
x_batch = SEQ_CVXTZ.augment_images(x_batch).astype("float32")
x_batch = np.array([normalize(x) for x in x_batch])
# org_shape = x_batch.shape
# org_width = x_batch.shape[1]
# corner = int((org_width - ROI_IMAGE_SIZE) // 2)
# print(f"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}")
# x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]
# resized_x_batch = []
# for x in x_batch:
# img = Image.fromarray(np.uint8(x))
# img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)
# resized_x_batch.append(normalize(np.array(img)))
# print(f"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}")
# yield np.array(resized_x_batch), y_batch
yield np.array(x_batch), y_batch
def train_model(model: Model, dataset, model_filename: str,
batch_size=BATCH_SIZE,
epochs=EPOCHS, ):
callbacks = create_callbacks(dataset, model_filename)
dataset.model = model
answers = [data_unit.answer for data_unit in dataset.data_list]
sample_num = len(answers)
# sample_num = len(answers)
train_num = int(sample_num * TRAIN_RATIO)
validate_num = int(sample_num * VALIDATE_RATIO)
steps_per_epoch = train_num // batch_size
# steps_per_epoch = 50
validation_steps = validate_num // batch_size
print(f"train_num:{train_num} validate_num:{validate_num} steps_per_epoch:{steps_per_epoch} validateion_steps:{validation_steps}")
model.fit_generator(generator=next_simple_dataset(dataset, batch_size, DataType.train),
epochs=epochs,
validation_data=next_simple_dataset(dataset, batch_size, DataType.validate),
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
callbacks=callbacks, verbose=1)
def | ():
# https://www.kaggle.com/CVxTz/cnn-starter-nasnet-mobile-0-9709-lb
def sometimes(aug):
return iaa.Sometimes(0.5, aug)
seq = iaa.Sequential(
[
# apply the following augmenters to most images
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.2), # vertically flip 20% of all images
sometimes(iaa.Affine(
scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
# scale images to 80-120% of their size, individually per axis
translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, # translate by -20 to +20 percent (per axis)
rotate=(-10, 10), # rotate by -45 to +45 degrees
shear=(-5, 5), # shear by -16 to +16 degrees
order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
# execute 0 to 5 of the following (less important) augmenters per image
# don't execute all of them, as that would often be way too strong
iaa.SomeOf((0, 5),
[
sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),
# convert images into their superpixel representation
iaa.OneOf([
iaa.GaussianBlur((0, 1.0)), # blur images with a sigma between 0 and 3.0
iaa.AverageBlur(k=(3, 5)),
# blur image using local means with kernel sizes between 2 and 7
iaa.MedianBlur(k=(3, 5)),
# blur image using local medians with kernel sizes between 2 and 7
]),
iaa.Sharpen(alpha=(0, 1.0), lightness=(0.9, 1.1)), # sharpen images
iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
# search either for all edges or for directed edges,
# blend the result with the original image using a blobby mask
iaa.SimplexNoiseAlpha(iaa.OneOf([
iaa.EdgeDetect(alpha=(0.5, 1.0)),
iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
])),
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.01 * 255), per_channel=0.5),
# add gaussian noise to images
iaa.OneOf([
iaa.Dropout((0.01, 0.05), per_channel=0.5), # randomly remove up to 10% of the pixels
iaa.CoarseDropout((0.01, 0.03), size_percent=(0.01, 0.02), per_channel=0.2),
]),
iaa.Invert(0.01, per_channel=True), # invert color channels
iaa.Add((-2, 2), per_channel=0.5),
# change brightness of images (by -10 to 10 of original value)
iaa.AddToHueAndSaturation((-1, 1)), # change hue and saturation
# either change the brightness of the whole image (sometimes
# per channel) or change the brightness of subareas
iaa.OneOf([
iaa.Multiply((0.9, 1.1), per_channel=0.5),
iaa.FrequencyNoiseAlpha(
exponent=(-1, 0),
first=iaa.Multiply((0.9, 1.1), per_channel=True),
second=iaa.ContrastNormalization((0.9, 1.1))
)
]),
sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),
# move pixels locally around (with random strengths)
sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
# sometimes move parts of the image around
sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
],
random_order=True
)
],
random_order=True
)
return seq
SEQ_CVXTZ = create_sequential_cvxtz()
| create_sequential_cvxtz | identifier_name |
train_utils.py | import string
import random
import sys
import pickle
import pathlib
import time
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
import imgaug as ia
from imgaug import augmenters as iaa
# sys.path.append(pathlib.Path(__file__).parent)
from dataset import *
import metrics
def create_seq():
|
def create_callbacks(dataset, name_weights, patience_lr=10, patience_es=150):
mcp_save = ModelCheckpoint('model/validate.weights.best.hdf5', save_best_only=True, monitor='val_loss')
# history = metrics.Histories(dataset)
# mcp_save = AllModelCheckpoint(name_weights)
# reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=patience_lr, verbose=1, min_delta=1e-4, mode='min')
# early_stopping = EarlyStopping(monitor='val_loss', patience=patience_es, verbose=1, mode='auto')
# return [early_stopping, mcp_save, reduce_lr_loss]
# return [f1metrics, early_stopping, mcp_save]
return [dataset, mcp_save]
def load_dataset(filename):
with open(filename, 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
train_dataset = pickle.load(f)
return train_dataset
def next_simple_dataset(dataset, batch_size: int, datatype):
""" Obtain a batch of training data
"""
while True:
x_batch = []
y_batch = []
for i in range(batch_size):
try:
x, y, data_unit, index = create_xy(dataset, datatype)
# x = normalize(x)
x_batch.append(x)
y_batch.append(y)
except StopIteration:
break
x_batch, y_batch = np.array(x_batch), np.array(y_batch)
if datatype != DataType.test:
x_batch = SEQ_CVXTZ.augment_images(x_batch).astype("float32")
x_batch = np.array([normalize(x) for x in x_batch])
# org_shape = x_batch.shape
# org_width = x_batch.shape[1]
# corner = int((org_width - ROI_IMAGE_SIZE) // 2)
# print(f"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}")
# x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]
# resized_x_batch = []
# for x in x_batch:
# img = Image.fromarray(np.uint8(x))
# img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)
# resized_x_batch.append(normalize(np.array(img)))
# print(f"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}")
# yield np.array(resized_x_batch), y_batch
yield np.array(x_batch), y_batch
def train_model(model: Model, dataset, model_filename: str,
batch_size=BATCH_SIZE,
epochs=EPOCHS, ):
callbacks = create_callbacks(dataset, model_filename)
dataset.model = model
answers = [data_unit.answer for data_unit in dataset.data_list]
sample_num = len(answers)
# sample_num = len(answers)
train_num = int(sample_num * TRAIN_RATIO)
validate_num = int(sample_num * VALIDATE_RATIO)
steps_per_epoch = train_num // batch_size
# steps_per_epoch = 50
validation_steps = validate_num // batch_size
print(f"train_num:{train_num} validate_num:{validate_num} steps_per_epoch:{steps_per_epoch} validateion_steps:{validation_steps}")
model.fit_generator(generator=next_simple_dataset(dataset, batch_size, DataType.train),
epochs=epochs,
validation_data=next_simple_dataset(dataset, batch_size, DataType.validate),
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
callbacks=callbacks, verbose=1)
def create_sequential_cvxtz():
# https://www.kaggle.com/CVxTz/cnn-starter-nasnet-mobile-0-9709-lb
def sometimes(aug):
return iaa.Sometimes(0.5, aug)
seq = iaa.Sequential(
[
# apply the following augmenters to most images
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.2), # vertically flip 20% of all images
sometimes(iaa.Affine(
scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
# scale images to 80-120% of their size, individually per axis
translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, # translate by -20 to +20 percent (per axis)
rotate=(-10, 10), # rotate by -45 to +45 degrees
shear=(-5, 5), # shear by -16 to +16 degrees
order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
# execute 0 to 5 of the following (less important) augmenters per image
# don't execute all of them, as that would often be way too strong
iaa.SomeOf((0, 5),
[
sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),
# convert images into their superpixel representation
iaa.OneOf([
iaa.GaussianBlur((0, 1.0)), # blur images with a sigma between 0 and 3.0
iaa.AverageBlur(k=(3, 5)),
# blur image using local means with kernel sizes between 2 and 7
iaa.MedianBlur(k=(3, 5)),
# blur image using local medians with kernel sizes between 2 and 7
]),
iaa.Sharpen(alpha=(0, 1.0), lightness=(0.9, 1.1)), # sharpen images
iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
# search either for all edges or for directed edges,
# blend the result with the original image using a blobby mask
iaa.SimplexNoiseAlpha(iaa.OneOf([
iaa.EdgeDetect(alpha=(0.5, 1.0)),
iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
])),
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.01 * 255), per_channel=0.5),
# add gaussian noise to images
iaa.OneOf([
iaa.Dropout((0.01, 0.05), per_channel=0.5), # randomly remove up to 10% of the pixels
iaa.CoarseDropout((0.01, 0.03), size_percent=(0.01, 0.02), per_channel=0.2),
]),
iaa.Invert(0.01, per_channel=True), # invert color channels
iaa.Add((-2, 2), per_channel=0.5),
# change brightness of images (by -10 to 10 of original value)
iaa.AddToHueAndSaturation((-1, 1)), # change hue and saturation
# either change the brightness of the whole image (sometimes
# per channel) or change the brightness of subareas
iaa.OneOf([
iaa.Multiply((0.9, 1.1), per_channel=0.5),
iaa.FrequencyNoiseAlpha(
exponent=(-1, 0),
first=iaa.Multiply((0.9, 1.1), per_channel=True),
second=iaa.ContrastNormalization((0.9, 1.1))
)
]),
sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),
# move pixels locally around (with random strengths)
sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
# sometimes move parts of the image around
sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
],
random_order=True
)
],
random_order=True
)
return seq
SEQ_CVXTZ = create_sequential_cvxtz()
| SEQ = iaa.Sequential([
iaa.OneOf([
iaa.Fliplr(0.5), # horizontal flips
iaa.Flipud(0.5), # vertically flips
iaa.Crop(percent=(0, 0.2)), # random crops
# Small gaussian blur with random sigma between 0 and 0.5.
# But we only blur about 50% of all images.
iaa.Sometimes(0.5,
iaa.GaussianBlur(sigma=(0, 0.5))
),
# Strengthen or weaken the contrast in each image.
iaa.ContrastNormalization((0.75, 1.5)),
# Add gaussian noise.
# For 50% of all images, we sample the noise once per pixel.
# For the other 50% of all images, we sample the noise per pixel AND
# channel. This can change the color (not only brightness) of the
# pixels.
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
# Make some images brighter and some darker.
# In 20% of all cases, we sample the multiplier once per channel,
# which can end up changing the color of the images.
iaa.Multiply((0.8, 1.2), per_channel=True),
# Apply affine transformations to each image.
# Scale/zoom them, translate/move them, rotate them and shear them.
iaa.Affine(
scale={"x": (1.0 - AUG_SCALE, 1.0 + AUG_SCALE), "y": (1.0 - AUG_SCALE, 1.0 + AUG_SCALE)},
# translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-180, 180),
shear=(-8, 8),
)
])], random_order=True)
return SEQ | identifier_body |
kubectl.go | /*
Copyright 2023 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/gravitational/trace"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"k8s.io/cli-runtime/pkg/genericclioptions"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/component-base/cli"
"k8s.io/kubectl/pkg/cmd"
"k8s.io/kubectl/pkg/cmd/plugin"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/gravitational/teleport"
tracehttp "github.com/gravitational/teleport/api/observability/tracing/http"
"github.com/gravitational/teleport/api/profile"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/kube/kubeconfig"
"github.com/gravitational/teleport/lib/observability/tracing"
)
var (
podForbiddenRe = regexp.MustCompile(`(?m)Error from server \(Forbidden\): pods "(.*)" is forbidden: User ".*" cannot get resource "pods" in API group "" in the namespace "(.*)"`)
clusterForbidden = "[00] access denied"
// clusterObjectDiscoveryFailed is printed when kubectl tries to do API discovery
// - calling /apis endpoint - but Teleport denies the request. Since it cannot
// discover the resources available in the cluster, it prints this message saying
// that the cluster does not have pod(s). Since every Kubernetes cluster supports
// pods, it's safe to create a resource access request.
clusterObjectDiscoveryFailed = regexp.MustCompile(`(?m)the server doesn't have a resource type "pods?"`)
)
// resourceKind identifies a Kubernetes resource.
type resourceKind struct {
kind string
subResourceName string
}
// onKubectlCommand re-execs itself if env var `tshKubectlRexec` is not set
// in order to execute the `kubectl` portion of the code. This is a requirement because
// `kubectl` calls `os.Exit()` in every code path, and we need to intercept the
// exit code to validate if the request was denied.
// When executing `tsh kubectl get pods`, tsh checks if `tshKubectlReexec`. Since
// it's the user call and the flag is not present, tsh reexecs the same exact
// the user executed and uses an io.MultiWriter to write the os.Stderr output
// from the kubectl command into an io.Pipe for analysis. It also sets the env
// `tshKubectlReexec` in the exec.Cmd.Env and runs the command. When running the
// command, `tsh` will be recalled, and since `tshKubectlReexec` is set only the
// kubectl portion of code is executed.
// On the caller side, once the callee execution finishes, tsh inspects the stderr
// outputs and decides if creating an access request is appropriate.
// If the access request is created, tsh waits for the approval and runs the expected
// command again.
func onKubectlCommand(cf *CLIConf, fullArgs []string, args []string) error {
if os.Getenv(tshKubectlReexecEnvVar) == "" {
err := runKubectlAndCollectRun(cf, fullArgs, args)
return trace.Wrap(err)
}
runKubectlCode(cf, args)
return nil
}
const (
// tshKubectlReexecEnvVar is the name of the environment variable used to control if
// tsh should re-exec or execute a kubectl command.
tshKubectlReexecEnvVar = "TSH_KUBE_REEXEC"
)
// runKubectlReexec reexecs itself and copies the `stderr` output into
// the provided collector.
// It also sets tshKubectlReexec for the command to prevent
// an exec loop
func runKubectlReexec(cf *CLIConf, fullArgs, args []string, collector io.Writer) error {
closeFn, newKubeConfigLocation, err := maybeStartKubeLocalProxy(cf, withKubectlArgs(args))
if err != nil {
return trace.Wrap(err)
}
defer closeFn()
cmdEnv := append(os.Environ(), fmt.Sprintf("%s=yes", tshKubectlReexecEnvVar))
// Update kubeconfig location.
if newKubeConfigLocation != "" {
cmdEnv = overwriteKubeconfigInEnv(cmdEnv, newKubeConfigLocation)
fullArgs = overwriteKubeconfigFlagInArgs(fullArgs, newKubeConfigLocation)
}
// Execute.
cmd := exec.Command(cf.executablePath, fullArgs...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = io.MultiWriter(os.Stderr, collector)
cmd.Env = cmdEnv
return trace.Wrap(cmd.Run())
}
// wrapConfigFn wraps the rest.Config with a custom RoundTripper if the user
// wants to sample traces.
func wrapConfigFn(cf *CLIConf) func(c *rest.Config) *rest.Config {
return func(c *rest.Config) *rest.Config {
c.Wrap(
func(rt http.RoundTripper) http.RoundTripper {
if cf.SampleTraces {
// If the user wants to sample traces, wrap the transport with a trace
// transport.
return tracehttp.NewTransport(rt)
}
return rt
},
)
return c
}
}
// runKubectlCode runs the actual kubectl package code with the default options.
// This code is only executed when `tshKubectlReexec` env is present. This happens
// because we need to retry kubectl calls and `kubectl` calls os.Exit in multiple
// paths.
func runKubectlCode(cf *CLIConf, args []string) {
closeTracer := func() {}
cf.TracingProvider = tracing.NoopProvider()
cf.tracer = cf.TracingProvider.Tracer(teleport.ComponentTSH)
if cf.SampleTraces {
provider, err := newTraceProvider(cf, "", nil)
if err != nil {
log.WithError(err).Debug("Failed to set up span forwarding")
} else {
// ensure that the provider is shutdown on exit to flush any spans
// that haven't been forwarded yet.
closeTracer = func() {
shutdownCtx, cancel := context.WithTimeout(cf.Context, 1*time.Second)
defer cancel()
err := provider.Shutdown(shutdownCtx)
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
log.WithError(err).Debugf("Failed to shutdown trace provider")
}
}
}
}
// If the user opted to not sample traces, cf.TracingProvider is pre-initialized
// with a noop provider.
ctx, span := cf.TracingProvider.Tracer("kubectl").Start(cf.Context, "kubectl")
closeSpanAndTracer := func() {
span.End()
closeTracer()
}
// These values are the defaults used by kubectl and can be found here:
// https://github.com/kubernetes/kubectl/blob/3612c18ed86fc0a2f4467ca355b3e21569fabe0a/pkg/cmd/cmd.go#L94
defaultConfigFlags := genericclioptions.NewConfigFlags(true).
WithDeprecatedPasswordFlag().
WithDiscoveryBurst(300).
WithDiscoveryQPS(50.0).
WithWrapConfigFn(wrapConfigFn(cf))
command := cmd.NewDefaultKubectlCommandWithArgs(
cmd.KubectlOptions{
// init the default plugin handler.
PluginHandler: cmd.NewDefaultPluginHandler(plugin.ValidPluginFilenamePrefixes),
Arguments: args,
ConfigFlags: defaultConfigFlags,
// init the IOSStreams.
IOStreams: genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr},
},
)
command.SetContext(ctx)
// override args without kubectl to avoid errors.
command.SetArgs(args[1:])
// run command until it finishes.
if err := cli.RunNoErrOutput(command); err != nil {
closeSpanAndTracer()
// Pretty-print the error and exit with an error.
cmdutil.CheckErr(err)
}
closeSpanAndTracer()
os.Exit(0)
}
func runKubectlAndCollectRun(cf *CLIConf, fullArgs, args []string) error {
var (
alreadyRequestedAccess bool
err error
exitErr *exec.ExitError
)
for {
// missingKubeResources will include the Kubernetes Resources whose access
// was rejected in this kubectl call.
missingKubeResources := make([]resourceKind, 0, 50)
reader, writer := io.Pipe()
group, _ := errgroup.WithContext(cf.Context)
group.Go(
func() error {
// This goroutine scans each line of output emitted to stderr by kubectl
// and parses it in order to check if the returned error was a problem with
// missing access level. If it's the case, tsh kubectl will create automatically
// the access request for the user to access the resource.
// Current supported resources:
// - pod
// - kube_cluster
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
line := scanner.Text()
// Check if the request targeting a pod endpoint was denied due to
// Teleport Pod RBAC or if the operation was denied by Kubernetes RBAC.
// In the second case, we should create a Resource Access Request to allow
// the user to exec/read logs using different Kubernetes RBAC principals.
// using different Kubernetes RBAC principals.
if podForbiddenRe.MatchString(line) {
results := podForbiddenRe.FindStringSubmatch(line)
missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubePod, subResourceName: filepath.Join(results[2], results[1])})
// Check if cluster access was denied. If denied we should create
// a Resource Access Request for the cluster and not a pod.
} else if strings.Contains(line, clusterForbidden) || clusterObjectDiscoveryFailed.MatchString(line) {
missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubernetesCluster})
}
}
return trace.Wrap(scanner.Err())
},
)
err := runKubectlReexec(cf, fullArgs, args, writer)
writer.CloseWithError(io.EOF)
if scanErr := group.Wait(); scanErr != nil {
log.WithError(scanErr).Warn("unable to scan stderr payload")
}
if err == nil {
break
} else if !errors.As(err, &exitErr) {
return trace.Wrap(err)
} else if errors.As(err, &exitErr) && exitErr.ExitCode() != cmdutil.DefaultErrorExitCode {
// if the exit code is not 1, it was emitted by pod exec code and we should
// ignore it since the user was allowed to execute the command in the pod.
break
}
if len(missingKubeResources) > 0 && !alreadyRequestedAccess {
// create the access requests for the user and wait for approval.
if err := createKubeAccessRequest(cf, missingKubeResources, args); err != nil {
return trace.Wrap(err)
}
alreadyRequestedAccess = true
continue
}
break
}
// exit with the kubectl exit code to keep compatibility.
if errors.As(err, &exitErr) {
os.Exit(exitErr.ExitCode())
}
return nil
}
// createKubeAccessRequest creates an access request to the denied resources
// if the user's roles allow search_as_role.
func createKubeAccessRequest(cf *CLIConf, resources []resourceKind, args []string) error {
tc, err := makeClient(cf)
if err != nil {
return trace.Wrap(err)
}
kubeName, err := getKubeClusterName(args, tc.SiteName)
if err != nil {
return trace.Wrap(err)
}
for _, rec := range resources {
cf.RequestedResourceIDs = append(
cf.RequestedResourceIDs,
filepath.Join("/", tc.SiteName, rec.kind, kubeName, rec.subResourceName),
)
}
cf.Reason = fmt.Sprintf("Resource request automatically created for %v", args)
if err := executeAccessRequest(cf, tc); err != nil {
// TODO(tigrato): intercept the error to validate the origin
return trace.Wrap(err)
}
return nil
}
// extractKubeConfigAndContext parses the args and extracts:
// - the "--context" flag that overrides the default context to use, if present
// - the "--kubeconfig" flag that overrides the default kubeconfig location, if
// present
func extractKubeConfigAndContext(args []string) (string, string) {
if len(args) <= 2 {
return "", ""
}
command := makeKubectlCobraCommand()
return extractKubeConfigAndContextFromCommand(command, args)
}
// extractKubeConfigAndContextFromCommand parses the args using provided
// kubectl command and extracts:
// - the "--context" flag that overrides the default context to use, if present
// - the "--kubeconfig" flag that overrides the default kubeconfig location, if
// present
func extractKubeConfigAndContextFromCommand(command *cobra.Command, args []string) (kubeconfig string, context string) {
if len(args) <= 2 {
return
}
// Find subcommand.
if subcommand, _, err := command.Find(args[1:]); err == nil {
command = subcommand
}
// Ignore errors from ParseFlags.
command.ParseFlags(args[1:])
kubeconfig = command.Flag("kubeconfig").Value.String()
context = command.Flag("context").Value.String()
return
}
var makeKubectlCobraCommandLock sync.Mutex
// makeKubectlCobraCommand creates a cobra.Command for kubectl.
//
// Note that cmd.NewKubectlCommand is slow (15+ ms, 20k+ alloc), so avoid
// making/re-making it when possible.
//
// Also cmd.NewKubectlCommand is not goroutine-safe, thus using a lock.
func makeKubectlCobraCommand() *cobra.Command {
makeKubectlCobraCommandLock.Lock()
defer makeKubectlCobraCommandLock.Unlock()
return cmd.NewKubectlCommand(cmd.KubectlOptions{
// Use NewConfigFlags to avoid load existing values from
// defaultConfigFlags.
ConfigFlags: genericclioptions.NewConfigFlags(true),
})
}
// getKubeClusterName extracts the Kubernetes Cluster name if the Kube belongs to
// the teleportClusterName cluster. It parses the args to extract the `--kubeconfig`
// and `--context` flag values and to use them if any was overriten.
func getKubeClusterName(args []string, teleportClusterName string) (string, error) {
kubeconfigLocation, selectedContext := extractKubeConfigAndContext(args)
if selectedContext == "" {
kubeName, err := kubeconfig.SelectedKubeCluster(kubeconfigLocation, teleportClusterName)
return kubeName, trace.Wrap(err)
}
kc, err := kubeconfig.Load(kubeconfigLocation)
if err != nil {
return "", trace.Wrap(err)
}
kubeName := kubeconfig.KubeClusterFromContext(selectedContext, kc.Contexts[selectedContext], teleportClusterName)
if kubeName == "" {
return "", trace.BadParameter("selected context %q does not belong to Teleport cluster %q", selectedContext, teleportClusterName)
}
return kubeName, nil
}
type kubeLocalProxyOpts struct {
// kubectlArgs is a list of command arguments passed in for `tsh kubectl`.
// used to decide if local proxy is required.
kubectlArgs []string
// makeAndStartKubeLocalProxyFunc is a callback function to create and
// start a kube local proxy, when it is decided that a local proxy is
// required. Default to makeAndStartKubeLocalProxy. Can be set another
// function for testing.
makeAndStartKubeLocalProxyFunc func(*CLIConf, *clientcmdapi.Config, kubeconfig.LocalProxyClusters) (func(), string, error)
}
type applyKubeLocalProxyOpts func(o *kubeLocalProxyOpts)
func withKubectlArgs(args []string) applyKubeLocalProxyOpts {
return func(o *kubeLocalProxyOpts) {
o.kubectlArgs = args
}
}
func newKubeLocalProxyOpts(applyOpts ...applyKubeLocalProxyOpts) kubeLocalProxyOpts {
opts := kubeLocalProxyOpts{
makeAndStartKubeLocalProxyFunc: makeAndStartKubeLocalProxy,
}
for _, applyOpt := range applyOpts {
applyOpt(&opts)
}
return opts
}
// maybeStartKubeLocalProxy starts a kube local proxy if local proxy is
// required. A closeFn and the new kubeconfig path are returned if local proxy
// is successfully created. Called by `tsh kubectl` and `tsh kube exec`.
func maybeStartKubeLocalProxy(cf *CLIConf, applyOpts ...applyKubeLocalProxyOpts) (func(), string, error) {
opts := newKubeLocalProxyOpts(applyOpts...)
config, clusters, useLocalProxy := shouldUseKubeLocalProxy(cf, opts.kubectlArgs)
if !useLocalProxy {
return func() {}, "", nil
}
closeFn, newKubeConfigLocation, err := opts.makeAndStartKubeLocalProxyFunc(cf, config, clusters)
return closeFn, newKubeConfigLocation, trace.Wrap(err)
}
// makeAndStartKubeLocalProxy is a helper to create a kube local proxy and
// start it in a goroutine. If successful, a closeFn and the generated
// kubeconfig location are returned.
func makeAndStartKubeLocalProxy(cf *CLIConf, config *clientcmdapi.Config, clusters kubeconfig.LocalProxyClusters) (func(), string, error) {
tc, err := makeClient(cf)
if err != nil {
return nil, "", trace.Wrap(err)
}
localProxy, err := makeKubeLocalProxy(cf, tc, clusters, config, cf.LocalProxyPort)
if err != nil {
return nil, "", trace.Wrap(err)
}
go localProxy.Start(cf.Context)
closeFn := func() {
localProxy.Close()
}
return closeFn, localProxy.KubeConfigPath(), nil
}
// shouldUseKubeLocalProxy checks if a local proxy is required for kube
// access for `tsh kubectl` or `tsh kube exec`.
//
// The local proxy is required when all of these conditions are met:
// - profile is loadable
// - kube access is enabled, and is accessed through web proxy address
// - ALPN connection upgrade is required (e.g. Proxy behind ALB)
// - not `kubectl config` commands
// - original/default kubeconfig is loadable
// - Selected cluster is a Teleport cluster that uses KubeClusterAddr
func shouldUseKubeLocalProxy(cf *CLIConf, kubectlArgs []string) (*clientcmdapi.Config, kubeconfig.LocalProxyClusters, bool) {
// When failed to load profile, assume this CLI command is not running
// against Teleport clusters.
profile, err := cf.GetProfile()
if err != nil {
return nil, nil, false
}
if !profile.RequireKubeLocalProxy() {
return nil, nil, false
}
// Skip "kubectl config" commands.
var kubeconfigLocation, selectedContext string
if len(kubectlArgs) > 0 {
kubectlCommand := makeKubectlCobraCommand()
if isKubectlConfigCommand(kubectlCommand, kubectlArgs) {
return nil, nil, false
}
kubeconfigLocation, selectedContext = extractKubeConfigAndContextFromCommand(kubectlCommand, kubectlArgs)
}
// Nothing to do if cannot load original kubeconfig.
defaultConfig, err := kubeconfig.Load(kubeconfigLocation)
if err != nil {
return nil, nil, false
}
// Prepare Teleport kube cluster based on selected context.
kubeCluster, found := kubeconfig.FindTeleportClusterForLocalProxy(defaultConfig, kubeClusterAddrFromProfile(profile), selectedContext)
if !found {
return nil, nil, false
}
return defaultConfig, kubeconfig.LocalProxyClusters{kubeCluster}, true
}
func isKubectlConfigCommand(kubectlCommand *cobra.Command, args []string) bool {
if len(args) < 2 || args[0] != "kubectl" {
return false
}
find, _, _ := kubectlCommand.Find(args[1:])
for ; find != nil; find = find.Parent() |
return false
}
func kubeClusterAddrFromProfile(profile *profile.Profile) string {
partialClientConfig := client.Config{
WebProxyAddr: profile.WebProxyAddr,
KubeProxyAddr: profile.KubeProxyAddr,
}
return partialClientConfig.KubeClusterAddr()
}
func overwriteKubeconfigFlagInArgs(args []string, newPath string) []string {
// Make a clone to avoid changing the original args.
args = slices.Clone(args)
for i, arg := range args {
switch {
case strings.HasPrefix(arg, "--kubeconfig="):
args[i] = fmt.Sprintf("--kubeconfig=%v", newPath)
case arg == "--kubeconfig" && len(args) > i+1:
args[i+1] = newPath
}
}
return args
}
func overwriteKubeconfigInEnv(env []string, newPath string) (output []string) {
kubeConfigEnvPrefix := teleport.EnvKubeConfig + "="
for _, entry := range env {
if strings.HasPrefix(entry, kubeConfigEnvPrefix) {
continue
}
output = append(output, entry)
}
output = append(output, kubeConfigEnvPrefix+newPath)
return
}
| {
if find.Name() == "config" {
return true
}
} | conditional_block |
kubectl.go | /*
Copyright 2023 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/gravitational/trace"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"k8s.io/cli-runtime/pkg/genericclioptions"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/component-base/cli"
"k8s.io/kubectl/pkg/cmd"
"k8s.io/kubectl/pkg/cmd/plugin"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/gravitational/teleport"
tracehttp "github.com/gravitational/teleport/api/observability/tracing/http"
"github.com/gravitational/teleport/api/profile"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/kube/kubeconfig"
"github.com/gravitational/teleport/lib/observability/tracing"
)
var (
podForbiddenRe = regexp.MustCompile(`(?m)Error from server \(Forbidden\): pods "(.*)" is forbidden: User ".*" cannot get resource "pods" in API group "" in the namespace "(.*)"`)
clusterForbidden = "[00] access denied"
// clusterObjectDiscoveryFailed is printed when kubectl tries to do API discovery
// - calling /apis endpoint - but Teleport denies the request. Since it cannot
// discover the resources available in the cluster, it prints this message saying
// that the cluster does not have pod(s). Since every Kubernetes cluster supports
// pods, it's safe to create a resource access request.
clusterObjectDiscoveryFailed = regexp.MustCompile(`(?m)the server doesn't have a resource type "pods?"`)
)
// resourceKind identifies a Kubernetes resource.
type resourceKind struct {
kind string
subResourceName string
}
// onKubectlCommand re-execs itself if env var `tshKubectlRexec` is not set
// in order to execute the `kubectl` portion of the code. This is a requirement because
// `kubectl` calls `os.Exit()` in every code path, and we need to intercept the
// exit code to validate if the request was denied.
// When executing `tsh kubectl get pods`, tsh checks if `tshKubectlReexec`. Since
// it's the user call and the flag is not present, tsh reexecs the same exact
// the user executed and uses an io.MultiWriter to write the os.Stderr output
// from the kubectl command into an io.Pipe for analysis. It also sets the env
// `tshKubectlReexec` in the exec.Cmd.Env and runs the command. When running the
// command, `tsh` will be recalled, and since `tshKubectlReexec` is set only the
// kubectl portion of code is executed.
// On the caller side, once the callee execution finishes, tsh inspects the stderr
// outputs and decides if creating an access request is appropriate.
// If the access request is created, tsh waits for the approval and runs the expected
// command again.
func onKubectlCommand(cf *CLIConf, fullArgs []string, args []string) error {
if os.Getenv(tshKubectlReexecEnvVar) == "" {
err := runKubectlAndCollectRun(cf, fullArgs, args)
return trace.Wrap(err)
}
runKubectlCode(cf, args)
return nil
}
const (
// tshKubectlReexecEnvVar is the name of the environment variable used to control if
// tsh should re-exec or execute a kubectl command.
tshKubectlReexecEnvVar = "TSH_KUBE_REEXEC"
)
// runKubectlReexec reexecs itself and copies the `stderr` output into
// the provided collector.
// It also sets tshKubectlReexec for the command to prevent
// an exec loop
func runKubectlReexec(cf *CLIConf, fullArgs, args []string, collector io.Writer) error {
closeFn, newKubeConfigLocation, err := maybeStartKubeLocalProxy(cf, withKubectlArgs(args))
if err != nil {
return trace.Wrap(err)
}
defer closeFn()
cmdEnv := append(os.Environ(), fmt.Sprintf("%s=yes", tshKubectlReexecEnvVar))
// Update kubeconfig location.
if newKubeConfigLocation != "" {
cmdEnv = overwriteKubeconfigInEnv(cmdEnv, newKubeConfigLocation)
fullArgs = overwriteKubeconfigFlagInArgs(fullArgs, newKubeConfigLocation)
}
// Execute.
cmd := exec.Command(cf.executablePath, fullArgs...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = io.MultiWriter(os.Stderr, collector)
cmd.Env = cmdEnv
return trace.Wrap(cmd.Run())
}
// wrapConfigFn wraps the rest.Config with a custom RoundTripper if the user
// wants to sample traces.
func wrapConfigFn(cf *CLIConf) func(c *rest.Config) *rest.Config {
return func(c *rest.Config) *rest.Config {
c.Wrap(
func(rt http.RoundTripper) http.RoundTripper {
if cf.SampleTraces {
// If the user wants to sample traces, wrap the transport with a trace
// transport.
return tracehttp.NewTransport(rt)
}
return rt
},
)
return c
}
}
// runKubectlCode runs the actual kubectl package code with the default options.
// This code is only executed when `tshKubectlReexec` env is present. This happens
// because we need to retry kubectl calls and `kubectl` calls os.Exit in multiple
// paths.
func runKubectlCode(cf *CLIConf, args []string) {
closeTracer := func() {}
cf.TracingProvider = tracing.NoopProvider()
cf.tracer = cf.TracingProvider.Tracer(teleport.ComponentTSH)
if cf.SampleTraces {
provider, err := newTraceProvider(cf, "", nil)
if err != nil {
log.WithError(err).Debug("Failed to set up span forwarding")
} else {
// ensure that the provider is shutdown on exit to flush any spans
// that haven't been forwarded yet.
closeTracer = func() {
shutdownCtx, cancel := context.WithTimeout(cf.Context, 1*time.Second)
defer cancel()
err := provider.Shutdown(shutdownCtx)
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
log.WithError(err).Debugf("Failed to shutdown trace provider")
}
}
}
}
// If the user opted to not sample traces, cf.TracingProvider is pre-initialized
// with a noop provider.
ctx, span := cf.TracingProvider.Tracer("kubectl").Start(cf.Context, "kubectl")
closeSpanAndTracer := func() {
span.End()
closeTracer()
}
// These values are the defaults used by kubectl and can be found here:
// https://github.com/kubernetes/kubectl/blob/3612c18ed86fc0a2f4467ca355b3e21569fabe0a/pkg/cmd/cmd.go#L94
defaultConfigFlags := genericclioptions.NewConfigFlags(true).
WithDeprecatedPasswordFlag().
WithDiscoveryBurst(300).
WithDiscoveryQPS(50.0).
WithWrapConfigFn(wrapConfigFn(cf))
command := cmd.NewDefaultKubectlCommandWithArgs(
cmd.KubectlOptions{
// init the default plugin handler.
PluginHandler: cmd.NewDefaultPluginHandler(plugin.ValidPluginFilenamePrefixes),
Arguments: args,
ConfigFlags: defaultConfigFlags,
// init the IOSStreams.
IOStreams: genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr},
},
)
command.SetContext(ctx)
// override args without kubectl to avoid errors. | if err := cli.RunNoErrOutput(command); err != nil {
closeSpanAndTracer()
// Pretty-print the error and exit with an error.
cmdutil.CheckErr(err)
}
closeSpanAndTracer()
os.Exit(0)
}
func runKubectlAndCollectRun(cf *CLIConf, fullArgs, args []string) error {
var (
alreadyRequestedAccess bool
err error
exitErr *exec.ExitError
)
for {
// missingKubeResources will include the Kubernetes Resources whose access
// was rejected in this kubectl call.
missingKubeResources := make([]resourceKind, 0, 50)
reader, writer := io.Pipe()
group, _ := errgroup.WithContext(cf.Context)
group.Go(
func() error {
// This goroutine scans each line of output emitted to stderr by kubectl
// and parses it in order to check if the returned error was a problem with
// missing access level. If it's the case, tsh kubectl will create automatically
// the access request for the user to access the resource.
// Current supported resources:
// - pod
// - kube_cluster
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
line := scanner.Text()
// Check if the request targeting a pod endpoint was denied due to
// Teleport Pod RBAC or if the operation was denied by Kubernetes RBAC.
// In the second case, we should create a Resource Access Request to allow
// the user to exec/read logs using different Kubernetes RBAC principals.
// using different Kubernetes RBAC principals.
if podForbiddenRe.MatchString(line) {
results := podForbiddenRe.FindStringSubmatch(line)
missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubePod, subResourceName: filepath.Join(results[2], results[1])})
// Check if cluster access was denied. If denied we should create
// a Resource Access Request for the cluster and not a pod.
} else if strings.Contains(line, clusterForbidden) || clusterObjectDiscoveryFailed.MatchString(line) {
missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubernetesCluster})
}
}
return trace.Wrap(scanner.Err())
},
)
err := runKubectlReexec(cf, fullArgs, args, writer)
writer.CloseWithError(io.EOF)
if scanErr := group.Wait(); scanErr != nil {
log.WithError(scanErr).Warn("unable to scan stderr payload")
}
if err == nil {
break
} else if !errors.As(err, &exitErr) {
return trace.Wrap(err)
} else if errors.As(err, &exitErr) && exitErr.ExitCode() != cmdutil.DefaultErrorExitCode {
// if the exit code is not 1, it was emitted by pod exec code and we should
// ignore it since the user was allowed to execute the command in the pod.
break
}
if len(missingKubeResources) > 0 && !alreadyRequestedAccess {
// create the access requests for the user and wait for approval.
if err := createKubeAccessRequest(cf, missingKubeResources, args); err != nil {
return trace.Wrap(err)
}
alreadyRequestedAccess = true
continue
}
break
}
// exit with the kubectl exit code to keep compatibility.
if errors.As(err, &exitErr) {
os.Exit(exitErr.ExitCode())
}
return nil
}
// createKubeAccessRequest creates an access request to the denied resources
// if the user's roles allow search_as_role.
func createKubeAccessRequest(cf *CLIConf, resources []resourceKind, args []string) error {
tc, err := makeClient(cf)
if err != nil {
return trace.Wrap(err)
}
kubeName, err := getKubeClusterName(args, tc.SiteName)
if err != nil {
return trace.Wrap(err)
}
for _, rec := range resources {
cf.RequestedResourceIDs = append(
cf.RequestedResourceIDs,
filepath.Join("/", tc.SiteName, rec.kind, kubeName, rec.subResourceName),
)
}
cf.Reason = fmt.Sprintf("Resource request automatically created for %v", args)
if err := executeAccessRequest(cf, tc); err != nil {
// TODO(tigrato): intercept the error to validate the origin
return trace.Wrap(err)
}
return nil
}
// extractKubeConfigAndContext parses the args and extracts:
// - the "--context" flag that overrides the default context to use, if present
// - the "--kubeconfig" flag that overrides the default kubeconfig location, if
// present
func extractKubeConfigAndContext(args []string) (string, string) {
if len(args) <= 2 {
return "", ""
}
command := makeKubectlCobraCommand()
return extractKubeConfigAndContextFromCommand(command, args)
}
// extractKubeConfigAndContextFromCommand parses the args using provided
// kubectl command and extracts:
// - the "--context" flag that overrides the default context to use, if present
// - the "--kubeconfig" flag that overrides the default kubeconfig location, if
// present
func extractKubeConfigAndContextFromCommand(command *cobra.Command, args []string) (kubeconfig string, context string) {
if len(args) <= 2 {
return
}
// Find subcommand.
if subcommand, _, err := command.Find(args[1:]); err == nil {
command = subcommand
}
// Ignore errors from ParseFlags.
command.ParseFlags(args[1:])
kubeconfig = command.Flag("kubeconfig").Value.String()
context = command.Flag("context").Value.String()
return
}
var makeKubectlCobraCommandLock sync.Mutex
// makeKubectlCobraCommand creates a cobra.Command for kubectl.
//
// Note that cmd.NewKubectlCommand is slow (15+ ms, 20k+ alloc), so avoid
// making/re-making it when possible.
//
// Also cmd.NewKubectlCommand is not goroutine-safe, thus using a lock.
func makeKubectlCobraCommand() *cobra.Command {
makeKubectlCobraCommandLock.Lock()
defer makeKubectlCobraCommandLock.Unlock()
return cmd.NewKubectlCommand(cmd.KubectlOptions{
// Use NewConfigFlags to avoid load existing values from
// defaultConfigFlags.
ConfigFlags: genericclioptions.NewConfigFlags(true),
})
}
// getKubeClusterName extracts the Kubernetes Cluster name if the Kube belongs to
// the teleportClusterName cluster. It parses the args to extract the `--kubeconfig`
// and `--context` flag values and to use them if any was overriten.
func getKubeClusterName(args []string, teleportClusterName string) (string, error) {
kubeconfigLocation, selectedContext := extractKubeConfigAndContext(args)
if selectedContext == "" {
kubeName, err := kubeconfig.SelectedKubeCluster(kubeconfigLocation, teleportClusterName)
return kubeName, trace.Wrap(err)
}
kc, err := kubeconfig.Load(kubeconfigLocation)
if err != nil {
return "", trace.Wrap(err)
}
kubeName := kubeconfig.KubeClusterFromContext(selectedContext, kc.Contexts[selectedContext], teleportClusterName)
if kubeName == "" {
return "", trace.BadParameter("selected context %q does not belong to Teleport cluster %q", selectedContext, teleportClusterName)
}
return kubeName, nil
}
type kubeLocalProxyOpts struct {
// kubectlArgs is a list of command arguments passed in for `tsh kubectl`.
// used to decide if local proxy is required.
kubectlArgs []string
// makeAndStartKubeLocalProxyFunc is a callback function to create and
// start a kube local proxy, when it is decided that a local proxy is
// required. Default to makeAndStartKubeLocalProxy. Can be set another
// function for testing.
makeAndStartKubeLocalProxyFunc func(*CLIConf, *clientcmdapi.Config, kubeconfig.LocalProxyClusters) (func(), string, error)
}
type applyKubeLocalProxyOpts func(o *kubeLocalProxyOpts)
func withKubectlArgs(args []string) applyKubeLocalProxyOpts {
return func(o *kubeLocalProxyOpts) {
o.kubectlArgs = args
}
}
func newKubeLocalProxyOpts(applyOpts ...applyKubeLocalProxyOpts) kubeLocalProxyOpts {
opts := kubeLocalProxyOpts{
makeAndStartKubeLocalProxyFunc: makeAndStartKubeLocalProxy,
}
for _, applyOpt := range applyOpts {
applyOpt(&opts)
}
return opts
}
// maybeStartKubeLocalProxy starts a kube local proxy if local proxy is
// required. A closeFn and the new kubeconfig path are returned if local proxy
// is successfully created. Called by `tsh kubectl` and `tsh kube exec`.
func maybeStartKubeLocalProxy(cf *CLIConf, applyOpts ...applyKubeLocalProxyOpts) (func(), string, error) {
opts := newKubeLocalProxyOpts(applyOpts...)
config, clusters, useLocalProxy := shouldUseKubeLocalProxy(cf, opts.kubectlArgs)
if !useLocalProxy {
return func() {}, "", nil
}
closeFn, newKubeConfigLocation, err := opts.makeAndStartKubeLocalProxyFunc(cf, config, clusters)
return closeFn, newKubeConfigLocation, trace.Wrap(err)
}
// makeAndStartKubeLocalProxy is a helper to create a kube local proxy and
// start it in a goroutine. If successful, a closeFn and the generated
// kubeconfig location are returned.
func makeAndStartKubeLocalProxy(cf *CLIConf, config *clientcmdapi.Config, clusters kubeconfig.LocalProxyClusters) (func(), string, error) {
tc, err := makeClient(cf)
if err != nil {
return nil, "", trace.Wrap(err)
}
localProxy, err := makeKubeLocalProxy(cf, tc, clusters, config, cf.LocalProxyPort)
if err != nil {
return nil, "", trace.Wrap(err)
}
go localProxy.Start(cf.Context)
closeFn := func() {
localProxy.Close()
}
return closeFn, localProxy.KubeConfigPath(), nil
}
// shouldUseKubeLocalProxy checks if a local proxy is required for kube
// access for `tsh kubectl` or `tsh kube exec`.
//
// The local proxy is required when all of these conditions are met:
// - profile is loadable
// - kube access is enabled, and is accessed through web proxy address
// - ALPN connection upgrade is required (e.g. Proxy behind ALB)
// - not `kubectl config` commands
// - original/default kubeconfig is loadable
// - Selected cluster is a Teleport cluster that uses KubeClusterAddr
func shouldUseKubeLocalProxy(cf *CLIConf, kubectlArgs []string) (*clientcmdapi.Config, kubeconfig.LocalProxyClusters, bool) {
// When failed to load profile, assume this CLI command is not running
// against Teleport clusters.
profile, err := cf.GetProfile()
if err != nil {
return nil, nil, false
}
if !profile.RequireKubeLocalProxy() {
return nil, nil, false
}
// Skip "kubectl config" commands.
var kubeconfigLocation, selectedContext string
if len(kubectlArgs) > 0 {
kubectlCommand := makeKubectlCobraCommand()
if isKubectlConfigCommand(kubectlCommand, kubectlArgs) {
return nil, nil, false
}
kubeconfigLocation, selectedContext = extractKubeConfigAndContextFromCommand(kubectlCommand, kubectlArgs)
}
// Nothing to do if cannot load original kubeconfig.
defaultConfig, err := kubeconfig.Load(kubeconfigLocation)
if err != nil {
return nil, nil, false
}
// Prepare Teleport kube cluster based on selected context.
kubeCluster, found := kubeconfig.FindTeleportClusterForLocalProxy(defaultConfig, kubeClusterAddrFromProfile(profile), selectedContext)
if !found {
return nil, nil, false
}
return defaultConfig, kubeconfig.LocalProxyClusters{kubeCluster}, true
}
func isKubectlConfigCommand(kubectlCommand *cobra.Command, args []string) bool {
if len(args) < 2 || args[0] != "kubectl" {
return false
}
find, _, _ := kubectlCommand.Find(args[1:])
for ; find != nil; find = find.Parent() {
if find.Name() == "config" {
return true
}
}
return false
}
func kubeClusterAddrFromProfile(profile *profile.Profile) string {
partialClientConfig := client.Config{
WebProxyAddr: profile.WebProxyAddr,
KubeProxyAddr: profile.KubeProxyAddr,
}
return partialClientConfig.KubeClusterAddr()
}
func overwriteKubeconfigFlagInArgs(args []string, newPath string) []string {
// Make a clone to avoid changing the original args.
args = slices.Clone(args)
for i, arg := range args {
switch {
case strings.HasPrefix(arg, "--kubeconfig="):
args[i] = fmt.Sprintf("--kubeconfig=%v", newPath)
case arg == "--kubeconfig" && len(args) > i+1:
args[i+1] = newPath
}
}
return args
}
func overwriteKubeconfigInEnv(env []string, newPath string) (output []string) {
kubeConfigEnvPrefix := teleport.EnvKubeConfig + "="
for _, entry := range env {
if strings.HasPrefix(entry, kubeConfigEnvPrefix) {
continue
}
output = append(output, entry)
}
output = append(output, kubeConfigEnvPrefix+newPath)
return
} | command.SetArgs(args[1:])
// run command until it finishes. | random_line_split |
kubectl.go | /*
Copyright 2023 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/gravitational/trace"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"k8s.io/cli-runtime/pkg/genericclioptions"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/component-base/cli"
"k8s.io/kubectl/pkg/cmd"
"k8s.io/kubectl/pkg/cmd/plugin"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/gravitational/teleport"
tracehttp "github.com/gravitational/teleport/api/observability/tracing/http"
"github.com/gravitational/teleport/api/profile"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/kube/kubeconfig"
"github.com/gravitational/teleport/lib/observability/tracing"
)
var (
podForbiddenRe = regexp.MustCompile(`(?m)Error from server \(Forbidden\): pods "(.*)" is forbidden: User ".*" cannot get resource "pods" in API group "" in the namespace "(.*)"`)
clusterForbidden = "[00] access denied"
// clusterObjectDiscoveryFailed is printed when kubectl tries to do API discovery
// - calling /apis endpoint - but Teleport denies the request. Since it cannot
// discover the resources available in the cluster, it prints this message saying
// that the cluster does not have pod(s). Since every Kubernetes cluster supports
// pods, it's safe to create a resource access request.
clusterObjectDiscoveryFailed = regexp.MustCompile(`(?m)the server doesn't have a resource type "pods?"`)
)
// resourceKind identifies a Kubernetes resource.
type resourceKind struct {
kind string
subResourceName string
}
// onKubectlCommand re-execs itself if env var `tshKubectlRexec` is not set
// in order to execute the `kubectl` portion of the code. This is a requirement because
// `kubectl` calls `os.Exit()` in every code path, and we need to intercept the
// exit code to validate if the request was denied.
// When executing `tsh kubectl get pods`, tsh checks if `tshKubectlReexec`. Since
// it's the user call and the flag is not present, tsh reexecs the same exact
// the user executed and uses an io.MultiWriter to write the os.Stderr output
// from the kubectl command into an io.Pipe for analysis. It also sets the env
// `tshKubectlReexec` in the exec.Cmd.Env and runs the command. When running the
// command, `tsh` will be recalled, and since `tshKubectlReexec` is set only the
// kubectl portion of code is executed.
// On the caller side, once the callee execution finishes, tsh inspects the stderr
// outputs and decides if creating an access request is appropriate.
// If the access request is created, tsh waits for the approval and runs the expected
// command again.
func | (cf *CLIConf, fullArgs []string, args []string) error {
if os.Getenv(tshKubectlReexecEnvVar) == "" {
err := runKubectlAndCollectRun(cf, fullArgs, args)
return trace.Wrap(err)
}
runKubectlCode(cf, args)
return nil
}
const (
// tshKubectlReexecEnvVar is the name of the environment variable used to control if
// tsh should re-exec or execute a kubectl command.
tshKubectlReexecEnvVar = "TSH_KUBE_REEXEC"
)
// runKubectlReexec reexecs itself and copies the `stderr` output into
// the provided collector.
// It also sets tshKubectlReexec for the command to prevent
// an exec loop
func runKubectlReexec(cf *CLIConf, fullArgs, args []string, collector io.Writer) error {
closeFn, newKubeConfigLocation, err := maybeStartKubeLocalProxy(cf, withKubectlArgs(args))
if err != nil {
return trace.Wrap(err)
}
defer closeFn()
cmdEnv := append(os.Environ(), fmt.Sprintf("%s=yes", tshKubectlReexecEnvVar))
// Update kubeconfig location.
if newKubeConfigLocation != "" {
cmdEnv = overwriteKubeconfigInEnv(cmdEnv, newKubeConfigLocation)
fullArgs = overwriteKubeconfigFlagInArgs(fullArgs, newKubeConfigLocation)
}
// Execute.
cmd := exec.Command(cf.executablePath, fullArgs...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = io.MultiWriter(os.Stderr, collector)
cmd.Env = cmdEnv
return trace.Wrap(cmd.Run())
}
// wrapConfigFn wraps the rest.Config with a custom RoundTripper if the user
// wants to sample traces.
func wrapConfigFn(cf *CLIConf) func(c *rest.Config) *rest.Config {
return func(c *rest.Config) *rest.Config {
c.Wrap(
func(rt http.RoundTripper) http.RoundTripper {
if cf.SampleTraces {
// If the user wants to sample traces, wrap the transport with a trace
// transport.
return tracehttp.NewTransport(rt)
}
return rt
},
)
return c
}
}
// runKubectlCode runs the actual kubectl package code with the default options.
// This code is only executed when `tshKubectlReexec` env is present. This happens
// because we need to retry kubectl calls and `kubectl` calls os.Exit in multiple
// paths.
func runKubectlCode(cf *CLIConf, args []string) {
closeTracer := func() {}
cf.TracingProvider = tracing.NoopProvider()
cf.tracer = cf.TracingProvider.Tracer(teleport.ComponentTSH)
if cf.SampleTraces {
provider, err := newTraceProvider(cf, "", nil)
if err != nil {
log.WithError(err).Debug("Failed to set up span forwarding")
} else {
// ensure that the provider is shutdown on exit to flush any spans
// that haven't been forwarded yet.
closeTracer = func() {
shutdownCtx, cancel := context.WithTimeout(cf.Context, 1*time.Second)
defer cancel()
err := provider.Shutdown(shutdownCtx)
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
log.WithError(err).Debugf("Failed to shutdown trace provider")
}
}
}
}
// If the user opted to not sample traces, cf.TracingProvider is pre-initialized
// with a noop provider.
ctx, span := cf.TracingProvider.Tracer("kubectl").Start(cf.Context, "kubectl")
closeSpanAndTracer := func() {
span.End()
closeTracer()
}
// These values are the defaults used by kubectl and can be found here:
// https://github.com/kubernetes/kubectl/blob/3612c18ed86fc0a2f4467ca355b3e21569fabe0a/pkg/cmd/cmd.go#L94
defaultConfigFlags := genericclioptions.NewConfigFlags(true).
WithDeprecatedPasswordFlag().
WithDiscoveryBurst(300).
WithDiscoveryQPS(50.0).
WithWrapConfigFn(wrapConfigFn(cf))
command := cmd.NewDefaultKubectlCommandWithArgs(
cmd.KubectlOptions{
// init the default plugin handler.
PluginHandler: cmd.NewDefaultPluginHandler(plugin.ValidPluginFilenamePrefixes),
Arguments: args,
ConfigFlags: defaultConfigFlags,
// init the IOSStreams.
IOStreams: genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr},
},
)
command.SetContext(ctx)
// override args without kubectl to avoid errors.
command.SetArgs(args[1:])
// run command until it finishes.
if err := cli.RunNoErrOutput(command); err != nil {
closeSpanAndTracer()
// Pretty-print the error and exit with an error.
cmdutil.CheckErr(err)
}
closeSpanAndTracer()
os.Exit(0)
}
func runKubectlAndCollectRun(cf *CLIConf, fullArgs, args []string) error {
var (
alreadyRequestedAccess bool
err error
exitErr *exec.ExitError
)
for {
// missingKubeResources will include the Kubernetes Resources whose access
// was rejected in this kubectl call.
missingKubeResources := make([]resourceKind, 0, 50)
reader, writer := io.Pipe()
group, _ := errgroup.WithContext(cf.Context)
group.Go(
func() error {
// This goroutine scans each line of output emitted to stderr by kubectl
// and parses it in order to check if the returned error was a problem with
// missing access level. If it's the case, tsh kubectl will create automatically
// the access request for the user to access the resource.
// Current supported resources:
// - pod
// - kube_cluster
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
line := scanner.Text()
// Check if the request targeting a pod endpoint was denied due to
// Teleport Pod RBAC or if the operation was denied by Kubernetes RBAC.
// In the second case, we should create a Resource Access Request to allow
// the user to exec/read logs using different Kubernetes RBAC principals.
// using different Kubernetes RBAC principals.
if podForbiddenRe.MatchString(line) {
results := podForbiddenRe.FindStringSubmatch(line)
missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubePod, subResourceName: filepath.Join(results[2], results[1])})
// Check if cluster access was denied. If denied we should create
// a Resource Access Request for the cluster and not a pod.
} else if strings.Contains(line, clusterForbidden) || clusterObjectDiscoveryFailed.MatchString(line) {
missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubernetesCluster})
}
}
return trace.Wrap(scanner.Err())
},
)
err := runKubectlReexec(cf, fullArgs, args, writer)
writer.CloseWithError(io.EOF)
if scanErr := group.Wait(); scanErr != nil {
log.WithError(scanErr).Warn("unable to scan stderr payload")
}
if err == nil {
break
} else if !errors.As(err, &exitErr) {
return trace.Wrap(err)
} else if errors.As(err, &exitErr) && exitErr.ExitCode() != cmdutil.DefaultErrorExitCode {
// if the exit code is not 1, it was emitted by pod exec code and we should
// ignore it since the user was allowed to execute the command in the pod.
break
}
if len(missingKubeResources) > 0 && !alreadyRequestedAccess {
// create the access requests for the user and wait for approval.
if err := createKubeAccessRequest(cf, missingKubeResources, args); err != nil {
return trace.Wrap(err)
}
alreadyRequestedAccess = true
continue
}
break
}
// exit with the kubectl exit code to keep compatibility.
if errors.As(err, &exitErr) {
os.Exit(exitErr.ExitCode())
}
return nil
}
// createKubeAccessRequest creates an access request to the denied resources
// if the user's roles allow search_as_role.
func createKubeAccessRequest(cf *CLIConf, resources []resourceKind, args []string) error {
tc, err := makeClient(cf)
if err != nil {
return trace.Wrap(err)
}
kubeName, err := getKubeClusterName(args, tc.SiteName)
if err != nil {
return trace.Wrap(err)
}
for _, rec := range resources {
cf.RequestedResourceIDs = append(
cf.RequestedResourceIDs,
filepath.Join("/", tc.SiteName, rec.kind, kubeName, rec.subResourceName),
)
}
cf.Reason = fmt.Sprintf("Resource request automatically created for %v", args)
if err := executeAccessRequest(cf, tc); err != nil {
// TODO(tigrato): intercept the error to validate the origin
return trace.Wrap(err)
}
return nil
}
// extractKubeConfigAndContext parses the args and extracts:
// - the "--context" flag that overrides the default context to use, if present
// - the "--kubeconfig" flag that overrides the default kubeconfig location, if
// present
func extractKubeConfigAndContext(args []string) (string, string) {
if len(args) <= 2 {
return "", ""
}
command := makeKubectlCobraCommand()
return extractKubeConfigAndContextFromCommand(command, args)
}
// extractKubeConfigAndContextFromCommand parses the args using provided
// kubectl command and extracts:
// - the "--context" flag that overrides the default context to use, if present
// - the "--kubeconfig" flag that overrides the default kubeconfig location, if
// present
func extractKubeConfigAndContextFromCommand(command *cobra.Command, args []string) (kubeconfig string, context string) {
if len(args) <= 2 {
return
}
// Find subcommand.
if subcommand, _, err := command.Find(args[1:]); err == nil {
command = subcommand
}
// Ignore errors from ParseFlags.
command.ParseFlags(args[1:])
kubeconfig = command.Flag("kubeconfig").Value.String()
context = command.Flag("context").Value.String()
return
}
var makeKubectlCobraCommandLock sync.Mutex
// makeKubectlCobraCommand creates a cobra.Command for kubectl.
//
// Note that cmd.NewKubectlCommand is slow (15+ ms, 20k+ alloc), so avoid
// making/re-making it when possible.
//
// Also cmd.NewKubectlCommand is not goroutine-safe, thus using a lock.
func makeKubectlCobraCommand() *cobra.Command {
makeKubectlCobraCommandLock.Lock()
defer makeKubectlCobraCommandLock.Unlock()
return cmd.NewKubectlCommand(cmd.KubectlOptions{
// Use NewConfigFlags to avoid load existing values from
// defaultConfigFlags.
ConfigFlags: genericclioptions.NewConfigFlags(true),
})
}
// getKubeClusterName extracts the Kubernetes Cluster name if the Kube belongs to
// the teleportClusterName cluster. It parses the args to extract the `--kubeconfig`
// and `--context` flag values and to use them if any was overriten.
func getKubeClusterName(args []string, teleportClusterName string) (string, error) {
kubeconfigLocation, selectedContext := extractKubeConfigAndContext(args)
if selectedContext == "" {
kubeName, err := kubeconfig.SelectedKubeCluster(kubeconfigLocation, teleportClusterName)
return kubeName, trace.Wrap(err)
}
kc, err := kubeconfig.Load(kubeconfigLocation)
if err != nil {
return "", trace.Wrap(err)
}
kubeName := kubeconfig.KubeClusterFromContext(selectedContext, kc.Contexts[selectedContext], teleportClusterName)
if kubeName == "" {
return "", trace.BadParameter("selected context %q does not belong to Teleport cluster %q", selectedContext, teleportClusterName)
}
return kubeName, nil
}
type kubeLocalProxyOpts struct {
// kubectlArgs is a list of command arguments passed in for `tsh kubectl`.
// used to decide if local proxy is required.
kubectlArgs []string
// makeAndStartKubeLocalProxyFunc is a callback function to create and
// start a kube local proxy, when it is decided that a local proxy is
// required. Default to makeAndStartKubeLocalProxy. Can be set another
// function for testing.
makeAndStartKubeLocalProxyFunc func(*CLIConf, *clientcmdapi.Config, kubeconfig.LocalProxyClusters) (func(), string, error)
}
type applyKubeLocalProxyOpts func(o *kubeLocalProxyOpts)
func withKubectlArgs(args []string) applyKubeLocalProxyOpts {
return func(o *kubeLocalProxyOpts) {
o.kubectlArgs = args
}
}
func newKubeLocalProxyOpts(applyOpts ...applyKubeLocalProxyOpts) kubeLocalProxyOpts {
opts := kubeLocalProxyOpts{
makeAndStartKubeLocalProxyFunc: makeAndStartKubeLocalProxy,
}
for _, applyOpt := range applyOpts {
applyOpt(&opts)
}
return opts
}
// maybeStartKubeLocalProxy starts a kube local proxy if local proxy is
// required. A closeFn and the new kubeconfig path are returned if local proxy
// is successfully created. Called by `tsh kubectl` and `tsh kube exec`.
func maybeStartKubeLocalProxy(cf *CLIConf, applyOpts ...applyKubeLocalProxyOpts) (func(), string, error) {
opts := newKubeLocalProxyOpts(applyOpts...)
config, clusters, useLocalProxy := shouldUseKubeLocalProxy(cf, opts.kubectlArgs)
if !useLocalProxy {
return func() {}, "", nil
}
closeFn, newKubeConfigLocation, err := opts.makeAndStartKubeLocalProxyFunc(cf, config, clusters)
return closeFn, newKubeConfigLocation, trace.Wrap(err)
}
// makeAndStartKubeLocalProxy is a helper to create a kube local proxy and
// start it in a goroutine. If successful, a closeFn and the generated
// kubeconfig location are returned.
func makeAndStartKubeLocalProxy(cf *CLIConf, config *clientcmdapi.Config, clusters kubeconfig.LocalProxyClusters) (func(), string, error) {
tc, err := makeClient(cf)
if err != nil {
return nil, "", trace.Wrap(err)
}
localProxy, err := makeKubeLocalProxy(cf, tc, clusters, config, cf.LocalProxyPort)
if err != nil {
return nil, "", trace.Wrap(err)
}
go localProxy.Start(cf.Context)
closeFn := func() {
localProxy.Close()
}
return closeFn, localProxy.KubeConfigPath(), nil
}
// shouldUseKubeLocalProxy checks if a local proxy is required for kube
// access for `tsh kubectl` or `tsh kube exec`.
//
// The local proxy is required when all of these conditions are met:
// - profile is loadable
// - kube access is enabled, and is accessed through web proxy address
// - ALPN connection upgrade is required (e.g. Proxy behind ALB)
// - not `kubectl config` commands
// - original/default kubeconfig is loadable
// - Selected cluster is a Teleport cluster that uses KubeClusterAddr
func shouldUseKubeLocalProxy(cf *CLIConf, kubectlArgs []string) (*clientcmdapi.Config, kubeconfig.LocalProxyClusters, bool) {
// When failed to load profile, assume this CLI command is not running
// against Teleport clusters.
profile, err := cf.GetProfile()
if err != nil {
return nil, nil, false
}
if !profile.RequireKubeLocalProxy() {
return nil, nil, false
}
// Skip "kubectl config" commands.
var kubeconfigLocation, selectedContext string
if len(kubectlArgs) > 0 {
kubectlCommand := makeKubectlCobraCommand()
if isKubectlConfigCommand(kubectlCommand, kubectlArgs) {
return nil, nil, false
}
kubeconfigLocation, selectedContext = extractKubeConfigAndContextFromCommand(kubectlCommand, kubectlArgs)
}
// Nothing to do if cannot load original kubeconfig.
defaultConfig, err := kubeconfig.Load(kubeconfigLocation)
if err != nil {
return nil, nil, false
}
// Prepare Teleport kube cluster based on selected context.
kubeCluster, found := kubeconfig.FindTeleportClusterForLocalProxy(defaultConfig, kubeClusterAddrFromProfile(profile), selectedContext)
if !found {
return nil, nil, false
}
return defaultConfig, kubeconfig.LocalProxyClusters{kubeCluster}, true
}
func isKubectlConfigCommand(kubectlCommand *cobra.Command, args []string) bool {
if len(args) < 2 || args[0] != "kubectl" {
return false
}
find, _, _ := kubectlCommand.Find(args[1:])
for ; find != nil; find = find.Parent() {
if find.Name() == "config" {
return true
}
}
return false
}
func kubeClusterAddrFromProfile(profile *profile.Profile) string {
partialClientConfig := client.Config{
WebProxyAddr: profile.WebProxyAddr,
KubeProxyAddr: profile.KubeProxyAddr,
}
return partialClientConfig.KubeClusterAddr()
}
func overwriteKubeconfigFlagInArgs(args []string, newPath string) []string {
// Make a clone to avoid changing the original args.
args = slices.Clone(args)
for i, arg := range args {
switch {
case strings.HasPrefix(arg, "--kubeconfig="):
args[i] = fmt.Sprintf("--kubeconfig=%v", newPath)
case arg == "--kubeconfig" && len(args) > i+1:
args[i+1] = newPath
}
}
return args
}
func overwriteKubeconfigInEnv(env []string, newPath string) (output []string) {
kubeConfigEnvPrefix := teleport.EnvKubeConfig + "="
for _, entry := range env {
if strings.HasPrefix(entry, kubeConfigEnvPrefix) {
continue
}
output = append(output, entry)
}
output = append(output, kubeConfigEnvPrefix+newPath)
return
}
| onKubectlCommand | identifier_name |
kubectl.go | /*
Copyright 2023 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/gravitational/trace"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"k8s.io/cli-runtime/pkg/genericclioptions"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/component-base/cli"
"k8s.io/kubectl/pkg/cmd"
"k8s.io/kubectl/pkg/cmd/plugin"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/gravitational/teleport"
tracehttp "github.com/gravitational/teleport/api/observability/tracing/http"
"github.com/gravitational/teleport/api/profile"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/kube/kubeconfig"
"github.com/gravitational/teleport/lib/observability/tracing"
)
var (
podForbiddenRe = regexp.MustCompile(`(?m)Error from server \(Forbidden\): pods "(.*)" is forbidden: User ".*" cannot get resource "pods" in API group "" in the namespace "(.*)"`)
clusterForbidden = "[00] access denied"
// clusterObjectDiscoveryFailed is printed when kubectl tries to do API discovery
// - calling /apis endpoint - but Teleport denies the request. Since it cannot
// discover the resources available in the cluster, it prints this message saying
// that the cluster does not have pod(s). Since every Kubernetes cluster supports
// pods, it's safe to create a resource access request.
clusterObjectDiscoveryFailed = regexp.MustCompile(`(?m)the server doesn't have a resource type "pods?"`)
)
// resourceKind identifies a Kubernetes resource.
type resourceKind struct {
kind string
subResourceName string
}
// onKubectlCommand re-execs itself if env var `tshKubectlRexec` is not set
// in order to execute the `kubectl` portion of the code. This is a requirement because
// `kubectl` calls `os.Exit()` in every code path, and we need to intercept the
// exit code to validate if the request was denied.
// When executing `tsh kubectl get pods`, tsh checks if `tshKubectlReexec`. Since
// it's the user call and the flag is not present, tsh reexecs the same exact
// the user executed and uses an io.MultiWriter to write the os.Stderr output
// from the kubectl command into an io.Pipe for analysis. It also sets the env
// `tshKubectlReexec` in the exec.Cmd.Env and runs the command. When running the
// command, `tsh` will be recalled, and since `tshKubectlReexec` is set only the
// kubectl portion of code is executed.
// On the caller side, once the callee execution finishes, tsh inspects the stderr
// outputs and decides if creating an access request is appropriate.
// If the access request is created, tsh waits for the approval and runs the expected
// command again.
func onKubectlCommand(cf *CLIConf, fullArgs []string, args []string) error {
if os.Getenv(tshKubectlReexecEnvVar) == "" {
err := runKubectlAndCollectRun(cf, fullArgs, args)
return trace.Wrap(err)
}
runKubectlCode(cf, args)
return nil
}
const (
// tshKubectlReexecEnvVar is the name of the environment variable used to control if
// tsh should re-exec or execute a kubectl command.
tshKubectlReexecEnvVar = "TSH_KUBE_REEXEC"
)
// runKubectlReexec reexecs itself and copies the `stderr` output into
// the provided collector.
// It also sets tshKubectlReexec for the command to prevent
// an exec loop
func runKubectlReexec(cf *CLIConf, fullArgs, args []string, collector io.Writer) error {
closeFn, newKubeConfigLocation, err := maybeStartKubeLocalProxy(cf, withKubectlArgs(args))
if err != nil {
return trace.Wrap(err)
}
defer closeFn()
cmdEnv := append(os.Environ(), fmt.Sprintf("%s=yes", tshKubectlReexecEnvVar))
// Update kubeconfig location.
if newKubeConfigLocation != "" {
cmdEnv = overwriteKubeconfigInEnv(cmdEnv, newKubeConfigLocation)
fullArgs = overwriteKubeconfigFlagInArgs(fullArgs, newKubeConfigLocation)
}
// Execute.
cmd := exec.Command(cf.executablePath, fullArgs...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = io.MultiWriter(os.Stderr, collector)
cmd.Env = cmdEnv
return trace.Wrap(cmd.Run())
}
// wrapConfigFn wraps the rest.Config with a custom RoundTripper if the user
// wants to sample traces.
func wrapConfigFn(cf *CLIConf) func(c *rest.Config) *rest.Config {
return func(c *rest.Config) *rest.Config {
c.Wrap(
func(rt http.RoundTripper) http.RoundTripper {
if cf.SampleTraces {
// If the user wants to sample traces, wrap the transport with a trace
// transport.
return tracehttp.NewTransport(rt)
}
return rt
},
)
return c
}
}
// runKubectlCode runs the actual kubectl package code with the default options.
// This code is only executed when `tshKubectlReexec` env is present. This happens
// because we need to retry kubectl calls and `kubectl` calls os.Exit in multiple
// paths.
func runKubectlCode(cf *CLIConf, args []string) {
closeTracer := func() {}
cf.TracingProvider = tracing.NoopProvider()
cf.tracer = cf.TracingProvider.Tracer(teleport.ComponentTSH)
if cf.SampleTraces {
provider, err := newTraceProvider(cf, "", nil)
if err != nil {
log.WithError(err).Debug("Failed to set up span forwarding")
} else {
// ensure that the provider is shutdown on exit to flush any spans
// that haven't been forwarded yet.
closeTracer = func() {
shutdownCtx, cancel := context.WithTimeout(cf.Context, 1*time.Second)
defer cancel()
err := provider.Shutdown(shutdownCtx)
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
log.WithError(err).Debugf("Failed to shutdown trace provider")
}
}
}
}
// If the user opted to not sample traces, cf.TracingProvider is pre-initialized
// with a noop provider.
ctx, span := cf.TracingProvider.Tracer("kubectl").Start(cf.Context, "kubectl")
closeSpanAndTracer := func() {
span.End()
closeTracer()
}
// These values are the defaults used by kubectl and can be found here:
// https://github.com/kubernetes/kubectl/blob/3612c18ed86fc0a2f4467ca355b3e21569fabe0a/pkg/cmd/cmd.go#L94
defaultConfigFlags := genericclioptions.NewConfigFlags(true).
WithDeprecatedPasswordFlag().
WithDiscoveryBurst(300).
WithDiscoveryQPS(50.0).
WithWrapConfigFn(wrapConfigFn(cf))
command := cmd.NewDefaultKubectlCommandWithArgs(
cmd.KubectlOptions{
// init the default plugin handler.
PluginHandler: cmd.NewDefaultPluginHandler(plugin.ValidPluginFilenamePrefixes),
Arguments: args,
ConfigFlags: defaultConfigFlags,
// init the IOSStreams.
IOStreams: genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr},
},
)
command.SetContext(ctx)
// override args without kubectl to avoid errors.
command.SetArgs(args[1:])
// run command until it finishes.
if err := cli.RunNoErrOutput(command); err != nil {
closeSpanAndTracer()
// Pretty-print the error and exit with an error.
cmdutil.CheckErr(err)
}
closeSpanAndTracer()
os.Exit(0)
}
func runKubectlAndCollectRun(cf *CLIConf, fullArgs, args []string) error {
var (
alreadyRequestedAccess bool
err error
exitErr *exec.ExitError
)
for {
// missingKubeResources will include the Kubernetes Resources whose access
// was rejected in this kubectl call.
missingKubeResources := make([]resourceKind, 0, 50)
reader, writer := io.Pipe()
group, _ := errgroup.WithContext(cf.Context)
group.Go(
func() error {
// This goroutine scans each line of output emitted to stderr by kubectl
// and parses it in order to check if the returned error was a problem with
// missing access level. If it's the case, tsh kubectl will create automatically
// the access request for the user to access the resource.
// Current supported resources:
// - pod
// - kube_cluster
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
line := scanner.Text()
// Check if the request targeting a pod endpoint was denied due to
// Teleport Pod RBAC or if the operation was denied by Kubernetes RBAC.
// In the second case, we should create a Resource Access Request to allow
// the user to exec/read logs using different Kubernetes RBAC principals.
// using different Kubernetes RBAC principals.
if podForbiddenRe.MatchString(line) {
results := podForbiddenRe.FindStringSubmatch(line)
missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubePod, subResourceName: filepath.Join(results[2], results[1])})
// Check if cluster access was denied. If denied we should create
// a Resource Access Request for the cluster and not a pod.
} else if strings.Contains(line, clusterForbidden) || clusterObjectDiscoveryFailed.MatchString(line) {
missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubernetesCluster})
}
}
return trace.Wrap(scanner.Err())
},
)
err := runKubectlReexec(cf, fullArgs, args, writer)
writer.CloseWithError(io.EOF)
if scanErr := group.Wait(); scanErr != nil {
log.WithError(scanErr).Warn("unable to scan stderr payload")
}
if err == nil {
break
} else if !errors.As(err, &exitErr) {
return trace.Wrap(err)
} else if errors.As(err, &exitErr) && exitErr.ExitCode() != cmdutil.DefaultErrorExitCode {
// if the exit code is not 1, it was emitted by pod exec code and we should
// ignore it since the user was allowed to execute the command in the pod.
break
}
if len(missingKubeResources) > 0 && !alreadyRequestedAccess {
// create the access requests for the user and wait for approval.
if err := createKubeAccessRequest(cf, missingKubeResources, args); err != nil {
return trace.Wrap(err)
}
alreadyRequestedAccess = true
continue
}
break
}
// exit with the kubectl exit code to keep compatibility.
if errors.As(err, &exitErr) {
os.Exit(exitErr.ExitCode())
}
return nil
}
// createKubeAccessRequest creates an access request to the denied resources
// if the user's roles allow search_as_role.
func createKubeAccessRequest(cf *CLIConf, resources []resourceKind, args []string) error {
tc, err := makeClient(cf)
if err != nil {
return trace.Wrap(err)
}
kubeName, err := getKubeClusterName(args, tc.SiteName)
if err != nil {
return trace.Wrap(err)
}
for _, rec := range resources {
cf.RequestedResourceIDs = append(
cf.RequestedResourceIDs,
filepath.Join("/", tc.SiteName, rec.kind, kubeName, rec.subResourceName),
)
}
cf.Reason = fmt.Sprintf("Resource request automatically created for %v", args)
if err := executeAccessRequest(cf, tc); err != nil {
// TODO(tigrato): intercept the error to validate the origin
return trace.Wrap(err)
}
return nil
}
// extractKubeConfigAndContext parses the args and extracts:
// - the "--context" flag that overrides the default context to use, if present
// - the "--kubeconfig" flag that overrides the default kubeconfig location, if
// present
func extractKubeConfigAndContext(args []string) (string, string) {
if len(args) <= 2 {
return "", ""
}
command := makeKubectlCobraCommand()
return extractKubeConfigAndContextFromCommand(command, args)
}
// extractKubeConfigAndContextFromCommand parses the args using provided
// kubectl command and extracts:
// - the "--context" flag that overrides the default context to use, if present
// - the "--kubeconfig" flag that overrides the default kubeconfig location, if
// present
func extractKubeConfigAndContextFromCommand(command *cobra.Command, args []string) (kubeconfig string, context string) {
if len(args) <= 2 {
return
}
// Find subcommand.
if subcommand, _, err := command.Find(args[1:]); err == nil {
command = subcommand
}
// Ignore errors from ParseFlags.
command.ParseFlags(args[1:])
kubeconfig = command.Flag("kubeconfig").Value.String()
context = command.Flag("context").Value.String()
return
}
var makeKubectlCobraCommandLock sync.Mutex
// makeKubectlCobraCommand creates a cobra.Command for kubectl.
//
// Note that cmd.NewKubectlCommand is slow (15+ ms, 20k+ alloc), so avoid
// making/re-making it when possible.
//
// Also cmd.NewKubectlCommand is not goroutine-safe, thus using a lock.
func makeKubectlCobraCommand() *cobra.Command {
makeKubectlCobraCommandLock.Lock()
defer makeKubectlCobraCommandLock.Unlock()
return cmd.NewKubectlCommand(cmd.KubectlOptions{
// Use NewConfigFlags to avoid load existing values from
// defaultConfigFlags.
ConfigFlags: genericclioptions.NewConfigFlags(true),
})
}
// getKubeClusterName extracts the Kubernetes Cluster name if the Kube belongs to
// the teleportClusterName cluster. It parses the args to extract the `--kubeconfig`
// and `--context` flag values and to use them if any was overriten.
func getKubeClusterName(args []string, teleportClusterName string) (string, error) {
kubeconfigLocation, selectedContext := extractKubeConfigAndContext(args)
if selectedContext == "" {
kubeName, err := kubeconfig.SelectedKubeCluster(kubeconfigLocation, teleportClusterName)
return kubeName, trace.Wrap(err)
}
kc, err := kubeconfig.Load(kubeconfigLocation)
if err != nil {
return "", trace.Wrap(err)
}
kubeName := kubeconfig.KubeClusterFromContext(selectedContext, kc.Contexts[selectedContext], teleportClusterName)
if kubeName == "" {
return "", trace.BadParameter("selected context %q does not belong to Teleport cluster %q", selectedContext, teleportClusterName)
}
return kubeName, nil
}
type kubeLocalProxyOpts struct {
// kubectlArgs is a list of command arguments passed in for `tsh kubectl`.
// used to decide if local proxy is required.
kubectlArgs []string
// makeAndStartKubeLocalProxyFunc is a callback function to create and
// start a kube local proxy, when it is decided that a local proxy is
// required. Default to makeAndStartKubeLocalProxy. Can be set another
// function for testing.
makeAndStartKubeLocalProxyFunc func(*CLIConf, *clientcmdapi.Config, kubeconfig.LocalProxyClusters) (func(), string, error)
}
type applyKubeLocalProxyOpts func(o *kubeLocalProxyOpts)
func withKubectlArgs(args []string) applyKubeLocalProxyOpts {
return func(o *kubeLocalProxyOpts) {
o.kubectlArgs = args
}
}
func newKubeLocalProxyOpts(applyOpts ...applyKubeLocalProxyOpts) kubeLocalProxyOpts {
opts := kubeLocalProxyOpts{
makeAndStartKubeLocalProxyFunc: makeAndStartKubeLocalProxy,
}
for _, applyOpt := range applyOpts {
applyOpt(&opts)
}
return opts
}
// maybeStartKubeLocalProxy starts a kube local proxy if local proxy is
// required. A closeFn and the new kubeconfig path are returned if local proxy
// is successfully created. Called by `tsh kubectl` and `tsh kube exec`.
func maybeStartKubeLocalProxy(cf *CLIConf, applyOpts ...applyKubeLocalProxyOpts) (func(), string, error) |
// makeAndStartKubeLocalProxy is a helper to create a kube local proxy and
// start it in a goroutine. If successful, a closeFn and the generated
// kubeconfig location are returned.
func makeAndStartKubeLocalProxy(cf *CLIConf, config *clientcmdapi.Config, clusters kubeconfig.LocalProxyClusters) (func(), string, error) {
tc, err := makeClient(cf)
if err != nil {
return nil, "", trace.Wrap(err)
}
localProxy, err := makeKubeLocalProxy(cf, tc, clusters, config, cf.LocalProxyPort)
if err != nil {
return nil, "", trace.Wrap(err)
}
go localProxy.Start(cf.Context)
closeFn := func() {
localProxy.Close()
}
return closeFn, localProxy.KubeConfigPath(), nil
}
// shouldUseKubeLocalProxy checks if a local proxy is required for kube
// access for `tsh kubectl` or `tsh kube exec`.
//
// The local proxy is required when all of these conditions are met:
// - profile is loadable
// - kube access is enabled, and is accessed through web proxy address
// - ALPN connection upgrade is required (e.g. Proxy behind ALB)
// - not `kubectl config` commands
// - original/default kubeconfig is loadable
// - Selected cluster is a Teleport cluster that uses KubeClusterAddr
func shouldUseKubeLocalProxy(cf *CLIConf, kubectlArgs []string) (*clientcmdapi.Config, kubeconfig.LocalProxyClusters, bool) {
// When failed to load profile, assume this CLI command is not running
// against Teleport clusters.
profile, err := cf.GetProfile()
if err != nil {
return nil, nil, false
}
if !profile.RequireKubeLocalProxy() {
return nil, nil, false
}
// Skip "kubectl config" commands.
var kubeconfigLocation, selectedContext string
if len(kubectlArgs) > 0 {
kubectlCommand := makeKubectlCobraCommand()
if isKubectlConfigCommand(kubectlCommand, kubectlArgs) {
return nil, nil, false
}
kubeconfigLocation, selectedContext = extractKubeConfigAndContextFromCommand(kubectlCommand, kubectlArgs)
}
// Nothing to do if cannot load original kubeconfig.
defaultConfig, err := kubeconfig.Load(kubeconfigLocation)
if err != nil {
return nil, nil, false
}
// Prepare Teleport kube cluster based on selected context.
kubeCluster, found := kubeconfig.FindTeleportClusterForLocalProxy(defaultConfig, kubeClusterAddrFromProfile(profile), selectedContext)
if !found {
return nil, nil, false
}
return defaultConfig, kubeconfig.LocalProxyClusters{kubeCluster}, true
}
func isKubectlConfigCommand(kubectlCommand *cobra.Command, args []string) bool {
if len(args) < 2 || args[0] != "kubectl" {
return false
}
find, _, _ := kubectlCommand.Find(args[1:])
for ; find != nil; find = find.Parent() {
if find.Name() == "config" {
return true
}
}
return false
}
func kubeClusterAddrFromProfile(profile *profile.Profile) string {
partialClientConfig := client.Config{
WebProxyAddr: profile.WebProxyAddr,
KubeProxyAddr: profile.KubeProxyAddr,
}
return partialClientConfig.KubeClusterAddr()
}
func overwriteKubeconfigFlagInArgs(args []string, newPath string) []string {
// Make a clone to avoid changing the original args.
args = slices.Clone(args)
for i, arg := range args {
switch {
case strings.HasPrefix(arg, "--kubeconfig="):
args[i] = fmt.Sprintf("--kubeconfig=%v", newPath)
case arg == "--kubeconfig" && len(args) > i+1:
args[i+1] = newPath
}
}
return args
}
func overwriteKubeconfigInEnv(env []string, newPath string) (output []string) {
kubeConfigEnvPrefix := teleport.EnvKubeConfig + "="
for _, entry := range env {
if strings.HasPrefix(entry, kubeConfigEnvPrefix) {
continue
}
output = append(output, entry)
}
output = append(output, kubeConfigEnvPrefix+newPath)
return
}
| {
opts := newKubeLocalProxyOpts(applyOpts...)
config, clusters, useLocalProxy := shouldUseKubeLocalProxy(cf, opts.kubectlArgs)
if !useLocalProxy {
return func() {}, "", nil
}
closeFn, newKubeConfigLocation, err := opts.makeAndStartKubeLocalProxyFunc(cf, config, clusters)
return closeFn, newKubeConfigLocation, trace.Wrap(err)
} | identifier_body |
colocalization_snps.py | import os
import re
import sys
import datetime
import itertools
import json
from classes.ensembl_client import EnsemblRestClient
from classes.gene_database import GeneDatabase
from classes.enrichr import EnrichR
import numpy as np
import scipy.stats as stats
from statsmodels.sandbox.stats.multicomp import multipletests
from scripts.similarity_matrix_graph import generate_similarity_graph
VALID_CHRs = [str(i) for i in range(1, 23)] + ['X', 'Y']
WSIZES = [500000.0, 250000.0, 100000.0, 50000.0, 20000.0]
def load_lines(file_name):
# read lines from file and returns them in a list
f = open(file_name, 'r')
ids = [line.strip() for line in f]
f.close()
return ids
def get_regions_from_ensembl_snps(snps):
# returns regions in this format: {'<assembly1>': {'<chrNN>': [<pos1>, <pos2>, ...], ...}, ...}
regions = {}
for snp_id in snps:
data = snps[snp_id]
mappings = data.get('mappings', [])
for mapping in mappings:
pos = mapping['start']
chr = str(mapping['seq_region_name'])
if chr not in VALID_CHRs:
continue
assembly = mapping['assembly_name']
reg_assembly = regions.get(assembly, {}) # create a new one if not exists
regions[assembly] = reg_assembly # assign to ensure creation
reg_chr = reg_assembly.get(chr, []) # create a new one if not exists
reg_assembly[chr] = reg_chr # assign to ensure creation
reg_chr.append(pos)
return regions
def __is_in_region(region, chr, start, end, wsize):
# checks whether a feature located at chr:start-end overlaps a region
dist = wsize / 2
positions = region.get(chr, [])
for pos_obj in positions:
pos = pos_obj['pos'] if type(pos_obj) is dict else pos_obj
if abs(pos - start) < dist or abs(pos - end) < dist:
return pos_obj
return None
def count_genes_in_region(genes, genes_db, region, wsize, add_to_list=None):
# count genes in list that overlaps a region
count = 0
for gene_id in genes:
gene_data = genes_db.get_by_id(gene_id)
if __is_in_region(region, gene_data.chr, gene_data.start, gene_data.end, wsize) is not None:
count += 1
if type(add_to_list) is list:
add_to_list.append(gene_data)
return count
def associate_genes_with_region(genes_data, region, wsize):
assoc = {chr: [{'chr': chr, 'pos': pos, 'genes': set([])} for pos in region[chr]] for chr in region.keys()}
for gene_data in genes_data:
pos_obj = __is_in_region(assoc, gene_data.chr, gene_data.start, gene_data.end, wsize)
if pos_obj is not None:
pos_obj['genes'].add(gene_data)
else:
print 'warning: gene %s [%s] should be in a region' % (gene_data.name, gene_data.id)
return assoc
def calc_regions_by_gene_count(assoc):
"""
calculates data table for graph: num_genes vs num_regions (how many regions have 1, 2, 3, ... genes)
:param assoc:
:return: an array of tuples (x, y) sorted by x
"""
counts = {}
max_count = -1
for pos_list in assoc.values():
for pos_obj in pos_list:
num_genes = len(pos_obj['genes'])
if num_genes > max_count:
max_count = num_genes
val = counts.get(num_genes, 0)
counts[num_genes] = val + 1
for i in range(0, max_count):
# fill gaps in counts dictionary
if i not in counts:
counts[i] = 0
return map(lambda n: (n, counts[n]), sorted(counts.keys()))
def create_snp_regions(snps_ids):
# build snps regions using ensembl REST API
client = EnsemblRestClient()
snps = client.get_snps(snps_ids)
return get_regions_from_ensembl_snps(snps)
def create_gene_db(taxonomy_id, mart_file):
# create a gene DB using an ensembl mart export
genes_db = GeneDatabase(taxonomy_id)
genes_db.load_mart_export(mart_file)
return genes_db
def calc_genes_in_region_table(region, genes_db, gene_ids, wsize):
"""
Calculates 2x2 table of gene counts: [[b1, n1], [b2, n2]]
b1: number of selected genes that match a region, n1: total of selected genes
b2: number of background genes that match a region, n2: total of background genes
:param region: dict of {chr: [p1, p2, ...]} as returned by 'get_regions_from_ensembl_snps'
:param genes_db:
:param gene_ids: list of selected genes (for example: diff expressed in a GEO study)
:param wsize: window size used to calculate region match (centered in a region)
:return: a tuple with the contingency table (2x2) and matching genes array
"""
control_gene_ids = genes_db.get_difference(gene_ids)
matching_genes = []
b1 = count_genes_in_region(gene_ids, genes_db, region, wsize, add_to_list=matching_genes)
n1 = len(gene_ids)
b2 = count_genes_in_region(control_gene_ids, genes_db, region, wsize)
n2 = len(control_gene_ids)
table = [[b1, n1], [b2, n2]]
return table, matching_genes
def human_format(num):
# returns an amount in a human readable format
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '%i%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def test1():
base_path = '/home/victor/Escritorio/Genotipado_Alternativo/colocalizacion'
snps_ids = load_lines(os.path.join(base_path, 'MS.txt'))
regions = create_snp_regions(snps_ids)
genes_db = create_gene_db('9606', os.path.join(base_path, 'GRCh38/mart_export.txt.gz'))
gene_ids = load_lines(os.path.join(base_path, 'sp140_genes.txt'))
wsize = 500000
table, match_genes = calc_genes_in_region_table(regions.get('GRCh38'), genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(table)
assoc = associate_genes_with_region(match_genes, regions.get('GRCh38'), wsize)
reg_by_gene = calc_regions_by_gene_count(assoc)
print 'b: %i, n: %i' % tuple(table[0])
print 'B: %i, N: %i' % tuple(table[1])
print 'oddsratio: %f, pvalue: %f' % (oddsratio, pvalue)
def test_genes_vs_multiple_snps(gene_ids, input_path, output_path):
|
def enrichr_db_test(file_name, region, genes_db, wsize, pvalue_thr=0.05, record_filter=None):
"""
Runs, for each record of a 'enrich_db table', a fisher test using 'calc_genes_in_region_table' contingency table
:param file_name: full path to enrich_db table
:param region: dict of {chr: [p1, p2, ...]} as returned by 'get_regions_from_ensembl_snps'
:param genes_db:
:param wsize: window size used to calculate region match (centered in a region)
:param pvalue_thr: pvalue threshold for FDR
:param record_filter: lambda function for record filtering
:return: a list of tuples (lib_name, record, b1, n1, b2, n2, oddsratio, pval, corr_pval, matching_genes)
"""
data_lib = EnrichR.load_library(file_name)
lib_name = os.path.basename(file_name[:-7]) # remove '.txt.gz'
results = []
if record_filter is not None:
selected_records = filter(lambda r: record_filter(r), data_lib.keys())
else:
selected_records = data_lib.keys()
for record in selected_records:
# extract genes highlighted by the study
gene_names = EnrichR.extract_gene_list(data_lib, record)
gene_ids = []
for name in gene_names:
# translate gene names to gene_id (ensembl ids)
gene = genes_db.get_by_name(name)
if gene:
gene_ids.append(gene.id)
else:
pass # gene name not found
# calculate contingency table for the genes in the study
t, match_genes = calc_genes_in_region_table(region, genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(t)
results.append((lib_name, record, t[0][0], t[0][1], t[1][0], t[1][1], oddsratio, pvalue, match_genes))
# multiple test correction using FDR
pvals = map(lambda r: r[7], results)
vals = multipletests(pvals, alpha=pvalue_thr, method='fdr_bh')
# add corrected p-value to results and filter by FDR threshold
results_corr = []
for i, res in enumerate(results):
if vals[0][i]:
# if test passed -> append to results_corr list and put matching_genes at the end of the tuple
results_corr.append(res[:-1] + ((vals[1][i]).item(), res[-1],))
return results_corr
def __result_similarity(gene_res_1, gene_res_2):
set1 = set(gene_res_1)
set2 = set(gene_res_2)
total = len(set1.union(set2))
common = len(set1.intersection(set2))
return common / float(total)
def __calc_similarities(results):
num = len(results)
sims = np.zeros((num, num))
for i in range(0, num):
for j in range(i, num):
sims[i][j] = __result_similarity(results[i][9], results[j][9]) if i != j else 1.0
return sims
def __write_similarities_output(base_path, wsize_str, similarities, results_all):
# write similarities
f = open(os.path.join(base_path, 'output_enrichr_similarities_%s.txt' % wsize_str), 'w')
f.write('A\tB\tSim\n')
num = similarities.shape[0]
for i in range(0, num):
label_i = results_all[i][0] + '|' + results_all[i][1] # lib name + record name
for j in range(i, num):
label_j = results_all[j][0] + '|' + results_all[j][1] # lib name + record name
f.write('%s\t%s\t%f\n' % (label_i, label_j, similarities[i][j]))
f.close()
# write similarities as json
num = similarities.shape[0]
sim_dict = {'records': [], 'similarities': []}
for i in range(0, num):
sim_dict['records'].append({'idx': i, 'lib': results_all[i][0], 'name': results_all[i][1]})
for j in range(i, num):
sim_dict['similarities'].append({'x': i, 'y': j, 'val': similarities[i][j]})
sim_json_file = os.path.join(base_path, 'output_enrichr_similarities_%s.json' % wsize_str)
f = open(sim_json_file, 'w')
json.dump(sim_dict, f)
f.close()
# generate similarity html graph
sim_out_html_file = os.path.join(base_path, 'output_enrichr_similarities_%s.html' % wsize_str)
sim_graph_title = 'Enrichr similarities %s' % wsize_str
generate_similarity_graph(sim_json_file, sim_out_html_file, title=sim_graph_title)
def __write_per_region_gene_matching(out_file, assoc):
# write per region gene matching
f = open(out_file, 'w')
for chr in assoc:
for pos_obj in assoc[chr]:
f.write('%s\t%i\t%i\t' % (pos_obj['chr'], pos_obj['pos'], len(pos_obj['genes'])))
# add matching genes at the end of the row (comma separated)
f.write('%s\n' % ','.join(map(lambda g: g.name, pos_obj['genes'])))
f.close()
def __write_genes_vs_regions_table(out_file, regions_by_gene_count):
# write #genes vs #regions data table
f = open(out_file, 'w')
f.write('#genes\t#regions\n')
for val in regions_by_gene_count:
f.write('%i\t%i\n' % val)
f.close()
if __name__ == "__main__":
base_path = '/home/victor/Escritorio/Genotipado_Alternativo/colocalizacion'
# test1()
test_genes_vs_multiple_snps(load_lines(os.path.join(base_path, 'sp140_genes.txt')),
os.path.join(base_path, 'snps_diseases'),
os.path.join(base_path, 'snps_diseases/output'))
sys.exit(0)
print 'Started:', datetime.datetime.now().isoformat()
snps_ids = load_lines(os.path.join(base_path, 'MS.txt'))
regions = create_snp_regions(snps_ids)
genes_db = create_gene_db('9606', os.path.join(base_path, 'GRCh38/mart_export.txt.gz'))
enrichr_path = os.path.join(base_path, 'enrichr')
lib_files = EnrichR.list_libraries(enrichr_path)
lib_files = filter(lambda n: n.startswith('Single_Gene_Perturbations_from_GEO'), lib_files)
lib_files = sorted(lib_files)
wsizes = WSIZES
# wsizes = [500000.0]
record_filter = lambda r: r.find('GSE50588') > -1
for wsize in wsizes:
wsize_str = human_format(wsize)
lib_results = {}
genes_in_regions = []
results_all = []
for name in lib_files: # Note: lib_files is sorted
lib_name = name[:-7] # remove '.txt.gz'
res = enrichr_db_test(os.path.join(enrichr_path, name), regions.get('GRCh38'), genes_db, wsize,
record_filter=record_filter)
print '%i matches in %s, [%s]' % (len(res), lib_name, datetime.datetime.now().isoformat())
lib_results[lib_name] = res
results_all += res
genes_in_regions += list(itertools.chain.from_iterable(map(lambda r: r[9], res)))
assoc = associate_genes_with_region(genes_in_regions, regions.get('GRCh38'), wsize)
regions_by_gene_count = calc_regions_by_gene_count(assoc)
similarities = __calc_similarities(results_all)
# write per library/study results
f = open(os.path.join(base_path, 'output_enrichr_%s.txt' % wsize_str), 'w')
for res in results_all:
f.write('%s\t%s\t%i\t%i\t%i\t%i\t%f\t%f\t%f\t' % res[:9])
# add matching genes at the end of the row (comma separated)
f.write('%s\n' % ','.join(map(lambda g: g.name, res[9])))
f.close()
# write per region gene matching
__write_per_region_gene_matching(os.path.join(base_path, 'output_regions_%s.txt' % wsize_str), assoc)
# write #genes vs #regions data table
__write_genes_vs_regions_table(os.path.join(base_path, 'output_regions_by_gene_count_%s.txt' % wsize_str),
regions_by_gene_count)
# Skip similarities output
# __write_similarities_output(base_path, wsize_str, similarities, results_all)
print 'Finished:', datetime.datetime.now().isoformat()
| file_pattern = '(.+)\.txt'
ll_ids_pattern = '"([\w\s;]+)"'
prog = re.compile(file_pattern)
ll_prog = re.compile(ll_ids_pattern)
genes_db = create_gene_db('9606', os.path.join(input_path, 'GRCh38/mart_export.txt.gz'))
snp_files = [f for f in os.listdir(input_path) if os.path.isfile(os.path.join(input_path, f)) and prog.match(f)]
wsizes = WSIZES
for snp_file in snp_files:
disease = prog.match(snp_file).groups()[0]
snps_ids = load_lines(os.path.join(input_path, snp_file))
good_ids = filter(lambda i: i.startswith('rs'), snps_ids)
ld_ids = filter(lambda i: ll_prog.match(i), snps_ids)
ld_ids = map(lambda i: ll_prog.match(i).groups()[0].split(';')[0], ld_ids)
regions = create_snp_regions(good_ids + ld_ids)
print '\n===== Test for disease: %s =====' % disease
for wsize in wsizes:
wsize_str = human_format(wsize)
print '--- Window size = %s ---' % wsize_str
table, match_genes = calc_genes_in_region_table(regions.get('GRCh38'), genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(table)
assoc = associate_genes_with_region(match_genes, regions.get('GRCh38'), wsize)
regions_by_gene_count = calc_regions_by_gene_count(assoc)
print 'b: %i, n: %i' % tuple(table[0])
print 'B: %i, N: %i' % tuple(table[1])
print 'oddsratio: %f, pvalue: %f' % (oddsratio, pvalue)
__write_per_region_gene_matching(
os.path.join(output_path, 'output_regions_%s_%s.txt' % (disease, wsize_str)), assoc)
__write_genes_vs_regions_table(
os.path.join(output_path, 'output_regions_by_gene_count_%s_%s.txt' % (disease, wsize_str)),
regions_by_gene_count) | identifier_body |
colocalization_snps.py | import os
import re
import sys
import datetime
import itertools
import json
from classes.ensembl_client import EnsemblRestClient
from classes.gene_database import GeneDatabase
from classes.enrichr import EnrichR
import numpy as np
import scipy.stats as stats
from statsmodels.sandbox.stats.multicomp import multipletests
from scripts.similarity_matrix_graph import generate_similarity_graph
VALID_CHRs = [str(i) for i in range(1, 23)] + ['X', 'Y']
WSIZES = [500000.0, 250000.0, 100000.0, 50000.0, 20000.0]
def load_lines(file_name):
# read lines from file and returns them in a list
f = open(file_name, 'r')
ids = [line.strip() for line in f]
f.close()
return ids
def get_regions_from_ensembl_snps(snps):
# returns regions in this format: {'<assembly1>': {'<chrNN>': [<pos1>, <pos2>, ...], ...}, ...}
regions = {}
for snp_id in snps:
data = snps[snp_id]
mappings = data.get('mappings', [])
for mapping in mappings:
pos = mapping['start']
chr = str(mapping['seq_region_name'])
if chr not in VALID_CHRs:
continue
assembly = mapping['assembly_name']
reg_assembly = regions.get(assembly, {}) # create a new one if not exists
regions[assembly] = reg_assembly # assign to ensure creation
reg_chr = reg_assembly.get(chr, []) # create a new one if not exists
reg_assembly[chr] = reg_chr # assign to ensure creation
reg_chr.append(pos)
return regions
def __is_in_region(region, chr, start, end, wsize):
# checks whether a feature located at chr:start-end overlaps a region
dist = wsize / 2
positions = region.get(chr, [])
for pos_obj in positions:
pos = pos_obj['pos'] if type(pos_obj) is dict else pos_obj
if abs(pos - start) < dist or abs(pos - end) < dist:
return pos_obj
return None
def count_genes_in_region(genes, genes_db, region, wsize, add_to_list=None):
# count genes in list that overlaps a region
count = 0
for gene_id in genes:
gene_data = genes_db.get_by_id(gene_id)
if __is_in_region(region, gene_data.chr, gene_data.start, gene_data.end, wsize) is not None:
count += 1
if type(add_to_list) is list:
add_to_list.append(gene_data)
return count
def associate_genes_with_region(genes_data, region, wsize):
assoc = {chr: [{'chr': chr, 'pos': pos, 'genes': set([])} for pos in region[chr]] for chr in region.keys()}
for gene_data in genes_data:
pos_obj = __is_in_region(assoc, gene_data.chr, gene_data.start, gene_data.end, wsize)
if pos_obj is not None:
pos_obj['genes'].add(gene_data)
else:
|
return assoc
def calc_regions_by_gene_count(assoc):
"""
calculates data table for graph: num_genes vs num_regions (how many regions have 1, 2, 3, ... genes)
:param assoc:
:return: an array of tuples (x, y) sorted by x
"""
counts = {}
max_count = -1
for pos_list in assoc.values():
for pos_obj in pos_list:
num_genes = len(pos_obj['genes'])
if num_genes > max_count:
max_count = num_genes
val = counts.get(num_genes, 0)
counts[num_genes] = val + 1
for i in range(0, max_count):
# fill gaps in counts dictionary
if i not in counts:
counts[i] = 0
return map(lambda n: (n, counts[n]), sorted(counts.keys()))
def create_snp_regions(snps_ids):
# build snps regions using ensembl REST API
client = EnsemblRestClient()
snps = client.get_snps(snps_ids)
return get_regions_from_ensembl_snps(snps)
def create_gene_db(taxonomy_id, mart_file):
# create a gene DB using an ensembl mart export
genes_db = GeneDatabase(taxonomy_id)
genes_db.load_mart_export(mart_file)
return genes_db
def calc_genes_in_region_table(region, genes_db, gene_ids, wsize):
"""
Calculates 2x2 table of gene counts: [[b1, n1], [b2, n2]]
b1: number of selected genes that match a region, n1: total of selected genes
b2: number of background genes that match a region, n2: total of background genes
:param region: dict of {chr: [p1, p2, ...]} as returned by 'get_regions_from_ensembl_snps'
:param genes_db:
:param gene_ids: list of selected genes (for example: diff expressed in a GEO study)
:param wsize: window size used to calculate region match (centered in a region)
:return: a tuple with the contingency table (2x2) and matching genes array
"""
control_gene_ids = genes_db.get_difference(gene_ids)
matching_genes = []
b1 = count_genes_in_region(gene_ids, genes_db, region, wsize, add_to_list=matching_genes)
n1 = len(gene_ids)
b2 = count_genes_in_region(control_gene_ids, genes_db, region, wsize)
n2 = len(control_gene_ids)
table = [[b1, n1], [b2, n2]]
return table, matching_genes
def human_format(num):
# returns an amount in a human readable format
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '%i%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def test1():
base_path = '/home/victor/Escritorio/Genotipado_Alternativo/colocalizacion'
snps_ids = load_lines(os.path.join(base_path, 'MS.txt'))
regions = create_snp_regions(snps_ids)
genes_db = create_gene_db('9606', os.path.join(base_path, 'GRCh38/mart_export.txt.gz'))
gene_ids = load_lines(os.path.join(base_path, 'sp140_genes.txt'))
wsize = 500000
table, match_genes = calc_genes_in_region_table(regions.get('GRCh38'), genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(table)
assoc = associate_genes_with_region(match_genes, regions.get('GRCh38'), wsize)
reg_by_gene = calc_regions_by_gene_count(assoc)
print 'b: %i, n: %i' % tuple(table[0])
print 'B: %i, N: %i' % tuple(table[1])
print 'oddsratio: %f, pvalue: %f' % (oddsratio, pvalue)
def test_genes_vs_multiple_snps(gene_ids, input_path, output_path):
file_pattern = '(.+)\.txt'
ll_ids_pattern = '"([\w\s;]+)"'
prog = re.compile(file_pattern)
ll_prog = re.compile(ll_ids_pattern)
genes_db = create_gene_db('9606', os.path.join(input_path, 'GRCh38/mart_export.txt.gz'))
snp_files = [f for f in os.listdir(input_path) if os.path.isfile(os.path.join(input_path, f)) and prog.match(f)]
wsizes = WSIZES
for snp_file in snp_files:
disease = prog.match(snp_file).groups()[0]
snps_ids = load_lines(os.path.join(input_path, snp_file))
good_ids = filter(lambda i: i.startswith('rs'), snps_ids)
ld_ids = filter(lambda i: ll_prog.match(i), snps_ids)
ld_ids = map(lambda i: ll_prog.match(i).groups()[0].split(';')[0], ld_ids)
regions = create_snp_regions(good_ids + ld_ids)
print '\n===== Test for disease: %s =====' % disease
for wsize in wsizes:
wsize_str = human_format(wsize)
print '--- Window size = %s ---' % wsize_str
table, match_genes = calc_genes_in_region_table(regions.get('GRCh38'), genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(table)
assoc = associate_genes_with_region(match_genes, regions.get('GRCh38'), wsize)
regions_by_gene_count = calc_regions_by_gene_count(assoc)
print 'b: %i, n: %i' % tuple(table[0])
print 'B: %i, N: %i' % tuple(table[1])
print 'oddsratio: %f, pvalue: %f' % (oddsratio, pvalue)
__write_per_region_gene_matching(
os.path.join(output_path, 'output_regions_%s_%s.txt' % (disease, wsize_str)), assoc)
__write_genes_vs_regions_table(
os.path.join(output_path, 'output_regions_by_gene_count_%s_%s.txt' % (disease, wsize_str)),
regions_by_gene_count)
def enrichr_db_test(file_name, region, genes_db, wsize, pvalue_thr=0.05, record_filter=None):
"""
Runs, for each record of a 'enrich_db table', a fisher test using 'calc_genes_in_region_table' contingency table
:param file_name: full path to enrich_db table
:param region: dict of {chr: [p1, p2, ...]} as returned by 'get_regions_from_ensembl_snps'
:param genes_db:
:param wsize: window size used to calculate region match (centered in a region)
:param pvalue_thr: pvalue threshold for FDR
:param record_filter: lambda function for record filtering
:return: a list of tuples (lib_name, record, b1, n1, b2, n2, oddsratio, pval, corr_pval, matching_genes)
"""
data_lib = EnrichR.load_library(file_name)
lib_name = os.path.basename(file_name[:-7]) # remove '.txt.gz'
results = []
if record_filter is not None:
selected_records = filter(lambda r: record_filter(r), data_lib.keys())
else:
selected_records = data_lib.keys()
for record in selected_records:
# extract genes highlighted by the study
gene_names = EnrichR.extract_gene_list(data_lib, record)
gene_ids = []
for name in gene_names:
# translate gene names to gene_id (ensembl ids)
gene = genes_db.get_by_name(name)
if gene:
gene_ids.append(gene.id)
else:
pass # gene name not found
# calculate contingency table for the genes in the study
t, match_genes = calc_genes_in_region_table(region, genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(t)
results.append((lib_name, record, t[0][0], t[0][1], t[1][0], t[1][1], oddsratio, pvalue, match_genes))
# multiple test correction using FDR
pvals = map(lambda r: r[7], results)
vals = multipletests(pvals, alpha=pvalue_thr, method='fdr_bh')
# add corrected p-value to results and filter by FDR threshold
results_corr = []
for i, res in enumerate(results):
if vals[0][i]:
# if test passed -> append to results_corr list and put matching_genes at the end of the tuple
results_corr.append(res[:-1] + ((vals[1][i]).item(), res[-1],))
return results_corr
def __result_similarity(gene_res_1, gene_res_2):
set1 = set(gene_res_1)
set2 = set(gene_res_2)
total = len(set1.union(set2))
common = len(set1.intersection(set2))
return common / float(total)
def __calc_similarities(results):
num = len(results)
sims = np.zeros((num, num))
for i in range(0, num):
for j in range(i, num):
sims[i][j] = __result_similarity(results[i][9], results[j][9]) if i != j else 1.0
return sims
def __write_similarities_output(base_path, wsize_str, similarities, results_all):
# write similarities
f = open(os.path.join(base_path, 'output_enrichr_similarities_%s.txt' % wsize_str), 'w')
f.write('A\tB\tSim\n')
num = similarities.shape[0]
for i in range(0, num):
label_i = results_all[i][0] + '|' + results_all[i][1] # lib name + record name
for j in range(i, num):
label_j = results_all[j][0] + '|' + results_all[j][1] # lib name + record name
f.write('%s\t%s\t%f\n' % (label_i, label_j, similarities[i][j]))
f.close()
# write similarities as json
num = similarities.shape[0]
sim_dict = {'records': [], 'similarities': []}
for i in range(0, num):
sim_dict['records'].append({'idx': i, 'lib': results_all[i][0], 'name': results_all[i][1]})
for j in range(i, num):
sim_dict['similarities'].append({'x': i, 'y': j, 'val': similarities[i][j]})
sim_json_file = os.path.join(base_path, 'output_enrichr_similarities_%s.json' % wsize_str)
f = open(sim_json_file, 'w')
json.dump(sim_dict, f)
f.close()
# generate similarity html graph
sim_out_html_file = os.path.join(base_path, 'output_enrichr_similarities_%s.html' % wsize_str)
sim_graph_title = 'Enrichr similarities %s' % wsize_str
generate_similarity_graph(sim_json_file, sim_out_html_file, title=sim_graph_title)
def __write_per_region_gene_matching(out_file, assoc):
# write per region gene matching
f = open(out_file, 'w')
for chr in assoc:
for pos_obj in assoc[chr]:
f.write('%s\t%i\t%i\t' % (pos_obj['chr'], pos_obj['pos'], len(pos_obj['genes'])))
# add matching genes at the end of the row (comma separated)
f.write('%s\n' % ','.join(map(lambda g: g.name, pos_obj['genes'])))
f.close()
def __write_genes_vs_regions_table(out_file, regions_by_gene_count):
# write #genes vs #regions data table
f = open(out_file, 'w')
f.write('#genes\t#regions\n')
for val in regions_by_gene_count:
f.write('%i\t%i\n' % val)
f.close()
if __name__ == "__main__":
base_path = '/home/victor/Escritorio/Genotipado_Alternativo/colocalizacion'
# test1()
test_genes_vs_multiple_snps(load_lines(os.path.join(base_path, 'sp140_genes.txt')),
os.path.join(base_path, 'snps_diseases'),
os.path.join(base_path, 'snps_diseases/output'))
sys.exit(0)
print 'Started:', datetime.datetime.now().isoformat()
snps_ids = load_lines(os.path.join(base_path, 'MS.txt'))
regions = create_snp_regions(snps_ids)
genes_db = create_gene_db('9606', os.path.join(base_path, 'GRCh38/mart_export.txt.gz'))
enrichr_path = os.path.join(base_path, 'enrichr')
lib_files = EnrichR.list_libraries(enrichr_path)
lib_files = filter(lambda n: n.startswith('Single_Gene_Perturbations_from_GEO'), lib_files)
lib_files = sorted(lib_files)
wsizes = WSIZES
# wsizes = [500000.0]
record_filter = lambda r: r.find('GSE50588') > -1
for wsize in wsizes:
wsize_str = human_format(wsize)
lib_results = {}
genes_in_regions = []
results_all = []
for name in lib_files: # Note: lib_files is sorted
lib_name = name[:-7] # remove '.txt.gz'
res = enrichr_db_test(os.path.join(enrichr_path, name), regions.get('GRCh38'), genes_db, wsize,
record_filter=record_filter)
print '%i matches in %s, [%s]' % (len(res), lib_name, datetime.datetime.now().isoformat())
lib_results[lib_name] = res
results_all += res
genes_in_regions += list(itertools.chain.from_iterable(map(lambda r: r[9], res)))
assoc = associate_genes_with_region(genes_in_regions, regions.get('GRCh38'), wsize)
regions_by_gene_count = calc_regions_by_gene_count(assoc)
similarities = __calc_similarities(results_all)
# write per library/study results
f = open(os.path.join(base_path, 'output_enrichr_%s.txt' % wsize_str), 'w')
for res in results_all:
f.write('%s\t%s\t%i\t%i\t%i\t%i\t%f\t%f\t%f\t' % res[:9])
# add matching genes at the end of the row (comma separated)
f.write('%s\n' % ','.join(map(lambda g: g.name, res[9])))
f.close()
# write per region gene matching
__write_per_region_gene_matching(os.path.join(base_path, 'output_regions_%s.txt' % wsize_str), assoc)
# write #genes vs #regions data table
__write_genes_vs_regions_table(os.path.join(base_path, 'output_regions_by_gene_count_%s.txt' % wsize_str),
regions_by_gene_count)
# Skip similarities output
# __write_similarities_output(base_path, wsize_str, similarities, results_all)
print 'Finished:', datetime.datetime.now().isoformat()
| print 'warning: gene %s [%s] should be in a region' % (gene_data.name, gene_data.id) | conditional_block |
colocalization_snps.py | import os
import re
import sys
import datetime
import itertools
import json
from classes.ensembl_client import EnsemblRestClient
from classes.gene_database import GeneDatabase
from classes.enrichr import EnrichR
import numpy as np
import scipy.stats as stats
from statsmodels.sandbox.stats.multicomp import multipletests
from scripts.similarity_matrix_graph import generate_similarity_graph
VALID_CHRs = [str(i) for i in range(1, 23)] + ['X', 'Y']
WSIZES = [500000.0, 250000.0, 100000.0, 50000.0, 20000.0]
def load_lines(file_name):
# read lines from file and returns them in a list
f = open(file_name, 'r')
ids = [line.strip() for line in f]
f.close()
return ids
def get_regions_from_ensembl_snps(snps):
# returns regions in this format: {'<assembly1>': {'<chrNN>': [<pos1>, <pos2>, ...], ...}, ...}
regions = {}
for snp_id in snps:
data = snps[snp_id]
mappings = data.get('mappings', [])
for mapping in mappings:
pos = mapping['start']
chr = str(mapping['seq_region_name'])
if chr not in VALID_CHRs:
continue
assembly = mapping['assembly_name']
reg_assembly = regions.get(assembly, {}) # create a new one if not exists
regions[assembly] = reg_assembly # assign to ensure creation
reg_chr = reg_assembly.get(chr, []) # create a new one if not exists
reg_assembly[chr] = reg_chr # assign to ensure creation
reg_chr.append(pos)
return regions
def __is_in_region(region, chr, start, end, wsize):
# checks whether a feature located at chr:start-end overlaps a region
dist = wsize / 2
positions = region.get(chr, [])
for pos_obj in positions:
pos = pos_obj['pos'] if type(pos_obj) is dict else pos_obj
if abs(pos - start) < dist or abs(pos - end) < dist:
return pos_obj
return None
def count_genes_in_region(genes, genes_db, region, wsize, add_to_list=None):
# count genes in list that overlaps a region
count = 0
for gene_id in genes:
gene_data = genes_db.get_by_id(gene_id)
if __is_in_region(region, gene_data.chr, gene_data.start, gene_data.end, wsize) is not None:
count += 1
if type(add_to_list) is list:
add_to_list.append(gene_data)
return count
def associate_genes_with_region(genes_data, region, wsize):
assoc = {chr: [{'chr': chr, 'pos': pos, 'genes': set([])} for pos in region[chr]] for chr in region.keys()}
for gene_data in genes_data:
pos_obj = __is_in_region(assoc, gene_data.chr, gene_data.start, gene_data.end, wsize)
if pos_obj is not None:
pos_obj['genes'].add(gene_data)
else:
print 'warning: gene %s [%s] should be in a region' % (gene_data.name, gene_data.id)
return assoc
def calc_regions_by_gene_count(assoc):
"""
calculates data table for graph: num_genes vs num_regions (how many regions have 1, 2, 3, ... genes)
:param assoc:
:return: an array of tuples (x, y) sorted by x
"""
counts = {}
max_count = -1
for pos_list in assoc.values():
for pos_obj in pos_list:
num_genes = len(pos_obj['genes'])
if num_genes > max_count:
max_count = num_genes
val = counts.get(num_genes, 0)
counts[num_genes] = val + 1
for i in range(0, max_count):
# fill gaps in counts dictionary
if i not in counts:
counts[i] = 0
return map(lambda n: (n, counts[n]), sorted(counts.keys()))
def create_snp_regions(snps_ids):
# build snps regions using ensembl REST API
client = EnsemblRestClient()
snps = client.get_snps(snps_ids)
return get_regions_from_ensembl_snps(snps)
def create_gene_db(taxonomy_id, mart_file):
# create a gene DB using an ensembl mart export
genes_db = GeneDatabase(taxonomy_id)
genes_db.load_mart_export(mart_file)
return genes_db
def calc_genes_in_region_table(region, genes_db, gene_ids, wsize):
"""
Calculates 2x2 table of gene counts: [[b1, n1], [b2, n2]]
b1: number of selected genes that match a region, n1: total of selected genes
b2: number of background genes that match a region, n2: total of background genes
:param region: dict of {chr: [p1, p2, ...]} as returned by 'get_regions_from_ensembl_snps'
:param genes_db:
:param gene_ids: list of selected genes (for example: diff expressed in a GEO study)
:param wsize: window size used to calculate region match (centered in a region)
:return: a tuple with the contingency table (2x2) and matching genes array
"""
control_gene_ids = genes_db.get_difference(gene_ids)
matching_genes = []
b1 = count_genes_in_region(gene_ids, genes_db, region, wsize, add_to_list=matching_genes)
n1 = len(gene_ids)
b2 = count_genes_in_region(control_gene_ids, genes_db, region, wsize)
n2 = len(control_gene_ids)
table = [[b1, n1], [b2, n2]]
return table, matching_genes
def human_format(num):
# returns an amount in a human readable format
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '%i%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def test1():
base_path = '/home/victor/Escritorio/Genotipado_Alternativo/colocalizacion'
snps_ids = load_lines(os.path.join(base_path, 'MS.txt'))
regions = create_snp_regions(snps_ids)
genes_db = create_gene_db('9606', os.path.join(base_path, 'GRCh38/mart_export.txt.gz'))
gene_ids = load_lines(os.path.join(base_path, 'sp140_genes.txt'))
wsize = 500000
table, match_genes = calc_genes_in_region_table(regions.get('GRCh38'), genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(table)
assoc = associate_genes_with_region(match_genes, regions.get('GRCh38'), wsize)
reg_by_gene = calc_regions_by_gene_count(assoc)
print 'b: %i, n: %i' % tuple(table[0])
print 'B: %i, N: %i' % tuple(table[1])
print 'oddsratio: %f, pvalue: %f' % (oddsratio, pvalue)
def | (gene_ids, input_path, output_path):
file_pattern = '(.+)\.txt'
ll_ids_pattern = '"([\w\s;]+)"'
prog = re.compile(file_pattern)
ll_prog = re.compile(ll_ids_pattern)
genes_db = create_gene_db('9606', os.path.join(input_path, 'GRCh38/mart_export.txt.gz'))
snp_files = [f for f in os.listdir(input_path) if os.path.isfile(os.path.join(input_path, f)) and prog.match(f)]
wsizes = WSIZES
for snp_file in snp_files:
disease = prog.match(snp_file).groups()[0]
snps_ids = load_lines(os.path.join(input_path, snp_file))
good_ids = filter(lambda i: i.startswith('rs'), snps_ids)
ld_ids = filter(lambda i: ll_prog.match(i), snps_ids)
ld_ids = map(lambda i: ll_prog.match(i).groups()[0].split(';')[0], ld_ids)
regions = create_snp_regions(good_ids + ld_ids)
print '\n===== Test for disease: %s =====' % disease
for wsize in wsizes:
wsize_str = human_format(wsize)
print '--- Window size = %s ---' % wsize_str
table, match_genes = calc_genes_in_region_table(regions.get('GRCh38'), genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(table)
assoc = associate_genes_with_region(match_genes, regions.get('GRCh38'), wsize)
regions_by_gene_count = calc_regions_by_gene_count(assoc)
print 'b: %i, n: %i' % tuple(table[0])
print 'B: %i, N: %i' % tuple(table[1])
print 'oddsratio: %f, pvalue: %f' % (oddsratio, pvalue)
__write_per_region_gene_matching(
os.path.join(output_path, 'output_regions_%s_%s.txt' % (disease, wsize_str)), assoc)
__write_genes_vs_regions_table(
os.path.join(output_path, 'output_regions_by_gene_count_%s_%s.txt' % (disease, wsize_str)),
regions_by_gene_count)
def enrichr_db_test(file_name, region, genes_db, wsize, pvalue_thr=0.05, record_filter=None):
"""
Runs, for each record of a 'enrich_db table', a fisher test using 'calc_genes_in_region_table' contingency table
:param file_name: full path to enrich_db table
:param region: dict of {chr: [p1, p2, ...]} as returned by 'get_regions_from_ensembl_snps'
:param genes_db:
:param wsize: window size used to calculate region match (centered in a region)
:param pvalue_thr: pvalue threshold for FDR
:param record_filter: lambda function for record filtering
:return: a list of tuples (lib_name, record, b1, n1, b2, n2, oddsratio, pval, corr_pval, matching_genes)
"""
data_lib = EnrichR.load_library(file_name)
lib_name = os.path.basename(file_name[:-7]) # remove '.txt.gz'
results = []
if record_filter is not None:
selected_records = filter(lambda r: record_filter(r), data_lib.keys())
else:
selected_records = data_lib.keys()
for record in selected_records:
# extract genes highlighted by the study
gene_names = EnrichR.extract_gene_list(data_lib, record)
gene_ids = []
for name in gene_names:
# translate gene names to gene_id (ensembl ids)
gene = genes_db.get_by_name(name)
if gene:
gene_ids.append(gene.id)
else:
pass # gene name not found
# calculate contingency table for the genes in the study
t, match_genes = calc_genes_in_region_table(region, genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(t)
results.append((lib_name, record, t[0][0], t[0][1], t[1][0], t[1][1], oddsratio, pvalue, match_genes))
# multiple test correction using FDR
pvals = map(lambda r: r[7], results)
vals = multipletests(pvals, alpha=pvalue_thr, method='fdr_bh')
# add corrected p-value to results and filter by FDR threshold
results_corr = []
for i, res in enumerate(results):
if vals[0][i]:
# if test passed -> append to results_corr list and put matching_genes at the end of the tuple
results_corr.append(res[:-1] + ((vals[1][i]).item(), res[-1],))
return results_corr
def __result_similarity(gene_res_1, gene_res_2):
set1 = set(gene_res_1)
set2 = set(gene_res_2)
total = len(set1.union(set2))
common = len(set1.intersection(set2))
return common / float(total)
def __calc_similarities(results):
num = len(results)
sims = np.zeros((num, num))
for i in range(0, num):
for j in range(i, num):
sims[i][j] = __result_similarity(results[i][9], results[j][9]) if i != j else 1.0
return sims
def __write_similarities_output(base_path, wsize_str, similarities, results_all):
# write similarities
f = open(os.path.join(base_path, 'output_enrichr_similarities_%s.txt' % wsize_str), 'w')
f.write('A\tB\tSim\n')
num = similarities.shape[0]
for i in range(0, num):
label_i = results_all[i][0] + '|' + results_all[i][1] # lib name + record name
for j in range(i, num):
label_j = results_all[j][0] + '|' + results_all[j][1] # lib name + record name
f.write('%s\t%s\t%f\n' % (label_i, label_j, similarities[i][j]))
f.close()
# write similarities as json
num = similarities.shape[0]
sim_dict = {'records': [], 'similarities': []}
for i in range(0, num):
sim_dict['records'].append({'idx': i, 'lib': results_all[i][0], 'name': results_all[i][1]})
for j in range(i, num):
sim_dict['similarities'].append({'x': i, 'y': j, 'val': similarities[i][j]})
sim_json_file = os.path.join(base_path, 'output_enrichr_similarities_%s.json' % wsize_str)
f = open(sim_json_file, 'w')
json.dump(sim_dict, f)
f.close()
# generate similarity html graph
sim_out_html_file = os.path.join(base_path, 'output_enrichr_similarities_%s.html' % wsize_str)
sim_graph_title = 'Enrichr similarities %s' % wsize_str
generate_similarity_graph(sim_json_file, sim_out_html_file, title=sim_graph_title)
def __write_per_region_gene_matching(out_file, assoc):
# write per region gene matching
f = open(out_file, 'w')
for chr in assoc:
for pos_obj in assoc[chr]:
f.write('%s\t%i\t%i\t' % (pos_obj['chr'], pos_obj['pos'], len(pos_obj['genes'])))
# add matching genes at the end of the row (comma separated)
f.write('%s\n' % ','.join(map(lambda g: g.name, pos_obj['genes'])))
f.close()
def __write_genes_vs_regions_table(out_file, regions_by_gene_count):
# write #genes vs #regions data table
f = open(out_file, 'w')
f.write('#genes\t#regions\n')
for val in regions_by_gene_count:
f.write('%i\t%i\n' % val)
f.close()
if __name__ == "__main__":
base_path = '/home/victor/Escritorio/Genotipado_Alternativo/colocalizacion'
# test1()
test_genes_vs_multiple_snps(load_lines(os.path.join(base_path, 'sp140_genes.txt')),
os.path.join(base_path, 'snps_diseases'),
os.path.join(base_path, 'snps_diseases/output'))
sys.exit(0)
print 'Started:', datetime.datetime.now().isoformat()
snps_ids = load_lines(os.path.join(base_path, 'MS.txt'))
regions = create_snp_regions(snps_ids)
genes_db = create_gene_db('9606', os.path.join(base_path, 'GRCh38/mart_export.txt.gz'))
enrichr_path = os.path.join(base_path, 'enrichr')
lib_files = EnrichR.list_libraries(enrichr_path)
lib_files = filter(lambda n: n.startswith('Single_Gene_Perturbations_from_GEO'), lib_files)
lib_files = sorted(lib_files)
wsizes = WSIZES
# wsizes = [500000.0]
record_filter = lambda r: r.find('GSE50588') > -1
for wsize in wsizes:
wsize_str = human_format(wsize)
lib_results = {}
genes_in_regions = []
results_all = []
for name in lib_files: # Note: lib_files is sorted
lib_name = name[:-7] # remove '.txt.gz'
res = enrichr_db_test(os.path.join(enrichr_path, name), regions.get('GRCh38'), genes_db, wsize,
record_filter=record_filter)
print '%i matches in %s, [%s]' % (len(res), lib_name, datetime.datetime.now().isoformat())
lib_results[lib_name] = res
results_all += res
genes_in_regions += list(itertools.chain.from_iterable(map(lambda r: r[9], res)))
assoc = associate_genes_with_region(genes_in_regions, regions.get('GRCh38'), wsize)
regions_by_gene_count = calc_regions_by_gene_count(assoc)
similarities = __calc_similarities(results_all)
# write per library/study results
f = open(os.path.join(base_path, 'output_enrichr_%s.txt' % wsize_str), 'w')
for res in results_all:
f.write('%s\t%s\t%i\t%i\t%i\t%i\t%f\t%f\t%f\t' % res[:9])
# add matching genes at the end of the row (comma separated)
f.write('%s\n' % ','.join(map(lambda g: g.name, res[9])))
f.close()
# write per region gene matching
__write_per_region_gene_matching(os.path.join(base_path, 'output_regions_%s.txt' % wsize_str), assoc)
# write #genes vs #regions data table
__write_genes_vs_regions_table(os.path.join(base_path, 'output_regions_by_gene_count_%s.txt' % wsize_str),
regions_by_gene_count)
# Skip similarities output
# __write_similarities_output(base_path, wsize_str, similarities, results_all)
print 'Finished:', datetime.datetime.now().isoformat()
| test_genes_vs_multiple_snps | identifier_name |
colocalization_snps.py | import os
import re
import sys
import datetime
import itertools
import json
from classes.ensembl_client import EnsemblRestClient
from classes.gene_database import GeneDatabase
from classes.enrichr import EnrichR
import numpy as np
import scipy.stats as stats
from statsmodels.sandbox.stats.multicomp import multipletests
from scripts.similarity_matrix_graph import generate_similarity_graph
VALID_CHRs = [str(i) for i in range(1, 23)] + ['X', 'Y']
WSIZES = [500000.0, 250000.0, 100000.0, 50000.0, 20000.0]
def load_lines(file_name):
# read lines from file and returns them in a list
f = open(file_name, 'r')
ids = [line.strip() for line in f]
f.close()
return ids
def get_regions_from_ensembl_snps(snps):
# returns regions in this format: {'<assembly1>': {'<chrNN>': [<pos1>, <pos2>, ...], ...}, ...}
regions = {}
for snp_id in snps:
data = snps[snp_id]
mappings = data.get('mappings', [])
for mapping in mappings:
pos = mapping['start']
chr = str(mapping['seq_region_name'])
if chr not in VALID_CHRs:
continue
assembly = mapping['assembly_name']
reg_assembly = regions.get(assembly, {}) # create a new one if not exists
regions[assembly] = reg_assembly # assign to ensure creation
reg_chr = reg_assembly.get(chr, []) # create a new one if not exists
reg_assembly[chr] = reg_chr # assign to ensure creation
reg_chr.append(pos)
return regions
def __is_in_region(region, chr, start, end, wsize):
# checks whether a feature located at chr:start-end overlaps a region
dist = wsize / 2
positions = region.get(chr, [])
for pos_obj in positions:
pos = pos_obj['pos'] if type(pos_obj) is dict else pos_obj
if abs(pos - start) < dist or abs(pos - end) < dist:
return pos_obj
return None
def count_genes_in_region(genes, genes_db, region, wsize, add_to_list=None):
# count genes in list that overlaps a region
count = 0
for gene_id in genes:
gene_data = genes_db.get_by_id(gene_id)
if __is_in_region(region, gene_data.chr, gene_data.start, gene_data.end, wsize) is not None:
count += 1
if type(add_to_list) is list:
add_to_list.append(gene_data)
return count
def associate_genes_with_region(genes_data, region, wsize):
assoc = {chr: [{'chr': chr, 'pos': pos, 'genes': set([])} for pos in region[chr]] for chr in region.keys()}
for gene_data in genes_data:
pos_obj = __is_in_region(assoc, gene_data.chr, gene_data.start, gene_data.end, wsize)
if pos_obj is not None:
pos_obj['genes'].add(gene_data)
else:
print 'warning: gene %s [%s] should be in a region' % (gene_data.name, gene_data.id)
return assoc
def calc_regions_by_gene_count(assoc):
"""
calculates data table for graph: num_genes vs num_regions (how many regions have 1, 2, 3, ... genes)
:param assoc:
:return: an array of tuples (x, y) sorted by x
"""
counts = {}
max_count = -1
for pos_list in assoc.values():
for pos_obj in pos_list:
num_genes = len(pos_obj['genes'])
if num_genes > max_count:
max_count = num_genes
val = counts.get(num_genes, 0)
counts[num_genes] = val + 1
for i in range(0, max_count):
# fill gaps in counts dictionary
if i not in counts:
counts[i] = 0
return map(lambda n: (n, counts[n]), sorted(counts.keys()))
def create_snp_regions(snps_ids):
# build snps regions using ensembl REST API
client = EnsemblRestClient()
snps = client.get_snps(snps_ids)
return get_regions_from_ensembl_snps(snps)
def create_gene_db(taxonomy_id, mart_file):
# create a gene DB using an ensembl mart export
genes_db = GeneDatabase(taxonomy_id)
genes_db.load_mart_export(mart_file)
return genes_db
def calc_genes_in_region_table(region, genes_db, gene_ids, wsize):
"""
Calculates 2x2 table of gene counts: [[b1, n1], [b2, n2]]
b1: number of selected genes that match a region, n1: total of selected genes
b2: number of background genes that match a region, n2: total of background genes
:param region: dict of {chr: [p1, p2, ...]} as returned by 'get_regions_from_ensembl_snps'
:param genes_db:
:param gene_ids: list of selected genes (for example: diff expressed in a GEO study)
:param wsize: window size used to calculate region match (centered in a region)
:return: a tuple with the contingency table (2x2) and matching genes array
"""
control_gene_ids = genes_db.get_difference(gene_ids)
matching_genes = []
b1 = count_genes_in_region(gene_ids, genes_db, region, wsize, add_to_list=matching_genes)
n1 = len(gene_ids)
b2 = count_genes_in_region(control_gene_ids, genes_db, region, wsize)
n2 = len(control_gene_ids)
table = [[b1, n1], [b2, n2]]
return table, matching_genes
| magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '%i%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def test1():
base_path = '/home/victor/Escritorio/Genotipado_Alternativo/colocalizacion'
snps_ids = load_lines(os.path.join(base_path, 'MS.txt'))
regions = create_snp_regions(snps_ids)
genes_db = create_gene_db('9606', os.path.join(base_path, 'GRCh38/mart_export.txt.gz'))
gene_ids = load_lines(os.path.join(base_path, 'sp140_genes.txt'))
wsize = 500000
table, match_genes = calc_genes_in_region_table(regions.get('GRCh38'), genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(table)
assoc = associate_genes_with_region(match_genes, regions.get('GRCh38'), wsize)
reg_by_gene = calc_regions_by_gene_count(assoc)
print 'b: %i, n: %i' % tuple(table[0])
print 'B: %i, N: %i' % tuple(table[1])
print 'oddsratio: %f, pvalue: %f' % (oddsratio, pvalue)
def test_genes_vs_multiple_snps(gene_ids, input_path, output_path):
file_pattern = '(.+)\.txt'
ll_ids_pattern = '"([\w\s;]+)"'
prog = re.compile(file_pattern)
ll_prog = re.compile(ll_ids_pattern)
genes_db = create_gene_db('9606', os.path.join(input_path, 'GRCh38/mart_export.txt.gz'))
snp_files = [f for f in os.listdir(input_path) if os.path.isfile(os.path.join(input_path, f)) and prog.match(f)]
wsizes = WSIZES
for snp_file in snp_files:
disease = prog.match(snp_file).groups()[0]
snps_ids = load_lines(os.path.join(input_path, snp_file))
good_ids = filter(lambda i: i.startswith('rs'), snps_ids)
ld_ids = filter(lambda i: ll_prog.match(i), snps_ids)
ld_ids = map(lambda i: ll_prog.match(i).groups()[0].split(';')[0], ld_ids)
regions = create_snp_regions(good_ids + ld_ids)
print '\n===== Test for disease: %s =====' % disease
for wsize in wsizes:
wsize_str = human_format(wsize)
print '--- Window size = %s ---' % wsize_str
table, match_genes = calc_genes_in_region_table(regions.get('GRCh38'), genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(table)
assoc = associate_genes_with_region(match_genes, regions.get('GRCh38'), wsize)
regions_by_gene_count = calc_regions_by_gene_count(assoc)
print 'b: %i, n: %i' % tuple(table[0])
print 'B: %i, N: %i' % tuple(table[1])
print 'oddsratio: %f, pvalue: %f' % (oddsratio, pvalue)
__write_per_region_gene_matching(
os.path.join(output_path, 'output_regions_%s_%s.txt' % (disease, wsize_str)), assoc)
__write_genes_vs_regions_table(
os.path.join(output_path, 'output_regions_by_gene_count_%s_%s.txt' % (disease, wsize_str)),
regions_by_gene_count)
def enrichr_db_test(file_name, region, genes_db, wsize, pvalue_thr=0.05, record_filter=None):
"""
Runs, for each record of a 'enrich_db table', a fisher test using 'calc_genes_in_region_table' contingency table
:param file_name: full path to enrich_db table
:param region: dict of {chr: [p1, p2, ...]} as returned by 'get_regions_from_ensembl_snps'
:param genes_db:
:param wsize: window size used to calculate region match (centered in a region)
:param pvalue_thr: pvalue threshold for FDR
:param record_filter: lambda function for record filtering
:return: a list of tuples (lib_name, record, b1, n1, b2, n2, oddsratio, pval, corr_pval, matching_genes)
"""
data_lib = EnrichR.load_library(file_name)
lib_name = os.path.basename(file_name[:-7]) # remove '.txt.gz'
results = []
if record_filter is not None:
selected_records = filter(lambda r: record_filter(r), data_lib.keys())
else:
selected_records = data_lib.keys()
for record in selected_records:
# extract genes highlighted by the study
gene_names = EnrichR.extract_gene_list(data_lib, record)
gene_ids = []
for name in gene_names:
# translate gene names to gene_id (ensembl ids)
gene = genes_db.get_by_name(name)
if gene:
gene_ids.append(gene.id)
else:
pass # gene name not found
# calculate contingency table for the genes in the study
t, match_genes = calc_genes_in_region_table(region, genes_db, gene_ids, wsize)
oddsratio, pvalue = stats.fisher_exact(t)
results.append((lib_name, record, t[0][0], t[0][1], t[1][0], t[1][1], oddsratio, pvalue, match_genes))
# multiple test correction using FDR
pvals = map(lambda r: r[7], results)
vals = multipletests(pvals, alpha=pvalue_thr, method='fdr_bh')
# add corrected p-value to results and filter by FDR threshold
results_corr = []
for i, res in enumerate(results):
if vals[0][i]:
# if test passed -> append to results_corr list and put matching_genes at the end of the tuple
results_corr.append(res[:-1] + ((vals[1][i]).item(), res[-1],))
return results_corr
def __result_similarity(gene_res_1, gene_res_2):
set1 = set(gene_res_1)
set2 = set(gene_res_2)
total = len(set1.union(set2))
common = len(set1.intersection(set2))
return common / float(total)
def __calc_similarities(results):
num = len(results)
sims = np.zeros((num, num))
for i in range(0, num):
for j in range(i, num):
sims[i][j] = __result_similarity(results[i][9], results[j][9]) if i != j else 1.0
return sims
def __write_similarities_output(base_path, wsize_str, similarities, results_all):
# write similarities
f = open(os.path.join(base_path, 'output_enrichr_similarities_%s.txt' % wsize_str), 'w')
f.write('A\tB\tSim\n')
num = similarities.shape[0]
for i in range(0, num):
label_i = results_all[i][0] + '|' + results_all[i][1] # lib name + record name
for j in range(i, num):
label_j = results_all[j][0] + '|' + results_all[j][1] # lib name + record name
f.write('%s\t%s\t%f\n' % (label_i, label_j, similarities[i][j]))
f.close()
# write similarities as json
num = similarities.shape[0]
sim_dict = {'records': [], 'similarities': []}
for i in range(0, num):
sim_dict['records'].append({'idx': i, 'lib': results_all[i][0], 'name': results_all[i][1]})
for j in range(i, num):
sim_dict['similarities'].append({'x': i, 'y': j, 'val': similarities[i][j]})
sim_json_file = os.path.join(base_path, 'output_enrichr_similarities_%s.json' % wsize_str)
f = open(sim_json_file, 'w')
json.dump(sim_dict, f)
f.close()
# generate similarity html graph
sim_out_html_file = os.path.join(base_path, 'output_enrichr_similarities_%s.html' % wsize_str)
sim_graph_title = 'Enrichr similarities %s' % wsize_str
generate_similarity_graph(sim_json_file, sim_out_html_file, title=sim_graph_title)
def __write_per_region_gene_matching(out_file, assoc):
# write per region gene matching
f = open(out_file, 'w')
for chr in assoc:
for pos_obj in assoc[chr]:
f.write('%s\t%i\t%i\t' % (pos_obj['chr'], pos_obj['pos'], len(pos_obj['genes'])))
# add matching genes at the end of the row (comma separated)
f.write('%s\n' % ','.join(map(lambda g: g.name, pos_obj['genes'])))
f.close()
def __write_genes_vs_regions_table(out_file, regions_by_gene_count):
# write #genes vs #regions data table
f = open(out_file, 'w')
f.write('#genes\t#regions\n')
for val in regions_by_gene_count:
f.write('%i\t%i\n' % val)
f.close()
if __name__ == "__main__":
base_path = '/home/victor/Escritorio/Genotipado_Alternativo/colocalizacion'
# test1()
test_genes_vs_multiple_snps(load_lines(os.path.join(base_path, 'sp140_genes.txt')),
os.path.join(base_path, 'snps_diseases'),
os.path.join(base_path, 'snps_diseases/output'))
sys.exit(0)
print 'Started:', datetime.datetime.now().isoformat()
snps_ids = load_lines(os.path.join(base_path, 'MS.txt'))
regions = create_snp_regions(snps_ids)
genes_db = create_gene_db('9606', os.path.join(base_path, 'GRCh38/mart_export.txt.gz'))
enrichr_path = os.path.join(base_path, 'enrichr')
lib_files = EnrichR.list_libraries(enrichr_path)
lib_files = filter(lambda n: n.startswith('Single_Gene_Perturbations_from_GEO'), lib_files)
lib_files = sorted(lib_files)
wsizes = WSIZES
# wsizes = [500000.0]
record_filter = lambda r: r.find('GSE50588') > -1
for wsize in wsizes:
wsize_str = human_format(wsize)
lib_results = {}
genes_in_regions = []
results_all = []
for name in lib_files: # Note: lib_files is sorted
lib_name = name[:-7] # remove '.txt.gz'
res = enrichr_db_test(os.path.join(enrichr_path, name), regions.get('GRCh38'), genes_db, wsize,
record_filter=record_filter)
print '%i matches in %s, [%s]' % (len(res), lib_name, datetime.datetime.now().isoformat())
lib_results[lib_name] = res
results_all += res
genes_in_regions += list(itertools.chain.from_iterable(map(lambda r: r[9], res)))
assoc = associate_genes_with_region(genes_in_regions, regions.get('GRCh38'), wsize)
regions_by_gene_count = calc_regions_by_gene_count(assoc)
similarities = __calc_similarities(results_all)
# write per library/study results
f = open(os.path.join(base_path, 'output_enrichr_%s.txt' % wsize_str), 'w')
for res in results_all:
f.write('%s\t%s\t%i\t%i\t%i\t%i\t%f\t%f\t%f\t' % res[:9])
# add matching genes at the end of the row (comma separated)
f.write('%s\n' % ','.join(map(lambda g: g.name, res[9])))
f.close()
# write per region gene matching
__write_per_region_gene_matching(os.path.join(base_path, 'output_regions_%s.txt' % wsize_str), assoc)
# write #genes vs #regions data table
__write_genes_vs_regions_table(os.path.join(base_path, 'output_regions_by_gene_count_%s.txt' % wsize_str),
regions_by_gene_count)
# Skip similarities output
# __write_similarities_output(base_path, wsize_str, similarities, results_all)
print 'Finished:', datetime.datetime.now().isoformat() |
def human_format(num):
# returns an amount in a human readable format
magnitude = 0
while abs(num) >= 1000: | random_line_split |
image.rs | use core::ops::Range;
use crate::ImageTrait;
use crate::vec::*;
use crate::rangetools::*;
use std::path::Path;
use static_assertions::*;
pub enum PixelPos {
R,
G,
B,
A,
}
pub fn convert(slice: &mut [Color]) -> &mut [u32] {
assert_eq_size!(Color, u32);
assert_eq_align!(Color, u32);
unsafe { std::slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u32, slice.len()) }
}
#[derive(Clone)]
pub struct Image {
pub buffer: Vec<u8>,
pub width: usize,
pub height: usize,
}
impl ImageTrait for Image {
fn get_rgba8_buffer(&self) -> &[u8] { &self.buffer[0..(self.height * self.width *4)] }
fn get_width(&self) -> usize { self.width }
fn get_height(&self) -> usize { self.height }
}
#[derive(Clone, Debug)]
#[repr(C, align(4))]
pub struct Color {
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
}
impl Image {
pub fn get_u32_buffer(&self) -> &[u32] {
let len = self.height * self.width;
let buffer = &self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn get_u32_mut_buffer(&mut self) -> &mut [u32] {
let len = self.height * self.width;
let buffer = &mut self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to_mut();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn new(size: &Vec2i) -> Image {
let width = size.x as usize;
let height = size.y as usize;
Image {
buffer: vec![0; width * height * 4],
width,
height,
}
}
pub fn resize_lazy(&mut self, size: &Vec2i) {
let width = size.x as usize;
let height = size.y as usize;
let needed_size = width * height * 4 * 12 / 10; // With capacity
if self.buffer.len() < needed_size {
self.buffer.resize(needed_size, 0);
}
self.width = width;
self.height = height;
}
#[inline]
pub fn clear(&mut self, color: &Color) {
let color = color.to_u32();
for pix in self.get_u32_mut_buffer() {
*pix = color;
}
}
#[inline]
pub fn get_rect(&self) -> Rect2i {
Rect2i {
min: Vec2i::default(),
max: Vec2i::new(self.width as i32, self.height as i32),
}
}
#[inline]
pub fn range_x(&self) -> Range<i32> {
0..(self.width as i32)
}
#[inline]
pub fn range_y(&self) -> Range<i32> {
0..(self.height as i32)
}
pub fn save_png(&self, path: &Path) -> Result<(), std::io::Error> {
use std::fs::File;
use std::io::BufWriter;
let file = File::create(path)?;
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, self.width as u32, self.height as u32);
encoder.set_color(png::ColorType::RGBA);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header()?;
writer.write_image_data(&self.buffer)?;
Ok(())
}
}
impl Color {
#[inline]
pub fn rgba(r: u8, g: u8, b: u8, a: u8) -> Color {
Color { r, g, b, a }
}
#[inline]
pub fn rgba_f64(r: f64, g: f64, b: f64, a: f64) -> Color {
Color {
r: (r * 255.0) as u8,
g: (g * 255.0) as u8,
b: (b * 255.0) as u8,
a: (a * 255.0) as u8,
}
}
#[inline]
pub fn to_rgba_f64(&self) -> (f64, f64, f64, f64) {
(
self.r as f64 / 255.0,
self.g as f64 / 255.0,
self.b as f64 / 255.0,
self.a as f64 / 255.0,
)
}
#[inline]
pub fn rgb(r: u8, g: u8, b: u8) -> Color {
Color::rgba(r, g, b, 255)
}
#[inline]
pub fn gray(rgb: u8) -> Color {
Color::rgb(rgb, rgb, rgb)
}
#[inline]
pub fn from_u32(v: u32) -> Self {
let res = u32::to_le_bytes(v);
Color::rgba(res[0], res[1], res[2], res[3])
}
#[inline]
pub fn to_u32(&self) -> u32 {
u32::from_le_bytes([self.r, self.g, self.b, self.a])
}
}
#[inline]
pub fn get_pixel(image: &Image, pos: &Vec2i) -> Color {
Color::from_u32(image.get_u32_buffer()[pos.x as usize + pos.y as usize * image.width])
}
#[inline]
pub fn set_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
let width = image.width;
image.get_u32_mut_buffer()[pos.x as usize + pos.y as usize * width] = color.to_u32();
}
#[inline]
pub fn draw_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
set_pixel(image, &pos, &blend(&color, &get_pixel(image, &pos)));
}
fn for_two_images<F: Fn(&mut u32, &u32)>(dst: &mut Image, src: &Image, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&src.range_y(), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&src.range_x(), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_x_range = offset_range(&src_x_range, src_y_range.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
let src_slice = &src_buf[range_to_usize(&src_x_range)];
for (pix_dst, pix_src) in dst_slice.iter_mut().zip(src_slice.iter()) {
f(pix_dst, pix_src);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
src_x_range = offset_range(&src_x_range, src_width);
}
}
pub fn place_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| *pix_dst = *pix_src);
}
pub fn place_image_scaled(dst: &mut Image, src: &Image, pos: &Vec2i, scale: i32) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..(src.height as i32 * scale)), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..(src.width as i32 * scale)), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let src_y_range_slice = div_range(&src_y_range, scale);
let src_x_range_slice = div_range(&src_x_range, scale);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_pos_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_pos_range = offset_range(&src_x_range_slice, src_y_range_slice.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
let mut current_y = src_y_range.start / scale;
for src_y in src_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_pos_range)];
let src_slice = &src_buf[range_to_usize(&src_pos_range)];
let mut current_x = src_x_range_slice.start;
let mut src_iter = src_slice.iter();
let mut pix_src = src_iter.next().unwrap();
for (pix_dst, src_x) in dst_slice.iter_mut().zip(src_x_range.clone()) {
if src_x / scale != current_x {
pix_src = src_iter.next().unwrap();
current_x = src_x / scale;
}
*pix_dst = *pix_src;
}
dst_pos_range = offset_range(&dst_pos_range, dst_width);
if src_y / scale != current_y {
src_pos_range = offset_range(&src_pos_range, src_width);
current_y = src_y / scale;
}
}
}
pub fn draw_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| {
*pix_dst = blend(&Color::from_u32(*pix_src), &Color::from_u32(*pix_dst)).to_u32();
});
}
#[inline]
pub fn function_for_all_pixels<F: FnMut(usize, usize) -> Color>(image: &mut Image, mut f: F) {
let height = image.height;
let width = image.width;
let mut iter = image.get_u32_mut_buffer().iter_mut();
for y in 0..height {
for x in 0..width {
let color = f(x, y);
if let Some(c) = iter.next() {
*c = color.to_u32();
}
}
}
}
fn for_image_and_rect<F: Fn(&mut u32)>(dst: &mut Image, rect_size: &Vec2i, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..rect_size.y), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..rect_size.x), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let dst_width = dst.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let dst_buf = dst.get_u32_mut_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
for pix_dst in dst_slice.iter_mut() {
f(pix_dst);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
}
}
#[inline]
pub fn draw_rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
for_image_and_rect(image, size, pos, |pix| {
*pix = blend(&color, &Color::from_u32(*pix)).to_u32();
});
}
#[inline]
pub fn rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
let color = color.to_u32();
for_image_and_rect(image, size, pos, |pix| *pix = color);
}
#[inline]
/// Fast blend on integer numbers without gamma correction and premultiplied alpha. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
pub fn blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as i32;
let dsta = dst.a as i32;
let outa = (srca + dsta) * 255 - srca * dsta;
macro_rules! blend {
($src:expr, $dst:expr) => {
((255 * ($src as i32) * srca + ($dst as i32) * dsta * (255 - srca)) / outa) as u8
};
}
if outa == 0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa / 255) as u8
)
}
}
#[inline]
/// Works on f32 with gamma correction of 2.2 power. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending + https://en.wikipedia.org/wiki/Alpha_compositing#Composing_alpha_blending_with_gamma_correction
pub fn ideal_blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as f32 / 255.0;
let dsta = dst.a as f32 / 255.0;
let outa = 1. - (1. - srca) * (1. - dsta);
macro_rules! blend {
($src:expr, $dst:expr) => {
(((($src as f32 / 255.0).powf(2.2) * srca + ($dst as f32 / 255.0).powf(2.2) * dsta * (1.0 - srca)) / outa).powf(1. / 2.2) * 255.0) as u8
};
}
if outa == 0.0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa * 255.0) as u8 | )
}
}
pub fn place_repeated_scaled_image(image: &mut Image, repeated_image: &Image, pos: &Vec2i, scale: i32, repeat_x: bool, repeat_y: bool) {
let size = Vec2i::new(repeated_image.get_width() as i32, repeated_image.get_height() as i32) * scale;
let range_x = calc_range_for_repeated_line(repeat_x, pos.x, size.x, image.get_width() as i32);
let range_y = calc_range_for_repeated_line(repeat_y, pos.y, size.y, image.get_height() as i32);
for y in range_y {
for x in range_x.clone() {
place_image_scaled(image, repeated_image, &(Vec2i::new(
x * size.x,
y * size.y
) + pos), scale);
}
}
fn calc_range_for_repeated_line(repeat: bool, pos: i32, len: i32, size: i32) -> std::ops::Range<i32> {
if repeat {
let minus = {
let mut pos_offset = 0;
while pos + pos_offset * len >= -len {
pos_offset -= 1 ;
}
pos_offset
};
let plus = {
let mut pos_offset = 0;
while pos + pos_offset * len < size {
pos_offset += 1 ;
}
pos_offset
};
minus..plus
} else {
0i32..1i32
}
}
} | random_line_split | |
image.rs | use core::ops::Range;
use crate::ImageTrait;
use crate::vec::*;
use crate::rangetools::*;
use std::path::Path;
use static_assertions::*;
pub enum PixelPos {
R,
G,
B,
A,
}
pub fn convert(slice: &mut [Color]) -> &mut [u32] {
assert_eq_size!(Color, u32);
assert_eq_align!(Color, u32);
unsafe { std::slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u32, slice.len()) }
}
#[derive(Clone)]
pub struct Image {
pub buffer: Vec<u8>,
pub width: usize,
pub height: usize,
}
impl ImageTrait for Image {
fn get_rgba8_buffer(&self) -> &[u8] { &self.buffer[0..(self.height * self.width *4)] }
fn get_width(&self) -> usize { self.width }
fn get_height(&self) -> usize { self.height }
}
#[derive(Clone, Debug)]
#[repr(C, align(4))]
pub struct Color {
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
}
impl Image {
pub fn get_u32_buffer(&self) -> &[u32] {
let len = self.height * self.width;
let buffer = &self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn get_u32_mut_buffer(&mut self) -> &mut [u32] {
let len = self.height * self.width;
let buffer = &mut self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to_mut();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn new(size: &Vec2i) -> Image {
let width = size.x as usize;
let height = size.y as usize;
Image {
buffer: vec![0; width * height * 4],
width,
height,
}
}
pub fn resize_lazy(&mut self, size: &Vec2i) {
let width = size.x as usize;
let height = size.y as usize;
let needed_size = width * height * 4 * 12 / 10; // With capacity
if self.buffer.len() < needed_size {
self.buffer.resize(needed_size, 0);
}
self.width = width;
self.height = height;
}
#[inline]
pub fn clear(&mut self, color: &Color) {
let color = color.to_u32();
for pix in self.get_u32_mut_buffer() {
*pix = color;
}
}
#[inline]
pub fn get_rect(&self) -> Rect2i {
Rect2i {
min: Vec2i::default(),
max: Vec2i::new(self.width as i32, self.height as i32),
}
}
#[inline]
pub fn range_x(&self) -> Range<i32> {
0..(self.width as i32)
}
#[inline]
pub fn range_y(&self) -> Range<i32> {
0..(self.height as i32)
}
pub fn save_png(&self, path: &Path) -> Result<(), std::io::Error> {
use std::fs::File;
use std::io::BufWriter;
let file = File::create(path)?;
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, self.width as u32, self.height as u32);
encoder.set_color(png::ColorType::RGBA);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header()?;
writer.write_image_data(&self.buffer)?;
Ok(())
}
}
impl Color {
#[inline]
pub fn rgba(r: u8, g: u8, b: u8, a: u8) -> Color {
Color { r, g, b, a }
}
#[inline]
pub fn rgba_f64(r: f64, g: f64, b: f64, a: f64) -> Color {
Color {
r: (r * 255.0) as u8,
g: (g * 255.0) as u8,
b: (b * 255.0) as u8,
a: (a * 255.0) as u8,
}
}
#[inline]
pub fn to_rgba_f64(&self) -> (f64, f64, f64, f64) {
(
self.r as f64 / 255.0,
self.g as f64 / 255.0,
self.b as f64 / 255.0,
self.a as f64 / 255.0,
)
}
#[inline]
pub fn rgb(r: u8, g: u8, b: u8) -> Color {
Color::rgba(r, g, b, 255)
}
#[inline]
pub fn gray(rgb: u8) -> Color {
Color::rgb(rgb, rgb, rgb)
}
#[inline]
pub fn from_u32(v: u32) -> Self {
let res = u32::to_le_bytes(v);
Color::rgba(res[0], res[1], res[2], res[3])
}
#[inline]
pub fn to_u32(&self) -> u32 {
u32::from_le_bytes([self.r, self.g, self.b, self.a])
}
}
#[inline]
pub fn get_pixel(image: &Image, pos: &Vec2i) -> Color {
Color::from_u32(image.get_u32_buffer()[pos.x as usize + pos.y as usize * image.width])
}
#[inline]
pub fn set_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
let width = image.width;
image.get_u32_mut_buffer()[pos.x as usize + pos.y as usize * width] = color.to_u32();
}
#[inline]
pub fn draw_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
set_pixel(image, &pos, &blend(&color, &get_pixel(image, &pos)));
}
fn for_two_images<F: Fn(&mut u32, &u32)>(dst: &mut Image, src: &Image, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&src.range_y(), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&src.range_x(), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_x_range = offset_range(&src_x_range, src_y_range.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
let src_slice = &src_buf[range_to_usize(&src_x_range)];
for (pix_dst, pix_src) in dst_slice.iter_mut().zip(src_slice.iter()) {
f(pix_dst, pix_src);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
src_x_range = offset_range(&src_x_range, src_width);
}
}
pub fn place_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| *pix_dst = *pix_src);
}
pub fn place_image_scaled(dst: &mut Image, src: &Image, pos: &Vec2i, scale: i32) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..(src.height as i32 * scale)), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..(src.width as i32 * scale)), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let src_y_range_slice = div_range(&src_y_range, scale);
let src_x_range_slice = div_range(&src_x_range, scale);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_pos_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_pos_range = offset_range(&src_x_range_slice, src_y_range_slice.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
let mut current_y = src_y_range.start / scale;
for src_y in src_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_pos_range)];
let src_slice = &src_buf[range_to_usize(&src_pos_range)];
let mut current_x = src_x_range_slice.start;
let mut src_iter = src_slice.iter();
let mut pix_src = src_iter.next().unwrap();
for (pix_dst, src_x) in dst_slice.iter_mut().zip(src_x_range.clone()) {
if src_x / scale != current_x {
pix_src = src_iter.next().unwrap();
current_x = src_x / scale;
}
*pix_dst = *pix_src;
}
dst_pos_range = offset_range(&dst_pos_range, dst_width);
if src_y / scale != current_y {
src_pos_range = offset_range(&src_pos_range, src_width);
current_y = src_y / scale;
}
}
}
pub fn draw_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| {
*pix_dst = blend(&Color::from_u32(*pix_src), &Color::from_u32(*pix_dst)).to_u32();
});
}
#[inline]
pub fn function_for_all_pixels<F: FnMut(usize, usize) -> Color>(image: &mut Image, mut f: F) {
let height = image.height;
let width = image.width;
let mut iter = image.get_u32_mut_buffer().iter_mut();
for y in 0..height {
for x in 0..width {
let color = f(x, y);
if let Some(c) = iter.next() {
*c = color.to_u32();
}
}
}
}
fn for_image_and_rect<F: Fn(&mut u32)>(dst: &mut Image, rect_size: &Vec2i, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..rect_size.y), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..rect_size.x), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let dst_width = dst.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let dst_buf = dst.get_u32_mut_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
for pix_dst in dst_slice.iter_mut() {
f(pix_dst);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
}
}
#[inline]
pub fn draw_rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
for_image_and_rect(image, size, pos, |pix| {
*pix = blend(&color, &Color::from_u32(*pix)).to_u32();
});
}
#[inline]
pub fn rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
let color = color.to_u32();
for_image_and_rect(image, size, pos, |pix| *pix = color);
}
#[inline]
/// Fast blend on integer numbers without gamma correction and premultiplied alpha. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
pub fn blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as i32;
let dsta = dst.a as i32;
let outa = (srca + dsta) * 255 - srca * dsta;
macro_rules! blend {
($src:expr, $dst:expr) => {
((255 * ($src as i32) * srca + ($dst as i32) * dsta * (255 - srca)) / outa) as u8
};
}
if outa == 0 | else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa / 255) as u8
)
}
}
#[inline]
/// Works on f32 with gamma correction of 2.2 power. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending + https://en.wikipedia.org/wiki/Alpha_compositing#Composing_alpha_blending_with_gamma_correction
pub fn ideal_blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as f32 / 255.0;
let dsta = dst.a as f32 / 255.0;
let outa = 1. - (1. - srca) * (1. - dsta);
macro_rules! blend {
($src:expr, $dst:expr) => {
(((($src as f32 / 255.0).powf(2.2) * srca + ($dst as f32 / 255.0).powf(2.2) * dsta * (1.0 - srca)) / outa).powf(1. / 2.2) * 255.0) as u8
};
}
if outa == 0.0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa * 255.0) as u8
)
}
}
pub fn place_repeated_scaled_image(image: &mut Image, repeated_image: &Image, pos: &Vec2i, scale: i32, repeat_x: bool, repeat_y: bool) {
let size = Vec2i::new(repeated_image.get_width() as i32, repeated_image.get_height() as i32) * scale;
let range_x = calc_range_for_repeated_line(repeat_x, pos.x, size.x, image.get_width() as i32);
let range_y = calc_range_for_repeated_line(repeat_y, pos.y, size.y, image.get_height() as i32);
for y in range_y {
for x in range_x.clone() {
place_image_scaled(image, repeated_image, &(Vec2i::new(
x * size.x,
y * size.y
) + pos), scale);
}
}
fn calc_range_for_repeated_line(repeat: bool, pos: i32, len: i32, size: i32) -> std::ops::Range<i32> {
if repeat {
let minus = {
let mut pos_offset = 0;
while pos + pos_offset * len >= -len {
pos_offset -= 1 ;
}
pos_offset
};
let plus = {
let mut pos_offset = 0;
while pos + pos_offset * len < size {
pos_offset += 1 ;
}
pos_offset
};
minus..plus
} else {
0i32..1i32
}
}
}
| {
Color::rgba(0, 0, 0, 0)
} | conditional_block |
image.rs | use core::ops::Range;
use crate::ImageTrait;
use crate::vec::*;
use crate::rangetools::*;
use std::path::Path;
use static_assertions::*;
pub enum PixelPos {
R,
G,
B,
A,
}
pub fn convert(slice: &mut [Color]) -> &mut [u32] {
assert_eq_size!(Color, u32);
assert_eq_align!(Color, u32);
unsafe { std::slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u32, slice.len()) }
}
#[derive(Clone)]
pub struct Image {
pub buffer: Vec<u8>,
pub width: usize,
pub height: usize,
}
impl ImageTrait for Image {
fn get_rgba8_buffer(&self) -> &[u8] { &self.buffer[0..(self.height * self.width *4)] }
fn get_width(&self) -> usize { self.width }
fn get_height(&self) -> usize { self.height }
}
#[derive(Clone, Debug)]
#[repr(C, align(4))]
pub struct Color {
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
}
impl Image {
pub fn get_u32_buffer(&self) -> &[u32] {
let len = self.height * self.width;
let buffer = &self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn get_u32_mut_buffer(&mut self) -> &mut [u32] {
let len = self.height * self.width;
let buffer = &mut self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to_mut();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn new(size: &Vec2i) -> Image {
let width = size.x as usize;
let height = size.y as usize;
Image {
buffer: vec![0; width * height * 4],
width,
height,
}
}
pub fn resize_lazy(&mut self, size: &Vec2i) {
let width = size.x as usize;
let height = size.y as usize;
let needed_size = width * height * 4 * 12 / 10; // With capacity
if self.buffer.len() < needed_size {
self.buffer.resize(needed_size, 0);
}
self.width = width;
self.height = height;
}
#[inline]
pub fn clear(&mut self, color: &Color) {
let color = color.to_u32();
for pix in self.get_u32_mut_buffer() {
*pix = color;
}
}
#[inline]
pub fn get_rect(&self) -> Rect2i {
Rect2i {
min: Vec2i::default(),
max: Vec2i::new(self.width as i32, self.height as i32),
}
}
#[inline]
pub fn range_x(&self) -> Range<i32> {
0..(self.width as i32)
}
#[inline]
pub fn range_y(&self) -> Range<i32> |
pub fn save_png(&self, path: &Path) -> Result<(), std::io::Error> {
use std::fs::File;
use std::io::BufWriter;
let file = File::create(path)?;
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, self.width as u32, self.height as u32);
encoder.set_color(png::ColorType::RGBA);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header()?;
writer.write_image_data(&self.buffer)?;
Ok(())
}
}
impl Color {
#[inline]
pub fn rgba(r: u8, g: u8, b: u8, a: u8) -> Color {
Color { r, g, b, a }
}
#[inline]
pub fn rgba_f64(r: f64, g: f64, b: f64, a: f64) -> Color {
Color {
r: (r * 255.0) as u8,
g: (g * 255.0) as u8,
b: (b * 255.0) as u8,
a: (a * 255.0) as u8,
}
}
#[inline]
pub fn to_rgba_f64(&self) -> (f64, f64, f64, f64) {
(
self.r as f64 / 255.0,
self.g as f64 / 255.0,
self.b as f64 / 255.0,
self.a as f64 / 255.0,
)
}
#[inline]
pub fn rgb(r: u8, g: u8, b: u8) -> Color {
Color::rgba(r, g, b, 255)
}
#[inline]
pub fn gray(rgb: u8) -> Color {
Color::rgb(rgb, rgb, rgb)
}
#[inline]
pub fn from_u32(v: u32) -> Self {
let res = u32::to_le_bytes(v);
Color::rgba(res[0], res[1], res[2], res[3])
}
#[inline]
pub fn to_u32(&self) -> u32 {
u32::from_le_bytes([self.r, self.g, self.b, self.a])
}
}
#[inline]
pub fn get_pixel(image: &Image, pos: &Vec2i) -> Color {
Color::from_u32(image.get_u32_buffer()[pos.x as usize + pos.y as usize * image.width])
}
#[inline]
pub fn set_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
let width = image.width;
image.get_u32_mut_buffer()[pos.x as usize + pos.y as usize * width] = color.to_u32();
}
#[inline]
pub fn draw_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
set_pixel(image, &pos, &blend(&color, &get_pixel(image, &pos)));
}
fn for_two_images<F: Fn(&mut u32, &u32)>(dst: &mut Image, src: &Image, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&src.range_y(), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&src.range_x(), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_x_range = offset_range(&src_x_range, src_y_range.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
let src_slice = &src_buf[range_to_usize(&src_x_range)];
for (pix_dst, pix_src) in dst_slice.iter_mut().zip(src_slice.iter()) {
f(pix_dst, pix_src);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
src_x_range = offset_range(&src_x_range, src_width);
}
}
pub fn place_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| *pix_dst = *pix_src);
}
pub fn place_image_scaled(dst: &mut Image, src: &Image, pos: &Vec2i, scale: i32) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..(src.height as i32 * scale)), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..(src.width as i32 * scale)), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let src_y_range_slice = div_range(&src_y_range, scale);
let src_x_range_slice = div_range(&src_x_range, scale);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_pos_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_pos_range = offset_range(&src_x_range_slice, src_y_range_slice.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
let mut current_y = src_y_range.start / scale;
for src_y in src_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_pos_range)];
let src_slice = &src_buf[range_to_usize(&src_pos_range)];
let mut current_x = src_x_range_slice.start;
let mut src_iter = src_slice.iter();
let mut pix_src = src_iter.next().unwrap();
for (pix_dst, src_x) in dst_slice.iter_mut().zip(src_x_range.clone()) {
if src_x / scale != current_x {
pix_src = src_iter.next().unwrap();
current_x = src_x / scale;
}
*pix_dst = *pix_src;
}
dst_pos_range = offset_range(&dst_pos_range, dst_width);
if src_y / scale != current_y {
src_pos_range = offset_range(&src_pos_range, src_width);
current_y = src_y / scale;
}
}
}
pub fn draw_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| {
*pix_dst = blend(&Color::from_u32(*pix_src), &Color::from_u32(*pix_dst)).to_u32();
});
}
#[inline]
pub fn function_for_all_pixels<F: FnMut(usize, usize) -> Color>(image: &mut Image, mut f: F) {
let height = image.height;
let width = image.width;
let mut iter = image.get_u32_mut_buffer().iter_mut();
for y in 0..height {
for x in 0..width {
let color = f(x, y);
if let Some(c) = iter.next() {
*c = color.to_u32();
}
}
}
}
fn for_image_and_rect<F: Fn(&mut u32)>(dst: &mut Image, rect_size: &Vec2i, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..rect_size.y), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..rect_size.x), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let dst_width = dst.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let dst_buf = dst.get_u32_mut_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
for pix_dst in dst_slice.iter_mut() {
f(pix_dst);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
}
}
#[inline]
pub fn draw_rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
for_image_and_rect(image, size, pos, |pix| {
*pix = blend(&color, &Color::from_u32(*pix)).to_u32();
});
}
#[inline]
pub fn rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
let color = color.to_u32();
for_image_and_rect(image, size, pos, |pix| *pix = color);
}
#[inline]
/// Fast blend on integer numbers without gamma correction and premultiplied alpha. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
pub fn blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as i32;
let dsta = dst.a as i32;
let outa = (srca + dsta) * 255 - srca * dsta;
macro_rules! blend {
($src:expr, $dst:expr) => {
((255 * ($src as i32) * srca + ($dst as i32) * dsta * (255 - srca)) / outa) as u8
};
}
if outa == 0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa / 255) as u8
)
}
}
#[inline]
/// Works on f32 with gamma correction of 2.2 power. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending + https://en.wikipedia.org/wiki/Alpha_compositing#Composing_alpha_blending_with_gamma_correction
pub fn ideal_blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as f32 / 255.0;
let dsta = dst.a as f32 / 255.0;
let outa = 1. - (1. - srca) * (1. - dsta);
macro_rules! blend {
($src:expr, $dst:expr) => {
(((($src as f32 / 255.0).powf(2.2) * srca + ($dst as f32 / 255.0).powf(2.2) * dsta * (1.0 - srca)) / outa).powf(1. / 2.2) * 255.0) as u8
};
}
if outa == 0.0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa * 255.0) as u8
)
}
}
pub fn place_repeated_scaled_image(image: &mut Image, repeated_image: &Image, pos: &Vec2i, scale: i32, repeat_x: bool, repeat_y: bool) {
let size = Vec2i::new(repeated_image.get_width() as i32, repeated_image.get_height() as i32) * scale;
let range_x = calc_range_for_repeated_line(repeat_x, pos.x, size.x, image.get_width() as i32);
let range_y = calc_range_for_repeated_line(repeat_y, pos.y, size.y, image.get_height() as i32);
for y in range_y {
for x in range_x.clone() {
place_image_scaled(image, repeated_image, &(Vec2i::new(
x * size.x,
y * size.y
) + pos), scale);
}
}
fn calc_range_for_repeated_line(repeat: bool, pos: i32, len: i32, size: i32) -> std::ops::Range<i32> {
if repeat {
let minus = {
let mut pos_offset = 0;
while pos + pos_offset * len >= -len {
pos_offset -= 1 ;
}
pos_offset
};
let plus = {
let mut pos_offset = 0;
while pos + pos_offset * len < size {
pos_offset += 1 ;
}
pos_offset
};
minus..plus
} else {
0i32..1i32
}
}
}
| {
0..(self.height as i32)
} | identifier_body |
image.rs | use core::ops::Range;
use crate::ImageTrait;
use crate::vec::*;
use crate::rangetools::*;
use std::path::Path;
use static_assertions::*;
pub enum PixelPos {
R,
G,
B,
A,
}
pub fn convert(slice: &mut [Color]) -> &mut [u32] {
assert_eq_size!(Color, u32);
assert_eq_align!(Color, u32);
unsafe { std::slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u32, slice.len()) }
}
#[derive(Clone)]
pub struct Image {
pub buffer: Vec<u8>,
pub width: usize,
pub height: usize,
}
impl ImageTrait for Image {
fn get_rgba8_buffer(&self) -> &[u8] { &self.buffer[0..(self.height * self.width *4)] }
fn get_width(&self) -> usize { self.width }
fn get_height(&self) -> usize { self.height }
}
#[derive(Clone, Debug)]
#[repr(C, align(4))]
pub struct Color {
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
}
impl Image {
pub fn get_u32_buffer(&self) -> &[u32] {
let len = self.height * self.width;
let buffer = &self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn get_u32_mut_buffer(&mut self) -> &mut [u32] {
let len = self.height * self.width;
let buffer = &mut self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to_mut();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn new(size: &Vec2i) -> Image {
let width = size.x as usize;
let height = size.y as usize;
Image {
buffer: vec![0; width * height * 4],
width,
height,
}
}
pub fn resize_lazy(&mut self, size: &Vec2i) {
let width = size.x as usize;
let height = size.y as usize;
let needed_size = width * height * 4 * 12 / 10; // With capacity
if self.buffer.len() < needed_size {
self.buffer.resize(needed_size, 0);
}
self.width = width;
self.height = height;
}
#[inline]
pub fn clear(&mut self, color: &Color) {
let color = color.to_u32();
for pix in self.get_u32_mut_buffer() {
*pix = color;
}
}
#[inline]
pub fn get_rect(&self) -> Rect2i {
Rect2i {
min: Vec2i::default(),
max: Vec2i::new(self.width as i32, self.height as i32),
}
}
#[inline]
pub fn range_x(&self) -> Range<i32> {
0..(self.width as i32)
}
#[inline]
pub fn range_y(&self) -> Range<i32> {
0..(self.height as i32)
}
pub fn save_png(&self, path: &Path) -> Result<(), std::io::Error> {
use std::fs::File;
use std::io::BufWriter;
let file = File::create(path)?;
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, self.width as u32, self.height as u32);
encoder.set_color(png::ColorType::RGBA);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header()?;
writer.write_image_data(&self.buffer)?;
Ok(())
}
}
impl Color {
#[inline]
pub fn rgba(r: u8, g: u8, b: u8, a: u8) -> Color {
Color { r, g, b, a }
}
#[inline]
pub fn rgba_f64(r: f64, g: f64, b: f64, a: f64) -> Color {
Color {
r: (r * 255.0) as u8,
g: (g * 255.0) as u8,
b: (b * 255.0) as u8,
a: (a * 255.0) as u8,
}
}
#[inline]
pub fn to_rgba_f64(&self) -> (f64, f64, f64, f64) {
(
self.r as f64 / 255.0,
self.g as f64 / 255.0,
self.b as f64 / 255.0,
self.a as f64 / 255.0,
)
}
#[inline]
pub fn rgb(r: u8, g: u8, b: u8) -> Color {
Color::rgba(r, g, b, 255)
}
#[inline]
pub fn gray(rgb: u8) -> Color {
Color::rgb(rgb, rgb, rgb)
}
#[inline]
pub fn from_u32(v: u32) -> Self {
let res = u32::to_le_bytes(v);
Color::rgba(res[0], res[1], res[2], res[3])
}
#[inline]
pub fn to_u32(&self) -> u32 {
u32::from_le_bytes([self.r, self.g, self.b, self.a])
}
}
#[inline]
pub fn get_pixel(image: &Image, pos: &Vec2i) -> Color {
Color::from_u32(image.get_u32_buffer()[pos.x as usize + pos.y as usize * image.width])
}
#[inline]
pub fn set_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
let width = image.width;
image.get_u32_mut_buffer()[pos.x as usize + pos.y as usize * width] = color.to_u32();
}
#[inline]
pub fn draw_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
set_pixel(image, &pos, &blend(&color, &get_pixel(image, &pos)));
}
fn for_two_images<F: Fn(&mut u32, &u32)>(dst: &mut Image, src: &Image, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&src.range_y(), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&src.range_x(), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_x_range = offset_range(&src_x_range, src_y_range.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
let src_slice = &src_buf[range_to_usize(&src_x_range)];
for (pix_dst, pix_src) in dst_slice.iter_mut().zip(src_slice.iter()) {
f(pix_dst, pix_src);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
src_x_range = offset_range(&src_x_range, src_width);
}
}
pub fn place_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| *pix_dst = *pix_src);
}
pub fn place_image_scaled(dst: &mut Image, src: &Image, pos: &Vec2i, scale: i32) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..(src.height as i32 * scale)), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..(src.width as i32 * scale)), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let src_y_range_slice = div_range(&src_y_range, scale);
let src_x_range_slice = div_range(&src_x_range, scale);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_pos_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_pos_range = offset_range(&src_x_range_slice, src_y_range_slice.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
let mut current_y = src_y_range.start / scale;
for src_y in src_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_pos_range)];
let src_slice = &src_buf[range_to_usize(&src_pos_range)];
let mut current_x = src_x_range_slice.start;
let mut src_iter = src_slice.iter();
let mut pix_src = src_iter.next().unwrap();
for (pix_dst, src_x) in dst_slice.iter_mut().zip(src_x_range.clone()) {
if src_x / scale != current_x {
pix_src = src_iter.next().unwrap();
current_x = src_x / scale;
}
*pix_dst = *pix_src;
}
dst_pos_range = offset_range(&dst_pos_range, dst_width);
if src_y / scale != current_y {
src_pos_range = offset_range(&src_pos_range, src_width);
current_y = src_y / scale;
}
}
}
pub fn draw_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| {
*pix_dst = blend(&Color::from_u32(*pix_src), &Color::from_u32(*pix_dst)).to_u32();
});
}
#[inline]
pub fn function_for_all_pixels<F: FnMut(usize, usize) -> Color>(image: &mut Image, mut f: F) {
let height = image.height;
let width = image.width;
let mut iter = image.get_u32_mut_buffer().iter_mut();
for y in 0..height {
for x in 0..width {
let color = f(x, y);
if let Some(c) = iter.next() {
*c = color.to_u32();
}
}
}
}
fn for_image_and_rect<F: Fn(&mut u32)>(dst: &mut Image, rect_size: &Vec2i, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..rect_size.y), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..rect_size.x), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let dst_width = dst.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let dst_buf = dst.get_u32_mut_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
for pix_dst in dst_slice.iter_mut() {
f(pix_dst);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
}
}
#[inline]
pub fn draw_rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
for_image_and_rect(image, size, pos, |pix| {
*pix = blend(&color, &Color::from_u32(*pix)).to_u32();
});
}
#[inline]
pub fn rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
let color = color.to_u32();
for_image_and_rect(image, size, pos, |pix| *pix = color);
}
#[inline]
/// Fast blend on integer numbers without gamma correction and premultiplied alpha. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
pub fn blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as i32;
let dsta = dst.a as i32;
let outa = (srca + dsta) * 255 - srca * dsta;
macro_rules! blend {
($src:expr, $dst:expr) => {
((255 * ($src as i32) * srca + ($dst as i32) * dsta * (255 - srca)) / outa) as u8
};
}
if outa == 0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa / 255) as u8
)
}
}
#[inline]
/// Works on f32 with gamma correction of 2.2 power. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending + https://en.wikipedia.org/wiki/Alpha_compositing#Composing_alpha_blending_with_gamma_correction
pub fn | (src: &Color, dst: &Color) -> Color {
let srca = src.a as f32 / 255.0;
let dsta = dst.a as f32 / 255.0;
let outa = 1. - (1. - srca) * (1. - dsta);
macro_rules! blend {
($src:expr, $dst:expr) => {
(((($src as f32 / 255.0).powf(2.2) * srca + ($dst as f32 / 255.0).powf(2.2) * dsta * (1.0 - srca)) / outa).powf(1. / 2.2) * 255.0) as u8
};
}
if outa == 0.0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa * 255.0) as u8
)
}
}
pub fn place_repeated_scaled_image(image: &mut Image, repeated_image: &Image, pos: &Vec2i, scale: i32, repeat_x: bool, repeat_y: bool) {
let size = Vec2i::new(repeated_image.get_width() as i32, repeated_image.get_height() as i32) * scale;
let range_x = calc_range_for_repeated_line(repeat_x, pos.x, size.x, image.get_width() as i32);
let range_y = calc_range_for_repeated_line(repeat_y, pos.y, size.y, image.get_height() as i32);
for y in range_y {
for x in range_x.clone() {
place_image_scaled(image, repeated_image, &(Vec2i::new(
x * size.x,
y * size.y
) + pos), scale);
}
}
fn calc_range_for_repeated_line(repeat: bool, pos: i32, len: i32, size: i32) -> std::ops::Range<i32> {
if repeat {
let minus = {
let mut pos_offset = 0;
while pos + pos_offset * len >= -len {
pos_offset -= 1 ;
}
pos_offset
};
let plus = {
let mut pos_offset = 0;
while pos + pos_offset * len < size {
pos_offset += 1 ;
}
pos_offset
};
minus..plus
} else {
0i32..1i32
}
}
}
| ideal_blend | identifier_name |
fish-eye-chart.ts | import {
Axis,
AxisScale,
ScaleOrdinal,
ScalePower,
Selection,
axisBottom,
axisLeft,
format,
pointer as pointerD3,
scaleLinear,
scaleLog,
scaleOrdinal,
scaleSqrt,
schemePastel2,
select,
} from "d3"
import d3Fisheye, { FishEyeScale } from "@/utils/fishEye"
import * as styles from "./fish-eye.module.css"
const margin = {
bottom: 70,
left: 70,
right: 50,
top: 80,
}
const LEFT_OFFSET_SMALL_DEVICE = 20
const height = 700 - margin.top - margin.bottom
type FishEyeChartOpts<ChartData> = Readonly<{
chartItems: ChartData[]
colorDomain: string[]
getCircleTitle: (chartItem: ChartData) => string
getColorValue: (chartItem: ChartData) => string
getRadiusValue: (chartItem: ChartData) => number
getXValue: (chartItem: ChartData) => number
getYValue: (chartItem: ChartData) => number
rootElId: string
titles: {
long: string
short: string
}
xAxisLabel: string
yAxisLabel: string
}>
class FishEyeChart<ChartData> {
private readonly config: FishEyeChartOpts<ChartData>
private width = 0
private dom!: {
dot?: Selection<SVGCircleElement, ChartData, SVGGElement, unknown>
pointer?: Selection<SVGTextElement, unknown, HTMLElement, unknown>
svg: Selection<SVGSVGElement, unknown, HTMLElement, unknown>
svgG: Selection<SVGGElement, unknown, HTMLElement, unknown>
xAxis?: Axis<number>
yAxis?: Axis<number>
}
private vars!: {
colorScale: ScaleOrdinal<string, string>
focused: boolean
radiusScale: ScalePower<number, number>
xScale: FishEyeScale
yScale: FishEyeScale
}
public constructor(chartConfig: FishEyeChartOpts<ChartData>) {
this.config = chartConfig
this.setupRootEl()
this.setVars()
this.setDom()
this.setChartTitle()
this.setBackground()
this.setPointer()
this.setAxis()
this.setLabels()
this.setDots()
this.setTitles()
this.updateDimensions()
this.bindMousemove()
this.bindMouseLeave()
this.bindClick()
this.bindResize()
this.setZoom({
animationDuration: 0,
distortion: 0,
focus: [0, 0],
})
}
private static isTouchDevice() |
public refresh() {
this.updateDimensions(1000)
}
private setupRootEl() {
const rootEl = document.getElementById(this.config.rootElId) as HTMLElement
rootEl.classList.add(styles.fishEyeChart)
this.width =
rootEl.getBoundingClientRect().width - margin.left - margin.right
}
private isSmallDevice() {
return this.width < 500
}
private setDom() {
const svg = select(`#${this.config.rootElId}`).append("svg")
const svgG = svg.append("g")
this.dom = {
svg,
svgG,
}
}
private setChartTitle() {
this.dom.svgG
.append("text")
.attr("class", styles.chartTitle)
.attr("text-anchor", "middle")
.style("font-weight", "bold")
}
private setVars() {
const colorScale = scaleOrdinal<string>()
.domain(this.config.colorDomain)
.range(schemePastel2)
const radiusScale = scaleSqrt().domain([0, 5e8]).range([5, 60])
const xScale = d3Fisheye
.scale(scaleLog)
.domain([200, 1e5])
.range([0, this.width]) as FishEyeScale
const yScale = d3Fisheye
.scale(scaleLinear)
.domain([20, 90])
.range([height, 0]) as FishEyeScale
this.vars = {
colorScale,
focused: false,
radiusScale,
xScale,
yScale,
}
}
private setAxis() {
const formatFn = format(",d")
this.dom.xAxis = axisBottom(this.vars.xScale as AxisScale<number>)
.tickFormat((tickNumber) => {
if (tickNumber < 1000) {
return formatFn(tickNumber)
}
const reducedNum = Math.round(tickNumber / 1000)
return `${formatFn(reducedNum)}k`
})
.tickSize(-height)
this.dom.yAxis = axisLeft(this.vars.yScale as AxisScale<number>).tickSize(
-this.width
)
this.dom.svgG
.append("g")
.attr("class", `x ${styles.axis}`)
.attr("transform", `translate(0,${height})`)
.call(this.dom.xAxis)
this.dom.svgG
.append("g")
.attr("class", `y ${styles.axis}`)
.call(this.dom.yAxis)
}
private setBackground() {
return this.dom.svgG.append("rect").attr("class", styles.background)
}
private setLabels() {
this.dom.svgG
.append("text")
.attr("class", "x label")
.attr("text-anchor", "middle")
.text(this.config.xAxisLabel)
this.dom.svgG
.append("text")
.attr("class", "y label")
.attr("text-anchor", "middle")
.attr("x", -height / 2)
.attr("y", -40)
.attr("dy", ".75em")
.attr("transform", "rotate(-90)")
.text(this.config.yAxisLabel)
}
private position(animationDuration: number) {
this.dom.svgG.attr(
"transform",
`translate(${
margin.left - (this.isSmallDevice() ? LEFT_OFFSET_SMALL_DEVICE : 0)
},${margin.top})`
)
this.dom
// Sort the circles by radius, so the largest circles appear below
.dot!.sort(
(...[chartItemA, chartItemB]) =>
this.config.getRadiusValue(chartItemB) -
this.config.getRadiusValue(chartItemA)
)
.transition()
.duration(animationDuration)
.attr("cx", (chartItem) => {
const xValue = this.config.getXValue(chartItem)
return this.vars.xScale(xValue) as number
})
.attr("cy", (chartItem) => {
const yValue = this.config.getYValue(chartItem)
return this.vars.yScale(yValue) as number
})
.attr("r", (chartItem) => {
const radiusValue = this.config.getRadiusValue(chartItem)
return (
this.vars.radiusScale(radiusValue) / (this.isSmallDevice() ? 2 : 1)
)
})
this.dom.xAxis!.ticks(this.isSmallDevice() ? 2 : undefined)
this.dom.svgG
.select<SVGGElement>(`.x.${styles.axis}`)
.transition()
.duration(animationDuration)
.call(this.dom.xAxis!)
this.dom.svgG
.select<SVGGElement>(`.y.${styles.axis}`)
.transition()
.duration(animationDuration)
.call(this.dom.yAxis!)
}
private setDots() {
this.dom.dot = this.dom.svgG
.append("g")
.attr("class", "dots")
.selectAll(".dot")
.data<ChartData>(this.config.chartItems)
.enter()
.append("circle")
.attr("class", "dot")
.style("fill", (chartItem) => {
const colorValue = this.config.getColorValue(chartItem)
return this.vars.colorScale(colorValue)
})
.style("stroke", "black")
.style('"stroke-width"', "1px")
this.position(0)
}
private setTitles() {
this.dom.dot!.append("title").attr("class", "dot-title")
this.updateTitles()
}
private setZoom({
animationDuration,
distortion,
focus,
}: {
animationDuration: number
distortion: number
focus: [number, number]
}) {
this.vars.xScale.distortion(distortion).focus(focus[0])
this.vars.yScale.distortion(distortion).focus(focus[1])
this.position(animationDuration)
}
private updateTitles() {
this.dom
.dot!.selectAll<SVGTitleElement, ChartData>(".dot-title")
.text((chartItem) => this.config.getCircleTitle(chartItem))
this.dom.svgG
.select<SVGTitleElement>(`.${styles.chartTitle}`)
.text(
this.isSmallDevice()
? this.config.titles.short
: this.config.titles.long
)
}
private zoom({
animationDuration,
interactionEvent,
}: {
animationDuration: number
interactionEvent: Event
}) {
const focus = pointerD3(interactionEvent)
this.setZoom({
animationDuration,
distortion: 2.5,
focus,
})
}
private setPointer() {
this.dom.pointer = this.dom.svgG
.append("text")
.text("+")
.attr("class", styles.pointer)
}
private bindMousemove() {
return this.dom.svgG.on("mousemove", (interactionEvent) => {
if (FishEyeChart.isTouchDevice()) {
return
}
if (!this.vars.focused) {
this.zoom({
animationDuration: 0,
interactionEvent,
})
}
})
}
private bindMouseLeave() {
return this.dom.svgG.on("mouseleave", () => {
if (!this.vars.focused) {
this.setZoom({
animationDuration: 1000,
distortion: 0,
focus: [0, 0],
})
}
})
}
private bindClick() {
this.dom.svgG.on("click", (interactionEvent: Event) => {
const isTouchDevice = FishEyeChart.isTouchDevice()
if (!isTouchDevice) {
this.vars.focused = !this.vars.focused
if (this.vars.focused) {
const pointer = pointerD3(this)
this.dom
.pointer!.attr("x", pointer[0])
.attr("y", pointer[1])
.style("opacity", 1)
return
}
}
this.dom.pointer!.style("opacity", 0)
this.zoom({
animationDuration: isTouchDevice ? 1000 : 0,
interactionEvent,
})
})
}
private updateDimensions(animationDuration = 0) {
this.setupRootEl()
const isSmallDevice = this.isSmallDevice()
const widthOffset = isSmallDevice ? LEFT_OFFSET_SMALL_DEVICE : 0
const totalWidth = this.width + widthOffset
this.dom.svg
.attr("width", this.width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
this.dom.svgG
.select(`.${styles.chartTitle}`)
.attr("transform", `translate(${totalWidth / 2},-40)`)
this.dom.svgG
.select(`.${styles.background}`)
.attr("width", this.width)
.attr("height", height)
this.dom.svgG
.select(".x.label")
.attr("y", height + 26)
.attr("x", this.width / 2)
this.vars.xScale.range([0, totalWidth])
this.updateTitles()
this.position(animationDuration)
}
private bindResize() {
window.addEventListener("resize", () => {
this.updateDimensions()
})
}
}
export { FishEyeChart, FishEyeChartOpts }
| {
return (
"ontouchstart" in window ||
navigator.maxTouchPoints > 0 ||
(navigator as any).msMaxTouchPoints > 0 // eslint-disable-line @typescript-eslint/no-explicit-any
)
} | identifier_body |
fish-eye-chart.ts | import {
Axis,
AxisScale,
ScaleOrdinal,
ScalePower,
Selection,
axisBottom,
axisLeft,
format,
pointer as pointerD3,
scaleLinear,
scaleLog,
scaleOrdinal,
scaleSqrt,
schemePastel2,
select,
} from "d3"
import d3Fisheye, { FishEyeScale } from "@/utils/fishEye"
import * as styles from "./fish-eye.module.css"
const margin = {
bottom: 70,
left: 70,
right: 50,
top: 80,
}
const LEFT_OFFSET_SMALL_DEVICE = 20
const height = 700 - margin.top - margin.bottom
type FishEyeChartOpts<ChartData> = Readonly<{
chartItems: ChartData[]
colorDomain: string[]
getCircleTitle: (chartItem: ChartData) => string
getColorValue: (chartItem: ChartData) => string
getRadiusValue: (chartItem: ChartData) => number
getXValue: (chartItem: ChartData) => number
getYValue: (chartItem: ChartData) => number
rootElId: string
titles: {
long: string
short: string
}
xAxisLabel: string
yAxisLabel: string
}>
class FishEyeChart<ChartData> {
private readonly config: FishEyeChartOpts<ChartData>
private width = 0
private dom!: {
dot?: Selection<SVGCircleElement, ChartData, SVGGElement, unknown>
pointer?: Selection<SVGTextElement, unknown, HTMLElement, unknown>
svg: Selection<SVGSVGElement, unknown, HTMLElement, unknown>
svgG: Selection<SVGGElement, unknown, HTMLElement, unknown>
xAxis?: Axis<number>
yAxis?: Axis<number>
}
private vars!: {
colorScale: ScaleOrdinal<string, string>
focused: boolean
radiusScale: ScalePower<number, number>
xScale: FishEyeScale
yScale: FishEyeScale
}
public constructor(chartConfig: FishEyeChartOpts<ChartData>) {
this.config = chartConfig
this.setupRootEl()
this.setVars()
this.setDom()
this.setChartTitle()
this.setBackground()
this.setPointer()
this.setAxis()
this.setLabels()
this.setDots()
this.setTitles()
this.updateDimensions()
this.bindMousemove()
this.bindMouseLeave()
this.bindClick()
this.bindResize()
this.setZoom({
animationDuration: 0,
distortion: 0,
focus: [0, 0],
})
}
private static isTouchDevice() {
return (
"ontouchstart" in window ||
navigator.maxTouchPoints > 0 ||
(navigator as any).msMaxTouchPoints > 0 // eslint-disable-line @typescript-eslint/no-explicit-any
)
}
public refresh() {
this.updateDimensions(1000)
}
private | () {
const rootEl = document.getElementById(this.config.rootElId) as HTMLElement
rootEl.classList.add(styles.fishEyeChart)
this.width =
rootEl.getBoundingClientRect().width - margin.left - margin.right
}
private isSmallDevice() {
return this.width < 500
}
private setDom() {
const svg = select(`#${this.config.rootElId}`).append("svg")
const svgG = svg.append("g")
this.dom = {
svg,
svgG,
}
}
private setChartTitle() {
this.dom.svgG
.append("text")
.attr("class", styles.chartTitle)
.attr("text-anchor", "middle")
.style("font-weight", "bold")
}
private setVars() {
const colorScale = scaleOrdinal<string>()
.domain(this.config.colorDomain)
.range(schemePastel2)
const radiusScale = scaleSqrt().domain([0, 5e8]).range([5, 60])
const xScale = d3Fisheye
.scale(scaleLog)
.domain([200, 1e5])
.range([0, this.width]) as FishEyeScale
const yScale = d3Fisheye
.scale(scaleLinear)
.domain([20, 90])
.range([height, 0]) as FishEyeScale
this.vars = {
colorScale,
focused: false,
radiusScale,
xScale,
yScale,
}
}
private setAxis() {
const formatFn = format(",d")
this.dom.xAxis = axisBottom(this.vars.xScale as AxisScale<number>)
.tickFormat((tickNumber) => {
if (tickNumber < 1000) {
return formatFn(tickNumber)
}
const reducedNum = Math.round(tickNumber / 1000)
return `${formatFn(reducedNum)}k`
})
.tickSize(-height)
this.dom.yAxis = axisLeft(this.vars.yScale as AxisScale<number>).tickSize(
-this.width
)
this.dom.svgG
.append("g")
.attr("class", `x ${styles.axis}`)
.attr("transform", `translate(0,${height})`)
.call(this.dom.xAxis)
this.dom.svgG
.append("g")
.attr("class", `y ${styles.axis}`)
.call(this.dom.yAxis)
}
private setBackground() {
return this.dom.svgG.append("rect").attr("class", styles.background)
}
private setLabels() {
this.dom.svgG
.append("text")
.attr("class", "x label")
.attr("text-anchor", "middle")
.text(this.config.xAxisLabel)
this.dom.svgG
.append("text")
.attr("class", "y label")
.attr("text-anchor", "middle")
.attr("x", -height / 2)
.attr("y", -40)
.attr("dy", ".75em")
.attr("transform", "rotate(-90)")
.text(this.config.yAxisLabel)
}
private position(animationDuration: number) {
this.dom.svgG.attr(
"transform",
`translate(${
margin.left - (this.isSmallDevice() ? LEFT_OFFSET_SMALL_DEVICE : 0)
},${margin.top})`
)
this.dom
// Sort the circles by radius, so the largest circles appear below
.dot!.sort(
(...[chartItemA, chartItemB]) =>
this.config.getRadiusValue(chartItemB) -
this.config.getRadiusValue(chartItemA)
)
.transition()
.duration(animationDuration)
.attr("cx", (chartItem) => {
const xValue = this.config.getXValue(chartItem)
return this.vars.xScale(xValue) as number
})
.attr("cy", (chartItem) => {
const yValue = this.config.getYValue(chartItem)
return this.vars.yScale(yValue) as number
})
.attr("r", (chartItem) => {
const radiusValue = this.config.getRadiusValue(chartItem)
return (
this.vars.radiusScale(radiusValue) / (this.isSmallDevice() ? 2 : 1)
)
})
this.dom.xAxis!.ticks(this.isSmallDevice() ? 2 : undefined)
this.dom.svgG
.select<SVGGElement>(`.x.${styles.axis}`)
.transition()
.duration(animationDuration)
.call(this.dom.xAxis!)
this.dom.svgG
.select<SVGGElement>(`.y.${styles.axis}`)
.transition()
.duration(animationDuration)
.call(this.dom.yAxis!)
}
private setDots() {
this.dom.dot = this.dom.svgG
.append("g")
.attr("class", "dots")
.selectAll(".dot")
.data<ChartData>(this.config.chartItems)
.enter()
.append("circle")
.attr("class", "dot")
.style("fill", (chartItem) => {
const colorValue = this.config.getColorValue(chartItem)
return this.vars.colorScale(colorValue)
})
.style("stroke", "black")
.style('"stroke-width"', "1px")
this.position(0)
}
private setTitles() {
this.dom.dot!.append("title").attr("class", "dot-title")
this.updateTitles()
}
private setZoom({
animationDuration,
distortion,
focus,
}: {
animationDuration: number
distortion: number
focus: [number, number]
}) {
this.vars.xScale.distortion(distortion).focus(focus[0])
this.vars.yScale.distortion(distortion).focus(focus[1])
this.position(animationDuration)
}
private updateTitles() {
this.dom
.dot!.selectAll<SVGTitleElement, ChartData>(".dot-title")
.text((chartItem) => this.config.getCircleTitle(chartItem))
this.dom.svgG
.select<SVGTitleElement>(`.${styles.chartTitle}`)
.text(
this.isSmallDevice()
? this.config.titles.short
: this.config.titles.long
)
}
private zoom({
animationDuration,
interactionEvent,
}: {
animationDuration: number
interactionEvent: Event
}) {
const focus = pointerD3(interactionEvent)
this.setZoom({
animationDuration,
distortion: 2.5,
focus,
})
}
private setPointer() {
this.dom.pointer = this.dom.svgG
.append("text")
.text("+")
.attr("class", styles.pointer)
}
private bindMousemove() {
return this.dom.svgG.on("mousemove", (interactionEvent) => {
if (FishEyeChart.isTouchDevice()) {
return
}
if (!this.vars.focused) {
this.zoom({
animationDuration: 0,
interactionEvent,
})
}
})
}
private bindMouseLeave() {
return this.dom.svgG.on("mouseleave", () => {
if (!this.vars.focused) {
this.setZoom({
animationDuration: 1000,
distortion: 0,
focus: [0, 0],
})
}
})
}
private bindClick() {
this.dom.svgG.on("click", (interactionEvent: Event) => {
const isTouchDevice = FishEyeChart.isTouchDevice()
if (!isTouchDevice) {
this.vars.focused = !this.vars.focused
if (this.vars.focused) {
const pointer = pointerD3(this)
this.dom
.pointer!.attr("x", pointer[0])
.attr("y", pointer[1])
.style("opacity", 1)
return
}
}
this.dom.pointer!.style("opacity", 0)
this.zoom({
animationDuration: isTouchDevice ? 1000 : 0,
interactionEvent,
})
})
}
private updateDimensions(animationDuration = 0) {
this.setupRootEl()
const isSmallDevice = this.isSmallDevice()
const widthOffset = isSmallDevice ? LEFT_OFFSET_SMALL_DEVICE : 0
const totalWidth = this.width + widthOffset
this.dom.svg
.attr("width", this.width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
this.dom.svgG
.select(`.${styles.chartTitle}`)
.attr("transform", `translate(${totalWidth / 2},-40)`)
this.dom.svgG
.select(`.${styles.background}`)
.attr("width", this.width)
.attr("height", height)
this.dom.svgG
.select(".x.label")
.attr("y", height + 26)
.attr("x", this.width / 2)
this.vars.xScale.range([0, totalWidth])
this.updateTitles()
this.position(animationDuration)
}
private bindResize() {
window.addEventListener("resize", () => {
this.updateDimensions()
})
}
}
export { FishEyeChart, FishEyeChartOpts }
| setupRootEl | identifier_name |
fish-eye-chart.ts | import {
Axis,
AxisScale,
ScaleOrdinal,
ScalePower,
Selection,
axisBottom,
axisLeft,
format,
pointer as pointerD3,
scaleLinear,
scaleLog,
scaleOrdinal,
scaleSqrt,
schemePastel2,
select,
} from "d3"
import d3Fisheye, { FishEyeScale } from "@/utils/fishEye"
import * as styles from "./fish-eye.module.css"
const margin = {
bottom: 70,
left: 70,
right: 50,
top: 80,
}
const LEFT_OFFSET_SMALL_DEVICE = 20
const height = 700 - margin.top - margin.bottom
type FishEyeChartOpts<ChartData> = Readonly<{
chartItems: ChartData[]
colorDomain: string[]
getCircleTitle: (chartItem: ChartData) => string
getColorValue: (chartItem: ChartData) => string
getRadiusValue: (chartItem: ChartData) => number
getXValue: (chartItem: ChartData) => number
getYValue: (chartItem: ChartData) => number
rootElId: string
titles: {
long: string
short: string
}
xAxisLabel: string
yAxisLabel: string
}>
class FishEyeChart<ChartData> {
private readonly config: FishEyeChartOpts<ChartData>
private width = 0
private dom!: {
dot?: Selection<SVGCircleElement, ChartData, SVGGElement, unknown>
pointer?: Selection<SVGTextElement, unknown, HTMLElement, unknown>
svg: Selection<SVGSVGElement, unknown, HTMLElement, unknown>
svgG: Selection<SVGGElement, unknown, HTMLElement, unknown>
xAxis?: Axis<number>
yAxis?: Axis<number>
}
private vars!: {
colorScale: ScaleOrdinal<string, string>
focused: boolean
radiusScale: ScalePower<number, number>
xScale: FishEyeScale
yScale: FishEyeScale
}
public constructor(chartConfig: FishEyeChartOpts<ChartData>) {
this.config = chartConfig
this.setupRootEl()
this.setVars()
this.setDom()
this.setChartTitle()
this.setBackground()
this.setPointer()
this.setAxis()
this.setLabels()
this.setDots()
this.setTitles()
this.updateDimensions()
this.bindMousemove()
this.bindMouseLeave()
this.bindClick()
this.bindResize()
this.setZoom({
animationDuration: 0,
distortion: 0,
focus: [0, 0],
})
}
private static isTouchDevice() {
return (
"ontouchstart" in window ||
navigator.maxTouchPoints > 0 ||
(navigator as any).msMaxTouchPoints > 0 // eslint-disable-line @typescript-eslint/no-explicit-any
)
}
public refresh() {
this.updateDimensions(1000)
}
private setupRootEl() {
const rootEl = document.getElementById(this.config.rootElId) as HTMLElement
rootEl.classList.add(styles.fishEyeChart)
this.width =
rootEl.getBoundingClientRect().width - margin.left - margin.right
}
private isSmallDevice() {
return this.width < 500
}
private setDom() {
const svg = select(`#${this.config.rootElId}`).append("svg")
const svgG = svg.append("g")
this.dom = {
svg,
svgG,
}
}
private setChartTitle() {
this.dom.svgG
.append("text")
.attr("class", styles.chartTitle)
.attr("text-anchor", "middle")
.style("font-weight", "bold")
}
private setVars() {
const colorScale = scaleOrdinal<string>()
.domain(this.config.colorDomain)
.range(schemePastel2)
const radiusScale = scaleSqrt().domain([0, 5e8]).range([5, 60])
const xScale = d3Fisheye
.scale(scaleLog)
.domain([200, 1e5])
.range([0, this.width]) as FishEyeScale
const yScale = d3Fisheye
.scale(scaleLinear)
.domain([20, 90])
.range([height, 0]) as FishEyeScale
this.vars = {
colorScale,
focused: false,
radiusScale, | yScale,
}
}
private setAxis() {
const formatFn = format(",d")
this.dom.xAxis = axisBottom(this.vars.xScale as AxisScale<number>)
.tickFormat((tickNumber) => {
if (tickNumber < 1000) {
return formatFn(tickNumber)
}
const reducedNum = Math.round(tickNumber / 1000)
return `${formatFn(reducedNum)}k`
})
.tickSize(-height)
this.dom.yAxis = axisLeft(this.vars.yScale as AxisScale<number>).tickSize(
-this.width
)
this.dom.svgG
.append("g")
.attr("class", `x ${styles.axis}`)
.attr("transform", `translate(0,${height})`)
.call(this.dom.xAxis)
this.dom.svgG
.append("g")
.attr("class", `y ${styles.axis}`)
.call(this.dom.yAxis)
}
private setBackground() {
return this.dom.svgG.append("rect").attr("class", styles.background)
}
private setLabels() {
this.dom.svgG
.append("text")
.attr("class", "x label")
.attr("text-anchor", "middle")
.text(this.config.xAxisLabel)
this.dom.svgG
.append("text")
.attr("class", "y label")
.attr("text-anchor", "middle")
.attr("x", -height / 2)
.attr("y", -40)
.attr("dy", ".75em")
.attr("transform", "rotate(-90)")
.text(this.config.yAxisLabel)
}
private position(animationDuration: number) {
this.dom.svgG.attr(
"transform",
`translate(${
margin.left - (this.isSmallDevice() ? LEFT_OFFSET_SMALL_DEVICE : 0)
},${margin.top})`
)
this.dom
// Sort the circles by radius, so the largest circles appear below
.dot!.sort(
(...[chartItemA, chartItemB]) =>
this.config.getRadiusValue(chartItemB) -
this.config.getRadiusValue(chartItemA)
)
.transition()
.duration(animationDuration)
.attr("cx", (chartItem) => {
const xValue = this.config.getXValue(chartItem)
return this.vars.xScale(xValue) as number
})
.attr("cy", (chartItem) => {
const yValue = this.config.getYValue(chartItem)
return this.vars.yScale(yValue) as number
})
.attr("r", (chartItem) => {
const radiusValue = this.config.getRadiusValue(chartItem)
return (
this.vars.radiusScale(radiusValue) / (this.isSmallDevice() ? 2 : 1)
)
})
this.dom.xAxis!.ticks(this.isSmallDevice() ? 2 : undefined)
this.dom.svgG
.select<SVGGElement>(`.x.${styles.axis}`)
.transition()
.duration(animationDuration)
.call(this.dom.xAxis!)
this.dom.svgG
.select<SVGGElement>(`.y.${styles.axis}`)
.transition()
.duration(animationDuration)
.call(this.dom.yAxis!)
}
private setDots() {
this.dom.dot = this.dom.svgG
.append("g")
.attr("class", "dots")
.selectAll(".dot")
.data<ChartData>(this.config.chartItems)
.enter()
.append("circle")
.attr("class", "dot")
.style("fill", (chartItem) => {
const colorValue = this.config.getColorValue(chartItem)
return this.vars.colorScale(colorValue)
})
.style("stroke", "black")
.style('"stroke-width"', "1px")
this.position(0)
}
private setTitles() {
this.dom.dot!.append("title").attr("class", "dot-title")
this.updateTitles()
}
private setZoom({
animationDuration,
distortion,
focus,
}: {
animationDuration: number
distortion: number
focus: [number, number]
}) {
this.vars.xScale.distortion(distortion).focus(focus[0])
this.vars.yScale.distortion(distortion).focus(focus[1])
this.position(animationDuration)
}
private updateTitles() {
this.dom
.dot!.selectAll<SVGTitleElement, ChartData>(".dot-title")
.text((chartItem) => this.config.getCircleTitle(chartItem))
this.dom.svgG
.select<SVGTitleElement>(`.${styles.chartTitle}`)
.text(
this.isSmallDevice()
? this.config.titles.short
: this.config.titles.long
)
}
private zoom({
animationDuration,
interactionEvent,
}: {
animationDuration: number
interactionEvent: Event
}) {
const focus = pointerD3(interactionEvent)
this.setZoom({
animationDuration,
distortion: 2.5,
focus,
})
}
private setPointer() {
this.dom.pointer = this.dom.svgG
.append("text")
.text("+")
.attr("class", styles.pointer)
}
private bindMousemove() {
return this.dom.svgG.on("mousemove", (interactionEvent) => {
if (FishEyeChart.isTouchDevice()) {
return
}
if (!this.vars.focused) {
this.zoom({
animationDuration: 0,
interactionEvent,
})
}
})
}
private bindMouseLeave() {
return this.dom.svgG.on("mouseleave", () => {
if (!this.vars.focused) {
this.setZoom({
animationDuration: 1000,
distortion: 0,
focus: [0, 0],
})
}
})
}
private bindClick() {
this.dom.svgG.on("click", (interactionEvent: Event) => {
const isTouchDevice = FishEyeChart.isTouchDevice()
if (!isTouchDevice) {
this.vars.focused = !this.vars.focused
if (this.vars.focused) {
const pointer = pointerD3(this)
this.dom
.pointer!.attr("x", pointer[0])
.attr("y", pointer[1])
.style("opacity", 1)
return
}
}
this.dom.pointer!.style("opacity", 0)
this.zoom({
animationDuration: isTouchDevice ? 1000 : 0,
interactionEvent,
})
})
}
private updateDimensions(animationDuration = 0) {
this.setupRootEl()
const isSmallDevice = this.isSmallDevice()
const widthOffset = isSmallDevice ? LEFT_OFFSET_SMALL_DEVICE : 0
const totalWidth = this.width + widthOffset
this.dom.svg
.attr("width", this.width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
this.dom.svgG
.select(`.${styles.chartTitle}`)
.attr("transform", `translate(${totalWidth / 2},-40)`)
this.dom.svgG
.select(`.${styles.background}`)
.attr("width", this.width)
.attr("height", height)
this.dom.svgG
.select(".x.label")
.attr("y", height + 26)
.attr("x", this.width / 2)
this.vars.xScale.range([0, totalWidth])
this.updateTitles()
this.position(animationDuration)
}
private bindResize() {
window.addEventListener("resize", () => {
this.updateDimensions()
})
}
}
export { FishEyeChart, FishEyeChartOpts } | xScale, | random_line_split |
fish-eye-chart.ts | import {
Axis,
AxisScale,
ScaleOrdinal,
ScalePower,
Selection,
axisBottom,
axisLeft,
format,
pointer as pointerD3,
scaleLinear,
scaleLog,
scaleOrdinal,
scaleSqrt,
schemePastel2,
select,
} from "d3"
import d3Fisheye, { FishEyeScale } from "@/utils/fishEye"
import * as styles from "./fish-eye.module.css"
const margin = {
bottom: 70,
left: 70,
right: 50,
top: 80,
}
const LEFT_OFFSET_SMALL_DEVICE = 20
const height = 700 - margin.top - margin.bottom
type FishEyeChartOpts<ChartData> = Readonly<{
chartItems: ChartData[]
colorDomain: string[]
getCircleTitle: (chartItem: ChartData) => string
getColorValue: (chartItem: ChartData) => string
getRadiusValue: (chartItem: ChartData) => number
getXValue: (chartItem: ChartData) => number
getYValue: (chartItem: ChartData) => number
rootElId: string
titles: {
long: string
short: string
}
xAxisLabel: string
yAxisLabel: string
}>
class FishEyeChart<ChartData> {
private readonly config: FishEyeChartOpts<ChartData>
private width = 0
private dom!: {
dot?: Selection<SVGCircleElement, ChartData, SVGGElement, unknown>
pointer?: Selection<SVGTextElement, unknown, HTMLElement, unknown>
svg: Selection<SVGSVGElement, unknown, HTMLElement, unknown>
svgG: Selection<SVGGElement, unknown, HTMLElement, unknown>
xAxis?: Axis<number>
yAxis?: Axis<number>
}
private vars!: {
colorScale: ScaleOrdinal<string, string>
focused: boolean
radiusScale: ScalePower<number, number>
xScale: FishEyeScale
yScale: FishEyeScale
}
public constructor(chartConfig: FishEyeChartOpts<ChartData>) {
this.config = chartConfig
this.setupRootEl()
this.setVars()
this.setDom()
this.setChartTitle()
this.setBackground()
this.setPointer()
this.setAxis()
this.setLabels()
this.setDots()
this.setTitles()
this.updateDimensions()
this.bindMousemove()
this.bindMouseLeave()
this.bindClick()
this.bindResize()
this.setZoom({
animationDuration: 0,
distortion: 0,
focus: [0, 0],
})
}
private static isTouchDevice() {
return (
"ontouchstart" in window ||
navigator.maxTouchPoints > 0 ||
(navigator as any).msMaxTouchPoints > 0 // eslint-disable-line @typescript-eslint/no-explicit-any
)
}
public refresh() {
this.updateDimensions(1000)
}
private setupRootEl() {
const rootEl = document.getElementById(this.config.rootElId) as HTMLElement
rootEl.classList.add(styles.fishEyeChart)
this.width =
rootEl.getBoundingClientRect().width - margin.left - margin.right
}
private isSmallDevice() {
return this.width < 500
}
private setDom() {
const svg = select(`#${this.config.rootElId}`).append("svg")
const svgG = svg.append("g")
this.dom = {
svg,
svgG,
}
}
private setChartTitle() {
this.dom.svgG
.append("text")
.attr("class", styles.chartTitle)
.attr("text-anchor", "middle")
.style("font-weight", "bold")
}
private setVars() {
const colorScale = scaleOrdinal<string>()
.domain(this.config.colorDomain)
.range(schemePastel2)
const radiusScale = scaleSqrt().domain([0, 5e8]).range([5, 60])
const xScale = d3Fisheye
.scale(scaleLog)
.domain([200, 1e5])
.range([0, this.width]) as FishEyeScale
const yScale = d3Fisheye
.scale(scaleLinear)
.domain([20, 90])
.range([height, 0]) as FishEyeScale
this.vars = {
colorScale,
focused: false,
radiusScale,
xScale,
yScale,
}
}
private setAxis() {
const formatFn = format(",d")
this.dom.xAxis = axisBottom(this.vars.xScale as AxisScale<number>)
.tickFormat((tickNumber) => {
if (tickNumber < 1000) {
return formatFn(tickNumber)
}
const reducedNum = Math.round(tickNumber / 1000)
return `${formatFn(reducedNum)}k`
})
.tickSize(-height)
this.dom.yAxis = axisLeft(this.vars.yScale as AxisScale<number>).tickSize(
-this.width
)
this.dom.svgG
.append("g")
.attr("class", `x ${styles.axis}`)
.attr("transform", `translate(0,${height})`)
.call(this.dom.xAxis)
this.dom.svgG
.append("g")
.attr("class", `y ${styles.axis}`)
.call(this.dom.yAxis)
}
private setBackground() {
return this.dom.svgG.append("rect").attr("class", styles.background)
}
private setLabels() {
this.dom.svgG
.append("text")
.attr("class", "x label")
.attr("text-anchor", "middle")
.text(this.config.xAxisLabel)
this.dom.svgG
.append("text")
.attr("class", "y label")
.attr("text-anchor", "middle")
.attr("x", -height / 2)
.attr("y", -40)
.attr("dy", ".75em")
.attr("transform", "rotate(-90)")
.text(this.config.yAxisLabel)
}
private position(animationDuration: number) {
this.dom.svgG.attr(
"transform",
`translate(${
margin.left - (this.isSmallDevice() ? LEFT_OFFSET_SMALL_DEVICE : 0)
},${margin.top})`
)
this.dom
// Sort the circles by radius, so the largest circles appear below
.dot!.sort(
(...[chartItemA, chartItemB]) =>
this.config.getRadiusValue(chartItemB) -
this.config.getRadiusValue(chartItemA)
)
.transition()
.duration(animationDuration)
.attr("cx", (chartItem) => {
const xValue = this.config.getXValue(chartItem)
return this.vars.xScale(xValue) as number
})
.attr("cy", (chartItem) => {
const yValue = this.config.getYValue(chartItem)
return this.vars.yScale(yValue) as number
})
.attr("r", (chartItem) => {
const radiusValue = this.config.getRadiusValue(chartItem)
return (
this.vars.radiusScale(radiusValue) / (this.isSmallDevice() ? 2 : 1)
)
})
this.dom.xAxis!.ticks(this.isSmallDevice() ? 2 : undefined)
this.dom.svgG
.select<SVGGElement>(`.x.${styles.axis}`)
.transition()
.duration(animationDuration)
.call(this.dom.xAxis!)
this.dom.svgG
.select<SVGGElement>(`.y.${styles.axis}`)
.transition()
.duration(animationDuration)
.call(this.dom.yAxis!)
}
private setDots() {
this.dom.dot = this.dom.svgG
.append("g")
.attr("class", "dots")
.selectAll(".dot")
.data<ChartData>(this.config.chartItems)
.enter()
.append("circle")
.attr("class", "dot")
.style("fill", (chartItem) => {
const colorValue = this.config.getColorValue(chartItem)
return this.vars.colorScale(colorValue)
})
.style("stroke", "black")
.style('"stroke-width"', "1px")
this.position(0)
}
private setTitles() {
this.dom.dot!.append("title").attr("class", "dot-title")
this.updateTitles()
}
private setZoom({
animationDuration,
distortion,
focus,
}: {
animationDuration: number
distortion: number
focus: [number, number]
}) {
this.vars.xScale.distortion(distortion).focus(focus[0])
this.vars.yScale.distortion(distortion).focus(focus[1])
this.position(animationDuration)
}
private updateTitles() {
this.dom
.dot!.selectAll<SVGTitleElement, ChartData>(".dot-title")
.text((chartItem) => this.config.getCircleTitle(chartItem))
this.dom.svgG
.select<SVGTitleElement>(`.${styles.chartTitle}`)
.text(
this.isSmallDevice()
? this.config.titles.short
: this.config.titles.long
)
}
private zoom({
animationDuration,
interactionEvent,
}: {
animationDuration: number
interactionEvent: Event
}) {
const focus = pointerD3(interactionEvent)
this.setZoom({
animationDuration,
distortion: 2.5,
focus,
})
}
private setPointer() {
this.dom.pointer = this.dom.svgG
.append("text")
.text("+")
.attr("class", styles.pointer)
}
private bindMousemove() {
return this.dom.svgG.on("mousemove", (interactionEvent) => {
if (FishEyeChart.isTouchDevice()) |
if (!this.vars.focused) {
this.zoom({
animationDuration: 0,
interactionEvent,
})
}
})
}
private bindMouseLeave() {
return this.dom.svgG.on("mouseleave", () => {
if (!this.vars.focused) {
this.setZoom({
animationDuration: 1000,
distortion: 0,
focus: [0, 0],
})
}
})
}
private bindClick() {
this.dom.svgG.on("click", (interactionEvent: Event) => {
const isTouchDevice = FishEyeChart.isTouchDevice()
if (!isTouchDevice) {
this.vars.focused = !this.vars.focused
if (this.vars.focused) {
const pointer = pointerD3(this)
this.dom
.pointer!.attr("x", pointer[0])
.attr("y", pointer[1])
.style("opacity", 1)
return
}
}
this.dom.pointer!.style("opacity", 0)
this.zoom({
animationDuration: isTouchDevice ? 1000 : 0,
interactionEvent,
})
})
}
private updateDimensions(animationDuration = 0) {
this.setupRootEl()
const isSmallDevice = this.isSmallDevice()
const widthOffset = isSmallDevice ? LEFT_OFFSET_SMALL_DEVICE : 0
const totalWidth = this.width + widthOffset
this.dom.svg
.attr("width", this.width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
this.dom.svgG
.select(`.${styles.chartTitle}`)
.attr("transform", `translate(${totalWidth / 2},-40)`)
this.dom.svgG
.select(`.${styles.background}`)
.attr("width", this.width)
.attr("height", height)
this.dom.svgG
.select(".x.label")
.attr("y", height + 26)
.attr("x", this.width / 2)
this.vars.xScale.range([0, totalWidth])
this.updateTitles()
this.position(animationDuration)
}
private bindResize() {
window.addEventListener("resize", () => {
this.updateDimensions()
})
}
}
export { FishEyeChart, FishEyeChartOpts }
| {
return
} | conditional_block |
modular.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT license OR Apache 2.0
use authentication::perform_authentication;
use futures::{
future::{self, TryFutureExt},
Future, Stream, StreamExt, TryStreamExt,
};
use std::sync::Arc;
use tokio::sync::broadcast::{channel as event_channel, Sender as Broadcaster};
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use crate::{
common::{
authentication::{
self, AuthenticationError, AuthenticationHandler, AuthenticationHandlingError,
},
protocol::{
negotiation::{self, NegotiationError, NegotiationService},
request_handler::RequestClientHandler,
traits::{
SerializedTunnelRegistry, ServiceRegistry, TunnelNamingError, TunnelRegistrationError,
TunnelRegistry,
},
tunnel::{
self, id::TunnelIDGenerator, Tunnel, TunnelDownlink, TunnelError, TunnelId,
TunnelIncomingType, TunnelName,
},
RouteAddress, Router,
},
},
util::tunnel_stream::WrappedStream,
};
pub struct ModularDaemon<TTunnel> {
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
router: Arc<dyn Router + Send + Sync + 'static>,
request_handler: Arc<RequestClientHandler>,
authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
// event hooks
pub tunnel_connected: Broadcaster<(TunnelId, Arc<TTunnel>)>,
pub tunnel_authenticated: Broadcaster<(TunnelId, TunnelName, Arc<TTunnel>)>,
pub tunnel_disconnected:
Broadcaster<(TunnelId, Option<TunnelName> /*, DisconnectReason? */)>,
}
impl<TTunnel> ModularDaemon<TTunnel> {
pub fn requests<'a>(&'a self) -> &Arc<RequestClientHandler> {
&self.request_handler
}
fn authenticate_tunnel<'a>(
self: &Arc<Self>,
tunnel: tunnel::ArcTunnel<'a>,
shutdown: &CancellationToken,
) -> impl Future<Output = Result<Option<(tunnel::TunnelName, tunnel::ArcTunnel<'a>)>, anyhow::Error>>
+ 'a {
let shutdown = shutdown.clone();
let authentication_handler = Arc::clone(&self.authentication_handler);
async move {
let result = perform_authentication(
authentication_handler.as_ref(),
tunnel.as_ref(),
&shutdown.into(),
)
.await;
match result {
Err(AuthenticationError::Handling(AuthenticationHandlingError::FatalApplicationError(
fatal_error,
))) => {
tracing::error!(reason=?fatal_error, "Authentication encountered fatal error!");
anyhow::Context::context(
Err(fatal_error),
"Fatal error encountered while handling authentication",
)
}
Err(AuthenticationError::Handling(handling_error)) => {
// Non-fatal handling errors are passed to tracing and close the tunnel
tracing::warn!(
reason = (&handling_error as &dyn std::error::Error),
"Tunnel closed due to authentication handling failure"
);
Ok(None)
}
Err(AuthenticationError::Remote(remote_error)) => {
tracing::debug!(
reason = (&remote_error as &dyn std::error::Error),
"Tunnel closed due to remote authentication failure"
);
Ok(None)
}
Ok(tunnel_name) => Ok(Some((tunnel_name, tunnel))),
}
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
Self: 'static,
{
pub fn new(
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
router: Arc<dyn Router + Send + Sync + 'static>,
authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
) -> Self {
Self {
request_handler: Arc::new(RequestClientHandler::new(
Arc::clone(&tunnel_registry),
Arc::clone(&service_registry),
Arc::clone(&router),
)),
service_registry,
tunnel_registry,
router,
authentication_handler,
tunnel_id_generator,
// For event handlers, we simply drop the receive sides,
// as new ones can be made with Sender::subscribe(&self)
tunnel_connected: event_channel(32).0,
tunnel_authenticated: event_channel(32).0,
tunnel_disconnected: event_channel(32).0,
}
}
/// Run the server against a tunnel_source.
///
/// This can be performed concurrently against multiple sources, with a shared server instance.
/// The implementation assumes that shutdown_request_listener will also halt the tunnel_source.
pub fn run<TunnelSource, TIntoTunnel>(
self: Arc<Self>,
tunnel_source: TunnelSource,
shutdown_request_listener: CancellationToken,
) -> tokio::task::JoinHandle<()>
where
TunnelSource: Stream<Item = TIntoTunnel> + Send + 'static,
TIntoTunnel: Into<TTunnel>,
TTunnel: Tunnel + 'static,
{
let this = Arc::clone(&self);
// Pipeline phases:
// Attach baggage - Arcs need cloned once per incoming tunnel, if they need to access it
// The baggage attachment phase takes the initial Arc items clones them per-stream
// This also generates a u64 as an ID for this tunnel, using a naive interlocked/atomic counter
let pipeline = tunnel_source
.take_until({
let shutdown_request_listener = shutdown_request_listener.clone();
async move { shutdown_request_listener.cancelled().await }
})
.scan(
(this, shutdown_request_listener),
|(this, shutdown_request_listener), tunnel| {
let id = this.tunnel_id_generator.next();
let tunnel: TTunnel = tunnel.into();
future::ready(Some((
tunnel,
id,
this.clone(),
shutdown_request_listener.clone(),
)))
},
);
// Tunnel Lifecycle - Sub-pipeline performed by futures on a per-tunnel basis
// This could be done at the stream level, but Rust-Analyzer's typesystem struggles
// to understand stream associated types at this level.
let pipeline = pipeline.for_each_concurrent(
None,
|(tunnel, id, this, shutdown_request_listener)| async move {
let tunnel = Arc::new(tunnel);
if let Err(e) = this
.tunnel_lifecycle(id, tunnel, shutdown_request_listener)
.await
{
tracing::debug!(error=?e, "tunnel lifetime exited with error");
}
},
);
// Spawn an instrumented task for the server which will return
// when all connections shut down and the tunnel source closes
tokio::task::spawn(pipeline.instrument(tracing::span!(tracing::Level::INFO, "modular_server")))
}
}
#[derive(thiserror::Error, Debug)]
enum TunnelLifecycleError {
#[error(transparent)]
RegistrationError(#[from] TunnelRegistrationError),
#[error(transparent)]
RegistryNamingError(#[from] TunnelNamingError),
#[error(transparent)]
RequestProcessingError(RequestProcessingError),
#[error("Authentication refused to remote by either breach of protocol or invalid/inadequate credentials")]
AuthenticationRefused,
#[error("Fatal error encountered in tunnel lifecycle: {0:?}")]
FatalError(anyhow::Error),
}
#[derive(thiserror::Error, Debug)]
enum RequestProcessingError {
#[error("Protocol version mismatch")]
UnsupportedProtocolVersion,
#[error("Tunnel error encountered: {0}")]
TunnelError(TunnelError),
#[error(transparent)]
FatalError(anyhow::Error),
}
impl From<RequestProcessingError> for TunnelLifecycleError {
fn from(e: RequestProcessingError) -> TunnelLifecycleError {
match e {
RequestProcessingError::FatalError(fatal_error) => {
TunnelLifecycleError::FatalError(fatal_error)
}
non_fatal => TunnelLifecycleError::RequestProcessingError(non_fatal),
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
TTunnel: Tunnel + 'static,
{
fn tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
) -> impl Future<Output = Result<(), TunnelLifecycleError>> + 'static {
async move {
// A registry mutex that prevents us from racing when calling the registry for
// this particular tunnel entry. This should also be enforced at the registry level.
let serialized_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static> = Arc::new(SerializedTunnelRegistry::new(Arc::clone(&self.tunnel_registry)));
// Tunnel registration - The tunnel registry is called to imbue the tunnel with an ID
{
let tunnel_registry = Arc::clone(&serialized_registry);
Self::register_tunnel(id, Arc::clone(&tunnel), tunnel_registry)
.instrument(tracing::span!(tracing::Level::DEBUG, "registration", ?id))
}.await?;
// Send tunnel_connected event once the tunnel is successfully registered to its ID
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self.tunnel_connected.send((id, tunnel.clone()));
// From here on, any failure must trigger attempted deregistration of the tunnel,
// So further phases return their result to check for failures, which then result
// in a deregistration call.
// Phases resume in registered_tunnel_lifecycle.
let tunnel_registry = Arc::clone(&serialized_registry);
match self.registered_tunnel_lifecycle(id, tunnel, shutdown, tunnel_registry).await {
Ok(lifecycle_result) => Ok(lifecycle_result),
Err(e) => {
let deregistered = serialized_registry.deregister_tunnel(id).await.ok();
match &e {
&TunnelLifecycleError::AuthenticationRefused => tracing::debug!(err=?e, record=?deregistered, "Deregistered due to authentication refusal"),
e => tracing::info!(err=?e, record=?deregistered, "Deregistered due to lifecycle error")
}
Err(e)
}
}
}.instrument(tracing::span!(tracing::Level::DEBUG, "tunnel", ?id))
}
async fn registered_tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
serialized_tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
) -> Result<(), TunnelLifecycleError> {
// Authenticate connections - Each connection will be piped into the authenticator,
// which has the option of declining the connection, and may save additional metadata.
let tunnel_authentication = {
self
.authenticate_tunnel(tunnel.clone(), &shutdown)
.instrument(tracing::span!(tracing::Level::DEBUG, "authentication", ?id))
.map_err(TunnelLifecycleError::FatalError)
};
let tunnel_name = match tunnel_authentication.await? {
Some((tunnel_name, _tunnel_dyn)) => tunnel_name,
None => |
};
// Tunnel naming - The tunnel registry is notified of the authenticator-provided tunnel name
{
let tunnel_registry = Arc::clone(&serialized_tunnel_registry);
Self::name_tunnel(id, tunnel_name.clone(), tunnel_registry).instrument(tracing::span!(
tracing::Level::DEBUG,
"naming",
?id
))
}
.await?;
// Send tunnel_authenticated event for the newly-named tunnel, once the registry is aware of it
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self
.tunnel_authenticated
.send((id, tunnel_name.clone(), tunnel.clone()));
// Process incoming requests until the incoming channel is closed.
{
let service_registry = Arc::clone(&self.service_registry);
Self::handle_incoming_requests(
id,
tunnel
.downlink()
.await
.ok_or(TunnelLifecycleError::RequestProcessingError(
RequestProcessingError::TunnelError(TunnelError::ConnectionClosed),
))?,
service_registry,
shutdown,
)
.instrument(tracing::span!(
tracing::Level::DEBUG,
"request_handling",
?id
))
}
.await?;
// Deregister closed tunnels after graceful exit
let _record = serialized_tunnel_registry.deregister_tunnel(id).await;
// TODO: Find a way to call self.tunnel_disconnected automatically, and simplify deregistration code path
// Otherwise, these deregister calls are an absurd amount of complexity.
// Maybe use drop semantics paired with a cancellation token and a task?
Ok(())
}
// Process incoming requests until the incoming channel is closed.
// Await a tunnel closure request from the host, or for the tunnel to close on its own.
// A tunnel has "closed on its own" if incoming closes *or* outgoing requests fail with
// a notification that the outgoing channel has been closed.
//
// The request handler for this side should be configured to send a close request for
// the tunnel with the given ID when it sees a request fail due to tunnel closure.
// TODO: configure request handler (?) to do that using a std::sync::Weak<ModularDaemon>.
async fn handle_incoming_requests<TDownlink: TunnelDownlink>(
id: TunnelId,
mut incoming: TDownlink,
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError> {
let negotiator = Arc::new(NegotiationService::new(service_registry));
incoming
.as_stream()
// Stop accepting new requests after a graceful shutdown is requested
.take_until(shutdown.clone().cancelled())
.map_err(|e: TunnelError| RequestProcessingError::TunnelError(e))
.scan((negotiator, shutdown), |(negotiator, shutdown), link| {
let res = link.map(|content| (Arc::clone(&*negotiator), shutdown.clone(), content));
future::ready(Some(res))
})
.try_for_each_concurrent(None, |(negotiator, shutdown, link)| {
Self::handle_incoming_request(id, link, negotiator, shutdown)
})
.await?;
Ok(())
}
async fn handle_incoming_request<Services>(
id: TunnelId,
link: TunnelIncomingType,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync + ?Sized + 'static,
{
match link {
tunnel::TunnelIncomingType::BiStream(link) => {
Self::handle_incoming_request_bistream(id, link, negotiator, shutdown).await
}
}
}
async fn handle_incoming_request_bistream<Services>(
tunnel_id: TunnelId,
link: WrappedStream,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken, // TODO: Respond to shutdown listener requests
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync + ?Sized + 'static,
{
match negotiator.negotiate(link, tunnel_id).await {
// Tunnels established on an invalid negotiation protocol are useless; consider this fatal
Err(NegotiationError::UnsupportedProtocolVersion) => {
Err(RequestProcessingError::UnsupportedProtocolVersion)
}
// Protocol violations are not considered fatal, as they do not affect other links
// They do still destroy the current link, however.
Err(NegotiationError::ProtocolViolation) => Ok(()),
Err(NegotiationError::ReadError) => Ok(()),
Err(NegotiationError::WriteError) => Ok(()),
// Generic refusal for when a service doesn't accept a route for whatever reason
Err(NegotiationError::Refused) => {
tracing::debug!("Refused remote protocol request");
Ok(())
}
// Lack of support for a service is just a more specific refusal
Err(NegotiationError::UnsupportedServiceVersion) => {
tracing::debug!("Refused request due to unsupported service version");
Ok(())
}
Err(NegotiationError::ApplicationError(e)) => {
tracing::warn!(err=?e, "Refused request due to application error in negotiation");
Ok(())
}
Err(NegotiationError::FatalError(e)) => {
tracing::error!(err=?e, "Refused request due to fatal application error in negotiation");
Err(RequestProcessingError::FatalError(
NegotiationError::FatalError(e).into(),
))
}
Ok((link, route_addr, service)) => {
if shutdown.is_cancelled() {
// Drop services post-negotiation if the connection is awaiting
// shutdown, instead of handing them to the service to be performed.
return Ok(());
}
let route_addr: RouteAddress = route_addr;
let service: negotiation::ArcService = service;
match service
.handle(route_addr.clone(), Box::new(link), tunnel_id)
.await
{
// TODO: Figure out which of these should be considered fatal to the tunnel, if any
Err(e) => {
tracing::debug!(
address = route_addr.as_str(),
error = ?e,
"Protocol Service responded with non-fatal error"
);
Ok(())
}
Ok(()) => {
tracing::trace!(
address = route_addr.as_str(),
"Protocol Service reported success"
);
Ok(())
}
}
}
}
}
async fn register_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel: Arc<TTunnel>,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelRegistrationError>
where
TTunnelRegistry: std::ops::Deref + Send + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let registration = async move {
tunnel_registry
.register_tunnel(id, tunnel)
.map_err(|e| match e {
TunnelRegistrationError::IdOccupied(id) => {
tracing::error!(?id, "ID occupied; dropping tunnel");
TunnelRegistrationError::IdOccupied(id)
}
TunnelRegistrationError::NameOccupied(name) => {
// This error indicates that the tunnel registry is reporting names incorrectly, or
// holding entries from prior launches beyond the lifetime of the server that created them
tracing::error!(
"Name reported as occupied, but we haven't named this tunnel yet; dropping tunnel"
);
TunnelRegistrationError::NameOccupied(name)
}
TunnelRegistrationError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel registration");
TunnelRegistrationError::ApplicationError(e)
}
})
.await
};
tokio::spawn(registration).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelRegistrationError::ApplicationError(anyhow::Error::msg("Registration task cancelled"))
}
})?
}
async fn name_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel_name: TunnelName,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelNamingError>
where
TTunnelRegistry: std::ops::Deref + Send + Sync + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let naming = async move {
tunnel_registry
.deref()
.name_tunnel(id, tunnel_name)
.map_err(|e| match e {
// If a tunnel registry wishes to keep a tunnel alive past a naming clash, it
// must rename the existing tunnel then name the new one, and report Ok here.
TunnelNamingError::NameOccupied(name) => {
tracing::error!(?id, "Name reports as occupied; dropping tunnel");
TunnelNamingError::NameOccupied(name)
}
TunnelNamingError::TunnelNotRegistered(id) => {
// This indicates out-of-order processing on per-tunnel events in the registry
// To solve this, the tunnel registry task complete event processing in-order
// for events produced by a given tunnel's lifetime. The simplest way is to
// serialize all registry changes using a tokio::task with an ordered channel.
tracing::error!("Tunnel reported as not registered from naming task");
TunnelNamingError::TunnelNotRegistered(id)
}
TunnelNamingError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel naming");
TunnelNamingError::ApplicationError(e)
}
})
.await
};
tokio::spawn(naming).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelNamingError::ApplicationError(anyhow::Error::msg("Naming task cancelled"))
}
})?
}
}
| {
let _ = serialized_tunnel_registry.deregister_tunnel(id).await;
return Ok(());
} | conditional_block |
modular.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT license OR Apache 2.0
use authentication::perform_authentication;
use futures::{
future::{self, TryFutureExt},
Future, Stream, StreamExt, TryStreamExt,
};
use std::sync::Arc;
use tokio::sync::broadcast::{channel as event_channel, Sender as Broadcaster};
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use crate::{
common::{
authentication::{
self, AuthenticationError, AuthenticationHandler, AuthenticationHandlingError,
},
protocol::{
negotiation::{self, NegotiationError, NegotiationService},
request_handler::RequestClientHandler,
traits::{
SerializedTunnelRegistry, ServiceRegistry, TunnelNamingError, TunnelRegistrationError,
TunnelRegistry,
},
tunnel::{
self, id::TunnelIDGenerator, Tunnel, TunnelDownlink, TunnelError, TunnelId,
TunnelIncomingType, TunnelName,
},
RouteAddress, Router,
},
},
util::tunnel_stream::WrappedStream,
};
pub struct ModularDaemon<TTunnel> {
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
router: Arc<dyn Router + Send + Sync + 'static>,
request_handler: Arc<RequestClientHandler>,
authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
// event hooks
pub tunnel_connected: Broadcaster<(TunnelId, Arc<TTunnel>)>,
pub tunnel_authenticated: Broadcaster<(TunnelId, TunnelName, Arc<TTunnel>)>,
pub tunnel_disconnected:
Broadcaster<(TunnelId, Option<TunnelName> /*, DisconnectReason? */)>,
}
impl<TTunnel> ModularDaemon<TTunnel> {
pub fn requests<'a>(&'a self) -> &Arc<RequestClientHandler> {
&self.request_handler
}
fn authenticate_tunnel<'a>(
self: &Arc<Self>,
tunnel: tunnel::ArcTunnel<'a>,
shutdown: &CancellationToken,
) -> impl Future<Output = Result<Option<(tunnel::TunnelName, tunnel::ArcTunnel<'a>)>, anyhow::Error>>
+ 'a {
let shutdown = shutdown.clone();
let authentication_handler = Arc::clone(&self.authentication_handler);
async move {
let result = perform_authentication(
authentication_handler.as_ref(),
tunnel.as_ref(),
&shutdown.into(),
)
.await;
match result {
Err(AuthenticationError::Handling(AuthenticationHandlingError::FatalApplicationError(
fatal_error,
))) => {
tracing::error!(reason=?fatal_error, "Authentication encountered fatal error!");
anyhow::Context::context(
Err(fatal_error),
"Fatal error encountered while handling authentication",
)
}
Err(AuthenticationError::Handling(handling_error)) => {
// Non-fatal handling errors are passed to tracing and close the tunnel
tracing::warn!(
reason = (&handling_error as &dyn std::error::Error),
"Tunnel closed due to authentication handling failure"
);
Ok(None)
}
Err(AuthenticationError::Remote(remote_error)) => {
tracing::debug!(
reason = (&remote_error as &dyn std::error::Error),
"Tunnel closed due to remote authentication failure"
);
Ok(None)
}
Ok(tunnel_name) => Ok(Some((tunnel_name, tunnel))),
}
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
Self: 'static,
{
pub fn new(
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
router: Arc<dyn Router + Send + Sync + 'static>,
authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
) -> Self {
Self {
request_handler: Arc::new(RequestClientHandler::new(
Arc::clone(&tunnel_registry),
Arc::clone(&service_registry),
Arc::clone(&router),
)),
service_registry,
tunnel_registry,
router,
authentication_handler,
tunnel_id_generator,
// For event handlers, we simply drop the receive sides,
// as new ones can be made with Sender::subscribe(&self)
tunnel_connected: event_channel(32).0,
tunnel_authenticated: event_channel(32).0,
tunnel_disconnected: event_channel(32).0,
}
}
/// Run the server against a tunnel_source.
///
/// This can be performed concurrently against multiple sources, with a shared server instance.
/// The implementation assumes that shutdown_request_listener will also halt the tunnel_source.
pub fn run<TunnelSource, TIntoTunnel>(
self: Arc<Self>,
tunnel_source: TunnelSource,
shutdown_request_listener: CancellationToken,
) -> tokio::task::JoinHandle<()>
where
TunnelSource: Stream<Item = TIntoTunnel> + Send + 'static,
TIntoTunnel: Into<TTunnel>,
TTunnel: Tunnel + 'static,
{
let this = Arc::clone(&self);
// Pipeline phases:
// Attach baggage - Arcs need cloned once per incoming tunnel, if they need to access it
// The baggage attachment phase takes the initial Arc items clones them per-stream
// This also generates a u64 as an ID for this tunnel, using a naive interlocked/atomic counter
let pipeline = tunnel_source
.take_until({
let shutdown_request_listener = shutdown_request_listener.clone();
async move { shutdown_request_listener.cancelled().await }
})
.scan(
(this, shutdown_request_listener),
|(this, shutdown_request_listener), tunnel| {
let id = this.tunnel_id_generator.next();
let tunnel: TTunnel = tunnel.into();
future::ready(Some((
tunnel,
id,
this.clone(),
shutdown_request_listener.clone(),
)))
},
);
// Tunnel Lifecycle - Sub-pipeline performed by futures on a per-tunnel basis
// This could be done at the stream level, but Rust-Analyzer's typesystem struggles
// to understand stream associated types at this level.
let pipeline = pipeline.for_each_concurrent(
None,
|(tunnel, id, this, shutdown_request_listener)| async move {
let tunnel = Arc::new(tunnel);
if let Err(e) = this
.tunnel_lifecycle(id, tunnel, shutdown_request_listener)
.await
{
tracing::debug!(error=?e, "tunnel lifetime exited with error");
}
},
);
// Spawn an instrumented task for the server which will return
// when all connections shut down and the tunnel source closes
tokio::task::spawn(pipeline.instrument(tracing::span!(tracing::Level::INFO, "modular_server")))
}
}
#[derive(thiserror::Error, Debug)]
enum TunnelLifecycleError {
#[error(transparent)]
RegistrationError(#[from] TunnelRegistrationError),
#[error(transparent)]
RegistryNamingError(#[from] TunnelNamingError),
#[error(transparent)]
RequestProcessingError(RequestProcessingError),
#[error("Authentication refused to remote by either breach of protocol or invalid/inadequate credentials")]
AuthenticationRefused,
#[error("Fatal error encountered in tunnel lifecycle: {0:?}")]
FatalError(anyhow::Error),
}
#[derive(thiserror::Error, Debug)]
enum RequestProcessingError {
#[error("Protocol version mismatch")]
UnsupportedProtocolVersion,
#[error("Tunnel error encountered: {0}")]
TunnelError(TunnelError),
#[error(transparent)]
FatalError(anyhow::Error),
}
impl From<RequestProcessingError> for TunnelLifecycleError {
fn from(e: RequestProcessingError) -> TunnelLifecycleError {
match e {
RequestProcessingError::FatalError(fatal_error) => {
TunnelLifecycleError::FatalError(fatal_error)
}
non_fatal => TunnelLifecycleError::RequestProcessingError(non_fatal),
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
TTunnel: Tunnel + 'static,
{
fn tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
) -> impl Future<Output = Result<(), TunnelLifecycleError>> + 'static {
async move {
// A registry mutex that prevents us from racing when calling the registry for
// this particular tunnel entry. This should also be enforced at the registry level.
let serialized_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static> = Arc::new(SerializedTunnelRegistry::new(Arc::clone(&self.tunnel_registry)));
// Tunnel registration - The tunnel registry is called to imbue the tunnel with an ID
{
let tunnel_registry = Arc::clone(&serialized_registry);
Self::register_tunnel(id, Arc::clone(&tunnel), tunnel_registry)
.instrument(tracing::span!(tracing::Level::DEBUG, "registration", ?id))
}.await?;
// Send tunnel_connected event once the tunnel is successfully registered to its ID
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self.tunnel_connected.send((id, tunnel.clone()));
// From here on, any failure must trigger attempted deregistration of the tunnel,
// So further phases return their result to check for failures, which then result
// in a deregistration call.
// Phases resume in registered_tunnel_lifecycle.
let tunnel_registry = Arc::clone(&serialized_registry);
match self.registered_tunnel_lifecycle(id, tunnel, shutdown, tunnel_registry).await {
Ok(lifecycle_result) => Ok(lifecycle_result),
Err(e) => {
let deregistered = serialized_registry.deregister_tunnel(id).await.ok();
match &e {
&TunnelLifecycleError::AuthenticationRefused => tracing::debug!(err=?e, record=?deregistered, "Deregistered due to authentication refusal"),
e => tracing::info!(err=?e, record=?deregistered, "Deregistered due to lifecycle error")
}
Err(e)
}
}
}.instrument(tracing::span!(tracing::Level::DEBUG, "tunnel", ?id))
}
async fn registered_tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
serialized_tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
) -> Result<(), TunnelLifecycleError> {
// Authenticate connections - Each connection will be piped into the authenticator,
// which has the option of declining the connection, and may save additional metadata.
let tunnel_authentication = {
self
.authenticate_tunnel(tunnel.clone(), &shutdown)
.instrument(tracing::span!(tracing::Level::DEBUG, "authentication", ?id))
.map_err(TunnelLifecycleError::FatalError)
};
let tunnel_name = match tunnel_authentication.await? {
Some((tunnel_name, _tunnel_dyn)) => tunnel_name,
None => {
let _ = serialized_tunnel_registry.deregister_tunnel(id).await;
return Ok(());
}
};
// Tunnel naming - The tunnel registry is notified of the authenticator-provided tunnel name
{
let tunnel_registry = Arc::clone(&serialized_tunnel_registry);
Self::name_tunnel(id, tunnel_name.clone(), tunnel_registry).instrument(tracing::span!(
tracing::Level::DEBUG,
"naming",
?id
))
}
.await?;
// Send tunnel_authenticated event for the newly-named tunnel, once the registry is aware of it
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self
.tunnel_authenticated
.send((id, tunnel_name.clone(), tunnel.clone()));
// Process incoming requests until the incoming channel is closed.
{
let service_registry = Arc::clone(&self.service_registry);
Self::handle_incoming_requests(
id,
tunnel
.downlink()
.await
.ok_or(TunnelLifecycleError::RequestProcessingError(
RequestProcessingError::TunnelError(TunnelError::ConnectionClosed),
))?,
service_registry,
shutdown,
)
.instrument(tracing::span!(
tracing::Level::DEBUG,
"request_handling",
?id
))
}
.await?;
// Deregister closed tunnels after graceful exit
let _record = serialized_tunnel_registry.deregister_tunnel(id).await;
// TODO: Find a way to call self.tunnel_disconnected automatically, and simplify deregistration code path
// Otherwise, these deregister calls are an absurd amount of complexity.
// Maybe use drop semantics paired with a cancellation token and a task?
Ok(())
}
// Process incoming requests until the incoming channel is closed.
// Await a tunnel closure request from the host, or for the tunnel to close on its own.
// A tunnel has "closed on its own" if incoming closes *or* outgoing requests fail with
// a notification that the outgoing channel has been closed.
//
// The request handler for this side should be configured to send a close request for
// the tunnel with the given ID when it sees a request fail due to tunnel closure.
// TODO: configure request handler (?) to do that using a std::sync::Weak<ModularDaemon>.
async fn handle_incoming_requests<TDownlink: TunnelDownlink>(
id: TunnelId,
mut incoming: TDownlink,
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError> {
let negotiator = Arc::new(NegotiationService::new(service_registry));
incoming
.as_stream()
// Stop accepting new requests after a graceful shutdown is requested
.take_until(shutdown.clone().cancelled())
.map_err(|e: TunnelError| RequestProcessingError::TunnelError(e))
.scan((negotiator, shutdown), |(negotiator, shutdown), link| {
let res = link.map(|content| (Arc::clone(&*negotiator), shutdown.clone(), content));
future::ready(Some(res))
})
.try_for_each_concurrent(None, |(negotiator, shutdown, link)| {
Self::handle_incoming_request(id, link, negotiator, shutdown)
})
.await?;
Ok(())
}
async fn handle_incoming_request<Services>(
id: TunnelId,
link: TunnelIncomingType,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync + ?Sized + 'static, | }
}
}
async fn handle_incoming_request_bistream<Services>(
tunnel_id: TunnelId,
link: WrappedStream,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken, // TODO: Respond to shutdown listener requests
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync + ?Sized + 'static,
{
match negotiator.negotiate(link, tunnel_id).await {
// Tunnels established on an invalid negotiation protocol are useless; consider this fatal
Err(NegotiationError::UnsupportedProtocolVersion) => {
Err(RequestProcessingError::UnsupportedProtocolVersion)
}
// Protocol violations are not considered fatal, as they do not affect other links
// They do still destroy the current link, however.
Err(NegotiationError::ProtocolViolation) => Ok(()),
Err(NegotiationError::ReadError) => Ok(()),
Err(NegotiationError::WriteError) => Ok(()),
// Generic refusal for when a service doesn't accept a route for whatever reason
Err(NegotiationError::Refused) => {
tracing::debug!("Refused remote protocol request");
Ok(())
}
// Lack of support for a service is just a more specific refusal
Err(NegotiationError::UnsupportedServiceVersion) => {
tracing::debug!("Refused request due to unsupported service version");
Ok(())
}
Err(NegotiationError::ApplicationError(e)) => {
tracing::warn!(err=?e, "Refused request due to application error in negotiation");
Ok(())
}
Err(NegotiationError::FatalError(e)) => {
tracing::error!(err=?e, "Refused request due to fatal application error in negotiation");
Err(RequestProcessingError::FatalError(
NegotiationError::FatalError(e).into(),
))
}
Ok((link, route_addr, service)) => {
if shutdown.is_cancelled() {
// Drop services post-negotiation if the connection is awaiting
// shutdown, instead of handing them to the service to be performed.
return Ok(());
}
let route_addr: RouteAddress = route_addr;
let service: negotiation::ArcService = service;
match service
.handle(route_addr.clone(), Box::new(link), tunnel_id)
.await
{
// TODO: Figure out which of these should be considered fatal to the tunnel, if any
Err(e) => {
tracing::debug!(
address = route_addr.as_str(),
error = ?e,
"Protocol Service responded with non-fatal error"
);
Ok(())
}
Ok(()) => {
tracing::trace!(
address = route_addr.as_str(),
"Protocol Service reported success"
);
Ok(())
}
}
}
}
}
async fn register_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel: Arc<TTunnel>,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelRegistrationError>
where
TTunnelRegistry: std::ops::Deref + Send + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let registration = async move {
tunnel_registry
.register_tunnel(id, tunnel)
.map_err(|e| match e {
TunnelRegistrationError::IdOccupied(id) => {
tracing::error!(?id, "ID occupied; dropping tunnel");
TunnelRegistrationError::IdOccupied(id)
}
TunnelRegistrationError::NameOccupied(name) => {
// This error indicates that the tunnel registry is reporting names incorrectly, or
// holding entries from prior launches beyond the lifetime of the server that created them
tracing::error!(
"Name reported as occupied, but we haven't named this tunnel yet; dropping tunnel"
);
TunnelRegistrationError::NameOccupied(name)
}
TunnelRegistrationError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel registration");
TunnelRegistrationError::ApplicationError(e)
}
})
.await
};
tokio::spawn(registration).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelRegistrationError::ApplicationError(anyhow::Error::msg("Registration task cancelled"))
}
})?
}
async fn name_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel_name: TunnelName,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelNamingError>
where
TTunnelRegistry: std::ops::Deref + Send + Sync + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let naming = async move {
tunnel_registry
.deref()
.name_tunnel(id, tunnel_name)
.map_err(|e| match e {
// If a tunnel registry wishes to keep a tunnel alive past a naming clash, it
// must rename the existing tunnel then name the new one, and report Ok here.
TunnelNamingError::NameOccupied(name) => {
tracing::error!(?id, "Name reports as occupied; dropping tunnel");
TunnelNamingError::NameOccupied(name)
}
TunnelNamingError::TunnelNotRegistered(id) => {
// This indicates out-of-order processing on per-tunnel events in the registry
// To solve this, the tunnel registry task complete event processing in-order
// for events produced by a given tunnel's lifetime. The simplest way is to
// serialize all registry changes using a tokio::task with an ordered channel.
tracing::error!("Tunnel reported as not registered from naming task");
TunnelNamingError::TunnelNotRegistered(id)
}
TunnelNamingError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel naming");
TunnelNamingError::ApplicationError(e)
}
})
.await
};
tokio::spawn(naming).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelNamingError::ApplicationError(anyhow::Error::msg("Naming task cancelled"))
}
})?
}
} | {
match link {
tunnel::TunnelIncomingType::BiStream(link) => {
Self::handle_incoming_request_bistream(id, link, negotiator, shutdown).await | random_line_split |
modular.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT license OR Apache 2.0
use authentication::perform_authentication;
use futures::{
future::{self, TryFutureExt},
Future, Stream, StreamExt, TryStreamExt,
};
use std::sync::Arc;
use tokio::sync::broadcast::{channel as event_channel, Sender as Broadcaster};
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use crate::{
common::{
authentication::{
self, AuthenticationError, AuthenticationHandler, AuthenticationHandlingError,
},
protocol::{
negotiation::{self, NegotiationError, NegotiationService},
request_handler::RequestClientHandler,
traits::{
SerializedTunnelRegistry, ServiceRegistry, TunnelNamingError, TunnelRegistrationError,
TunnelRegistry,
},
tunnel::{
self, id::TunnelIDGenerator, Tunnel, TunnelDownlink, TunnelError, TunnelId,
TunnelIncomingType, TunnelName,
},
RouteAddress, Router,
},
},
util::tunnel_stream::WrappedStream,
};
pub struct ModularDaemon<TTunnel> {
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
router: Arc<dyn Router + Send + Sync + 'static>,
request_handler: Arc<RequestClientHandler>,
authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
// event hooks
pub tunnel_connected: Broadcaster<(TunnelId, Arc<TTunnel>)>,
pub tunnel_authenticated: Broadcaster<(TunnelId, TunnelName, Arc<TTunnel>)>,
pub tunnel_disconnected:
Broadcaster<(TunnelId, Option<TunnelName> /*, DisconnectReason? */)>,
}
impl<TTunnel> ModularDaemon<TTunnel> {
pub fn requests<'a>(&'a self) -> &Arc<RequestClientHandler> {
&self.request_handler
}
fn authenticate_tunnel<'a>(
self: &Arc<Self>,
tunnel: tunnel::ArcTunnel<'a>,
shutdown: &CancellationToken,
) -> impl Future<Output = Result<Option<(tunnel::TunnelName, tunnel::ArcTunnel<'a>)>, anyhow::Error>>
+ 'a {
let shutdown = shutdown.clone();
let authentication_handler = Arc::clone(&self.authentication_handler);
async move {
let result = perform_authentication(
authentication_handler.as_ref(),
tunnel.as_ref(),
&shutdown.into(),
)
.await;
match result {
Err(AuthenticationError::Handling(AuthenticationHandlingError::FatalApplicationError(
fatal_error,
))) => {
tracing::error!(reason=?fatal_error, "Authentication encountered fatal error!");
anyhow::Context::context(
Err(fatal_error),
"Fatal error encountered while handling authentication",
)
}
Err(AuthenticationError::Handling(handling_error)) => {
// Non-fatal handling errors are passed to tracing and close the tunnel
tracing::warn!(
reason = (&handling_error as &dyn std::error::Error),
"Tunnel closed due to authentication handling failure"
);
Ok(None)
}
Err(AuthenticationError::Remote(remote_error)) => {
tracing::debug!(
reason = (&remote_error as &dyn std::error::Error),
"Tunnel closed due to remote authentication failure"
);
Ok(None)
}
Ok(tunnel_name) => Ok(Some((tunnel_name, tunnel))),
}
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
Self: 'static,
{
pub fn new(
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
router: Arc<dyn Router + Send + Sync + 'static>,
authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
) -> Self {
Self {
request_handler: Arc::new(RequestClientHandler::new(
Arc::clone(&tunnel_registry),
Arc::clone(&service_registry),
Arc::clone(&router),
)),
service_registry,
tunnel_registry,
router,
authentication_handler,
tunnel_id_generator,
// For event handlers, we simply drop the receive sides,
// as new ones can be made with Sender::subscribe(&self)
tunnel_connected: event_channel(32).0,
tunnel_authenticated: event_channel(32).0,
tunnel_disconnected: event_channel(32).0,
}
}
/// Run the server against a tunnel_source.
///
/// This can be performed concurrently against multiple sources, with a shared server instance.
/// The implementation assumes that shutdown_request_listener will also halt the tunnel_source.
pub fn run<TunnelSource, TIntoTunnel>(
self: Arc<Self>,
tunnel_source: TunnelSource,
shutdown_request_listener: CancellationToken,
) -> tokio::task::JoinHandle<()>
where
TunnelSource: Stream<Item = TIntoTunnel> + Send + 'static,
TIntoTunnel: Into<TTunnel>,
TTunnel: Tunnel + 'static,
{
let this = Arc::clone(&self);
// Pipeline phases:
// Attach baggage - Arcs need cloned once per incoming tunnel, if they need to access it
// The baggage attachment phase takes the initial Arc items clones them per-stream
// This also generates a u64 as an ID for this tunnel, using a naive interlocked/atomic counter
let pipeline = tunnel_source
.take_until({
let shutdown_request_listener = shutdown_request_listener.clone();
async move { shutdown_request_listener.cancelled().await }
})
.scan(
(this, shutdown_request_listener),
|(this, shutdown_request_listener), tunnel| {
let id = this.tunnel_id_generator.next();
let tunnel: TTunnel = tunnel.into();
future::ready(Some((
tunnel,
id,
this.clone(),
shutdown_request_listener.clone(),
)))
},
);
// Tunnel Lifecycle - Sub-pipeline performed by futures on a per-tunnel basis
// This could be done at the stream level, but Rust-Analyzer's typesystem struggles
// to understand stream associated types at this level.
let pipeline = pipeline.for_each_concurrent(
None,
|(tunnel, id, this, shutdown_request_listener)| async move {
let tunnel = Arc::new(tunnel);
if let Err(e) = this
.tunnel_lifecycle(id, tunnel, shutdown_request_listener)
.await
{
tracing::debug!(error=?e, "tunnel lifetime exited with error");
}
},
);
// Spawn an instrumented task for the server which will return
// when all connections shut down and the tunnel source closes
tokio::task::spawn(pipeline.instrument(tracing::span!(tracing::Level::INFO, "modular_server")))
}
}
#[derive(thiserror::Error, Debug)]
enum | {
#[error(transparent)]
RegistrationError(#[from] TunnelRegistrationError),
#[error(transparent)]
RegistryNamingError(#[from] TunnelNamingError),
#[error(transparent)]
RequestProcessingError(RequestProcessingError),
#[error("Authentication refused to remote by either breach of protocol or invalid/inadequate credentials")]
AuthenticationRefused,
#[error("Fatal error encountered in tunnel lifecycle: {0:?}")]
FatalError(anyhow::Error),
}
#[derive(thiserror::Error, Debug)]
enum RequestProcessingError {
#[error("Protocol version mismatch")]
UnsupportedProtocolVersion,
#[error("Tunnel error encountered: {0}")]
TunnelError(TunnelError),
#[error(transparent)]
FatalError(anyhow::Error),
}
impl From<RequestProcessingError> for TunnelLifecycleError {
fn from(e: RequestProcessingError) -> TunnelLifecycleError {
match e {
RequestProcessingError::FatalError(fatal_error) => {
TunnelLifecycleError::FatalError(fatal_error)
}
non_fatal => TunnelLifecycleError::RequestProcessingError(non_fatal),
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
TTunnel: Tunnel + 'static,
{
fn tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
) -> impl Future<Output = Result<(), TunnelLifecycleError>> + 'static {
async move {
// A registry mutex that prevents us from racing when calling the registry for
// this particular tunnel entry. This should also be enforced at the registry level.
let serialized_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static> = Arc::new(SerializedTunnelRegistry::new(Arc::clone(&self.tunnel_registry)));
// Tunnel registration - The tunnel registry is called to imbue the tunnel with an ID
{
let tunnel_registry = Arc::clone(&serialized_registry);
Self::register_tunnel(id, Arc::clone(&tunnel), tunnel_registry)
.instrument(tracing::span!(tracing::Level::DEBUG, "registration", ?id))
}.await?;
// Send tunnel_connected event once the tunnel is successfully registered to its ID
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self.tunnel_connected.send((id, tunnel.clone()));
// From here on, any failure must trigger attempted deregistration of the tunnel,
// So further phases return their result to check for failures, which then result
// in a deregistration call.
// Phases resume in registered_tunnel_lifecycle.
let tunnel_registry = Arc::clone(&serialized_registry);
match self.registered_tunnel_lifecycle(id, tunnel, shutdown, tunnel_registry).await {
Ok(lifecycle_result) => Ok(lifecycle_result),
Err(e) => {
let deregistered = serialized_registry.deregister_tunnel(id).await.ok();
match &e {
&TunnelLifecycleError::AuthenticationRefused => tracing::debug!(err=?e, record=?deregistered, "Deregistered due to authentication refusal"),
e => tracing::info!(err=?e, record=?deregistered, "Deregistered due to lifecycle error")
}
Err(e)
}
}
}.instrument(tracing::span!(tracing::Level::DEBUG, "tunnel", ?id))
}
async fn registered_tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
serialized_tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
) -> Result<(), TunnelLifecycleError> {
// Authenticate connections - Each connection will be piped into the authenticator,
// which has the option of declining the connection, and may save additional metadata.
let tunnel_authentication = {
self
.authenticate_tunnel(tunnel.clone(), &shutdown)
.instrument(tracing::span!(tracing::Level::DEBUG, "authentication", ?id))
.map_err(TunnelLifecycleError::FatalError)
};
let tunnel_name = match tunnel_authentication.await? {
Some((tunnel_name, _tunnel_dyn)) => tunnel_name,
None => {
let _ = serialized_tunnel_registry.deregister_tunnel(id).await;
return Ok(());
}
};
// Tunnel naming - The tunnel registry is notified of the authenticator-provided tunnel name
{
let tunnel_registry = Arc::clone(&serialized_tunnel_registry);
Self::name_tunnel(id, tunnel_name.clone(), tunnel_registry).instrument(tracing::span!(
tracing::Level::DEBUG,
"naming",
?id
))
}
.await?;
// Send tunnel_authenticated event for the newly-named tunnel, once the registry is aware of it
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self
.tunnel_authenticated
.send((id, tunnel_name.clone(), tunnel.clone()));
// Process incoming requests until the incoming channel is closed.
{
let service_registry = Arc::clone(&self.service_registry);
Self::handle_incoming_requests(
id,
tunnel
.downlink()
.await
.ok_or(TunnelLifecycleError::RequestProcessingError(
RequestProcessingError::TunnelError(TunnelError::ConnectionClosed),
))?,
service_registry,
shutdown,
)
.instrument(tracing::span!(
tracing::Level::DEBUG,
"request_handling",
?id
))
}
.await?;
// Deregister closed tunnels after graceful exit
let _record = serialized_tunnel_registry.deregister_tunnel(id).await;
// TODO: Find a way to call self.tunnel_disconnected automatically, and simplify deregistration code path
// Otherwise, these deregister calls are an absurd amount of complexity.
// Maybe use drop semantics paired with a cancellation token and a task?
Ok(())
}
// Process incoming requests until the incoming channel is closed.
// Await a tunnel closure request from the host, or for the tunnel to close on its own.
// A tunnel has "closed on its own" if incoming closes *or* outgoing requests fail with
// a notification that the outgoing channel has been closed.
//
// The request handler for this side should be configured to send a close request for
// the tunnel with the given ID when it sees a request fail due to tunnel closure.
// TODO: configure request handler (?) to do that using a std::sync::Weak<ModularDaemon>.
async fn handle_incoming_requests<TDownlink: TunnelDownlink>(
id: TunnelId,
mut incoming: TDownlink,
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError> {
let negotiator = Arc::new(NegotiationService::new(service_registry));
incoming
.as_stream()
// Stop accepting new requests after a graceful shutdown is requested
.take_until(shutdown.clone().cancelled())
.map_err(|e: TunnelError| RequestProcessingError::TunnelError(e))
.scan((negotiator, shutdown), |(negotiator, shutdown), link| {
let res = link.map(|content| (Arc::clone(&*negotiator), shutdown.clone(), content));
future::ready(Some(res))
})
.try_for_each_concurrent(None, |(negotiator, shutdown, link)| {
Self::handle_incoming_request(id, link, negotiator, shutdown)
})
.await?;
Ok(())
}
async fn handle_incoming_request<Services>(
id: TunnelId,
link: TunnelIncomingType,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync + ?Sized + 'static,
{
match link {
tunnel::TunnelIncomingType::BiStream(link) => {
Self::handle_incoming_request_bistream(id, link, negotiator, shutdown).await
}
}
}
async fn handle_incoming_request_bistream<Services>(
tunnel_id: TunnelId,
link: WrappedStream,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken, // TODO: Respond to shutdown listener requests
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync + ?Sized + 'static,
{
match negotiator.negotiate(link, tunnel_id).await {
// Tunnels established on an invalid negotiation protocol are useless; consider this fatal
Err(NegotiationError::UnsupportedProtocolVersion) => {
Err(RequestProcessingError::UnsupportedProtocolVersion)
}
// Protocol violations are not considered fatal, as they do not affect other links
// They do still destroy the current link, however.
Err(NegotiationError::ProtocolViolation) => Ok(()),
Err(NegotiationError::ReadError) => Ok(()),
Err(NegotiationError::WriteError) => Ok(()),
// Generic refusal for when a service doesn't accept a route for whatever reason
Err(NegotiationError::Refused) => {
tracing::debug!("Refused remote protocol request");
Ok(())
}
// Lack of support for a service is just a more specific refusal
Err(NegotiationError::UnsupportedServiceVersion) => {
tracing::debug!("Refused request due to unsupported service version");
Ok(())
}
Err(NegotiationError::ApplicationError(e)) => {
tracing::warn!(err=?e, "Refused request due to application error in negotiation");
Ok(())
}
Err(NegotiationError::FatalError(e)) => {
tracing::error!(err=?e, "Refused request due to fatal application error in negotiation");
Err(RequestProcessingError::FatalError(
NegotiationError::FatalError(e).into(),
))
}
Ok((link, route_addr, service)) => {
if shutdown.is_cancelled() {
// Drop services post-negotiation if the connection is awaiting
// shutdown, instead of handing them to the service to be performed.
return Ok(());
}
let route_addr: RouteAddress = route_addr;
let service: negotiation::ArcService = service;
match service
.handle(route_addr.clone(), Box::new(link), tunnel_id)
.await
{
// TODO: Figure out which of these should be considered fatal to the tunnel, if any
Err(e) => {
tracing::debug!(
address = route_addr.as_str(),
error = ?e,
"Protocol Service responded with non-fatal error"
);
Ok(())
}
Ok(()) => {
tracing::trace!(
address = route_addr.as_str(),
"Protocol Service reported success"
);
Ok(())
}
}
}
}
}
async fn register_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel: Arc<TTunnel>,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelRegistrationError>
where
TTunnelRegistry: std::ops::Deref + Send + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let registration = async move {
tunnel_registry
.register_tunnel(id, tunnel)
.map_err(|e| match e {
TunnelRegistrationError::IdOccupied(id) => {
tracing::error!(?id, "ID occupied; dropping tunnel");
TunnelRegistrationError::IdOccupied(id)
}
TunnelRegistrationError::NameOccupied(name) => {
// This error indicates that the tunnel registry is reporting names incorrectly, or
// holding entries from prior launches beyond the lifetime of the server that created them
tracing::error!(
"Name reported as occupied, but we haven't named this tunnel yet; dropping tunnel"
);
TunnelRegistrationError::NameOccupied(name)
}
TunnelRegistrationError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel registration");
TunnelRegistrationError::ApplicationError(e)
}
})
.await
};
tokio::spawn(registration).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelRegistrationError::ApplicationError(anyhow::Error::msg("Registration task cancelled"))
}
})?
}
async fn name_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel_name: TunnelName,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelNamingError>
where
TTunnelRegistry: std::ops::Deref + Send + Sync + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let naming = async move {
tunnel_registry
.deref()
.name_tunnel(id, tunnel_name)
.map_err(|e| match e {
// If a tunnel registry wishes to keep a tunnel alive past a naming clash, it
// must rename the existing tunnel then name the new one, and report Ok here.
TunnelNamingError::NameOccupied(name) => {
tracing::error!(?id, "Name reports as occupied; dropping tunnel");
TunnelNamingError::NameOccupied(name)
}
TunnelNamingError::TunnelNotRegistered(id) => {
// This indicates out-of-order processing on per-tunnel events in the registry
// To solve this, the tunnel registry task complete event processing in-order
// for events produced by a given tunnel's lifetime. The simplest way is to
// serialize all registry changes using a tokio::task with an ordered channel.
tracing::error!("Tunnel reported as not registered from naming task");
TunnelNamingError::TunnelNotRegistered(id)
}
TunnelNamingError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel naming");
TunnelNamingError::ApplicationError(e)
}
})
.await
};
tokio::spawn(naming).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelNamingError::ApplicationError(anyhow::Error::msg("Naming task cancelled"))
}
})?
}
}
| TunnelLifecycleError | identifier_name |
pixel_format.rs | // The MIT License (MIT)
//
// Copyright (c) 2018 Michael Dilger
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use super::{D3DFormat, DataFormat, DxgiFormat};
use crate::error::*;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io::{Read, Write};
#[derive(Clone)]
pub struct PixelFormat {
/// Size of this structure in bytes; set to 32
pub size: u32,
/// Values which indicate what type of data is in the surface
pub flags: PixelFormatFlags,
/// Codes for specifying compressed or custom formats.
pub fourcc: Option<FourCC>,
/// Number of bits in an RGB (possibly including alpha) format. Valid when
/// flags includes RGB or LUMINANCE.
pub rgb_bit_count: Option<u32>,
/// Red (or Y) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the red mask would be 0x00ff0000.
pub r_bit_mask: Option<u32>,
/// Green (or U) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the green mask would be 0x0000ff00.
pub g_bit_mask: Option<u32>,
/// Blue (or V) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the blue mask would be 0x000000ff
pub b_bit_mask: Option<u32>,
/// Alpha mask for reading alpha data. Valid of flags includes ALPHA_PIXELS or ALPHA.
/// For instance, given the A8R8G8B8 format, the alpha mask would be 0xff000000
pub a_bit_mask: Option<u32>,
}
impl PixelFormat {
pub fn read<R: Read>(mut r: R) -> Result<PixelFormat, Error> {
let size = r.read_u32::<LittleEndian>()?;
if size != 32 {
return Err(Error::InvalidField("Pixel format struct size".to_owned()));
}
let flags = PixelFormatFlags::from_bits_truncate(r.read_u32::<LittleEndian>()?);
let fourcc = r.read_u32::<LittleEndian>()?;
let rgb_bit_count = r.read_u32::<LittleEndian>()?;
let r_bit_mask = r.read_u32::<LittleEndian>()?;
let g_bit_mask = r.read_u32::<LittleEndian>()?;
let b_bit_mask = r.read_u32::<LittleEndian>()?;
let a_bit_mask = r.read_u32::<LittleEndian>()?;
Ok(PixelFormat {
size,
flags,
fourcc: if flags.contains(PixelFormatFlags::FOURCC) {
Some(FourCC(fourcc))
} else {
None
},
rgb_bit_count: if flags.contains(PixelFormatFlags::RGB)
|| flags.contains(PixelFormatFlags::LUMINANCE)
{
Some(rgb_bit_count)
} else {
None
},
r_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(r_bit_mask)
} else {
None
},
g_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(g_bit_mask)
} else {
None
},
b_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(b_bit_mask)
} else {
None
},
a_bit_mask: if flags.contains(PixelFormatFlags::ALPHA_PIXELS)
|| flags.contains(PixelFormatFlags::ALPHA)
{
Some(a_bit_mask)
} else {
None
},
})
}
pub fn write<W: Write>(&self, w: &mut W) -> Result<(), Error> {
w.write_u32::<LittleEndian>(self.size)?;
w.write_u32::<LittleEndian>(self.flags.bits())?;
w.write_u32::<LittleEndian>(self.fourcc.as_ref().unwrap_or(&FourCC(0)).0)?;
w.write_u32::<LittleEndian>(self.rgb_bit_count.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.r_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.g_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.b_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.a_bit_mask.unwrap_or(0))?;
Ok(())
}
}
impl fmt::Debug for PixelFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, " Pixel Format:")?;
writeln!(f, " flags: {:?}", self.flags)?;
writeln!(f, " fourcc: {:?}", self.fourcc)?;
writeln!(f, " bits_per_pixel: {:?}", self.rgb_bit_count)?;
writeln!(
f,
" RGBA bitmasks: {:?}, {:?}, {:?}, {:?}",
self.r_bit_mask, self.g_bit_mask, self.b_bit_mask, self.a_bit_mask
)?;
Ok(())
}
}
impl Default for PixelFormat {
fn default() -> PixelFormat {
PixelFormat {
size: 32, // must be 32
flags: PixelFormatFlags::empty(),
fourcc: None,
rgb_bit_count: None,
r_bit_mask: None,
g_bit_mask: None,
b_bit_mask: None,
a_bit_mask: None,
}
}
}
impl From<D3DFormat> for PixelFormat {
fn from(format: D3DFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB);
pf.rgb_bit_count = Some(bpp as u32)
} else if let Some(fourcc) = format.get_fourcc() {
pf.flags.insert(PixelFormatFlags::FOURCC);
pf.fourcc = Some(fourcc);
}
if let Some(abitmask) = format.a_bit_mask() {
pf.flags.insert(PixelFormatFlags::ALPHA_PIXELS);
pf.a_bit_mask = Some(abitmask);
}
pf.r_bit_mask = format.r_bit_mask();
pf.g_bit_mask = format.g_bit_mask();
pf.b_bit_mask = format.b_bit_mask();
pf
}
}
impl From<DxgiFormat> for PixelFormat {
fn from(format: DxgiFormat) -> PixelFormat |
}
bitflags! {
pub struct PixelFormatFlags: u32 {
/// Texture contains alpha data.
const ALPHA_PIXELS = 0x1;
/// Alpha channel only uncomressed data (used in older DDS files)
const ALPHA = 0x2;
/// Texture contains compressed RGB data.
const FOURCC = 0x4;
/// Texture contains uncompressed RGB data.
const RGB = 0x40;
/// YUV uncompressed data (used in older DDS files)
const YUV = 0x200;
/// Single channel color uncompressed data (used in older DDS files)
const LUMINANCE = 0x20000;
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct FourCC(pub u32);
// generate little-endian u32 from 4 bytes
// rust is not ready for this yet
/*
macro_rules! u32_code {
($w:expr) => {
((($w[0] as u32) << 0) |
(($w[1] as u32) << 8) |
(($w[2] as u32) << 16) |
(($w[3] as u32) << 24) |
((*$w as [u8; 4])[0] as u32 * 0))
}
}
*/
impl FourCC {
pub const NONE: u32 = 0;
// D3D formats
pub const DXT1: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const DXT2: u32 = 0x32545844; //u32_code!(b"DXT2");
pub const DXT3: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const DXT4: u32 = 0x34545844; //u32_code!(b"DXT4");
pub const DXT5: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const R8G8_B8G8: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const A16B16G16R16: u32 = 36;
pub const Q16W16V16U16: u32 = 110;
pub const R16F: u32 = 111;
pub const G16R16F: u32 = 112;
pub const A16B16G16R16F: u32 = 113;
pub const R32F: u32 = 114;
pub const G32R32F: u32 = 115;
pub const A32B32G32R32F: u32 = 116;
pub const UYVY: u32 = 0x59565955; //u32_code!(b"UYVY");
pub const YUY2: u32 = 0x32595559; //u32_code!(b"YUY2");
pub const CXV8U8: u32 = 117;
pub const ATI1: u32 = 0x31495441; //u32_code!(b"ATI1"); // BC4 unorm
pub const ATI2: u32 = 0x32495441; //u32_code!(b"ATI2"); // BC5 unorm
pub const DX10: u32 = 0x30315844; //u32_code!(b"DX10");
// DXGI formats (different names, often for same things)
pub const BC1_UNORM: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const BC2_UNORM: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const BC3_UNORM: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const BC4_UNORM: u32 = 0x55344342; //u32_code!(b"BC4U");
pub const BC4_SNORM: u32 = 0x53344342; //u32_code!(b"BC4S");
pub const BC5_UNORM: u32 = 0x32495441; //u32_code!(b"ATI2");
pub const BC5_SNORM: u32 = 0x53354342; //u32_code!(b"BC5S");
pub const R8G8_B8G8_UNORM: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8_UNORM: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const R16G16B16A16_UNORM: u32 = 36;
pub const R16G16B16A16_SNORM: u32 = 110;
pub const R16_FLOAT: u32 = 111;
pub const R16G16_FLOAT: u32 = 112;
pub const R16G16B16A16_FLOAT: u32 = 113;
pub const R32_FLOAT: u32 = 114;
pub const R32G32_FLOAT: u32 = 115;
pub const R32G32B32A32_FLOAT: u32 = 116;
}
| {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB); // means uncompressed
pf.rgb_bit_count = Some(bpp as u32)
}
pf.fourcc = Some(FourCC(FourCC::DX10)); // we always use extention for Dxgi
pf.flags.insert(PixelFormatFlags::FOURCC);
// flags::ALPHA_PIXELS is not set, use DX10 extension.
// r_bit_mask, g_bit_mask, b_bit_mask and a_bit_mask are not set.
// FIXME - we may need to set these in some circumstances.
pf
} | identifier_body |
pixel_format.rs | // The MIT License (MIT)
//
// Copyright (c) 2018 Michael Dilger
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use super::{D3DFormat, DataFormat, DxgiFormat};
use crate::error::*;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io::{Read, Write};
#[derive(Clone)]
pub struct PixelFormat {
/// Size of this structure in bytes; set to 32
pub size: u32,
/// Values which indicate what type of data is in the surface
pub flags: PixelFormatFlags,
/// Codes for specifying compressed or custom formats.
pub fourcc: Option<FourCC>,
/// Number of bits in an RGB (possibly including alpha) format. Valid when
/// flags includes RGB or LUMINANCE.
pub rgb_bit_count: Option<u32>,
/// Red (or Y) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the red mask would be 0x00ff0000.
pub r_bit_mask: Option<u32>,
/// Green (or U) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the green mask would be 0x0000ff00.
pub g_bit_mask: Option<u32>,
/// Blue (or V) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the blue mask would be 0x000000ff
pub b_bit_mask: Option<u32>,
/// Alpha mask for reading alpha data. Valid of flags includes ALPHA_PIXELS or ALPHA.
/// For instance, given the A8R8G8B8 format, the alpha mask would be 0xff000000
pub a_bit_mask: Option<u32>,
}
impl PixelFormat {
pub fn read<R: Read>(mut r: R) -> Result<PixelFormat, Error> {
let size = r.read_u32::<LittleEndian>()?;
if size != 32 |
let flags = PixelFormatFlags::from_bits_truncate(r.read_u32::<LittleEndian>()?);
let fourcc = r.read_u32::<LittleEndian>()?;
let rgb_bit_count = r.read_u32::<LittleEndian>()?;
let r_bit_mask = r.read_u32::<LittleEndian>()?;
let g_bit_mask = r.read_u32::<LittleEndian>()?;
let b_bit_mask = r.read_u32::<LittleEndian>()?;
let a_bit_mask = r.read_u32::<LittleEndian>()?;
Ok(PixelFormat {
size,
flags,
fourcc: if flags.contains(PixelFormatFlags::FOURCC) {
Some(FourCC(fourcc))
} else {
None
},
rgb_bit_count: if flags.contains(PixelFormatFlags::RGB)
|| flags.contains(PixelFormatFlags::LUMINANCE)
{
Some(rgb_bit_count)
} else {
None
},
r_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(r_bit_mask)
} else {
None
},
g_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(g_bit_mask)
} else {
None
},
b_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(b_bit_mask)
} else {
None
},
a_bit_mask: if flags.contains(PixelFormatFlags::ALPHA_PIXELS)
|| flags.contains(PixelFormatFlags::ALPHA)
{
Some(a_bit_mask)
} else {
None
},
})
}
pub fn write<W: Write>(&self, w: &mut W) -> Result<(), Error> {
w.write_u32::<LittleEndian>(self.size)?;
w.write_u32::<LittleEndian>(self.flags.bits())?;
w.write_u32::<LittleEndian>(self.fourcc.as_ref().unwrap_or(&FourCC(0)).0)?;
w.write_u32::<LittleEndian>(self.rgb_bit_count.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.r_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.g_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.b_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.a_bit_mask.unwrap_or(0))?;
Ok(())
}
}
impl fmt::Debug for PixelFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, " Pixel Format:")?;
writeln!(f, " flags: {:?}", self.flags)?;
writeln!(f, " fourcc: {:?}", self.fourcc)?;
writeln!(f, " bits_per_pixel: {:?}", self.rgb_bit_count)?;
writeln!(
f,
" RGBA bitmasks: {:?}, {:?}, {:?}, {:?}",
self.r_bit_mask, self.g_bit_mask, self.b_bit_mask, self.a_bit_mask
)?;
Ok(())
}
}
impl Default for PixelFormat {
fn default() -> PixelFormat {
PixelFormat {
size: 32, // must be 32
flags: PixelFormatFlags::empty(),
fourcc: None,
rgb_bit_count: None,
r_bit_mask: None,
g_bit_mask: None,
b_bit_mask: None,
a_bit_mask: None,
}
}
}
impl From<D3DFormat> for PixelFormat {
fn from(format: D3DFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB);
pf.rgb_bit_count = Some(bpp as u32)
} else if let Some(fourcc) = format.get_fourcc() {
pf.flags.insert(PixelFormatFlags::FOURCC);
pf.fourcc = Some(fourcc);
}
if let Some(abitmask) = format.a_bit_mask() {
pf.flags.insert(PixelFormatFlags::ALPHA_PIXELS);
pf.a_bit_mask = Some(abitmask);
}
pf.r_bit_mask = format.r_bit_mask();
pf.g_bit_mask = format.g_bit_mask();
pf.b_bit_mask = format.b_bit_mask();
pf
}
}
impl From<DxgiFormat> for PixelFormat {
fn from(format: DxgiFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB); // means uncompressed
pf.rgb_bit_count = Some(bpp as u32)
}
pf.fourcc = Some(FourCC(FourCC::DX10)); // we always use extention for Dxgi
pf.flags.insert(PixelFormatFlags::FOURCC);
// flags::ALPHA_PIXELS is not set, use DX10 extension.
// r_bit_mask, g_bit_mask, b_bit_mask and a_bit_mask are not set.
// FIXME - we may need to set these in some circumstances.
pf
}
}
bitflags! {
pub struct PixelFormatFlags: u32 {
/// Texture contains alpha data.
const ALPHA_PIXELS = 0x1;
/// Alpha channel only uncomressed data (used in older DDS files)
const ALPHA = 0x2;
/// Texture contains compressed RGB data.
const FOURCC = 0x4;
/// Texture contains uncompressed RGB data.
const RGB = 0x40;
/// YUV uncompressed data (used in older DDS files)
const YUV = 0x200;
/// Single channel color uncompressed data (used in older DDS files)
const LUMINANCE = 0x20000;
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct FourCC(pub u32);
// generate little-endian u32 from 4 bytes
// rust is not ready for this yet
/*
macro_rules! u32_code {
($w:expr) => {
((($w[0] as u32) << 0) |
(($w[1] as u32) << 8) |
(($w[2] as u32) << 16) |
(($w[3] as u32) << 24) |
((*$w as [u8; 4])[0] as u32 * 0))
}
}
*/
impl FourCC {
pub const NONE: u32 = 0;
// D3D formats
pub const DXT1: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const DXT2: u32 = 0x32545844; //u32_code!(b"DXT2");
pub const DXT3: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const DXT4: u32 = 0x34545844; //u32_code!(b"DXT4");
pub const DXT5: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const R8G8_B8G8: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const A16B16G16R16: u32 = 36;
pub const Q16W16V16U16: u32 = 110;
pub const R16F: u32 = 111;
pub const G16R16F: u32 = 112;
pub const A16B16G16R16F: u32 = 113;
pub const R32F: u32 = 114;
pub const G32R32F: u32 = 115;
pub const A32B32G32R32F: u32 = 116;
pub const UYVY: u32 = 0x59565955; //u32_code!(b"UYVY");
pub const YUY2: u32 = 0x32595559; //u32_code!(b"YUY2");
pub const CXV8U8: u32 = 117;
pub const ATI1: u32 = 0x31495441; //u32_code!(b"ATI1"); // BC4 unorm
pub const ATI2: u32 = 0x32495441; //u32_code!(b"ATI2"); // BC5 unorm
pub const DX10: u32 = 0x30315844; //u32_code!(b"DX10");
// DXGI formats (different names, often for same things)
pub const BC1_UNORM: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const BC2_UNORM: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const BC3_UNORM: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const BC4_UNORM: u32 = 0x55344342; //u32_code!(b"BC4U");
pub const BC4_SNORM: u32 = 0x53344342; //u32_code!(b"BC4S");
pub const BC5_UNORM: u32 = 0x32495441; //u32_code!(b"ATI2");
pub const BC5_SNORM: u32 = 0x53354342; //u32_code!(b"BC5S");
pub const R8G8_B8G8_UNORM: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8_UNORM: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const R16G16B16A16_UNORM: u32 = 36;
pub const R16G16B16A16_SNORM: u32 = 110;
pub const R16_FLOAT: u32 = 111;
pub const R16G16_FLOAT: u32 = 112;
pub const R16G16B16A16_FLOAT: u32 = 113;
pub const R32_FLOAT: u32 = 114;
pub const R32G32_FLOAT: u32 = 115;
pub const R32G32B32A32_FLOAT: u32 = 116;
}
| {
return Err(Error::InvalidField("Pixel format struct size".to_owned()));
} | conditional_block |
pixel_format.rs | // The MIT License (MIT)
//
// Copyright (c) 2018 Michael Dilger
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use super::{D3DFormat, DataFormat, DxgiFormat};
use crate::error::*;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io::{Read, Write};
#[derive(Clone)]
pub struct PixelFormat {
/// Size of this structure in bytes; set to 32
pub size: u32,
/// Values which indicate what type of data is in the surface
pub flags: PixelFormatFlags,
/// Codes for specifying compressed or custom formats.
pub fourcc: Option<FourCC>,
/// Number of bits in an RGB (possibly including alpha) format. Valid when
/// flags includes RGB or LUMINANCE.
pub rgb_bit_count: Option<u32>,
/// Red (or Y) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the red mask would be 0x00ff0000.
pub r_bit_mask: Option<u32>,
/// Green (or U) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the green mask would be 0x0000ff00.
pub g_bit_mask: Option<u32>,
/// Blue (or V) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the blue mask would be 0x000000ff
pub b_bit_mask: Option<u32>,
/// Alpha mask for reading alpha data. Valid of flags includes ALPHA_PIXELS or ALPHA.
/// For instance, given the A8R8G8B8 format, the alpha mask would be 0xff000000
pub a_bit_mask: Option<u32>,
}
impl PixelFormat {
pub fn read<R: Read>(mut r: R) -> Result<PixelFormat, Error> {
let size = r.read_u32::<LittleEndian>()?;
if size != 32 {
return Err(Error::InvalidField("Pixel format struct size".to_owned()));
}
let flags = PixelFormatFlags::from_bits_truncate(r.read_u32::<LittleEndian>()?);
let fourcc = r.read_u32::<LittleEndian>()?;
let rgb_bit_count = r.read_u32::<LittleEndian>()?;
let r_bit_mask = r.read_u32::<LittleEndian>()?;
let g_bit_mask = r.read_u32::<LittleEndian>()?;
let b_bit_mask = r.read_u32::<LittleEndian>()?;
let a_bit_mask = r.read_u32::<LittleEndian>()?;
Ok(PixelFormat {
size,
flags,
fourcc: if flags.contains(PixelFormatFlags::FOURCC) {
Some(FourCC(fourcc))
} else {
None
},
rgb_bit_count: if flags.contains(PixelFormatFlags::RGB)
|| flags.contains(PixelFormatFlags::LUMINANCE)
{
Some(rgb_bit_count)
} else {
None
},
r_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(r_bit_mask)
} else {
None
},
g_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(g_bit_mask)
} else {
None
},
b_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(b_bit_mask)
} else {
None
},
a_bit_mask: if flags.contains(PixelFormatFlags::ALPHA_PIXELS)
|| flags.contains(PixelFormatFlags::ALPHA)
{
Some(a_bit_mask)
} else {
None
},
})
}
pub fn write<W: Write>(&self, w: &mut W) -> Result<(), Error> {
w.write_u32::<LittleEndian>(self.size)?;
w.write_u32::<LittleEndian>(self.flags.bits())?;
w.write_u32::<LittleEndian>(self.fourcc.as_ref().unwrap_or(&FourCC(0)).0)?;
w.write_u32::<LittleEndian>(self.rgb_bit_count.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.r_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.g_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.b_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.a_bit_mask.unwrap_or(0))?;
Ok(())
}
}
impl fmt::Debug for PixelFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, " Pixel Format:")?;
writeln!(f, " flags: {:?}", self.flags)?;
writeln!(f, " fourcc: {:?}", self.fourcc)?;
writeln!(f, " bits_per_pixel: {:?}", self.rgb_bit_count)?; | )?;
Ok(())
}
}
impl Default for PixelFormat {
fn default() -> PixelFormat {
PixelFormat {
size: 32, // must be 32
flags: PixelFormatFlags::empty(),
fourcc: None,
rgb_bit_count: None,
r_bit_mask: None,
g_bit_mask: None,
b_bit_mask: None,
a_bit_mask: None,
}
}
}
impl From<D3DFormat> for PixelFormat {
fn from(format: D3DFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB);
pf.rgb_bit_count = Some(bpp as u32)
} else if let Some(fourcc) = format.get_fourcc() {
pf.flags.insert(PixelFormatFlags::FOURCC);
pf.fourcc = Some(fourcc);
}
if let Some(abitmask) = format.a_bit_mask() {
pf.flags.insert(PixelFormatFlags::ALPHA_PIXELS);
pf.a_bit_mask = Some(abitmask);
}
pf.r_bit_mask = format.r_bit_mask();
pf.g_bit_mask = format.g_bit_mask();
pf.b_bit_mask = format.b_bit_mask();
pf
}
}
impl From<DxgiFormat> for PixelFormat {
fn from(format: DxgiFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB); // means uncompressed
pf.rgb_bit_count = Some(bpp as u32)
}
pf.fourcc = Some(FourCC(FourCC::DX10)); // we always use extention for Dxgi
pf.flags.insert(PixelFormatFlags::FOURCC);
// flags::ALPHA_PIXELS is not set, use DX10 extension.
// r_bit_mask, g_bit_mask, b_bit_mask and a_bit_mask are not set.
// FIXME - we may need to set these in some circumstances.
pf
}
}
bitflags! {
pub struct PixelFormatFlags: u32 {
/// Texture contains alpha data.
const ALPHA_PIXELS = 0x1;
/// Alpha channel only uncomressed data (used in older DDS files)
const ALPHA = 0x2;
/// Texture contains compressed RGB data.
const FOURCC = 0x4;
/// Texture contains uncompressed RGB data.
const RGB = 0x40;
/// YUV uncompressed data (used in older DDS files)
const YUV = 0x200;
/// Single channel color uncompressed data (used in older DDS files)
const LUMINANCE = 0x20000;
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct FourCC(pub u32);
// generate little-endian u32 from 4 bytes
// rust is not ready for this yet
/*
macro_rules! u32_code {
($w:expr) => {
((($w[0] as u32) << 0) |
(($w[1] as u32) << 8) |
(($w[2] as u32) << 16) |
(($w[3] as u32) << 24) |
((*$w as [u8; 4])[0] as u32 * 0))
}
}
*/
impl FourCC {
pub const NONE: u32 = 0;
// D3D formats
pub const DXT1: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const DXT2: u32 = 0x32545844; //u32_code!(b"DXT2");
pub const DXT3: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const DXT4: u32 = 0x34545844; //u32_code!(b"DXT4");
pub const DXT5: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const R8G8_B8G8: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const A16B16G16R16: u32 = 36;
pub const Q16W16V16U16: u32 = 110;
pub const R16F: u32 = 111;
pub const G16R16F: u32 = 112;
pub const A16B16G16R16F: u32 = 113;
pub const R32F: u32 = 114;
pub const G32R32F: u32 = 115;
pub const A32B32G32R32F: u32 = 116;
pub const UYVY: u32 = 0x59565955; //u32_code!(b"UYVY");
pub const YUY2: u32 = 0x32595559; //u32_code!(b"YUY2");
pub const CXV8U8: u32 = 117;
pub const ATI1: u32 = 0x31495441; //u32_code!(b"ATI1"); // BC4 unorm
pub const ATI2: u32 = 0x32495441; //u32_code!(b"ATI2"); // BC5 unorm
pub const DX10: u32 = 0x30315844; //u32_code!(b"DX10");
// DXGI formats (different names, often for same things)
pub const BC1_UNORM: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const BC2_UNORM: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const BC3_UNORM: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const BC4_UNORM: u32 = 0x55344342; //u32_code!(b"BC4U");
pub const BC4_SNORM: u32 = 0x53344342; //u32_code!(b"BC4S");
pub const BC5_UNORM: u32 = 0x32495441; //u32_code!(b"ATI2");
pub const BC5_SNORM: u32 = 0x53354342; //u32_code!(b"BC5S");
pub const R8G8_B8G8_UNORM: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8_UNORM: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const R16G16B16A16_UNORM: u32 = 36;
pub const R16G16B16A16_SNORM: u32 = 110;
pub const R16_FLOAT: u32 = 111;
pub const R16G16_FLOAT: u32 = 112;
pub const R16G16B16A16_FLOAT: u32 = 113;
pub const R32_FLOAT: u32 = 114;
pub const R32G32_FLOAT: u32 = 115;
pub const R32G32B32A32_FLOAT: u32 = 116;
} | writeln!(
f,
" RGBA bitmasks: {:?}, {:?}, {:?}, {:?}",
self.r_bit_mask, self.g_bit_mask, self.b_bit_mask, self.a_bit_mask | random_line_split |
pixel_format.rs | // The MIT License (MIT)
//
// Copyright (c) 2018 Michael Dilger
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use super::{D3DFormat, DataFormat, DxgiFormat};
use crate::error::*;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io::{Read, Write};
#[derive(Clone)]
pub struct PixelFormat {
/// Size of this structure in bytes; set to 32
pub size: u32,
/// Values which indicate what type of data is in the surface
pub flags: PixelFormatFlags,
/// Codes for specifying compressed or custom formats.
pub fourcc: Option<FourCC>,
/// Number of bits in an RGB (possibly including alpha) format. Valid when
/// flags includes RGB or LUMINANCE.
pub rgb_bit_count: Option<u32>,
/// Red (or Y) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the red mask would be 0x00ff0000.
pub r_bit_mask: Option<u32>,
/// Green (or U) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the green mask would be 0x0000ff00.
pub g_bit_mask: Option<u32>,
/// Blue (or V) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the blue mask would be 0x000000ff
pub b_bit_mask: Option<u32>,
/// Alpha mask for reading alpha data. Valid of flags includes ALPHA_PIXELS or ALPHA.
/// For instance, given the A8R8G8B8 format, the alpha mask would be 0xff000000
pub a_bit_mask: Option<u32>,
}
impl PixelFormat {
pub fn read<R: Read>(mut r: R) -> Result<PixelFormat, Error> {
let size = r.read_u32::<LittleEndian>()?;
if size != 32 {
return Err(Error::InvalidField("Pixel format struct size".to_owned()));
}
let flags = PixelFormatFlags::from_bits_truncate(r.read_u32::<LittleEndian>()?);
let fourcc = r.read_u32::<LittleEndian>()?;
let rgb_bit_count = r.read_u32::<LittleEndian>()?;
let r_bit_mask = r.read_u32::<LittleEndian>()?;
let g_bit_mask = r.read_u32::<LittleEndian>()?;
let b_bit_mask = r.read_u32::<LittleEndian>()?;
let a_bit_mask = r.read_u32::<LittleEndian>()?;
Ok(PixelFormat {
size,
flags,
fourcc: if flags.contains(PixelFormatFlags::FOURCC) {
Some(FourCC(fourcc))
} else {
None
},
rgb_bit_count: if flags.contains(PixelFormatFlags::RGB)
|| flags.contains(PixelFormatFlags::LUMINANCE)
{
Some(rgb_bit_count)
} else {
None
},
r_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(r_bit_mask)
} else {
None
},
g_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(g_bit_mask)
} else {
None
},
b_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(b_bit_mask)
} else {
None
},
a_bit_mask: if flags.contains(PixelFormatFlags::ALPHA_PIXELS)
|| flags.contains(PixelFormatFlags::ALPHA)
{
Some(a_bit_mask)
} else {
None
},
})
}
pub fn | <W: Write>(&self, w: &mut W) -> Result<(), Error> {
w.write_u32::<LittleEndian>(self.size)?;
w.write_u32::<LittleEndian>(self.flags.bits())?;
w.write_u32::<LittleEndian>(self.fourcc.as_ref().unwrap_or(&FourCC(0)).0)?;
w.write_u32::<LittleEndian>(self.rgb_bit_count.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.r_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.g_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.b_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.a_bit_mask.unwrap_or(0))?;
Ok(())
}
}
impl fmt::Debug for PixelFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, " Pixel Format:")?;
writeln!(f, " flags: {:?}", self.flags)?;
writeln!(f, " fourcc: {:?}", self.fourcc)?;
writeln!(f, " bits_per_pixel: {:?}", self.rgb_bit_count)?;
writeln!(
f,
" RGBA bitmasks: {:?}, {:?}, {:?}, {:?}",
self.r_bit_mask, self.g_bit_mask, self.b_bit_mask, self.a_bit_mask
)?;
Ok(())
}
}
impl Default for PixelFormat {
fn default() -> PixelFormat {
PixelFormat {
size: 32, // must be 32
flags: PixelFormatFlags::empty(),
fourcc: None,
rgb_bit_count: None,
r_bit_mask: None,
g_bit_mask: None,
b_bit_mask: None,
a_bit_mask: None,
}
}
}
impl From<D3DFormat> for PixelFormat {
fn from(format: D3DFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB);
pf.rgb_bit_count = Some(bpp as u32)
} else if let Some(fourcc) = format.get_fourcc() {
pf.flags.insert(PixelFormatFlags::FOURCC);
pf.fourcc = Some(fourcc);
}
if let Some(abitmask) = format.a_bit_mask() {
pf.flags.insert(PixelFormatFlags::ALPHA_PIXELS);
pf.a_bit_mask = Some(abitmask);
}
pf.r_bit_mask = format.r_bit_mask();
pf.g_bit_mask = format.g_bit_mask();
pf.b_bit_mask = format.b_bit_mask();
pf
}
}
impl From<DxgiFormat> for PixelFormat {
fn from(format: DxgiFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB); // means uncompressed
pf.rgb_bit_count = Some(bpp as u32)
}
pf.fourcc = Some(FourCC(FourCC::DX10)); // we always use extention for Dxgi
pf.flags.insert(PixelFormatFlags::FOURCC);
// flags::ALPHA_PIXELS is not set, use DX10 extension.
// r_bit_mask, g_bit_mask, b_bit_mask and a_bit_mask are not set.
// FIXME - we may need to set these in some circumstances.
pf
}
}
bitflags! {
pub struct PixelFormatFlags: u32 {
/// Texture contains alpha data.
const ALPHA_PIXELS = 0x1;
/// Alpha channel only uncomressed data (used in older DDS files)
const ALPHA = 0x2;
/// Texture contains compressed RGB data.
const FOURCC = 0x4;
/// Texture contains uncompressed RGB data.
const RGB = 0x40;
/// YUV uncompressed data (used in older DDS files)
const YUV = 0x200;
/// Single channel color uncompressed data (used in older DDS files)
const LUMINANCE = 0x20000;
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct FourCC(pub u32);
// generate little-endian u32 from 4 bytes
// rust is not ready for this yet
/*
macro_rules! u32_code {
($w:expr) => {
((($w[0] as u32) << 0) |
(($w[1] as u32) << 8) |
(($w[2] as u32) << 16) |
(($w[3] as u32) << 24) |
((*$w as [u8; 4])[0] as u32 * 0))
}
}
*/
impl FourCC {
pub const NONE: u32 = 0;
// D3D formats
pub const DXT1: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const DXT2: u32 = 0x32545844; //u32_code!(b"DXT2");
pub const DXT3: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const DXT4: u32 = 0x34545844; //u32_code!(b"DXT4");
pub const DXT5: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const R8G8_B8G8: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const A16B16G16R16: u32 = 36;
pub const Q16W16V16U16: u32 = 110;
pub const R16F: u32 = 111;
pub const G16R16F: u32 = 112;
pub const A16B16G16R16F: u32 = 113;
pub const R32F: u32 = 114;
pub const G32R32F: u32 = 115;
pub const A32B32G32R32F: u32 = 116;
pub const UYVY: u32 = 0x59565955; //u32_code!(b"UYVY");
pub const YUY2: u32 = 0x32595559; //u32_code!(b"YUY2");
pub const CXV8U8: u32 = 117;
pub const ATI1: u32 = 0x31495441; //u32_code!(b"ATI1"); // BC4 unorm
pub const ATI2: u32 = 0x32495441; //u32_code!(b"ATI2"); // BC5 unorm
pub const DX10: u32 = 0x30315844; //u32_code!(b"DX10");
// DXGI formats (different names, often for same things)
pub const BC1_UNORM: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const BC2_UNORM: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const BC3_UNORM: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const BC4_UNORM: u32 = 0x55344342; //u32_code!(b"BC4U");
pub const BC4_SNORM: u32 = 0x53344342; //u32_code!(b"BC4S");
pub const BC5_UNORM: u32 = 0x32495441; //u32_code!(b"ATI2");
pub const BC5_SNORM: u32 = 0x53354342; //u32_code!(b"BC5S");
pub const R8G8_B8G8_UNORM: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8_UNORM: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const R16G16B16A16_UNORM: u32 = 36;
pub const R16G16B16A16_SNORM: u32 = 110;
pub const R16_FLOAT: u32 = 111;
pub const R16G16_FLOAT: u32 = 112;
pub const R16G16B16A16_FLOAT: u32 = 113;
pub const R32_FLOAT: u32 = 114;
pub const R32G32_FLOAT: u32 = 115;
pub const R32G32B32A32_FLOAT: u32 = 116;
}
| write | identifier_name |
amfinder_config.py | # AMFinder - amfinder_config.py
#
# MIT License
# Copyright (c) 2021 Edouard Evangelisti, Carl Turner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
AMFinder configuration module.
Read command-line arguments and store user settings.
Variables
------------
HEADERS - Table headers for the different annotation levels.
PAR - User settings.
Functions
------------
tsv_name - Return the TSV file corresponding to the current annotation level.
get - Retrieve the value associated with the given parameter ID.
colonization - Indicate whether the current level is level 1 (colonization).
intra_struct - Indicate whether the current level is level 2 (structures).
set - Assign a new value to the given parameter ID.
training_subparser - Define the command-line parser used in training mode.
prediction_subparser - Define the command-line parser used in prediction mode.
build_arg_parser - Build the full command-line parser.
import_settings - Read tile size from `settings.json`.
get_input_files - Return the list of vaid input images (based on MIME type).
initialize - Read command-line arguments and store user-defined values.
"""
import os
import glob
import yaml
import mimetypes
import zipfile as zf
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
import amfinder_log as AmfLog
HEADERS = [['Y', 'N', 'X'], ['A', 'V', 'H', 'I']]
PAR = {
'run_mode': None,
'level': 1,
'model': None,
'tile_edge': 126,
'input_files': ['*.jpg'],
'batch_size': 32,
'learning_rate': 0.001,
'drop': True,
'epochs': 100,
'vfrac': 15,
'data_augm': False,
'save_augmented_tiles': 0,
'summary': False,
'patience': 12,
'outdir': os.getcwd(),
'header': HEADERS[0],
'generator': None,
'discriminator': None,
'super_resolution': False,
'save_conv2d_kernels': False,
'save_conv2d_outputs': False,
'colormap': 'plasma',
'monitors': {
'csv_logger': None,
'early_stopping': None,
'reduce_lr_on_plateau': None,
'model_checkpoint': None,
}
}
APP_PATH = os.path.dirname(os.path.realpath(__file__))
def get_appdir():
""" Returns the application directory. """
return APP_PATH
def tsv_name():
"""
Return the TSV file corresponding to the current annotation level.
"""
if PAR['level'] == 1:
return 'col.tsv'
else:
return 'myc.tsv'
def get(id):
"""
Retrieve application settings.
:param id: Unique identifier.
"""
id = id.lower()
if id in PAR:
# Special case, look into a specific folder.
if id in ['generator', 'discriminator', 'model'] and \
PAR[id] is not None:
return os.path.join(get_appdir(),
'trained_networks',
os.path.basename(PAR[id]))
else:
return PAR[id]
elif id in PAR['monitors']:
return PAR['monitors'][id]
else:
AmfLog.warning(f'Unknown parameter {id}')
return None
def colonization():
"""
Indicate whether the current level is level 1 (colonization).
"""
return get('level') == 1
def intra_struct():
"""
Indicate whether the current level is level 2 (AM fungal structures).
"""
return get('level') == 2
def set(id, value, create=False):
"""
Updates application settings.
:param id: unique identifier.
:param value: value to store.
:param create: create id if it does not exist (optional).
"""
if value is None:
return
else:
id = id.lower()
if id in PAR:
PAR[id] = value
if id == 'level':
PAR['header'] = HEADERS[int(value == 2)] # Ensures 0 or 1.
elif id in PAR['monitors']:
PAR['monitors'][id] = value
elif create:
PAR[id] = value
else:
AmfLog.warning(f'Unknown parameter {id}')
def training_subparser(subparsers):
"""
Defines arguments used in training mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('train',
help='learns how to identify AMF structures.',
formatter_class=RawTextHelpFormatter)
x = PAR['batch_size']
parser.add_argument('-b', '--batch_size',
action='store', dest='batch_size', metavar='NUM', type=int, default=x,
help='training batch size.'
'\ndefault value: {}'.format(x))
x = PAR['drop']
parser.add_argument('-k', '--keep_background',
action='store_false', dest='drop', default=x,
help='keep all background tiles.'
'\nby default, downscale background to equilibrate classes.')
x = PAR['data_augm']
parser.add_argument('-a', '--data_augmentation',
action='store_true', dest='data_augm', default=x,
help='apply data augmentation (hue, chroma, saturation, etc.)'
'\nby default, data augmentation is not used.')
x = PAR['save_augmented_tiles']
parser.add_argument('-sa', '--save_augmented_tiles',
action='store', dest='save_augmented_tiles',
metavar='NUM', type=int, default=x,
help='save a subset of augmented tiles.'
'\nby default, does not save any tile.')
x = PAR['summary']
parser.add_argument('-s', '--summary',
action='store_true', dest='summary', default=x,
help='save CNN architecture (CNN graph and model summary)'
'\nby default, does not save any information.')
x = PAR['outdir']
parser.add_argument('-o', '--outdir',
action='store', dest='outdir', default=x,
help='folder where to save trained model and CNN architecture.'
'\ndefault: {}'.format(x))
x = PAR['epochs']
parser.add_argument('-e', '--epochs',
action='store', dest='epochs', metavar='NUM', type=int, default=x,
help='number of epochs to run.'
'\ndefault value: {}'.format(x))
x = PAR['patience']
parser.add_argument('-p', '--patience',
action='store', dest='patience', metavar='NUM', type=int, default=x,
help='number of epochs to wait before early stopping is triggered.'
'\ndefault value: {}'.format(x))
x = PAR['learning_rate']
parser.add_argument('-lr', '--learning_rate',
action='store', dest='learning_rate', metavar='NUM',
type=int, default=x,
help='learning rate used by the Adam optimizer.'
'\ndefault value: {}'.format(x))
x = PAR['vfrac']
parser.add_argument('-vf', '--validation_fraction',
action='store', dest='vfrac', metavar='N%', type=int, default=x,
help='Percentage of tiles used for validation.'
'\ndefault value: {}%%'.format(x))
level = parser.add_mutually_exclusive_group()
level.add_argument('-1', '--CNN1',
action='store_const', dest='level', const=1,
help='Train for root colonisation (default)')
level.add_argument('-2', '--CNN2',
action='store_const', dest='level', const=2,
help='Train for fungal hyphal structures.')
x = None
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained network to use as a basis for training.'
'\ndefault value: {}'.format(x))
parser.add_argument('-sr', '--super_resolution',
action='store_const', dest='super_resolution', const=True,
help='Apply super-resolution before predictions.'
'\ndefault value: no super-resolution.')
x = None
parser.add_argument('-g', '--generator',
action='store', dest='generator', metavar='H5', type=str, default=x,
help='name of the pre-trained generator.'
'\ndefault value: {}'.format(x))
x = None
parser.add_argument('-d', '--discriminator',
action='store', dest='discriminator', metavar='H5', type=str, default=x,
help='name of the pre-trained discriminator.'
'\ndefault value: {}'.format(x))
x = PAR['input_files']
parser.add_argument('image', nargs='*',
default=x,
help='plant root image to process.'
'\ndefault value: {}'.format(x))
return parser
def prediction_subparser(subparsers):
"""
Defines arguments used in prediction mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('predict',
help='Runs AMFinder in prediction mode.',
formatter_class=RawTextHelpFormatter)
x = PAR['tile_edge']
parser.add_argument('-t', '--tile_size',
action='store', dest='edge', type=int, default=x,
help='Tile size (in pixels) used for image segmentation.'
'\ndefault value: {} pixels'.format(x))
parser.add_argument('-sr', '--super_resolution',
action='store_const', dest='super_resolution', const=True,
help='Apply super-resolution before predictions.'
'\ndefault value: no super-resolution.')
x = 'SRGANGenv1beta.h5'
parser.add_argument('-g', '--generator',
action='store', dest='generator', metavar='H5', type=str, default=x,
help='name of the pre-trained generator.'
'\ndefault value: {}'.format(x))
x = PAR['colormap']
parser.add_argument('-map', '--colormap',
action='store', dest='colormap', metavar='id', type=str, default=x,
help='Name of the colormap used to display conv2d outputs and kernels.'
'\ndefault value: {}'.format(x))
x = 'CNN1v2.h5'
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained model to use for predictions.'
'\ndefault value: {}'.format(x))
parser.add_argument('-so', '--save_conv2d_outputs',
action='store_const', dest='save_conv2d_outputs', const=True,
help='save conv2d outputs in a separate zip file.'
'\ndefault value: False')
parser.add_argument('-sk', '--save_conv2d_kernels',
action='store_const', dest='save_conv2d_kernels', const=True,
help='save convolution kernels in a separate zip file (takes time).'
'\ndefault value: False')
x = PAR['input_files']
parser.add_argument('image', nargs='*', default=x,
help='plant root scan to be processed.'
'\ndefault value: {}'.format(x))
return parser
def diagnostic_subparser(subparsers):
"""
Defines arguments used in diagnostic mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('diagnose',
help='Runs AMFinder in diagnostic mode.',
formatter_class=RawTextHelpFormatter)
x = 'CNN1_pretrained_2021-01-18.h5'
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained model to use for diagnostic.'
'\ndefault value: {}'.format(x))
x = PAR['input_files']
parser.add_argument('image', nargs='*', default=x,
help='plant root scan to be processed.'
'\ndefault value: {}'.format(x))
return parser
def | ():
"""
Builds AMFinder command-line parser.
"""
main = ArgumentParser(description='AMFinder command-line arguments.',
allow_abbrev=False,
formatter_class=RawTextHelpFormatter)
subparsers = main.add_subparsers(dest='run_mode', required=True,
help='action to be performed.')
_ = training_subparser(subparsers)
_ = prediction_subparser(subparsers)
_ = diagnostic_subparser(subparsers)
return main
def abspath(files):
"""
Returns absolute paths to input files.
:param files: Raw list of input file names (can contain wildcards).
"""
files = sum([glob.glob(x) for x in files], [])
return [os.path.abspath(x) for x in files]
def update_tile_edge(path):
"""
Import image settings (currently tile edge).
:param path: path to the input image.
"""
zfile = os.path.splitext(path)[0] + '.zip'
if zf.is_zipfile(zfile):
with zf.ZipFile(zfile) as z:
if 'settings.json' in z.namelist():
x = z.read('settings.json').decode('utf-8')
x = yaml.safe_load(x)
set('tile_edge', x['tile_edge'])
return get('tile_edge')
def get_input_files():
"""
Filter input file list and keep valid JPEG or TIFF images.
"""
raw_list = abspath(get('input_files'))
valid_types = ['image/jpeg', 'image/tiff']
images = [x for x in raw_list if mimetypes.guess_type(x)[0] in valid_types]
print('* Input images: {}'.format(len(images)))
return images
def initialize():
"""
Read command line and store user settings.
"""
parser = build_arg_parser()
par = parser.parse_known_args()[0]
# Main arguments.
set('run_mode', par.run_mode)
set('input_files', par.image)
# Sub-parser specific arguments.
if par.run_mode == 'train':
set('batch_size', par.batch_size)
set('drop', par.drop)
set('epochs', par.epochs)
set('model', par.model)
set('level', par.level)
set('vfrac', par.vfrac)
set('data_augm', par.data_augm)
set('summary', par.summary)
set('outdir', par.outdir)
# Parameters associated with super-resolution.
set('super_resolution', par.super_resolution)
set('generator', par.generator)
set('discriminator', par.discriminator)
elif par.run_mode == 'predict':
set('tile_edge', par.edge)
set('model', par.model)
set('save_conv2d_kernels', par.save_conv2d_kernels)
set('save_conv2d_outputs', par.save_conv2d_outputs)
set('colormap', par.colormap)
# Parameters associated with super-resolution.
set('super_resolution', par.super_resolution)
set('generator', par.generator)
elif par.run_mode == 'diagnose':
set('model', par.model)
else:
pass
| build_arg_parser | identifier_name |
amfinder_config.py | # AMFinder - amfinder_config.py
#
# MIT License
# Copyright (c) 2021 Edouard Evangelisti, Carl Turner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
AMFinder configuration module.
Read command-line arguments and store user settings.
Variables
------------
HEADERS - Table headers for the different annotation levels.
PAR - User settings.
Functions
------------
tsv_name - Return the TSV file corresponding to the current annotation level.
get - Retrieve the value associated with the given parameter ID.
colonization - Indicate whether the current level is level 1 (colonization).
intra_struct - Indicate whether the current level is level 2 (structures).
set - Assign a new value to the given parameter ID.
training_subparser - Define the command-line parser used in training mode.
prediction_subparser - Define the command-line parser used in prediction mode.
build_arg_parser - Build the full command-line parser.
import_settings - Read tile size from `settings.json`.
get_input_files - Return the list of vaid input images (based on MIME type).
initialize - Read command-line arguments and store user-defined values.
"""
import os
import glob
import yaml
import mimetypes
import zipfile as zf
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
import amfinder_log as AmfLog
HEADERS = [['Y', 'N', 'X'], ['A', 'V', 'H', 'I']]
PAR = {
'run_mode': None,
'level': 1,
'model': None,
'tile_edge': 126,
'input_files': ['*.jpg'],
'batch_size': 32,
'learning_rate': 0.001,
'drop': True,
'epochs': 100,
'vfrac': 15,
'data_augm': False,
'save_augmented_tiles': 0,
'summary': False,
'patience': 12,
'outdir': os.getcwd(),
'header': HEADERS[0],
'generator': None,
'discriminator': None,
'super_resolution': False,
'save_conv2d_kernels': False,
'save_conv2d_outputs': False,
'colormap': 'plasma',
'monitors': {
'csv_logger': None,
'early_stopping': None,
'reduce_lr_on_plateau': None,
'model_checkpoint': None,
}
}
APP_PATH = os.path.dirname(os.path.realpath(__file__))
def get_appdir():
""" Returns the application directory. """
return APP_PATH
def tsv_name():
"""
Return the TSV file corresponding to the current annotation level.
"""
if PAR['level'] == 1:
return 'col.tsv'
else:
return 'myc.tsv'
def get(id):
"""
Retrieve application settings.
:param id: Unique identifier.
"""
id = id.lower()
if id in PAR:
# Special case, look into a specific folder.
if id in ['generator', 'discriminator', 'model'] and \
PAR[id] is not None:
return os.path.join(get_appdir(),
'trained_networks',
os.path.basename(PAR[id]))
else:
return PAR[id]
elif id in PAR['monitors']:
return PAR['monitors'][id]
else:
AmfLog.warning(f'Unknown parameter {id}')
return None
def colonization():
"""
Indicate whether the current level is level 1 (colonization).
"""
return get('level') == 1
def intra_struct():
"""
Indicate whether the current level is level 2 (AM fungal structures).
"""
return get('level') == 2
def set(id, value, create=False):
"""
Updates application settings.
:param id: unique identifier.
:param value: value to store.
:param create: create id if it does not exist (optional).
"""
if value is None:
return
else:
id = id.lower()
if id in PAR:
PAR[id] = value
if id == 'level':
PAR['header'] = HEADERS[int(value == 2)] # Ensures 0 or 1.
elif id in PAR['monitors']:
PAR['monitors'][id] = value
elif create:
PAR[id] = value
else:
AmfLog.warning(f'Unknown parameter {id}')
def training_subparser(subparsers):
"""
Defines arguments used in training mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('train',
help='learns how to identify AMF structures.',
formatter_class=RawTextHelpFormatter)
x = PAR['batch_size']
parser.add_argument('-b', '--batch_size',
action='store', dest='batch_size', metavar='NUM', type=int, default=x,
help='training batch size.'
'\ndefault value: {}'.format(x))
x = PAR['drop']
parser.add_argument('-k', '--keep_background',
action='store_false', dest='drop', default=x,
help='keep all background tiles.'
'\nby default, downscale background to equilibrate classes.')
x = PAR['data_augm']
parser.add_argument('-a', '--data_augmentation',
action='store_true', dest='data_augm', default=x,
help='apply data augmentation (hue, chroma, saturation, etc.)'
'\nby default, data augmentation is not used.')
x = PAR['save_augmented_tiles']
parser.add_argument('-sa', '--save_augmented_tiles',
action='store', dest='save_augmented_tiles',
metavar='NUM', type=int, default=x,
help='save a subset of augmented tiles.'
'\nby default, does not save any tile.')
x = PAR['summary']
parser.add_argument('-s', '--summary',
action='store_true', dest='summary', default=x,
help='save CNN architecture (CNN graph and model summary)'
'\nby default, does not save any information.')
x = PAR['outdir']
parser.add_argument('-o', '--outdir',
action='store', dest='outdir', default=x,
help='folder where to save trained model and CNN architecture.'
'\ndefault: {}'.format(x))
x = PAR['epochs']
parser.add_argument('-e', '--epochs',
action='store', dest='epochs', metavar='NUM', type=int, default=x,
help='number of epochs to run.'
'\ndefault value: {}'.format(x))
x = PAR['patience']
parser.add_argument('-p', '--patience',
action='store', dest='patience', metavar='NUM', type=int, default=x,
help='number of epochs to wait before early stopping is triggered.'
'\ndefault value: {}'.format(x))
x = PAR['learning_rate']
parser.add_argument('-lr', '--learning_rate',
action='store', dest='learning_rate', metavar='NUM',
type=int, default=x,
help='learning rate used by the Adam optimizer.'
'\ndefault value: {}'.format(x))
x = PAR['vfrac']
parser.add_argument('-vf', '--validation_fraction',
action='store', dest='vfrac', metavar='N%', type=int, default=x,
help='Percentage of tiles used for validation.'
'\ndefault value: {}%%'.format(x))
level = parser.add_mutually_exclusive_group()
level.add_argument('-1', '--CNN1',
action='store_const', dest='level', const=1,
help='Train for root colonisation (default)')
level.add_argument('-2', '--CNN2',
action='store_const', dest='level', const=2,
help='Train for fungal hyphal structures.')
x = None
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained network to use as a basis for training.'
'\ndefault value: {}'.format(x))
parser.add_argument('-sr', '--super_resolution',
action='store_const', dest='super_resolution', const=True,
help='Apply super-resolution before predictions.'
'\ndefault value: no super-resolution.')
x = None
parser.add_argument('-g', '--generator',
action='store', dest='generator', metavar='H5', type=str, default=x,
help='name of the pre-trained generator.'
'\ndefault value: {}'.format(x))
x = None
parser.add_argument('-d', '--discriminator',
action='store', dest='discriminator', metavar='H5', type=str, default=x,
help='name of the pre-trained discriminator.'
'\ndefault value: {}'.format(x))
x = PAR['input_files']
parser.add_argument('image', nargs='*',
default=x,
help='plant root image to process.'
'\ndefault value: {}'.format(x))
return parser
def prediction_subparser(subparsers):
"""
Defines arguments used in prediction mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('predict',
help='Runs AMFinder in prediction mode.',
formatter_class=RawTextHelpFormatter)
x = PAR['tile_edge']
parser.add_argument('-t', '--tile_size',
action='store', dest='edge', type=int, default=x,
help='Tile size (in pixels) used for image segmentation.'
'\ndefault value: {} pixels'.format(x))
parser.add_argument('-sr', '--super_resolution',
action='store_const', dest='super_resolution', const=True,
help='Apply super-resolution before predictions.'
'\ndefault value: no super-resolution.')
x = 'SRGANGenv1beta.h5'
parser.add_argument('-g', '--generator',
action='store', dest='generator', metavar='H5', type=str, default=x,
help='name of the pre-trained generator.'
'\ndefault value: {}'.format(x))
x = PAR['colormap']
parser.add_argument('-map', '--colormap',
action='store', dest='colormap', metavar='id', type=str, default=x,
help='Name of the colormap used to display conv2d outputs and kernels.'
'\ndefault value: {}'.format(x))
x = 'CNN1v2.h5'
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained model to use for predictions.'
'\ndefault value: {}'.format(x))
parser.add_argument('-so', '--save_conv2d_outputs',
action='store_const', dest='save_conv2d_outputs', const=True,
help='save conv2d outputs in a separate zip file.'
'\ndefault value: False')
parser.add_argument('-sk', '--save_conv2d_kernels',
action='store_const', dest='save_conv2d_kernels', const=True,
help='save convolution kernels in a separate zip file (takes time).'
'\ndefault value: False')
x = PAR['input_files']
parser.add_argument('image', nargs='*', default=x,
help='plant root scan to be processed.'
'\ndefault value: {}'.format(x))
return parser
def diagnostic_subparser(subparsers):
"""
Defines arguments used in diagnostic mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('diagnose',
help='Runs AMFinder in diagnostic mode.',
formatter_class=RawTextHelpFormatter)
x = 'CNN1_pretrained_2021-01-18.h5'
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained model to use for diagnostic.'
'\ndefault value: {}'.format(x))
x = PAR['input_files']
parser.add_argument('image', nargs='*', default=x,
help='plant root scan to be processed.'
'\ndefault value: {}'.format(x))
return parser
def build_arg_parser():
"""
Builds AMFinder command-line parser.
"""
main = ArgumentParser(description='AMFinder command-line arguments.',
allow_abbrev=False,
formatter_class=RawTextHelpFormatter)
subparsers = main.add_subparsers(dest='run_mode', required=True,
help='action to be performed.')
_ = training_subparser(subparsers)
_ = prediction_subparser(subparsers)
_ = diagnostic_subparser(subparsers)
return main
def abspath(files):
""" | files = sum([glob.glob(x) for x in files], [])
return [os.path.abspath(x) for x in files]
def update_tile_edge(path):
"""
Import image settings (currently tile edge).
:param path: path to the input image.
"""
zfile = os.path.splitext(path)[0] + '.zip'
if zf.is_zipfile(zfile):
with zf.ZipFile(zfile) as z:
if 'settings.json' in z.namelist():
x = z.read('settings.json').decode('utf-8')
x = yaml.safe_load(x)
set('tile_edge', x['tile_edge'])
return get('tile_edge')
def get_input_files():
"""
Filter input file list and keep valid JPEG or TIFF images.
"""
raw_list = abspath(get('input_files'))
valid_types = ['image/jpeg', 'image/tiff']
images = [x for x in raw_list if mimetypes.guess_type(x)[0] in valid_types]
print('* Input images: {}'.format(len(images)))
return images
def initialize():
"""
Read command line and store user settings.
"""
parser = build_arg_parser()
par = parser.parse_known_args()[0]
# Main arguments.
set('run_mode', par.run_mode)
set('input_files', par.image)
# Sub-parser specific arguments.
if par.run_mode == 'train':
set('batch_size', par.batch_size)
set('drop', par.drop)
set('epochs', par.epochs)
set('model', par.model)
set('level', par.level)
set('vfrac', par.vfrac)
set('data_augm', par.data_augm)
set('summary', par.summary)
set('outdir', par.outdir)
# Parameters associated with super-resolution.
set('super_resolution', par.super_resolution)
set('generator', par.generator)
set('discriminator', par.discriminator)
elif par.run_mode == 'predict':
set('tile_edge', par.edge)
set('model', par.model)
set('save_conv2d_kernels', par.save_conv2d_kernels)
set('save_conv2d_outputs', par.save_conv2d_outputs)
set('colormap', par.colormap)
# Parameters associated with super-resolution.
set('super_resolution', par.super_resolution)
set('generator', par.generator)
elif par.run_mode == 'diagnose':
set('model', par.model)
else:
pass | Returns absolute paths to input files.
:param files: Raw list of input file names (can contain wildcards).
"""
| random_line_split |
amfinder_config.py | # AMFinder - amfinder_config.py
#
# MIT License
# Copyright (c) 2021 Edouard Evangelisti, Carl Turner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
AMFinder configuration module.
Read command-line arguments and store user settings.
Variables
------------
HEADERS - Table headers for the different annotation levels.
PAR - User settings.
Functions
------------
tsv_name - Return the TSV file corresponding to the current annotation level.
get - Retrieve the value associated with the given parameter ID.
colonization - Indicate whether the current level is level 1 (colonization).
intra_struct - Indicate whether the current level is level 2 (structures).
set - Assign a new value to the given parameter ID.
training_subparser - Define the command-line parser used in training mode.
prediction_subparser - Define the command-line parser used in prediction mode.
build_arg_parser - Build the full command-line parser.
import_settings - Read tile size from `settings.json`.
get_input_files - Return the list of vaid input images (based on MIME type).
initialize - Read command-line arguments and store user-defined values.
"""
import os
import glob
import yaml
import mimetypes
import zipfile as zf
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
import amfinder_log as AmfLog
HEADERS = [['Y', 'N', 'X'], ['A', 'V', 'H', 'I']]
PAR = {
'run_mode': None,
'level': 1,
'model': None,
'tile_edge': 126,
'input_files': ['*.jpg'],
'batch_size': 32,
'learning_rate': 0.001,
'drop': True,
'epochs': 100,
'vfrac': 15,
'data_augm': False,
'save_augmented_tiles': 0,
'summary': False,
'patience': 12,
'outdir': os.getcwd(),
'header': HEADERS[0],
'generator': None,
'discriminator': None,
'super_resolution': False,
'save_conv2d_kernels': False,
'save_conv2d_outputs': False,
'colormap': 'plasma',
'monitors': {
'csv_logger': None,
'early_stopping': None,
'reduce_lr_on_plateau': None,
'model_checkpoint': None,
}
}
APP_PATH = os.path.dirname(os.path.realpath(__file__))
def get_appdir():
""" Returns the application directory. """
return APP_PATH
def tsv_name():
"""
Return the TSV file corresponding to the current annotation level.
"""
if PAR['level'] == 1:
return 'col.tsv'
else:
return 'myc.tsv'
def get(id):
"""
Retrieve application settings.
:param id: Unique identifier.
"""
id = id.lower()
if id in PAR:
# Special case, look into a specific folder.
if id in ['generator', 'discriminator', 'model'] and \
PAR[id] is not None:
return os.path.join(get_appdir(),
'trained_networks',
os.path.basename(PAR[id]))
else:
return PAR[id]
elif id in PAR['monitors']:
return PAR['monitors'][id]
else:
AmfLog.warning(f'Unknown parameter {id}')
return None
def colonization():
"""
Indicate whether the current level is level 1 (colonization).
"""
return get('level') == 1
def intra_struct():
"""
Indicate whether the current level is level 2 (AM fungal structures).
"""
return get('level') == 2
def set(id, value, create=False):
"""
Updates application settings.
:param id: unique identifier.
:param value: value to store.
:param create: create id if it does not exist (optional).
"""
if value is None:
return
else:
id = id.lower()
if id in PAR:
|
elif id in PAR['monitors']:
PAR['monitors'][id] = value
elif create:
PAR[id] = value
else:
AmfLog.warning(f'Unknown parameter {id}')
def training_subparser(subparsers):
"""
Defines arguments used in training mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('train',
help='learns how to identify AMF structures.',
formatter_class=RawTextHelpFormatter)
x = PAR['batch_size']
parser.add_argument('-b', '--batch_size',
action='store', dest='batch_size', metavar='NUM', type=int, default=x,
help='training batch size.'
'\ndefault value: {}'.format(x))
x = PAR['drop']
parser.add_argument('-k', '--keep_background',
action='store_false', dest='drop', default=x,
help='keep all background tiles.'
'\nby default, downscale background to equilibrate classes.')
x = PAR['data_augm']
parser.add_argument('-a', '--data_augmentation',
action='store_true', dest='data_augm', default=x,
help='apply data augmentation (hue, chroma, saturation, etc.)'
'\nby default, data augmentation is not used.')
x = PAR['save_augmented_tiles']
parser.add_argument('-sa', '--save_augmented_tiles',
action='store', dest='save_augmented_tiles',
metavar='NUM', type=int, default=x,
help='save a subset of augmented tiles.'
'\nby default, does not save any tile.')
x = PAR['summary']
parser.add_argument('-s', '--summary',
action='store_true', dest='summary', default=x,
help='save CNN architecture (CNN graph and model summary)'
'\nby default, does not save any information.')
x = PAR['outdir']
parser.add_argument('-o', '--outdir',
action='store', dest='outdir', default=x,
help='folder where to save trained model and CNN architecture.'
'\ndefault: {}'.format(x))
x = PAR['epochs']
parser.add_argument('-e', '--epochs',
action='store', dest='epochs', metavar='NUM', type=int, default=x,
help='number of epochs to run.'
'\ndefault value: {}'.format(x))
x = PAR['patience']
parser.add_argument('-p', '--patience',
action='store', dest='patience', metavar='NUM', type=int, default=x,
help='number of epochs to wait before early stopping is triggered.'
'\ndefault value: {}'.format(x))
x = PAR['learning_rate']
parser.add_argument('-lr', '--learning_rate',
action='store', dest='learning_rate', metavar='NUM',
type=int, default=x,
help='learning rate used by the Adam optimizer.'
'\ndefault value: {}'.format(x))
x = PAR['vfrac']
parser.add_argument('-vf', '--validation_fraction',
action='store', dest='vfrac', metavar='N%', type=int, default=x,
help='Percentage of tiles used for validation.'
'\ndefault value: {}%%'.format(x))
level = parser.add_mutually_exclusive_group()
level.add_argument('-1', '--CNN1',
action='store_const', dest='level', const=1,
help='Train for root colonisation (default)')
level.add_argument('-2', '--CNN2',
action='store_const', dest='level', const=2,
help='Train for fungal hyphal structures.')
x = None
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained network to use as a basis for training.'
'\ndefault value: {}'.format(x))
parser.add_argument('-sr', '--super_resolution',
action='store_const', dest='super_resolution', const=True,
help='Apply super-resolution before predictions.'
'\ndefault value: no super-resolution.')
x = None
parser.add_argument('-g', '--generator',
action='store', dest='generator', metavar='H5', type=str, default=x,
help='name of the pre-trained generator.'
'\ndefault value: {}'.format(x))
x = None
parser.add_argument('-d', '--discriminator',
action='store', dest='discriminator', metavar='H5', type=str, default=x,
help='name of the pre-trained discriminator.'
'\ndefault value: {}'.format(x))
x = PAR['input_files']
parser.add_argument('image', nargs='*',
default=x,
help='plant root image to process.'
'\ndefault value: {}'.format(x))
return parser
def prediction_subparser(subparsers):
"""
Defines arguments used in prediction mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('predict',
help='Runs AMFinder in prediction mode.',
formatter_class=RawTextHelpFormatter)
x = PAR['tile_edge']
parser.add_argument('-t', '--tile_size',
action='store', dest='edge', type=int, default=x,
help='Tile size (in pixels) used for image segmentation.'
'\ndefault value: {} pixels'.format(x))
parser.add_argument('-sr', '--super_resolution',
action='store_const', dest='super_resolution', const=True,
help='Apply super-resolution before predictions.'
'\ndefault value: no super-resolution.')
x = 'SRGANGenv1beta.h5'
parser.add_argument('-g', '--generator',
action='store', dest='generator', metavar='H5', type=str, default=x,
help='name of the pre-trained generator.'
'\ndefault value: {}'.format(x))
x = PAR['colormap']
parser.add_argument('-map', '--colormap',
action='store', dest='colormap', metavar='id', type=str, default=x,
help='Name of the colormap used to display conv2d outputs and kernels.'
'\ndefault value: {}'.format(x))
x = 'CNN1v2.h5'
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained model to use for predictions.'
'\ndefault value: {}'.format(x))
parser.add_argument('-so', '--save_conv2d_outputs',
action='store_const', dest='save_conv2d_outputs', const=True,
help='save conv2d outputs in a separate zip file.'
'\ndefault value: False')
parser.add_argument('-sk', '--save_conv2d_kernels',
action='store_const', dest='save_conv2d_kernels', const=True,
help='save convolution kernels in a separate zip file (takes time).'
'\ndefault value: False')
x = PAR['input_files']
parser.add_argument('image', nargs='*', default=x,
help='plant root scan to be processed.'
'\ndefault value: {}'.format(x))
return parser
def diagnostic_subparser(subparsers):
"""
Defines arguments used in diagnostic mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('diagnose',
help='Runs AMFinder in diagnostic mode.',
formatter_class=RawTextHelpFormatter)
x = 'CNN1_pretrained_2021-01-18.h5'
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained model to use for diagnostic.'
'\ndefault value: {}'.format(x))
x = PAR['input_files']
parser.add_argument('image', nargs='*', default=x,
help='plant root scan to be processed.'
'\ndefault value: {}'.format(x))
return parser
def build_arg_parser():
"""
Builds AMFinder command-line parser.
"""
main = ArgumentParser(description='AMFinder command-line arguments.',
allow_abbrev=False,
formatter_class=RawTextHelpFormatter)
subparsers = main.add_subparsers(dest='run_mode', required=True,
help='action to be performed.')
_ = training_subparser(subparsers)
_ = prediction_subparser(subparsers)
_ = diagnostic_subparser(subparsers)
return main
def abspath(files):
"""
Returns absolute paths to input files.
:param files: Raw list of input file names (can contain wildcards).
"""
files = sum([glob.glob(x) for x in files], [])
return [os.path.abspath(x) for x in files]
def update_tile_edge(path):
"""
Import image settings (currently tile edge).
:param path: path to the input image.
"""
zfile = os.path.splitext(path)[0] + '.zip'
if zf.is_zipfile(zfile):
with zf.ZipFile(zfile) as z:
if 'settings.json' in z.namelist():
x = z.read('settings.json').decode('utf-8')
x = yaml.safe_load(x)
set('tile_edge', x['tile_edge'])
return get('tile_edge')
def get_input_files():
"""
Filter input file list and keep valid JPEG or TIFF images.
"""
raw_list = abspath(get('input_files'))
valid_types = ['image/jpeg', 'image/tiff']
images = [x for x in raw_list if mimetypes.guess_type(x)[0] in valid_types]
print('* Input images: {}'.format(len(images)))
return images
def initialize():
"""
Read command line and store user settings.
"""
parser = build_arg_parser()
par = parser.parse_known_args()[0]
# Main arguments.
set('run_mode', par.run_mode)
set('input_files', par.image)
# Sub-parser specific arguments.
if par.run_mode == 'train':
set('batch_size', par.batch_size)
set('drop', par.drop)
set('epochs', par.epochs)
set('model', par.model)
set('level', par.level)
set('vfrac', par.vfrac)
set('data_augm', par.data_augm)
set('summary', par.summary)
set('outdir', par.outdir)
# Parameters associated with super-resolution.
set('super_resolution', par.super_resolution)
set('generator', par.generator)
set('discriminator', par.discriminator)
elif par.run_mode == 'predict':
set('tile_edge', par.edge)
set('model', par.model)
set('save_conv2d_kernels', par.save_conv2d_kernels)
set('save_conv2d_outputs', par.save_conv2d_outputs)
set('colormap', par.colormap)
# Parameters associated with super-resolution.
set('super_resolution', par.super_resolution)
set('generator', par.generator)
elif par.run_mode == 'diagnose':
set('model', par.model)
else:
pass
| PAR[id] = value
if id == 'level':
PAR['header'] = HEADERS[int(value == 2)] # Ensures 0 or 1. | conditional_block |
amfinder_config.py | # AMFinder - amfinder_config.py
#
# MIT License
# Copyright (c) 2021 Edouard Evangelisti, Carl Turner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
AMFinder configuration module.
Read command-line arguments and store user settings.
Variables
------------
HEADERS - Table headers for the different annotation levels.
PAR - User settings.
Functions
------------
tsv_name - Return the TSV file corresponding to the current annotation level.
get - Retrieve the value associated with the given parameter ID.
colonization - Indicate whether the current level is level 1 (colonization).
intra_struct - Indicate whether the current level is level 2 (structures).
set - Assign a new value to the given parameter ID.
training_subparser - Define the command-line parser used in training mode.
prediction_subparser - Define the command-line parser used in prediction mode.
build_arg_parser - Build the full command-line parser.
import_settings - Read tile size from `settings.json`.
get_input_files - Return the list of vaid input images (based on MIME type).
initialize - Read command-line arguments and store user-defined values.
"""
import os
import glob
import yaml
import mimetypes
import zipfile as zf
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
import amfinder_log as AmfLog
HEADERS = [['Y', 'N', 'X'], ['A', 'V', 'H', 'I']]
PAR = {
'run_mode': None,
'level': 1,
'model': None,
'tile_edge': 126,
'input_files': ['*.jpg'],
'batch_size': 32,
'learning_rate': 0.001,
'drop': True,
'epochs': 100,
'vfrac': 15,
'data_augm': False,
'save_augmented_tiles': 0,
'summary': False,
'patience': 12,
'outdir': os.getcwd(),
'header': HEADERS[0],
'generator': None,
'discriminator': None,
'super_resolution': False,
'save_conv2d_kernels': False,
'save_conv2d_outputs': False,
'colormap': 'plasma',
'monitors': {
'csv_logger': None,
'early_stopping': None,
'reduce_lr_on_plateau': None,
'model_checkpoint': None,
}
}
APP_PATH = os.path.dirname(os.path.realpath(__file__))
def get_appdir():
|
def tsv_name():
"""
Return the TSV file corresponding to the current annotation level.
"""
if PAR['level'] == 1:
return 'col.tsv'
else:
return 'myc.tsv'
def get(id):
"""
Retrieve application settings.
:param id: Unique identifier.
"""
id = id.lower()
if id in PAR:
# Special case, look into a specific folder.
if id in ['generator', 'discriminator', 'model'] and \
PAR[id] is not None:
return os.path.join(get_appdir(),
'trained_networks',
os.path.basename(PAR[id]))
else:
return PAR[id]
elif id in PAR['monitors']:
return PAR['monitors'][id]
else:
AmfLog.warning(f'Unknown parameter {id}')
return None
def colonization():
"""
Indicate whether the current level is level 1 (colonization).
"""
return get('level') == 1
def intra_struct():
"""
Indicate whether the current level is level 2 (AM fungal structures).
"""
return get('level') == 2
def set(id, value, create=False):
"""
Updates application settings.
:param id: unique identifier.
:param value: value to store.
:param create: create id if it does not exist (optional).
"""
if value is None:
return
else:
id = id.lower()
if id in PAR:
PAR[id] = value
if id == 'level':
PAR['header'] = HEADERS[int(value == 2)] # Ensures 0 or 1.
elif id in PAR['monitors']:
PAR['monitors'][id] = value
elif create:
PAR[id] = value
else:
AmfLog.warning(f'Unknown parameter {id}')
def training_subparser(subparsers):
"""
Defines arguments used in training mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('train',
help='learns how to identify AMF structures.',
formatter_class=RawTextHelpFormatter)
x = PAR['batch_size']
parser.add_argument('-b', '--batch_size',
action='store', dest='batch_size', metavar='NUM', type=int, default=x,
help='training batch size.'
'\ndefault value: {}'.format(x))
x = PAR['drop']
parser.add_argument('-k', '--keep_background',
action='store_false', dest='drop', default=x,
help='keep all background tiles.'
'\nby default, downscale background to equilibrate classes.')
x = PAR['data_augm']
parser.add_argument('-a', '--data_augmentation',
action='store_true', dest='data_augm', default=x,
help='apply data augmentation (hue, chroma, saturation, etc.)'
'\nby default, data augmentation is not used.')
x = PAR['save_augmented_tiles']
parser.add_argument('-sa', '--save_augmented_tiles',
action='store', dest='save_augmented_tiles',
metavar='NUM', type=int, default=x,
help='save a subset of augmented tiles.'
'\nby default, does not save any tile.')
x = PAR['summary']
parser.add_argument('-s', '--summary',
action='store_true', dest='summary', default=x,
help='save CNN architecture (CNN graph and model summary)'
'\nby default, does not save any information.')
x = PAR['outdir']
parser.add_argument('-o', '--outdir',
action='store', dest='outdir', default=x,
help='folder where to save trained model and CNN architecture.'
'\ndefault: {}'.format(x))
x = PAR['epochs']
parser.add_argument('-e', '--epochs',
action='store', dest='epochs', metavar='NUM', type=int, default=x,
help='number of epochs to run.'
'\ndefault value: {}'.format(x))
x = PAR['patience']
parser.add_argument('-p', '--patience',
action='store', dest='patience', metavar='NUM', type=int, default=x,
help='number of epochs to wait before early stopping is triggered.'
'\ndefault value: {}'.format(x))
x = PAR['learning_rate']
parser.add_argument('-lr', '--learning_rate',
action='store', dest='learning_rate', metavar='NUM',
type=int, default=x,
help='learning rate used by the Adam optimizer.'
'\ndefault value: {}'.format(x))
x = PAR['vfrac']
parser.add_argument('-vf', '--validation_fraction',
action='store', dest='vfrac', metavar='N%', type=int, default=x,
help='Percentage of tiles used for validation.'
'\ndefault value: {}%%'.format(x))
level = parser.add_mutually_exclusive_group()
level.add_argument('-1', '--CNN1',
action='store_const', dest='level', const=1,
help='Train for root colonisation (default)')
level.add_argument('-2', '--CNN2',
action='store_const', dest='level', const=2,
help='Train for fungal hyphal structures.')
x = None
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained network to use as a basis for training.'
'\ndefault value: {}'.format(x))
parser.add_argument('-sr', '--super_resolution',
action='store_const', dest='super_resolution', const=True,
help='Apply super-resolution before predictions.'
'\ndefault value: no super-resolution.')
x = None
parser.add_argument('-g', '--generator',
action='store', dest='generator', metavar='H5', type=str, default=x,
help='name of the pre-trained generator.'
'\ndefault value: {}'.format(x))
x = None
parser.add_argument('-d', '--discriminator',
action='store', dest='discriminator', metavar='H5', type=str, default=x,
help='name of the pre-trained discriminator.'
'\ndefault value: {}'.format(x))
x = PAR['input_files']
parser.add_argument('image', nargs='*',
default=x,
help='plant root image to process.'
'\ndefault value: {}'.format(x))
return parser
def prediction_subparser(subparsers):
"""
Defines arguments used in prediction mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('predict',
help='Runs AMFinder in prediction mode.',
formatter_class=RawTextHelpFormatter)
x = PAR['tile_edge']
parser.add_argument('-t', '--tile_size',
action='store', dest='edge', type=int, default=x,
help='Tile size (in pixels) used for image segmentation.'
'\ndefault value: {} pixels'.format(x))
parser.add_argument('-sr', '--super_resolution',
action='store_const', dest='super_resolution', const=True,
help='Apply super-resolution before predictions.'
'\ndefault value: no super-resolution.')
x = 'SRGANGenv1beta.h5'
parser.add_argument('-g', '--generator',
action='store', dest='generator', metavar='H5', type=str, default=x,
help='name of the pre-trained generator.'
'\ndefault value: {}'.format(x))
x = PAR['colormap']
parser.add_argument('-map', '--colormap',
action='store', dest='colormap', metavar='id', type=str, default=x,
help='Name of the colormap used to display conv2d outputs and kernels.'
'\ndefault value: {}'.format(x))
x = 'CNN1v2.h5'
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained model to use for predictions.'
'\ndefault value: {}'.format(x))
parser.add_argument('-so', '--save_conv2d_outputs',
action='store_const', dest='save_conv2d_outputs', const=True,
help='save conv2d outputs in a separate zip file.'
'\ndefault value: False')
parser.add_argument('-sk', '--save_conv2d_kernels',
action='store_const', dest='save_conv2d_kernels', const=True,
help='save convolution kernels in a separate zip file (takes time).'
'\ndefault value: False')
x = PAR['input_files']
parser.add_argument('image', nargs='*', default=x,
help='plant root scan to be processed.'
'\ndefault value: {}'.format(x))
return parser
def diagnostic_subparser(subparsers):
"""
Defines arguments used in diagnostic mode.
:param subparsers: subparser generator.
"""
parser = subparsers.add_parser('diagnose',
help='Runs AMFinder in diagnostic mode.',
formatter_class=RawTextHelpFormatter)
x = 'CNN1_pretrained_2021-01-18.h5'
parser.add_argument('-net', '--network',
action='store', dest='model', metavar='H5', type=str, default=x,
help='name of the pre-trained model to use for diagnostic.'
'\ndefault value: {}'.format(x))
x = PAR['input_files']
parser.add_argument('image', nargs='*', default=x,
help='plant root scan to be processed.'
'\ndefault value: {}'.format(x))
return parser
def build_arg_parser():
"""
Builds AMFinder command-line parser.
"""
main = ArgumentParser(description='AMFinder command-line arguments.',
allow_abbrev=False,
formatter_class=RawTextHelpFormatter)
subparsers = main.add_subparsers(dest='run_mode', required=True,
help='action to be performed.')
_ = training_subparser(subparsers)
_ = prediction_subparser(subparsers)
_ = diagnostic_subparser(subparsers)
return main
def abspath(files):
"""
Returns absolute paths to input files.
:param files: Raw list of input file names (can contain wildcards).
"""
files = sum([glob.glob(x) for x in files], [])
return [os.path.abspath(x) for x in files]
def update_tile_edge(path):
"""
Import image settings (currently tile edge).
:param path: path to the input image.
"""
zfile = os.path.splitext(path)[0] + '.zip'
if zf.is_zipfile(zfile):
with zf.ZipFile(zfile) as z:
if 'settings.json' in z.namelist():
x = z.read('settings.json').decode('utf-8')
x = yaml.safe_load(x)
set('tile_edge', x['tile_edge'])
return get('tile_edge')
def get_input_files():
"""
Filter input file list and keep valid JPEG or TIFF images.
"""
raw_list = abspath(get('input_files'))
valid_types = ['image/jpeg', 'image/tiff']
images = [x for x in raw_list if mimetypes.guess_type(x)[0] in valid_types]
print('* Input images: {}'.format(len(images)))
return images
def initialize():
"""
Read command line and store user settings.
"""
parser = build_arg_parser()
par = parser.parse_known_args()[0]
# Main arguments.
set('run_mode', par.run_mode)
set('input_files', par.image)
# Sub-parser specific arguments.
if par.run_mode == 'train':
set('batch_size', par.batch_size)
set('drop', par.drop)
set('epochs', par.epochs)
set('model', par.model)
set('level', par.level)
set('vfrac', par.vfrac)
set('data_augm', par.data_augm)
set('summary', par.summary)
set('outdir', par.outdir)
# Parameters associated with super-resolution.
set('super_resolution', par.super_resolution)
set('generator', par.generator)
set('discriminator', par.discriminator)
elif par.run_mode == 'predict':
set('tile_edge', par.edge)
set('model', par.model)
set('save_conv2d_kernels', par.save_conv2d_kernels)
set('save_conv2d_outputs', par.save_conv2d_outputs)
set('colormap', par.colormap)
# Parameters associated with super-resolution.
set('super_resolution', par.super_resolution)
set('generator', par.generator)
elif par.run_mode == 'diagnose':
set('model', par.model)
else:
pass
| """ Returns the application directory. """
return APP_PATH | identifier_body |
main.rs | #![feature(proc_macro)]
#![no_std]
extern crate cortex_m;
extern crate cortex_m_rtfm as rtfm;
extern crate stm32f30x_hal as hal;
extern crate ls010b7dh01;
extern crate rn4870;
extern crate embedded_graphics as graphics;
extern crate panic_abort;
extern crate nb;
mod display;
mod ble;
use cortex_m::asm;
use cortex_m::peripheral::syst::SystClkSource;
use rtfm::{app, Threshold};
use hal::prelude::*;
use hal::timer;
use hal::timer::Timer;
use hal::spi::Spi;
use hal::serial;
use hal::serial::Serial;
use hal::delay::Delay;
use hal::gpio::{gpiob, gpioc, Input, Output, PullUp, PushPull, AF7};
use ls010b7dh01::Ls010b7dh01;
use graphics::prelude::*;
use graphics::primitives::{Circle, Line, Rect};
use graphics::fonts::{Font, Font6x8};
use graphics::transform::Transform;
use graphics::image::Image1BPP;
app! {
device: hal::stm32f30x,
resources: {
static TOGGLE: bool = false;
static TIME: u8 = 0;
static STATE: State = State::Time;
static EXTI: hal::stm32f30x::EXTI;
static RESET_BLE: bool = true;
static REDRAW: bool = true;
static DRAW_BUFFER: [u8; 16] = [32; 16];
static BUFFER_POS: u8 = 0;
// Late Resources
static EXTCOMIN: display::Extcomin;
static DISPLAY: display::Display;
static BLE: ble::Ble;
},
tasks: {
TIM7: {
path: tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY],
},
SYS_TICK: {
path: sys_tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY,
TIME, BLE, RESET_BLE, STATE, REDRAW,
DRAW_BUFFER],
},
USART1_EXTI25: {
path: ble_message,
resources: [BLE, DRAW_BUFFER, BUFFER_POS],
},
EXTI9_5: {
enabled: true,
priority: 1,
path: exti9_5,
resources: [STATE, EXTI],
},
EXTI15_10: {
path: exti15_10,
resources: [STATE, EXTI],
},
},
}
pub enum State {
Ble,
Time,
Face,
}
fn init(mut p: init::Peripherals, _r: init::Resources) -> init::LateResources {
let mut rcc = p.device.RCC.constrain();
let mut flash = p.device.FLASH.constrain();
let mut gpioa = p.device.GPIOA.split(&mut rcc.ahb);
let mut gpiob = p.device.GPIOB.split(&mut rcc.ahb);
let mut gpioc = p.device.GPIOC.split(&mut rcc.ahb); | rcc.apb2.rstr().modify(|_, w| w.syscfgrst().clear_bit());
// Enable systick
p.core.SYST.set_clock_source(SystClkSource::Core);
p.core.SYST.set_reload(16_000_000);
p.core.SYST.enable_interrupt();
p.core.SYST.enable_counter();
// Set up our clocks & timer & delay
let clocks = rcc.cfgr.freeze(&mut flash.acr);
let mut timer = Timer::tim7(p.device.TIM7, 1.hz(), clocks, &mut rcc.apb1);
//timer.listen(timer::Event::TimeOut);
let mut delay = Delay::new(p.core.SYST, clocks);
// Set up our GPIO pins
let disp_en = gpiob.pb2.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let extcomin = gpiob.pb1.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let cs = gpiob.pb0.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let mut v5_en = gpioa.pa3.into_push_pull_output(
&mut gpioa.moder,
&mut gpioa.otyper,
);
let reset_ble = gpiob.pb5.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let sck = gpioa.pa5.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let miso = gpioa.pa6.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let mosi = gpioa.pa7.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let tx = gpiob.pb6.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let rx = gpiob.pb7.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let button_1 = gpiob.pb8.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_2 = gpiob.pb9.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_3 = gpioc.pc13.into_pull_up_input(
&mut gpioc.moder,
&mut gpioc.pupdr,
);
// Set up our display
let mode = ls010b7dh01::MODE;
let spi = Spi::spi1(
p.device.SPI1,
(sck, miso, mosi),
mode,
1.mhz(),
clocks,
&mut rcc.apb2,
);
let mut display = Ls010b7dh01::new(spi, cs, disp_en);
// Set up our BLE
let mut serial = Serial::usart1(
p.device.USART1,
(tx, rx),
115_200.bps(),
clocks,
&mut rcc.apb2,
);
serial.listen(serial::Event::Rxne); // TODO: Serial interrupts?
let mut ble = rn4870::Rn4870::new(serial, reset_ble);
// Set the default values
v5_en.set_high();
display.enable();
// Set up syscfg to link GPIO to EXTI
p.device.SYSCFG.exticr3.modify(|_, w| unsafe {
w.bits(0x11)
/* This does not work
w.exti8().bits(0b001) // Port b
.exti9().bits(0b001) // Port b
*/
});
p.device.SYSCFG.exticr4.modify(|_, w| unsafe {
w.exti13().bits(0b010) // Port c
});
p.device.EXTI.imr1.modify(|_, w| {
w.mr8().set_bit().mr9().set_bit().mr13().set_bit()
});
p.device.EXTI.ftsr1.modify(|_, w| {
w.tr8().set_bit().tr9().set_bit().tr13().set_bit()
});
init::LateResources {
DISPLAY: display,
EXTCOMIN: extcomin,
BLE: ble,
EXTI: p.device.EXTI,
}
}
fn idle() -> ! {
loop {
rtfm::wfi();
}
}
fn ble_message(_t: &mut Threshold, mut r: USART1_EXTI25::Resources) {
let res = r.BLE.read_raw();
match res {
Ok(n) => {
if n < 32 {
return
}
(*r.DRAW_BUFFER)[*r.BUFFER_POS as usize] = n;
*r.BUFFER_POS += 1;
if *r.BUFFER_POS == 16 {
*r.BUFFER_POS = 0;
}
}
Err(nb::Error::Other(_)) => {
r.BLE.handle_error(|uart| { uart.clear_overflow_error(); } );
}
Err(nb::Error::WouldBlock) => {}
}
}
fn exti9_5(_t: &mut Threshold, mut r: EXTI9_5::Resources) {
if r.EXTI.pr1.read().pr8().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr8().set_bit());
*r.STATE = State::Ble;
}
if r.EXTI.pr1.read().pr9().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr9().set_bit());
*r.STATE = State::Time;
}
}
fn exti15_10(_t: &mut Threshold, mut r: EXTI15_10::Resources) {
if r.EXTI.pr1.read().pr13().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr13().set_bit());
*r.STATE = State::Face;
}
}
fn tick(_t: &mut Threshold, mut r: TIM7::Resources) {
}
fn sys_tick(_t: &mut Threshold, mut r: SYS_TICK::Resources) {
let toggle = *r.TOGGLE;
let extcomin = &mut *r.EXTCOMIN;
if *r.RESET_BLE {
r.BLE.hard_reset_on();
*r.RESET_BLE = false;
} else {
r.BLE.hard_reset_off();
}
match *r.STATE {
State::Ble => {
r.DISPLAY.clear();
//let s = String::from_utf8_lossy(&*r.DRAW_BUFFER);
unsafe {
let s = &*(&*r.DRAW_BUFFER as *const [u8] as *const str);
r.DISPLAY.draw(Font6x8::render_str(s).translate((5, 50)).into_iter());
r.DISPLAY.flush_buffer();
}
}
State::Time => {
*r.REDRAW = true;
draw_time(&mut *r.DISPLAY, *r.TIME);
*r.TIME += 1;
if *r.TIME == 60 {
*r.TIME = 0;
}
}
State::Face => {
if *r.REDRAW {
draw_face(&mut *r.DISPLAY);
*r.REDRAW = false;
}
}
}
// Toggle extcomin manually
if toggle {
(*extcomin).set_high();
} else {
(*extcomin).set_low();
}
*r.TOGGLE = !toggle;
}
fn draw_face(mut display: &mut display::Display) {
display.clear();
let bpp = Image1BPP::new(include_bytes!("../data/face_1bpp_neg.raw"), 120, 120)
.translate((0, 0));
display.draw(bpp.into_iter());
display.flush_buffer();
}
fn draw_time(mut display: &mut display::Display, time: u8) {
display.clear();
/*
let values = [
(125, 65), (124, 71), (123, 77), (122, 83), (119, 89),
(116, 94), (113, 100), (109, 105), (105, 109), (100, 113),
(95, 116), (89, 119), (83, 122), (77, 123), (71, 124),
(65, 125), (59, 124), (53, 123), (47, 122), (41, 119),
(36, 116), (30, 113), (25, 109), (21, 105), (17, 100),
(14, 95), (11, 89), (8, 83), (7, 77), (6, 71),
(5, 65), (6, 59), (7, 53), (8, 47), (11, 41),
(14, 36), (17, 30), (21, 25), (25, 21), (30, 17),
(35, 14), (41, 11), (47, 8), (53, 7), (59, 6),
(65, 5), (71, 6), (77, 7), (83, 8), (89, 11),
(94, 14), (100, 17), (105, 21), (109, 25), (113, 30),
(116, 35), (119, 41), (122, 47), (123, 53), (124, 59),
];
*/
let values =[(109, 64), (108, 68), (108, 73), (106, 77), (105, 82), (102, 86), (100, 90), (97, 94), (94, 97), (90, 100), (86, 102), (82, 105), (77, 106), (73, 108), (68, 108), (64, 109), (60, 108), (55, 108), (51, 106), (46, 105), (42, 102), (38, 100), (34, 97), (31, 94), (28, 90), (26, 86), (23, 82), (22, 77), (20, 73), (20, 68), (19, 64), (20, 60), (20, 55), (22, 51), (23, 46), (26, 42), (28, 38), (31, 34), (34, 31), (38, 28), (42, 26), (46, 23), (51, 22), (55, 20), (60, 20), (64, 19), (68, 20), (73, 20), (77, 22), (82, 23), (86, 26), (90, 28), (94, 31), (97, 34), (100, 38), (102, 42), (105, 46), (106, 51), (108, 55), (108, 60)];
let digits = [(116, 60), (108, 87), (88, 107), (61, 115), (34, 107), (14, 87), (6, 60), (14, 33), (34, 13), (61, 5), (88, 13), (108, 33)];
display.draw(Font6x8::render_str("3").translate(digits[0]).into_iter());
display.draw(Font6x8::render_str("4").translate(digits[1]).into_iter());
display.draw(Font6x8::render_str("5").translate(digits[2]).into_iter());
display.draw(Font6x8::render_str("6").translate(digits[3]).into_iter());
display.draw(Font6x8::render_str("7").translate(digits[4]).into_iter());
display.draw(Font6x8::render_str("8").translate(digits[5]).into_iter());
display.draw(Font6x8::render_str("9").translate(digits[6]).into_iter());
display.draw(Font6x8::render_str("10").translate(digits[7]).into_iter());
display.draw(Font6x8::render_str("11").translate(digits[8]).into_iter());
display.draw(Font6x8::render_str("12").translate(digits[9]).into_iter());
display.draw(Font6x8::render_str("1").translate(digits[10]).into_iter());
display.draw(Font6x8::render_str("2").translate(digits[11]).into_iter());
display.draw(Line::new((65, 65), values[time as usize], 1).into_iter());
display.flush_buffer();
}
fn draw_buffer(buffer: &[u8]) {
} |
// Enable the syscfg
rcc.apb2.enr().modify(|_, w| w.syscfgen().enabled());
rcc.apb2.rstr().modify(|_, w| w.syscfgrst().set_bit()); | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.