file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
box.py
import math from enum import Enum from functools import lru_cache from .feedback import printable_ascii_codes, drep, truncate_list, dimrep from .utils import InfiniteDimension, sum_infinities, LogicError class LineState(Enum): naturally_good = 1 should_stretch = 2 should_shrink = 3 class GlueRatio(Enum): no_stretchability = 2 no_shrinkability = 3 class BreakPoint(Enum): """The types of places where line or page breaks may happen. Used to decide how to assign penalties to breaks. """ glue = 1 kern = 2 math_off = 3 penalty = 4 discretionary_break = 5 not_a_break_point = 6 def extract_dimen(d): if isinstance(d, int): order = 0 factor = d elif isinstance(d, InfiniteDimension): order = d.nr_fils factor = d.factor else: raise LogicError(f"Unknown dimen type: '{d}'") return order, factor @lru_cache(512) def glue_set_ratio(natural_length, desired_length, stretch, shrink): excess_length = natural_length - desired_length if excess_length == 0: line_state = LineState.naturally_good elif excess_length > 0: line_state = LineState.should_shrink else: line_state = LineState.should_stretch # If x = w, all glue gets its natural length. if line_state == LineState.naturally_good: glue_ratio = 0.0 # Not stated, but assuming this value does not matter. glue_order = 0 # Otherwise the glue will be modified, by computing a 'glue set ratio', # r and a 'glue set order', i, in the following way: # Let's say that there's a total of # y_0 + y_1 fil + y_2 fill + y_3 filll # available for stretching and # z_0 + z_1 fil + z_2 fill + z_3 filll # available for shrinking. # "If x < w, TeX attempts to stretch the contents of the box; the # glue order is the highest subscript i such that y_i is nonzero, and # the glue ratio is r = (w - x) / y_i. 
(If y_0 = y_1 = y_2 = y_3 = 0, # there's no stretchability; both i and r are set to zero.)" elif line_state == LineState.should_stretch: stretch = stretch stretch = [d for d in stretch if d > 0] if not stretch: glue_order = 0 # I actually don't obey the rules in this case, because it results # in a weird situation where lines with no stretchability, such as # single words, are assigned zero badness. glue_ratio = GlueRatio.no_stretchability else: glue_order = len(stretch) - 1 relevant_stretch_dimen = stretch[-1] glue_ratio = -excess_length / relevant_stretch_dimen # If x > w, the glue order is the highest subscript i such that z_i # != 0, and the glue ratio is normally r = (x - w) / z_i. (see below # for exception at 'However...') elif line_state == LineState.should_shrink: shrink = shrink shrink = [d for d in shrink if d > 0] # I assume that the rule when stretch_i = 0 also applies for # shrink_i = 0, though I can't see it stated anywhere. if not shrink: glue_order = 0 glue_ratio = GlueRatio.no_shrinkability else: glue_order = len(shrink) - 1 relevant_shrink_dimen = shrink[-1] glue_ratio = excess_length / relevant_shrink_dimen # However, r is set to 1.0 in the case i=0 and x - w > z_0, # because the maximum shrinkability must not be exceeded. if glue_order == 0: glue_ratio = min(glue_ratio, 1.0) return line_state, glue_ratio, glue_order def get_penalty(pre_break_conts, break_item): # Will assume if breaking at end of paragraph, so no break item, penalty is # zero. Not actually sure this is a real case, because \hfil is always # inserted right? if break_item is None: return 0 # "Each potential breakpoint has an associated 'penalty,' which # represents the 'aesthetic cost' of breaking at that place." # "In cases (a), (b), (c), the penalty is zero". 
if isinstance(break_item, (Glue, Kern, MathOff)): return 0 # "In case (d) an explicit penalty has been specified" elif isinstance(break_item, Penalty): return break_item.size # "In case (e) the penalty is the current value of \hyphenpenalty if # the pre-break text is nonempty, or the current value of # \exhyphenpenalty if the pre-break text is empty." elif isinstance(break_item, DiscretionaryBreak): raise NotImplementedError else: raise ValueError(f"Item is not a break-point: {break_item}") def is_break_point(h_list, i): """These rules apply to both horizontal and vertical lists, but cases (d) and (e) should never happen. """ item = h_list[i] # a) at glue, provided that this glue is immediately preceded by a non- # discardable item, and that it is not part of a math formula (i.e., # not between math-on and math-off). # A break 'at glue' occurs at the left edge of the glue space. # TODO: Add math conditions. if (isinstance(item, Glue) # Check a previous item exists, and it is not discardable. and ((i - 1) >= 0) and (not h_list[i - 1].discardable)): return True # b) at a kern, provided that this kern is immediately followed by # glue, and that it is not part of a math formula. # TODO: Add math conditions. elif (isinstance(item, Kern) # Check a following item exists, and it is glue. and ((i + 1) <= (len(h_list) - 1)) and isinstance(h_list[i + 1], Glue)): return True # c) at a math-off that is immediately followed by glue. elif (isinstance(item, MathOff) # Check a following item exists, and it is glue. and ((i + 1) <= (len(h_list) - 1)) and isinstance(h_list[i + 1], Glue)): return True # d) at a penalty (which might have been inserted automatically in a # formula). elif isinstance(item, Penalty): return True # e) at a discretionary break. elif isinstance(item, DiscretionaryBreak): return True else: return False class ListElement: def __repr__(self): return f'{self.__class__.__name__}({self.__dict__.__repr__()})' # All modes. # Boxes. 
def contsrep(contents, n=9): """Get a nice representation of the contents of a box.""" cs_rep = [] for c in contents: if isinstance(c, Character) and c.code in printable_ascii_codes: c_str = chr(c.code) if cs_rep and isinstance(cs_rep[-1], str): cs_rep[-1] += c_str else: cs_rep.append(c_str) else: cs_rep.append(c) return truncate_list(cs_rep, n=n) class AbstractBox(ListElement): discardable = False def __init__(self, contents, to=None, spread=None, set_glue=True, offset=0): self.to = to self.spread = spread if to is not None and spread is not None: raise Exception('Cannot specify both to and spread') self.contents = list(contents) self.set_glue = set_glue if set_glue: self.scale_and_set() self.offset = offset def __repr__(self): a = [] a.append(f'naturally {dimrep(self.natural_length)}') a.append(f'minimally {dimrep(self.min_length)}') if self.to is not None: a.append(f'to {dimrep(self.to)}') elif self.spread is not None: a.append(f'spread {dimrep(self.to)}') a.append(contsrep(self.contents)) cls_name = self.__class__.__name__ if self.set_glue: cls_name = f'|{cls_name}|' return drep(cls_name, a) @property def un_set_glues(self): return [e for e in self.contents if isinstance(e, Glue) and not e.is_set] @property def stretch(self): return sum_infinities(g.stretch for g in self.un_set_glues) @property def shrink(self): return sum_infinities(g.shrink for g in self.un_set_glues) @property def natural_length(self): # The natural width, x, of the box contents is determined by adding up # the widths of the boxes and kerns inside, together with the natural # widths of all the glue inside. # I'm assuming this also applies to VBoxes, but adding heights instead # of widths. Might not be true, considering depths exist. 
w = 0 for item in self.contents: if isinstance(item, Glue): w += item.natural_length elif isinstance(item, Kern): w += item.length else: w += self.get_length(item) return w @property def min_length(self): """ Non-Knuthian concept, used to decide if a box is over-full: the length even if all glue is maximally shrunk. """ w = 0 for item in self.contents: if isinstance(item, Glue): w += item.min_length elif isinstance(item, Kern): w += item.length else: w += self.get_length(item) return w @property def is_over_full(self): return self.min_length > self.desired_length @property def desired_length(self): if self.to is not None: return self.to w = self.natural_length if self.spread is not None: w += self.spread return w def append(self, *args, **kwargs): self.contents.append(*args, **kwargs) def extend(self, *args, **kwargs): self.contents.extend(*args, **kwargs) def copy(self, *args, **kwargs): # If glue is set, need to tell the constructor that set_glue should be # True, but that the glue is already set. if self.set_glue: raise NotImplementedError('Can only copy un-set boxes at the ' 'moment, because that is all that is ' 'needed') return self.__class__(contents=self.contents[:], to=self.to, spread=self.spread, set_glue=False) def glue_set_ratio(self): return glue_set_ratio(self.natural_length, self.desired_length, tuple(self.stretch), tuple(self.shrink)) def scale_and_set(self): line_state, glue_ratio, glue_set_order = self.glue_set_ratio() # I undo the disobeyance I did in the glue set ratio logic, to align # with the TeXbook from now on. if glue_ratio in (GlueRatio.no_shrinkability, GlueRatio.no_stretchability): glue_ratio = 0.0 # Note I've quoted this from the TeXbook, talking about setting glue in # an H Box. But it later says that this all applies to V Boxes, so I've # changed 'width' to 'length'. # Every glob of glue in the list being boxed is modified. 
Suppose the # glue has natural length u, stretchability y, and shrinkability z, # where y is a jth order infinity and z is a kth order infinity. for i, item in enumerate(self.contents): if (not isinstance(item, Glue)) or item.is_set: continue g = item if line_state == LineState.naturally_good: glue_diff = 0 elif line_state == LineState.should_stretch: glue_order, glue_factor = extract_dimen(g.stretch) # [Each] glue takes the new length u + ry if j=i; # it keeps its natural length u if j != i. if glue_order == glue_set_order: glue_diff = glue_ratio * glue_factor else: glue_diff = 0 elif line_state == LineState.should_shrink: glue_order, glue_factor = extract_dimen(g.shrink) # [Each] glue takes the new length u-rz if k = i; it # keeps its natural length u if k != i. if glue_order == glue_set_order: glue_diff = -glue_ratio * glue_factor else: glue_diff = 0 else: raise ValueError(f'Unknown line state: {line_state}') # Notice that stretching or shrinking occurs only when the glue # has the highest order of infinity that doesn't cancel out. self.contents[i].set(round(g.natural_length + glue_diff)) self.set_glue = True def badness(self): """ Compute how bad this box would look if placed on a line. This is high if the line is much shorter or longer than the page width. """ # Page 97 of TeXbook. # "The badness of a line is an integer that is approximately 100 times # the cube of the ratio by which the glue inside the line must stretch # or shrink to make an hbox of the required size. For example, if the # line has a total shrinkability of 10 points, and if the glue is being # compressed by a total of 9 points, the badness is computed to be 73 # (since 100 * (9/10)^3 = 72.9); similarly, a line that stretches by # twice its total stretchability has a badness of 800. But if the # badness obtained by this method turns out to be more than 10000, the # value 10000 is used. 
(See the discussion of glue set ratio and glue # set order in Chapter 12; if i != 0, there is infinite stretchability # or shrinkability, so the badness is zero, otherwise the badness is # approximately min(100r^3, 10000).) Overfull boxes are considered to # be infinitely bad; they are avoided whenever possible." # Page 111 of TeXbook. # "Vertical badness is computed by the same rules as horizontal # badness; it is an integer between 0 and 10000, inclusive, except when # the box is overfull, when it is infinity." if self.is_over_full: return math.inf line_state, glue_ratio, glue_order = self.glue_set_ratio() if glue_order > 0: return 0 # I can't find this stated anywhere, but it seems intuitively correct: # a single word on a line has no flexibility, but it is probably bad. elif glue_ratio in (GlueRatio.no_stretchability, GlueRatio.no_shrinkability): return 10000 else: return min(round(100 * glue_ratio ** 3), 10000) class HBox(AbstractBox): def get_length(self, item): if isinstance(item, (Glue, Kern)): return item.length else: return item.width @property def widths(self): return [self.get_length(e) for e in self.contents] @property def heights(self): return [0 if isinstance(e, (Glue, Kern)) else e.height for e in self.contents] @property def depths(self): return [0 if isinstance(e, (Glue, Kern)) else e.depth for e in self.contents] @property def width(self): if not self.set_glue: raise AttributeError('HBox is not set yet, does not have a width') return self.desired_length # TODO: I'm not sure the height and depth definitions are correct. 
@property def height(self): return max(self.heights, default=0) @property def depth(self): return max(self.depths, default=0) def demerit(self, break_item, line_penalty): ten_k = 10000 el = line_penalty b = self.badness() p = get_penalty(self.contents, break_item) d = (el + b)**2 if 0 <= p < ten_k: d += p**2 elif -ten_k < p < 0: d -= p**2 elif p <= -ten_k: pass else: raise LogicError('Undefined condition state when computing ' 'demerit') return d def considerable_as_line(self, tolerance, break_item): return (get_penalty(self.contents, break_item) < 10000 and (self.badness() <= tolerance)) class VBox(AbstractBox): def get_length(self, item): if isinstance(item, (Glue, Kern)): return item.length else: return item.height @property def widths(self): return [0 if isinstance(e, (Glue, Kern)) else e.width for e in self.contents] @property def heights(self): return [self.get_length(e) for e in self.contents] @property def depths(self): return [0 if isinstance(e, (Glue, Kern)) else e.width for e in self.contents] @property def width(self):
@property def height(self): if not self.set_glue: raise AttributeError('VBox is not set yet, does not have a height') return self.desired_length # TODO: This is wrong. Correct rules are in TeXbook page 80. @property def depth(self): if self.contents: # This is an approximation of the rules, not an attempt at # correctness. if not isinstance(self.contents[-1], AbstractBox): return 0 else: return self.contents[-1].depth else: return 0 def page_break_cost_and_penalty(self, break_item, insert_penalties): # Page 111 of TeXbook. ten_k = 10000 b = self.badness() p = get_penalty(self.contents, break_item) q = insert_penalties if b < math.inf and p <= -ten_k and q < ten_k: c = p elif b < ten_k and -ten_k < p < ten_k and q < ten_k: c = b + p + q elif b >= ten_k and -ten_k < p < ten_k and q < ten_k: # Not ten_k, I checked! hundred_k = 100000 c = hundred_k elif (b == math.inf or q >= ten_k) and p < ten_k: c = math.inf else: raise LogicError('TeX implies we should not get here') return c, p class Rule(ListElement): discardable = False def __init__(self, width, height, depth): self.width = width self.height = height self.depth = depth # /Boxes. # Miscellanea. 
class WhatsIt(ListElement): discardable = False width = height = depth = 0 class Glue(ListElement): discardable = True def __init__(self, dimen, stretch=0, shrink=0): self.natural_length = dimen self.stretch = stretch self.shrink = shrink self.set_dimen = None def __repr__(self): if self.set_dimen is None: return 'G({} +{} -{})'.format(dimrep(self.natural_length), dimrep(self.stretch), dimrep(self.shrink)) else: return f'|G|({dimrep(self.set_dimen)})' @property def is_set(self): return self.set_dimen is not None @property def min_length(self): if isinstance(self.shrink, InfiniteDimension): return 0 else: return self.natural_length - self.shrink def set_naturally(self): self.set_dimen = self.natural_length def set(self, dimen): self.set_dimen = dimen def unset(self): self.set_dimen = None @property def length(self): if self.set_dimen is not None: return self.set_dimen else: raise AttributeError('Glue is not set, so has no length') class Kern(ListElement): discardable = True def __init__(self, dimen): self.length = dimen def __repr__(self): return f'K({dimrep(self.length)})' class Leaders(ListElement): discardable = True pass class Penalty(ListElement): discardable = True def __init__(self, size): self.size = size def __repr__(self): return f'P({self.size})' width = height = depth = 0 # /Miscellanea. # Vertical material. class Mark(ListElement): discardable = False pass class Insertion(ListElement): discardable = False pass # /Vertical material. # Horizontal mode only. # Boxes. class Character(ListElement): discardable = False def __init__(self, code, width, height, depth): self.code = code self.width = width self.height = height self.depth = depth def __repr__(self): if self.code in printable_ascii_codes: return f"'{chr(self.code)}'" else: return f"C({self.code})" class Ligature(ListElement): discardable = False pass # /Boxes. # Miscellanea. 
class DiscretionaryBreak(ListElement): discardable = False class MathOn(ListElement): discardable = True class MathOff(ListElement): discardable = True # /Miscellanea. # Vertical material. class VAdjust(ListElement): discardable = False # /Vertical material. # Fake items for font things. class FontDefinition(ListElement): discardable = False def __init__(self, font_nr, font_name, file_name, at_clause=None): self.font_nr = font_nr self.font_name = font_name self.file_name = file_name self.at_clause = at_clause width = height = depth = 0 def __repr__(self): return f'FD({self.font_nr}: {self.font_name})' class FontSelection(ListElement): discardable = False def __init__(self, font_nr): self.font_nr = font_nr def __repr__(self): return f'F({self.font_nr})' width = height = depth = 0
return max(self.widths)
identifier_body
box.py
import math from enum import Enum from functools import lru_cache from .feedback import printable_ascii_codes, drep, truncate_list, dimrep from .utils import InfiniteDimension, sum_infinities, LogicError class LineState(Enum): naturally_good = 1 should_stretch = 2 should_shrink = 3 class GlueRatio(Enum): no_stretchability = 2 no_shrinkability = 3 class BreakPoint(Enum): """The types of places where line or page breaks may happen. Used to decide how to assign penalties to breaks. """ glue = 1 kern = 2 math_off = 3 penalty = 4 discretionary_break = 5 not_a_break_point = 6 def extract_dimen(d): if isinstance(d, int): order = 0 factor = d elif isinstance(d, InfiniteDimension): order = d.nr_fils factor = d.factor else: raise LogicError(f"Unknown dimen type: '{d}'") return order, factor @lru_cache(512) def glue_set_ratio(natural_length, desired_length, stretch, shrink): excess_length = natural_length - desired_length if excess_length == 0: line_state = LineState.naturally_good elif excess_length > 0: line_state = LineState.should_shrink else: line_state = LineState.should_stretch # If x = w, all glue gets its natural length. if line_state == LineState.naturally_good: glue_ratio = 0.0 # Not stated, but assuming this value does not matter. glue_order = 0 # Otherwise the glue will be modified, by computing a 'glue set ratio', # r and a 'glue set order', i, in the following way: # Let's say that there's a total of # y_0 + y_1 fil + y_2 fill + y_3 filll # available for stretching and # z_0 + z_1 fil + z_2 fill + z_3 filll # available for shrinking. # "If x < w, TeX attempts to stretch the contents of the box; the # glue order is the highest subscript i such that y_i is nonzero, and # the glue ratio is r = (w - x) / y_i. 
(If y_0 = y_1 = y_2 = y_3 = 0, # there's no stretchability; both i and r are set to zero.)" elif line_state == LineState.should_stretch: stretch = stretch stretch = [d for d in stretch if d > 0] if not stretch: glue_order = 0 # I actually don't obey the rules in this case, because it results # in a weird situation where lines with no stretchability, such as # single words, are assigned zero badness. glue_ratio = GlueRatio.no_stretchability else: glue_order = len(stretch) - 1 relevant_stretch_dimen = stretch[-1] glue_ratio = -excess_length / relevant_stretch_dimen # If x > w, the glue order is the highest subscript i such that z_i # != 0, and the glue ratio is normally r = (x - w) / z_i. (see below # for exception at 'However...') elif line_state == LineState.should_shrink: shrink = shrink shrink = [d for d in shrink if d > 0] # I assume that the rule when stretch_i = 0 also applies for # shrink_i = 0, though I can't see it stated anywhere. if not shrink: glue_order = 0 glue_ratio = GlueRatio.no_shrinkability else: glue_order = len(shrink) - 1 relevant_shrink_dimen = shrink[-1] glue_ratio = excess_length / relevant_shrink_dimen # However, r is set to 1.0 in the case i=0 and x - w > z_0, # because the maximum shrinkability must not be exceeded. if glue_order == 0: glue_ratio = min(glue_ratio, 1.0) return line_state, glue_ratio, glue_order def get_penalty(pre_break_conts, break_item): # Will assume if breaking at end of paragraph, so no break item, penalty is # zero. Not actually sure this is a real case, because \hfil is always # inserted right? if break_item is None: return 0 # "Each potential breakpoint has an associated 'penalty,' which # represents the 'aesthetic cost' of breaking at that place." # "In cases (a), (b), (c), the penalty is zero". 
if isinstance(break_item, (Glue, Kern, MathOff)): return 0 # "In case (d) an explicit penalty has been specified" elif isinstance(break_item, Penalty): return break_item.size # "In case (e) the penalty is the current value of \hyphenpenalty if # the pre-break text is nonempty, or the current value of # \exhyphenpenalty if the pre-break text is empty." elif isinstance(break_item, DiscretionaryBreak): raise NotImplementedError else: raise ValueError(f"Item is not a break-point: {break_item}") def is_break_point(h_list, i): """These rules apply to both horizontal and vertical lists, but cases (d) and (e) should never happen. """ item = h_list[i] # a) at glue, provided that this glue is immediately preceded by a non- # discardable item, and that it is not part of a math formula (i.e., # not between math-on and math-off). # A break 'at glue' occurs at the left edge of the glue space. # TODO: Add math conditions. if (isinstance(item, Glue) # Check a previous item exists, and it is not discardable. and ((i - 1) >= 0) and (not h_list[i - 1].discardable)): return True # b) at a kern, provided that this kern is immediately followed by # glue, and that it is not part of a math formula. # TODO: Add math conditions. elif (isinstance(item, Kern) # Check a following item exists, and it is glue. and ((i + 1) <= (len(h_list) - 1)) and isinstance(h_list[i + 1], Glue)): return True # c) at a math-off that is immediately followed by glue. elif (isinstance(item, MathOff) # Check a following item exists, and it is glue. and ((i + 1) <= (len(h_list) - 1)) and isinstance(h_list[i + 1], Glue)): return True # d) at a penalty (which might have been inserted automatically in a # formula). elif isinstance(item, Penalty): return True # e) at a discretionary break. elif isinstance(item, DiscretionaryBreak): return True else: return False class ListElement: def __repr__(self): return f'{self.__class__.__name__}({self.__dict__.__repr__()})' # All modes. # Boxes. 
def contsrep(contents, n=9): """Get a nice representation of the contents of a box.""" cs_rep = [] for c in contents: if isinstance(c, Character) and c.code in printable_ascii_codes: c_str = chr(c.code) if cs_rep and isinstance(cs_rep[-1], str): cs_rep[-1] += c_str else: cs_rep.append(c_str) else: cs_rep.append(c) return truncate_list(cs_rep, n=n) class AbstractBox(ListElement): discardable = False def __init__(self, contents, to=None, spread=None, set_glue=True, offset=0): self.to = to self.spread = spread if to is not None and spread is not None: raise Exception('Cannot specify both to and spread') self.contents = list(contents) self.set_glue = set_glue if set_glue:
self.offset = offset def __repr__(self): a = [] a.append(f'naturally {dimrep(self.natural_length)}') a.append(f'minimally {dimrep(self.min_length)}') if self.to is not None: a.append(f'to {dimrep(self.to)}') elif self.spread is not None: a.append(f'spread {dimrep(self.to)}') a.append(contsrep(self.contents)) cls_name = self.__class__.__name__ if self.set_glue: cls_name = f'|{cls_name}|' return drep(cls_name, a) @property def un_set_glues(self): return [e for e in self.contents if isinstance(e, Glue) and not e.is_set] @property def stretch(self): return sum_infinities(g.stretch for g in self.un_set_glues) @property def shrink(self): return sum_infinities(g.shrink for g in self.un_set_glues) @property def natural_length(self): # The natural width, x, of the box contents is determined by adding up # the widths of the boxes and kerns inside, together with the natural # widths of all the glue inside. # I'm assuming this also applies to VBoxes, but adding heights instead # of widths. Might not be true, considering depths exist. w = 0 for item in self.contents: if isinstance(item, Glue): w += item.natural_length elif isinstance(item, Kern): w += item.length else: w += self.get_length(item) return w @property def min_length(self): """ Non-Knuthian concept, used to decide if a box is over-full: the length even if all glue is maximally shrunk. 
""" w = 0 for item in self.contents: if isinstance(item, Glue): w += item.min_length elif isinstance(item, Kern): w += item.length else: w += self.get_length(item) return w @property def is_over_full(self): return self.min_length > self.desired_length @property def desired_length(self): if self.to is not None: return self.to w = self.natural_length if self.spread is not None: w += self.spread return w def append(self, *args, **kwargs): self.contents.append(*args, **kwargs) def extend(self, *args, **kwargs): self.contents.extend(*args, **kwargs) def copy(self, *args, **kwargs): # If glue is set, need to tell the constructor that set_glue should be # True, but that the glue is already set. if self.set_glue: raise NotImplementedError('Can only copy un-set boxes at the ' 'moment, because that is all that is ' 'needed') return self.__class__(contents=self.contents[:], to=self.to, spread=self.spread, set_glue=False) def glue_set_ratio(self): return glue_set_ratio(self.natural_length, self.desired_length, tuple(self.stretch), tuple(self.shrink)) def scale_and_set(self): line_state, glue_ratio, glue_set_order = self.glue_set_ratio() # I undo the disobeyance I did in the glue set ratio logic, to align # with the TeXbook from now on. if glue_ratio in (GlueRatio.no_shrinkability, GlueRatio.no_stretchability): glue_ratio = 0.0 # Note I've quoted this from the TeXbook, talking about setting glue in # an H Box. But it later says that this all applies to V Boxes, so I've # changed 'width' to 'length'. # Every glob of glue in the list being boxed is modified. Suppose the # glue has natural length u, stretchability y, and shrinkability z, # where y is a jth order infinity and z is a kth order infinity. 
for i, item in enumerate(self.contents): if (not isinstance(item, Glue)) or item.is_set: continue g = item if line_state == LineState.naturally_good: glue_diff = 0 elif line_state == LineState.should_stretch: glue_order, glue_factor = extract_dimen(g.stretch) # [Each] glue takes the new length u + ry if j=i; # it keeps its natural length u if j != i. if glue_order == glue_set_order: glue_diff = glue_ratio * glue_factor else: glue_diff = 0 elif line_state == LineState.should_shrink: glue_order, glue_factor = extract_dimen(g.shrink) # [Each] glue takes the new length u-rz if k = i; it # keeps its natural length u if k != i. if glue_order == glue_set_order: glue_diff = -glue_ratio * glue_factor else: glue_diff = 0 else: raise ValueError(f'Unknown line state: {line_state}') # Notice that stretching or shrinking occurs only when the glue # has the highest order of infinity that doesn't cancel out. self.contents[i].set(round(g.natural_length + glue_diff)) self.set_glue = True def badness(self): """ Compute how bad this box would look if placed on a line. This is high if the line is much shorter or longer than the page width. """ # Page 97 of TeXbook. # "The badness of a line is an integer that is approximately 100 times # the cube of the ratio by which the glue inside the line must stretch # or shrink to make an hbox of the required size. For example, if the # line has a total shrinkability of 10 points, and if the glue is being # compressed by a total of 9 points, the badness is computed to be 73 # (since 100 * (9/10)^3 = 72.9); similarly, a line that stretches by # twice its total stretchability has a badness of 800. But if the # badness obtained by this method turns out to be more than 10000, the # value 10000 is used. (See the discussion of glue set ratio and glue # set order in Chapter 12; if i != 0, there is infinite stretchability # or shrinkability, so the badness is zero, otherwise the badness is # approximately min(100r^3, 10000).) 
Overfull boxes are considered to # be infinitely bad; they are avoided whenever possible." # Page 111 of TeXbook. # "Vertical badness is computed by the same rules as horizontal # badness; it is an integer between 0 and 10000, inclusive, except when # the box is overfull, when it is infinity." if self.is_over_full: return math.inf line_state, glue_ratio, glue_order = self.glue_set_ratio() if glue_order > 0: return 0 # I can't find this stated anywhere, but it seems intuitively correct: # a single word on a line has no flexibility, but it is probably bad. elif glue_ratio in (GlueRatio.no_stretchability, GlueRatio.no_shrinkability): return 10000 else: return min(round(100 * glue_ratio ** 3), 10000) class HBox(AbstractBox): def get_length(self, item): if isinstance(item, (Glue, Kern)): return item.length else: return item.width @property def widths(self): return [self.get_length(e) for e in self.contents] @property def heights(self): return [0 if isinstance(e, (Glue, Kern)) else e.height for e in self.contents] @property def depths(self): return [0 if isinstance(e, (Glue, Kern)) else e.depth for e in self.contents] @property def width(self): if not self.set_glue: raise AttributeError('HBox is not set yet, does not have a width') return self.desired_length # TODO: I'm not sure the height and depth definitions are correct. 
@property def height(self): return max(self.heights, default=0) @property def depth(self): return max(self.depths, default=0) def demerit(self, break_item, line_penalty): ten_k = 10000 el = line_penalty b = self.badness() p = get_penalty(self.contents, break_item) d = (el + b)**2 if 0 <= p < ten_k: d += p**2 elif -ten_k < p < 0: d -= p**2 elif p <= -ten_k: pass else: raise LogicError('Undefined condition state when computing ' 'demerit') return d def considerable_as_line(self, tolerance, break_item): return (get_penalty(self.contents, break_item) < 10000 and (self.badness() <= tolerance)) class VBox(AbstractBox): def get_length(self, item): if isinstance(item, (Glue, Kern)): return item.length else: return item.height @property def widths(self): return [0 if isinstance(e, (Glue, Kern)) else e.width for e in self.contents] @property def heights(self): return [self.get_length(e) for e in self.contents] @property def depths(self): return [0 if isinstance(e, (Glue, Kern)) else e.width for e in self.contents] @property def width(self): return max(self.widths) @property def height(self): if not self.set_glue: raise AttributeError('VBox is not set yet, does not have a height') return self.desired_length # TODO: This is wrong. Correct rules are in TeXbook page 80. @property def depth(self): if self.contents: # This is an approximation of the rules, not an attempt at # correctness. if not isinstance(self.contents[-1], AbstractBox): return 0 else: return self.contents[-1].depth else: return 0 def page_break_cost_and_penalty(self, break_item, insert_penalties): # Page 111 of TeXbook. ten_k = 10000 b = self.badness() p = get_penalty(self.contents, break_item) q = insert_penalties if b < math.inf and p <= -ten_k and q < ten_k: c = p elif b < ten_k and -ten_k < p < ten_k and q < ten_k: c = b + p + q elif b >= ten_k and -ten_k < p < ten_k and q < ten_k: # Not ten_k, I checked! 
hundred_k = 100000 c = hundred_k elif (b == math.inf or q >= ten_k) and p < ten_k: c = math.inf else: raise LogicError('TeX implies we should not get here') return c, p class Rule(ListElement): discardable = False def __init__(self, width, height, depth): self.width = width self.height = height self.depth = depth # /Boxes. # Miscellanea. class WhatsIt(ListElement): discardable = False width = height = depth = 0 class Glue(ListElement): discardable = True def __init__(self, dimen, stretch=0, shrink=0): self.natural_length = dimen self.stretch = stretch self.shrink = shrink self.set_dimen = None def __repr__(self): if self.set_dimen is None: return 'G({} +{} -{})'.format(dimrep(self.natural_length), dimrep(self.stretch), dimrep(self.shrink)) else: return f'|G|({dimrep(self.set_dimen)})' @property def is_set(self): return self.set_dimen is not None @property def min_length(self): if isinstance(self.shrink, InfiniteDimension): return 0 else: return self.natural_length - self.shrink def set_naturally(self): self.set_dimen = self.natural_length def set(self, dimen): self.set_dimen = dimen def unset(self): self.set_dimen = None @property def length(self): if self.set_dimen is not None: return self.set_dimen else: raise AttributeError('Glue is not set, so has no length') class Kern(ListElement): discardable = True def __init__(self, dimen): self.length = dimen def __repr__(self): return f'K({dimrep(self.length)})' class Leaders(ListElement): discardable = True pass class Penalty(ListElement): discardable = True def __init__(self, size): self.size = size def __repr__(self): return f'P({self.size})' width = height = depth = 0 # /Miscellanea. # Vertical material. class Mark(ListElement): discardable = False pass class Insertion(ListElement): discardable = False pass # /Vertical material. # Horizontal mode only. # Boxes. 
class Character(ListElement): discardable = False def __init__(self, code, width, height, depth): self.code = code self.width = width self.height = height self.depth = depth def __repr__(self): if self.code in printable_ascii_codes: return f"'{chr(self.code)}'" else: return f"C({self.code})" class Ligature(ListElement): discardable = False pass # /Boxes. # Miscellanea. class DiscretionaryBreak(ListElement): discardable = False class MathOn(ListElement): discardable = True class MathOff(ListElement): discardable = True # /Miscellanea. # Vertical material. class VAdjust(ListElement): discardable = False # /Vertical material. # Fake items for font things. class FontDefinition(ListElement): discardable = False def __init__(self, font_nr, font_name, file_name, at_clause=None): self.font_nr = font_nr self.font_name = font_name self.file_name = file_name self.at_clause = at_clause width = height = depth = 0 def __repr__(self): return f'FD({self.font_nr}: {self.font_name})' class FontSelection(ListElement): discardable = False def __init__(self, font_nr): self.font_nr = font_nr def __repr__(self): return f'F({self.font_nr})' width = height = depth = 0
self.scale_and_set()
conditional_block
box.py
# Box, glue, and penalty list elements for TeX-style line and page breaking.
import math
from enum import Enum
from functools import lru_cache

from .feedback import printable_ascii_codes, drep, truncate_list, dimrep
from .utils import InfiniteDimension, sum_infinities, LogicError


class LineState(Enum):
    """Whether a box's contents fit naturally, or must stretch or shrink."""
    naturally_good = 1
    should_stretch = 2
    should_shrink = 3


class GlueRatio(Enum):
    """Sentinels for the glue-set ratio when there is no flexibility at all."""
    no_stretchability = 2
    no_shrinkability = 3


class BreakPoint(Enum):
    """The types of places where line or page breaks may happen. Used to
    decide how to assign penalties to breaks.
    """
    glue = 1
    kern = 2
    math_off = 3
    penalty = 4
    discretionary_break = 5
    not_a_break_point = 6


def extract_dimen(d):
    """Split a dimension into (order-of-infinity, scalar factor).

    Plain integers are finite (order 0); InfiniteDimension carries its own
    fil-order and factor. Raises LogicError for anything else.
    """
    if isinstance(d, int):
        order = 0
        factor = d
    elif isinstance(d, InfiniteDimension):
        order = d.nr_fils
        factor = d.factor
    else:
        raise LogicError(f"Unknown dimen type: '{d}'")
    return order, factor


@lru_cache(512)
def glue_set_ratio(natural_length, desired_length, stretch, shrink):
    """Compute (line_state, glue_ratio, glue_order) per TeXbook Chapter 12.

    `stretch` and `shrink` are tuples (hashable, for the cache) whose index
    is the order of infinity and whose value is the total flexibility at
    that order.
    """
    excess_length = natural_length - desired_length
    if excess_length == 0:
        line_state = LineState.naturally_good
    elif excess_length > 0:
        line_state = LineState.should_shrink
    else:
        line_state = LineState.should_stretch

    # If x = w, all glue gets its natural length.
    if line_state == LineState.naturally_good:
        # Not stated, but assuming this value does not matter.
        glue_ratio = 0.0
        glue_order = 0
    # Otherwise the glue will be modified, by computing a 'glue set ratio',
    # r and a 'glue set order', i, in the following way:
    # Let's say that there's a total of
    #     y_0 + y_1 fil + y_2 fill + y_3 filll
    # available for stretching and
    #     z_0 + z_1 fil + z_2 fill + z_3 filll
    # available for shrinking.
    # "If x < w, TeX attempts to stretch the contents of the box; the
    # glue order is the highest subscript i such that y_i is nonzero, and
    # the glue ratio is r = (w - x) / y_i. (If y_0 = y_1 = y_2 = y_3 = 0,
    # there's no stretchability; both i and r are set to zero.)"
    elif line_state == LineState.should_stretch:
        stretch = [d for d in stretch if d > 0]
        if not stretch:
            glue_order = 0
            # I actually don't obey the rules in this case, because it results
            # in a weird situation where lines with no stretchability, such as
            # single words, are assigned zero badness.
            glue_ratio = GlueRatio.no_stretchability
        else:
            glue_order = len(stretch) - 1
            relevant_stretch_dimen = stretch[-1]
            glue_ratio = -excess_length / relevant_stretch_dimen
    # If x > w, the glue order is the highest subscript i such that z_i
    # != 0, and the glue ratio is normally r = (x - w) / z_i. (see below
    # for exception at 'However...')
    elif line_state == LineState.should_shrink:
        shrink = [d for d in shrink if d > 0]
        # I assume that the rule when stretch_i = 0 also applies for
        # shrink_i = 0, though I can't see it stated anywhere.
        if not shrink:
            glue_order = 0
            glue_ratio = GlueRatio.no_shrinkability
        else:
            glue_order = len(shrink) - 1
            relevant_shrink_dimen = shrink[-1]
            glue_ratio = excess_length / relevant_shrink_dimen
            # However, r is set to 1.0 in the case i=0 and x - w > z_0,
            # because the maximum shrinkability must not be exceeded.
            if glue_order == 0:
                glue_ratio = min(glue_ratio, 1.0)
    return line_state, glue_ratio, glue_order


def get_penalty(pre_break_conts, break_item):
    """Return the aesthetic penalty of breaking at `break_item`.

    `pre_break_conts` is currently unused but kept for interface stability
    (it will be needed to decide \\hyphenpenalty vs \\exhyphenpenalty for
    discretionary breaks).
    """
    # Will assume if breaking at end of paragraph, so no break item, penalty is
    # zero. Not actually sure this is a real case, because \hfil is always
    # inserted right?
    if break_item is None:
        return 0
    # "Each potential breakpoint has an associated 'penalty,' which
    # represents the 'aesthetic cost' of breaking at that place."
    # "In cases (a), (b), (c), the penalty is zero".
    if isinstance(break_item, (Glue, Kern, MathOff)):
        return 0
    # "In case (d) an explicit penalty has been specified"
    elif isinstance(break_item, Penalty):
        return break_item.size
    # "In case (e) the penalty is the current value of \hyphenpenalty if
    # the pre-break text is nonempty, or the current value of
    # \exhyphenpenalty if the pre-break text is empty."
    elif isinstance(break_item, DiscretionaryBreak):
        raise NotImplementedError
    else:
        raise ValueError(f"Item is not a break-point: {break_item}")


def is_break_point(h_list, i):
    """Return whether `h_list[i]` is a legal break-point.

    These rules apply to both horizontal and vertical lists, but cases (d)
    and (e) should never happen.
    """
    item = h_list[i]
    # a) at glue, provided that this glue is immediately preceded by a non-
    #    discardable item, and that it is not part of a math formula (i.e.,
    #    not between math-on and math-off).
    # A break 'at glue' occurs at the left edge of the glue space.
    # TODO: Add math conditions.
    if (isinstance(item, Glue)
            # Check a previous item exists, and it is not discardable.
            and ((i - 1) >= 0)
            and (not h_list[i - 1].discardable)):
        return True
    # b) at a kern, provided that this kern is immediately followed by
    #    glue, and that it is not part of a math formula.
    # TODO: Add math conditions.
    elif (isinstance(item, Kern)
            # Check a following item exists, and it is glue.
            and ((i + 1) <= (len(h_list) - 1))
            and isinstance(h_list[i + 1], Glue)):
        return True
    # c) at a math-off that is immediately followed by glue.
    elif (isinstance(item, MathOff)
            # Check a following item exists, and it is glue.
            and ((i + 1) <= (len(h_list) - 1))
            and isinstance(h_list[i + 1], Glue)):
        return True
    # d) at a penalty (which might have been inserted automatically in a
    #    formula).
    elif isinstance(item, Penalty):
        return True
    # e) at a discretionary break.
    elif isinstance(item, DiscretionaryBreak):
        return True
    else:
        return False


class ListElement:
    """Base class for every item that may appear in a horizontal or
    vertical list."""

    def __repr__(self):
        return f'{self.__class__.__name__}({self.__dict__.__repr__()})'


# All modes.
# Boxes.

def contsrep(contents, n=9):
    """Get a nice representation of the contents of a box."""
    cs_rep = []
    for c in contents:
        if isinstance(c, Character) and c.code in printable_ascii_codes:
            c_str = chr(c.code)
            # Merge runs of printable characters into a single string.
            if cs_rep and isinstance(cs_rep[-1], str):
                cs_rep[-1] += c_str
            else:
                cs_rep.append(c_str)
        else:
            cs_rep.append(c)
    return truncate_list(cs_rep, n=n)


class AbstractBox(ListElement):
    """Common machinery for HBox and VBox: glue setting, badness, lengths.

    Subclasses supply `get_length` (the dimension summed along the box's
    main axis) and the width/height/depth properties.
    """
    discardable = False

    def __init__(self, contents, to=None, spread=None, set_glue=True,
                 offset=0):
        self.to = to
        self.spread = spread
        if to is not None and spread is not None:
            raise Exception('Cannot specify both to and spread')
        self.contents = list(contents)
        self.set_glue = set_glue
        if set_glue:
            self.scale_and_set()
        self.offset = offset

    def __repr__(self):
        a = []
        a.append(f'naturally {dimrep(self.natural_length)}')
        a.append(f'minimally {dimrep(self.min_length)}')
        if self.to is not None:
            a.append(f'to {dimrep(self.to)}')
        elif self.spread is not None:
            # Bug fix: this previously printed `self.to` (always None in
            # this branch) instead of the spread value.
            a.append(f'spread {dimrep(self.spread)}')
        a.append(contsrep(self.contents))
        cls_name = self.__class__.__name__
        if self.set_glue:
            cls_name = f'|{cls_name}|'
        return drep(cls_name, a)

    @property
    def un_set_glues(self):
        return [e for e in self.contents
                if isinstance(e, Glue) and not e.is_set]

    @property
    def stretch(self):
        return sum_infinities(g.stretch for g in self.un_set_glues)

    @property
    def shrink(self):
        return sum_infinities(g.shrink for g in self.un_set_glues)

    @property
    def natural_length(self):
        # The natural width, x, of the box contents is determined by adding up
        # the widths of the boxes and kerns inside, together with the natural
        # widths of all the glue inside.
        # I'm assuming this also applies to VBoxes, but adding heights instead
        # of widths. Might not be true, considering depths exist.
        w = 0
        for item in self.contents:
            if isinstance(item, Glue):
                w += item.natural_length
            elif isinstance(item, Kern):
                w += item.length
            else:
                w += self.get_length(item)
        return w

    @property
    def min_length(self):
        """
        Non-Knuthian concept, used to decide if a box is over-full: the
        length even if all glue is maximally shrunk.
        """
        w = 0
        for item in self.contents:
            if isinstance(item, Glue):
                w += item.min_length
            elif isinstance(item, Kern):
                w += item.length
            else:
                w += self.get_length(item)
        return w

    @property
    def is_over_full(self):
        return self.min_length > self.desired_length

    @property
    def desired_length(self):
        if self.to is not None:
            return self.to
        w = self.natural_length
        if self.spread is not None:
            w += self.spread
        return w

    def append(self, *args, **kwargs):
        self.contents.append(*args, **kwargs)

    def extend(self, *args, **kwargs):
        self.contents.extend(*args, **kwargs)

    def copy(self, *args, **kwargs):
        # If glue is set, need to tell the constructor that set_glue should be
        # True, but that the glue is already set.
        if self.set_glue:
            raise NotImplementedError('Can only copy un-set boxes at the '
                                      'moment, because that is all that is '
                                      'needed')
        return self.__class__(contents=self.contents[:],
                              to=self.to, spread=self.spread,
                              set_glue=False)

    def glue_set_ratio(self):
        # Tuples, because the cached module-level function needs hashable
        # arguments.
        return glue_set_ratio(self.natural_length, self.desired_length,
                              tuple(self.stretch), tuple(self.shrink))

    def scale_and_set(self):
        """Fix every un-set glue in this box to its final length."""
        line_state, glue_ratio, glue_set_order = self.glue_set_ratio()
        # I undo the disobeyance I did in the glue set ratio logic, to align
        # with the TeXbook from now on.
        if glue_ratio in (GlueRatio.no_shrinkability,
                          GlueRatio.no_stretchability):
            glue_ratio = 0.0

        # Note I've quoted this from the TeXbook, talking about setting glue
        # in an H Box. But it later says that this all applies to V Boxes, so
        # I've changed 'width' to 'length'.
        # Every glob of glue in the list being boxed is modified. Suppose the
        # glue has natural length u, stretchability y, and shrinkability z,
        # where y is a jth order infinity and z is a kth order infinity.
        for i, item in enumerate(self.contents):
            if (not isinstance(item, Glue)) or item.is_set:
                continue
            g = item
            if line_state == LineState.naturally_good:
                glue_diff = 0
            elif line_state == LineState.should_stretch:
                glue_order, glue_factor = extract_dimen(g.stretch)
                # [Each] glue takes the new length u + ry if j=i;
                # it keeps its natural length u if j != i.
                if glue_order == glue_set_order:
                    glue_diff = glue_ratio * glue_factor
                else:
                    glue_diff = 0
            elif line_state == LineState.should_shrink:
                glue_order, glue_factor = extract_dimen(g.shrink)
                # [Each] glue takes the new length u-rz if k = i; it
                # keeps its natural length u if k != i.
                if glue_order == glue_set_order:
                    glue_diff = -glue_ratio * glue_factor
                else:
                    glue_diff = 0
            else:
                raise ValueError(f'Unknown line state: {line_state}')
            # Notice that stretching or shrinking occurs only when the glue
            # has the highest order of infinity that doesn't cancel out.
            self.contents[i].set(round(g.natural_length + glue_diff))
        self.set_glue = True

    def badness(self):
        """
        Compute how bad this box would look if placed on a line. This is
        high if the line is much shorter or longer than the page width.
        """
        # Page 97 of TeXbook.
        # "The badness of a line is an integer that is approximately 100 times
        # the cube of the ratio by which the glue inside the line must stretch
        # or shrink to make an hbox of the required size. For example, if the
        # line has a total shrinkability of 10 points, and if the glue is
        # being compressed by a total of 9 points, the badness is computed to
        # be 73 (since 100 * (9/10)^3 = 72.9); similarly, a line that
        # stretches by twice its total stretchability has a badness of 800.
        # But if the badness obtained by this method turns out to be more than
        # 10000, the value 10000 is used. (See the discussion of glue set
        # ratio and glue set order in Chapter 12; if i != 0, there is infinite
        # stretchability or shrinkability, so the badness is zero, otherwise
        # the badness is approximately min(100r^3, 10000).) Overfull boxes
        # are considered to be infinitely bad; they are avoided whenever
        # possible."
        # Page 111 of TeXbook.
        # "Vertical badness is computed by the same rules as horizontal
        # badness; it is an integer between 0 and 10000, inclusive, except
        # when the box is overfull, when it is infinity."
        if self.is_over_full:
            return math.inf
        line_state, glue_ratio, glue_order = self.glue_set_ratio()
        if glue_order > 0:
            return 0
        # I can't find this stated anywhere, but it seems intuitively correct:
        # a single word on a line has no flexibility, but it is probably bad.
        elif glue_ratio in (GlueRatio.no_stretchability,
                            GlueRatio.no_shrinkability):
            return 10000
        else:
            return min(round(100 * glue_ratio ** 3), 10000)


class HBox(AbstractBox):
    """A horizontal box: lengths are widths; height/depth are maxima."""

    def get_length(self, item):
        if isinstance(item, (Glue, Kern)):
            return item.length
        else:
            return item.width

    @property
    def widths(self):
        return [self.get_length(e) for e in self.contents]

    @property
    def heights(self):
        return [0 if isinstance(e, (Glue, Kern)) else e.height
                for e in self.contents]

    @property
    def depths(self):
        return [0 if isinstance(e, (Glue, Kern)) else e.depth
                for e in self.contents]

    @property
    def width(self):
        if not self.set_glue:
            raise AttributeError('HBox is not set yet, does not have a width')
        return self.desired_length

    # TODO: I'm not sure the height and depth definitions are correct.
    @property
    def height(self):
        return max(self.heights, default=0)

    @property
    def depth(self):
        return max(self.depths, default=0)

    def demerit(self, break_item, line_penalty):
        """Demerits of using this box as a line, broken at `break_item`."""
        ten_k = 10000
        el = line_penalty
        b = self.badness()
        p = get_penalty(self.contents, break_item)
        d = (el + b) ** 2
        if 0 <= p < ten_k:
            d += p ** 2
        elif -ten_k < p < 0:
            d -= p ** 2
        elif p <= -ten_k:
            # A forced break: the penalty contributes nothing.
            pass
        else:
            raise LogicError('Undefined condition state when computing '
                             'demerit')
        return d

    def considerable_as_line(self, tolerance, break_item):
        return (get_penalty(self.contents, break_item) < 10000
                and (self.badness() <= tolerance))


class VBox(AbstractBox):
    """A vertical box: lengths are heights; width is the maximum width."""

    def get_length(self, item):
        if isinstance(item, (Glue, Kern)):
            return item.length
        else:
            return item.height

    @property
    def widths(self):
        return [0 if isinstance(e, (Glue, Kern)) else e.width
                for e in self.contents]

    @property
    def heights(self):
        return [self.get_length(e) for e in self.contents]

    @property
    def depths(self):
        # Bug fix: this previously read `e.width` (copy-paste from `widths`),
        # returning widths where depths were wanted.
        return [0 if isinstance(e, (Glue, Kern)) else e.depth
                for e in self.contents]

    @property
    def width(self):
        return max(self.widths)

    @property
    def height(self):
        if not self.set_glue:
            raise AttributeError('VBox is not set yet, does not have a '
                                 'height')
        return self.desired_length

    # TODO: This is wrong. Correct rules are in TeXbook page 80.
    @property
    def depth(self):
        if self.contents:
            # This is an approximation of the rules, not an attempt at
            # correctness.
            if not isinstance(self.contents[-1], AbstractBox):
                return 0
            else:
                return self.contents[-1].depth
        else:
            return 0

    def page_break_cost_and_penalty(self, break_item, insert_penalties):
        """Cost c and penalty p of breaking a page at `break_item`."""
        # Page 111 of TeXbook.
        ten_k = 10000
        b = self.badness()
        p = get_penalty(self.contents, break_item)
        q = insert_penalties
        if b < math.inf and p <= -ten_k and q < ten_k:
            c = p
        elif b < ten_k and -ten_k < p < ten_k and q < ten_k:
            c = b + p + q
        elif b >= ten_k and -ten_k < p < ten_k and q < ten_k:
            # Not ten_k, I checked!
            hundred_k = 100000
            c = hundred_k
        elif (b == math.inf or q >= ten_k) and p < ten_k:
            c = math.inf
        else:
            raise LogicError('TeX implies we should not get here')
        return c, p


class Rule(ListElement):
    discardable = False

    def __init__(self, width, height, depth):
        self.width = width
        self.height = height
        self.depth = depth


# /Boxes.
# Miscellanea.

class WhatsIt(ListElement):
    discardable = False
    width = height = depth = 0


class Glue(ListElement):
    """Flexible space with a natural length, stretchability and
    shrinkability. Its final ('set') length is decided during glue
    setting."""
    discardable = True

    def __init__(self, dimen, stretch=0, shrink=0):
        self.natural_length = dimen
        self.stretch = stretch
        self.shrink = shrink
        self.set_dimen = None

    def __repr__(self):
        if self.set_dimen is None:
            return 'G({} +{} -{})'.format(dimrep(self.natural_length),
                                          dimrep(self.stretch),
                                          dimrep(self.shrink))
        else:
            return f'|G|({dimrep(self.set_dimen)})'

    @property
    def is_set(self):
        return self.set_dimen is not None

    @property
    def min_length(self):
        # Infinitely shrinkable glue can collapse to nothing.
        if isinstance(self.shrink, InfiniteDimension):
            return 0
        else:
            return self.natural_length - self.shrink

    def set_naturally(self):
        self.set_dimen = self.natural_length

    def set(self, dimen):
        self.set_dimen = dimen

    def unset(self):
        self.set_dimen = None

    @property
    def length(self):
        if self.set_dimen is not None:
            return self.set_dimen
        else:
            raise AttributeError('Glue is not set, so has no length')


class Kern(ListElement):
    discardable = True

    def __init__(self, dimen):
        self.length = dimen

    def __repr__(self):
        return f'K({dimrep(self.length)})'


class Leaders(ListElement):
    discardable = True
    pass


class Penalty(ListElement):
    discardable = True

    def __init__(self, size):
        self.size = size

    def __repr__(self):
        return f'P({self.size})'

    width = height = depth = 0


# /Miscellanea.
# Vertical material.

class Mark(ListElement):
    discardable = False
    pass


class Insertion(ListElement):
    discardable = False
    pass


# /Vertical material.
# Horizontal mode only.
# Boxes.

class Character(ListElement):
    discardable = False

    def __init__(self, code, width, height, depth):
        self.code = code
        self.width = width
        self.height = height
        self.depth = depth

    def __repr__(self):
        if self.code in printable_ascii_codes:
            return f"'{chr(self.code)}'"
        else:
            return f"C({self.code})"


class Ligature(ListElement):
    discardable = False
    pass


# /Boxes.
# Miscellanea.

class DiscretionaryBreak(ListElement):
    discardable = False


class MathOn(ListElement):
    discardable = True


class MathOff(ListElement):
    discardable = True


# /Miscellanea.
# Vertical material.

class VAdjust(ListElement):
    discardable = False


# /Vertical material.
# Fake items for font things.

class FontDefinition(ListElement):
    discardable = False

    def __init__(self, font_nr, font_name, file_name, at_clause=None):
        self.font_nr = font_nr
        self.font_name = font_name
        self.file_name = file_name
        self.at_clause = at_clause

    width = height = depth = 0

    def __repr__(self):
        return f'FD({self.font_nr}: {self.font_name})'


class FontSelection(ListElement):
    discardable = False

    def __init__(self, font_nr):
        self.font_nr = font_nr

    def __repr__(self):
        return f'F({self.font_nr})'

    width = height = depth = 0
random_line_split
box.py
import math from enum import Enum from functools import lru_cache from .feedback import printable_ascii_codes, drep, truncate_list, dimrep from .utils import InfiniteDimension, sum_infinities, LogicError class LineState(Enum): naturally_good = 1 should_stretch = 2 should_shrink = 3 class GlueRatio(Enum): no_stretchability = 2 no_shrinkability = 3 class BreakPoint(Enum): """The types of places where line or page breaks may happen. Used to decide how to assign penalties to breaks. """ glue = 1 kern = 2 math_off = 3 penalty = 4 discretionary_break = 5 not_a_break_point = 6 def extract_dimen(d): if isinstance(d, int): order = 0 factor = d elif isinstance(d, InfiniteDimension): order = d.nr_fils factor = d.factor else: raise LogicError(f"Unknown dimen type: '{d}'") return order, factor @lru_cache(512) def glue_set_ratio(natural_length, desired_length, stretch, shrink): excess_length = natural_length - desired_length if excess_length == 0: line_state = LineState.naturally_good elif excess_length > 0: line_state = LineState.should_shrink else: line_state = LineState.should_stretch # If x = w, all glue gets its natural length. if line_state == LineState.naturally_good: glue_ratio = 0.0 # Not stated, but assuming this value does not matter. glue_order = 0 # Otherwise the glue will be modified, by computing a 'glue set ratio', # r and a 'glue set order', i, in the following way: # Let's say that there's a total of # y_0 + y_1 fil + y_2 fill + y_3 filll # available for stretching and # z_0 + z_1 fil + z_2 fill + z_3 filll # available for shrinking. # "If x < w, TeX attempts to stretch the contents of the box; the # glue order is the highest subscript i such that y_i is nonzero, and # the glue ratio is r = (w - x) / y_i. 
(If y_0 = y_1 = y_2 = y_3 = 0, # there's no stretchability; both i and r are set to zero.)" elif line_state == LineState.should_stretch: stretch = stretch stretch = [d for d in stretch if d > 0] if not stretch: glue_order = 0 # I actually don't obey the rules in this case, because it results # in a weird situation where lines with no stretchability, such as # single words, are assigned zero badness. glue_ratio = GlueRatio.no_stretchability else: glue_order = len(stretch) - 1 relevant_stretch_dimen = stretch[-1] glue_ratio = -excess_length / relevant_stretch_dimen # If x > w, the glue order is the highest subscript i such that z_i # != 0, and the glue ratio is normally r = (x - w) / z_i. (see below # for exception at 'However...') elif line_state == LineState.should_shrink: shrink = shrink shrink = [d for d in shrink if d > 0] # I assume that the rule when stretch_i = 0 also applies for # shrink_i = 0, though I can't see it stated anywhere. if not shrink: glue_order = 0 glue_ratio = GlueRatio.no_shrinkability else: glue_order = len(shrink) - 1 relevant_shrink_dimen = shrink[-1] glue_ratio = excess_length / relevant_shrink_dimen # However, r is set to 1.0 in the case i=0 and x - w > z_0, # because the maximum shrinkability must not be exceeded. if glue_order == 0: glue_ratio = min(glue_ratio, 1.0) return line_state, glue_ratio, glue_order def get_penalty(pre_break_conts, break_item): # Will assume if breaking at end of paragraph, so no break item, penalty is # zero. Not actually sure this is a real case, because \hfil is always # inserted right? if break_item is None: return 0 # "Each potential breakpoint has an associated 'penalty,' which # represents the 'aesthetic cost' of breaking at that place." # "In cases (a), (b), (c), the penalty is zero". 
if isinstance(break_item, (Glue, Kern, MathOff)): return 0 # "In case (d) an explicit penalty has been specified" elif isinstance(break_item, Penalty): return break_item.size # "In case (e) the penalty is the current value of \hyphenpenalty if # the pre-break text is nonempty, or the current value of # \exhyphenpenalty if the pre-break text is empty." elif isinstance(break_item, DiscretionaryBreak): raise NotImplementedError else: raise ValueError(f"Item is not a break-point: {break_item}") def is_break_point(h_list, i): """These rules apply to both horizontal and vertical lists, but cases (d) and (e) should never happen. """ item = h_list[i] # a) at glue, provided that this glue is immediately preceded by a non- # discardable item, and that it is not part of a math formula (i.e., # not between math-on and math-off). # A break 'at glue' occurs at the left edge of the glue space. # TODO: Add math conditions. if (isinstance(item, Glue) # Check a previous item exists, and it is not discardable. and ((i - 1) >= 0) and (not h_list[i - 1].discardable)): return True # b) at a kern, provided that this kern is immediately followed by # glue, and that it is not part of a math formula. # TODO: Add math conditions. elif (isinstance(item, Kern) # Check a following item exists, and it is glue. and ((i + 1) <= (len(h_list) - 1)) and isinstance(h_list[i + 1], Glue)): return True # c) at a math-off that is immediately followed by glue. elif (isinstance(item, MathOff) # Check a following item exists, and it is glue. and ((i + 1) <= (len(h_list) - 1)) and isinstance(h_list[i + 1], Glue)): return True # d) at a penalty (which might have been inserted automatically in a # formula). elif isinstance(item, Penalty): return True # e) at a discretionary break. elif isinstance(item, DiscretionaryBreak): return True else: return False class ListElement: def __repr__(self): return f'{self.__class__.__name__}({self.__dict__.__repr__()})' # All modes. # Boxes. 
def contsrep(contents, n=9): """Get a nice representation of the contents of a box.""" cs_rep = [] for c in contents: if isinstance(c, Character) and c.code in printable_ascii_codes: c_str = chr(c.code) if cs_rep and isinstance(cs_rep[-1], str): cs_rep[-1] += c_str else: cs_rep.append(c_str) else: cs_rep.append(c) return truncate_list(cs_rep, n=n) class AbstractBox(ListElement): discardable = False def __init__(self, contents, to=None, spread=None, set_glue=True, offset=0): self.to = to self.spread = spread if to is not None and spread is not None: raise Exception('Cannot specify both to and spread') self.contents = list(contents) self.set_glue = set_glue if set_glue: self.scale_and_set() self.offset = offset def __repr__(self): a = [] a.append(f'naturally {dimrep(self.natural_length)}') a.append(f'minimally {dimrep(self.min_length)}') if self.to is not None: a.append(f'to {dimrep(self.to)}') elif self.spread is not None: a.append(f'spread {dimrep(self.to)}') a.append(contsrep(self.contents)) cls_name = self.__class__.__name__ if self.set_glue: cls_name = f'|{cls_name}|' return drep(cls_name, a) @property def un_set_glues(self): return [e for e in self.contents if isinstance(e, Glue) and not e.is_set] @property def stretch(self): return sum_infinities(g.stretch for g in self.un_set_glues) @property def shrink(self): return sum_infinities(g.shrink for g in self.un_set_glues) @property def natural_length(self): # The natural width, x, of the box contents is determined by adding up # the widths of the boxes and kerns inside, together with the natural # widths of all the glue inside. # I'm assuming this also applies to VBoxes, but adding heights instead # of widths. Might not be true, considering depths exist. 
w = 0 for item in self.contents: if isinstance(item, Glue): w += item.natural_length elif isinstance(item, Kern): w += item.length else: w += self.get_length(item) return w @property def min_length(self): """ Non-Knuthian concept, used to decide if a box is over-full: the length even if all glue is maximally shrunk. """ w = 0 for item in self.contents: if isinstance(item, Glue): w += item.min_length elif isinstance(item, Kern): w += item.length else: w += self.get_length(item) return w @property def is_over_full(self): return self.min_length > self.desired_length @property def desired_length(self): if self.to is not None: return self.to w = self.natural_length if self.spread is not None: w += self.spread return w def append(self, *args, **kwargs): self.contents.append(*args, **kwargs) def extend(self, *args, **kwargs): self.contents.extend(*args, **kwargs) def copy(self, *args, **kwargs): # If glue is set, need to tell the constructor that set_glue should be # True, but that the glue is already set. if self.set_glue: raise NotImplementedError('Can only copy un-set boxes at the ' 'moment, because that is all that is ' 'needed') return self.__class__(contents=self.contents[:], to=self.to, spread=self.spread, set_glue=False) def glue_set_ratio(self): return glue_set_ratio(self.natural_length, self.desired_length, tuple(self.stretch), tuple(self.shrink)) def scale_and_set(self): line_state, glue_ratio, glue_set_order = self.glue_set_ratio() # I undo the disobeyance I did in the glue set ratio logic, to align # with the TeXbook from now on. if glue_ratio in (GlueRatio.no_shrinkability, GlueRatio.no_stretchability): glue_ratio = 0.0 # Note I've quoted this from the TeXbook, talking about setting glue in # an H Box. But it later says that this all applies to V Boxes, so I've # changed 'width' to 'length'. # Every glob of glue in the list being boxed is modified. 
Suppose the # glue has natural length u, stretchability y, and shrinkability z, # where y is a jth order infinity and z is a kth order infinity. for i, item in enumerate(self.contents): if (not isinstance(item, Glue)) or item.is_set: continue g = item if line_state == LineState.naturally_good: glue_diff = 0 elif line_state == LineState.should_stretch: glue_order, glue_factor = extract_dimen(g.stretch) # [Each] glue takes the new length u + ry if j=i; # it keeps its natural length u if j != i. if glue_order == glue_set_order: glue_diff = glue_ratio * glue_factor else: glue_diff = 0 elif line_state == LineState.should_shrink: glue_order, glue_factor = extract_dimen(g.shrink) # [Each] glue takes the new length u-rz if k = i; it # keeps its natural length u if k != i. if glue_order == glue_set_order: glue_diff = -glue_ratio * glue_factor else: glue_diff = 0 else: raise ValueError(f'Unknown line state: {line_state}') # Notice that stretching or shrinking occurs only when the glue # has the highest order of infinity that doesn't cancel out. self.contents[i].set(round(g.natural_length + glue_diff)) self.set_glue = True def badness(self): """ Compute how bad this box would look if placed on a line. This is high if the line is much shorter or longer than the page width. """ # Page 97 of TeXbook. # "The badness of a line is an integer that is approximately 100 times # the cube of the ratio by which the glue inside the line must stretch # or shrink to make an hbox of the required size. For example, if the # line has a total shrinkability of 10 points, and if the glue is being # compressed by a total of 9 points, the badness is computed to be 73 # (since 100 * (9/10)^3 = 72.9); similarly, a line that stretches by # twice its total stretchability has a badness of 800. But if the # badness obtained by this method turns out to be more than 10000, the # value 10000 is used. 
(See the discussion of glue set ratio and glue # set order in Chapter 12; if i != 0, there is infinite stretchability # or shrinkability, so the badness is zero, otherwise the badness is # approximately min(100r^3, 10000).) Overfull boxes are considered to # be infinitely bad; they are avoided whenever possible." # Page 111 of TeXbook. # "Vertical badness is computed by the same rules as horizontal # badness; it is an integer between 0 and 10000, inclusive, except when # the box is overfull, when it is infinity." if self.is_over_full: return math.inf line_state, glue_ratio, glue_order = self.glue_set_ratio() if glue_order > 0: return 0 # I can't find this stated anywhere, but it seems intuitively correct: # a single word on a line has no flexibility, but it is probably bad. elif glue_ratio in (GlueRatio.no_stretchability, GlueRatio.no_shrinkability): return 10000 else: return min(round(100 * glue_ratio ** 3), 10000) class HBox(AbstractBox): def get_length(self, item): if isinstance(item, (Glue, Kern)): return item.length else: return item.width @property def widths(self): return [self.get_length(e) for e in self.contents] @property def heights(self): return [0 if isinstance(e, (Glue, Kern)) else e.height for e in self.contents] @property def depths(self): return [0 if isinstance(e, (Glue, Kern)) else e.depth for e in self.contents] @property def width(self): if not self.set_glue: raise AttributeError('HBox is not set yet, does not have a width') return self.desired_length # TODO: I'm not sure the height and depth definitions are correct. 
@property def height(self): return max(self.heights, default=0) @property def depth(self): return max(self.depths, default=0) def demerit(self, break_item, line_penalty): ten_k = 10000 el = line_penalty b = self.badness() p = get_penalty(self.contents, break_item) d = (el + b)**2 if 0 <= p < ten_k: d += p**2 elif -ten_k < p < 0: d -= p**2 elif p <= -ten_k: pass else: raise LogicError('Undefined condition state when computing ' 'demerit') return d def considerable_as_line(self, tolerance, break_item): return (get_penalty(self.contents, break_item) < 10000 and (self.badness() <= tolerance)) class VBox(AbstractBox): def get_length(self, item): if isinstance(item, (Glue, Kern)): return item.length else: return item.height @property def widths(self): return [0 if isinstance(e, (Glue, Kern)) else e.width for e in self.contents] @property def heights(self): return [self.get_length(e) for e in self.contents] @property def depths(self): return [0 if isinstance(e, (Glue, Kern)) else e.width for e in self.contents] @property def width(self): return max(self.widths) @property def height(self): if not self.set_glue: raise AttributeError('VBox is not set yet, does not have a height') return self.desired_length # TODO: This is wrong. Correct rules are in TeXbook page 80. @property def depth(self): if self.contents: # This is an approximation of the rules, not an attempt at # correctness. if not isinstance(self.contents[-1], AbstractBox): return 0 else: return self.contents[-1].depth else: return 0 def page_break_cost_and_penalty(self, break_item, insert_penalties): # Page 111 of TeXbook. ten_k = 10000 b = self.badness() p = get_penalty(self.contents, break_item) q = insert_penalties if b < math.inf and p <= -ten_k and q < ten_k: c = p elif b < ten_k and -ten_k < p < ten_k and q < ten_k: c = b + p + q elif b >= ten_k and -ten_k < p < ten_k and q < ten_k: # Not ten_k, I checked! 
hundred_k = 100000 c = hundred_k elif (b == math.inf or q >= ten_k) and p < ten_k: c = math.inf else: raise LogicError('TeX implies we should not get here') return c, p class
(ListElement): discardable = False def __init__(self, width, height, depth): self.width = width self.height = height self.depth = depth # /Boxes. # Miscellanea. class WhatsIt(ListElement): discardable = False width = height = depth = 0 class Glue(ListElement): discardable = True def __init__(self, dimen, stretch=0, shrink=0): self.natural_length = dimen self.stretch = stretch self.shrink = shrink self.set_dimen = None def __repr__(self): if self.set_dimen is None: return 'G({} +{} -{})'.format(dimrep(self.natural_length), dimrep(self.stretch), dimrep(self.shrink)) else: return f'|G|({dimrep(self.set_dimen)})' @property def is_set(self): return self.set_dimen is not None @property def min_length(self): if isinstance(self.shrink, InfiniteDimension): return 0 else: return self.natural_length - self.shrink def set_naturally(self): self.set_dimen = self.natural_length def set(self, dimen): self.set_dimen = dimen def unset(self): self.set_dimen = None @property def length(self): if self.set_dimen is not None: return self.set_dimen else: raise AttributeError('Glue is not set, so has no length') class Kern(ListElement): discardable = True def __init__(self, dimen): self.length = dimen def __repr__(self): return f'K({dimrep(self.length)})' class Leaders(ListElement): discardable = True pass class Penalty(ListElement): discardable = True def __init__(self, size): self.size = size def __repr__(self): return f'P({self.size})' width = height = depth = 0 # /Miscellanea. # Vertical material. class Mark(ListElement): discardable = False pass class Insertion(ListElement): discardable = False pass # /Vertical material. # Horizontal mode only. # Boxes. 
class Character(ListElement): discardable = False def __init__(self, code, width, height, depth): self.code = code self.width = width self.height = height self.depth = depth def __repr__(self): if self.code in printable_ascii_codes: return f"'{chr(self.code)}'" else: return f"C({self.code})" class Ligature(ListElement): discardable = False pass # /Boxes. # Miscellanea. class DiscretionaryBreak(ListElement): discardable = False class MathOn(ListElement): discardable = True class MathOff(ListElement): discardable = True # /Miscellanea. # Vertical material. class VAdjust(ListElement): discardable = False # /Vertical material. # Fake items for font things. class FontDefinition(ListElement): discardable = False def __init__(self, font_nr, font_name, file_name, at_clause=None): self.font_nr = font_nr self.font_name = font_name self.file_name = file_name self.at_clause = at_clause width = height = depth = 0 def __repr__(self): return f'FD({self.font_nr}: {self.font_name})' class FontSelection(ListElement): discardable = False def __init__(self, font_nr): self.font_nr = font_nr def __repr__(self): return f'F({self.font_nr})' width = height = depth = 0
Rule
identifier_name
twine.py
import json as jsonlib import logging import os import pkg_resources from dotenv import load_dotenv from jsonschema import ValidationError, validate as jsonschema_validate from . import exceptions from .utils import load_json, trim_suffix logger = logging.getLogger(__name__) SCHEMA_STRANDS = ("input_values", "configuration_values", "output_values", "monitor_message") MANIFEST_STRANDS = ( "configuration_manifest", "input_manifest", "output_manifest", ) CREDENTIAL_STRANDS = ("credentials",) CHILDREN_STRANDS = ("children",) ALL_STRANDS = ( *SCHEMA_STRANDS, *MANIFEST_STRANDS, *CREDENTIAL_STRANDS, *CHILDREN_STRANDS, ) class Twine: """Twine class manages validation of inputs and outputs to/from a data service, based on spec in a 'twine' file. Instantiate a Twine by providing a file name or a utf-8 encoded string containing valid json. The twine is itself validated to be correct on instantiation of Twine(). Note: Instantiating the twine does not validate that any inputs to an application are correct - it merely checks that the twine itself is correct. """ def
(self, **kwargs): for name, strand in self._load_twine(**kwargs).items(): setattr(self, name, strand) self._available_strands = set(trim_suffix(name, "_schema") for name in vars(self)) self._available_manifest_strands = self._available_strands & set(MANIFEST_STRANDS) def _load_twine(self, source=None): """Load twine from a *.json filename, file-like or a json string and validates twine contents.""" if source is None: # If loading an unspecified twine, return an empty one rather than raising error (like in _load_data()) raw_twine = {} logger.warning("No twine source specified. Loading empty twine.") else: raw_twine = self._load_json("twine", source, allowed_kinds=("file-like", "filename", "string", "object")) self._validate_against_schema("twine", raw_twine) self._validate_twine_version(twine_file_twined_version=raw_twine.get("twined_version", None)) return raw_twine def _load_json(self, kind, source, **kwargs): """Load data from either a *.json file, an open file pointer or a json string. Directly returns any other data.""" if source is None: raise exceptions.invalid_json_map[kind](f"Cannot load {kind} - no data source specified.") # Decode the json string and deserialize to objects. try: data = load_json(source, **kwargs) except FileNotFoundError as e: raise exceptions.file_not_found_map[kind](e) except jsonlib.decoder.JSONDecodeError as e: raise exceptions.invalid_json_map[kind](e) return data def _get_schema(self, strand): """Get the schema for the given strand. Can be used to validate: - the twine file contents itself against the present version twine spec - children data against the required schema for the present version twine spec - values data for compliance with schema written in the twine (for strands like input_values_schema) :param str strand: :return dict: """ if strand == "twine": # The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain # schema itself. 
The twine schema is distributed with this packaged to ensure version consistency... schema_path = "schema/twine_schema.json" elif strand in CHILDREN_STRANDS: # The data is a list of children. The "children" strand of the twine describes matching criteria for # the children, not the schema of the "children" data, which is distributed with this package to ensure # version consistency... schema_path = "schema/children_schema.json" elif strand in MANIFEST_STRANDS: # The data is a manifest of files. The "*_manifest" strands of the twine describe matching criteria used to # filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which # is distributed with this package to ensure version consistency... schema_path = "schema/manifest_schema.json" else: if strand not in SCHEMA_STRANDS: raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.") # Get schema from twine.json file. schema_key = strand + "_schema" try: return getattr(self, schema_key) except AttributeError: raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine") return jsonlib.loads(pkg_resources.resource_string("twined", schema_path)) def _validate_against_schema(self, strand, data): """Validate data against a schema, raises exceptions of type Invalid<strand>Json if not compliant. 
Can be used to validate: - the twine file contents itself against the present version twine spec - children data against the required schema for the present version twine spec - values data for compliance with schema written in the twine (for strands like input_values_schema) :param str strand: :param dict data: :return None: """ schema = self._get_schema(strand) try: jsonschema_validate(instance=data, schema=schema) logger.debug("Validated %s against schema", strand) except ValidationError as e: raise exceptions.invalid_contents_map[strand](str(e)) def _validate_twine_version(self, twine_file_twined_version): """Validate that the installed version is consistent with an optional version specification in the twine file.""" installed_twined_version = pkg_resources.get_distribution("twined").version logger.debug( "Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version ) if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version): raise exceptions.TwineVersionConflict( f"Twined library version conflict. 
Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed" ) def _validate_values(self, kind, source, cls=None, **kwargs): """Validate values against the twine schema.""" data = self._load_json(kind, source, **kwargs) self._validate_against_schema(kind, data) if cls: return cls(**data) return data def _validate_manifest(self, kind, source, cls=None, **kwargs): """Validate manifest against the twine schema.""" data = self._load_json(kind, source, **kwargs) # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive inbound = True if hasattr(data, "to_primitive"): inbound = False data = data.to_primitive() self._validate_against_schema(kind, data) self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data) if cls and inbound: return cls(**data) return data def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest): """Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present in the given manifest. :param str manifest_kind: the kind of manifest that's being validated (so the correct schema can be accessed) :param dict manifest: the manifest whose datasets are to be validated :raise twined.exceptions.InvalidManifestContents: if one or more of the expected non-optional datasets is missing :return None: """ # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files. manifest_schema = getattr(self, manifest_kind) for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items(): if expected_dataset_name in manifest["datasets"]: continue if expected_dataset_schema.get("optional", False): continue raise exceptions.invalid_contents_map[manifest_kind]( f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing." 
) @property def available_strands(self): """Get the names of strands that are found in this twine. :return set: """ return self._available_strands @property def available_manifest_strands(self): """Get the names of the manifest strands that are found in this twine. :return set: """ return self._available_manifest_strands def validate_children(self, source, **kwargs): """Validate that the children values, passed as either a file or a json string, are correct.""" # TODO cache this loaded data keyed on a hashed version of kwargs children = self._load_json("children", source, **kwargs) self._validate_against_schema("children", children) strand = getattr(self, "children", []) # Loop the children and accumulate values so we have an O(1) check children_keys = {} for child in children: children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1 # Check there is at least one child for each item described in the strand # TODO add max, min num specs to the strand schema and check here for item in strand: strand_key = item["key"] if children_keys.get(strand_key, 0) <= 0: raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}") # Loop the strand and add unique keys to dict so we have an O(1) check strand_keys = {} for item in strand: strand_keys[item["key"]] = True # Check that each child has a key which is described in the strand for child in children: child_key = child["key"] if not strand_keys.get(child_key, False): raise exceptions.InvalidValuesContents( f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine." ) # TODO Additional validation that the children match what is set as required in the Twine return children def validate_credentials(self, *args, dotenv_path=None, **kwargs): """Validate that all credentials required by the twine are present. Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets manager (e.g. 
Google Cloud Secrets), they must be loaded into the environment before validating the credentials strand. If not present in the environment, validate_credentials will check for variables in a .env file (if present) and populate the environment with them. Typically a .env file resides at the root of your application (the working directory) although a specific path may be set using the `dotenv_path` argument. .env files should never be committed to git or any other version control system. A .env file can look like this: ``` # a comment that will be ignored. YOUR_SECRET_VALUE=itsasecret MEANING_OF_LIFE=42 MULTILINE_VAR="hello\nworld" ``` Or like this (also useful for bash users): ``` export YOUR_SECRET_VALUE=itsasecret export MEANING_OF_LIFE=42 export MULTILINE_VAR="hello\nworld" ``` """ if not hasattr(self, "credentials"): return set() # Load any variables from the .env file into the environment. dotenv_path = dotenv_path or os.path.join(".", ".env") load_dotenv(dotenv_path) for credential in self.credentials: if credential["name"] not in os.environ: raise exceptions.CredentialNotFound( f"Credential {credential['name']!r} missing from environment or .env file." 
) return self.credentials def validate_configuration_values(self, source, **kwargs): """Validate that the configuration values, passed as either a file or a json string, are correct.""" return self._validate_values("configuration_values", source, **kwargs) def validate_input_values(self, source, **kwargs): """Validate that the input values, passed as either a file or a json string, are correct.""" return self._validate_values("input_values", source, **kwargs) def validate_output_values(self, source, **kwargs): """Validate that the output values, passed as either a file or a json string, are correct.""" return self._validate_values("output_values", source, **kwargs) def validate_monitor_message(self, source, **kwargs): """Validate monitor message against the monitor message schema strand.""" return self._validate_values(kind="monitor_message", source=source, **kwargs) def validate_configuration_manifest(self, source, **kwargs): """Validate the input manifest, passed as either a file or a json string.""" return self._validate_manifest("configuration_manifest", source, **kwargs) def validate_input_manifest(self, source, **kwargs): """Validate the input manifest, passed as either a file or a json string.""" return self._validate_manifest("input_manifest", source, **kwargs) def validate_output_manifest(self, source, **kwargs): """Validate the output manifest, passed as either a file or a json string.""" return self._validate_manifest("output_manifest", source, **kwargs) @staticmethod def _get_cls(name, cls): """Getter that will return cls[name] if cls is a dict or cls otherwise""" return cls.get(name, None) if isinstance(cls, dict) else cls def validate(self, allow_missing=False, allow_extra=False, cls=None, **kwargs): """Validate strands from sources provided as keyword arguments Usage: ``` self.twine.validate( input_values=input_values, input_manifest=input_manifest, credentials=credentials, children=children, cls=CLASS_MAP, allow_missing=False, allow_extra=False, ) 
``` :param bool allow_missing: If strand is present in the twine, but the source is equal to None, allow validation to continue. :param bool allow_extra: If strand is present in the sources, but not in the twine, allow validation to continue (only strands in the twine will be validated and converted, others will be returned as-is) :param any cls: optional dict of classes keyed on strand name (alternatively, one single class which will be applied to strands) which will be instantiated with the validated source data. :return dict: dict of validated and initialised sources """ # pop any strand name:data pairs out of kwargs and into their own dict source_kwargs = tuple(name for name in kwargs.keys() if name in ALL_STRANDS) sources = dict((name, kwargs.pop(name)) for name in source_kwargs) for strand_name, strand_data in sources.items(): if not allow_extra: if (strand_data is not None) and (strand_name not in self.available_strands): raise exceptions.StrandNotFound( f"Source data is provided for '{strand_name}' but no such strand is defined in the twine" ) if not allow_missing: if (strand_name in self.available_strands) and (strand_data is None): raise exceptions.TwineValueException( f"The '{strand_name}' strand is defined in the twine, but no data is provided in sources" ) if strand_data is not None: # TODO Consider reintroducing a skip based on whether cls is already instantiated. For now, leave it the # responsibility of the caller to determine what has already been validated and what hasn't. 
# # Use the twine to validate and instantiate as the desired class # if not isinstance(value, type(cls)): # self.logger.debug( # "Instantiating %s as %s and validating against twine", name, cls.__name__ if cls else "default_class" # ) # return self.twine.validate(name, source=value, cls=cls) method = getattr(self, f"validate_{strand_name}") klass = self._get_cls(strand_name, cls) sources[strand_name] = method(strand_data, cls=klass, **kwargs) else: sources[strand_name] = None return sources def validate_strand(self, name, source, **kwargs): """Validate a single strand by name.""" return self.validate({name: source}, **kwargs)[name] def prepare(self, *args, cls=None, **kwargs): """Prepare instance for strand data using a class map.""" prepared = {} for arg in args: if arg not in ALL_STRANDS: raise exceptions.UnknownStrand(f"Unknown strand '{arg}'") elif arg not in self.available_strands: prepared[arg] = None else: klass = self._get_cls(arg, cls) prepared[arg] = klass(**kwargs) if klass else dict(**kwargs) if hasattr(prepared[arg], "prepare"): prepared[arg] = prepared[arg].prepare(getattr(self, arg)) return prepared
__init__
identifier_name
twine.py
import json as jsonlib import logging import os import pkg_resources from dotenv import load_dotenv from jsonschema import ValidationError, validate as jsonschema_validate from . import exceptions from .utils import load_json, trim_suffix logger = logging.getLogger(__name__) SCHEMA_STRANDS = ("input_values", "configuration_values", "output_values", "monitor_message") MANIFEST_STRANDS = ( "configuration_manifest", "input_manifest", "output_manifest", ) CREDENTIAL_STRANDS = ("credentials",) CHILDREN_STRANDS = ("children",) ALL_STRANDS = ( *SCHEMA_STRANDS, *MANIFEST_STRANDS, *CREDENTIAL_STRANDS, *CHILDREN_STRANDS, ) class Twine: """Twine class manages validation of inputs and outputs to/from a data service, based on spec in a 'twine' file. Instantiate a Twine by providing a file name or a utf-8 encoded string containing valid json. The twine is itself validated to be correct on instantiation of Twine(). Note: Instantiating the twine does not validate that any inputs to an application are correct - it merely checks that the twine itself is correct. """ def __init__(self, **kwargs): for name, strand in self._load_twine(**kwargs).items(): setattr(self, name, strand) self._available_strands = set(trim_suffix(name, "_schema") for name in vars(self)) self._available_manifest_strands = self._available_strands & set(MANIFEST_STRANDS) def _load_twine(self, source=None): """Load twine from a *.json filename, file-like or a json string and validates twine contents.""" if source is None: # If loading an unspecified twine, return an empty one rather than raising error (like in _load_data()) raw_twine = {} logger.warning("No twine source specified. 
Loading empty twine.") else: raw_twine = self._load_json("twine", source, allowed_kinds=("file-like", "filename", "string", "object")) self._validate_against_schema("twine", raw_twine) self._validate_twine_version(twine_file_twined_version=raw_twine.get("twined_version", None)) return raw_twine def _load_json(self, kind, source, **kwargs): """Load data from either a *.json file, an open file pointer or a json string. Directly returns any other data.""" if source is None: raise exceptions.invalid_json_map[kind](f"Cannot load {kind} - no data source specified.") # Decode the json string and deserialize to objects. try: data = load_json(source, **kwargs) except FileNotFoundError as e: raise exceptions.file_not_found_map[kind](e) except jsonlib.decoder.JSONDecodeError as e: raise exceptions.invalid_json_map[kind](e) return data def _get_schema(self, strand): """Get the schema for the given strand. Can be used to validate: - the twine file contents itself against the present version twine spec - children data against the required schema for the present version twine spec - values data for compliance with schema written in the twine (for strands like input_values_schema) :param str strand: :return dict: """ if strand == "twine": # The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain # schema itself. The twine schema is distributed with this packaged to ensure version consistency... schema_path = "schema/twine_schema.json" elif strand in CHILDREN_STRANDS: # The data is a list of children. The "children" strand of the twine describes matching criteria for # the children, not the schema of the "children" data, which is distributed with this package to ensure
# version consistency... schema_path = "schema/children_schema.json" elif strand in MANIFEST_STRANDS: # The data is a manifest of files. The "*_manifest" strands of the twine describe matching criteria used to # filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which # is distributed with this package to ensure version consistency... schema_path = "schema/manifest_schema.json" else: if strand not in SCHEMA_STRANDS: raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.") # Get schema from twine.json file. schema_key = strand + "_schema" try: return getattr(self, schema_key) except AttributeError: raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine") return jsonlib.loads(pkg_resources.resource_string("twined", schema_path)) def _validate_against_schema(self, strand, data): """Validate data against a schema, raises exceptions of type Invalid<strand>Json if not compliant. Can be used to validate: - the twine file contents itself against the present version twine spec - children data against the required schema for the present version twine spec - values data for compliance with schema written in the twine (for strands like input_values_schema) :param str strand: :param dict data: :return None: """ schema = self._get_schema(strand) try: jsonschema_validate(instance=data, schema=schema) logger.debug("Validated %s against schema", strand) except ValidationError as e: raise exceptions.invalid_contents_map[strand](str(e)) def _validate_twine_version(self, twine_file_twined_version): """Validate that the installed version is consistent with an optional version specification in the twine file.""" installed_twined_version = pkg_resources.get_distribution("twined").version logger.debug( "Twine versions... 
%s installed, %s specified in twine", installed_twined_version, twine_file_twined_version ) if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version): raise exceptions.TwineVersionConflict( f"Twined library version conflict. Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed" ) def _validate_values(self, kind, source, cls=None, **kwargs): """Validate values against the twine schema.""" data = self._load_json(kind, source, **kwargs) self._validate_against_schema(kind, data) if cls: return cls(**data) return data def _validate_manifest(self, kind, source, cls=None, **kwargs): """Validate manifest against the twine schema.""" data = self._load_json(kind, source, **kwargs) # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive inbound = True if hasattr(data, "to_primitive"): inbound = False data = data.to_primitive() self._validate_against_schema(kind, data) self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data) if cls and inbound: return cls(**data) return data def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest): """Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present in the given manifest. :param str manifest_kind: the kind of manifest that's being validated (so the correct schema can be accessed) :param dict manifest: the manifest whose datasets are to be validated :raise twined.exceptions.InvalidManifestContents: if one or more of the expected non-optional datasets is missing :return None: """ # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files. 
manifest_schema = getattr(self, manifest_kind) for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items(): if expected_dataset_name in manifest["datasets"]: continue if expected_dataset_schema.get("optional", False): continue raise exceptions.invalid_contents_map[manifest_kind]( f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing." ) @property def available_strands(self): """Get the names of strands that are found in this twine. :return set: """ return self._available_strands @property def available_manifest_strands(self): """Get the names of the manifest strands that are found in this twine. :return set: """ return self._available_manifest_strands def validate_children(self, source, **kwargs): """Validate that the children values, passed as either a file or a json string, are correct.""" # TODO cache this loaded data keyed on a hashed version of kwargs children = self._load_json("children", source, **kwargs) self._validate_against_schema("children", children) strand = getattr(self, "children", []) # Loop the children and accumulate values so we have an O(1) check children_keys = {} for child in children: children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1 # Check there is at least one child for each item described in the strand # TODO add max, min num specs to the strand schema and check here for item in strand: strand_key = item["key"] if children_keys.get(strand_key, 0) <= 0: raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}") # Loop the strand and add unique keys to dict so we have an O(1) check strand_keys = {} for item in strand: strand_keys[item["key"]] = True # Check that each child has a key which is described in the strand for child in children: child_key = child["key"] if not strand_keys.get(child_key, False): raise exceptions.InvalidValuesContents( f"Child with key '{child_key}' found but no such key exists in the 'children' 
strand of the twine." ) # TODO Additional validation that the children match what is set as required in the Twine return children def validate_credentials(self, *args, dotenv_path=None, **kwargs): """Validate that all credentials required by the twine are present. Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets manager (e.g. Google Cloud Secrets), they must be loaded into the environment before validating the credentials strand. If not present in the environment, validate_credentials will check for variables in a .env file (if present) and populate the environment with them. Typically a .env file resides at the root of your application (the working directory) although a specific path may be set using the `dotenv_path` argument. .env files should never be committed to git or any other version control system. A .env file can look like this: ``` # a comment that will be ignored. YOUR_SECRET_VALUE=itsasecret MEANING_OF_LIFE=42 MULTILINE_VAR="hello\nworld" ``` Or like this (also useful for bash users): ``` export YOUR_SECRET_VALUE=itsasecret export MEANING_OF_LIFE=42 export MULTILINE_VAR="hello\nworld" ``` """ if not hasattr(self, "credentials"): return set() # Load any variables from the .env file into the environment. dotenv_path = dotenv_path or os.path.join(".", ".env") load_dotenv(dotenv_path) for credential in self.credentials: if credential["name"] not in os.environ: raise exceptions.CredentialNotFound( f"Credential {credential['name']!r} missing from environment or .env file." 
) return self.credentials def validate_configuration_values(self, source, **kwargs): """Validate that the configuration values, passed as either a file or a json string, are correct.""" return self._validate_values("configuration_values", source, **kwargs) def validate_input_values(self, source, **kwargs): """Validate that the input values, passed as either a file or a json string, are correct.""" return self._validate_values("input_values", source, **kwargs) def validate_output_values(self, source, **kwargs): """Validate that the output values, passed as either a file or a json string, are correct.""" return self._validate_values("output_values", source, **kwargs) def validate_monitor_message(self, source, **kwargs): """Validate monitor message against the monitor message schema strand.""" return self._validate_values(kind="monitor_message", source=source, **kwargs) def validate_configuration_manifest(self, source, **kwargs): """Validate the input manifest, passed as either a file or a json string.""" return self._validate_manifest("configuration_manifest", source, **kwargs) def validate_input_manifest(self, source, **kwargs): """Validate the input manifest, passed as either a file or a json string.""" return self._validate_manifest("input_manifest", source, **kwargs) def validate_output_manifest(self, source, **kwargs): """Validate the output manifest, passed as either a file or a json string.""" return self._validate_manifest("output_manifest", source, **kwargs) @staticmethod def _get_cls(name, cls): """Getter that will return cls[name] if cls is a dict or cls otherwise""" return cls.get(name, None) if isinstance(cls, dict) else cls def validate(self, allow_missing=False, allow_extra=False, cls=None, **kwargs): """Validate strands from sources provided as keyword arguments Usage: ``` self.twine.validate( input_values=input_values, input_manifest=input_manifest, credentials=credentials, children=children, cls=CLASS_MAP, allow_missing=False, allow_extra=False, ) 
``` :param bool allow_missing: If strand is present in the twine, but the source is equal to None, allow validation to continue. :param bool allow_extra: If strand is present in the sources, but not in the twine, allow validation to continue (only strands in the twine will be validated and converted, others will be returned as-is) :param any cls: optional dict of classes keyed on strand name (alternatively, one single class which will be applied to strands) which will be instantiated with the validated source data. :return dict: dict of validated and initialised sources """ # pop any strand name:data pairs out of kwargs and into their own dict source_kwargs = tuple(name for name in kwargs.keys() if name in ALL_STRANDS) sources = dict((name, kwargs.pop(name)) for name in source_kwargs) for strand_name, strand_data in sources.items(): if not allow_extra: if (strand_data is not None) and (strand_name not in self.available_strands): raise exceptions.StrandNotFound( f"Source data is provided for '{strand_name}' but no such strand is defined in the twine" ) if not allow_missing: if (strand_name in self.available_strands) and (strand_data is None): raise exceptions.TwineValueException( f"The '{strand_name}' strand is defined in the twine, but no data is provided in sources" ) if strand_data is not None: # TODO Consider reintroducing a skip based on whether cls is already instantiated. For now, leave it the # responsibility of the caller to determine what has already been validated and what hasn't. 
# # Use the twine to validate and instantiate as the desired class # if not isinstance(value, type(cls)): # self.logger.debug( # "Instantiating %s as %s and validating against twine", name, cls.__name__ if cls else "default_class" # ) # return self.twine.validate(name, source=value, cls=cls) method = getattr(self, f"validate_{strand_name}") klass = self._get_cls(strand_name, cls) sources[strand_name] = method(strand_data, cls=klass, **kwargs) else: sources[strand_name] = None return sources def validate_strand(self, name, source, **kwargs): """Validate a single strand by name.""" return self.validate({name: source}, **kwargs)[name] def prepare(self, *args, cls=None, **kwargs): """Prepare instance for strand data using a class map.""" prepared = {} for arg in args: if arg not in ALL_STRANDS: raise exceptions.UnknownStrand(f"Unknown strand '{arg}'") elif arg not in self.available_strands: prepared[arg] = None else: klass = self._get_cls(arg, cls) prepared[arg] = klass(**kwargs) if klass else dict(**kwargs) if hasattr(prepared[arg], "prepare"): prepared[arg] = prepared[arg].prepare(getattr(self, arg)) return prepared
random_line_split
twine.py
import json as jsonlib import logging import os import pkg_resources from dotenv import load_dotenv from jsonschema import ValidationError, validate as jsonschema_validate from . import exceptions from .utils import load_json, trim_suffix logger = logging.getLogger(__name__) SCHEMA_STRANDS = ("input_values", "configuration_values", "output_values", "monitor_message") MANIFEST_STRANDS = ( "configuration_manifest", "input_manifest", "output_manifest", ) CREDENTIAL_STRANDS = ("credentials",) CHILDREN_STRANDS = ("children",) ALL_STRANDS = ( *SCHEMA_STRANDS, *MANIFEST_STRANDS, *CREDENTIAL_STRANDS, *CHILDREN_STRANDS, ) class Twine: """Twine class manages validation of inputs and outputs to/from a data service, based on spec in a 'twine' file. Instantiate a Twine by providing a file name or a utf-8 encoded string containing valid json. The twine is itself validated to be correct on instantiation of Twine(). Note: Instantiating the twine does not validate that any inputs to an application are correct - it merely checks that the twine itself is correct. """ def __init__(self, **kwargs): for name, strand in self._load_twine(**kwargs).items(): setattr(self, name, strand) self._available_strands = set(trim_suffix(name, "_schema") for name in vars(self)) self._available_manifest_strands = self._available_strands & set(MANIFEST_STRANDS) def _load_twine(self, source=None): """Load twine from a *.json filename, file-like or a json string and validates twine contents.""" if source is None: # If loading an unspecified twine, return an empty one rather than raising error (like in _load_data()) raw_twine = {} logger.warning("No twine source specified. 
Loading empty twine.") else: raw_twine = self._load_json("twine", source, allowed_kinds=("file-like", "filename", "string", "object")) self._validate_against_schema("twine", raw_twine) self._validate_twine_version(twine_file_twined_version=raw_twine.get("twined_version", None)) return raw_twine def _load_json(self, kind, source, **kwargs): """Load data from either a *.json file, an open file pointer or a json string. Directly returns any other data.""" if source is None: raise exceptions.invalid_json_map[kind](f"Cannot load {kind} - no data source specified.") # Decode the json string and deserialize to objects. try: data = load_json(source, **kwargs) except FileNotFoundError as e: raise exceptions.file_not_found_map[kind](e) except jsonlib.decoder.JSONDecodeError as e: raise exceptions.invalid_json_map[kind](e) return data def _get_schema(self, strand): """Get the schema for the given strand. Can be used to validate: - the twine file contents itself against the present version twine spec - children data against the required schema for the present version twine spec - values data for compliance with schema written in the twine (for strands like input_values_schema) :param str strand: :return dict: """ if strand == "twine": # The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain # schema itself. The twine schema is distributed with this packaged to ensure version consistency... schema_path = "schema/twine_schema.json" elif strand in CHILDREN_STRANDS: # The data is a list of children. The "children" strand of the twine describes matching criteria for # the children, not the schema of the "children" data, which is distributed with this package to ensure # version consistency... schema_path = "schema/children_schema.json" elif strand in MANIFEST_STRANDS: # The data is a manifest of files. 
The "*_manifest" strands of the twine describe matching criteria used to # filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which # is distributed with this package to ensure version consistency... schema_path = "schema/manifest_schema.json" else: if strand not in SCHEMA_STRANDS: raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.") # Get schema from twine.json file. schema_key = strand + "_schema" try: return getattr(self, schema_key) except AttributeError: raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine") return jsonlib.loads(pkg_resources.resource_string("twined", schema_path)) def _validate_against_schema(self, strand, data): """Validate data against a schema, raises exceptions of type Invalid<strand>Json if not compliant. Can be used to validate: - the twine file contents itself against the present version twine spec - children data against the required schema for the present version twine spec - values data for compliance with schema written in the twine (for strands like input_values_schema) :param str strand: :param dict data: :return None: """ schema = self._get_schema(strand) try: jsonschema_validate(instance=data, schema=schema) logger.debug("Validated %s against schema", strand) except ValidationError as e: raise exceptions.invalid_contents_map[strand](str(e)) def _validate_twine_version(self, twine_file_twined_version): """Validate that the installed version is consistent with an optional version specification in the twine file.""" installed_twined_version = pkg_resources.get_distribution("twined").version logger.debug( "Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version ) if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version): raise exceptions.TwineVersionConflict( f"Twined library version conflict. 
Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed" ) def _validate_values(self, kind, source, cls=None, **kwargs): """Validate values against the twine schema.""" data = self._load_json(kind, source, **kwargs) self._validate_against_schema(kind, data) if cls: return cls(**data) return data def _validate_manifest(self, kind, source, cls=None, **kwargs): """Validate manifest against the twine schema.""" data = self._load_json(kind, source, **kwargs) # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive inbound = True if hasattr(data, "to_primitive"): inbound = False data = data.to_primitive() self._validate_against_schema(kind, data) self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data) if cls and inbound: return cls(**data) return data def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest): """Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present in the given manifest. :param str manifest_kind: the kind of manifest that's being validated (so the correct schema can be accessed) :param dict manifest: the manifest whose datasets are to be validated :raise twined.exceptions.InvalidManifestContents: if one or more of the expected non-optional datasets is missing :return None: """ # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files. manifest_schema = getattr(self, manifest_kind) for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items(): if expected_dataset_name in manifest["datasets"]: continue if expected_dataset_schema.get("optional", False): continue raise exceptions.invalid_contents_map[manifest_kind]( f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing." 
) @property def available_strands(self): """Get the names of strands that are found in this twine. :return set: """ return self._available_strands @property def available_manifest_strands(self): """Get the names of the manifest strands that are found in this twine. :return set: """ return self._available_manifest_strands def validate_children(self, source, **kwargs): """Validate that the children values, passed as either a file or a json string, are correct.""" # TODO cache this loaded data keyed on a hashed version of kwargs children = self._load_json("children", source, **kwargs) self._validate_against_schema("children", children) strand = getattr(self, "children", []) # Loop the children and accumulate values so we have an O(1) check children_keys = {} for child in children: children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1 # Check there is at least one child for each item described in the strand # TODO add max, min num specs to the strand schema and check here for item in strand: strand_key = item["key"] if children_keys.get(strand_key, 0) <= 0: raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}") # Loop the strand and add unique keys to dict so we have an O(1) check strand_keys = {} for item in strand: strand_keys[item["key"]] = True # Check that each child has a key which is described in the strand for child in children: child_key = child["key"] if not strand_keys.get(child_key, False): raise exceptions.InvalidValuesContents( f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine." ) # TODO Additional validation that the children match what is set as required in the Twine return children def validate_credentials(self, *args, dotenv_path=None, **kwargs): """Validate that all credentials required by the twine are present. Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets manager (e.g. 
Google Cloud Secrets), they must be loaded into the environment before validating the credentials strand. If not present in the environment, validate_credentials will check for variables in a .env file (if present) and populate the environment with them. Typically a .env file resides at the root of your application (the working directory) although a specific path may be set using the `dotenv_path` argument. .env files should never be committed to git or any other version control system. A .env file can look like this: ``` # a comment that will be ignored. YOUR_SECRET_VALUE=itsasecret MEANING_OF_LIFE=42 MULTILINE_VAR="hello\nworld" ``` Or like this (also useful for bash users): ``` export YOUR_SECRET_VALUE=itsasecret export MEANING_OF_LIFE=42 export MULTILINE_VAR="hello\nworld" ``` """ if not hasattr(self, "credentials"): return set() # Load any variables from the .env file into the environment. dotenv_path = dotenv_path or os.path.join(".", ".env") load_dotenv(dotenv_path) for credential in self.credentials: if credential["name"] not in os.environ: raise exceptions.CredentialNotFound( f"Credential {credential['name']!r} missing from environment or .env file." 
) return self.credentials def validate_configuration_values(self, source, **kwargs): """Validate that the configuration values, passed as either a file or a json string, are correct.""" return self._validate_values("configuration_values", source, **kwargs) def validate_input_values(self, source, **kwargs): """Validate that the input values, passed as either a file or a json string, are correct.""" return self._validate_values("input_values", source, **kwargs) def validate_output_values(self, source, **kwargs): """Validate that the output values, passed as either a file or a json string, are correct.""" return self._validate_values("output_values", source, **kwargs) def validate_monitor_message(self, source, **kwargs): """Validate monitor message against the monitor message schema strand.""" return self._validate_values(kind="monitor_message", source=source, **kwargs) def validate_configuration_manifest(self, source, **kwargs):
def validate_input_manifest(self, source, **kwargs): """Validate the input manifest, passed as either a file or a json string.""" return self._validate_manifest("input_manifest", source, **kwargs) def validate_output_manifest(self, source, **kwargs): """Validate the output manifest, passed as either a file or a json string.""" return self._validate_manifest("output_manifest", source, **kwargs) @staticmethod def _get_cls(name, cls): """Getter that will return cls[name] if cls is a dict or cls otherwise""" return cls.get(name, None) if isinstance(cls, dict) else cls def validate(self, allow_missing=False, allow_extra=False, cls=None, **kwargs): """Validate strands from sources provided as keyword arguments Usage: ``` self.twine.validate( input_values=input_values, input_manifest=input_manifest, credentials=credentials, children=children, cls=CLASS_MAP, allow_missing=False, allow_extra=False, ) ``` :param bool allow_missing: If strand is present in the twine, but the source is equal to None, allow validation to continue. :param bool allow_extra: If strand is present in the sources, but not in the twine, allow validation to continue (only strands in the twine will be validated and converted, others will be returned as-is) :param any cls: optional dict of classes keyed on strand name (alternatively, one single class which will be applied to strands) which will be instantiated with the validated source data. 
:return dict: dict of validated and initialised sources """ # pop any strand name:data pairs out of kwargs and into their own dict source_kwargs = tuple(name for name in kwargs.keys() if name in ALL_STRANDS) sources = dict((name, kwargs.pop(name)) for name in source_kwargs) for strand_name, strand_data in sources.items(): if not allow_extra: if (strand_data is not None) and (strand_name not in self.available_strands): raise exceptions.StrandNotFound( f"Source data is provided for '{strand_name}' but no such strand is defined in the twine" ) if not allow_missing: if (strand_name in self.available_strands) and (strand_data is None): raise exceptions.TwineValueException( f"The '{strand_name}' strand is defined in the twine, but no data is provided in sources" ) if strand_data is not None: # TODO Consider reintroducing a skip based on whether cls is already instantiated. For now, leave it the # responsibility of the caller to determine what has already been validated and what hasn't. # # Use the twine to validate and instantiate as the desired class # if not isinstance(value, type(cls)): # self.logger.debug( # "Instantiating %s as %s and validating against twine", name, cls.__name__ if cls else "default_class" # ) # return self.twine.validate(name, source=value, cls=cls) method = getattr(self, f"validate_{strand_name}") klass = self._get_cls(strand_name, cls) sources[strand_name] = method(strand_data, cls=klass, **kwargs) else: sources[strand_name] = None return sources def validate_strand(self, name, source, **kwargs): """Validate a single strand by name.""" return self.validate({name: source}, **kwargs)[name] def prepare(self, *args, cls=None, **kwargs): """Prepare instance for strand data using a class map.""" prepared = {} for arg in args: if arg not in ALL_STRANDS: raise exceptions.UnknownStrand(f"Unknown strand '{arg}'") elif arg not in self.available_strands: prepared[arg] = None else: klass = self._get_cls(arg, cls) prepared[arg] = klass(**kwargs) if klass else 
dict(**kwargs) if hasattr(prepared[arg], "prepare"): prepared[arg] = prepared[arg].prepare(getattr(self, arg)) return prepared
"""Validate the input manifest, passed as either a file or a json string.""" return self._validate_manifest("configuration_manifest", source, **kwargs)
identifier_body
twine.py
import json as jsonlib import logging import os import pkg_resources from dotenv import load_dotenv from jsonschema import ValidationError, validate as jsonschema_validate from . import exceptions from .utils import load_json, trim_suffix logger = logging.getLogger(__name__) SCHEMA_STRANDS = ("input_values", "configuration_values", "output_values", "monitor_message") MANIFEST_STRANDS = ( "configuration_manifest", "input_manifest", "output_manifest", ) CREDENTIAL_STRANDS = ("credentials",) CHILDREN_STRANDS = ("children",) ALL_STRANDS = ( *SCHEMA_STRANDS, *MANIFEST_STRANDS, *CREDENTIAL_STRANDS, *CHILDREN_STRANDS, ) class Twine: """Twine class manages validation of inputs and outputs to/from a data service, based on spec in a 'twine' file. Instantiate a Twine by providing a file name or a utf-8 encoded string containing valid json. The twine is itself validated to be correct on instantiation of Twine(). Note: Instantiating the twine does not validate that any inputs to an application are correct - it merely checks that the twine itself is correct. """ def __init__(self, **kwargs): for name, strand in self._load_twine(**kwargs).items(): setattr(self, name, strand) self._available_strands = set(trim_suffix(name, "_schema") for name in vars(self)) self._available_manifest_strands = self._available_strands & set(MANIFEST_STRANDS) def _load_twine(self, source=None): """Load twine from a *.json filename, file-like or a json string and validates twine contents.""" if source is None: # If loading an unspecified twine, return an empty one rather than raising error (like in _load_data()) raw_twine = {} logger.warning("No twine source specified. 
Loading empty twine.") else: raw_twine = self._load_json("twine", source, allowed_kinds=("file-like", "filename", "string", "object")) self._validate_against_schema("twine", raw_twine) self._validate_twine_version(twine_file_twined_version=raw_twine.get("twined_version", None)) return raw_twine def _load_json(self, kind, source, **kwargs): """Load data from either a *.json file, an open file pointer or a json string. Directly returns any other data.""" if source is None: raise exceptions.invalid_json_map[kind](f"Cannot load {kind} - no data source specified.") # Decode the json string and deserialize to objects. try: data = load_json(source, **kwargs) except FileNotFoundError as e: raise exceptions.file_not_found_map[kind](e) except jsonlib.decoder.JSONDecodeError as e: raise exceptions.invalid_json_map[kind](e) return data def _get_schema(self, strand): """Get the schema for the given strand. Can be used to validate: - the twine file contents itself against the present version twine spec - children data against the required schema for the present version twine spec - values data for compliance with schema written in the twine (for strands like input_values_schema) :param str strand: :return dict: """ if strand == "twine": # The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain # schema itself. The twine schema is distributed with this packaged to ensure version consistency... schema_path = "schema/twine_schema.json" elif strand in CHILDREN_STRANDS: # The data is a list of children. The "children" strand of the twine describes matching criteria for # the children, not the schema of the "children" data, which is distributed with this package to ensure # version consistency... schema_path = "schema/children_schema.json" elif strand in MANIFEST_STRANDS: # The data is a manifest of files. 
The "*_manifest" strands of the twine describe matching criteria used to # filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which # is distributed with this package to ensure version consistency... schema_path = "schema/manifest_schema.json" else: if strand not in SCHEMA_STRANDS: raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.") # Get schema from twine.json file. schema_key = strand + "_schema" try: return getattr(self, schema_key) except AttributeError: raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine") return jsonlib.loads(pkg_resources.resource_string("twined", schema_path)) def _validate_against_schema(self, strand, data): """Validate data against a schema, raises exceptions of type Invalid<strand>Json if not compliant. Can be used to validate: - the twine file contents itself against the present version twine spec - children data against the required schema for the present version twine spec - values data for compliance with schema written in the twine (for strands like input_values_schema) :param str strand: :param dict data: :return None: """ schema = self._get_schema(strand) try: jsonschema_validate(instance=data, schema=schema) logger.debug("Validated %s against schema", strand) except ValidationError as e: raise exceptions.invalid_contents_map[strand](str(e)) def _validate_twine_version(self, twine_file_twined_version): """Validate that the installed version is consistent with an optional version specification in the twine file.""" installed_twined_version = pkg_resources.get_distribution("twined").version logger.debug( "Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version ) if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version): raise exceptions.TwineVersionConflict( f"Twined library version conflict. 
Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed" ) def _validate_values(self, kind, source, cls=None, **kwargs): """Validate values against the twine schema.""" data = self._load_json(kind, source, **kwargs) self._validate_against_schema(kind, data) if cls: return cls(**data) return data def _validate_manifest(self, kind, source, cls=None, **kwargs): """Validate manifest against the twine schema.""" data = self._load_json(kind, source, **kwargs) # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive inbound = True if hasattr(data, "to_primitive"): inbound = False data = data.to_primitive() self._validate_against_schema(kind, data) self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data) if cls and inbound: return cls(**data) return data def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest): """Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present in the given manifest. :param str manifest_kind: the kind of manifest that's being validated (so the correct schema can be accessed) :param dict manifest: the manifest whose datasets are to be validated :raise twined.exceptions.InvalidManifestContents: if one or more of the expected non-optional datasets is missing :return None: """ # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files. manifest_schema = getattr(self, manifest_kind) for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items(): if expected_dataset_name in manifest["datasets"]: continue if expected_dataset_schema.get("optional", False): continue raise exceptions.invalid_contents_map[manifest_kind]( f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing." 
) @property def available_strands(self): """Get the names of strands that are found in this twine. :return set: """ return self._available_strands @property def available_manifest_strands(self): """Get the names of the manifest strands that are found in this twine. :return set: """ return self._available_manifest_strands def validate_children(self, source, **kwargs): """Validate that the children values, passed as either a file or a json string, are correct.""" # TODO cache this loaded data keyed on a hashed version of kwargs children = self._load_json("children", source, **kwargs) self._validate_against_schema("children", children) strand = getattr(self, "children", []) # Loop the children and accumulate values so we have an O(1) check children_keys = {} for child in children: children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1 # Check there is at least one child for each item described in the strand # TODO add max, min num specs to the strand schema and check here for item in strand: strand_key = item["key"] if children_keys.get(strand_key, 0) <= 0: raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}") # Loop the strand and add unique keys to dict so we have an O(1) check strand_keys = {} for item in strand: strand_keys[item["key"]] = True # Check that each child has a key which is described in the strand for child in children:
# TODO Additional validation that the children match what is set as required in the Twine return children def validate_credentials(self, *args, dotenv_path=None, **kwargs): """Validate that all credentials required by the twine are present. Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets manager (e.g. Google Cloud Secrets), they must be loaded into the environment before validating the credentials strand. If not present in the environment, validate_credentials will check for variables in a .env file (if present) and populate the environment with them. Typically a .env file resides at the root of your application (the working directory) although a specific path may be set using the `dotenv_path` argument. .env files should never be committed to git or any other version control system. A .env file can look like this: ``` # a comment that will be ignored. YOUR_SECRET_VALUE=itsasecret MEANING_OF_LIFE=42 MULTILINE_VAR="hello\nworld" ``` Or like this (also useful for bash users): ``` export YOUR_SECRET_VALUE=itsasecret export MEANING_OF_LIFE=42 export MULTILINE_VAR="hello\nworld" ``` """ if not hasattr(self, "credentials"): return set() # Load any variables from the .env file into the environment. dotenv_path = dotenv_path or os.path.join(".", ".env") load_dotenv(dotenv_path) for credential in self.credentials: if credential["name"] not in os.environ: raise exceptions.CredentialNotFound( f"Credential {credential['name']!r} missing from environment or .env file." 
) return self.credentials def validate_configuration_values(self, source, **kwargs): """Validate that the configuration values, passed as either a file or a json string, are correct.""" return self._validate_values("configuration_values", source, **kwargs) def validate_input_values(self, source, **kwargs): """Validate that the input values, passed as either a file or a json string, are correct.""" return self._validate_values("input_values", source, **kwargs) def validate_output_values(self, source, **kwargs): """Validate that the output values, passed as either a file or a json string, are correct.""" return self._validate_values("output_values", source, **kwargs) def validate_monitor_message(self, source, **kwargs): """Validate monitor message against the monitor message schema strand.""" return self._validate_values(kind="monitor_message", source=source, **kwargs) def validate_configuration_manifest(self, source, **kwargs): """Validate the input manifest, passed as either a file or a json string.""" return self._validate_manifest("configuration_manifest", source, **kwargs) def validate_input_manifest(self, source, **kwargs): """Validate the input manifest, passed as either a file or a json string.""" return self._validate_manifest("input_manifest", source, **kwargs) def validate_output_manifest(self, source, **kwargs): """Validate the output manifest, passed as either a file or a json string.""" return self._validate_manifest("output_manifest", source, **kwargs) @staticmethod def _get_cls(name, cls): """Getter that will return cls[name] if cls is a dict or cls otherwise""" return cls.get(name, None) if isinstance(cls, dict) else cls def validate(self, allow_missing=False, allow_extra=False, cls=None, **kwargs): """Validate strands from sources provided as keyword arguments Usage: ``` self.twine.validate( input_values=input_values, input_manifest=input_manifest, credentials=credentials, children=children, cls=CLASS_MAP, allow_missing=False, allow_extra=False, ) 
``` :param bool allow_missing: If strand is present in the twine, but the source is equal to None, allow validation to continue. :param bool allow_extra: If strand is present in the sources, but not in the twine, allow validation to continue (only strands in the twine will be validated and converted, others will be returned as-is) :param any cls: optional dict of classes keyed on strand name (alternatively, one single class which will be applied to strands) which will be instantiated with the validated source data. :return dict: dict of validated and initialised sources """ # pop any strand name:data pairs out of kwargs and into their own dict source_kwargs = tuple(name for name in kwargs.keys() if name in ALL_STRANDS) sources = dict((name, kwargs.pop(name)) for name in source_kwargs) for strand_name, strand_data in sources.items(): if not allow_extra: if (strand_data is not None) and (strand_name not in self.available_strands): raise exceptions.StrandNotFound( f"Source data is provided for '{strand_name}' but no such strand is defined in the twine" ) if not allow_missing: if (strand_name in self.available_strands) and (strand_data is None): raise exceptions.TwineValueException( f"The '{strand_name}' strand is defined in the twine, but no data is provided in sources" ) if strand_data is not None: # TODO Consider reintroducing a skip based on whether cls is already instantiated. For now, leave it the # responsibility of the caller to determine what has already been validated and what hasn't. 
# # Use the twine to validate and instantiate as the desired class # if not isinstance(value, type(cls)): # self.logger.debug( # "Instantiating %s as %s and validating against twine", name, cls.__name__ if cls else "default_class" # ) # return self.twine.validate(name, source=value, cls=cls) method = getattr(self, f"validate_{strand_name}") klass = self._get_cls(strand_name, cls) sources[strand_name] = method(strand_data, cls=klass, **kwargs) else: sources[strand_name] = None return sources def validate_strand(self, name, source, **kwargs): """Validate a single strand by name.""" return self.validate({name: source}, **kwargs)[name] def prepare(self, *args, cls=None, **kwargs): """Prepare instance for strand data using a class map.""" prepared = {} for arg in args: if arg not in ALL_STRANDS: raise exceptions.UnknownStrand(f"Unknown strand '{arg}'") elif arg not in self.available_strands: prepared[arg] = None else: klass = self._get_cls(arg, cls) prepared[arg] = klass(**kwargs) if klass else dict(**kwargs) if hasattr(prepared[arg], "prepare"): prepared[arg] = prepared[arg].prepare(getattr(self, arg)) return prepared
child_key = child["key"] if not strand_keys.get(child_key, False): raise exceptions.InvalidValuesContents( f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine." )
conditional_block
config.rs
use anyhow::{Context, Error}; use clipboard::{ClipboardContext, ClipboardProvider}; use image::Rgba; use silicon::directories::PROJECT_DIRS; use silicon::formatter::{ImageFormatter, ImageFormatterBuilder}; use silicon::utils::{Background, ShadowAdder, ToRgba}; use std::ffi::OsString; use std::fs::File; use std::io::{stdin, Read}; use std::num::ParseIntError; use std::path::PathBuf; use structopt::clap::AppSettings::ColoredHelp; use structopt::StructOpt; use syntect::highlighting::{Theme, ThemeSet}; use syntect::parsing::{SyntaxReference, SyntaxSet}; pub fn config_file() -> PathBuf { std::env::var("SILICON_CONFIG_PATH") .ok() .map(PathBuf::from) .filter(|config_path| config_path.is_file()) .unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config")) } pub fn get_args_from_config_file() -> Vec<OsString>
fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> { s.to_rgba() .map_err(|_| format_err!("Invalid color: `{}`", s)) } fn parse_font_str(s: &str) -> Vec<(String, f32)> { let mut result = vec![]; for font in s.split(';') { let tmp = font.split('=').collect::<Vec<_>>(); let font_name = tmp[0].to_owned(); let font_size = tmp .get(1) .map(|s| s.parse::<f32>().unwrap()) .unwrap_or(26.0); result.push((font_name, font_size)); } result } fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> { let mut result = vec![]; for range in s.split(';') { let range: Vec<u32> = range .split('-') .map(|s| s.parse::<u32>()) .collect::<Result<Vec<_>, _>>()?; if range.len() == 1 { result.push(range[0]) } else { for i in range[0]..=range[1] { result.push(i); } } } Ok(result) } // https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180 type FontList = Vec<(String, f32)>; type Lines = Vec<u32>; #[derive(StructOpt, Debug)] #[structopt(name = "silicon")] #[structopt(global_setting(ColoredHelp))] pub struct Config { /// Background image #[structopt(long, value_name = "IMAGE", conflicts_with = "background")] pub background_image: Option<PathBuf>, /// Background color of the image #[structopt( long, short, value_name = "COLOR", default_value = "#aaaaff", parse(try_from_str = parse_str_color) )] pub background: Rgba<u8>, /// Show the path of silicon config file #[structopt(long)] pub config_file: bool, /// Read input from clipboard. #[structopt(long)] pub from_clipboard: bool, /// File to read. If not set, stdin will be use. #[structopt(value_name = "FILE", parse(from_os_str))] pub file: Option<PathBuf>, /// The fallback font list. eg. 'Hack; SimSun=31' #[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))] pub font: Option<FontList>, /// Lines to high light. rg. 
'1-3; 4' #[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))] pub highlight_lines: Option<Lines>, /// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs"). #[structopt(short, value_name = "LANG", long)] pub language: Option<String>, /// Pad between lines #[structopt(long, value_name = "PAD", default_value = "2")] pub line_pad: u32, /// Line number offset #[structopt(long, value_name = "OFFSET", default_value = "1")] pub line_offset: u32, /// List all themes. #[structopt(long)] pub list_themes: bool, /// List all available fonts in your system #[structopt(long)] pub list_fonts: bool, /// Write output image to specific location instead of cwd. #[structopt( short, long, value_name = "PATH", required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"] )] pub output: Option<PathBuf>, /// Hide the window controls. #[structopt(long)] pub no_window_controls: bool, /// Show window title #[structopt(long, value_name = "WINDOW_TITLE")] pub window_title: Option<String>, /// Hide the line number. #[structopt(long)] pub no_line_number: bool, /// Don't round the corner #[structopt(long)] pub no_round_corner: bool, /// Pad horiz #[structopt(long, value_name = "PAD", default_value = "80")] pub pad_horiz: u32, /// Pad vert #[structopt(long, value_name = "PAD", default_value = "100")] pub pad_vert: u32, /// Color of shadow #[structopt( long, value_name = "COLOR", default_value = "#555555", parse(try_from_str = parse_str_color) )] pub shadow_color: Rgba<u8>, /// Blur radius of the shadow. 
(set it to 0 to hide shadow) #[structopt(long, value_name = "R", default_value = "0")] pub shadow_blur_radius: f32, /// Shadow's offset in Y axis #[structopt(long, value_name = "Y", default_value = "0")] pub shadow_offset_y: i32, /// Shadow's offset in X axis #[structopt(long, value_name = "X", default_value = "0")] pub shadow_offset_x: i32, /// Tab width #[structopt(long, value_name = "WIDTH", default_value = "4")] pub tab_width: u8, /// The syntax highlight theme. It can be a theme name or path to a .tmTheme file. #[structopt(long, value_name = "THEME", default_value = "Dracula")] pub theme: String, /// Copy the output image to clipboard. #[structopt(short = "c", long)] pub to_clipboard: bool, // Draw a custom text on the bottom right corner // #[structopt(long)] // watermark: Option<String>, /// build syntax definition and theme cache #[structopt(long, value_name = "OUTPUT_DIR")] pub build_cache: Option<Option<PathBuf>>, } impl Config { pub fn get_source_code<'a>( &self, ps: &'a SyntaxSet, ) -> Result<(&'a SyntaxReference, String), Error> { let possible_language = self.language.as_ref().map(|language| { ps.find_syntax_by_token(language) .ok_or_else(|| format_err!("Unsupported language: {}", language)) }); if self.from_clipboard { let mut ctx = ClipboardContext::new() .map_err(|e| format_err!("failed to access clipboard: {}", e))?; let code = ctx .get_contents() .map_err(|e| format_err!("failed to access clipboard: {}", e))?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_by_first_line(&code) .ok_or_else(|| format_err!("Failed to detect the language")) })?; return Ok((language, code)); } if let Some(path) = &self.file { let mut s = String::new(); let mut file = File::open(path)?; file.read_to_string(&mut s)?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_for_file(path)? 
.ok_or_else(|| format_err!("Failed to detect the language")) })?; return Ok((language, s)); } let mut stdin = stdin(); let mut s = String::new(); stdin.read_to_string(&mut s)?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_by_first_line(&s) .ok_or_else(|| format_err!("Failed to detect the language")) })?; Ok((language, s)) } pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> { if let Some(theme) = ts.themes.get(&self.theme) { Ok(theme.clone()) } else { ThemeSet::get_theme(&self.theme) .context(format!("Canot load the theme: {}", self.theme)) } } pub fn get_formatter(&self) -> Result<ImageFormatter, Error> { let formatter = ImageFormatterBuilder::new() .line_pad(self.line_pad) .window_controls(!self.no_window_controls) .window_title(self.window_title.clone()) .line_number(!self.no_line_number) .font(self.font.clone().unwrap_or_default()) .round_corner(!self.no_round_corner) .shadow_adder(self.get_shadow_adder()?) .tab_width(self.tab_width) .highlight_lines(self.highlight_lines.clone().unwrap_or_default()) .line_offset(self.line_offset); Ok(formatter.build()?) } pub fn get_shadow_adder(&self) -> Result<ShadowAdder, Error> { Ok(ShadowAdder::new() .background(match &self.background_image { Some(path) => Background::Image(image::open(path)?.to_rgba8()), None => Background::Solid(self.background), }) .shadow_color(self.shadow_color) .blur_radius(self.shadow_blur_radius) .pad_horiz(self.pad_horiz) .pad_vert(self.pad_vert) .offset_x(self.shadow_offset_x) .offset_y(self.shadow_offset_y)) } pub fn get_expanded_output(&self) -> Option<PathBuf> { let need_expand = self.output.as_ref().map(|p| p.starts_with("~")) == Some(true); if let (Ok(home_dir), true) = (std::env::var("HOME"), need_expand) { self.output .as_ref() .map(|p| p.to_string_lossy().replacen('~', &home_dir, 1).into()) } else { self.output.clone() } } }
{ let args = std::fs::read_to_string(config_file()) .ok() .and_then(|content| { content .split('\n') .map(|line| line.trim()) .filter(|line| !line.starts_with('#') && !line.is_empty()) .map(shell_words::split) .collect::<Result<Vec<_>, _>>() .ok() }) .unwrap_or_default(); args.iter().flatten().map(OsString::from).collect() }
identifier_body
config.rs
use anyhow::{Context, Error}; use clipboard::{ClipboardContext, ClipboardProvider}; use image::Rgba; use silicon::directories::PROJECT_DIRS; use silicon::formatter::{ImageFormatter, ImageFormatterBuilder}; use silicon::utils::{Background, ShadowAdder, ToRgba}; use std::ffi::OsString; use std::fs::File; use std::io::{stdin, Read}; use std::num::ParseIntError; use std::path::PathBuf; use structopt::clap::AppSettings::ColoredHelp; use structopt::StructOpt; use syntect::highlighting::{Theme, ThemeSet}; use syntect::parsing::{SyntaxReference, SyntaxSet}; pub fn config_file() -> PathBuf { std::env::var("SILICON_CONFIG_PATH") .ok() .map(PathBuf::from) .filter(|config_path| config_path.is_file()) .unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config")) } pub fn get_args_from_config_file() -> Vec<OsString> { let args = std::fs::read_to_string(config_file()) .ok() .and_then(|content| { content .split('\n') .map(|line| line.trim()) .filter(|line| !line.starts_with('#') && !line.is_empty()) .map(shell_words::split) .collect::<Result<Vec<_>, _>>() .ok() }) .unwrap_or_default(); args.iter().flatten().map(OsString::from).collect() } fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> { s.to_rgba() .map_err(|_| format_err!("Invalid color: `{}`", s)) } fn parse_font_str(s: &str) -> Vec<(String, f32)> { let mut result = vec![]; for font in s.split(';') { let tmp = font.split('=').collect::<Vec<_>>(); let font_name = tmp[0].to_owned(); let font_size = tmp .get(1) .map(|s| s.parse::<f32>().unwrap()) .unwrap_or(26.0); result.push((font_name, font_size)); } result } fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> { let mut result = vec![]; for range in s.split(';') { let range: Vec<u32> = range .split('-') .map(|s| s.parse::<u32>()) .collect::<Result<Vec<_>, _>>()?; if range.len() == 1 { result.push(range[0]) } else { for i in range[0]..=range[1] { result.push(i); } } } Ok(result) } // 
https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180 type FontList = Vec<(String, f32)>; type Lines = Vec<u32>; #[derive(StructOpt, Debug)] #[structopt(name = "silicon")] #[structopt(global_setting(ColoredHelp))] pub struct Config { /// Background image #[structopt(long, value_name = "IMAGE", conflicts_with = "background")] pub background_image: Option<PathBuf>, /// Background color of the image #[structopt( long, short, value_name = "COLOR", default_value = "#aaaaff", parse(try_from_str = parse_str_color) )] pub background: Rgba<u8>, /// Show the path of silicon config file #[structopt(long)] pub config_file: bool, /// Read input from clipboard. #[structopt(long)] pub from_clipboard: bool, /// File to read. If not set, stdin will be use. #[structopt(value_name = "FILE", parse(from_os_str))] pub file: Option<PathBuf>, /// The fallback font list. eg. 'Hack; SimSun=31' #[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))] pub font: Option<FontList>, /// Lines to high light. rg. '1-3; 4' #[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))] pub highlight_lines: Option<Lines>, /// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs"). #[structopt(short, value_name = "LANG", long)] pub language: Option<String>, /// Pad between lines #[structopt(long, value_name = "PAD", default_value = "2")] pub line_pad: u32, /// Line number offset #[structopt(long, value_name = "OFFSET", default_value = "1")] pub line_offset: u32, /// List all themes. #[structopt(long)] pub list_themes: bool, /// List all available fonts in your system #[structopt(long)] pub list_fonts: bool, /// Write output image to specific location instead of cwd. 
#[structopt( short, long, value_name = "PATH", required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"] )] pub output: Option<PathBuf>, /// Hide the window controls. #[structopt(long)] pub no_window_controls: bool, /// Show window title #[structopt(long, value_name = "WINDOW_TITLE")] pub window_title: Option<String>, /// Hide the line number. #[structopt(long)] pub no_line_number: bool, /// Don't round the corner #[structopt(long)] pub no_round_corner: bool, /// Pad horiz #[structopt(long, value_name = "PAD", default_value = "80")] pub pad_horiz: u32, /// Pad vert #[structopt(long, value_name = "PAD", default_value = "100")] pub pad_vert: u32, /// Color of shadow #[structopt( long, value_name = "COLOR", default_value = "#555555", parse(try_from_str = parse_str_color) )] pub shadow_color: Rgba<u8>, /// Blur radius of the shadow. (set it to 0 to hide shadow) #[structopt(long, value_name = "R", default_value = "0")] pub shadow_blur_radius: f32, /// Shadow's offset in Y axis #[structopt(long, value_name = "Y", default_value = "0")] pub shadow_offset_y: i32, /// Shadow's offset in X axis #[structopt(long, value_name = "X", default_value = "0")] pub shadow_offset_x: i32, /// Tab width #[structopt(long, value_name = "WIDTH", default_value = "4")] pub tab_width: u8, /// The syntax highlight theme. It can be a theme name or path to a .tmTheme file. #[structopt(long, value_name = "THEME", default_value = "Dracula")] pub theme: String, /// Copy the output image to clipboard. 
#[structopt(short = "c", long)] pub to_clipboard: bool, // Draw a custom text on the bottom right corner // #[structopt(long)] // watermark: Option<String>, /// build syntax definition and theme cache #[structopt(long, value_name = "OUTPUT_DIR")] pub build_cache: Option<Option<PathBuf>>, } impl Config { pub fn get_source_code<'a>( &self, ps: &'a SyntaxSet, ) -> Result<(&'a SyntaxReference, String), Error> { let possible_language = self.language.as_ref().map(|language| { ps.find_syntax_by_token(language) .ok_or_else(|| format_err!("Unsupported language: {}", language)) }); if self.from_clipboard { let mut ctx = ClipboardContext::new() .map_err(|e| format_err!("failed to access clipboard: {}", e))?; let code = ctx .get_contents() .map_err(|e| format_err!("failed to access clipboard: {}", e))?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_by_first_line(&code) .ok_or_else(|| format_err!("Failed to detect the language")) })?; return Ok((language, code)); } if let Some(path) = &self.file { let mut s = String::new(); let mut file = File::open(path)?; file.read_to_string(&mut s)?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_for_file(path)? 
.ok_or_else(|| format_err!("Failed to detect the language")) })?; return Ok((language, s)); } let mut stdin = stdin(); let mut s = String::new(); stdin.read_to_string(&mut s)?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_by_first_line(&s) .ok_or_else(|| format_err!("Failed to detect the language")) })?; Ok((language, s)) } pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> { if let Some(theme) = ts.themes.get(&self.theme) { Ok(theme.clone()) } else { ThemeSet::get_theme(&self.theme) .context(format!("Canot load the theme: {}", self.theme)) } } pub fn get_formatter(&self) -> Result<ImageFormatter, Error> { let formatter = ImageFormatterBuilder::new() .line_pad(self.line_pad) .window_controls(!self.no_window_controls) .window_title(self.window_title.clone()) .line_number(!self.no_line_number) .font(self.font.clone().unwrap_or_default()) .round_corner(!self.no_round_corner) .shadow_adder(self.get_shadow_adder()?) .tab_width(self.tab_width) .highlight_lines(self.highlight_lines.clone().unwrap_or_default()) .line_offset(self.line_offset); Ok(formatter.build()?) } pub fn get_shadow_adder(&self) -> Result<ShadowAdder, Error> { Ok(ShadowAdder::new() .background(match &self.background_image { Some(path) => Background::Image(image::open(path)?.to_rgba8()), None => Background::Solid(self.background), }) .shadow_color(self.shadow_color) .blur_radius(self.shadow_blur_radius) .pad_horiz(self.pad_horiz) .pad_vert(self.pad_vert) .offset_x(self.shadow_offset_x) .offset_y(self.shadow_offset_y)) } pub fn
(&self) -> Option<PathBuf> { let need_expand = self.output.as_ref().map(|p| p.starts_with("~")) == Some(true); if let (Ok(home_dir), true) = (std::env::var("HOME"), need_expand) { self.output .as_ref() .map(|p| p.to_string_lossy().replacen('~', &home_dir, 1).into()) } else { self.output.clone() } } }
get_expanded_output
identifier_name
config.rs
use anyhow::{Context, Error}; use clipboard::{ClipboardContext, ClipboardProvider}; use image::Rgba; use silicon::directories::PROJECT_DIRS; use silicon::formatter::{ImageFormatter, ImageFormatterBuilder}; use silicon::utils::{Background, ShadowAdder, ToRgba}; use std::ffi::OsString; use std::fs::File; use std::io::{stdin, Read}; use std::num::ParseIntError; use std::path::PathBuf; use structopt::clap::AppSettings::ColoredHelp; use structopt::StructOpt; use syntect::highlighting::{Theme, ThemeSet}; use syntect::parsing::{SyntaxReference, SyntaxSet}; pub fn config_file() -> PathBuf { std::env::var("SILICON_CONFIG_PATH") .ok() .map(PathBuf::from) .filter(|config_path| config_path.is_file()) .unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config")) } pub fn get_args_from_config_file() -> Vec<OsString> { let args = std::fs::read_to_string(config_file()) .ok() .and_then(|content| { content .split('\n') .map(|line| line.trim()) .filter(|line| !line.starts_with('#') && !line.is_empty()) .map(shell_words::split) .collect::<Result<Vec<_>, _>>() .ok() }) .unwrap_or_default(); args.iter().flatten().map(OsString::from).collect() } fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> { s.to_rgba() .map_err(|_| format_err!("Invalid color: `{}`", s)) } fn parse_font_str(s: &str) -> Vec<(String, f32)> { let mut result = vec![]; for font in s.split(';') { let tmp = font.split('=').collect::<Vec<_>>(); let font_name = tmp[0].to_owned(); let font_size = tmp .get(1) .map(|s| s.parse::<f32>().unwrap()) .unwrap_or(26.0); result.push((font_name, font_size)); } result } fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> { let mut result = vec![]; for range in s.split(';') { let range: Vec<u32> = range .split('-') .map(|s| s.parse::<u32>()) .collect::<Result<Vec<_>, _>>()?; if range.len() == 1 { result.push(range[0]) } else { for i in range[0]..=range[1] { result.push(i); } } } Ok(result) } // 
https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180 type FontList = Vec<(String, f32)>;
pub struct Config { /// Background image #[structopt(long, value_name = "IMAGE", conflicts_with = "background")] pub background_image: Option<PathBuf>, /// Background color of the image #[structopt( long, short, value_name = "COLOR", default_value = "#aaaaff", parse(try_from_str = parse_str_color) )] pub background: Rgba<u8>, /// Show the path of silicon config file #[structopt(long)] pub config_file: bool, /// Read input from clipboard. #[structopt(long)] pub from_clipboard: bool, /// File to read. If not set, stdin will be use. #[structopt(value_name = "FILE", parse(from_os_str))] pub file: Option<PathBuf>, /// The fallback font list. eg. 'Hack; SimSun=31' #[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))] pub font: Option<FontList>, /// Lines to high light. rg. '1-3; 4' #[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))] pub highlight_lines: Option<Lines>, /// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs"). #[structopt(short, value_name = "LANG", long)] pub language: Option<String>, /// Pad between lines #[structopt(long, value_name = "PAD", default_value = "2")] pub line_pad: u32, /// Line number offset #[structopt(long, value_name = "OFFSET", default_value = "1")] pub line_offset: u32, /// List all themes. #[structopt(long)] pub list_themes: bool, /// List all available fonts in your system #[structopt(long)] pub list_fonts: bool, /// Write output image to specific location instead of cwd. #[structopt( short, long, value_name = "PATH", required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"] )] pub output: Option<PathBuf>, /// Hide the window controls. #[structopt(long)] pub no_window_controls: bool, /// Show window title #[structopt(long, value_name = "WINDOW_TITLE")] pub window_title: Option<String>, /// Hide the line number. 
#[structopt(long)] pub no_line_number: bool, /// Don't round the corner #[structopt(long)] pub no_round_corner: bool, /// Pad horiz #[structopt(long, value_name = "PAD", default_value = "80")] pub pad_horiz: u32, /// Pad vert #[structopt(long, value_name = "PAD", default_value = "100")] pub pad_vert: u32, /// Color of shadow #[structopt( long, value_name = "COLOR", default_value = "#555555", parse(try_from_str = parse_str_color) )] pub shadow_color: Rgba<u8>, /// Blur radius of the shadow. (set it to 0 to hide shadow) #[structopt(long, value_name = "R", default_value = "0")] pub shadow_blur_radius: f32, /// Shadow's offset in Y axis #[structopt(long, value_name = "Y", default_value = "0")] pub shadow_offset_y: i32, /// Shadow's offset in X axis #[structopt(long, value_name = "X", default_value = "0")] pub shadow_offset_x: i32, /// Tab width #[structopt(long, value_name = "WIDTH", default_value = "4")] pub tab_width: u8, /// The syntax highlight theme. It can be a theme name or path to a .tmTheme file. #[structopt(long, value_name = "THEME", default_value = "Dracula")] pub theme: String, /// Copy the output image to clipboard. 
#[structopt(short = "c", long)] pub to_clipboard: bool, // Draw a custom text on the bottom right corner // #[structopt(long)] // watermark: Option<String>, /// build syntax definition and theme cache #[structopt(long, value_name = "OUTPUT_DIR")] pub build_cache: Option<Option<PathBuf>>, } impl Config { pub fn get_source_code<'a>( &self, ps: &'a SyntaxSet, ) -> Result<(&'a SyntaxReference, String), Error> { let possible_language = self.language.as_ref().map(|language| { ps.find_syntax_by_token(language) .ok_or_else(|| format_err!("Unsupported language: {}", language)) }); if self.from_clipboard { let mut ctx = ClipboardContext::new() .map_err(|e| format_err!("failed to access clipboard: {}", e))?; let code = ctx .get_contents() .map_err(|e| format_err!("failed to access clipboard: {}", e))?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_by_first_line(&code) .ok_or_else(|| format_err!("Failed to detect the language")) })?; return Ok((language, code)); } if let Some(path) = &self.file { let mut s = String::new(); let mut file = File::open(path)?; file.read_to_string(&mut s)?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_for_file(path)? 
.ok_or_else(|| format_err!("Failed to detect the language")) })?; return Ok((language, s)); } let mut stdin = stdin(); let mut s = String::new(); stdin.read_to_string(&mut s)?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_by_first_line(&s) .ok_or_else(|| format_err!("Failed to detect the language")) })?; Ok((language, s)) } pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> { if let Some(theme) = ts.themes.get(&self.theme) { Ok(theme.clone()) } else { ThemeSet::get_theme(&self.theme) .context(format!("Canot load the theme: {}", self.theme)) } } pub fn get_formatter(&self) -> Result<ImageFormatter, Error> { let formatter = ImageFormatterBuilder::new() .line_pad(self.line_pad) .window_controls(!self.no_window_controls) .window_title(self.window_title.clone()) .line_number(!self.no_line_number) .font(self.font.clone().unwrap_or_default()) .round_corner(!self.no_round_corner) .shadow_adder(self.get_shadow_adder()?) .tab_width(self.tab_width) .highlight_lines(self.highlight_lines.clone().unwrap_or_default()) .line_offset(self.line_offset); Ok(formatter.build()?) } pub fn get_shadow_adder(&self) -> Result<ShadowAdder, Error> { Ok(ShadowAdder::new() .background(match &self.background_image { Some(path) => Background::Image(image::open(path)?.to_rgba8()), None => Background::Solid(self.background), }) .shadow_color(self.shadow_color) .blur_radius(self.shadow_blur_radius) .pad_horiz(self.pad_horiz) .pad_vert(self.pad_vert) .offset_x(self.shadow_offset_x) .offset_y(self.shadow_offset_y)) } pub fn get_expanded_output(&self) -> Option<PathBuf> { let need_expand = self.output.as_ref().map(|p| p.starts_with("~")) == Some(true); if let (Ok(home_dir), true) = (std::env::var("HOME"), need_expand) { self.output .as_ref() .map(|p| p.to_string_lossy().replacen('~', &home_dir, 1).into()) } else { self.output.clone() } } }
type Lines = Vec<u32>; #[derive(StructOpt, Debug)] #[structopt(name = "silicon")] #[structopt(global_setting(ColoredHelp))]
random_line_split
config.rs
use anyhow::{Context, Error}; use clipboard::{ClipboardContext, ClipboardProvider}; use image::Rgba; use silicon::directories::PROJECT_DIRS; use silicon::formatter::{ImageFormatter, ImageFormatterBuilder}; use silicon::utils::{Background, ShadowAdder, ToRgba}; use std::ffi::OsString; use std::fs::File; use std::io::{stdin, Read}; use std::num::ParseIntError; use std::path::PathBuf; use structopt::clap::AppSettings::ColoredHelp; use structopt::StructOpt; use syntect::highlighting::{Theme, ThemeSet}; use syntect::parsing::{SyntaxReference, SyntaxSet}; pub fn config_file() -> PathBuf { std::env::var("SILICON_CONFIG_PATH") .ok() .map(PathBuf::from) .filter(|config_path| config_path.is_file()) .unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config")) } pub fn get_args_from_config_file() -> Vec<OsString> { let args = std::fs::read_to_string(config_file()) .ok() .and_then(|content| { content .split('\n') .map(|line| line.trim()) .filter(|line| !line.starts_with('#') && !line.is_empty()) .map(shell_words::split) .collect::<Result<Vec<_>, _>>() .ok() }) .unwrap_or_default(); args.iter().flatten().map(OsString::from).collect() } fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> { s.to_rgba() .map_err(|_| format_err!("Invalid color: `{}`", s)) } fn parse_font_str(s: &str) -> Vec<(String, f32)> { let mut result = vec![]; for font in s.split(';') { let tmp = font.split('=').collect::<Vec<_>>(); let font_name = tmp[0].to_owned(); let font_size = tmp .get(1) .map(|s| s.parse::<f32>().unwrap()) .unwrap_or(26.0); result.push((font_name, font_size)); } result } fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> { let mut result = vec![]; for range in s.split(';') { let range: Vec<u32> = range .split('-') .map(|s| s.parse::<u32>()) .collect::<Result<Vec<_>, _>>()?; if range.len() == 1 { result.push(range[0]) } else { for i in range[0]..=range[1] { result.push(i); } } } Ok(result) } // 
https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180 type FontList = Vec<(String, f32)>; type Lines = Vec<u32>; #[derive(StructOpt, Debug)] #[structopt(name = "silicon")] #[structopt(global_setting(ColoredHelp))] pub struct Config { /// Background image #[structopt(long, value_name = "IMAGE", conflicts_with = "background")] pub background_image: Option<PathBuf>, /// Background color of the image #[structopt( long, short, value_name = "COLOR", default_value = "#aaaaff", parse(try_from_str = parse_str_color) )] pub background: Rgba<u8>, /// Show the path of silicon config file #[structopt(long)] pub config_file: bool, /// Read input from clipboard. #[structopt(long)] pub from_clipboard: bool, /// File to read. If not set, stdin will be use. #[structopt(value_name = "FILE", parse(from_os_str))] pub file: Option<PathBuf>, /// The fallback font list. eg. 'Hack; SimSun=31' #[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))] pub font: Option<FontList>, /// Lines to high light. rg. '1-3; 4' #[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))] pub highlight_lines: Option<Lines>, /// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs"). #[structopt(short, value_name = "LANG", long)] pub language: Option<String>, /// Pad between lines #[structopt(long, value_name = "PAD", default_value = "2")] pub line_pad: u32, /// Line number offset #[structopt(long, value_name = "OFFSET", default_value = "1")] pub line_offset: u32, /// List all themes. #[structopt(long)] pub list_themes: bool, /// List all available fonts in your system #[structopt(long)] pub list_fonts: bool, /// Write output image to specific location instead of cwd. 
#[structopt( short, long, value_name = "PATH", required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"] )] pub output: Option<PathBuf>, /// Hide the window controls. #[structopt(long)] pub no_window_controls: bool, /// Show window title #[structopt(long, value_name = "WINDOW_TITLE")] pub window_title: Option<String>, /// Hide the line number. #[structopt(long)] pub no_line_number: bool, /// Don't round the corner #[structopt(long)] pub no_round_corner: bool, /// Pad horiz #[structopt(long, value_name = "PAD", default_value = "80")] pub pad_horiz: u32, /// Pad vert #[structopt(long, value_name = "PAD", default_value = "100")] pub pad_vert: u32, /// Color of shadow #[structopt( long, value_name = "COLOR", default_value = "#555555", parse(try_from_str = parse_str_color) )] pub shadow_color: Rgba<u8>, /// Blur radius of the shadow. (set it to 0 to hide shadow) #[structopt(long, value_name = "R", default_value = "0")] pub shadow_blur_radius: f32, /// Shadow's offset in Y axis #[structopt(long, value_name = "Y", default_value = "0")] pub shadow_offset_y: i32, /// Shadow's offset in X axis #[structopt(long, value_name = "X", default_value = "0")] pub shadow_offset_x: i32, /// Tab width #[structopt(long, value_name = "WIDTH", default_value = "4")] pub tab_width: u8, /// The syntax highlight theme. It can be a theme name or path to a .tmTheme file. #[structopt(long, value_name = "THEME", default_value = "Dracula")] pub theme: String, /// Copy the output image to clipboard. 
#[structopt(short = "c", long)] pub to_clipboard: bool, // Draw a custom text on the bottom right corner // #[structopt(long)] // watermark: Option<String>, /// build syntax definition and theme cache #[structopt(long, value_name = "OUTPUT_DIR")] pub build_cache: Option<Option<PathBuf>>, } impl Config { pub fn get_source_code<'a>( &self, ps: &'a SyntaxSet, ) -> Result<(&'a SyntaxReference, String), Error> { let possible_language = self.language.as_ref().map(|language| { ps.find_syntax_by_token(language) .ok_or_else(|| format_err!("Unsupported language: {}", language)) }); if self.from_clipboard { let mut ctx = ClipboardContext::new() .map_err(|e| format_err!("failed to access clipboard: {}", e))?; let code = ctx .get_contents() .map_err(|e| format_err!("failed to access clipboard: {}", e))?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_by_first_line(&code) .ok_or_else(|| format_err!("Failed to detect the language")) })?; return Ok((language, code)); } if let Some(path) = &self.file
let mut stdin = stdin(); let mut s = String::new(); stdin.read_to_string(&mut s)?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_by_first_line(&s) .ok_or_else(|| format_err!("Failed to detect the language")) })?; Ok((language, s)) } pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> { if let Some(theme) = ts.themes.get(&self.theme) { Ok(theme.clone()) } else { ThemeSet::get_theme(&self.theme) .context(format!("Canot load the theme: {}", self.theme)) } } pub fn get_formatter(&self) -> Result<ImageFormatter, Error> { let formatter = ImageFormatterBuilder::new() .line_pad(self.line_pad) .window_controls(!self.no_window_controls) .window_title(self.window_title.clone()) .line_number(!self.no_line_number) .font(self.font.clone().unwrap_or_default()) .round_corner(!self.no_round_corner) .shadow_adder(self.get_shadow_adder()?) .tab_width(self.tab_width) .highlight_lines(self.highlight_lines.clone().unwrap_or_default()) .line_offset(self.line_offset); Ok(formatter.build()?) } pub fn get_shadow_adder(&self) -> Result<ShadowAdder, Error> { Ok(ShadowAdder::new() .background(match &self.background_image { Some(path) => Background::Image(image::open(path)?.to_rgba8()), None => Background::Solid(self.background), }) .shadow_color(self.shadow_color) .blur_radius(self.shadow_blur_radius) .pad_horiz(self.pad_horiz) .pad_vert(self.pad_vert) .offset_x(self.shadow_offset_x) .offset_y(self.shadow_offset_y)) } pub fn get_expanded_output(&self) -> Option<PathBuf> { let need_expand = self.output.as_ref().map(|p| p.starts_with("~")) == Some(true); if let (Ok(home_dir), true) = (std::env::var("HOME"), need_expand) { self.output .as_ref() .map(|p| p.to_string_lossy().replacen('~', &home_dir, 1).into()) } else { self.output.clone() } } }
{ let mut s = String::new(); let mut file = File::open(path)?; file.read_to_string(&mut s)?; let language = possible_language.unwrap_or_else(|| { ps.find_syntax_for_file(path)? .ok_or_else(|| format_err!("Failed to detect the language")) })?; return Ok((language, s)); }
conditional_block
evaluator_test.go
package evaluator import ( "testing" "go-interpreter-lexer/object" "go-interpreter-lexer/lexer" "go-interpreter-lexer/parser" ) func TestEvaluateIntegerExpression(t *testing.T){ tests := []struct{ input string expected int64 }{ {"5", 5}, {"10", 10}, {"-10", -10}, {"-39", -39}, {"5 + 5 + 5", 15}, {"3 * 5 + 9", 24}, {"2 * 2 * 2 * 2 * 2", 32}, {"-5 + 10 + -5", 0}, {"20 + 2 * -10", 0}, {"50 / 2 * 2 + 10 ", 60}, {"2 * (5 + 10)", 30}, } for _, tt := range tests{ evaluated := testEval(tt.input) testIntegerObject(t, evaluated, tt.expected) } } func TestEvaluateBoolExpressions(t *testing.T){ tests := []struct{ input string expected bool }{ {"true", true}, {"false", false}, {"1 < 2", true}, {"1 > 1", false}, {"1 > 1 ", false}, {"1 > 2", false}, {" 1 == 1", true}, {"1 != 2", true}, {"1 != 1", false}, {"false == false", true}, {"true == true", true}, {"false != true", true}, {"false != false", false}, {" (1 < 2) == true", true}, {" (1 < 2) == false", false}, {" (1 > 2) == false", true}, {" (1 > 2) == true", false}, {"true == true", true}, {"false != true", true}, {"false != false", false}, } for _, bb := range tests{ evaluated := testEval(bb.input) testBooleanObject(t, evaluated, bb.expected) } } func testEval (input string) object.Object{ l := lexer.New(input) p := parser.New(l) program := p.ParseProgram() env := object.NewEnvironment() return Eval(program, env) } func testBooleanObject(t *testing.T, obj object.Object, expected bool) bool{ result, ok := obj.(*object.Boolean) if !ok
if result.Bool != expected{ t.Errorf("Expected %t but got %t", expected, result.Bool) return false } return true } func testIntegerObject(t *testing.T, obj object.Object, expected int64) bool{ result, ok := obj.(*object.Integer) if !ok { t.Errorf("Evaluated value is suppose to be of object.Integer Type by found %T",obj) return false } if result.Value != expected { t.Errorf("Expected Value %d was not equal to the evaluated value %d", expected, result.Value) return false } return true } func TestBangOperator(t *testing.T){ tests := []struct{ input string expected bool }{ {"!true", false}, { "!false", true}, {"!5", false}, {"!!true", true}, {"!!5", true}, } for _, bang := range tests{ evaluated := testEval(bang.input) testBooleanObject(t, evaluated,bang.expected) } } func TestIfElseExpression(t *testing.T){ tests := []struct{ input string expected interface{} }{ {"if (true) { 10 }", 10 }, {"if (false) { 10 }", nil}, {"if ( 1 ) { 10 }", 10 }, {"if ( 1 < 2) { 10 }", 10 }, {"if ( 1 > 2) { 10 }", nil }, {"if ( 1 < 2) { 10 } else { 20 } ", 10 }, {"if ( 1 > 2) { 10 } else { 20 } ", 20 }, } for _, tt := range tests { evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else { testNullObject(t, evaluated) } } } func testNullObject(t *testing.T, evaluated object.Object) bool { if evaluated != NULL { t.Errorf("Object expected to be null but got %T", evaluated) return false } return true } func TestReturnStatements(t *testing.T){ tests := []struct{ input string expected int64 }{ { "return 10;", 10}, { "return 10; 9; ", 10}, { "return 2 * 5; 9;", 10}, {"9; return 2*5; 9;", 10}, {`if (10>1) { if ( 10>1) { return 10; } return 1; `, 10}, } for _,tt := range tests{ evaluated := testEval(tt.input) testIntegerObject(t,evaluated,tt.expected) } } func TestErrorHandling (t *testing.T){ tests := []struct{ input string expectedMessage string }{ {"5 + true;", "type mismatch: INTEGER + BOOLEAN"}, {"5 + true; 5;", "type 
mismatch: INTEGER + BOOLEAN"}, {"-true", "unknown operator: -BOOLEAN"}, {"true + false;", "unknown operator: BOOLEAN + BOOLEAN"}, {`if (10 > 1) { if ( 10 > 1) { return true + true; } `, "unknown operator: BOOLEAN + BOOLEAN"}, {"foobar", "identifier not found: foobar"}, {`"Hello"-"World"`, "unknown operator: STRING - STRING"}, {`{"name": "Monkey"}[fn(x) {x}];`, "unusable as hash key: FUNCTION"}, } for _, tt :=range tests{ evaluated := testEval(tt.input) errObj, ok := evaluated.(*object.Error) if ! ok{ t.Errorf("no error object returned. got= %T(%+v)", evaluated, evaluated) continue } if errObj.Message != tt.expectedMessage{ t.Errorf("wrong error message.expected=%q, got=%q", tt.expectedMessage, errObj.Message) } } } func TestLetStatements(t *testing.T){ tests := []struct{ input string expected int64 }{ {"let a = 5; a;", 5}, {"let a = 5 * 5; a; ", 25}, {"let a = 5; let b = a; b;", 5}, {"let a = 5; let b = a; let c = a + b + 5; c;", 15}, } for _, tt := range tests{ testIntegerObject(t, testEval(tt.input), tt.expected) } } func TestFunctionObject(t *testing.T){ input := "fn(x) {x+2;};" evaluated := testEval(input) fn, ok := evaluated.(*object.Function) if !ok{ t.Fatalf("object is not a function. 
got: %T(%+v)", evaluated, evaluated) } if len(fn.Parameters) != 1 { t.Fatalf("function has wrong number of parameters %+v", fn.Parameters) } if fn.Parameters[0].String() != "x"{ t.Fatalf("parameter is not 'x' got %q", fn.Parameters[0]) } expectBody := "(x + 2)" if fn.Body.String() != expectBody{ t.Fatalf("body of the function is not %q, got %q", expectBody, fn.Body.String()) } } func TestFunctionApplication(t *testing.T){ tests := []struct{ input string expected int64 }{ { "let identity = fn(x) {x;} identity(5);", 5}, {"let identity = fn(x) { return x;}; identity(5)", 5}, {"let double = fn(x) { x*2;}; double(5); ",10}, {"let add = fn(x, y) { x + y; }; add(4, 6);", 10}, {"let add = fn(x, y) { x + y; }; add(4 + 6, add(5,5));", 20}, { "fn(x) {x; }(5)", 5}, {`fn( ) { 5;}()`, 5}, } for _, tt := range tests{ testIntegerObject(t, testEval(tt.input), tt.expected) } } func TestClosures(t *testing.T){ input := ` let newAdder = fn(x) { fn(y) { x+y }; }; let addTwo = newAdder(2); addTwo(2);` testIntegerObject(t, testEval(input), 4) } func TestStringLiteralExpression(t *testing.T){ input := `fn() {"hello world!"}();` evaluated := testEval(input) str, ok := evaluated.(*object.String) if !ok{ t.Fatalf("expected object.String got :%T", evaluated) } if str.Value != "hello world!" { t.Fatalf("expected value %s but got %s","hello world!" ,str.Value) } } func TestStringConcatenation(t *testing.T){ input := `"Hello"+" "+"World!"` evaluated := testEval(input) str, ok := evaluated.(*object.String) if !ok { t.Fatalf("Object is not String. got= %T", evaluated) } if str.Value != "Hello World!" { t.Fatalf("The expected value of concatenated string %s but got %s", "Hello World!",str.Value) } } func TestBuiltInFunction(t *testing.T){ tests := [] struct{ input string expected interface{} }{ {`len("")`, 0}, {`len("four")`, 4}, {"len(1)", "argument to `len` not supported, got INTEGER"}, {`len("one", "two")`, "wrong number of arguments. 
got 2, want=1"}, {`first([1,2,3,4])`, 1}, {`first([])`, NULL}, {`last([1,2,3,4])`, 4}, {`last([])`, NULL}, } for _, tt := range tests{ evaluated := testEval(tt.input) switch expected := tt.expected.(type){ case int: testIntegerObject(t,evaluated,int64(expected)) case string: errObj, ok := evaluated.(*object.Error) if !ok{ t.Errorf("object is not error got.%T(+v)", evaluated, evaluated) continue } if errObj.Message != expected{ t.Errorf("wrong error message. expected %q got %q", expected, errObj.Message) } } } } func TestArrayLiterals(t *testing.T){ input := "[1,2 * 2,3 + 3]" evaluated := testEval(input) result, ok := evaluated.(*object.Array) if !ok { t.Fatalf("Expected to get an *object.Array got %T", evaluated) } if len(result.Elements) != 3{ t.Fatalf("Array Should have 3 elements but got %d", len(result.Elements)) } testIntegerObject(t, result.Elements[0], 1) testIntegerObject(t, result.Elements[1],4) testIntegerObject(t, result.Elements[2], 6) } func TestArrayIndexExpressions(t *testing.T){ tests := []struct{ input string expected interface{} }{ {"[1, 2, 3, 4][0]", 1}, {"[1,2,3][1]", 2}, {"[1,2,3][2]", 3}, {"let i = 0; [1][i];", 1}, {"[1,2,3][1+1];", 3}, {"let myArray = [1,2,3,4]; myArray[2]; ", 3}, {"let myArray = [1,2,3,4]; myArray[0]+ myArray[1]+ myArray[2]; ", 6}, {"[1,2,3][3];", nil}, {"[1,2,3][-1];", nil}, } for _, tt := range tests{ evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else{ testNullObject(t, evaluated) } } } func TestHashLiterals(t *testing.T){ input := `let two = "two"; { "one" : 10 - 9, "two" : 1 + 1, "three": 6/2, 4: 4, true: 5, false: 6 } ` evaluated := testEval(input) result, ok := evaluated.(*object.Hash) if !ok { t.Fatalf("Eval did not return Hash got %T", evaluated) } expected := map[object.HashKey]int64{ (&object.String{Value: "one"}).HashKey(): 1, (&object.String{Value: "two"}).HashKey(): 2, (&object.String{Value: "three"}).HashKey(): 3, 
(&object.Integer{Value: 4}).HashKey(): 4, TRUE.HashKey(): 5, FALSE.HashKey(): 6, } if len(result.Pairs) != len(expected){ t.Fatalf("Hash has wrong number of pairs got %d", len(result.Pairs)) } for expectedKey, expectedValue := range expected{ pair, ok := result.Pairs[expectedKey] if !ok { t.Errorf("no pair for given key in Pairs") } testIntegerObject(t,pair.Value, expectedValue) } } func TestHashIndexExpression(t *testing.T){ tests := []struct{ input string expected interface{} }{ {`{"foo":5}["foo"]`, 5}, {`{"foo": 5}["bar"]`, nil}, {`let key ="foo"; {"foo": 5}[key];`, 5}, {`{}["foo"]`, nil}, {`{true: 5}[true]`, 5}, //{`{ 5 : 5}[5]'`,5}, } for _, tt := range tests{ evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else{ testNullObject(t, evaluated) } } }
{ t.Errorf("expected value was object.Boolean but got %T",obj) }
conditional_block
evaluator_test.go
package evaluator import ( "testing" "go-interpreter-lexer/object" "go-interpreter-lexer/lexer" "go-interpreter-lexer/parser" ) func TestEvaluateIntegerExpression(t *testing.T){ tests := []struct{ input string expected int64 }{ {"5", 5}, {"10", 10}, {"-10", -10}, {"-39", -39}, {"5 + 5 + 5", 15}, {"3 * 5 + 9", 24}, {"2 * 2 * 2 * 2 * 2", 32}, {"-5 + 10 + -5", 0}, {"20 + 2 * -10", 0}, {"50 / 2 * 2 + 10 ", 60}, {"2 * (5 + 10)", 30}, } for _, tt := range tests{ evaluated := testEval(tt.input) testIntegerObject(t, evaluated, tt.expected) } } func TestEvaluateBoolExpressions(t *testing.T){ tests := []struct{ input string expected bool }{ {"true", true}, {"false", false}, {"1 < 2", true}, {"1 > 1", false}, {"1 > 1 ", false}, {"1 > 2", false}, {" 1 == 1", true}, {"1 != 2", true}, {"1 != 1", false}, {"false == false", true}, {"true == true", true}, {"false != true", true}, {"false != false", false}, {" (1 < 2) == true", true}, {" (1 < 2) == false", false}, {" (1 > 2) == false", true}, {" (1 > 2) == true", false}, {"true == true", true}, {"false != true", true}, {"false != false", false}, } for _, bb := range tests{ evaluated := testEval(bb.input) testBooleanObject(t, evaluated, bb.expected) } } func testEval (input string) object.Object{ l := lexer.New(input) p := parser.New(l) program := p.ParseProgram() env := object.NewEnvironment() return Eval(program, env) } func testBooleanObject(t *testing.T, obj object.Object, expected bool) bool{ result, ok := obj.(*object.Boolean) if !ok{ t.Errorf("expected value was object.Boolean but got %T",obj) } if result.Bool != expected{ t.Errorf("Expected %t but got %t", expected, result.Bool) return false } return true } func testIntegerObject(t *testing.T, obj object.Object, expected int64) bool{ result, ok := obj.(*object.Integer) if !ok { t.Errorf("Evaluated value is suppose to be of object.Integer Type by found %T",obj) return false } if result.Value != expected { t.Errorf("Expected Value %d was not equal to the evaluated value %d", 
expected, result.Value) return false } return true } func TestBangOperator(t *testing.T){ tests := []struct{ input string expected bool }{ {"!true", false}, { "!false", true}, {"!5", false}, {"!!true", true}, {"!!5", true}, } for _, bang := range tests{ evaluated := testEval(bang.input) testBooleanObject(t, evaluated,bang.expected) } } func TestIfElseExpression(t *testing.T){ tests := []struct{ input string expected interface{} }{ {"if (true) { 10 }", 10 }, {"if (false) { 10 }", nil}, {"if ( 1 ) { 10 }", 10 }, {"if ( 1 < 2) { 10 }", 10 }, {"if ( 1 > 2) { 10 }", nil }, {"if ( 1 < 2) { 10 } else { 20 } ", 10 }, {"if ( 1 > 2) { 10 } else { 20 } ", 20 }, } for _, tt := range tests { evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else { testNullObject(t, evaluated) } } } func testNullObject(t *testing.T, evaluated object.Object) bool { if evaluated != NULL { t.Errorf("Object expected to be null but got %T", evaluated) return false } return true } func TestReturnStatements(t *testing.T){ tests := []struct{ input string expected int64 }{ { "return 10;", 10}, { "return 10; 9; ", 10}, { "return 2 * 5; 9;", 10}, {"9; return 2*5; 9;", 10}, {`if (10>1) { if ( 10>1) { return 10; } return 1; `, 10}, } for _,tt := range tests{ evaluated := testEval(tt.input) testIntegerObject(t,evaluated,tt.expected) } } func TestErrorHandling (t *testing.T){ tests := []struct{ input string expectedMessage string }{ {"5 + true;", "type mismatch: INTEGER + BOOLEAN"}, {"5 + true; 5;", "type mismatch: INTEGER + BOOLEAN"}, {"-true", "unknown operator: -BOOLEAN"}, {"true + false;", "unknown operator: BOOLEAN + BOOLEAN"}, {`if (10 > 1) { if ( 10 > 1) { return true + true; } `, "unknown operator: BOOLEAN + BOOLEAN"}, {"foobar", "identifier not found: foobar"}, {`"Hello"-"World"`, "unknown operator: STRING - STRING"}, {`{"name": "Monkey"}[fn(x) {x}];`, "unusable as hash key: FUNCTION"}, } for _, tt :=range tests{ evaluated := 
testEval(tt.input) errObj, ok := evaluated.(*object.Error) if ! ok{ t.Errorf("no error object returned. got= %T(%+v)", evaluated, evaluated) continue } if errObj.Message != tt.expectedMessage{ t.Errorf("wrong error message.expected=%q, got=%q", tt.expectedMessage, errObj.Message) } } } func TestLetStatements(t *testing.T){ tests := []struct{ input string expected int64 }{ {"let a = 5; a;", 5}, {"let a = 5 * 5; a; ", 25}, {"let a = 5; let b = a; b;", 5}, {"let a = 5; let b = a; let c = a + b + 5; c;", 15}, } for _, tt := range tests{ testIntegerObject(t, testEval(tt.input), tt.expected) } } func TestFunctionObject(t *testing.T){ input := "fn(x) {x+2;};" evaluated := testEval(input) fn, ok := evaluated.(*object.Function) if !ok{ t.Fatalf("object is not a function. got: %T(%+v)", evaluated, evaluated) } if len(fn.Parameters) != 1 { t.Fatalf("function has wrong number of parameters %+v", fn.Parameters) } if fn.Parameters[0].String() != "x"{ t.Fatalf("parameter is not 'x' got %q", fn.Parameters[0]) } expectBody := "(x + 2)" if fn.Body.String() != expectBody{ t.Fatalf("body of the function is not %q, got %q", expectBody, fn.Body.String()) } } func TestFunctionApplication(t *testing.T){ tests := []struct{ input string expected int64 }{ { "let identity = fn(x) {x;} identity(5);", 5}, {"let identity = fn(x) { return x;}; identity(5)", 5}, {"let double = fn(x) { x*2;}; double(5); ",10}, {"let add = fn(x, y) { x + y; }; add(4, 6);", 10}, {"let add = fn(x, y) { x + y; }; add(4 + 6, add(5,5));", 20}, { "fn(x) {x; }(5)", 5}, {`fn( ) { 5;}()`, 5}, } for _, tt := range tests{ testIntegerObject(t, testEval(tt.input), tt.expected) } } func TestClosures(t *testing.T){ input := ` let newAdder = fn(x) { fn(y) { x+y }; }; let addTwo = newAdder(2); addTwo(2);` testIntegerObject(t, testEval(input), 4) } func TestStringLiteralExpression(t *testing.T){ input := `fn() {"hello world!"}();` evaluated := testEval(input) str, ok := evaluated.(*object.String) if !ok{ t.Fatalf("expected 
object.String got :%T", evaluated) } if str.Value != "hello world!" { t.Fatalf("expected value %s but got %s","hello world!" ,str.Value) } } func TestStringConcatenation(t *testing.T){ input := `"Hello"+" "+"World!"` evaluated := testEval(input) str, ok := evaluated.(*object.String) if !ok { t.Fatalf("Object is not String. got= %T", evaluated) } if str.Value != "Hello World!" { t.Fatalf("The expected value of concatenated string %s but got %s", "Hello World!",str.Value) } } func TestBuiltInFunction(t *testing.T){ tests := [] struct{ input string expected interface{} }{ {`len("")`, 0}, {`len("four")`, 4}, {"len(1)", "argument to `len` not supported, got INTEGER"}, {`len("one", "two")`, "wrong number of arguments. got 2, want=1"}, {`first([1,2,3,4])`, 1}, {`first([])`, NULL}, {`last([1,2,3,4])`, 4}, {`last([])`, NULL}, } for _, tt := range tests{ evaluated := testEval(tt.input) switch expected := tt.expected.(type){ case int: testIntegerObject(t,evaluated,int64(expected)) case string: errObj, ok := evaluated.(*object.Error) if !ok{ t.Errorf("object is not error got.%T(+v)", evaluated, evaluated) continue } if errObj.Message != expected{ t.Errorf("wrong error message. 
expected %q got %q", expected, errObj.Message) } } } } func TestArrayLiterals(t *testing.T){ input := "[1,2 * 2,3 + 3]" evaluated := testEval(input) result, ok := evaluated.(*object.Array) if !ok { t.Fatalf("Expected to get an *object.Array got %T", evaluated) } if len(result.Elements) != 3{ t.Fatalf("Array Should have 3 elements but got %d", len(result.Elements)) } testIntegerObject(t, result.Elements[0], 1) testIntegerObject(t, result.Elements[1],4) testIntegerObject(t, result.Elements[2], 6) } func TestArrayIndexExpressions(t *testing.T){ tests := []struct{ input string expected interface{} }{ {"[1, 2, 3, 4][0]", 1}, {"[1,2,3][1]", 2}, {"[1,2,3][2]", 3}, {"let i = 0; [1][i];", 1}, {"[1,2,3][1+1];", 3}, {"let myArray = [1,2,3,4]; myArray[2]; ", 3}, {"let myArray = [1,2,3,4]; myArray[0]+ myArray[1]+ myArray[2]; ", 6}, {"[1,2,3][3];", nil}, {"[1,2,3][-1];", nil}, } for _, tt := range tests{ evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else{ testNullObject(t, evaluated) } } } func TestHashLiterals(t *testing.T){ input := `let two = "two"; { "one" : 10 - 9, "two" : 1 + 1, "three": 6/2, 4: 4, true: 5, false: 6 } ` evaluated := testEval(input) result, ok := evaluated.(*object.Hash) if !ok { t.Fatalf("Eval did not return Hash got %T", evaluated) } expected := map[object.HashKey]int64{ (&object.String{Value: "one"}).HashKey(): 1, (&object.String{Value: "two"}).HashKey(): 2, (&object.String{Value: "three"}).HashKey(): 3, (&object.Integer{Value: 4}).HashKey(): 4, TRUE.HashKey(): 5, FALSE.HashKey(): 6, } if len(result.Pairs) != len(expected){ t.Fatalf("Hash has wrong number of pairs got %d", len(result.Pairs)) } for expectedKey, expectedValue := range expected{ pair, ok := result.Pairs[expectedKey] if !ok { t.Errorf("no pair for given key in Pairs") } testIntegerObject(t,pair.Value, expectedValue) } } func TestHashIndexExpression(t *testing.T){ tests := []struct{ input string expected interface{} 
}{ {`{"foo":5}["foo"]`, 5}, {`{"foo": 5}["bar"]`, nil}, {`let key ="foo"; {"foo": 5}[key];`, 5}, {`{}["foo"]`, nil}, {`{true: 5}[true]`, 5}, //{`{ 5 : 5}[5]'`,5}, } for _, tt := range tests{ evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else{ testNullObject(t, evaluated) } }
}
random_line_split
evaluator_test.go
package evaluator import ( "testing" "go-interpreter-lexer/object" "go-interpreter-lexer/lexer" "go-interpreter-lexer/parser" ) func TestEvaluateIntegerExpression(t *testing.T){ tests := []struct{ input string expected int64 }{ {"5", 5}, {"10", 10}, {"-10", -10}, {"-39", -39}, {"5 + 5 + 5", 15}, {"3 * 5 + 9", 24}, {"2 * 2 * 2 * 2 * 2", 32}, {"-5 + 10 + -5", 0}, {"20 + 2 * -10", 0}, {"50 / 2 * 2 + 10 ", 60}, {"2 * (5 + 10)", 30}, } for _, tt := range tests{ evaluated := testEval(tt.input) testIntegerObject(t, evaluated, tt.expected) } } func TestEvaluateBoolExpressions(t *testing.T){ tests := []struct{ input string expected bool }{ {"true", true}, {"false", false}, {"1 < 2", true}, {"1 > 1", false}, {"1 > 1 ", false}, {"1 > 2", false}, {" 1 == 1", true}, {"1 != 2", true}, {"1 != 1", false}, {"false == false", true}, {"true == true", true}, {"false != true", true}, {"false != false", false}, {" (1 < 2) == true", true}, {" (1 < 2) == false", false}, {" (1 > 2) == false", true}, {" (1 > 2) == true", false}, {"true == true", true}, {"false != true", true}, {"false != false", false}, } for _, bb := range tests{ evaluated := testEval(bb.input) testBooleanObject(t, evaluated, bb.expected) } } func testEval (input string) object.Object{ l := lexer.New(input) p := parser.New(l) program := p.ParseProgram() env := object.NewEnvironment() return Eval(program, env) } func testBooleanObject(t *testing.T, obj object.Object, expected bool) bool{ result, ok := obj.(*object.Boolean) if !ok{ t.Errorf("expected value was object.Boolean but got %T",obj) } if result.Bool != expected{ t.Errorf("Expected %t but got %t", expected, result.Bool) return false } return true } func testIntegerObject(t *testing.T, obj object.Object, expected int64) bool{ result, ok := obj.(*object.Integer) if !ok { t.Errorf("Evaluated value is suppose to be of object.Integer Type by found %T",obj) return false } if result.Value != expected { t.Errorf("Expected Value %d was not equal to the evaluated value %d", 
expected, result.Value) return false } return true } func TestBangOperator(t *testing.T){ tests := []struct{ input string expected bool }{ {"!true", false}, { "!false", true}, {"!5", false}, {"!!true", true}, {"!!5", true}, } for _, bang := range tests{ evaluated := testEval(bang.input) testBooleanObject(t, evaluated,bang.expected) } } func TestIfElseExpression(t *testing.T){ tests := []struct{ input string expected interface{} }{ {"if (true) { 10 }", 10 }, {"if (false) { 10 }", nil}, {"if ( 1 ) { 10 }", 10 }, {"if ( 1 < 2) { 10 }", 10 }, {"if ( 1 > 2) { 10 }", nil }, {"if ( 1 < 2) { 10 } else { 20 } ", 10 }, {"if ( 1 > 2) { 10 } else { 20 } ", 20 }, } for _, tt := range tests { evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else { testNullObject(t, evaluated) } } } func testNullObject(t *testing.T, evaluated object.Object) bool { if evaluated != NULL { t.Errorf("Object expected to be null but got %T", evaluated) return false } return true } func TestReturnStatements(t *testing.T){ tests := []struct{ input string expected int64 }{ { "return 10;", 10}, { "return 10; 9; ", 10}, { "return 2 * 5; 9;", 10}, {"9; return 2*5; 9;", 10}, {`if (10>1) { if ( 10>1) { return 10; } return 1; `, 10}, } for _,tt := range tests{ evaluated := testEval(tt.input) testIntegerObject(t,evaluated,tt.expected) } } func TestErrorHandling (t *testing.T){ tests := []struct{ input string expectedMessage string }{ {"5 + true;", "type mismatch: INTEGER + BOOLEAN"}, {"5 + true; 5;", "type mismatch: INTEGER + BOOLEAN"}, {"-true", "unknown operator: -BOOLEAN"}, {"true + false;", "unknown operator: BOOLEAN + BOOLEAN"}, {`if (10 > 1) { if ( 10 > 1) { return true + true; } `, "unknown operator: BOOLEAN + BOOLEAN"}, {"foobar", "identifier not found: foobar"}, {`"Hello"-"World"`, "unknown operator: STRING - STRING"}, {`{"name": "Monkey"}[fn(x) {x}];`, "unusable as hash key: FUNCTION"}, } for _, tt :=range tests{ evaluated := 
testEval(tt.input) errObj, ok := evaluated.(*object.Error) if ! ok{ t.Errorf("no error object returned. got= %T(%+v)", evaluated, evaluated) continue } if errObj.Message != tt.expectedMessage{ t.Errorf("wrong error message.expected=%q, got=%q", tt.expectedMessage, errObj.Message) } } } func TestLetStatements(t *testing.T){ tests := []struct{ input string expected int64 }{ {"let a = 5; a;", 5}, {"let a = 5 * 5; a; ", 25}, {"let a = 5; let b = a; b;", 5}, {"let a = 5; let b = a; let c = a + b + 5; c;", 15}, } for _, tt := range tests{ testIntegerObject(t, testEval(tt.input), tt.expected) } } func TestFunctionObject(t *testing.T)
func TestFunctionApplication(t *testing.T){ tests := []struct{ input string expected int64 }{ { "let identity = fn(x) {x;} identity(5);", 5}, {"let identity = fn(x) { return x;}; identity(5)", 5}, {"let double = fn(x) { x*2;}; double(5); ",10}, {"let add = fn(x, y) { x + y; }; add(4, 6);", 10}, {"let add = fn(x, y) { x + y; }; add(4 + 6, add(5,5));", 20}, { "fn(x) {x; }(5)", 5}, {`fn( ) { 5;}()`, 5}, } for _, tt := range tests{ testIntegerObject(t, testEval(tt.input), tt.expected) } } func TestClosures(t *testing.T){ input := ` let newAdder = fn(x) { fn(y) { x+y }; }; let addTwo = newAdder(2); addTwo(2);` testIntegerObject(t, testEval(input), 4) } func TestStringLiteralExpression(t *testing.T){ input := `fn() {"hello world!"}();` evaluated := testEval(input) str, ok := evaluated.(*object.String) if !ok{ t.Fatalf("expected object.String got :%T", evaluated) } if str.Value != "hello world!" { t.Fatalf("expected value %s but got %s","hello world!" ,str.Value) } } func TestStringConcatenation(t *testing.T){ input := `"Hello"+" "+"World!"` evaluated := testEval(input) str, ok := evaluated.(*object.String) if !ok { t.Fatalf("Object is not String. got= %T", evaluated) } if str.Value != "Hello World!" { t.Fatalf("The expected value of concatenated string %s but got %s", "Hello World!",str.Value) } } func TestBuiltInFunction(t *testing.T){ tests := [] struct{ input string expected interface{} }{ {`len("")`, 0}, {`len("four")`, 4}, {"len(1)", "argument to `len` not supported, got INTEGER"}, {`len("one", "two")`, "wrong number of arguments. 
got 2, want=1"}, {`first([1,2,3,4])`, 1}, {`first([])`, NULL}, {`last([1,2,3,4])`, 4}, {`last([])`, NULL}, } for _, tt := range tests{ evaluated := testEval(tt.input) switch expected := tt.expected.(type){ case int: testIntegerObject(t,evaluated,int64(expected)) case string: errObj, ok := evaluated.(*object.Error) if !ok{ t.Errorf("object is not error got.%T(+v)", evaluated, evaluated) continue } if errObj.Message != expected{ t.Errorf("wrong error message. expected %q got %q", expected, errObj.Message) } } } } func TestArrayLiterals(t *testing.T){ input := "[1,2 * 2,3 + 3]" evaluated := testEval(input) result, ok := evaluated.(*object.Array) if !ok { t.Fatalf("Expected to get an *object.Array got %T", evaluated) } if len(result.Elements) != 3{ t.Fatalf("Array Should have 3 elements but got %d", len(result.Elements)) } testIntegerObject(t, result.Elements[0], 1) testIntegerObject(t, result.Elements[1],4) testIntegerObject(t, result.Elements[2], 6) } func TestArrayIndexExpressions(t *testing.T){ tests := []struct{ input string expected interface{} }{ {"[1, 2, 3, 4][0]", 1}, {"[1,2,3][1]", 2}, {"[1,2,3][2]", 3}, {"let i = 0; [1][i];", 1}, {"[1,2,3][1+1];", 3}, {"let myArray = [1,2,3,4]; myArray[2]; ", 3}, {"let myArray = [1,2,3,4]; myArray[0]+ myArray[1]+ myArray[2]; ", 6}, {"[1,2,3][3];", nil}, {"[1,2,3][-1];", nil}, } for _, tt := range tests{ evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else{ testNullObject(t, evaluated) } } } func TestHashLiterals(t *testing.T){ input := `let two = "two"; { "one" : 10 - 9, "two" : 1 + 1, "three": 6/2, 4: 4, true: 5, false: 6 } ` evaluated := testEval(input) result, ok := evaluated.(*object.Hash) if !ok { t.Fatalf("Eval did not return Hash got %T", evaluated) } expected := map[object.HashKey]int64{ (&object.String{Value: "one"}).HashKey(): 1, (&object.String{Value: "two"}).HashKey(): 2, (&object.String{Value: "three"}).HashKey(): 3, 
(&object.Integer{Value: 4}).HashKey(): 4, TRUE.HashKey(): 5, FALSE.HashKey(): 6, } if len(result.Pairs) != len(expected){ t.Fatalf("Hash has wrong number of pairs got %d", len(result.Pairs)) } for expectedKey, expectedValue := range expected{ pair, ok := result.Pairs[expectedKey] if !ok { t.Errorf("no pair for given key in Pairs") } testIntegerObject(t,pair.Value, expectedValue) } } func TestHashIndexExpression(t *testing.T){ tests := []struct{ input string expected interface{} }{ {`{"foo":5}["foo"]`, 5}, {`{"foo": 5}["bar"]`, nil}, {`let key ="foo"; {"foo": 5}[key];`, 5}, {`{}["foo"]`, nil}, {`{true: 5}[true]`, 5}, //{`{ 5 : 5}[5]'`,5}, } for _, tt := range tests{ evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else{ testNullObject(t, evaluated) } } }
{ input := "fn(x) {x+2;};" evaluated := testEval(input) fn, ok := evaluated.(*object.Function) if !ok{ t.Fatalf("object is not a function. got: %T(%+v)", evaluated, evaluated) } if len(fn.Parameters) != 1 { t.Fatalf("function has wrong number of parameters %+v", fn.Parameters) } if fn.Parameters[0].String() != "x"{ t.Fatalf("parameter is not 'x' got %q", fn.Parameters[0]) } expectBody := "(x + 2)" if fn.Body.String() != expectBody{ t.Fatalf("body of the function is not %q, got %q", expectBody, fn.Body.String()) } }
identifier_body
evaluator_test.go
package evaluator import ( "testing" "go-interpreter-lexer/object" "go-interpreter-lexer/lexer" "go-interpreter-lexer/parser" ) func TestEvaluateIntegerExpression(t *testing.T){ tests := []struct{ input string expected int64 }{ {"5", 5}, {"10", 10}, {"-10", -10}, {"-39", -39}, {"5 + 5 + 5", 15}, {"3 * 5 + 9", 24}, {"2 * 2 * 2 * 2 * 2", 32}, {"-5 + 10 + -5", 0}, {"20 + 2 * -10", 0}, {"50 / 2 * 2 + 10 ", 60}, {"2 * (5 + 10)", 30}, } for _, tt := range tests{ evaluated := testEval(tt.input) testIntegerObject(t, evaluated, tt.expected) } } func TestEvaluateBoolExpressions(t *testing.T){ tests := []struct{ input string expected bool }{ {"true", true}, {"false", false}, {"1 < 2", true}, {"1 > 1", false}, {"1 > 1 ", false}, {"1 > 2", false}, {" 1 == 1", true}, {"1 != 2", true}, {"1 != 1", false}, {"false == false", true}, {"true == true", true}, {"false != true", true}, {"false != false", false}, {" (1 < 2) == true", true}, {" (1 < 2) == false", false}, {" (1 > 2) == false", true}, {" (1 > 2) == true", false}, {"true == true", true}, {"false != true", true}, {"false != false", false}, } for _, bb := range tests{ evaluated := testEval(bb.input) testBooleanObject(t, evaluated, bb.expected) } } func testEval (input string) object.Object{ l := lexer.New(input) p := parser.New(l) program := p.ParseProgram() env := object.NewEnvironment() return Eval(program, env) } func testBooleanObject(t *testing.T, obj object.Object, expected bool) bool{ result, ok := obj.(*object.Boolean) if !ok{ t.Errorf("expected value was object.Boolean but got %T",obj) } if result.Bool != expected{ t.Errorf("Expected %t but got %t", expected, result.Bool) return false } return true } func testIntegerObject(t *testing.T, obj object.Object, expected int64) bool{ result, ok := obj.(*object.Integer) if !ok { t.Errorf("Evaluated value is suppose to be of object.Integer Type by found %T",obj) return false } if result.Value != expected { t.Errorf("Expected Value %d was not equal to the evaluated value %d", 
expected, result.Value) return false } return true } func TestBangOperator(t *testing.T){ tests := []struct{ input string expected bool }{ {"!true", false}, { "!false", true}, {"!5", false}, {"!!true", true}, {"!!5", true}, } for _, bang := range tests{ evaluated := testEval(bang.input) testBooleanObject(t, evaluated,bang.expected) } } func TestIfElseExpression(t *testing.T){ tests := []struct{ input string expected interface{} }{ {"if (true) { 10 }", 10 }, {"if (false) { 10 }", nil}, {"if ( 1 ) { 10 }", 10 }, {"if ( 1 < 2) { 10 }", 10 }, {"if ( 1 > 2) { 10 }", nil }, {"if ( 1 < 2) { 10 } else { 20 } ", 10 }, {"if ( 1 > 2) { 10 } else { 20 } ", 20 }, } for _, tt := range tests { evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else { testNullObject(t, evaluated) } } } func testNullObject(t *testing.T, evaluated object.Object) bool { if evaluated != NULL { t.Errorf("Object expected to be null but got %T", evaluated) return false } return true } func TestReturnStatements(t *testing.T){ tests := []struct{ input string expected int64 }{ { "return 10;", 10}, { "return 10; 9; ", 10}, { "return 2 * 5; 9;", 10}, {"9; return 2*5; 9;", 10}, {`if (10>1) { if ( 10>1) { return 10; } return 1; `, 10}, } for _,tt := range tests{ evaluated := testEval(tt.input) testIntegerObject(t,evaluated,tt.expected) } } func TestErrorHandling (t *testing.T){ tests := []struct{ input string expectedMessage string }{ {"5 + true;", "type mismatch: INTEGER + BOOLEAN"}, {"5 + true; 5;", "type mismatch: INTEGER + BOOLEAN"}, {"-true", "unknown operator: -BOOLEAN"}, {"true + false;", "unknown operator: BOOLEAN + BOOLEAN"}, {`if (10 > 1) { if ( 10 > 1) { return true + true; } `, "unknown operator: BOOLEAN + BOOLEAN"}, {"foobar", "identifier not found: foobar"}, {`"Hello"-"World"`, "unknown operator: STRING - STRING"}, {`{"name": "Monkey"}[fn(x) {x}];`, "unusable as hash key: FUNCTION"}, } for _, tt :=range tests{ evaluated := 
testEval(tt.input) errObj, ok := evaluated.(*object.Error) if ! ok{ t.Errorf("no error object returned. got= %T(%+v)", evaluated, evaluated) continue } if errObj.Message != tt.expectedMessage{ t.Errorf("wrong error message.expected=%q, got=%q", tt.expectedMessage, errObj.Message) } } } func TestLetStatements(t *testing.T){ tests := []struct{ input string expected int64 }{ {"let a = 5; a;", 5}, {"let a = 5 * 5; a; ", 25}, {"let a = 5; let b = a; b;", 5}, {"let a = 5; let b = a; let c = a + b + 5; c;", 15}, } for _, tt := range tests{ testIntegerObject(t, testEval(tt.input), tt.expected) } } func TestFunctionObject(t *testing.T){ input := "fn(x) {x+2;};" evaluated := testEval(input) fn, ok := evaluated.(*object.Function) if !ok{ t.Fatalf("object is not a function. got: %T(%+v)", evaluated, evaluated) } if len(fn.Parameters) != 1 { t.Fatalf("function has wrong number of parameters %+v", fn.Parameters) } if fn.Parameters[0].String() != "x"{ t.Fatalf("parameter is not 'x' got %q", fn.Parameters[0]) } expectBody := "(x + 2)" if fn.Body.String() != expectBody{ t.Fatalf("body of the function is not %q, got %q", expectBody, fn.Body.String()) } } func TestFunctionApplication(t *testing.T){ tests := []struct{ input string expected int64 }{ { "let identity = fn(x) {x;} identity(5);", 5}, {"let identity = fn(x) { return x;}; identity(5)", 5}, {"let double = fn(x) { x*2;}; double(5); ",10}, {"let add = fn(x, y) { x + y; }; add(4, 6);", 10}, {"let add = fn(x, y) { x + y; }; add(4 + 6, add(5,5));", 20}, { "fn(x) {x; }(5)", 5}, {`fn( ) { 5;}()`, 5}, } for _, tt := range tests{ testIntegerObject(t, testEval(tt.input), tt.expected) } } func TestClosures(t *testing.T){ input := ` let newAdder = fn(x) { fn(y) { x+y }; }; let addTwo = newAdder(2); addTwo(2);` testIntegerObject(t, testEval(input), 4) } func TestStringLiteralExpression(t *testing.T){ input := `fn() {"hello world!"}();` evaluated := testEval(input) str, ok := evaluated.(*object.String) if !ok{ t.Fatalf("expected 
object.String got :%T", evaluated) } if str.Value != "hello world!" { t.Fatalf("expected value %s but got %s","hello world!" ,str.Value) } } func
(t *testing.T){ input := `"Hello"+" "+"World!"` evaluated := testEval(input) str, ok := evaluated.(*object.String) if !ok { t.Fatalf("Object is not String. got= %T", evaluated) } if str.Value != "Hello World!" { t.Fatalf("The expected value of concatenated string %s but got %s", "Hello World!",str.Value) } } func TestBuiltInFunction(t *testing.T){ tests := [] struct{ input string expected interface{} }{ {`len("")`, 0}, {`len("four")`, 4}, {"len(1)", "argument to `len` not supported, got INTEGER"}, {`len("one", "two")`, "wrong number of arguments. got 2, want=1"}, {`first([1,2,3,4])`, 1}, {`first([])`, NULL}, {`last([1,2,3,4])`, 4}, {`last([])`, NULL}, } for _, tt := range tests{ evaluated := testEval(tt.input) switch expected := tt.expected.(type){ case int: testIntegerObject(t,evaluated,int64(expected)) case string: errObj, ok := evaluated.(*object.Error) if !ok{ t.Errorf("object is not error got.%T(+v)", evaluated, evaluated) continue } if errObj.Message != expected{ t.Errorf("wrong error message. 
expected %q got %q", expected, errObj.Message) } } } } func TestArrayLiterals(t *testing.T){ input := "[1,2 * 2,3 + 3]" evaluated := testEval(input) result, ok := evaluated.(*object.Array) if !ok { t.Fatalf("Expected to get an *object.Array got %T", evaluated) } if len(result.Elements) != 3{ t.Fatalf("Array Should have 3 elements but got %d", len(result.Elements)) } testIntegerObject(t, result.Elements[0], 1) testIntegerObject(t, result.Elements[1],4) testIntegerObject(t, result.Elements[2], 6) } func TestArrayIndexExpressions(t *testing.T){ tests := []struct{ input string expected interface{} }{ {"[1, 2, 3, 4][0]", 1}, {"[1,2,3][1]", 2}, {"[1,2,3][2]", 3}, {"let i = 0; [1][i];", 1}, {"[1,2,3][1+1];", 3}, {"let myArray = [1,2,3,4]; myArray[2]; ", 3}, {"let myArray = [1,2,3,4]; myArray[0]+ myArray[1]+ myArray[2]; ", 6}, {"[1,2,3][3];", nil}, {"[1,2,3][-1];", nil}, } for _, tt := range tests{ evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else{ testNullObject(t, evaluated) } } } func TestHashLiterals(t *testing.T){ input := `let two = "two"; { "one" : 10 - 9, "two" : 1 + 1, "three": 6/2, 4: 4, true: 5, false: 6 } ` evaluated := testEval(input) result, ok := evaluated.(*object.Hash) if !ok { t.Fatalf("Eval did not return Hash got %T", evaluated) } expected := map[object.HashKey]int64{ (&object.String{Value: "one"}).HashKey(): 1, (&object.String{Value: "two"}).HashKey(): 2, (&object.String{Value: "three"}).HashKey(): 3, (&object.Integer{Value: 4}).HashKey(): 4, TRUE.HashKey(): 5, FALSE.HashKey(): 6, } if len(result.Pairs) != len(expected){ t.Fatalf("Hash has wrong number of pairs got %d", len(result.Pairs)) } for expectedKey, expectedValue := range expected{ pair, ok := result.Pairs[expectedKey] if !ok { t.Errorf("no pair for given key in Pairs") } testIntegerObject(t,pair.Value, expectedValue) } } func TestHashIndexExpression(t *testing.T){ tests := []struct{ input string expected interface{} 
}{ {`{"foo":5}["foo"]`, 5}, {`{"foo": 5}["bar"]`, nil}, {`let key ="foo"; {"foo": 5}[key];`, 5}, {`{}["foo"]`, nil}, {`{true: 5}[true]`, 5}, //{`{ 5 : 5}[5]'`,5}, } for _, tt := range tests{ evaluated := testEval(tt.input) integer, ok := tt.expected.(int) if ok { testIntegerObject(t, evaluated, int64(integer)) }else{ testNullObject(t, evaluated) } } }
TestStringConcatenation
identifier_name
main.go
// Copyright 2019 Carleton University Library All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package main import ( "bufio" "context" "flag" "fmt" "log" "net/http" "net/url" "os" "os/signal" "path/filepath" "strconv" "strings" "syscall" ) const (
// DefaultAddress is the default address to serve from. DefaultAddress string = ":8877" // PrimoDomain is the domain at which Primo instances are hosted. PrimoDomain string = "primo.exlibrisgroup.com" // subDomain is the institution domain subDomain string = "ocul-qu" // instVID is the institution vid instVID string = "01OCUL_QU:QU_DEFAULT" // MaxMappingFileLength is the maximum number of lines in a mapping file. MaxMappingFileLength uint64 = 1000000 // RecordURLPrefix is the prefix of the path of requests to catalogues for the permalink of a record. RecordPrefix string = "/vwebv/holdingsInfo" // PatronInfoPrefix is the prefix of the path of requests to catalogues for the patron login form. PatronInfoPrefix2 string = "/vwebv/login" // PatronInfoPrefix is the prefix of the path of requests to catalogues for the patron login form. PatronInfoPrefix string = "/vwebv/my" // SearchPrefix is the prefix of the path of requests to catalogues for search results. SearchPrefix string = "/vwebv/search" ) // A version flag, which should be overwritten when building using ldflags. var version = "devel" // Detourer is a struct which stores the data needed to perform redirects. type Detourer struct { idMap map[uint32]uint64 // The map of BibIDs to ExL IDs. primo string // The domain name (host) for the target Primo instance. vid string // The vid parameter to use when building Primo URLs. } // The Detourer serves HTTP redirects based on the request. func (d Detourer) ServeHTTP(w http.ResponseWriter, r *http.Request) { // In the default case, redirect to the Primo search form. redirectTo := &url.URL{ Scheme: "https", Host: d.primo, Path: "/discovery/search", } // Depending on the prefix... 
switch { case strings.HasPrefix(r.URL.Path, RecordPrefix): buildRecordRedirect(redirectTo, r, d.idMap) case strings.HasPrefix(r.URL.Path, PatronInfoPrefix): redirectTo.Path = "/discovery/login" case strings.HasPrefix(r.URL.Path, PatronInfoPrefix2): redirectTo.Path = "/discovery/login" case strings.HasPrefix(r.URL.Path, SearchPrefix): buildSearchRedirect(redirectTo, r) } // Set the vid parameter on all redirects. setParamInURL(redirectTo, "vid", d.vid) // Send the redirect to the client. // http.Redirect(w, r, redirectTo.String(), http.StatusMovedPermanently) http.Redirect(w, r, redirectTo.String(), http.StatusTemporaryRedirect) } // buildRecordRedirect updates redirectTo to the correct Primo record URL for the requested bibID. func buildRecordRedirect(redirectTo *url.URL, r *http.Request, idMap map[uint32]uint64) { q := r.URL.Query() // bibID64, err := strconv.ParseUint(r.URL.Path[len(RecordPrefix):], 10, 32) bibID64, err := strconv.ParseUint(q.Get("bibId"), 10, 32) if err == nil { bibID := uint32(bibID64) exlID, present := idMap[bibID] if present { redirectTo.Path = "/discovery/fulldisplay" setParamInURL(redirectTo, "docid", fmt.Sprintf("alma%v", exlID)) } else { log.Printf("Not found: %v", bibID64) } } else { log.Fatalln(err) } } // SearchAuthorIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=NAME" // SearchCallNumberIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=CALL" // SearchTitleIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=T" // SearchJournalIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=JALL" // buildSearchRedirect updates redirectTo to an approximate Primo URL for the requested search. 
func buildSearchRedirect(redirectTo *url.URL, r *http.Request) { q := r.URL.Query() setParamInURL(redirectTo, "tab", "Everything") setParamInURL(redirectTo, "search_scope", "MyInst_and_CI") if q.Get("searchArg") != "" { switch q.Get("searchCode") { case "TKEY^": setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg"))) case "TALL": setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg"))) case "NAME": redirectTo.Path = "/discovery/browse" setParamInURL(redirectTo, "browseScope", "author") setParamInURL(redirectTo, "browseQuery", q.Get("searchArg")) case "CALL": redirectTo.Path = "/discovery/browse" setParamInURL(redirectTo, "browseScope", "callnumber.0") setParamInURL(redirectTo, "browseQuery", q.Get("searchArg")) case "JALL": redirectTo.Path = "/discovery/jsearch" setParamInURL(redirectTo, "tab", "jsearch_slot") setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg"))) default: setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg"))) } } else if q.Get("SEARCH") != "" { setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("SEARCH"))) } } func main() { // Define the command line flags. addr := flag.String("address", DefaultAddress, "Address to bind on.") subdomain := flag.String("primo", subDomain, "The subdomain of the target Primo instance, ?????.primo.exlibrisgroup.com.") vid := flag.String("vid", instVID, "VID parameter for Primo.") flag.Usage = func() { fmt.Fprintf(os.Stderr, "Permanent Detour: A tiny web service which redirects Voyager Web OPAC requests to Primo URLs.\n") fmt.Fprintf(os.Stderr, "Version %v\n", version) fmt.Fprintf(os.Stderr, "Usage: permanentdetour [flag...] 
[file...]\n") flag.PrintDefaults() fmt.Fprintln(os.Stderr, " Environment variables read when flag is unset:") flag.VisitAll(func(f *flag.Flag) { uppercaseName := strings.ToUpper(f.Name) fmt.Fprintf(os.Stderr, " %v%v\n", EnvPrefix, uppercaseName) }) } // Process the flags. flag.Parse() // If any flags have not been set, see if there are // environment variables that set them. err := overrideUnsetFlagsFromEnvironmentVariables() if err != nil { log.Fatalln(err) } // The Detourer has all the data needed to build redirects. d := Detourer{ primo: fmt.Sprintf("%v.%v", *subdomain, PrimoDomain), vid: *vid, } // Map of BibIDs to ExL IDs // The initial size is an estimate based on the number of arguments. size := uint64(len(flag.Args())) * MaxMappingFileLength d.idMap = make(map[uint32]uint64, size) // Process each file in the arguments list. for _, mappingFilePath := range flag.Args() { // Add the mappings from this file to the idMap. err := processFile(d.idMap, mappingFilePath) if err != nil { log.Fatal(err) } } log.Printf("%v VGer BibID to Ex Libris ID mappings processed.\n", len(d.idMap)) // Use an explicit request multiplexer. mux := http.NewServeMux() mux.Handle("/", d) server := http.Server{ Addr: *addr, Handler: mux, } shutdown := make(chan struct{}) go func() { sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) // Wait to receive a message on the channel. <-sigs err := server.Shutdown(context.Background()) if err != nil { log.Printf("Error shutting down server, %v.\n", err) } close(shutdown) }() log.Println("Starting server.") err = server.ListenAndServe() if err != http.ErrServerClosed { log.Fatalf("Fatal server error, %v.\n", err) } <-shutdown log.Println("Server stopped.") } // processFile takes a file path, opens the file, and reads it line by line to extract id mappings. func processFile(m map[uint32]uint64, mappingFilePath string) error { // Get the absolute path of the file. 
Not strictly necessary, but creates clearer error messages. absFilePath, err := filepath.Abs(mappingFilePath) if err != nil { return fmt.Errorf("Could not get absolute path of %v, %v.\n", mappingFilePath, err) } // Open the file for reading. Close the file automatically when done. file, err := os.Open(absFilePath) if err != nil { return fmt.Errorf("Could not open %v for reading, %v.\n", absFilePath, err) } defer file.Close() // Read the file line by line. scanner := bufio.NewScanner(file) lnum := 0 for scanner.Scan() { lnum += 1 bibID, exlID, err := processLine(scanner.Text()) if err != nil { return fmt.Errorf("Unable to process line %v '%v', %v.\n", lnum, scanner.Text(), err) } _, present := m[bibID] if present { return fmt.Errorf("Previously seen Bib ID %v was encountered.\n", bibID) } m[bibID] = exlID } err = scanner.Err() if err != nil { return fmt.Errorf("Scanner error when processing %v, %v.\n", absFilePath, err) } return nil } // processLine takes a line of input, and finds the bibID and the exL ID. func processLine(line string) (bibID uint32, exlID uint64, _ error) { // Split the input line into fields on commas. splitLine := strings.Split(line, ",") if len(splitLine) < 2 { return bibID, exlID, fmt.Errorf("Line has incorrect number of fields, 2 expected, %v found.\n", len(splitLine)) } // The bibIDs look like this: a1234-instid // We need to strip off the first character and anything after the dash. dashIndex := strings.Index(splitLine[1], "-") if (dashIndex == 0) || (dashIndex == 1) { return bibID, exlID, fmt.Errorf("No bibID number was found before dash between bibID and institution id.\n") } bibIDString := "invalid" // If the dash isn't found, use the whole bibID field except the first character. 
if dashIndex == -1 { bibIDString = splitLine[1] } else { bibIDString = splitLine[1][0:dashIndex] } bibID64, err := strconv.ParseUint(bibIDString, 10, 32) if err != nil { return bibID, exlID, err } bibID = uint32(bibID64) exlID, err = strconv.ParseUint(splitLine[0], 10, 64) if err != nil { return bibID, exlID, err } return bibID, exlID, nil } // If any flags are not set, use environment variables to set them. func overrideUnsetFlagsFromEnvironmentVariables() error { // A map of pointers to unset flags. listOfUnsetFlags := make(map[*flag.Flag]bool) // flag.Visit calls a function on "only those flags that have been set." // flag.VisitAll calls a function on "all flags, even those not set." // No way to ask for "only unset flags". So, we add all, then // delete the set flags. // First, visit all the flags, and add them to our map. flag.VisitAll(func(f *flag.Flag) { listOfUnsetFlags[f] = true }) // Then delete the set flags. flag.Visit(func(f *flag.Flag) { delete(listOfUnsetFlags, f) }) // Loop through our list of unset flags. // We don't care about the values in our map, only the keys. for k := range listOfUnsetFlags { // Build the corresponding environment variable name for each flag. uppercaseName := strings.ToUpper(k.Name) environmentVariableName := fmt.Sprintf("%v%v", EnvPrefix, uppercaseName) // Look for the environment variable name. // If found, set the flag to that value. // If there's a problem setting the flag value, // there's a serious problem we can't recover from. environmentVariableValue := os.Getenv(environmentVariableName) if environmentVariableValue != "" { err := k.Value.Set(environmentVariableValue) if err != nil { fmt.Errorf("Unable to set configuration option %v from environment variable %v, "+ "which has a value of \"%v\"", k.Name, environmentVariableName, environmentVariableValue) } } } return nil } // setParamInURL is a helper function which sets a parameter in the query of a url. 
func setParamInURL(redirectTo *url.URL, param, value string) { q := redirectTo.Query() q.Set(param, value) redirectTo.RawQuery = q.Encode() } // addParamInURL is a helper function which adds a parameter in the query of a url. func addParamInURL(redirectTo *url.URL, param, value string) { q := redirectTo.Query() q.Add(param, value) redirectTo.RawQuery = q.Encode() }
// EnvPrefix is the prefix for the environment variables. EnvPrefix string = "PERMANENTDETOUR_"
random_line_split
main.go
// Copyright 2019 Carleton University Library All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package main import ( "bufio" "context" "flag" "fmt" "log" "net/http" "net/url" "os" "os/signal" "path/filepath" "strconv" "strings" "syscall" ) const ( // EnvPrefix is the prefix for the environment variables. EnvPrefix string = "PERMANENTDETOUR_" // DefaultAddress is the default address to serve from. DefaultAddress string = ":8877" // PrimoDomain is the domain at which Primo instances are hosted. PrimoDomain string = "primo.exlibrisgroup.com" // subDomain is the institution domain subDomain string = "ocul-qu" // instVID is the institution vid instVID string = "01OCUL_QU:QU_DEFAULT" // MaxMappingFileLength is the maximum number of lines in a mapping file. MaxMappingFileLength uint64 = 1000000 // RecordURLPrefix is the prefix of the path of requests to catalogues for the permalink of a record. RecordPrefix string = "/vwebv/holdingsInfo" // PatronInfoPrefix is the prefix of the path of requests to catalogues for the patron login form. PatronInfoPrefix2 string = "/vwebv/login" // PatronInfoPrefix is the prefix of the path of requests to catalogues for the patron login form. PatronInfoPrefix string = "/vwebv/my" // SearchPrefix is the prefix of the path of requests to catalogues for search results. SearchPrefix string = "/vwebv/search" ) // A version flag, which should be overwritten when building using ldflags. var version = "devel" // Detourer is a struct which stores the data needed to perform redirects. type Detourer struct { idMap map[uint32]uint64 // The map of BibIDs to ExL IDs. primo string // The domain name (host) for the target Primo instance. vid string // The vid parameter to use when building Primo URLs. } // The Detourer serves HTTP redirects based on the request. 
func (d Detourer) ServeHTTP(w http.ResponseWriter, r *http.Request) { // In the default case, redirect to the Primo search form. redirectTo := &url.URL{ Scheme: "https", Host: d.primo, Path: "/discovery/search", } // Depending on the prefix... switch { case strings.HasPrefix(r.URL.Path, RecordPrefix): buildRecordRedirect(redirectTo, r, d.idMap) case strings.HasPrefix(r.URL.Path, PatronInfoPrefix): redirectTo.Path = "/discovery/login" case strings.HasPrefix(r.URL.Path, PatronInfoPrefix2): redirectTo.Path = "/discovery/login" case strings.HasPrefix(r.URL.Path, SearchPrefix): buildSearchRedirect(redirectTo, r) } // Set the vid parameter on all redirects. setParamInURL(redirectTo, "vid", d.vid) // Send the redirect to the client. // http.Redirect(w, r, redirectTo.String(), http.StatusMovedPermanently) http.Redirect(w, r, redirectTo.String(), http.StatusTemporaryRedirect) } // buildRecordRedirect updates redirectTo to the correct Primo record URL for the requested bibID. func buildRecordRedirect(redirectTo *url.URL, r *http.Request, idMap map[uint32]uint64) { q := r.URL.Query() // bibID64, err := strconv.ParseUint(r.URL.Path[len(RecordPrefix):], 10, 32) bibID64, err := strconv.ParseUint(q.Get("bibId"), 10, 32) if err == nil { bibID := uint32(bibID64) exlID, present := idMap[bibID] if present { redirectTo.Path = "/discovery/fulldisplay" setParamInURL(redirectTo, "docid", fmt.Sprintf("alma%v", exlID)) } else { log.Printf("Not found: %v", bibID64) } } else { log.Fatalln(err) } } // SearchAuthorIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=NAME" // SearchCallNumberIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=CALL" // SearchTitleIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=T" // SearchJournalIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=JALL" // buildSearchRedirect updates redirectTo to an approximate Primo URL for the requested search. 
func buildSearchRedirect(redirectTo *url.URL, r *http.Request) { q := r.URL.Query() setParamInURL(redirectTo, "tab", "Everything") setParamInURL(redirectTo, "search_scope", "MyInst_and_CI") if q.Get("searchArg") != "" { switch q.Get("searchCode") { case "TKEY^": setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg"))) case "TALL": setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg"))) case "NAME": redirectTo.Path = "/discovery/browse" setParamInURL(redirectTo, "browseScope", "author") setParamInURL(redirectTo, "browseQuery", q.Get("searchArg")) case "CALL": redirectTo.Path = "/discovery/browse" setParamInURL(redirectTo, "browseScope", "callnumber.0") setParamInURL(redirectTo, "browseQuery", q.Get("searchArg")) case "JALL": redirectTo.Path = "/discovery/jsearch" setParamInURL(redirectTo, "tab", "jsearch_slot") setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg"))) default: setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg"))) } } else if q.Get("SEARCH") != "" { setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("SEARCH"))) } } func main() { // Define the command line flags. addr := flag.String("address", DefaultAddress, "Address to bind on.") subdomain := flag.String("primo", subDomain, "The subdomain of the target Primo instance, ?????.primo.exlibrisgroup.com.") vid := flag.String("vid", instVID, "VID parameter for Primo.") flag.Usage = func() { fmt.Fprintf(os.Stderr, "Permanent Detour: A tiny web service which redirects Voyager Web OPAC requests to Primo URLs.\n") fmt.Fprintf(os.Stderr, "Version %v\n", version) fmt.Fprintf(os.Stderr, "Usage: permanentdetour [flag...] 
[file...]\n") flag.PrintDefaults() fmt.Fprintln(os.Stderr, " Environment variables read when flag is unset:") flag.VisitAll(func(f *flag.Flag) { uppercaseName := strings.ToUpper(f.Name) fmt.Fprintf(os.Stderr, " %v%v\n", EnvPrefix, uppercaseName) }) } // Process the flags. flag.Parse() // If any flags have not been set, see if there are // environment variables that set them. err := overrideUnsetFlagsFromEnvironmentVariables() if err != nil { log.Fatalln(err) } // The Detourer has all the data needed to build redirects. d := Detourer{ primo: fmt.Sprintf("%v.%v", *subdomain, PrimoDomain), vid: *vid, } // Map of BibIDs to ExL IDs // The initial size is an estimate based on the number of arguments. size := uint64(len(flag.Args())) * MaxMappingFileLength d.idMap = make(map[uint32]uint64, size) // Process each file in the arguments list. for _, mappingFilePath := range flag.Args() { // Add the mappings from this file to the idMap. err := processFile(d.idMap, mappingFilePath) if err != nil { log.Fatal(err) } } log.Printf("%v VGer BibID to Ex Libris ID mappings processed.\n", len(d.idMap)) // Use an explicit request multiplexer. mux := http.NewServeMux() mux.Handle("/", d) server := http.Server{ Addr: *addr, Handler: mux, } shutdown := make(chan struct{}) go func() { sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) // Wait to receive a message on the channel. <-sigs err := server.Shutdown(context.Background()) if err != nil { log.Printf("Error shutting down server, %v.\n", err) } close(shutdown) }() log.Println("Starting server.") err = server.ListenAndServe() if err != http.ErrServerClosed { log.Fatalf("Fatal server error, %v.\n", err) } <-shutdown log.Println("Server stopped.") } // processFile takes a file path, opens the file, and reads it line by line to extract id mappings. func processFile(m map[uint32]uint64, mappingFilePath string) error { // Get the absolute path of the file. 
Not strictly necessary, but creates clearer error messages. absFilePath, err := filepath.Abs(mappingFilePath) if err != nil { return fmt.Errorf("Could not get absolute path of %v, %v.\n", mappingFilePath, err) } // Open the file for reading. Close the file automatically when done. file, err := os.Open(absFilePath) if err != nil { return fmt.Errorf("Could not open %v for reading, %v.\n", absFilePath, err) } defer file.Close() // Read the file line by line. scanner := bufio.NewScanner(file) lnum := 0 for scanner.Scan() { lnum += 1 bibID, exlID, err := processLine(scanner.Text()) if err != nil { return fmt.Errorf("Unable to process line %v '%v', %v.\n", lnum, scanner.Text(), err) } _, present := m[bibID] if present { return fmt.Errorf("Previously seen Bib ID %v was encountered.\n", bibID) } m[bibID] = exlID } err = scanner.Err() if err != nil { return fmt.Errorf("Scanner error when processing %v, %v.\n", absFilePath, err) } return nil } // processLine takes a line of input, and finds the bibID and the exL ID. func processLine(line string) (bibID uint32, exlID uint64, _ error)
// If any flags are not set, use environment variables to set them. func overrideUnsetFlagsFromEnvironmentVariables() error { // A map of pointers to unset flags. listOfUnsetFlags := make(map[*flag.Flag]bool) // flag.Visit calls a function on "only those flags that have been set." // flag.VisitAll calls a function on "all flags, even those not set." // No way to ask for "only unset flags". So, we add all, then // delete the set flags. // First, visit all the flags, and add them to our map. flag.VisitAll(func(f *flag.Flag) { listOfUnsetFlags[f] = true }) // Then delete the set flags. flag.Visit(func(f *flag.Flag) { delete(listOfUnsetFlags, f) }) // Loop through our list of unset flags. // We don't care about the values in our map, only the keys. for k := range listOfUnsetFlags { // Build the corresponding environment variable name for each flag. uppercaseName := strings.ToUpper(k.Name) environmentVariableName := fmt.Sprintf("%v%v", EnvPrefix, uppercaseName) // Look for the environment variable name. // If found, set the flag to that value. // If there's a problem setting the flag value, // there's a serious problem we can't recover from. environmentVariableValue := os.Getenv(environmentVariableName) if environmentVariableValue != "" { err := k.Value.Set(environmentVariableValue) if err != nil { fmt.Errorf("Unable to set configuration option %v from environment variable %v, "+ "which has a value of \"%v\"", k.Name, environmentVariableName, environmentVariableValue) } } } return nil } // setParamInURL is a helper function which sets a parameter in the query of a url. func setParamInURL(redirectTo *url.URL, param, value string) { q := redirectTo.Query() q.Set(param, value) redirectTo.RawQuery = q.Encode() } // addParamInURL is a helper function which adds a parameter in the query of a url. func addParamInURL(redirectTo *url.URL, param, value string) { q := redirectTo.Query() q.Add(param, value) redirectTo.RawQuery = q.Encode() }
{ // Split the input line into fields on commas. splitLine := strings.Split(line, ",") if len(splitLine) < 2 { return bibID, exlID, fmt.Errorf("Line has incorrect number of fields, 2 expected, %v found.\n", len(splitLine)) } // The bibIDs look like this: a1234-instid // We need to strip off the first character and anything after the dash. dashIndex := strings.Index(splitLine[1], "-") if (dashIndex == 0) || (dashIndex == 1) { return bibID, exlID, fmt.Errorf("No bibID number was found before dash between bibID and institution id.\n") } bibIDString := "invalid" // If the dash isn't found, use the whole bibID field except the first character. if dashIndex == -1 { bibIDString = splitLine[1] } else { bibIDString = splitLine[1][0:dashIndex] } bibID64, err := strconv.ParseUint(bibIDString, 10, 32) if err != nil { return bibID, exlID, err } bibID = uint32(bibID64) exlID, err = strconv.ParseUint(splitLine[0], 10, 64) if err != nil { return bibID, exlID, err } return bibID, exlID, nil }
identifier_body
main.go
// Copyright 2019 Carleton University Library All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package main import ( "bufio" "context" "flag" "fmt" "log" "net/http" "net/url" "os" "os/signal" "path/filepath" "strconv" "strings" "syscall" ) const ( // EnvPrefix is the prefix for the environment variables. EnvPrefix string = "PERMANENTDETOUR_" // DefaultAddress is the default address to serve from. DefaultAddress string = ":8877" // PrimoDomain is the domain at which Primo instances are hosted. PrimoDomain string = "primo.exlibrisgroup.com" // subDomain is the institution domain subDomain string = "ocul-qu" // instVID is the institution vid instVID string = "01OCUL_QU:QU_DEFAULT" // MaxMappingFileLength is the maximum number of lines in a mapping file. MaxMappingFileLength uint64 = 1000000 // RecordURLPrefix is the prefix of the path of requests to catalogues for the permalink of a record. RecordPrefix string = "/vwebv/holdingsInfo" // PatronInfoPrefix is the prefix of the path of requests to catalogues for the patron login form. PatronInfoPrefix2 string = "/vwebv/login" // PatronInfoPrefix is the prefix of the path of requests to catalogues for the patron login form. PatronInfoPrefix string = "/vwebv/my" // SearchPrefix is the prefix of the path of requests to catalogues for search results. SearchPrefix string = "/vwebv/search" ) // A version flag, which should be overwritten when building using ldflags. var version = "devel" // Detourer is a struct which stores the data needed to perform redirects. type Detourer struct { idMap map[uint32]uint64 // The map of BibIDs to ExL IDs. primo string // The domain name (host) for the target Primo instance. vid string // The vid parameter to use when building Primo URLs. } // The Detourer serves HTTP redirects based on the request. 
func (d Detourer) ServeHTTP(w http.ResponseWriter, r *http.Request) { // In the default case, redirect to the Primo search form. redirectTo := &url.URL{ Scheme: "https", Host: d.primo, Path: "/discovery/search", } // Depending on the prefix... switch { case strings.HasPrefix(r.URL.Path, RecordPrefix): buildRecordRedirect(redirectTo, r, d.idMap) case strings.HasPrefix(r.URL.Path, PatronInfoPrefix): redirectTo.Path = "/discovery/login" case strings.HasPrefix(r.URL.Path, PatronInfoPrefix2): redirectTo.Path = "/discovery/login" case strings.HasPrefix(r.URL.Path, SearchPrefix): buildSearchRedirect(redirectTo, r) } // Set the vid parameter on all redirects. setParamInURL(redirectTo, "vid", d.vid) // Send the redirect to the client. // http.Redirect(w, r, redirectTo.String(), http.StatusMovedPermanently) http.Redirect(w, r, redirectTo.String(), http.StatusTemporaryRedirect) } // buildRecordRedirect updates redirectTo to the correct Primo record URL for the requested bibID. func buildRecordRedirect(redirectTo *url.URL, r *http.Request, idMap map[uint32]uint64) { q := r.URL.Query() // bibID64, err := strconv.ParseUint(r.URL.Path[len(RecordPrefix):], 10, 32) bibID64, err := strconv.ParseUint(q.Get("bibId"), 10, 32) if err == nil { bibID := uint32(bibID64) exlID, present := idMap[bibID] if present { redirectTo.Path = "/discovery/fulldisplay" setParamInURL(redirectTo, "docid", fmt.Sprintf("alma%v", exlID)) } else { log.Printf("Not found: %v", bibID64) } } else { log.Fatalln(err) } } // SearchAuthorIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=NAME" // SearchCallNumberIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=CALL" // SearchTitleIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=T" // SearchJournalIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=JALL" // buildSearchRedirect updates redirectTo to an approximate Primo URL for the requested search. 
func buildSearchRedirect(redirectTo *url.URL, r *http.Request) { q := r.URL.Query() setParamInURL(redirectTo, "tab", "Everything") setParamInURL(redirectTo, "search_scope", "MyInst_and_CI") if q.Get("searchArg") != "" { switch q.Get("searchCode") { case "TKEY^": setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg"))) case "TALL": setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg"))) case "NAME": redirectTo.Path = "/discovery/browse" setParamInURL(redirectTo, "browseScope", "author") setParamInURL(redirectTo, "browseQuery", q.Get("searchArg")) case "CALL": redirectTo.Path = "/discovery/browse" setParamInURL(redirectTo, "browseScope", "callnumber.0") setParamInURL(redirectTo, "browseQuery", q.Get("searchArg")) case "JALL": redirectTo.Path = "/discovery/jsearch" setParamInURL(redirectTo, "tab", "jsearch_slot") setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg"))) default: setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg"))) } } else if q.Get("SEARCH") != "" { setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("SEARCH"))) } } func main() { // Define the command line flags. addr := flag.String("address", DefaultAddress, "Address to bind on.") subdomain := flag.String("primo", subDomain, "The subdomain of the target Primo instance, ?????.primo.exlibrisgroup.com.") vid := flag.String("vid", instVID, "VID parameter for Primo.") flag.Usage = func() { fmt.Fprintf(os.Stderr, "Permanent Detour: A tiny web service which redirects Voyager Web OPAC requests to Primo URLs.\n") fmt.Fprintf(os.Stderr, "Version %v\n", version) fmt.Fprintf(os.Stderr, "Usage: permanentdetour [flag...] 
[file...]\n") flag.PrintDefaults() fmt.Fprintln(os.Stderr, " Environment variables read when flag is unset:") flag.VisitAll(func(f *flag.Flag) { uppercaseName := strings.ToUpper(f.Name) fmt.Fprintf(os.Stderr, " %v%v\n", EnvPrefix, uppercaseName) }) } // Process the flags. flag.Parse() // If any flags have not been set, see if there are // environment variables that set them. err := overrideUnsetFlagsFromEnvironmentVariables() if err != nil { log.Fatalln(err) } // The Detourer has all the data needed to build redirects. d := Detourer{ primo: fmt.Sprintf("%v.%v", *subdomain, PrimoDomain), vid: *vid, } // Map of BibIDs to ExL IDs // The initial size is an estimate based on the number of arguments. size := uint64(len(flag.Args())) * MaxMappingFileLength d.idMap = make(map[uint32]uint64, size) // Process each file in the arguments list. for _, mappingFilePath := range flag.Args() { // Add the mappings from this file to the idMap. err := processFile(d.idMap, mappingFilePath) if err != nil { log.Fatal(err) } } log.Printf("%v VGer BibID to Ex Libris ID mappings processed.\n", len(d.idMap)) // Use an explicit request multiplexer. mux := http.NewServeMux() mux.Handle("/", d) server := http.Server{ Addr: *addr, Handler: mux, } shutdown := make(chan struct{}) go func() { sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) // Wait to receive a message on the channel. <-sigs err := server.Shutdown(context.Background()) if err != nil { log.Printf("Error shutting down server, %v.\n", err) } close(shutdown) }() log.Println("Starting server.") err = server.ListenAndServe() if err != http.ErrServerClosed { log.Fatalf("Fatal server error, %v.\n", err) } <-shutdown log.Println("Server stopped.") } // processFile takes a file path, opens the file, and reads it line by line to extract id mappings. func processFile(m map[uint32]uint64, mappingFilePath string) error { // Get the absolute path of the file. 
Not strictly necessary, but creates clearer error messages. absFilePath, err := filepath.Abs(mappingFilePath) if err != nil { return fmt.Errorf("Could not get absolute path of %v, %v.\n", mappingFilePath, err) } // Open the file for reading. Close the file automatically when done. file, err := os.Open(absFilePath) if err != nil { return fmt.Errorf("Could not open %v for reading, %v.\n", absFilePath, err) } defer file.Close() // Read the file line by line. scanner := bufio.NewScanner(file) lnum := 0 for scanner.Scan() { lnum += 1 bibID, exlID, err := processLine(scanner.Text()) if err != nil { return fmt.Errorf("Unable to process line %v '%v', %v.\n", lnum, scanner.Text(), err) } _, present := m[bibID] if present { return fmt.Errorf("Previously seen Bib ID %v was encountered.\n", bibID) } m[bibID] = exlID } err = scanner.Err() if err != nil { return fmt.Errorf("Scanner error when processing %v, %v.\n", absFilePath, err) } return nil } // processLine takes a line of input, and finds the bibID and the exL ID. func processLine(line string) (bibID uint32, exlID uint64, _ error) { // Split the input line into fields on commas. splitLine := strings.Split(line, ",") if len(splitLine) < 2 { return bibID, exlID, fmt.Errorf("Line has incorrect number of fields, 2 expected, %v found.\n", len(splitLine)) } // The bibIDs look like this: a1234-instid // We need to strip off the first character and anything after the dash. dashIndex := strings.Index(splitLine[1], "-") if (dashIndex == 0) || (dashIndex == 1) { return bibID, exlID, fmt.Errorf("No bibID number was found before dash between bibID and institution id.\n") } bibIDString := "invalid" // If the dash isn't found, use the whole bibID field except the first character. 
if dashIndex == -1 { bibIDString = splitLine[1] } else { bibIDString = splitLine[1][0:dashIndex] } bibID64, err := strconv.ParseUint(bibIDString, 10, 32) if err != nil { return bibID, exlID, err } bibID = uint32(bibID64) exlID, err = strconv.ParseUint(splitLine[0], 10, 64) if err != nil { return bibID, exlID, err } return bibID, exlID, nil } // If any flags are not set, use environment variables to set them. func overrideUnsetFlagsFromEnvironmentVariables() error { // A map of pointers to unset flags. listOfUnsetFlags := make(map[*flag.Flag]bool) // flag.Visit calls a function on "only those flags that have been set." // flag.VisitAll calls a function on "all flags, even those not set." // No way to ask for "only unset flags". So, we add all, then // delete the set flags. // First, visit all the flags, and add them to our map. flag.VisitAll(func(f *flag.Flag) { listOfUnsetFlags[f] = true }) // Then delete the set flags. flag.Visit(func(f *flag.Flag) { delete(listOfUnsetFlags, f) }) // Loop through our list of unset flags. // We don't care about the values in our map, only the keys. for k := range listOfUnsetFlags { // Build the corresponding environment variable name for each flag. uppercaseName := strings.ToUpper(k.Name) environmentVariableName := fmt.Sprintf("%v%v", EnvPrefix, uppercaseName) // Look for the environment variable name. // If found, set the flag to that value. // If there's a problem setting the flag value, // there's a serious problem we can't recover from. environmentVariableValue := os.Getenv(environmentVariableName) if environmentVariableValue != "" { err := k.Value.Set(environmentVariableValue) if err != nil { fmt.Errorf("Unable to set configuration option %v from environment variable %v, "+ "which has a value of \"%v\"", k.Name, environmentVariableName, environmentVariableValue) } } } return nil } // setParamInURL is a helper function which sets a parameter in the query of a url. func
(redirectTo *url.URL, param, value string) { q := redirectTo.Query() q.Set(param, value) redirectTo.RawQuery = q.Encode() } // addParamInURL is a helper function which adds a parameter in the query of a url. func addParamInURL(redirectTo *url.URL, param, value string) { q := redirectTo.Query() q.Add(param, value) redirectTo.RawQuery = q.Encode() }
setParamInURL
identifier_name
main.go
// Copyright 2019 Carleton University Library All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package main import ( "bufio" "context" "flag" "fmt" "log" "net/http" "net/url" "os" "os/signal" "path/filepath" "strconv" "strings" "syscall" ) const ( // EnvPrefix is the prefix for the environment variables. EnvPrefix string = "PERMANENTDETOUR_" // DefaultAddress is the default address to serve from. DefaultAddress string = ":8877" // PrimoDomain is the domain at which Primo instances are hosted. PrimoDomain string = "primo.exlibrisgroup.com" // subDomain is the institution domain subDomain string = "ocul-qu" // instVID is the institution vid instVID string = "01OCUL_QU:QU_DEFAULT" // MaxMappingFileLength is the maximum number of lines in a mapping file. MaxMappingFileLength uint64 = 1000000 // RecordURLPrefix is the prefix of the path of requests to catalogues for the permalink of a record. RecordPrefix string = "/vwebv/holdingsInfo" // PatronInfoPrefix is the prefix of the path of requests to catalogues for the patron login form. PatronInfoPrefix2 string = "/vwebv/login" // PatronInfoPrefix is the prefix of the path of requests to catalogues for the patron login form. PatronInfoPrefix string = "/vwebv/my" // SearchPrefix is the prefix of the path of requests to catalogues for search results. SearchPrefix string = "/vwebv/search" ) // A version flag, which should be overwritten when building using ldflags. var version = "devel" // Detourer is a struct which stores the data needed to perform redirects. type Detourer struct { idMap map[uint32]uint64 // The map of BibIDs to ExL IDs. primo string // The domain name (host) for the target Primo instance. vid string // The vid parameter to use when building Primo URLs. } // The Detourer serves HTTP redirects based on the request. 
func (d Detourer) ServeHTTP(w http.ResponseWriter, r *http.Request) { // In the default case, redirect to the Primo search form. redirectTo := &url.URL{ Scheme: "https", Host: d.primo, Path: "/discovery/search", } // Depending on the prefix... switch { case strings.HasPrefix(r.URL.Path, RecordPrefix): buildRecordRedirect(redirectTo, r, d.idMap) case strings.HasPrefix(r.URL.Path, PatronInfoPrefix): redirectTo.Path = "/discovery/login" case strings.HasPrefix(r.URL.Path, PatronInfoPrefix2): redirectTo.Path = "/discovery/login" case strings.HasPrefix(r.URL.Path, SearchPrefix): buildSearchRedirect(redirectTo, r) } // Set the vid parameter on all redirects. setParamInURL(redirectTo, "vid", d.vid) // Send the redirect to the client. // http.Redirect(w, r, redirectTo.String(), http.StatusMovedPermanently) http.Redirect(w, r, redirectTo.String(), http.StatusTemporaryRedirect) } // buildRecordRedirect updates redirectTo to the correct Primo record URL for the requested bibID. func buildRecordRedirect(redirectTo *url.URL, r *http.Request, idMap map[uint32]uint64) { q := r.URL.Query() // bibID64, err := strconv.ParseUint(r.URL.Path[len(RecordPrefix):], 10, 32) bibID64, err := strconv.ParseUint(q.Get("bibId"), 10, 32) if err == nil
else { log.Fatalln(err) } } // SearchAuthorIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=NAME" // SearchCallNumberIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=CALL" // SearchTitleIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=T" // SearchJournalIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=JALL" // buildSearchRedirect updates redirectTo to an approximate Primo URL for the requested search. func buildSearchRedirect(redirectTo *url.URL, r *http.Request) { q := r.URL.Query() setParamInURL(redirectTo, "tab", "Everything") setParamInURL(redirectTo, "search_scope", "MyInst_and_CI") if q.Get("searchArg") != "" { switch q.Get("searchCode") { case "TKEY^": setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg"))) case "TALL": setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg"))) case "NAME": redirectTo.Path = "/discovery/browse" setParamInURL(redirectTo, "browseScope", "author") setParamInURL(redirectTo, "browseQuery", q.Get("searchArg")) case "CALL": redirectTo.Path = "/discovery/browse" setParamInURL(redirectTo, "browseScope", "callnumber.0") setParamInURL(redirectTo, "browseQuery", q.Get("searchArg")) case "JALL": redirectTo.Path = "/discovery/jsearch" setParamInURL(redirectTo, "tab", "jsearch_slot") setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg"))) default: setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg"))) } } else if q.Get("SEARCH") != "" { setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("SEARCH"))) } } func main() { // Define the command line flags. 
addr := flag.String("address", DefaultAddress, "Address to bind on.") subdomain := flag.String("primo", subDomain, "The subdomain of the target Primo instance, ?????.primo.exlibrisgroup.com.") vid := flag.String("vid", instVID, "VID parameter for Primo.") flag.Usage = func() { fmt.Fprintf(os.Stderr, "Permanent Detour: A tiny web service which redirects Voyager Web OPAC requests to Primo URLs.\n") fmt.Fprintf(os.Stderr, "Version %v\n", version) fmt.Fprintf(os.Stderr, "Usage: permanentdetour [flag...] [file...]\n") flag.PrintDefaults() fmt.Fprintln(os.Stderr, " Environment variables read when flag is unset:") flag.VisitAll(func(f *flag.Flag) { uppercaseName := strings.ToUpper(f.Name) fmt.Fprintf(os.Stderr, " %v%v\n", EnvPrefix, uppercaseName) }) } // Process the flags. flag.Parse() // If any flags have not been set, see if there are // environment variables that set them. err := overrideUnsetFlagsFromEnvironmentVariables() if err != nil { log.Fatalln(err) } // The Detourer has all the data needed to build redirects. d := Detourer{ primo: fmt.Sprintf("%v.%v", *subdomain, PrimoDomain), vid: *vid, } // Map of BibIDs to ExL IDs // The initial size is an estimate based on the number of arguments. size := uint64(len(flag.Args())) * MaxMappingFileLength d.idMap = make(map[uint32]uint64, size) // Process each file in the arguments list. for _, mappingFilePath := range flag.Args() { // Add the mappings from this file to the idMap. err := processFile(d.idMap, mappingFilePath) if err != nil { log.Fatal(err) } } log.Printf("%v VGer BibID to Ex Libris ID mappings processed.\n", len(d.idMap)) // Use an explicit request multiplexer. mux := http.NewServeMux() mux.Handle("/", d) server := http.Server{ Addr: *addr, Handler: mux, } shutdown := make(chan struct{}) go func() { sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) // Wait to receive a message on the channel. 
<-sigs err := server.Shutdown(context.Background()) if err != nil { log.Printf("Error shutting down server, %v.\n", err) } close(shutdown) }() log.Println("Starting server.") err = server.ListenAndServe() if err != http.ErrServerClosed { log.Fatalf("Fatal server error, %v.\n", err) } <-shutdown log.Println("Server stopped.") } // processFile takes a file path, opens the file, and reads it line by line to extract id mappings. func processFile(m map[uint32]uint64, mappingFilePath string) error { // Get the absolute path of the file. Not strictly necessary, but creates clearer error messages. absFilePath, err := filepath.Abs(mappingFilePath) if err != nil { return fmt.Errorf("Could not get absolute path of %v, %v.\n", mappingFilePath, err) } // Open the file for reading. Close the file automatically when done. file, err := os.Open(absFilePath) if err != nil { return fmt.Errorf("Could not open %v for reading, %v.\n", absFilePath, err) } defer file.Close() // Read the file line by line. scanner := bufio.NewScanner(file) lnum := 0 for scanner.Scan() { lnum += 1 bibID, exlID, err := processLine(scanner.Text()) if err != nil { return fmt.Errorf("Unable to process line %v '%v', %v.\n", lnum, scanner.Text(), err) } _, present := m[bibID] if present { return fmt.Errorf("Previously seen Bib ID %v was encountered.\n", bibID) } m[bibID] = exlID } err = scanner.Err() if err != nil { return fmt.Errorf("Scanner error when processing %v, %v.\n", absFilePath, err) } return nil } // processLine takes a line of input, and finds the bibID and the exL ID. func processLine(line string) (bibID uint32, exlID uint64, _ error) { // Split the input line into fields on commas. splitLine := strings.Split(line, ",") if len(splitLine) < 2 { return bibID, exlID, fmt.Errorf("Line has incorrect number of fields, 2 expected, %v found.\n", len(splitLine)) } // The bibIDs look like this: a1234-instid // We need to strip off the first character and anything after the dash. 
dashIndex := strings.Index(splitLine[1], "-") if (dashIndex == 0) || (dashIndex == 1) { return bibID, exlID, fmt.Errorf("No bibID number was found before dash between bibID and institution id.\n") } bibIDString := "invalid" // If the dash isn't found, use the whole bibID field except the first character. if dashIndex == -1 { bibIDString = splitLine[1] } else { bibIDString = splitLine[1][0:dashIndex] } bibID64, err := strconv.ParseUint(bibIDString, 10, 32) if err != nil { return bibID, exlID, err } bibID = uint32(bibID64) exlID, err = strconv.ParseUint(splitLine[0], 10, 64) if err != nil { return bibID, exlID, err } return bibID, exlID, nil } // If any flags are not set, use environment variables to set them. func overrideUnsetFlagsFromEnvironmentVariables() error { // A map of pointers to unset flags. listOfUnsetFlags := make(map[*flag.Flag]bool) // flag.Visit calls a function on "only those flags that have been set." // flag.VisitAll calls a function on "all flags, even those not set." // No way to ask for "only unset flags". So, we add all, then // delete the set flags. // First, visit all the flags, and add them to our map. flag.VisitAll(func(f *flag.Flag) { listOfUnsetFlags[f] = true }) // Then delete the set flags. flag.Visit(func(f *flag.Flag) { delete(listOfUnsetFlags, f) }) // Loop through our list of unset flags. // We don't care about the values in our map, only the keys. for k := range listOfUnsetFlags { // Build the corresponding environment variable name for each flag. uppercaseName := strings.ToUpper(k.Name) environmentVariableName := fmt.Sprintf("%v%v", EnvPrefix, uppercaseName) // Look for the environment variable name. // If found, set the flag to that value. // If there's a problem setting the flag value, // there's a serious problem we can't recover from. 
environmentVariableValue := os.Getenv(environmentVariableName) if environmentVariableValue != "" { err := k.Value.Set(environmentVariableValue) if err != nil { fmt.Errorf("Unable to set configuration option %v from environment variable %v, "+ "which has a value of \"%v\"", k.Name, environmentVariableName, environmentVariableValue) } } } return nil } // setParamInURL is a helper function which sets a parameter in the query of a url. func setParamInURL(redirectTo *url.URL, param, value string) { q := redirectTo.Query() q.Set(param, value) redirectTo.RawQuery = q.Encode() } // addParamInURL is a helper function which adds a parameter in the query of a url. func addParamInURL(redirectTo *url.URL, param, value string) { q := redirectTo.Query() q.Add(param, value) redirectTo.RawQuery = q.Encode() }
{ bibID := uint32(bibID64) exlID, present := idMap[bibID] if present { redirectTo.Path = "/discovery/fulldisplay" setParamInURL(redirectTo, "docid", fmt.Sprintf("alma%v", exlID)) } else { log.Printf("Not found: %v", bibID64) } }
conditional_block
import.rs
use std::{io, path::PathBuf}; use std::collections::HashMap; use crate::mm0::{SortId, TermId, ThmId}; #[cfg(debug_assertions)] use mm0b_parser::BasicMmbFile as MmbFile; #[cfg(not(debug_assertions))] use mm0b_parser::BareMmbFile as MmbFile; use super::Mm0Writer; macro_rules! const_assert { ($cond:expr) => { let _ = [(); 0 - (!($cond) as usize)]; } } macro_rules! build_consts { (@CONST sort $name:ident $e:expr) => { pub const $name: SortId = SortId($e); }; (@CONST term $name:ident $e:expr) => { pub const $name: TermId = TermId($e); }; (@CONST thm $name:ident $e:expr) => { pub const $name: ThmId = ThmId($e); }; (@CONST tydef $name:ident $e:expr) => { pub const $name: TydefId = TydefId($e); }; (@INDEX $mmb:expr, sort $name:ident) => { $mmb.sort_index(mm0_const::$name) }; (@INDEX $mmb:expr, term $name:ident) => { $mmb.term_index(mm0_const::$name) }; (@INDEX $mmb:expr, thm $name:ident) => { $mmb.thm_index(mm0_const::$name) }; (@CONJ $mmb:expr, tydef $name:ident: $s:tt) => {}; (@CONJ $mmb:expr, $ty:ident $name:ident: $s:tt) => { if build_consts!(@INDEX $mmb, $ty $name)?.value()? != $s { return None } }; (@GET $index:expr, sort $s:tt) => { $index.sorts.get($s).map(|n| n.0 as u32) }; (@GET $index:expr, term $s:tt) => { $index.terms.get($s).map(|n| n.0) }; (@GET $index:expr, thm $s:tt) => { $index.thms.get($s).map(|n| n.0) }; (@PRINT $print1:ident, $out:expr, $index:expr, $name:expr, tydef $s:tt = $e:expr) => { writeln!($out, " tydef {}: {} = {};", $name, stringify!($s), $e).unwrap() }; (@PRINT $print1:ident, $out:expr, $index:expr, $name:expr, $ty:ident $s:tt = $e:expr) => { $print1(&mut $out, stringify!($ty), $name, $s, build_consts!(@GET $index, $ty $s), $e) }; (@TYDEFS $(($nil:tt ($name:ident $s:expr, $e:expr)))*) => { pub const TYDEFS: [ (ThmId, TermId, [(TermId, ThmId); 2], [ThmId; 2]); [$($nil),*].len() ] = { const_assert!({ let mut n = 0; ($($e == (n, n += 1).0 &&)* true, n).0 }); use mm0_const::*; [$($s),*] }; }; (@FILTER_TYDEFS $(($(tydef $($tydef:literal)?)? 
$(sort)? $(term)? $(thm)?: $t:tt))*) => { build_consts! { @TYDEFS $($($($tydef)? (() $t))?)* } }; ($($ty:ident $name:ident: $s:tt = $e:expr;)*) => { pub mod mm0_const { use crate::mm0::{SortId, TermId, ThmId, TydefId}; $(build_consts!(@CONST $ty $name $e);)* } build_consts! { @FILTER_TYDEFS $(($ty: ($name $s, $e)))* } #[cfg(debug_assertions)] fn check_consts(mmb: &MmbFile<'_>) { #[derive(Default)] struct Index<'a> { sorts: HashMap<&'a str, SortId>, terms: HashMap<&'a str, TermId>, thms: HashMap<&'a str, ThmId>, } #[cold] fn rebuild_consts(mmb: &MmbFile<'_>) { use std::fmt::Write; let mut index = Index::default(); for n in (0..mmb.header.num_sorts).map(SortId) { if let Some(s) = mmb.sort_index(n).and_then(|ix| ix.value()) { index.sorts.insert(s, n); } } for n in (0..mmb.header.num_terms.get()).map(TermId) { if let Some(s) = mmb.term_index(n).and_then(|ix| ix.value()) { index.terms.insert(s, n); } } for n in (0..mmb.header.num_thms.get()).map(ThmId) { if let Some(s) = mmb.thm_index(n).and_then(|ix| ix.value()) { index.thms.insert(s, n); } } let mut out = String::new(); fn print1(out: &mut String, ty: &str, name: &str, s: &str, o: Option<u32>, e: u32) { if let Some(n) = o { write!(out, " {} {}: {:?} = {};", ty, name, s, n).unwrap(); if n == e { *out += "\n" } else { write!(out, " // not {}\n", e).unwrap() } } else { eprintln!("{} {:?} not found", ty, s); write!(out, " {} {}: {:?} = ???;\n", ty, name, s).unwrap(); } } $(build_consts!(@PRINT print1, out, index, stringify!($name), $ty $s = $e);)* eprintln!("build_consts! {{\n{}}}", out); panic!("Rebuild needed. Put this in 'mm0/import.rs'"); } #[inline] fn check_consts(mmb: &MmbFile<'_>) -> Option<()> { $(build_consts!(@CONJ mmb, $ty $name: $s);)* Some(()) } if check_consts(mmb).is_none() { rebuild_consts(mmb) } } } } // This is a list of indexes into the hol.mmb file; it is checked during startup, // and if any of the indexes are wrong then it will print the appropriate replacement. 
// (TODO: use code generation) build_consts! { sort WFF: "wff" = 0; sort TYPE: "type" = 1; sort TERM: "term" = 2; term MM0_IM: "im" = 0; term MM0_AN: "an" = 1; term TY: "ty" = 4; term THM: "thm" = 9; term APP: "app" = 5; term LAM: "lam" = 6; term BOOL: "bool" = 2; term FUN: "fun" = 3; term EQ: "eq" = 7; thm EQ_T: "eqT" = 27; term TRUE: "T" = 11; thm TRUE_T: "TT" = 51; thm TRUE_DEF: "T_DEF" = 52; term AND: "and" = 16; thm AND_T: "andT" = 68; thm AND_DEF: "AND_DEF" = 69; term IMP: "imp" = 18; thm IMP_T: "impT" = 76; thm IMP_DEF: "IMP_DEF" = 77; term ALL: "all" = 20; thm ALL_T: "allT" = 90; thm ALL_DEF: "FORALL_DEF" = 91; term EX: "ex" = 22; thm EX_T: "exT" = 105; thm EX_DEF: "EXISTS_DEF" = 106; term OR: "or" = 24; thm OR_T: "orT" = 127; thm OR_DEF: "OR_DEF" = 128; term FALSE: "F" = 26; thm FALSE_T: "FT" = 138; thm FALSE_DEF: "F_DEF" = 139; term NOT: "not" = 27; thm NOT_T: "notT" = 142; thm NOT_DEF: "NOT_DEF" = 143; term EU: "eu" = 29; thm EU_T: "euT" = 153; thm EU_DEF: "EU_DEF" = 154; thm ETA_AX: "ETA_AX" = 104; term SEL: "sel" = 15; thm SEL_T: "selT" = 63; thm SELECT_AX: "SELECT_AX" = 157; term COND: "COND" = 30; thm COND_T: "condT" = 163; thm COND_DEF: "COND_DEF" = 164; thm CONJ: "CONJ" = 72; thm CONJ_PAIR: "CONJ_PAIR" = 73; thm CONJUNCT1: "CONJUNCT1" = 74; thm CONJUNCT2: "CONJUNCT2" = 75; thm REFL: "refl" = 33; thm AEQ: "aeq" = 34; thm AEQ1: "aeq1" = 35; thm AEQ2: "aeq2" = 36; thm MP: "MP" = 81; thm DISCH: "DISCH" = 82; thm UNDISCH: "UNDISCH" = 84; thm IMP_ANTISYM: "IMP_ANTISYM" = 85; thm EQ_IMP1: "EQ_IMP1" = 86; thm EQ_IMP2: "EQ_IMP2" = 87; thm IMP_ID: "IMP_ID" = 88; thm IMP_TRANS: "IMP_TRANS" = 89; thm SPEC: "SPEC" = 97; thm GEN: "GEN" = 98; thm CHOOSE: "CHOOSE" = 111; thm EXISTS: "EXISTS" = 113; thm DISJ1: "DISJ1" = 132; thm DISJ2: "DISJ2" = 134; thm DISJ_CASES: "DISJ_CASES" = 136; thm CONTR: "CONTR" = 140; thm NOT_ELIM: "NOT_ELIM" = 147; thm NOT_INTRO: "NOT_INTRO" = 148; thm EQF_INTRO: "EQF_INTRO" = 149; thm EQF_ELIM: "EQF_ELIM" = 150; thm NOT_FALSE: 
"NOT_FALSE" = 151; thm CCONTR: "CCONTR" = 162; thm PROD_TYBIJ1: "prod_tybij1" = 172; thm PROD_TYBIJ2: "prod_tybij2" = 173; thm ONE_ONE_THM: "ONE_ONE" = 192; thm ONTO_THM: "ONTO" = 197; thm INF: "inf" = 198; term MK_PAIR: "mk_pair" = 31; thm MK_PAIR_T: "mk_pairT" = 165; thm MK_PAIR_DEF: "mk_pair_DEF" = 166; term PROD: "prod" = 32; thm PROD_THM: "PROD_THM" = 168; term ABS_PROD: "ABS_prod" = 33; thm ABS_PROD_T: "ABS_prodT" = 170; term REP_PROD: "REP_prod" = 34; thm REP_PROD_T: "REP_prodT" = 171; thm PROD_BIJ1: "PROD_BIJ1" = 174; thm PROD_BIJ2: "PROD_BIJ2" = 175; tydef PROD_TYDEF: (PROD_THM, PROD, [(ABS_PROD, ABS_PROD_T), (REP_PROD, REP_PROD_T)], [PROD_BIJ1, PROD_BIJ2]) = 0; term PAIR: "pr" = 35; thm PAIR_T: "prT" = 176; thm PAIR_DEF: "PAIR_DEF" = 177; term FST: "fst" = 36; thm FST_T: "fstT" = 180; thm FST_DEF: "FST_DEF" = 181; term SND: "snd" = 37; thm SND_T: "sndT" = 184; thm SND_DEF: "SND_DEF" = 185; term IND: "ind" = 40; term ONE_ONE: "one_one" = 38; thm ONE_ONE_T: "one_one_T" = 188; thm ONE_ONE_BD: "one_one_BD" = 189; thm ONE_ONE_DEF: "one_one_DEF" = 191; term ONTO: "onto" = 39; thm ONTO_T: "onto_T" = 193; thm ONTO_BD: "onto_BD" = 194; thm ONTO_DEF: "onto_DEF" = 196; thm INFINITY_AX: "inf" = 198; term IND_SUC: "IND_SUC" = 41; thm IND_SUC_T: "IND_SUC_T" = 199; thm IND_SUC_DEF: "IND_SUC_DEF" = 200; term IND_0: "IND_0" = 42; thm IND_0_T: "IND_0_T" = 203; thm IND_0_DEF: "IND_0_DEF" = 204; term NUM_REP: "NUM_REP" = 43; thm NUM_REP_T: "NUM_REP_T" = 205; thm NUM_REP_DEF: "NUM_REP_DEF" = 206; term NUM: "num" = 44; thm NUM_THM: "NUM_THM" = 207; term MK_NUM: "mk_num" = 45; thm MK_NUM_T: "mk_numT" = 208; term DEST_NUM: "dest_num" = 46; thm DEST_NUM_T: "dest_numT" = 209; thm NUM_BIJ1: "NUM_BIJ1" = 212; thm NUM_BIJ2: "NUM_BIJ2" = 213; tydef NUM_TYDEF: (NUM_THM, NUM, [(MK_NUM, MK_NUM_T), (DEST_NUM, DEST_NUM_T)], [NUM_BIJ1, NUM_BIJ2]) = 1; term ZERO: "_0" = 47; thm ZERO_T: "_0T" = 214; thm ZERO_DEF: "_0_DEF" = 215; term SUC: "suc" = 48; thm SUC_T: "sucT" = 216; thm SUC_BD: 
"suc_BD" = 218; thm SUC_DEF: "suc_DEF" = 219; term NUMERAL: "NUMERAL" = 49; thm NUMERAL_T: "NUMERAL_T" = 220; thm NUMERAL_BD: "NUMERAL_BD" = 222; thm NUMERAL_DEF: "NUMERAL_DEF" = 223; term BIT0: "bit0" = 51; thm BIT0_T: "bit0T" = 230; thm BIT0_DEF: "bit0_DEF" = 232; term BIT1: "bit1" = 52; thm BIT1_T: "bit1T" = 233; thm BIT1_BD: "bit1_BD" = 235; thm BIT1_DEF: "bit1_DEF" = 236; term PRE: "pre" = 54; thm PRE_T: "preT" = 238; thm PRE_DEF: "pre_DEF" = 239; thm PRE_SPEC: "PRE" = 240; term ADD: "add" = 55; thm ADD_T: "addT" = 241; thm ADD_DEF: "add_DEF" = 243; thm ADD_SPEC: "ADD" = 244; term MUL: "mul" = 57; thm MUL_T: "mulT" = 245; thm MUL_DEF: "mul_DEF" = 247; thm MUL_SPEC: "MUL" = 248; term EXP: "exp" = 59; thm EXP_T: "expT" = 250; thm EXP_DEF: "exp_DEF" = 252; thm EXP_SPEC: "EXP" = 253; term LE: "le" = 60; thm LE_T: "leT" = 254; thm LE_DEF: "le_DEF" = 256; thm LE_SPEC: "LE" = 257; term LT: "lt" = 62; thm LT_T: "ltT" = 258; thm LT_DEF: "lt_DEF" = 260; thm LT_SPEC: "LT" = 261; term GE: "ge" = 64; thm GE_T: "geT" = 263; thm GE_BD: "ge_BD" = 264; thm GE_DEF: "ge_DEF" = 265; term GT: "gt" = 65; thm GT_T: "gtT" = 266; thm GT_BD: "gt_BD" = 267; thm GT_DEF: "gt_DEF" = 268; term EVEN: "even" = 66; thm EVEN_T: "evenT" = 269; thm EVEN_DEF: "even_DEF" = 270; thm EVEN_SPEC: "EVEN" = 271; term ODD: "odd" = 67; thm ODD_T: "oddT" = 272; thm ODD_DEF: "odd_DEF" = 273; thm ODD_SPEC: "ODD" = 274; term SUB: "sub" = 68; thm SUB_T: "subT" = 276; thm SUB_DEF: "sub_DEF" = 278; thm SUB_SPEC: "SUB" = 279; term TYPEDEF: "TYPEDEF" = 70; thm TYPEDEF_T: "TYPEDEF_T" = 280; thm TYPEDEF_DEF: "TYPEDEF_DEF" = 281; thm AND_DEF1: "AND_DEF1" = 102; thm EXISTS_THM: "EXISTS_THM" = 158; thm EU_DEF1: "EU_DEF1" = 156; thm IMP_ANTISYM_AX: "IMP_ANTISYM_AX" = 103; thm BOOL_CASES_AX: "BOOL_CASES_AX" = 161; thm TRUTH: "TRUTH" = 53; thm NOT_TRUE: "NOT_TRUE" = 152; thm EM: "em" = 159; thm PAIR_EQ: "PAIR_EQ" = 178; thm PAIR_SURJ: "PAIR_SURJ" = 179; thm FST_THM: "FST" = 183; thm SND_THM: "SND" = 187; thm IND_SUC_0: 
"IND_SUC_0" = 201; thm IND_SUC_INJ: "IND_SUC_INJ" = 202; thm NOT_SUC: "NOT_SUC" = 225; thm SUC_INJ: "SUC_INJ" = 226; thm NUM_CASES: "num_CASES" = 227; thm NUM_IND: "num_INDUCTION" = 228; thm NUM_REC: "num_RECURSION" = 229; thm MUL1: "MUL1" = 249; thm LE1: "LE1" = 262; thm ODD1: "ODD1" = 275; } pub fn hol_writer(out: PathBuf, temp: PathBuf) -> io::Result<Mm0Writer> { #[repr(C, align(8))] pub struct
<T: ?Sized>(T); static HOL_MMB: &Aligned<[u8]> = &Aligned(*include_bytes!("../../hol.mmb")); let mmb = MmbFile::parse(&HOL_MMB.0).unwrap(); #[cfg(debug_assertions)] check_consts(&mmb); Mm0Writer::new(out, temp, &mmb) }
Aligned
identifier_name
import.rs
use std::{io, path::PathBuf}; use std::collections::HashMap; use crate::mm0::{SortId, TermId, ThmId}; #[cfg(debug_assertions)] use mm0b_parser::BasicMmbFile as MmbFile; #[cfg(not(debug_assertions))] use mm0b_parser::BareMmbFile as MmbFile; use super::Mm0Writer; macro_rules! const_assert { ($cond:expr) => { let _ = [(); 0 - (!($cond) as usize)]; } } macro_rules! build_consts { (@CONST sort $name:ident $e:expr) => { pub const $name: SortId = SortId($e); }; (@CONST term $name:ident $e:expr) => { pub const $name: TermId = TermId($e); }; (@CONST thm $name:ident $e:expr) => { pub const $name: ThmId = ThmId($e); }; (@CONST tydef $name:ident $e:expr) => { pub const $name: TydefId = TydefId($e); }; (@INDEX $mmb:expr, sort $name:ident) => { $mmb.sort_index(mm0_const::$name) }; (@INDEX $mmb:expr, term $name:ident) => { $mmb.term_index(mm0_const::$name) }; (@INDEX $mmb:expr, thm $name:ident) => { $mmb.thm_index(mm0_const::$name) }; (@CONJ $mmb:expr, tydef $name:ident: $s:tt) => {}; (@CONJ $mmb:expr, $ty:ident $name:ident: $s:tt) => { if build_consts!(@INDEX $mmb, $ty $name)?.value()? != $s { return None } }; (@GET $index:expr, sort $s:tt) => { $index.sorts.get($s).map(|n| n.0 as u32) }; (@GET $index:expr, term $s:tt) => { $index.terms.get($s).map(|n| n.0) }; (@GET $index:expr, thm $s:tt) => { $index.thms.get($s).map(|n| n.0) }; (@PRINT $print1:ident, $out:expr, $index:expr, $name:expr, tydef $s:tt = $e:expr) => { writeln!($out, " tydef {}: {} = {};", $name, stringify!($s), $e).unwrap() }; (@PRINT $print1:ident, $out:expr, $index:expr, $name:expr, $ty:ident $s:tt = $e:expr) => { $print1(&mut $out, stringify!($ty), $name, $s, build_consts!(@GET $index, $ty $s), $e) }; (@TYDEFS $(($nil:tt ($name:ident $s:expr, $e:expr)))*) => { pub const TYDEFS: [ (ThmId, TermId, [(TermId, ThmId); 2], [ThmId; 2]); [$($nil),*].len() ] = { const_assert!({ let mut n = 0; ($($e == (n, n += 1).0 &&)* true, n).0 }); use mm0_const::*; [$($s),*] }; }; (@FILTER_TYDEFS $(($(tydef $($tydef:literal)?)? 
$(sort)? $(term)? $(thm)?: $t:tt))*) => { build_consts! { @TYDEFS $($($($tydef)? (() $t))?)* } }; ($($ty:ident $name:ident: $s:tt = $e:expr;)*) => { pub mod mm0_const { use crate::mm0::{SortId, TermId, ThmId, TydefId}; $(build_consts!(@CONST $ty $name $e);)* } build_consts! { @FILTER_TYDEFS $(($ty: ($name $s, $e)))* } #[cfg(debug_assertions)] fn check_consts(mmb: &MmbFile<'_>) { #[derive(Default)] struct Index<'a> { sorts: HashMap<&'a str, SortId>, terms: HashMap<&'a str, TermId>, thms: HashMap<&'a str, ThmId>, } #[cold] fn rebuild_consts(mmb: &MmbFile<'_>) { use std::fmt::Write; let mut index = Index::default(); for n in (0..mmb.header.num_sorts).map(SortId) { if let Some(s) = mmb.sort_index(n).and_then(|ix| ix.value()) { index.sorts.insert(s, n); } } for n in (0..mmb.header.num_terms.get()).map(TermId) { if let Some(s) = mmb.term_index(n).and_then(|ix| ix.value()) { index.terms.insert(s, n); } } for n in (0..mmb.header.num_thms.get()).map(ThmId) { if let Some(s) = mmb.thm_index(n).and_then(|ix| ix.value()) { index.thms.insert(s, n); } } let mut out = String::new(); fn print1(out: &mut String, ty: &str, name: &str, s: &str, o: Option<u32>, e: u32) { if let Some(n) = o { write!(out, " {} {}: {:?} = {};", ty, name, s, n).unwrap(); if n == e { *out += "\n" } else { write!(out, " // not {}\n", e).unwrap() } } else { eprintln!("{} {:?} not found", ty, s); write!(out, " {} {}: {:?} = ???;\n", ty, name, s).unwrap(); } } $(build_consts!(@PRINT print1, out, index, stringify!($name), $ty $s = $e);)* eprintln!("build_consts! {{\n{}}}", out); panic!("Rebuild needed. Put this in 'mm0/import.rs'"); } #[inline] fn check_consts(mmb: &MmbFile<'_>) -> Option<()> { $(build_consts!(@CONJ mmb, $ty $name: $s);)* Some(()) } if check_consts(mmb).is_none() { rebuild_consts(mmb) } } } } // This is a list of indexes into the hol.mmb file; it is checked during startup, // and if any of the indexes are wrong then it will print the appropriate replacement. 
// (TODO: use code generation) build_consts! { sort WFF: "wff" = 0; sort TYPE: "type" = 1; sort TERM: "term" = 2; term MM0_IM: "im" = 0; term MM0_AN: "an" = 1; term TY: "ty" = 4; term THM: "thm" = 9; term APP: "app" = 5; term LAM: "lam" = 6; term BOOL: "bool" = 2; term FUN: "fun" = 3; term EQ: "eq" = 7; thm EQ_T: "eqT" = 27; term TRUE: "T" = 11; thm TRUE_T: "TT" = 51; thm TRUE_DEF: "T_DEF" = 52; term AND: "and" = 16; thm AND_T: "andT" = 68; thm AND_DEF: "AND_DEF" = 69; term IMP: "imp" = 18; thm IMP_T: "impT" = 76; thm IMP_DEF: "IMP_DEF" = 77; term ALL: "all" = 20; thm ALL_T: "allT" = 90; thm ALL_DEF: "FORALL_DEF" = 91; term EX: "ex" = 22; thm EX_T: "exT" = 105; thm EX_DEF: "EXISTS_DEF" = 106; term OR: "or" = 24; thm OR_T: "orT" = 127; thm OR_DEF: "OR_DEF" = 128; term FALSE: "F" = 26; thm FALSE_T: "FT" = 138; thm FALSE_DEF: "F_DEF" = 139; term NOT: "not" = 27; thm NOT_T: "notT" = 142; thm NOT_DEF: "NOT_DEF" = 143; term EU: "eu" = 29; thm EU_T: "euT" = 153; thm EU_DEF: "EU_DEF" = 154; thm ETA_AX: "ETA_AX" = 104; term SEL: "sel" = 15; thm SEL_T: "selT" = 63; thm SELECT_AX: "SELECT_AX" = 157; term COND: "COND" = 30; thm COND_T: "condT" = 163; thm COND_DEF: "COND_DEF" = 164; thm CONJ: "CONJ" = 72; thm CONJ_PAIR: "CONJ_PAIR" = 73; thm CONJUNCT1: "CONJUNCT1" = 74; thm CONJUNCT2: "CONJUNCT2" = 75; thm REFL: "refl" = 33; thm AEQ: "aeq" = 34; thm AEQ1: "aeq1" = 35; thm AEQ2: "aeq2" = 36; thm MP: "MP" = 81; thm DISCH: "DISCH" = 82; thm UNDISCH: "UNDISCH" = 84; thm IMP_ANTISYM: "IMP_ANTISYM" = 85; thm EQ_IMP1: "EQ_IMP1" = 86; thm EQ_IMP2: "EQ_IMP2" = 87; thm IMP_ID: "IMP_ID" = 88; thm IMP_TRANS: "IMP_TRANS" = 89; thm SPEC: "SPEC" = 97; thm GEN: "GEN" = 98; thm CHOOSE: "CHOOSE" = 111; thm EXISTS: "EXISTS" = 113; thm DISJ1: "DISJ1" = 132; thm DISJ2: "DISJ2" = 134; thm DISJ_CASES: "DISJ_CASES" = 136; thm CONTR: "CONTR" = 140; thm NOT_ELIM: "NOT_ELIM" = 147; thm NOT_INTRO: "NOT_INTRO" = 148; thm EQF_INTRO: "EQF_INTRO" = 149; thm EQF_ELIM: "EQF_ELIM" = 150; thm NOT_FALSE: 
"NOT_FALSE" = 151; thm CCONTR: "CCONTR" = 162; thm PROD_TYBIJ1: "prod_tybij1" = 172; thm PROD_TYBIJ2: "prod_tybij2" = 173; thm ONE_ONE_THM: "ONE_ONE" = 192; thm ONTO_THM: "ONTO" = 197; thm INF: "inf" = 198; term MK_PAIR: "mk_pair" = 31; thm MK_PAIR_T: "mk_pairT" = 165; thm MK_PAIR_DEF: "mk_pair_DEF" = 166; term PROD: "prod" = 32; thm PROD_THM: "PROD_THM" = 168; term ABS_PROD: "ABS_prod" = 33; thm ABS_PROD_T: "ABS_prodT" = 170; term REP_PROD: "REP_prod" = 34; thm REP_PROD_T: "REP_prodT" = 171; thm PROD_BIJ1: "PROD_BIJ1" = 174; thm PROD_BIJ2: "PROD_BIJ2" = 175; tydef PROD_TYDEF: (PROD_THM, PROD, [(ABS_PROD, ABS_PROD_T), (REP_PROD, REP_PROD_T)], [PROD_BIJ1, PROD_BIJ2]) = 0; term PAIR: "pr" = 35; thm PAIR_T: "prT" = 176; thm PAIR_DEF: "PAIR_DEF" = 177; term FST: "fst" = 36; thm FST_T: "fstT" = 180; thm FST_DEF: "FST_DEF" = 181; term SND: "snd" = 37; thm SND_T: "sndT" = 184; thm SND_DEF: "SND_DEF" = 185; term IND: "ind" = 40; term ONE_ONE: "one_one" = 38; thm ONE_ONE_T: "one_one_T" = 188; thm ONE_ONE_BD: "one_one_BD" = 189; thm ONE_ONE_DEF: "one_one_DEF" = 191; term ONTO: "onto" = 39; thm ONTO_T: "onto_T" = 193; thm ONTO_BD: "onto_BD" = 194; thm ONTO_DEF: "onto_DEF" = 196; thm INFINITY_AX: "inf" = 198; term IND_SUC: "IND_SUC" = 41; thm IND_SUC_T: "IND_SUC_T" = 199; thm IND_SUC_DEF: "IND_SUC_DEF" = 200; term IND_0: "IND_0" = 42; thm IND_0_T: "IND_0_T" = 203; thm IND_0_DEF: "IND_0_DEF" = 204; term NUM_REP: "NUM_REP" = 43; thm NUM_REP_T: "NUM_REP_T" = 205; thm NUM_REP_DEF: "NUM_REP_DEF" = 206; term NUM: "num" = 44; thm NUM_THM: "NUM_THM" = 207; term MK_NUM: "mk_num" = 45; thm MK_NUM_T: "mk_numT" = 208; term DEST_NUM: "dest_num" = 46; thm DEST_NUM_T: "dest_numT" = 209; thm NUM_BIJ1: "NUM_BIJ1" = 212; thm NUM_BIJ2: "NUM_BIJ2" = 213; tydef NUM_TYDEF: (NUM_THM, NUM, [(MK_NUM, MK_NUM_T), (DEST_NUM, DEST_NUM_T)], [NUM_BIJ1, NUM_BIJ2]) = 1; term ZERO: "_0" = 47; thm ZERO_T: "_0T" = 214; thm ZERO_DEF: "_0_DEF" = 215; term SUC: "suc" = 48; thm SUC_T: "sucT" = 216; thm SUC_BD: 
"suc_BD" = 218; thm SUC_DEF: "suc_DEF" = 219; term NUMERAL: "NUMERAL" = 49; thm NUMERAL_T: "NUMERAL_T" = 220; thm NUMERAL_BD: "NUMERAL_BD" = 222; thm NUMERAL_DEF: "NUMERAL_DEF" = 223; term BIT0: "bit0" = 51; thm BIT0_T: "bit0T" = 230; thm BIT0_DEF: "bit0_DEF" = 232; term BIT1: "bit1" = 52; thm BIT1_T: "bit1T" = 233; thm BIT1_BD: "bit1_BD" = 235; thm BIT1_DEF: "bit1_DEF" = 236; term PRE: "pre" = 54; thm PRE_T: "preT" = 238; thm PRE_DEF: "pre_DEF" = 239; thm PRE_SPEC: "PRE" = 240; term ADD: "add" = 55; thm ADD_T: "addT" = 241; thm ADD_DEF: "add_DEF" = 243; thm ADD_SPEC: "ADD" = 244; term MUL: "mul" = 57; thm MUL_T: "mulT" = 245; thm MUL_DEF: "mul_DEF" = 247; thm MUL_SPEC: "MUL" = 248; term EXP: "exp" = 59; thm EXP_T: "expT" = 250; thm EXP_DEF: "exp_DEF" = 252; thm EXP_SPEC: "EXP" = 253; term LE: "le" = 60; thm LE_T: "leT" = 254; thm LE_DEF: "le_DEF" = 256; thm LE_SPEC: "LE" = 257; term LT: "lt" = 62; thm LT_T: "ltT" = 258; thm LT_DEF: "lt_DEF" = 260; thm LT_SPEC: "LT" = 261; term GE: "ge" = 64; thm GE_T: "geT" = 263; thm GE_BD: "ge_BD" = 264; thm GE_DEF: "ge_DEF" = 265; term GT: "gt" = 65; thm GT_T: "gtT" = 266; thm GT_BD: "gt_BD" = 267; thm GT_DEF: "gt_DEF" = 268; term EVEN: "even" = 66; thm EVEN_T: "evenT" = 269; thm EVEN_DEF: "even_DEF" = 270; thm EVEN_SPEC: "EVEN" = 271; term ODD: "odd" = 67; thm ODD_T: "oddT" = 272; thm ODD_DEF: "odd_DEF" = 273; thm ODD_SPEC: "ODD" = 274; term SUB: "sub" = 68; thm SUB_T: "subT" = 276; thm SUB_DEF: "sub_DEF" = 278; thm SUB_SPEC: "SUB" = 279; term TYPEDEF: "TYPEDEF" = 70; thm TYPEDEF_T: "TYPEDEF_T" = 280; thm TYPEDEF_DEF: "TYPEDEF_DEF" = 281; thm AND_DEF1: "AND_DEF1" = 102; thm EXISTS_THM: "EXISTS_THM" = 158; thm EU_DEF1: "EU_DEF1" = 156; thm IMP_ANTISYM_AX: "IMP_ANTISYM_AX" = 103; thm BOOL_CASES_AX: "BOOL_CASES_AX" = 161; thm TRUTH: "TRUTH" = 53; thm NOT_TRUE: "NOT_TRUE" = 152; thm EM: "em" = 159; thm PAIR_EQ: "PAIR_EQ" = 178; thm PAIR_SURJ: "PAIR_SURJ" = 179; thm FST_THM: "FST" = 183; thm SND_THM: "SND" = 187; thm IND_SUC_0: 
"IND_SUC_0" = 201; thm IND_SUC_INJ: "IND_SUC_INJ" = 202; thm NOT_SUC: "NOT_SUC" = 225; thm SUC_INJ: "SUC_INJ" = 226; thm NUM_CASES: "num_CASES" = 227; thm NUM_IND: "num_INDUCTION" = 228; thm NUM_REC: "num_RECURSION" = 229; thm MUL1: "MUL1" = 249; thm LE1: "LE1" = 262; thm ODD1: "ODD1" = 275; } pub fn hol_writer(out: PathBuf, temp: PathBuf) -> io::Result<Mm0Writer> { #[repr(C, align(8))] pub struct Aligned<T: ?Sized>(T); static HOL_MMB: &Aligned<[u8]> = &Aligned(*include_bytes!("../../hol.mmb"));
Mm0Writer::new(out, temp, &mmb) }
let mmb = MmbFile::parse(&HOL_MMB.0).unwrap(); #[cfg(debug_assertions)] check_consts(&mmb);
random_line_split
main.go
package main import ( "encoding/csv" "encoding/json" "fmt" "log" "os" "os/exec" "path/filepath" "strings" "time" "github.com/vansante/go-ffprobe" ) var ( fileInfo os.FileInfo err error ) type Ffprobe struct { Format struct { Filename string `json:"filename"` NbStreams int `json:"nb_streams"` NbPrograms int `json:"nb_programs"` FormatName string `json:"format_name"` FormatLongName string `json:"format_long_name"` StartTime string `json:"start_time"` Duration string `json:"duration"` Size string `json:"size"` BitRate string `json:"bit_rate"` ProbeScore int `json:"probe_score"` Tags struct { MajorBrand string `json:"major_brand"` MinorVersion string `json:"minor_version"` CompatibleBrands string `json:"compatible_brands"` CreationTime time.Time `json:"creation_time"` } `json:"tags"` } `json:"format"` } type BroadcastResearch interface { DefineTargets() } type operations struct { title string directory string outputtype string optype string monthlycsv bool jpg struct { inputfile string outputdirectory string } png struct { inputfile string outputfile string } gif struct { inputfile string outputfile string } histo struct { inputfile string outputfile string } } // Content Engine func (o operations) DefineTargets() { var collectFiles, jj, result []string fileList := []string{} filepath.Walk(o.directory, func(path string, f os.FileInfo, err error) error { fileList = append(fileList, path) return nil }) for _, file := range fileList { collectFiles = append(collectFiles, file) } dirFiles := searchFiles("edits", collectFiles) for stuff := 0; stuff < len(dirFiles); stuff++ { editName := strings.SplitAfter(dirFiles[stuff], "/edits/") // fmt.Println(editName) jj = append(jj, editName[0][:len(editName[0])-7]) if stuff == len(dirFiles)-1 { encountered := map[string]bool{} for v := range jj { if encountered[jj[v]] == true { } else { encountered[jj[v]] = true result = append(result, jj[v]) // Create directories if o.optype != "ffprobe" { fmt.Println("directory created") cmd3 := 
exec.Command("mkdir", jj[v]+o.outputtype) cmd3.Run() } } } } // Defer the creation of the content until all folders have been created defer generateContent(o, dirFiles[stuff], editName) } // Clear buffers collectFiles, jj, result = nil, nil, nil if o.monthlycsv != false { fmt.Println("generating csv analysis file~~~~~~~") generateCSV(dirFiles) } } func generateCSV(input []string) { var matrix [][]string for i := 0; i < len(input); i++ { // fmt.Println(input[i] + "s\n") editName := strings.SplitAfter(input[i], "/edits/") // fmt.Println(editName) data, err := ffprobe.GetProbeData(input[i], 5000*time.Millisecond) if err != nil { log.Panicf("Error getting data: %v", err) } // MarhsalIndent the incoming data, accessible via buf variable (w/ error handling) buf, err := json.MarshalIndent(data, "", " ") if err != nil { log.Panicf("Error unmarshalling: %v", err) } // Connect struct to variable var probed Ffprobe // Unmarshal buffer into variable defined by the Ffprobe type if err := json.Unmarshal(buf, &probed); err != nil { panic(err) } // Base filename ffprobeFilename := probed.Format.Filename // Clean up the filename cleanFilename := filepath.Base(ffprobeFilename) // Unix date for the current file unixdate := string(probed.Format.Tags.CreationTime.Format(time.RFC850)) // Split the Unix date by a comma s := strings.Split(unixdate, ",") // Title of file title := cleanFilename[:len(cleanFilename)-4] // Type of file typer := cleanFilename[len(cleanFilename)-4:] // Path of file path := editName[0] jo := strings.SplitAfter(input[i], "/edits/") again := jo[0][:len(jo[0])-7] splitagain := strings.SplitAfter(again, "/") again2 := splitagain[len(splitagain)-1] jj := strings.SplitAfter(again2, "-") // Folder month folderMonth := jj[0][:len(jj[0])-1] // Folder day folderDay := jj[1][:len(jj[1])-1] // Folder year folderYear := jj[2][:len(jj[2])] // Edit Month editMonth := s[1][4:7] // Edit date editDate := s[1][1:11] // Edit day (i.e. 
Monday) editDay := s[0] // Edit year editYear := "20" + s[1][8:11] // Timestamp of current file timestamp := s[1][11:19] // Location of the current file loc := s[1][20:23] matrix = append(matrix, []string{ title, folderMonth, folderDay, folderYear, editMonth, editDay, editYear, editDate[:2], typer, path, timestamp, loc, probed.Format.Duration, probed.Format.Size, probed.Format.BitRate, probed.Format.FormatName, probed.Format.FormatLongName}) } targetDirectory := strings.SplitAfter(input[0], "/edits/") fmt.Println(targetDirectory[0][:len(targetDirectory[0])-17] + "\n") again := strings.SplitAfter(targetDirectory[0][:len(targetDirectory[0])-17], "-") fmt.Println(again) year := again[0][len(again[0])-5:] month := again[1] day := again[2][:len(again[2])-1] fmt.Println(year + month + day) combine := year + month + day root := targetDirectory[0][:len(targetDirectory[0])-18] csvFile := root + "/" + combine + "-FFProbeAnalysis-V4.csv" fmt.Println(csvFile) file, err := os.Create(csvFile) if err != nil { log.Fatal("FAILED TO CREATE CSV", err) } defer file.Close() writer := csv.NewWriter(file) defer writer.Flush() // Iterate over the FFmpeg matrix for _, value := range matrix { // Write data to new .csv file err := writer.Write(value) if err != nil { log.Fatal("FAILED TO WRITE CSV", err) } } fmt.Println("done!") } // generateContent // Define an operation using "case" and pass files from the directory func generateContent(o operations, dirFiles string, editName []string) { switch o.optype { case "png": // create pngs mp4File := dirFiles pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png" cmd := exec.Command("ffmpeg", "-y", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] palettegen", pngFile) cmd.Run() case "gif": // create gifs mp4File := dirFiles gifFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".gif" cmd := exec.Command("ffmpeg", "-ss", "0", "-t", "11", "-i", mp4File, 
"-filter_complex", "[0:v] fps=24,scale=w=480:h=-1,split [a][b];[a] palettegen=stats_mode=single [p];[b][p] paletteuse=new=1", gifFile) cmd.Run() // Move .gif to gif directory destinationDirectory := editName[0][:len(editName[0])-7] + "/gif" cmd2 := exec.Command("mv", gifFile, destinationDirectory) cmd2.Run() case "jpg": // create jpgs fmt.Println("create jpgs") jpgFolder := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] cmd3 := exec.Command("mkdir", jpgFolder) cmd3.Run() mp4File := dirFiles jpgFrames := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] + "/" + editName[1][:len(editName[1])-4] + "-frame-%04d.jpg" defer generateJpgs(mp4File, jpgFrames) case "histogram1": // create histogram fmt.Println("create histo1") histo1Folder := editName[0][:len(editName[0])-7] + "/histogram" cmd := exec.Command("mkdir", histo1Folder) cmd.Run() pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png" histo1File := editName[0][:len(editName[0])-7] + "/histogram/" + editName[1][:len(editName[1])-4] + ".txt" defer generateHistogram1(pngFile, histo1File) } } func generateJpgs(mp4File string, jpgFrames string) { cmd5 := exec.Command("ffmpeg", "-n", "-i", mp4File, jpgFrames) cmd5.Run() } func generateHistogram1(pngFile string, histo1File string)
func main() { globalDir := "/Users/csk/Documents/_REPO/1987-06-may/" runSuite(globalDir) } func runSuite(globalDir string) { var br operations // // PNG test // br.directory = globalDir // br.outputtype = "/png" // br.optype = "png" // var i1 BroadcastResearch = br // i1.DefineTargets() // // GIF test // br.directory = globalDir // br.outputtype = "/gif" // br.optype = "gif" // var i2 BroadcastResearch = br // i2.DefineTargets() // // JPG test // br.directory = globalDir // br.outputtype = "/jpg" // br.optype = "jpg" // var i3 BroadcastResearch = br // i3.DefineTargets() // // HISTOGRAM-1 test // br.directory = globalDir // br.outputtype = "/histogram" // br.optype = "histogram1" // var i4 BroadcastResearch = br // i4.DefineTargets() // PNG test br.directory = globalDir br.outputtype = "/ffprobe" br.optype = "ffprobe" br.monthlycsv = true var i5 BroadcastResearch = br i5.DefineTargets() } func searchFiles(typer string, rmDups []string) []string { var savePng []string // Receive a type switch typer { case "gifs": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "edits": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "png": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not 
contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "raw": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } } return savePng }
{ cmd := exec.Command("convert", pngFile, "-format", "%c", "histogam:info:", histo1File) cmd.Run() }
identifier_body
main.go
package main import ( "encoding/csv" "encoding/json" "fmt" "log" "os" "os/exec" "path/filepath" "strings" "time" "github.com/vansante/go-ffprobe" ) var ( fileInfo os.FileInfo err error ) type Ffprobe struct { Format struct { Filename string `json:"filename"` NbStreams int `json:"nb_streams"` NbPrograms int `json:"nb_programs"` FormatName string `json:"format_name"` FormatLongName string `json:"format_long_name"` StartTime string `json:"start_time"` Duration string `json:"duration"` Size string `json:"size"` BitRate string `json:"bit_rate"` ProbeScore int `json:"probe_score"` Tags struct { MajorBrand string `json:"major_brand"` MinorVersion string `json:"minor_version"` CompatibleBrands string `json:"compatible_brands"` CreationTime time.Time `json:"creation_time"` } `json:"tags"` } `json:"format"` } type BroadcastResearch interface { DefineTargets() } type operations struct { title string directory string outputtype string optype string monthlycsv bool jpg struct { inputfile string outputdirectory string } png struct { inputfile string outputfile string } gif struct { inputfile string outputfile string } histo struct { inputfile string outputfile string } } // Content Engine func (o operations) DefineTargets() { var collectFiles, jj, result []string fileList := []string{} filepath.Walk(o.directory, func(path string, f os.FileInfo, err error) error { fileList = append(fileList, path) return nil }) for _, file := range fileList { collectFiles = append(collectFiles, file) } dirFiles := searchFiles("edits", collectFiles) for stuff := 0; stuff < len(dirFiles); stuff++ { editName := strings.SplitAfter(dirFiles[stuff], "/edits/") // fmt.Println(editName) jj = append(jj, editName[0][:len(editName[0])-7]) if stuff == len(dirFiles)-1 { encountered := map[string]bool{} for v := range jj { if encountered[jj[v]] == true { } else { encountered[jj[v]] = true result = append(result, jj[v]) // Create directories if o.optype != "ffprobe" { fmt.Println("directory created") cmd3 := 
exec.Command("mkdir", jj[v]+o.outputtype) cmd3.Run() } } } } // Defer the creation of the content until all folders have been created defer generateContent(o, dirFiles[stuff], editName) } // Clear buffers collectFiles, jj, result = nil, nil, nil if o.monthlycsv != false { fmt.Println("generating csv analysis file~~~~~~~") generateCSV(dirFiles) } } func generateCSV(input []string) { var matrix [][]string for i := 0; i < len(input); i++ { // fmt.Println(input[i] + "s\n") editName := strings.SplitAfter(input[i], "/edits/") // fmt.Println(editName) data, err := ffprobe.GetProbeData(input[i], 5000*time.Millisecond) if err != nil { log.Panicf("Error getting data: %v", err) } // MarhsalIndent the incoming data, accessible via buf variable (w/ error handling) buf, err := json.MarshalIndent(data, "", " ") if err != nil { log.Panicf("Error unmarshalling: %v", err) } // Connect struct to variable var probed Ffprobe // Unmarshal buffer into variable defined by the Ffprobe type if err := json.Unmarshal(buf, &probed); err != nil { panic(err) } // Base filename ffprobeFilename := probed.Format.Filename // Clean up the filename cleanFilename := filepath.Base(ffprobeFilename) // Unix date for the current file unixdate := string(probed.Format.Tags.CreationTime.Format(time.RFC850)) // Split the Unix date by a comma s := strings.Split(unixdate, ",") // Title of file title := cleanFilename[:len(cleanFilename)-4] // Type of file typer := cleanFilename[len(cleanFilename)-4:] // Path of file path := editName[0] jo := strings.SplitAfter(input[i], "/edits/") again := jo[0][:len(jo[0])-7] splitagain := strings.SplitAfter(again, "/") again2 := splitagain[len(splitagain)-1] jj := strings.SplitAfter(again2, "-") // Folder month folderMonth := jj[0][:len(jj[0])-1] // Folder day folderDay := jj[1][:len(jj[1])-1] // Folder year folderYear := jj[2][:len(jj[2])] // Edit Month editMonth := s[1][4:7] // Edit date editDate := s[1][1:11] // Edit day (i.e. 
Monday) editDay := s[0] // Edit year editYear := "20" + s[1][8:11] // Timestamp of current file timestamp := s[1][11:19] // Location of the current file loc := s[1][20:23] matrix = append(matrix, []string{ title, folderMonth, folderDay, folderYear, editMonth, editDay, editYear, editDate[:2], typer, path, timestamp, loc, probed.Format.Duration, probed.Format.Size, probed.Format.BitRate, probed.Format.FormatName, probed.Format.FormatLongName}) } targetDirectory := strings.SplitAfter(input[0], "/edits/") fmt.Println(targetDirectory[0][:len(targetDirectory[0])-17] + "\n") again := strings.SplitAfter(targetDirectory[0][:len(targetDirectory[0])-17], "-") fmt.Println(again) year := again[0][len(again[0])-5:] month := again[1] day := again[2][:len(again[2])-1] fmt.Println(year + month + day) combine := year + month + day root := targetDirectory[0][:len(targetDirectory[0])-18] csvFile := root + "/" + combine + "-FFProbeAnalysis-V4.csv" fmt.Println(csvFile) file, err := os.Create(csvFile) if err != nil { log.Fatal("FAILED TO CREATE CSV", err) } defer file.Close() writer := csv.NewWriter(file) defer writer.Flush() // Iterate over the FFmpeg matrix for _, value := range matrix { // Write data to new .csv file err := writer.Write(value) if err != nil { log.Fatal("FAILED TO WRITE CSV", err) } } fmt.Println("done!") } // generateContent // Define an operation using "case" and pass files from the directory func generateContent(o operations, dirFiles string, editName []string) { switch o.optype { case "png": // create pngs mp4File := dirFiles pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png" cmd := exec.Command("ffmpeg", "-y", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] palettegen", pngFile) cmd.Run() case "gif": // create gifs mp4File := dirFiles gifFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".gif" cmd := exec.Command("ffmpeg", "-ss", "0", "-t", "11", "-i", mp4File, 
"-filter_complex", "[0:v] fps=24,scale=w=480:h=-1,split [a][b];[a] palettegen=stats_mode=single [p];[b][p] paletteuse=new=1", gifFile) cmd.Run() // Move .gif to gif directory destinationDirectory := editName[0][:len(editName[0])-7] + "/gif" cmd2 := exec.Command("mv", gifFile, destinationDirectory) cmd2.Run() case "jpg": // create jpgs fmt.Println("create jpgs") jpgFolder := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] cmd3 := exec.Command("mkdir", jpgFolder) cmd3.Run() mp4File := dirFiles jpgFrames := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] + "/" + editName[1][:len(editName[1])-4] + "-frame-%04d.jpg" defer generateJpgs(mp4File, jpgFrames) case "histogram1": // create histogram fmt.Println("create histo1") histo1Folder := editName[0][:len(editName[0])-7] + "/histogram" cmd := exec.Command("mkdir", histo1Folder) cmd.Run() pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png" histo1File := editName[0][:len(editName[0])-7] + "/histogram/" + editName[1][:len(editName[1])-4] + ".txt" defer generateHistogram1(pngFile, histo1File) } } func generateJpgs(mp4File string, jpgFrames string) { cmd5 := exec.Command("ffmpeg", "-n", "-i", mp4File, jpgFrames) cmd5.Run() } func generateHistogram1(pngFile string, histo1File string) { cmd := exec.Command("convert", pngFile, "-format", "%c", "histogam:info:", histo1File) cmd.Run() } func main() { globalDir := "/Users/csk/Documents/_REPO/1987-06-may/" runSuite(globalDir) } func runSuite(globalDir string) { var br operations // // PNG test // br.directory = globalDir // br.outputtype = "/png" // br.optype = "png" // var i1 BroadcastResearch = br // i1.DefineTargets() // // GIF test // br.directory = globalDir // br.outputtype = "/gif" // br.optype = "gif" // var i2 BroadcastResearch = br // i2.DefineTargets() // // JPG test // br.directory = globalDir // br.outputtype = "/jpg" // br.optype = "jpg" // var i3 BroadcastResearch 
= br // i3.DefineTargets() // // HISTOGRAM-1 test // br.directory = globalDir // br.outputtype = "/histogram" // br.optype = "histogram1" // var i4 BroadcastResearch = br // i4.DefineTargets() // PNG test br.directory = globalDir br.outputtype = "/ffprobe" br.optype = "ffprobe" br.monthlycsv = true var i5 BroadcastResearch = br i5.DefineTargets() } func
(typer string, rmDups []string) []string { var savePng []string // Receive a type switch typer { case "gifs": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "edits": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "png": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "raw": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } } return savePng }
searchFiles
identifier_name
main.go
package main import ( "encoding/csv" "encoding/json" "fmt" "log" "os" "os/exec" "path/filepath" "strings" "time" "github.com/vansante/go-ffprobe" ) var ( fileInfo os.FileInfo err error ) type Ffprobe struct { Format struct { Filename string `json:"filename"` NbStreams int `json:"nb_streams"` NbPrograms int `json:"nb_programs"` FormatName string `json:"format_name"` FormatLongName string `json:"format_long_name"` StartTime string `json:"start_time"` Duration string `json:"duration"` Size string `json:"size"` BitRate string `json:"bit_rate"` ProbeScore int `json:"probe_score"` Tags struct { MajorBrand string `json:"major_brand"` MinorVersion string `json:"minor_version"` CompatibleBrands string `json:"compatible_brands"` CreationTime time.Time `json:"creation_time"` } `json:"tags"` } `json:"format"` } type BroadcastResearch interface { DefineTargets() } type operations struct { title string directory string outputtype string optype string monthlycsv bool jpg struct { inputfile string outputdirectory string } png struct { inputfile string outputfile string } gif struct { inputfile string outputfile string } histo struct { inputfile string outputfile string } } // Content Engine func (o operations) DefineTargets() { var collectFiles, jj, result []string fileList := []string{} filepath.Walk(o.directory, func(path string, f os.FileInfo, err error) error { fileList = append(fileList, path) return nil }) for _, file := range fileList
dirFiles := searchFiles("edits", collectFiles) for stuff := 0; stuff < len(dirFiles); stuff++ { editName := strings.SplitAfter(dirFiles[stuff], "/edits/") // fmt.Println(editName) jj = append(jj, editName[0][:len(editName[0])-7]) if stuff == len(dirFiles)-1 { encountered := map[string]bool{} for v := range jj { if encountered[jj[v]] == true { } else { encountered[jj[v]] = true result = append(result, jj[v]) // Create directories if o.optype != "ffprobe" { fmt.Println("directory created") cmd3 := exec.Command("mkdir", jj[v]+o.outputtype) cmd3.Run() } } } } // Defer the creation of the content until all folders have been created defer generateContent(o, dirFiles[stuff], editName) } // Clear buffers collectFiles, jj, result = nil, nil, nil if o.monthlycsv != false { fmt.Println("generating csv analysis file~~~~~~~") generateCSV(dirFiles) } } func generateCSV(input []string) { var matrix [][]string for i := 0; i < len(input); i++ { // fmt.Println(input[i] + "s\n") editName := strings.SplitAfter(input[i], "/edits/") // fmt.Println(editName) data, err := ffprobe.GetProbeData(input[i], 5000*time.Millisecond) if err != nil { log.Panicf("Error getting data: %v", err) } // MarhsalIndent the incoming data, accessible via buf variable (w/ error handling) buf, err := json.MarshalIndent(data, "", " ") if err != nil { log.Panicf("Error unmarshalling: %v", err) } // Connect struct to variable var probed Ffprobe // Unmarshal buffer into variable defined by the Ffprobe type if err := json.Unmarshal(buf, &probed); err != nil { panic(err) } // Base filename ffprobeFilename := probed.Format.Filename // Clean up the filename cleanFilename := filepath.Base(ffprobeFilename) // Unix date for the current file unixdate := string(probed.Format.Tags.CreationTime.Format(time.RFC850)) // Split the Unix date by a comma s := strings.Split(unixdate, ",") // Title of file title := cleanFilename[:len(cleanFilename)-4] // Type of file typer := cleanFilename[len(cleanFilename)-4:] // Path of file path 
:= editName[0] jo := strings.SplitAfter(input[i], "/edits/") again := jo[0][:len(jo[0])-7] splitagain := strings.SplitAfter(again, "/") again2 := splitagain[len(splitagain)-1] jj := strings.SplitAfter(again2, "-") // Folder month folderMonth := jj[0][:len(jj[0])-1] // Folder day folderDay := jj[1][:len(jj[1])-1] // Folder year folderYear := jj[2][:len(jj[2])] // Edit Month editMonth := s[1][4:7] // Edit date editDate := s[1][1:11] // Edit day (i.e. Monday) editDay := s[0] // Edit year editYear := "20" + s[1][8:11] // Timestamp of current file timestamp := s[1][11:19] // Location of the current file loc := s[1][20:23] matrix = append(matrix, []string{ title, folderMonth, folderDay, folderYear, editMonth, editDay, editYear, editDate[:2], typer, path, timestamp, loc, probed.Format.Duration, probed.Format.Size, probed.Format.BitRate, probed.Format.FormatName, probed.Format.FormatLongName}) } targetDirectory := strings.SplitAfter(input[0], "/edits/") fmt.Println(targetDirectory[0][:len(targetDirectory[0])-17] + "\n") again := strings.SplitAfter(targetDirectory[0][:len(targetDirectory[0])-17], "-") fmt.Println(again) year := again[0][len(again[0])-5:] month := again[1] day := again[2][:len(again[2])-1] fmt.Println(year + month + day) combine := year + month + day root := targetDirectory[0][:len(targetDirectory[0])-18] csvFile := root + "/" + combine + "-FFProbeAnalysis-V4.csv" fmt.Println(csvFile) file, err := os.Create(csvFile) if err != nil { log.Fatal("FAILED TO CREATE CSV", err) } defer file.Close() writer := csv.NewWriter(file) defer writer.Flush() // Iterate over the FFmpeg matrix for _, value := range matrix { // Write data to new .csv file err := writer.Write(value) if err != nil { log.Fatal("FAILED TO WRITE CSV", err) } } fmt.Println("done!") } // generateContent // Define an operation using "case" and pass files from the directory func generateContent(o operations, dirFiles string, editName []string) { switch o.optype { case "png": // create pngs mp4File := 
dirFiles pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png" cmd := exec.Command("ffmpeg", "-y", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] palettegen", pngFile) cmd.Run() case "gif": // create gifs mp4File := dirFiles gifFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".gif" cmd := exec.Command("ffmpeg", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] fps=24,scale=w=480:h=-1,split [a][b];[a] palettegen=stats_mode=single [p];[b][p] paletteuse=new=1", gifFile) cmd.Run() // Move .gif to gif directory destinationDirectory := editName[0][:len(editName[0])-7] + "/gif" cmd2 := exec.Command("mv", gifFile, destinationDirectory) cmd2.Run() case "jpg": // create jpgs fmt.Println("create jpgs") jpgFolder := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] cmd3 := exec.Command("mkdir", jpgFolder) cmd3.Run() mp4File := dirFiles jpgFrames := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] + "/" + editName[1][:len(editName[1])-4] + "-frame-%04d.jpg" defer generateJpgs(mp4File, jpgFrames) case "histogram1": // create histogram fmt.Println("create histo1") histo1Folder := editName[0][:len(editName[0])-7] + "/histogram" cmd := exec.Command("mkdir", histo1Folder) cmd.Run() pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png" histo1File := editName[0][:len(editName[0])-7] + "/histogram/" + editName[1][:len(editName[1])-4] + ".txt" defer generateHistogram1(pngFile, histo1File) } } func generateJpgs(mp4File string, jpgFrames string) { cmd5 := exec.Command("ffmpeg", "-n", "-i", mp4File, jpgFrames) cmd5.Run() } func generateHistogram1(pngFile string, histo1File string) { cmd := exec.Command("convert", pngFile, "-format", "%c", "histogam:info:", histo1File) cmd.Run() } func main() { globalDir := "/Users/csk/Documents/_REPO/1987-06-may/" runSuite(globalDir) } func 
runSuite(globalDir string) { var br operations // // PNG test // br.directory = globalDir // br.outputtype = "/png" // br.optype = "png" // var i1 BroadcastResearch = br // i1.DefineTargets() // // GIF test // br.directory = globalDir // br.outputtype = "/gif" // br.optype = "gif" // var i2 BroadcastResearch = br // i2.DefineTargets() // // JPG test // br.directory = globalDir // br.outputtype = "/jpg" // br.optype = "jpg" // var i3 BroadcastResearch = br // i3.DefineTargets() // // HISTOGRAM-1 test // br.directory = globalDir // br.outputtype = "/histogram" // br.optype = "histogram1" // var i4 BroadcastResearch = br // i4.DefineTargets() // PNG test br.directory = globalDir br.outputtype = "/ffprobe" br.optype = "ffprobe" br.monthlycsv = true var i5 BroadcastResearch = br i5.DefineTargets() } func searchFiles(typer string, rmDups []string) []string { var savePng []string // Receive a type switch typer { case "gifs": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "edits": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "png": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array 
savePng = append(savePng, rmDups[i]) } } } case "raw": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } } return savePng }
{ collectFiles = append(collectFiles, file) }
conditional_block
main.go
package main import ( "encoding/csv" "encoding/json" "fmt" "log" "os" "os/exec" "path/filepath" "strings" "time" "github.com/vansante/go-ffprobe" ) var ( fileInfo os.FileInfo err error ) type Ffprobe struct { Format struct { Filename string `json:"filename"` NbStreams int `json:"nb_streams"` NbPrograms int `json:"nb_programs"` FormatName string `json:"format_name"` FormatLongName string `json:"format_long_name"` StartTime string `json:"start_time"` Duration string `json:"duration"` Size string `json:"size"` BitRate string `json:"bit_rate"` ProbeScore int `json:"probe_score"` Tags struct { MajorBrand string `json:"major_brand"` MinorVersion string `json:"minor_version"` CompatibleBrands string `json:"compatible_brands"` CreationTime time.Time `json:"creation_time"` } `json:"tags"` } `json:"format"` } type BroadcastResearch interface { DefineTargets() } type operations struct { title string directory string outputtype string optype string monthlycsv bool jpg struct { inputfile string outputdirectory string } png struct { inputfile string outputfile string } gif struct { inputfile string outputfile string } histo struct { inputfile string outputfile string } } // Content Engine func (o operations) DefineTargets() { var collectFiles, jj, result []string
return nil }) for _, file := range fileList { collectFiles = append(collectFiles, file) } dirFiles := searchFiles("edits", collectFiles) for stuff := 0; stuff < len(dirFiles); stuff++ { editName := strings.SplitAfter(dirFiles[stuff], "/edits/") // fmt.Println(editName) jj = append(jj, editName[0][:len(editName[0])-7]) if stuff == len(dirFiles)-1 { encountered := map[string]bool{} for v := range jj { if encountered[jj[v]] == true { } else { encountered[jj[v]] = true result = append(result, jj[v]) // Create directories if o.optype != "ffprobe" { fmt.Println("directory created") cmd3 := exec.Command("mkdir", jj[v]+o.outputtype) cmd3.Run() } } } } // Defer the creation of the content until all folders have been created defer generateContent(o, dirFiles[stuff], editName) } // Clear buffers collectFiles, jj, result = nil, nil, nil if o.monthlycsv != false { fmt.Println("generating csv analysis file~~~~~~~") generateCSV(dirFiles) } } func generateCSV(input []string) { var matrix [][]string for i := 0; i < len(input); i++ { // fmt.Println(input[i] + "s\n") editName := strings.SplitAfter(input[i], "/edits/") // fmt.Println(editName) data, err := ffprobe.GetProbeData(input[i], 5000*time.Millisecond) if err != nil { log.Panicf("Error getting data: %v", err) } // MarhsalIndent the incoming data, accessible via buf variable (w/ error handling) buf, err := json.MarshalIndent(data, "", " ") if err != nil { log.Panicf("Error unmarshalling: %v", err) } // Connect struct to variable var probed Ffprobe // Unmarshal buffer into variable defined by the Ffprobe type if err := json.Unmarshal(buf, &probed); err != nil { panic(err) } // Base filename ffprobeFilename := probed.Format.Filename // Clean up the filename cleanFilename := filepath.Base(ffprobeFilename) // Unix date for the current file unixdate := string(probed.Format.Tags.CreationTime.Format(time.RFC850)) // Split the Unix date by a comma s := strings.Split(unixdate, ",") // Title of file title := 
cleanFilename[:len(cleanFilename)-4] // Type of file typer := cleanFilename[len(cleanFilename)-4:] // Path of file path := editName[0] jo := strings.SplitAfter(input[i], "/edits/") again := jo[0][:len(jo[0])-7] splitagain := strings.SplitAfter(again, "/") again2 := splitagain[len(splitagain)-1] jj := strings.SplitAfter(again2, "-") // Folder month folderMonth := jj[0][:len(jj[0])-1] // Folder day folderDay := jj[1][:len(jj[1])-1] // Folder year folderYear := jj[2][:len(jj[2])] // Edit Month editMonth := s[1][4:7] // Edit date editDate := s[1][1:11] // Edit day (i.e. Monday) editDay := s[0] // Edit year editYear := "20" + s[1][8:11] // Timestamp of current file timestamp := s[1][11:19] // Location of the current file loc := s[1][20:23] matrix = append(matrix, []string{ title, folderMonth, folderDay, folderYear, editMonth, editDay, editYear, editDate[:2], typer, path, timestamp, loc, probed.Format.Duration, probed.Format.Size, probed.Format.BitRate, probed.Format.FormatName, probed.Format.FormatLongName}) } targetDirectory := strings.SplitAfter(input[0], "/edits/") fmt.Println(targetDirectory[0][:len(targetDirectory[0])-17] + "\n") again := strings.SplitAfter(targetDirectory[0][:len(targetDirectory[0])-17], "-") fmt.Println(again) year := again[0][len(again[0])-5:] month := again[1] day := again[2][:len(again[2])-1] fmt.Println(year + month + day) combine := year + month + day root := targetDirectory[0][:len(targetDirectory[0])-18] csvFile := root + "/" + combine + "-FFProbeAnalysis-V4.csv" fmt.Println(csvFile) file, err := os.Create(csvFile) if err != nil { log.Fatal("FAILED TO CREATE CSV", err) } defer file.Close() writer := csv.NewWriter(file) defer writer.Flush() // Iterate over the FFmpeg matrix for _, value := range matrix { // Write data to new .csv file err := writer.Write(value) if err != nil { log.Fatal("FAILED TO WRITE CSV", err) } } fmt.Println("done!") } // generateContent // Define an operation using "case" and pass files from the directory func 
generateContent(o operations, dirFiles string, editName []string) { switch o.optype { case "png": // create pngs mp4File := dirFiles pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png" cmd := exec.Command("ffmpeg", "-y", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] palettegen", pngFile) cmd.Run() case "gif": // create gifs mp4File := dirFiles gifFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".gif" cmd := exec.Command("ffmpeg", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] fps=24,scale=w=480:h=-1,split [a][b];[a] palettegen=stats_mode=single [p];[b][p] paletteuse=new=1", gifFile) cmd.Run() // Move .gif to gif directory destinationDirectory := editName[0][:len(editName[0])-7] + "/gif" cmd2 := exec.Command("mv", gifFile, destinationDirectory) cmd2.Run() case "jpg": // create jpgs fmt.Println("create jpgs") jpgFolder := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] cmd3 := exec.Command("mkdir", jpgFolder) cmd3.Run() mp4File := dirFiles jpgFrames := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] + "/" + editName[1][:len(editName[1])-4] + "-frame-%04d.jpg" defer generateJpgs(mp4File, jpgFrames) case "histogram1": // create histogram fmt.Println("create histo1") histo1Folder := editName[0][:len(editName[0])-7] + "/histogram" cmd := exec.Command("mkdir", histo1Folder) cmd.Run() pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png" histo1File := editName[0][:len(editName[0])-7] + "/histogram/" + editName[1][:len(editName[1])-4] + ".txt" defer generateHistogram1(pngFile, histo1File) } } func generateJpgs(mp4File string, jpgFrames string) { cmd5 := exec.Command("ffmpeg", "-n", "-i", mp4File, jpgFrames) cmd5.Run() } func generateHistogram1(pngFile string, histo1File string) { cmd := exec.Command("convert", pngFile, "-format", "%c", 
"histogam:info:", histo1File) cmd.Run() } func main() { globalDir := "/Users/csk/Documents/_REPO/1987-06-may/" runSuite(globalDir) } func runSuite(globalDir string) { var br operations // // PNG test // br.directory = globalDir // br.outputtype = "/png" // br.optype = "png" // var i1 BroadcastResearch = br // i1.DefineTargets() // // GIF test // br.directory = globalDir // br.outputtype = "/gif" // br.optype = "gif" // var i2 BroadcastResearch = br // i2.DefineTargets() // // JPG test // br.directory = globalDir // br.outputtype = "/jpg" // br.optype = "jpg" // var i3 BroadcastResearch = br // i3.DefineTargets() // // HISTOGRAM-1 test // br.directory = globalDir // br.outputtype = "/histogram" // br.optype = "histogram1" // var i4 BroadcastResearch = br // i4.DefineTargets() // PNG test br.directory = globalDir br.outputtype = "/ffprobe" br.optype = "ffprobe" br.monthlycsv = true var i5 BroadcastResearch = br i5.DefineTargets() } func searchFiles(typer string, rmDups []string) []string { var savePng []string // Receive a type switch typer { case "gifs": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "edits": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "png": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { 
case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } case "raw": // iterate over the incoming array i := 0 for i < len(rmDups)-1 { i++ // If the string contains the incoming type of directory h := strings.Contains(rmDups[i], typer+"/") switch h { case true: // If the string does not contain .DS_Store if !strings.Contains(rmDups[i], ".DS_Store") { // append to savePng string array savePng = append(savePng, rmDups[i]) } } } } return savePng }
fileList := []string{} filepath.Walk(o.directory, func(path string, f os.FileInfo, err error) error { fileList = append(fileList, path)
random_line_split
repository.go
// Copyright 2016 GRAIL, Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package filerepo implements a filesystem-backed repository. It stores // objects in a directory on disk; the objects are named by the // string representation of their digest, i.e., of the form // sha256:d60e67ce9.... package filerepo import ( "context" "io" "io/ioutil" "net/url" "os" "path/filepath" "syscall" "time" "github.com/grailbio/base/data" "github.com/grailbio/base/digest" "github.com/grailbio/reflow" "github.com/grailbio/reflow/errors" "github.com/grailbio/reflow/liveset" "github.com/grailbio/reflow/log" "github.com/grailbio/reflow/repository" "golang.org/x/sync/singleflight" ) // Repository implements a filesystem-backed Repository. type Repository struct { // The root directory for this repository. This directory contains // all objects. Root string Log *log.Logger // RepoURL may be set to a URL that represents this repository. RepoURL *url.URL read, write singleflight.Group } // Path returns the filesystem directory and full path of the object with a given digest. func (r *Repository) Path(id digest.Digest) (dir, path string) { dir = filepath.Join(r.Root, id.Hex()[:2]) return dir, filepath.Join(dir, id.Hex()[2:]) } // Install links the given file into the repository, named by digest. func (r *Repository) Install(file string) (reflow.File, error) { f, err := os.Open(file) if err != nil { return reflow.File{}, err } defer f.Close() w := reflow.Digester.NewWriter() n, err := io.Copy(w, f) if err != nil { return reflow.File{}, err } d := w.Digest() return reflow.File{ID: d, Size: n}, r.InstallDigest(d, file) } // InstallDigest installs a file at the given digest. The caller guarantees // that the file's bytes have the digest d. 
func (r *Repository) InstallDigest(d digest.Digest, file string) error { file, err := filepath.EvalSymlinks(file) if err != nil { return err } dir, path := r.Path(d) if err := os.MkdirAll(dir, 0777); err != nil { return err } err = os.Link(file, path) if os.IsExist(err) { err = nil } if err != nil { // Copy if file was reported to be on a different device. if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV { f, ferr := os.Open(file) if ferr != nil { return ferr } defer func() { _ = f.Close() }() _, err = r.Put(context.Background(), f) } } return err } // Stat retrieves metadata for files stored in the repository. func (r *Repository) Stat(ctx context.Context, id digest.Digest) (reflow.File, error) { _, path := r.Path(id) info, err := os.Stat(path) if err != nil { return reflow.File{}, errors.E("stat", r.Root, id, err) } return reflow.File{ID: id, Size: info.Size()}, nil } // Get retrieves the object named by a digest. func (r *Repository) Get(ctx context.Context, id digest.Digest) (io.ReadCloser, error) { _, path := r.Path(id) rc, err := os.Open(path) if err != nil { return nil, errors.E("get", r.Root, id, err) } return rc, nil } // Remove removes an object from the repository. func (r *Repository) Remove(id digest.Digest) error { _, path := r.Path(id) return os.Remove(path) } // ReadFrom installs an object directly from a foreign repository. If // the foreign repository supports supports GetFile, it is used to // download directly. // // GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error) // // This is used by implementations like S3, which can download // multiple chunks at once, and requires an io.WriterAt so that // chunks needn't be buffered in memory until the download is // contiguous. // // ReadFrom singleflights concurrent downloads from the same key // regardless of repository origin. 
func (r *Repository) ReadFrom(ctx context.Context, id digest.Digest, u *url.URL) error { if ok, _ := r.Contains(id); ok { return nil } _, err, _ := r.read.Do(id.String(), func() (interface{}, error) { repo, err := repository.Dial(u.String()) if err != nil { return nil, err } type getFiler interface { GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error) } if gf, ok := repo.(getFiler); ok { temp, err := r.TempFile("getfile-") if err != nil { return nil, err } defer os.Remove(temp.Name()) _, err = gf.GetFile(ctx, id, temp) if err != nil { _ = temp.Close() return nil, err } err = temp.Close() if err != nil { return nil, err } f, err := r.Install(temp.Name()) if err != nil { return nil, err } if id != f.ID { srcSize := "unknown size" if srcStat, serr := repo.Stat(ctx, id); serr == nil { srcSize = data.Size(srcStat.Size).String() } dstSize := "unknown size" if dstStat, derr := temp.Stat(); derr == nil { dstSize = data.Size(dstStat.Size()).String() } return nil, errors.E("readfrom (GetFile)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, f.ID, dstSize)) } return nil, nil } rc, err := repo.Get(ctx, id) if err != nil { return nil, err } defer rc.Close() id2, err := r.Put(ctx, rc) if err != nil { return nil, err } if id != id2 { srcSize := "unknown size" if srcStat, serr := repo.Stat(ctx, id); serr == nil { srcSize = data.Size(srcStat.Size).String() } dstSize := "unknown size" if dstStat, derr := r.Stat(ctx, id2); derr == nil { dstSize = data.Size(dstStat.Size).String() } return nil, errors.E("readfrom (Get)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, id2, dstSize)) } return nil, nil }) if err != nil { r.Log.Debug(err) } return err } // WriteTo writes an object directly to a foreign repository. // If the foreign repository supports the PutFile method, it is // used. 
// // PutFile(context.Context, reflow.File, io.Reader) error // // PutFile is useful for implementations like S3 which can upload // multiple chunks at a time, and requires direct reader (and, // dynamically, io.Seeker) access. // // WriteTo singleflights requests of the same ID and url. func (r *Repository) WriteTo(ctx context.Context, id digest.Digest, u *url.URL) error { if ok, _ := r.Contains(id); !ok { return errors.E("writeto", r.Root, id, u.String(), errors.NotExist) } _, err, _ := r.write.Do(id.String()+u.String(), func() (interface{}, error) { repo, err := repository.Dial(u.String()) if err != nil { return nil, err } dir, path := r.Path(id) if err := os.MkdirAll(dir, 0777); err != nil { return nil, err } file, err := os.Open(path) if err != nil { return nil, errors.E("get", r.Root, id, err) } defer file.Close() info, err := file.Stat() if err != nil { return nil, err } type putFiler interface { PutFile(context.Context, reflow.File, io.Reader) error } if pf, ok := repo.(putFiler); ok { return nil, pf.PutFile(ctx, reflow.File{ID: id, Size: info.Size()}, file) } id2, err := repo.Put(ctx, file) if err != nil { return nil, err } if id != id2 { return nil, errors.E("writeto", r.Root, id, errors.Integrity, errors.Errorf("%v != %v", id, id2)) }
// URL returns the url of this repository. func (r *Repository) URL() *url.URL { return r.RepoURL } // Contains tells whether the repository has an object with a digest. func (r *Repository) Contains(id digest.Digest) (bool, error) { _, path := r.Path(id) _, err := os.Stat(path) if os.IsNotExist(err) { return false, nil } else if err != nil { return false, err } return true, nil } // Put installs an object into the repository. Its digest identity is returned. func (r *Repository) Put(ctx context.Context, body io.Reader) (digest.Digest, error) { temp, err := r.TempFile("create-") if err != nil { return digest.Digest{}, err } defer os.Remove(temp.Name()) dw := reflow.Digester.NewWriter() done := make(chan error, 1) // This is a workaround to make sure that copies respect // context cancellations. Note that the underlying copy is // not actually cancelled, so this could lead to goroutine // leaks. go func() { _, err = io.Copy(temp, io.TeeReader(body, dw)) temp.Close() done <- err }() select { case <-ctx.Done(): return digest.Digest{}, ctx.Err() case err := <-done: if err != nil { return digest.Digest{}, err } dgst := dw.Digest() return dgst, r.InstallDigest(dgst, temp.Name()) } } // Materialize takes a mapping of path-to-object, and hardlinks the // corresponding objects from the repository into the given root. func (r *Repository) Materialize(root string, binds map[string]digest.Digest) error { dirsMade := map[string]bool{} for path, id := range binds { path = filepath.Join(root, path) dir := filepath.Dir(path) if !dirsMade[dir] { if err := os.MkdirAll(dir, 0777); err != nil { return err } // TODO(marius): also insert parents dirsMade[dir] = true } os.Remove(path) // best effort _, rpath := r.Path(id) if err := os.Link(rpath, path); err != nil { // Copy if file was reported to be on a different device. 
if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV { f, ferr := os.Create(path) if ferr != nil { return ferr } rc, rcerr := r.Get(context.Background(), id) if rcerr != nil { return rcerr } _, err = io.Copy(f, rc) } return err } } return nil } // Vacuum moves all objects from the given repository to this one. func (r *Repository) Vacuum(ctx context.Context, repo *Repository) error { var w walker w.Init(repo) var errs errors.Multi for w.Scan() { if err := r.InstallDigest(w.Digest(), w.Path()); err != nil { errs.Add(errors.E("vacuum", w.Digest(), err)) } } if err := w.Err(); err != nil { errs.Add(err) } _ = repo.Collect(ctx, nil) // ignore errors return errs.Combined() } // Scan invokes handler for each object in the repository. func (r *Repository) Scan(ctx context.Context, handler func(digest.Digest) error) error { var w walker w.Init(r) for w.Scan() { if err := ctx.Err(); err != nil { return err } err := handler(w.Digest()) if err != nil { return err } } return w.Err() } // CollectWithThreshold removes from this repository any objects not in the // Liveset and whose creation times are not more recent than the // threshold time. func (r *Repository) CollectWithThreshold(ctx context.Context, live liveset.Liveset, dead liveset.Liveset, threshold time.Time, dryRun bool) error { return errors.E("collectwiththreshold", errors.NotSupported) } // Collect removes any objects in the repository that are not also in // the live set. func (r *Repository) Collect(ctx context.Context, live liveset.Liveset) error { var w walker w.Init(r) var ( n int size int64 ) for w.Scan() { if live != nil && live.Contains(w.Digest()) { continue } size += w.Info().Size() if err := os.Remove(w.Path()); err != nil { r.Log.Errorf("remove %q: %v", w.Path(), err) } // Clean up object subdirectories. (Ignores failure when nonempty.) 
os.Remove(filepath.Dir(w.Path())) n++ } if live != nil { r.Log.Printf("collected %v objects (%s)", n, data.Size(size)) } return w.Err() } // TempFile creates and returns a new temporary file adjacent to the // repository. Files created by TempFile can be efficiently ingested // by Repository.Install. The caller is responsible for cleaning up // temporary files. func (r *Repository) TempFile(prefix string) (*os.File, error) { dir := filepath.Join(r.Root, "tmp") os.MkdirAll(dir, 0777) return ioutil.TempFile(dir, prefix) }
return nil, nil }) return err }
random_line_split
repository.go
// Copyright 2016 GRAIL, Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package filerepo implements a filesystem-backed repository. It stores // objects in a directory on disk; the objects are named by the // string representation of their digest, i.e., of the form // sha256:d60e67ce9.... package filerepo import ( "context" "io" "io/ioutil" "net/url" "os" "path/filepath" "syscall" "time" "github.com/grailbio/base/data" "github.com/grailbio/base/digest" "github.com/grailbio/reflow" "github.com/grailbio/reflow/errors" "github.com/grailbio/reflow/liveset" "github.com/grailbio/reflow/log" "github.com/grailbio/reflow/repository" "golang.org/x/sync/singleflight" ) // Repository implements a filesystem-backed Repository. type Repository struct { // The root directory for this repository. This directory contains // all objects. Root string Log *log.Logger // RepoURL may be set to a URL that represents this repository. RepoURL *url.URL read, write singleflight.Group } // Path returns the filesystem directory and full path of the object with a given digest. func (r *Repository) Path(id digest.Digest) (dir, path string) { dir = filepath.Join(r.Root, id.Hex()[:2]) return dir, filepath.Join(dir, id.Hex()[2:]) } // Install links the given file into the repository, named by digest. func (r *Repository) Install(file string) (reflow.File, error) { f, err := os.Open(file) if err != nil { return reflow.File{}, err } defer f.Close() w := reflow.Digester.NewWriter() n, err := io.Copy(w, f) if err != nil { return reflow.File{}, err } d := w.Digest() return reflow.File{ID: d, Size: n}, r.InstallDigest(d, file) } // InstallDigest installs a file at the given digest. The caller guarantees // that the file's bytes have the digest d. func (r *Repository)
(d digest.Digest, file string) error { file, err := filepath.EvalSymlinks(file) if err != nil { return err } dir, path := r.Path(d) if err := os.MkdirAll(dir, 0777); err != nil { return err } err = os.Link(file, path) if os.IsExist(err) { err = nil } if err != nil { // Copy if file was reported to be on a different device. if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV { f, ferr := os.Open(file) if ferr != nil { return ferr } defer func() { _ = f.Close() }() _, err = r.Put(context.Background(), f) } } return err } // Stat retrieves metadata for files stored in the repository. func (r *Repository) Stat(ctx context.Context, id digest.Digest) (reflow.File, error) { _, path := r.Path(id) info, err := os.Stat(path) if err != nil { return reflow.File{}, errors.E("stat", r.Root, id, err) } return reflow.File{ID: id, Size: info.Size()}, nil } // Get retrieves the object named by a digest. func (r *Repository) Get(ctx context.Context, id digest.Digest) (io.ReadCloser, error) { _, path := r.Path(id) rc, err := os.Open(path) if err != nil { return nil, errors.E("get", r.Root, id, err) } return rc, nil } // Remove removes an object from the repository. func (r *Repository) Remove(id digest.Digest) error { _, path := r.Path(id) return os.Remove(path) } // ReadFrom installs an object directly from a foreign repository. If // the foreign repository supports supports GetFile, it is used to // download directly. // // GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error) // // This is used by implementations like S3, which can download // multiple chunks at once, and requires an io.WriterAt so that // chunks needn't be buffered in memory until the download is // contiguous. // // ReadFrom singleflights concurrent downloads from the same key // regardless of repository origin. 
func (r *Repository) ReadFrom(ctx context.Context, id digest.Digest, u *url.URL) error { if ok, _ := r.Contains(id); ok { return nil } _, err, _ := r.read.Do(id.String(), func() (interface{}, error) { repo, err := repository.Dial(u.String()) if err != nil { return nil, err } type getFiler interface { GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error) } if gf, ok := repo.(getFiler); ok { temp, err := r.TempFile("getfile-") if err != nil { return nil, err } defer os.Remove(temp.Name()) _, err = gf.GetFile(ctx, id, temp) if err != nil { _ = temp.Close() return nil, err } err = temp.Close() if err != nil { return nil, err } f, err := r.Install(temp.Name()) if err != nil { return nil, err } if id != f.ID { srcSize := "unknown size" if srcStat, serr := repo.Stat(ctx, id); serr == nil { srcSize = data.Size(srcStat.Size).String() } dstSize := "unknown size" if dstStat, derr := temp.Stat(); derr == nil { dstSize = data.Size(dstStat.Size()).String() } return nil, errors.E("readfrom (GetFile)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, f.ID, dstSize)) } return nil, nil } rc, err := repo.Get(ctx, id) if err != nil { return nil, err } defer rc.Close() id2, err := r.Put(ctx, rc) if err != nil { return nil, err } if id != id2 { srcSize := "unknown size" if srcStat, serr := repo.Stat(ctx, id); serr == nil { srcSize = data.Size(srcStat.Size).String() } dstSize := "unknown size" if dstStat, derr := r.Stat(ctx, id2); derr == nil { dstSize = data.Size(dstStat.Size).String() } return nil, errors.E("readfrom (Get)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, id2, dstSize)) } return nil, nil }) if err != nil { r.Log.Debug(err) } return err } // WriteTo writes an object directly to a foreign repository. // If the foreign repository supports the PutFile method, it is // used. 
// // PutFile(context.Context, reflow.File, io.Reader) error // // PutFile is useful for implementations like S3 which can upload // multiple chunks at a time, and requires direct reader (and, // dynamically, io.Seeker) access. // // WriteTo singleflights requests of the same ID and url. func (r *Repository) WriteTo(ctx context.Context, id digest.Digest, u *url.URL) error { if ok, _ := r.Contains(id); !ok { return errors.E("writeto", r.Root, id, u.String(), errors.NotExist) } _, err, _ := r.write.Do(id.String()+u.String(), func() (interface{}, error) { repo, err := repository.Dial(u.String()) if err != nil { return nil, err } dir, path := r.Path(id) if err := os.MkdirAll(dir, 0777); err != nil { return nil, err } file, err := os.Open(path) if err != nil { return nil, errors.E("get", r.Root, id, err) } defer file.Close() info, err := file.Stat() if err != nil { return nil, err } type putFiler interface { PutFile(context.Context, reflow.File, io.Reader) error } if pf, ok := repo.(putFiler); ok { return nil, pf.PutFile(ctx, reflow.File{ID: id, Size: info.Size()}, file) } id2, err := repo.Put(ctx, file) if err != nil { return nil, err } if id != id2 { return nil, errors.E("writeto", r.Root, id, errors.Integrity, errors.Errorf("%v != %v", id, id2)) } return nil, nil }) return err } // URL returns the url of this repository. func (r *Repository) URL() *url.URL { return r.RepoURL } // Contains tells whether the repository has an object with a digest. func (r *Repository) Contains(id digest.Digest) (bool, error) { _, path := r.Path(id) _, err := os.Stat(path) if os.IsNotExist(err) { return false, nil } else if err != nil { return false, err } return true, nil } // Put installs an object into the repository. Its digest identity is returned. 
func (r *Repository) Put(ctx context.Context, body io.Reader) (digest.Digest, error) { temp, err := r.TempFile("create-") if err != nil { return digest.Digest{}, err } defer os.Remove(temp.Name()) dw := reflow.Digester.NewWriter() done := make(chan error, 1) // This is a workaround to make sure that copies respect // context cancellations. Note that the underlying copy is // not actually cancelled, so this could lead to goroutine // leaks. go func() { _, err = io.Copy(temp, io.TeeReader(body, dw)) temp.Close() done <- err }() select { case <-ctx.Done(): return digest.Digest{}, ctx.Err() case err := <-done: if err != nil { return digest.Digest{}, err } dgst := dw.Digest() return dgst, r.InstallDigest(dgst, temp.Name()) } } // Materialize takes a mapping of path-to-object, and hardlinks the // corresponding objects from the repository into the given root. func (r *Repository) Materialize(root string, binds map[string]digest.Digest) error { dirsMade := map[string]bool{} for path, id := range binds { path = filepath.Join(root, path) dir := filepath.Dir(path) if !dirsMade[dir] { if err := os.MkdirAll(dir, 0777); err != nil { return err } // TODO(marius): also insert parents dirsMade[dir] = true } os.Remove(path) // best effort _, rpath := r.Path(id) if err := os.Link(rpath, path); err != nil { // Copy if file was reported to be on a different device. if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV { f, ferr := os.Create(path) if ferr != nil { return ferr } rc, rcerr := r.Get(context.Background(), id) if rcerr != nil { return rcerr } _, err = io.Copy(f, rc) } return err } } return nil } // Vacuum moves all objects from the given repository to this one. 
func (r *Repository) Vacuum(ctx context.Context, repo *Repository) error { var w walker w.Init(repo) var errs errors.Multi for w.Scan() { if err := r.InstallDigest(w.Digest(), w.Path()); err != nil { errs.Add(errors.E("vacuum", w.Digest(), err)) } } if err := w.Err(); err != nil { errs.Add(err) } _ = repo.Collect(ctx, nil) // ignore errors return errs.Combined() } // Scan invokes handler for each object in the repository. func (r *Repository) Scan(ctx context.Context, handler func(digest.Digest) error) error { var w walker w.Init(r) for w.Scan() { if err := ctx.Err(); err != nil { return err } err := handler(w.Digest()) if err != nil { return err } } return w.Err() } // CollectWithThreshold removes from this repository any objects not in the // Liveset and whose creation times are not more recent than the // threshold time. func (r *Repository) CollectWithThreshold(ctx context.Context, live liveset.Liveset, dead liveset.Liveset, threshold time.Time, dryRun bool) error { return errors.E("collectwiththreshold", errors.NotSupported) } // Collect removes any objects in the repository that are not also in // the live set. func (r *Repository) Collect(ctx context.Context, live liveset.Liveset) error { var w walker w.Init(r) var ( n int size int64 ) for w.Scan() { if live != nil && live.Contains(w.Digest()) { continue } size += w.Info().Size() if err := os.Remove(w.Path()); err != nil { r.Log.Errorf("remove %q: %v", w.Path(), err) } // Clean up object subdirectories. (Ignores failure when nonempty.) os.Remove(filepath.Dir(w.Path())) n++ } if live != nil { r.Log.Printf("collected %v objects (%s)", n, data.Size(size)) } return w.Err() } // TempFile creates and returns a new temporary file adjacent to the // repository. Files created by TempFile can be efficiently ingested // by Repository.Install. The caller is responsible for cleaning up // temporary files. 
func (r *Repository) TempFile(prefix string) (*os.File, error) { dir := filepath.Join(r.Root, "tmp") os.MkdirAll(dir, 0777) return ioutil.TempFile(dir, prefix) }
InstallDigest
identifier_name
repository.go
// Copyright 2016 GRAIL, Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package filerepo implements a filesystem-backed repository. It stores // objects in a directory on disk; the objects are named by the // string representation of their digest, i.e., of the form // sha256:d60e67ce9.... package filerepo import ( "context" "io" "io/ioutil" "net/url" "os" "path/filepath" "syscall" "time" "github.com/grailbio/base/data" "github.com/grailbio/base/digest" "github.com/grailbio/reflow" "github.com/grailbio/reflow/errors" "github.com/grailbio/reflow/liveset" "github.com/grailbio/reflow/log" "github.com/grailbio/reflow/repository" "golang.org/x/sync/singleflight" ) // Repository implements a filesystem-backed Repository. type Repository struct { // The root directory for this repository. This directory contains // all objects. Root string Log *log.Logger // RepoURL may be set to a URL that represents this repository. RepoURL *url.URL read, write singleflight.Group } // Path returns the filesystem directory and full path of the object with a given digest. func (r *Repository) Path(id digest.Digest) (dir, path string) { dir = filepath.Join(r.Root, id.Hex()[:2]) return dir, filepath.Join(dir, id.Hex()[2:]) } // Install links the given file into the repository, named by digest. func (r *Repository) Install(file string) (reflow.File, error) { f, err := os.Open(file) if err != nil { return reflow.File{}, err } defer f.Close() w := reflow.Digester.NewWriter() n, err := io.Copy(w, f) if err != nil { return reflow.File{}, err } d := w.Digest() return reflow.File{ID: d, Size: n}, r.InstallDigest(d, file) } // InstallDigest installs a file at the given digest. The caller guarantees // that the file's bytes have the digest d. 
func (r *Repository) InstallDigest(d digest.Digest, file string) error { file, err := filepath.EvalSymlinks(file) if err != nil { return err } dir, path := r.Path(d) if err := os.MkdirAll(dir, 0777); err != nil { return err } err = os.Link(file, path) if os.IsExist(err) { err = nil } if err != nil { // Copy if file was reported to be on a different device. if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV { f, ferr := os.Open(file) if ferr != nil { return ferr } defer func() { _ = f.Close() }() _, err = r.Put(context.Background(), f) } } return err } // Stat retrieves metadata for files stored in the repository. func (r *Repository) Stat(ctx context.Context, id digest.Digest) (reflow.File, error) { _, path := r.Path(id) info, err := os.Stat(path) if err != nil { return reflow.File{}, errors.E("stat", r.Root, id, err) } return reflow.File{ID: id, Size: info.Size()}, nil } // Get retrieves the object named by a digest. func (r *Repository) Get(ctx context.Context, id digest.Digest) (io.ReadCloser, error) { _, path := r.Path(id) rc, err := os.Open(path) if err != nil { return nil, errors.E("get", r.Root, id, err) } return rc, nil } // Remove removes an object from the repository. func (r *Repository) Remove(id digest.Digest) error { _, path := r.Path(id) return os.Remove(path) } // ReadFrom installs an object directly from a foreign repository. If // the foreign repository supports supports GetFile, it is used to // download directly. // // GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error) // // This is used by implementations like S3, which can download // multiple chunks at once, and requires an io.WriterAt so that // chunks needn't be buffered in memory until the download is // contiguous. // // ReadFrom singleflights concurrent downloads from the same key // regardless of repository origin. 
func (r *Repository) ReadFrom(ctx context.Context, id digest.Digest, u *url.URL) error { if ok, _ := r.Contains(id); ok { return nil } _, err, _ := r.read.Do(id.String(), func() (interface{}, error) { repo, err := repository.Dial(u.String()) if err != nil { return nil, err } type getFiler interface { GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error) } if gf, ok := repo.(getFiler); ok { temp, err := r.TempFile("getfile-") if err != nil { return nil, err } defer os.Remove(temp.Name()) _, err = gf.GetFile(ctx, id, temp) if err != nil { _ = temp.Close() return nil, err } err = temp.Close() if err != nil { return nil, err } f, err := r.Install(temp.Name()) if err != nil { return nil, err } if id != f.ID { srcSize := "unknown size" if srcStat, serr := repo.Stat(ctx, id); serr == nil { srcSize = data.Size(srcStat.Size).String() } dstSize := "unknown size" if dstStat, derr := temp.Stat(); derr == nil { dstSize = data.Size(dstStat.Size()).String() } return nil, errors.E("readfrom (GetFile)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, f.ID, dstSize)) } return nil, nil } rc, err := repo.Get(ctx, id) if err != nil { return nil, err } defer rc.Close() id2, err := r.Put(ctx, rc) if err != nil { return nil, err } if id != id2 { srcSize := "unknown size" if srcStat, serr := repo.Stat(ctx, id); serr == nil { srcSize = data.Size(srcStat.Size).String() } dstSize := "unknown size" if dstStat, derr := r.Stat(ctx, id2); derr == nil { dstSize = data.Size(dstStat.Size).String() } return nil, errors.E("readfrom (Get)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, id2, dstSize)) } return nil, nil }) if err != nil { r.Log.Debug(err) } return err } // WriteTo writes an object directly to a foreign repository. // If the foreign repository supports the PutFile method, it is // used. 
// // PutFile(context.Context, reflow.File, io.Reader) error // // PutFile is useful for implementations like S3 which can upload // multiple chunks at a time, and requires direct reader (and, // dynamically, io.Seeker) access. // // WriteTo singleflights requests of the same ID and url. func (r *Repository) WriteTo(ctx context.Context, id digest.Digest, u *url.URL) error { if ok, _ := r.Contains(id); !ok { return errors.E("writeto", r.Root, id, u.String(), errors.NotExist) } _, err, _ := r.write.Do(id.String()+u.String(), func() (interface{}, error) { repo, err := repository.Dial(u.String()) if err != nil { return nil, err } dir, path := r.Path(id) if err := os.MkdirAll(dir, 0777); err != nil { return nil, err } file, err := os.Open(path) if err != nil { return nil, errors.E("get", r.Root, id, err) } defer file.Close() info, err := file.Stat() if err != nil { return nil, err } type putFiler interface { PutFile(context.Context, reflow.File, io.Reader) error } if pf, ok := repo.(putFiler); ok { return nil, pf.PutFile(ctx, reflow.File{ID: id, Size: info.Size()}, file) } id2, err := repo.Put(ctx, file) if err != nil { return nil, err } if id != id2 { return nil, errors.E("writeto", r.Root, id, errors.Integrity, errors.Errorf("%v != %v", id, id2)) } return nil, nil }) return err } // URL returns the url of this repository. func (r *Repository) URL() *url.URL { return r.RepoURL } // Contains tells whether the repository has an object with a digest. func (r *Repository) Contains(id digest.Digest) (bool, error) { _, path := r.Path(id) _, err := os.Stat(path) if os.IsNotExist(err) { return false, nil } else if err != nil { return false, err } return true, nil } // Put installs an object into the repository. Its digest identity is returned. 
func (r *Repository) Put(ctx context.Context, body io.Reader) (digest.Digest, error) { temp, err := r.TempFile("create-") if err != nil { return digest.Digest{}, err } defer os.Remove(temp.Name()) dw := reflow.Digester.NewWriter() done := make(chan error, 1) // This is a workaround to make sure that copies respect // context cancellations. Note that the underlying copy is // not actually cancelled, so this could lead to goroutine // leaks. go func() { _, err = io.Copy(temp, io.TeeReader(body, dw)) temp.Close() done <- err }() select { case <-ctx.Done(): return digest.Digest{}, ctx.Err() case err := <-done: if err != nil { return digest.Digest{}, err } dgst := dw.Digest() return dgst, r.InstallDigest(dgst, temp.Name()) } } // Materialize takes a mapping of path-to-object, and hardlinks the // corresponding objects from the repository into the given root. func (r *Repository) Materialize(root string, binds map[string]digest.Digest) error { dirsMade := map[string]bool{} for path, id := range binds { path = filepath.Join(root, path) dir := filepath.Dir(path) if !dirsMade[dir] { if err := os.MkdirAll(dir, 0777); err != nil { return err } // TODO(marius): also insert parents dirsMade[dir] = true } os.Remove(path) // best effort _, rpath := r.Path(id) if err := os.Link(rpath, path); err != nil { // Copy if file was reported to be on a different device. if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV { f, ferr := os.Create(path) if ferr != nil { return ferr } rc, rcerr := r.Get(context.Background(), id) if rcerr != nil { return rcerr } _, err = io.Copy(f, rc) } return err } } return nil } // Vacuum moves all objects from the given repository to this one. 
func (r *Repository) Vacuum(ctx context.Context, repo *Repository) error { var w walker w.Init(repo) var errs errors.Multi for w.Scan() { if err := r.InstallDigest(w.Digest(), w.Path()); err != nil { errs.Add(errors.E("vacuum", w.Digest(), err)) } } if err := w.Err(); err != nil { errs.Add(err) } _ = repo.Collect(ctx, nil) // ignore errors return errs.Combined() } // Scan invokes handler for each object in the repository. func (r *Repository) Scan(ctx context.Context, handler func(digest.Digest) error) error { var w walker w.Init(r) for w.Scan() { if err := ctx.Err(); err != nil { return err } err := handler(w.Digest()) if err != nil { return err } } return w.Err() } // CollectWithThreshold removes from this repository any objects not in the // Liveset and whose creation times are not more recent than the // threshold time. func (r *Repository) CollectWithThreshold(ctx context.Context, live liveset.Liveset, dead liveset.Liveset, threshold time.Time, dryRun bool) error { return errors.E("collectwiththreshold", errors.NotSupported) } // Collect removes any objects in the repository that are not also in // the live set. func (r *Repository) Collect(ctx context.Context, live liveset.Liveset) error { var w walker w.Init(r) var ( n int size int64 ) for w.Scan()
if live != nil { r.Log.Printf("collected %v objects (%s)", n, data.Size(size)) } return w.Err() } // TempFile creates and returns a new temporary file adjacent to the // repository. Files created by TempFile can be efficiently ingested // by Repository.Install. The caller is responsible for cleaning up // temporary files. func (r *Repository) TempFile(prefix string) (*os.File, error) { dir := filepath.Join(r.Root, "tmp") os.MkdirAll(dir, 0777) return ioutil.TempFile(dir, prefix) }
{ if live != nil && live.Contains(w.Digest()) { continue } size += w.Info().Size() if err := os.Remove(w.Path()); err != nil { r.Log.Errorf("remove %q: %v", w.Path(), err) } // Clean up object subdirectories. (Ignores failure when nonempty.) os.Remove(filepath.Dir(w.Path())) n++ }
conditional_block
repository.go
// Copyright 2016 GRAIL, Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package filerepo implements a filesystem-backed repository. It stores // objects in a directory on disk; the objects are named by the // string representation of their digest, i.e., of the form // sha256:d60e67ce9.... package filerepo import ( "context" "io" "io/ioutil" "net/url" "os" "path/filepath" "syscall" "time" "github.com/grailbio/base/data" "github.com/grailbio/base/digest" "github.com/grailbio/reflow" "github.com/grailbio/reflow/errors" "github.com/grailbio/reflow/liveset" "github.com/grailbio/reflow/log" "github.com/grailbio/reflow/repository" "golang.org/x/sync/singleflight" ) // Repository implements a filesystem-backed Repository. type Repository struct { // The root directory for this repository. This directory contains // all objects. Root string Log *log.Logger // RepoURL may be set to a URL that represents this repository. RepoURL *url.URL read, write singleflight.Group } // Path returns the filesystem directory and full path of the object with a given digest. func (r *Repository) Path(id digest.Digest) (dir, path string) { dir = filepath.Join(r.Root, id.Hex()[:2]) return dir, filepath.Join(dir, id.Hex()[2:]) } // Install links the given file into the repository, named by digest. func (r *Repository) Install(file string) (reflow.File, error) { f, err := os.Open(file) if err != nil { return reflow.File{}, err } defer f.Close() w := reflow.Digester.NewWriter() n, err := io.Copy(w, f) if err != nil { return reflow.File{}, err } d := w.Digest() return reflow.File{ID: d, Size: n}, r.InstallDigest(d, file) } // InstallDigest installs a file at the given digest. The caller guarantees // that the file's bytes have the digest d. 
func (r *Repository) InstallDigest(d digest.Digest, file string) error { file, err := filepath.EvalSymlinks(file) if err != nil { return err } dir, path := r.Path(d) if err := os.MkdirAll(dir, 0777); err != nil { return err } err = os.Link(file, path) if os.IsExist(err) { err = nil } if err != nil { // Copy if file was reported to be on a different device. if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV { f, ferr := os.Open(file) if ferr != nil { return ferr } defer func() { _ = f.Close() }() _, err = r.Put(context.Background(), f) } } return err } // Stat retrieves metadata for files stored in the repository. func (r *Repository) Stat(ctx context.Context, id digest.Digest) (reflow.File, error) { _, path := r.Path(id) info, err := os.Stat(path) if err != nil { return reflow.File{}, errors.E("stat", r.Root, id, err) } return reflow.File{ID: id, Size: info.Size()}, nil } // Get retrieves the object named by a digest. func (r *Repository) Get(ctx context.Context, id digest.Digest) (io.ReadCloser, error) { _, path := r.Path(id) rc, err := os.Open(path) if err != nil { return nil, errors.E("get", r.Root, id, err) } return rc, nil } // Remove removes an object from the repository. func (r *Repository) Remove(id digest.Digest) error { _, path := r.Path(id) return os.Remove(path) } // ReadFrom installs an object directly from a foreign repository. If // the foreign repository supports supports GetFile, it is used to // download directly. // // GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error) // // This is used by implementations like S3, which can download // multiple chunks at once, and requires an io.WriterAt so that // chunks needn't be buffered in memory until the download is // contiguous. // // ReadFrom singleflights concurrent downloads from the same key // regardless of repository origin. 
func (r *Repository) ReadFrom(ctx context.Context, id digest.Digest, u *url.URL) error { if ok, _ := r.Contains(id); ok { return nil } _, err, _ := r.read.Do(id.String(), func() (interface{}, error) { repo, err := repository.Dial(u.String()) if err != nil { return nil, err } type getFiler interface { GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error) } if gf, ok := repo.(getFiler); ok { temp, err := r.TempFile("getfile-") if err != nil { return nil, err } defer os.Remove(temp.Name()) _, err = gf.GetFile(ctx, id, temp) if err != nil { _ = temp.Close() return nil, err } err = temp.Close() if err != nil { return nil, err } f, err := r.Install(temp.Name()) if err != nil { return nil, err } if id != f.ID { srcSize := "unknown size" if srcStat, serr := repo.Stat(ctx, id); serr == nil { srcSize = data.Size(srcStat.Size).String() } dstSize := "unknown size" if dstStat, derr := temp.Stat(); derr == nil { dstSize = data.Size(dstStat.Size()).String() } return nil, errors.E("readfrom (GetFile)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, f.ID, dstSize)) } return nil, nil } rc, err := repo.Get(ctx, id) if err != nil { return nil, err } defer rc.Close() id2, err := r.Put(ctx, rc) if err != nil { return nil, err } if id != id2 { srcSize := "unknown size" if srcStat, serr := repo.Stat(ctx, id); serr == nil { srcSize = data.Size(srcStat.Size).String() } dstSize := "unknown size" if dstStat, derr := r.Stat(ctx, id2); derr == nil { dstSize = data.Size(dstStat.Size).String() } return nil, errors.E("readfrom (Get)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, id2, dstSize)) } return nil, nil }) if err != nil { r.Log.Debug(err) } return err } // WriteTo writes an object directly to a foreign repository. // If the foreign repository supports the PutFile method, it is // used. 
// // PutFile(context.Context, reflow.File, io.Reader) error // // PutFile is useful for implementations like S3 which can upload // multiple chunks at a time, and requires direct reader (and, // dynamically, io.Seeker) access. // // WriteTo singleflights requests of the same ID and url. func (r *Repository) WriteTo(ctx context.Context, id digest.Digest, u *url.URL) error { if ok, _ := r.Contains(id); !ok { return errors.E("writeto", r.Root, id, u.String(), errors.NotExist) } _, err, _ := r.write.Do(id.String()+u.String(), func() (interface{}, error) { repo, err := repository.Dial(u.String()) if err != nil { return nil, err } dir, path := r.Path(id) if err := os.MkdirAll(dir, 0777); err != nil { return nil, err } file, err := os.Open(path) if err != nil { return nil, errors.E("get", r.Root, id, err) } defer file.Close() info, err := file.Stat() if err != nil { return nil, err } type putFiler interface { PutFile(context.Context, reflow.File, io.Reader) error } if pf, ok := repo.(putFiler); ok { return nil, pf.PutFile(ctx, reflow.File{ID: id, Size: info.Size()}, file) } id2, err := repo.Put(ctx, file) if err != nil { return nil, err } if id != id2 { return nil, errors.E("writeto", r.Root, id, errors.Integrity, errors.Errorf("%v != %v", id, id2)) } return nil, nil }) return err } // URL returns the url of this repository. func (r *Repository) URL() *url.URL { return r.RepoURL } // Contains tells whether the repository has an object with a digest. func (r *Repository) Contains(id digest.Digest) (bool, error) { _, path := r.Path(id) _, err := os.Stat(path) if os.IsNotExist(err) { return false, nil } else if err != nil { return false, err } return true, nil } // Put installs an object into the repository. Its digest identity is returned. func (r *Repository) Put(ctx context.Context, body io.Reader) (digest.Digest, error)
// Materialize takes a mapping of path-to-object, and hardlinks the // corresponding objects from the repository into the given root. func (r *Repository) Materialize(root string, binds map[string]digest.Digest) error { dirsMade := map[string]bool{} for path, id := range binds { path = filepath.Join(root, path) dir := filepath.Dir(path) if !dirsMade[dir] { if err := os.MkdirAll(dir, 0777); err != nil { return err } // TODO(marius): also insert parents dirsMade[dir] = true } os.Remove(path) // best effort _, rpath := r.Path(id) if err := os.Link(rpath, path); err != nil { // Copy if file was reported to be on a different device. if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV { f, ferr := os.Create(path) if ferr != nil { return ferr } rc, rcerr := r.Get(context.Background(), id) if rcerr != nil { return rcerr } _, err = io.Copy(f, rc) } return err } } return nil } // Vacuum moves all objects from the given repository to this one. func (r *Repository) Vacuum(ctx context.Context, repo *Repository) error { var w walker w.Init(repo) var errs errors.Multi for w.Scan() { if err := r.InstallDigest(w.Digest(), w.Path()); err != nil { errs.Add(errors.E("vacuum", w.Digest(), err)) } } if err := w.Err(); err != nil { errs.Add(err) } _ = repo.Collect(ctx, nil) // ignore errors return errs.Combined() } // Scan invokes handler for each object in the repository. func (r *Repository) Scan(ctx context.Context, handler func(digest.Digest) error) error { var w walker w.Init(r) for w.Scan() { if err := ctx.Err(); err != nil { return err } err := handler(w.Digest()) if err != nil { return err } } return w.Err() } // CollectWithThreshold removes from this repository any objects not in the // Liveset and whose creation times are not more recent than the // threshold time. 
func (r *Repository) CollectWithThreshold(ctx context.Context, live liveset.Liveset, dead liveset.Liveset, threshold time.Time, dryRun bool) error { return errors.E("collectwiththreshold", errors.NotSupported) } // Collect removes any objects in the repository that are not also in // the live set. func (r *Repository) Collect(ctx context.Context, live liveset.Liveset) error { var w walker w.Init(r) var ( n int size int64 ) for w.Scan() { if live != nil && live.Contains(w.Digest()) { continue } size += w.Info().Size() if err := os.Remove(w.Path()); err != nil { r.Log.Errorf("remove %q: %v", w.Path(), err) } // Clean up object subdirectories. (Ignores failure when nonempty.) os.Remove(filepath.Dir(w.Path())) n++ } if live != nil { r.Log.Printf("collected %v objects (%s)", n, data.Size(size)) } return w.Err() } // TempFile creates and returns a new temporary file adjacent to the // repository. Files created by TempFile can be efficiently ingested // by Repository.Install. The caller is responsible for cleaning up // temporary files. func (r *Repository) TempFile(prefix string) (*os.File, error) { dir := filepath.Join(r.Root, "tmp") os.MkdirAll(dir, 0777) return ioutil.TempFile(dir, prefix) }
{ temp, err := r.TempFile("create-") if err != nil { return digest.Digest{}, err } defer os.Remove(temp.Name()) dw := reflow.Digester.NewWriter() done := make(chan error, 1) // This is a workaround to make sure that copies respect // context cancellations. Note that the underlying copy is // not actually cancelled, so this could lead to goroutine // leaks. go func() { _, err = io.Copy(temp, io.TeeReader(body, dw)) temp.Close() done <- err }() select { case <-ctx.Done(): return digest.Digest{}, ctx.Err() case err := <-done: if err != nil { return digest.Digest{}, err } dgst := dw.Digest() return dgst, r.InstallDigest(dgst, temp.Name()) } }
identifier_body
pre_train.py
# coding:utf-8 # Produced by Andysin Zhang # 23_Oct_2019 # Inspired By the original Bert, Appreciate for the wonderful work # # Copyright 2019 TCL Inc. All Rights Reserverd. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """"Run masked LM/next sentence masked_lm pre-training for ALBERT.""" import sys import functools import tensorflow as tf # tf.enable_eager_execution() from pathlib import Path PROJECT_PATH = Path(__file__).absolute().parent sys.path.insert(0, str(PROJECT_PATH)) from utils.setup import Setup setup = Setup() import optimization from model import BertModel from model_helper import * from config import bert_config from load_data import train_input_fn, serving_input_receiver_fn from utils.log import log_info as _info from utils.log import log_error as _error # Prototype for tf.estimator def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps): """Returns 'model_fn' closure for Estomator, use closure is because of building the model requires some paramters, sending them into the 'params' is not a good deal.""" def model_fn(features, labels, mode, params): """this is prototype syntax, all parameters are necessary.""" # obtain the data _info('*** Features ***') for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features['input_ids'] # [batch_size, seq_length] input_mask = features['input_mask'] # 
[batch_size, seq_length] # if mode != tf.estimator.ModeKeys.PREDICT: # # segment_idx = features['segment_dis'] # masked_lm_positions = features['masked_lm_positions'] # [batch_size, seq_length], specify the answer # masked_lm_ids = features['masked_lm_ids'] # [batch_size, answer_seq_length], specify the answer labels # masked_lm_weights = features['masked_lm_weights'] # [batch_size, seq_length], [1, 1, 0], 0 refers to the mask # # next_sentence_labels = features['next_sentence_labels'] # else: masked_lm_positions = features['masked_lm_positions'] masked_lm_ids = features['masked_lm_ids'] masked_lm_weights = features['masked_lm_weights'] if bert_config.train_type == 'seq2seq': _info('Training seq2seq task.') elif bert_config.train_type == 'lm': _info('Training language model task.') # build model is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask) # compute loss loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config, model.get_sequence_output(), model.embedding_table, model.projection_table, masked_lm_positions, masked_lm_ids, masked_lm_weights, mode) if mode == tf.estimator.ModeKeys.PREDICT: masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1]) output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions) else: if mode == tf.estimator.ModeKeys.TRAIN: # restore from the checkpoint, # tf.estimator automatically restore from the model typically, # maybe here is for restore some pre-trained parameters tvars = tf.trainable_variables() initialized_variable_names = {} if init_checkpoint: (assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) _info('*** Trainable Variables ***') for var in tvars: init_string = '' if var.name in initialized_variable_names: init_string = ', 
*INIT_FROM_CKPT*' _info('name = {}, shape={}{}'.format(var.name, var.shape, init_string)) train_op = optimization.create_optimizer( loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit) # learning_rate = tf.train.polynomial_decay(bert_config.learning_rate,
# num_train_steps, # end_learning_rate=0.0, # power=1.0, # cycle=False) # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) # gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True) # clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0) # train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step()) output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) elif mode == tf.estimator.ModeKeys.EVAL: is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32) def metric_fn(loss, label_ids, logits, is_real_example): """ Args: loss: tf.float32. label_ids: [b, s]. logits: [b, s, v]. """ # [b * s, v] logits = tf.reshape(logits, [-1, logits.shape[-1]]) # [b * s, 1] predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) # [b * s] label_ids = tf.reshape(label_ids, [-1]) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions) loss = tf.metrics.mean(values=loss) return {'eval_accuracy': accuracy, 'eval_loss': loss} eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example) output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, embedding_table, projection_table, positions, label_ids, label_weights, mode): """Get the loss for the answer according to the mask. Args: bert_config: config for bert. input_tensor: float Tensor of shape [batch_size, seq_length, witdh]. embedding_table: [vocab_size, embedding_size]. projection_table: [embedding_size, hidden_size]. positions: tf.int32, which saves the positions for answers. label_ids: tf.int32, which is the true labels. label_weights: tf.int32, which is refers to the padding. Returns: loss: average word loss. per_loss: per word loss. log_probs: log probability. 
""" predicted_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope('cls/predictions'): with tf.variable_scope('transform'): input_tensor = tf.layers.dense( predicted_tensor, units=bert_config.hidden_size, activation=gelu, kernel_initializer=create_initializer(bert_config.initializer_range)) input_tensor = layer_norm(input_tensor) output_bias = tf.get_variable( 'output_bias', shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) input_project = tf.matmul(input_tensor, projection_table, transpose_b=True) logits = tf.matmul(input_project, embedding_table, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) # [some_length, vocab_size] log_probs = tf.nn.log_softmax(logits, axis=-1) if mode == tf.estimator.ModeKeys.PREDICT: return None, None, tf.nn.softmax(logits, axis=-1) # [some_length], no need to cast to tf.float32 label_ids = tf.reshape(label_ids, [-1]) # [some_length] label_weights = tf.cast(tf.reshape(label_ids, [-1]), dtype=tf.float32) # [some_length, vocab_size] one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size) # [some_length, 1] per_loss = - tf.reduce_sum(log_probs * one_hot_labels, axis=-1) # ignore padding numerator = tf.reduce_sum(label_weights * per_loss) # the number of predicted items denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return loss, per_loss, log_probs, logits def gather_indexes(input_tensor, positions): """Gather all the predicted tensor, input_tensor contains all the positions, however, only maksed positions are used for calculating the loss. Args: input_tensor: float Tensor of shape [batch_size, seq_length, width]. positions: save the relative positions of each sentence's labels. Returns: output_tensor: [some_length, width], where some_length refers to all the predicted labels in the data batch. 
""" input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] # create a vector which saves the initial positions for each batch flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) # get the absolute positions for the predicted labels, [batch_size * seq_length, 1] flat_postions = tf.reshape(positions + flat_offsets, [-1]) flat_input_tensor = tf.reshape(input_tensor, [batch_size * seq_length, width]) # obtain the predicted items, [some_lenght, width] output_tensor = tf.gather(flat_input_tensor, flat_postions) return output_tensor def main(): # tf.gfile.MakeDirs(FLAGS.output_dir) Path(bert_config.model_dir).mkdir(exist_ok=True) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=bert_config.init_checkpoint, learning_rate=bert_config.learning_rate, num_train_steps=bert_config.num_train_steps) input_fn = functools.partial(train_input_fn, path=bert_config.data_path, batch_size=bert_config.batch_size, repeat_num=bert_config.num_train_steps, max_length = bert_config.max_length, train_type=bert_config.train_type, reverse=bert_config.reverse) run_config = tf.contrib.tpu.RunConfig( keep_checkpoint_max=1, save_checkpoints_steps=1000, model_dir=bert_config.model_dir) estimator = tf.estimator.Estimator(model_fn, config=run_config) estimator.train(input_fn) # train_spec = tf.estimator.TrainSpec(input_fn=input_fn) # eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1000) # tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # for evaluation, the repeat_num in input_fn has to be reset # estimator.evaluate(input_fn) def package_model(model_path, pb_path): model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=None, learning_rate=bert_config.learning_rate, num_train_steps=bert_config.num_train_steps) estimator = tf.estimator.Estimator(model_fn, model_path) estimator.export_saved_model(pb_path, 
serving_input_receiver_fn) if __name__ == '__main__': main() # package_model('models/', 'models_to_deploy/') """the following code is just for test.""" # import codecs # from load_data import create_mask_for_lm, create_mask_for_seq # with codecs.open('data/vocab.txt', 'r', 'utf-8') as file: # vocab_idx = {} # idx_vocab = {} # for idx, vocab in enumerate(file): # vocab = vocab.strip() # idx = int(idx) # vocab_idx[vocab] = idx # idx_vocab[idx] = vocab # model_fn = model_fn_builder( # bert_config=bert_config, # init_checkpoint=bert_config.init_checkpoint, # learning_rate=bert_config.learning_rate, # num_train_steps=bert_config.num_train_steps) # estimator = tf.estimator.Estimator(model_fn, 'models/') # def convert_to_idx(line): # """convert the vocab to idx.""" # result = [] # for vocab in line: # try: # result.append(vocab_idx[vocab]) # except KeyError: # result.append(vocab_idx['<unk>']) # return result # def parse_data(path): # """process the data.""" # with codecs.open(path, 'r', 'utf-8') as file: # questions = [] # answers = [] # for line in file: # line = line.strip().split('=') # que, ans = convert_to_idx(line[0]), convert_to_idx(line[1]) # questions.append(que) # answers.append(ans) # assert len(questions) == len(answers) # # get max length to pad # length = [len(ans) + len(que) for ans, que in zip(questions, answers)] # max_length = max(length) # return questions, answers, max_length # def train_generator(path): # """"This is the entrance to the input_fn.""" # questions, answers, max_length = parse_data(path) # for que, ans in zip(questions, answers): # input_ids = que + ans # padding_part = [vocab_idx['<padding>'] for _ in range(max_length - len(input_ids))] # input_ids += padding_part # # input_mask: -> [1, 1, 1, 0, 0], # # where 1 indicates the question part, 0 indicates both the answer part and padding part. 
# input_mask = [1 for _ in range(len(que))] + [0 for _ in range(len(ans + padding_part))] # input_mask = create_mask_for_seq(input_mask, len(que), len(ans + padding_part)) # # masked_lm_positions saves the relative positions for answer part and padding part. # # [[2, 3, 4, 5, 6], [5, 6]] # masked_lm_positions = [idx + len(que) for idx in range(len(input_ids) - len(que))] # # ATTENTION: the above `masked_lm_positions` is not in the same length due to the various length of question, # # so padding the `masked_lm_positions` to the same length as input_ids, # # although the padding items are fake, the following `mask_lm_weights` will handle this. # masked_lm_positions += [masked_lm_positions[-1] + 1 + idx for idx in range(len(input_ids) - len(masked_lm_positions))] # mask_lm_ids = ans + padding_part # mask_lm_ids += [vocab_idx['<padding>'] for _ in range(len(input_ids) - len(mask_lm_ids))] # mask_lm_weights = [1 for _ in range(len(ans))] + [0 for _ in range(len(padding_part))] # mask_lm_weights += [0 for _ in range(len(input_ids) - len(mask_lm_weights))] # # input_ids = [input_ids] # # input_mask = [input_mask] # # masked_lm_positions = [masked_lm_positions] # # mask_lm_ids = [mask_lm_ids] # # mask_lm_weights = [mask_lm_weights] # # print(que) # # print(ans) # # print(len(input_ids)) # # print(len(input_mask)) # # print(len(masked_lm_positions)) # # print(len(mask_lm_ids)) # # print(len(mask_lm_weights)) # # input() # features = {'input_ids': input_ids, # 'input_mask': input_mask, # 'masked_lm_positions': masked_lm_positions, # 'masked_lm_ids': mask_lm_ids, # 'masked_lm_weights': mask_lm_weights} # yield features # ## we don't know the input as a web server, so use lambda to create fake generator # def example_input_fn(data): # output_types = {'input_ids': tf.int32, # 'input_mask': tf.int32, # 'masked_lm_positions': tf.int32, # 'masked_lm_ids': tf.int32, # 'masked_lm_weights': tf.int32} # output_shape = {'input_ids': [None], # 'input_mask': [None, None], # 
'masked_lm_positions': [None], # 'masked_lm_ids': [None], # 'masked_lm_weights': [None]} # dataset = tf.data.Dataset.from_generator( # lambda: [data], # output_types=output_types, # output_shapes=output_shape) # # dataset = dataset.batch(batch_size).repeat(repeat_num) # iterator = dataset.batch(1).make_one_shot_iterator() # next_element = iterator.get_next() # return next_element, None # for data in train_generator('data/test.data'): # print(data) # example_inpf = functools.partial(example_input_fn, data) # for pred in estimator.predict(example_inpf): # print(pred) # input(idx_vocab[pred])
# tf.train.get_or_create_global_step(),
random_line_split
pre_train.py
# coding:utf-8 # Produced by Andysin Zhang # 23_Oct_2019 # Inspired By the original Bert, Appreciate for the wonderful work # # Copyright 2019 TCL Inc. All Rights Reserverd. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """"Run masked LM/next sentence masked_lm pre-training for ALBERT.""" import sys import functools import tensorflow as tf # tf.enable_eager_execution() from pathlib import Path PROJECT_PATH = Path(__file__).absolute().parent sys.path.insert(0, str(PROJECT_PATH)) from utils.setup import Setup setup = Setup() import optimization from model import BertModel from model_helper import * from config import bert_config from load_data import train_input_fn, serving_input_receiver_fn from utils.log import log_info as _info from utils.log import log_error as _error # Prototype for tf.estimator def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps): """Returns 'model_fn' closure for Estomator, use closure is because of building the model requires some paramters, sending them into the 'params' is not a good deal.""" def model_fn(features, labels, mode, params): """this is prototype syntax, all parameters are necessary.""" # obtain the data _info('*** Features ***') for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features['input_ids'] # [batch_size, seq_length] input_mask = features['input_mask'] # 
[batch_size, seq_length] # if mode != tf.estimator.ModeKeys.PREDICT: # # segment_idx = features['segment_dis'] # masked_lm_positions = features['masked_lm_positions'] # [batch_size, seq_length], specify the answer # masked_lm_ids = features['masked_lm_ids'] # [batch_size, answer_seq_length], specify the answer labels # masked_lm_weights = features['masked_lm_weights'] # [batch_size, seq_length], [1, 1, 0], 0 refers to the mask # # next_sentence_labels = features['next_sentence_labels'] # else: masked_lm_positions = features['masked_lm_positions'] masked_lm_ids = features['masked_lm_ids'] masked_lm_weights = features['masked_lm_weights'] if bert_config.train_type == 'seq2seq': _info('Training seq2seq task.') elif bert_config.train_type == 'lm': _info('Training language model task.') # build model is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask) # compute loss loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config, model.get_sequence_output(), model.embedding_table, model.projection_table, masked_lm_positions, masked_lm_ids, masked_lm_weights, mode) if mode == tf.estimator.ModeKeys.PREDICT: masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1]) output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions) else: if mode == tf.estimator.ModeKeys.TRAIN: # restore from the checkpoint, # tf.estimator automatically restore from the model typically, # maybe here is for restore some pre-trained parameters tvars = tf.trainable_variables() initialized_variable_names = {} if init_checkpoint: (assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) _info('*** Trainable Variables ***') for var in tvars: init_string = '' if var.name in initialized_variable_names: init_string = ', 
*INIT_FROM_CKPT*' _info('name = {}, shape={}{}'.format(var.name, var.shape, init_string)) train_op = optimization.create_optimizer( loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit) # learning_rate = tf.train.polynomial_decay(bert_config.learning_rate, # tf.train.get_or_create_global_step(), # num_train_steps, # end_learning_rate=0.0, # power=1.0, # cycle=False) # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) # gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True) # clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0) # train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step()) output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) elif mode == tf.estimator.ModeKeys.EVAL: is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32) def metric_fn(loss, label_ids, logits, is_real_example):
eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example) output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, embedding_table, projection_table, positions, label_ids, label_weights, mode): """Get the loss for the answer according to the mask. Args: bert_config: config for bert. input_tensor: float Tensor of shape [batch_size, seq_length, witdh]. embedding_table: [vocab_size, embedding_size]. projection_table: [embedding_size, hidden_size]. positions: tf.int32, which saves the positions for answers. label_ids: tf.int32, which is the true labels. label_weights: tf.int32, which is refers to the padding. Returns: loss: average word loss. per_loss: per word loss. log_probs: log probability. """ predicted_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope('cls/predictions'): with tf.variable_scope('transform'): input_tensor = tf.layers.dense( predicted_tensor, units=bert_config.hidden_size, activation=gelu, kernel_initializer=create_initializer(bert_config.initializer_range)) input_tensor = layer_norm(input_tensor) output_bias = tf.get_variable( 'output_bias', shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) input_project = tf.matmul(input_tensor, projection_table, transpose_b=True) logits = tf.matmul(input_project, embedding_table, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) # [some_length, vocab_size] log_probs = tf.nn.log_softmax(logits, axis=-1) if mode == tf.estimator.ModeKeys.PREDICT: return None, None, tf.nn.softmax(logits, axis=-1) # [some_length], no need to cast to tf.float32 label_ids = tf.reshape(label_ids, [-1]) # [some_length] label_weights = tf.cast(tf.reshape(label_ids, [-1]), dtype=tf.float32) # [some_length, vocab_size] one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size) # [some_length, 1] per_loss = - tf.reduce_sum(log_probs * 
one_hot_labels, axis=-1) # ignore padding numerator = tf.reduce_sum(label_weights * per_loss) # the number of predicted items denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return loss, per_loss, log_probs, logits def gather_indexes(input_tensor, positions): """Gather all the predicted tensor, input_tensor contains all the positions, however, only maksed positions are used for calculating the loss. Args: input_tensor: float Tensor of shape [batch_size, seq_length, width]. positions: save the relative positions of each sentence's labels. Returns: output_tensor: [some_length, width], where some_length refers to all the predicted labels in the data batch. """ input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] # create a vector which saves the initial positions for each batch flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) # get the absolute positions for the predicted labels, [batch_size * seq_length, 1] flat_postions = tf.reshape(positions + flat_offsets, [-1]) flat_input_tensor = tf.reshape(input_tensor, [batch_size * seq_length, width]) # obtain the predicted items, [some_lenght, width] output_tensor = tf.gather(flat_input_tensor, flat_postions) return output_tensor def main(): # tf.gfile.MakeDirs(FLAGS.output_dir) Path(bert_config.model_dir).mkdir(exist_ok=True) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=bert_config.init_checkpoint, learning_rate=bert_config.learning_rate, num_train_steps=bert_config.num_train_steps) input_fn = functools.partial(train_input_fn, path=bert_config.data_path, batch_size=bert_config.batch_size, repeat_num=bert_config.num_train_steps, max_length = bert_config.max_length, train_type=bert_config.train_type, reverse=bert_config.reverse) run_config = tf.contrib.tpu.RunConfig( keep_checkpoint_max=1, save_checkpoints_steps=1000, 
model_dir=bert_config.model_dir) estimator = tf.estimator.Estimator(model_fn, config=run_config) estimator.train(input_fn) # train_spec = tf.estimator.TrainSpec(input_fn=input_fn) # eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1000) # tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # for evaluation, the repeat_num in input_fn has to be reset # estimator.evaluate(input_fn) def package_model(model_path, pb_path): model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=None, learning_rate=bert_config.learning_rate, num_train_steps=bert_config.num_train_steps) estimator = tf.estimator.Estimator(model_fn, model_path) estimator.export_saved_model(pb_path, serving_input_receiver_fn) if __name__ == '__main__': main() # package_model('models/', 'models_to_deploy/') """the following code is just for test.""" # import codecs # from load_data import create_mask_for_lm, create_mask_for_seq # with codecs.open('data/vocab.txt', 'r', 'utf-8') as file: # vocab_idx = {} # idx_vocab = {} # for idx, vocab in enumerate(file): # vocab = vocab.strip() # idx = int(idx) # vocab_idx[vocab] = idx # idx_vocab[idx] = vocab # model_fn = model_fn_builder( # bert_config=bert_config, # init_checkpoint=bert_config.init_checkpoint, # learning_rate=bert_config.learning_rate, # num_train_steps=bert_config.num_train_steps) # estimator = tf.estimator.Estimator(model_fn, 'models/') # def convert_to_idx(line): # """convert the vocab to idx.""" # result = [] # for vocab in line: # try: # result.append(vocab_idx[vocab]) # except KeyError: # result.append(vocab_idx['<unk>']) # return result # def parse_data(path): # """process the data.""" # with codecs.open(path, 'r', 'utf-8') as file: # questions = [] # answers = [] # for line in file: # line = line.strip().split('=') # que, ans = convert_to_idx(line[0]), convert_to_idx(line[1]) # questions.append(que) # answers.append(ans) # assert len(questions) == len(answers) # # get max length to pad # length = [len(ans) + 
len(que) for ans, que in zip(questions, answers)] # max_length = max(length) # return questions, answers, max_length # def train_generator(path): # """"This is the entrance to the input_fn.""" # questions, answers, max_length = parse_data(path) # for que, ans in zip(questions, answers): # input_ids = que + ans # padding_part = [vocab_idx['<padding>'] for _ in range(max_length - len(input_ids))] # input_ids += padding_part # # input_mask: -> [1, 1, 1, 0, 0], # # where 1 indicates the question part, 0 indicates both the answer part and padding part. # input_mask = [1 for _ in range(len(que))] + [0 for _ in range(len(ans + padding_part))] # input_mask = create_mask_for_seq(input_mask, len(que), len(ans + padding_part)) # # masked_lm_positions saves the relative positions for answer part and padding part. # # [[2, 3, 4, 5, 6], [5, 6]] # masked_lm_positions = [idx + len(que) for idx in range(len(input_ids) - len(que))] # # ATTENTION: the above `masked_lm_positions` is not in the same length due to the various length of question, # # so padding the `masked_lm_positions` to the same length as input_ids, # # although the padding items are fake, the following `mask_lm_weights` will handle this. 
# masked_lm_positions += [masked_lm_positions[-1] + 1 + idx for idx in range(len(input_ids) - len(masked_lm_positions))] # mask_lm_ids = ans + padding_part # mask_lm_ids += [vocab_idx['<padding>'] for _ in range(len(input_ids) - len(mask_lm_ids))] # mask_lm_weights = [1 for _ in range(len(ans))] + [0 for _ in range(len(padding_part))] # mask_lm_weights += [0 for _ in range(len(input_ids) - len(mask_lm_weights))] # # input_ids = [input_ids] # # input_mask = [input_mask] # # masked_lm_positions = [masked_lm_positions] # # mask_lm_ids = [mask_lm_ids] # # mask_lm_weights = [mask_lm_weights] # # print(que) # # print(ans) # # print(len(input_ids)) # # print(len(input_mask)) # # print(len(masked_lm_positions)) # # print(len(mask_lm_ids)) # # print(len(mask_lm_weights)) # # input() # features = {'input_ids': input_ids, # 'input_mask': input_mask, # 'masked_lm_positions': masked_lm_positions, # 'masked_lm_ids': mask_lm_ids, # 'masked_lm_weights': mask_lm_weights} # yield features # ## we don't know the input as a web server, so use lambda to create fake generator # def example_input_fn(data): # output_types = {'input_ids': tf.int32, # 'input_mask': tf.int32, # 'masked_lm_positions': tf.int32, # 'masked_lm_ids': tf.int32, # 'masked_lm_weights': tf.int32} # output_shape = {'input_ids': [None], # 'input_mask': [None, None], # 'masked_lm_positions': [None], # 'masked_lm_ids': [None], # 'masked_lm_weights': [None]} # dataset = tf.data.Dataset.from_generator( # lambda: [data], # output_types=output_types, # output_shapes=output_shape) # # dataset = dataset.batch(batch_size).repeat(repeat_num) # iterator = dataset.batch(1).make_one_shot_iterator() # next_element = iterator.get_next() # return next_element, None # for data in train_generator('data/test.data'): # print(data) # example_inpf = functools.partial(example_input_fn, data) # for pred in estimator.predict(example_inpf): # print(pred) # input(idx_vocab[pred])
""" Args: loss: tf.float32. label_ids: [b, s]. logits: [b, s, v]. """ # [b * s, v] logits = tf.reshape(logits, [-1, logits.shape[-1]]) # [b * s, 1] predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) # [b * s] label_ids = tf.reshape(label_ids, [-1]) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions) loss = tf.metrics.mean(values=loss) return {'eval_accuracy': accuracy, 'eval_loss': loss}
identifier_body
pre_train.py
# coding:utf-8 # Produced by Andysin Zhang # 23_Oct_2019 # Inspired By the original Bert, Appreciate for the wonderful work # # Copyright 2019 TCL Inc. All Rights Reserverd. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """"Run masked LM/next sentence masked_lm pre-training for ALBERT.""" import sys import functools import tensorflow as tf # tf.enable_eager_execution() from pathlib import Path PROJECT_PATH = Path(__file__).absolute().parent sys.path.insert(0, str(PROJECT_PATH)) from utils.setup import Setup setup = Setup() import optimization from model import BertModel from model_helper import * from config import bert_config from load_data import train_input_fn, serving_input_receiver_fn from utils.log import log_info as _info from utils.log import log_error as _error # Prototype for tf.estimator def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps): """Returns 'model_fn' closure for Estomator, use closure is because of building the model requires some paramters, sending them into the 'params' is not a good deal.""" def model_fn(features, labels, mode, params): """this is prototype syntax, all parameters are necessary.""" # obtain the data _info('*** Features ***') for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features['input_ids'] # [batch_size, seq_length] input_mask = features['input_mask'] # 
[batch_size, seq_length] # if mode != tf.estimator.ModeKeys.PREDICT: # # segment_idx = features['segment_dis'] # masked_lm_positions = features['masked_lm_positions'] # [batch_size, seq_length], specify the answer # masked_lm_ids = features['masked_lm_ids'] # [batch_size, answer_seq_length], specify the answer labels # masked_lm_weights = features['masked_lm_weights'] # [batch_size, seq_length], [1, 1, 0], 0 refers to the mask # # next_sentence_labels = features['next_sentence_labels'] # else: masked_lm_positions = features['masked_lm_positions'] masked_lm_ids = features['masked_lm_ids'] masked_lm_weights = features['masked_lm_weights'] if bert_config.train_type == 'seq2seq': _info('Training seq2seq task.') elif bert_config.train_type == 'lm': _info('Training language model task.') # build model is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask) # compute loss loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config, model.get_sequence_output(), model.embedding_table, model.projection_table, masked_lm_positions, masked_lm_ids, masked_lm_weights, mode) if mode == tf.estimator.ModeKeys.PREDICT: masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1]) output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions) else: if mode == tf.estimator.ModeKeys.TRAIN: # restore from the checkpoint, # tf.estimator automatically restore from the model typically, # maybe here is for restore some pre-trained parameters tvars = tf.trainable_variables() initialized_variable_names = {} if init_checkpoint: (assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) _info('*** Trainable Variables ***') for var in tvars: init_string = '' if var.name in initialized_variable_names: init_string = ', 
*INIT_FROM_CKPT*' _info('name = {}, shape={}{}'.format(var.name, var.shape, init_string)) train_op = optimization.create_optimizer( loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit) # learning_rate = tf.train.polynomial_decay(bert_config.learning_rate, # tf.train.get_or_create_global_step(), # num_train_steps, # end_learning_rate=0.0, # power=1.0, # cycle=False) # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) # gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True) # clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0) # train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step()) output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) elif mode == tf.estimator.ModeKeys.EVAL: is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32) def
(loss, label_ids, logits, is_real_example): """ Args: loss: tf.float32. label_ids: [b, s]. logits: [b, s, v]. """ # [b * s, v] logits = tf.reshape(logits, [-1, logits.shape[-1]]) # [b * s, 1] predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) # [b * s] label_ids = tf.reshape(label_ids, [-1]) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions) loss = tf.metrics.mean(values=loss) return {'eval_accuracy': accuracy, 'eval_loss': loss} eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example) output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, embedding_table, projection_table, positions, label_ids, label_weights, mode): """Get the loss for the answer according to the mask. Args: bert_config: config for bert. input_tensor: float Tensor of shape [batch_size, seq_length, witdh]. embedding_table: [vocab_size, embedding_size]. projection_table: [embedding_size, hidden_size]. positions: tf.int32, which saves the positions for answers. label_ids: tf.int32, which is the true labels. label_weights: tf.int32, which is refers to the padding. Returns: loss: average word loss. per_loss: per word loss. log_probs: log probability. 
""" predicted_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope('cls/predictions'): with tf.variable_scope('transform'): input_tensor = tf.layers.dense( predicted_tensor, units=bert_config.hidden_size, activation=gelu, kernel_initializer=create_initializer(bert_config.initializer_range)) input_tensor = layer_norm(input_tensor) output_bias = tf.get_variable( 'output_bias', shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) input_project = tf.matmul(input_tensor, projection_table, transpose_b=True) logits = tf.matmul(input_project, embedding_table, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) # [some_length, vocab_size] log_probs = tf.nn.log_softmax(logits, axis=-1) if mode == tf.estimator.ModeKeys.PREDICT: return None, None, tf.nn.softmax(logits, axis=-1) # [some_length], no need to cast to tf.float32 label_ids = tf.reshape(label_ids, [-1]) # [some_length] label_weights = tf.cast(tf.reshape(label_ids, [-1]), dtype=tf.float32) # [some_length, vocab_size] one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size) # [some_length, 1] per_loss = - tf.reduce_sum(log_probs * one_hot_labels, axis=-1) # ignore padding numerator = tf.reduce_sum(label_weights * per_loss) # the number of predicted items denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return loss, per_loss, log_probs, logits def gather_indexes(input_tensor, positions): """Gather all the predicted tensor, input_tensor contains all the positions, however, only maksed positions are used for calculating the loss. Args: input_tensor: float Tensor of shape [batch_size, seq_length, width]. positions: save the relative positions of each sentence's labels. Returns: output_tensor: [some_length, width], where some_length refers to all the predicted labels in the data batch. 
""" input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] # create a vector which saves the initial positions for each batch flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) # get the absolute positions for the predicted labels, [batch_size * seq_length, 1] flat_postions = tf.reshape(positions + flat_offsets, [-1]) flat_input_tensor = tf.reshape(input_tensor, [batch_size * seq_length, width]) # obtain the predicted items, [some_lenght, width] output_tensor = tf.gather(flat_input_tensor, flat_postions) return output_tensor def main(): # tf.gfile.MakeDirs(FLAGS.output_dir) Path(bert_config.model_dir).mkdir(exist_ok=True) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=bert_config.init_checkpoint, learning_rate=bert_config.learning_rate, num_train_steps=bert_config.num_train_steps) input_fn = functools.partial(train_input_fn, path=bert_config.data_path, batch_size=bert_config.batch_size, repeat_num=bert_config.num_train_steps, max_length = bert_config.max_length, train_type=bert_config.train_type, reverse=bert_config.reverse) run_config = tf.contrib.tpu.RunConfig( keep_checkpoint_max=1, save_checkpoints_steps=1000, model_dir=bert_config.model_dir) estimator = tf.estimator.Estimator(model_fn, config=run_config) estimator.train(input_fn) # train_spec = tf.estimator.TrainSpec(input_fn=input_fn) # eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1000) # tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # for evaluation, the repeat_num in input_fn has to be reset # estimator.evaluate(input_fn) def package_model(model_path, pb_path): model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=None, learning_rate=bert_config.learning_rate, num_train_steps=bert_config.num_train_steps) estimator = tf.estimator.Estimator(model_fn, model_path) estimator.export_saved_model(pb_path, 
serving_input_receiver_fn) if __name__ == '__main__': main() # package_model('models/', 'models_to_deploy/') """the following code is just for test.""" # import codecs # from load_data import create_mask_for_lm, create_mask_for_seq # with codecs.open('data/vocab.txt', 'r', 'utf-8') as file: # vocab_idx = {} # idx_vocab = {} # for idx, vocab in enumerate(file): # vocab = vocab.strip() # idx = int(idx) # vocab_idx[vocab] = idx # idx_vocab[idx] = vocab # model_fn = model_fn_builder( # bert_config=bert_config, # init_checkpoint=bert_config.init_checkpoint, # learning_rate=bert_config.learning_rate, # num_train_steps=bert_config.num_train_steps) # estimator = tf.estimator.Estimator(model_fn, 'models/') # def convert_to_idx(line): # """convert the vocab to idx.""" # result = [] # for vocab in line: # try: # result.append(vocab_idx[vocab]) # except KeyError: # result.append(vocab_idx['<unk>']) # return result # def parse_data(path): # """process the data.""" # with codecs.open(path, 'r', 'utf-8') as file: # questions = [] # answers = [] # for line in file: # line = line.strip().split('=') # que, ans = convert_to_idx(line[0]), convert_to_idx(line[1]) # questions.append(que) # answers.append(ans) # assert len(questions) == len(answers) # # get max length to pad # length = [len(ans) + len(que) for ans, que in zip(questions, answers)] # max_length = max(length) # return questions, answers, max_length # def train_generator(path): # """"This is the entrance to the input_fn.""" # questions, answers, max_length = parse_data(path) # for que, ans in zip(questions, answers): # input_ids = que + ans # padding_part = [vocab_idx['<padding>'] for _ in range(max_length - len(input_ids))] # input_ids += padding_part # # input_mask: -> [1, 1, 1, 0, 0], # # where 1 indicates the question part, 0 indicates both the answer part and padding part. 
# input_mask = [1 for _ in range(len(que))] + [0 for _ in range(len(ans + padding_part))] # input_mask = create_mask_for_seq(input_mask, len(que), len(ans + padding_part)) # # masked_lm_positions saves the relative positions for answer part and padding part. # # [[2, 3, 4, 5, 6], [5, 6]] # masked_lm_positions = [idx + len(que) for idx in range(len(input_ids) - len(que))] # # ATTENTION: the above `masked_lm_positions` is not in the same length due to the various length of question, # # so padding the `masked_lm_positions` to the same length as input_ids, # # although the padding items are fake, the following `mask_lm_weights` will handle this. # masked_lm_positions += [masked_lm_positions[-1] + 1 + idx for idx in range(len(input_ids) - len(masked_lm_positions))] # mask_lm_ids = ans + padding_part # mask_lm_ids += [vocab_idx['<padding>'] for _ in range(len(input_ids) - len(mask_lm_ids))] # mask_lm_weights = [1 for _ in range(len(ans))] + [0 for _ in range(len(padding_part))] # mask_lm_weights += [0 for _ in range(len(input_ids) - len(mask_lm_weights))] # # input_ids = [input_ids] # # input_mask = [input_mask] # # masked_lm_positions = [masked_lm_positions] # # mask_lm_ids = [mask_lm_ids] # # mask_lm_weights = [mask_lm_weights] # # print(que) # # print(ans) # # print(len(input_ids)) # # print(len(input_mask)) # # print(len(masked_lm_positions)) # # print(len(mask_lm_ids)) # # print(len(mask_lm_weights)) # # input() # features = {'input_ids': input_ids, # 'input_mask': input_mask, # 'masked_lm_positions': masked_lm_positions, # 'masked_lm_ids': mask_lm_ids, # 'masked_lm_weights': mask_lm_weights} # yield features # ## we don't know the input as a web server, so use lambda to create fake generator # def example_input_fn(data): # output_types = {'input_ids': tf.int32, # 'input_mask': tf.int32, # 'masked_lm_positions': tf.int32, # 'masked_lm_ids': tf.int32, # 'masked_lm_weights': tf.int32} # output_shape = {'input_ids': [None], # 'input_mask': [None, None], # 
'masked_lm_positions': [None], # 'masked_lm_ids': [None], # 'masked_lm_weights': [None]} # dataset = tf.data.Dataset.from_generator( # lambda: [data], # output_types=output_types, # output_shapes=output_shape) # # dataset = dataset.batch(batch_size).repeat(repeat_num) # iterator = dataset.batch(1).make_one_shot_iterator() # next_element = iterator.get_next() # return next_element, None # for data in train_generator('data/test.data'): # print(data) # example_inpf = functools.partial(example_input_fn, data) # for pred in estimator.predict(example_inpf): # print(pred) # input(idx_vocab[pred])
metric_fn
identifier_name
pre_train.py
# coding:utf-8 # Produced by Andysin Zhang # 23_Oct_2019 # Inspired By the original Bert, Appreciate for the wonderful work # # Copyright 2019 TCL Inc. All Rights Reserverd. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """"Run masked LM/next sentence masked_lm pre-training for ALBERT.""" import sys import functools import tensorflow as tf # tf.enable_eager_execution() from pathlib import Path PROJECT_PATH = Path(__file__).absolute().parent sys.path.insert(0, str(PROJECT_PATH)) from utils.setup import Setup setup = Setup() import optimization from model import BertModel from model_helper import * from config import bert_config from load_data import train_input_fn, serving_input_receiver_fn from utils.log import log_info as _info from utils.log import log_error as _error # Prototype for tf.estimator def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps): """Returns 'model_fn' closure for Estomator, use closure is because of building the model requires some paramters, sending them into the 'params' is not a good deal.""" def model_fn(features, labels, mode, params): """this is prototype syntax, all parameters are necessary.""" # obtain the data _info('*** Features ***') for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features['input_ids'] # [batch_size, seq_length] input_mask = features['input_mask'] # 
[batch_size, seq_length] # if mode != tf.estimator.ModeKeys.PREDICT: # # segment_idx = features['segment_dis'] # masked_lm_positions = features['masked_lm_positions'] # [batch_size, seq_length], specify the answer # masked_lm_ids = features['masked_lm_ids'] # [batch_size, answer_seq_length], specify the answer labels # masked_lm_weights = features['masked_lm_weights'] # [batch_size, seq_length], [1, 1, 0], 0 refers to the mask # # next_sentence_labels = features['next_sentence_labels'] # else: masked_lm_positions = features['masked_lm_positions'] masked_lm_ids = features['masked_lm_ids'] masked_lm_weights = features['masked_lm_weights'] if bert_config.train_type == 'seq2seq':
elif bert_config.train_type == 'lm': _info('Training language model task.') # build model is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask) # compute loss loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config, model.get_sequence_output(), model.embedding_table, model.projection_table, masked_lm_positions, masked_lm_ids, masked_lm_weights, mode) if mode == tf.estimator.ModeKeys.PREDICT: masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1]) output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions) else: if mode == tf.estimator.ModeKeys.TRAIN: # restore from the checkpoint, # tf.estimator automatically restore from the model typically, # maybe here is for restore some pre-trained parameters tvars = tf.trainable_variables() initialized_variable_names = {} if init_checkpoint: (assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) _info('*** Trainable Variables ***') for var in tvars: init_string = '' if var.name in initialized_variable_names: init_string = ', *INIT_FROM_CKPT*' _info('name = {}, shape={}{}'.format(var.name, var.shape, init_string)) train_op = optimization.create_optimizer( loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit) # learning_rate = tf.train.polynomial_decay(bert_config.learning_rate, # tf.train.get_or_create_global_step(), # num_train_steps, # end_learning_rate=0.0, # power=1.0, # cycle=False) # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) # gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True) # clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0) # train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step()) output_spec = 
tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) elif mode == tf.estimator.ModeKeys.EVAL: is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32) def metric_fn(loss, label_ids, logits, is_real_example): """ Args: loss: tf.float32. label_ids: [b, s]. logits: [b, s, v]. """ # [b * s, v] logits = tf.reshape(logits, [-1, logits.shape[-1]]) # [b * s, 1] predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) # [b * s] label_ids = tf.reshape(label_ids, [-1]) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions) loss = tf.metrics.mean(values=loss) return {'eval_accuracy': accuracy, 'eval_loss': loss} eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example) output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, embedding_table, projection_table, positions, label_ids, label_weights, mode): """Get the loss for the answer according to the mask. Args: bert_config: config for bert. input_tensor: float Tensor of shape [batch_size, seq_length, witdh]. embedding_table: [vocab_size, embedding_size]. projection_table: [embedding_size, hidden_size]. positions: tf.int32, which saves the positions for answers. label_ids: tf.int32, which is the true labels. label_weights: tf.int32, which is refers to the padding. Returns: loss: average word loss. per_loss: per word loss. log_probs: log probability. 
""" predicted_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope('cls/predictions'): with tf.variable_scope('transform'): input_tensor = tf.layers.dense( predicted_tensor, units=bert_config.hidden_size, activation=gelu, kernel_initializer=create_initializer(bert_config.initializer_range)) input_tensor = layer_norm(input_tensor) output_bias = tf.get_variable( 'output_bias', shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) input_project = tf.matmul(input_tensor, projection_table, transpose_b=True) logits = tf.matmul(input_project, embedding_table, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) # [some_length, vocab_size] log_probs = tf.nn.log_softmax(logits, axis=-1) if mode == tf.estimator.ModeKeys.PREDICT: return None, None, tf.nn.softmax(logits, axis=-1) # [some_length], no need to cast to tf.float32 label_ids = tf.reshape(label_ids, [-1]) # [some_length] label_weights = tf.cast(tf.reshape(label_ids, [-1]), dtype=tf.float32) # [some_length, vocab_size] one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size) # [some_length, 1] per_loss = - tf.reduce_sum(log_probs * one_hot_labels, axis=-1) # ignore padding numerator = tf.reduce_sum(label_weights * per_loss) # the number of predicted items denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return loss, per_loss, log_probs, logits def gather_indexes(input_tensor, positions): """Gather all the predicted tensor, input_tensor contains all the positions, however, only maksed positions are used for calculating the loss. Args: input_tensor: float Tensor of shape [batch_size, seq_length, width]. positions: save the relative positions of each sentence's labels. Returns: output_tensor: [some_length, width], where some_length refers to all the predicted labels in the data batch. 
""" input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] # create a vector which saves the initial positions for each batch flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) # get the absolute positions for the predicted labels, [batch_size * seq_length, 1] flat_postions = tf.reshape(positions + flat_offsets, [-1]) flat_input_tensor = tf.reshape(input_tensor, [batch_size * seq_length, width]) # obtain the predicted items, [some_lenght, width] output_tensor = tf.gather(flat_input_tensor, flat_postions) return output_tensor def main(): # tf.gfile.MakeDirs(FLAGS.output_dir) Path(bert_config.model_dir).mkdir(exist_ok=True) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=bert_config.init_checkpoint, learning_rate=bert_config.learning_rate, num_train_steps=bert_config.num_train_steps) input_fn = functools.partial(train_input_fn, path=bert_config.data_path, batch_size=bert_config.batch_size, repeat_num=bert_config.num_train_steps, max_length = bert_config.max_length, train_type=bert_config.train_type, reverse=bert_config.reverse) run_config = tf.contrib.tpu.RunConfig( keep_checkpoint_max=1, save_checkpoints_steps=1000, model_dir=bert_config.model_dir) estimator = tf.estimator.Estimator(model_fn, config=run_config) estimator.train(input_fn) # train_spec = tf.estimator.TrainSpec(input_fn=input_fn) # eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1000) # tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # for evaluation, the repeat_num in input_fn has to be reset # estimator.evaluate(input_fn) def package_model(model_path, pb_path): model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=None, learning_rate=bert_config.learning_rate, num_train_steps=bert_config.num_train_steps) estimator = tf.estimator.Estimator(model_fn, model_path) estimator.export_saved_model(pb_path, 
serving_input_receiver_fn) if __name__ == '__main__': main() # package_model('models/', 'models_to_deploy/') """the following code is just for test.""" # import codecs # from load_data import create_mask_for_lm, create_mask_for_seq # with codecs.open('data/vocab.txt', 'r', 'utf-8') as file: # vocab_idx = {} # idx_vocab = {} # for idx, vocab in enumerate(file): # vocab = vocab.strip() # idx = int(idx) # vocab_idx[vocab] = idx # idx_vocab[idx] = vocab # model_fn = model_fn_builder( # bert_config=bert_config, # init_checkpoint=bert_config.init_checkpoint, # learning_rate=bert_config.learning_rate, # num_train_steps=bert_config.num_train_steps) # estimator = tf.estimator.Estimator(model_fn, 'models/') # def convert_to_idx(line): # """convert the vocab to idx.""" # result = [] # for vocab in line: # try: # result.append(vocab_idx[vocab]) # except KeyError: # result.append(vocab_idx['<unk>']) # return result # def parse_data(path): # """process the data.""" # with codecs.open(path, 'r', 'utf-8') as file: # questions = [] # answers = [] # for line in file: # line = line.strip().split('=') # que, ans = convert_to_idx(line[0]), convert_to_idx(line[1]) # questions.append(que) # answers.append(ans) # assert len(questions) == len(answers) # # get max length to pad # length = [len(ans) + len(que) for ans, que in zip(questions, answers)] # max_length = max(length) # return questions, answers, max_length # def train_generator(path): # """"This is the entrance to the input_fn.""" # questions, answers, max_length = parse_data(path) # for que, ans in zip(questions, answers): # input_ids = que + ans # padding_part = [vocab_idx['<padding>'] for _ in range(max_length - len(input_ids))] # input_ids += padding_part # # input_mask: -> [1, 1, 1, 0, 0], # # where 1 indicates the question part, 0 indicates both the answer part and padding part. 
# input_mask = [1 for _ in range(len(que))] + [0 for _ in range(len(ans + padding_part))] # input_mask = create_mask_for_seq(input_mask, len(que), len(ans + padding_part)) # # masked_lm_positions saves the relative positions for answer part and padding part. # # [[2, 3, 4, 5, 6], [5, 6]] # masked_lm_positions = [idx + len(que) for idx in range(len(input_ids) - len(que))] # # ATTENTION: the above `masked_lm_positions` is not in the same length due to the various length of question, # # so padding the `masked_lm_positions` to the same length as input_ids, # # although the padding items are fake, the following `mask_lm_weights` will handle this. # masked_lm_positions += [masked_lm_positions[-1] + 1 + idx for idx in range(len(input_ids) - len(masked_lm_positions))] # mask_lm_ids = ans + padding_part # mask_lm_ids += [vocab_idx['<padding>'] for _ in range(len(input_ids) - len(mask_lm_ids))] # mask_lm_weights = [1 for _ in range(len(ans))] + [0 for _ in range(len(padding_part))] # mask_lm_weights += [0 for _ in range(len(input_ids) - len(mask_lm_weights))] # # input_ids = [input_ids] # # input_mask = [input_mask] # # masked_lm_positions = [masked_lm_positions] # # mask_lm_ids = [mask_lm_ids] # # mask_lm_weights = [mask_lm_weights] # # print(que) # # print(ans) # # print(len(input_ids)) # # print(len(input_mask)) # # print(len(masked_lm_positions)) # # print(len(mask_lm_ids)) # # print(len(mask_lm_weights)) # # input() # features = {'input_ids': input_ids, # 'input_mask': input_mask, # 'masked_lm_positions': masked_lm_positions, # 'masked_lm_ids': mask_lm_ids, # 'masked_lm_weights': mask_lm_weights} # yield features # ## we don't know the input as a web server, so use lambda to create fake generator # def example_input_fn(data): # output_types = {'input_ids': tf.int32, # 'input_mask': tf.int32, # 'masked_lm_positions': tf.int32, # 'masked_lm_ids': tf.int32, # 'masked_lm_weights': tf.int32} # output_shape = {'input_ids': [None], # 'input_mask': [None, None], # 
'masked_lm_positions': [None], # 'masked_lm_ids': [None], # 'masked_lm_weights': [None]} # dataset = tf.data.Dataset.from_generator( # lambda: [data], # output_types=output_types, # output_shapes=output_shape) # # dataset = dataset.batch(batch_size).repeat(repeat_num) # iterator = dataset.batch(1).make_one_shot_iterator() # next_element = iterator.get_next() # return next_element, None # for data in train_generator('data/test.data'): # print(data) # example_inpf = functools.partial(example_input_fn, data) # for pred in estimator.predict(example_inpf): # print(pred) # input(idx_vocab[pred])
_info('Training seq2seq task.')
conditional_block
layer.go
/* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package layer import ( "bytes" "context" "fmt" "io" "os" "path/filepath" "sync" "time" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" "github.com/containerd/stargz-snapshotter/cache" "github.com/containerd/stargz-snapshotter/estargz" "github.com/containerd/stargz-snapshotter/estargz/zstdchunked" "github.com/containerd/stargz-snapshotter/fs/config" commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common" "github.com/containerd/stargz-snapshotter/fs/reader" "github.com/containerd/stargz-snapshotter/fs/remote" "github.com/containerd/stargz-snapshotter/fs/source" "github.com/containerd/stargz-snapshotter/metadata" "github.com/containerd/stargz-snapshotter/task" "github.com/containerd/stargz-snapshotter/util/cacheutil" "github.com/containerd/stargz-snapshotter/util/namedmutex" fusefs "github.com/hanwen/go-fuse/v2/fs" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) const ( defaultResolveResultEntryTTLSec = 120 defaultMaxLRUCacheEntry = 10 defaultMaxCacheFds = 10 defaultPrefetchTimeoutSec = 10 memoryCacheType = "memory" ) // Layer represents a layer. type Layer interface { // Info returns the information of this layer. 
Info() Info // RootNode returns the root node of this layer. RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) // Check checks if the layer is still connectable. Check() error // Refresh refreshes the layer connection. Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error // Verify verifies this layer using the passed TOC Digest. // Nop if Verify() or SkipVerify() was already called. Verify(tocDigest digest.Digest) (err error) // SkipVerify skips verification for this layer. // Nop if Verify() or SkipVerify() was already called. SkipVerify() // Prefetch prefetches the specified size. If the layer is eStargz and contains landmark files, // the range indicated by these files is respected. Prefetch(prefetchSize int64) error // ReadAt reads this layer. ReadAt([]byte, int64, ...remote.Option) (int, error) // WaitForPrefetchCompletion waits untils Prefetch completes. WaitForPrefetchCompletion() error // BackgroundFetch fetches the entire layer contents to the cache. // Fetching contents is done as a background task. BackgroundFetch() error // Done releases the reference to this layer. The resources related to this layer will be // discarded sooner or later. Queries after calling this function won't be serviced. Done() } // Info is the current status of a layer. type Info struct { Digest digest.Digest Size int64 // layer size in bytes FetchedSize int64 // layer fetched size in bytes PrefetchSize int64 // layer prefetch size in bytes ReadTime time.Time // last time the layer was read } // Resolver resolves the layer location and provieds the handler of that layer. 
type Resolver struct { rootDir string resolver *remote.Resolver prefetchTimeout time.Duration layerCache *cacheutil.TTLCache layerCacheMu sync.Mutex blobCache *cacheutil.TTLCache blobCacheMu sync.Mutex backgroundTaskManager *task.BackgroundTaskManager resolveLock *namedmutex.NamedMutex config config.Config metadataStore metadata.Store overlayOpaqueType OverlayOpaqueType additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor } // NewResolver returns a new layer resolver. func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, cfg config.Config, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store, overlayOpaqueType OverlayOpaqueType, additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor) (*Resolver, error) { resolveResultEntryTTL := time.Duration(cfg.ResolveResultEntryTTLSec) * time.Second if resolveResultEntryTTL == 0 { resolveResultEntryTTL = defaultResolveResultEntryTTLSec * time.Second } prefetchTimeout := time.Duration(cfg.PrefetchTimeoutSec) * time.Second if prefetchTimeout == 0 { prefetchTimeout = defaultPrefetchTimeoutSec * time.Second } // layerCache caches resolved layers for future use. This is useful in a use-case where // the filesystem resolves and caches all layers in an image (not only queried one) in parallel, // before they are actually queried. layerCache := cacheutil.NewTTLCache(resolveResultEntryTTL) layerCache.OnEvicted = func(key string, value interface{}) { if err := value.(*layer).close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up layer") return } logrus.WithField("key", key).Debugf("cleaned up layer") } // blobCache caches resolved blobs for futural use. This is especially useful when a layer // isn't eStargz/stargz (the *layer object won't be created/cached in this case). 
blobCache := cacheutil.NewTTLCache(resolveResultEntryTTL) blobCache.OnEvicted = func(key string, value interface{}) { if err := value.(remote.Blob).Close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up blob") return } logrus.WithField("key", key).Debugf("cleaned up blob") } if err := os.MkdirAll(root, 0700); err != nil { return nil, err } return &Resolver{ rootDir: root, resolver: remote.NewResolver(cfg.BlobConfig, resolveHandlers), layerCache: layerCache, blobCache: blobCache, prefetchTimeout: prefetchTimeout, backgroundTaskManager: backgroundTaskManager, config: cfg, resolveLock: new(namedmutex.NamedMutex), metadataStore: metadataStore, overlayOpaqueType: overlayOpaqueType, additionalDecompressors: additionalDecompressors, }, nil } func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache, error) { if cacheType == memoryCacheType { return cache.NewMemoryCache(), nil } dcc := cfg.DirectoryCacheConfig maxDataEntry := dcc.MaxLRUCacheEntry if maxDataEntry == 0 { maxDataEntry = defaultMaxLRUCacheEntry } maxFdEntry := dcc.MaxCacheFds if maxFdEntry == 0 { maxFdEntry = defaultMaxCacheFds } bufPool := &sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry) dCache.OnEvicted = func(key string, value interface{}) { value.(*bytes.Buffer).Reset() bufPool.Put(value) } fCache.OnEvicted = func(key string, value interface{}) { value.(*os.File).Close() } // create a cache on an unique directory if err := os.MkdirAll(root, 0700); err != nil { return nil, err } cachePath, err := os.MkdirTemp(root, "") if err != nil { return nil, fmt.Errorf("failed to initialize directory cache: %w", err) } return cache.NewDirectoryCache( cachePath, cache.DirectoryCacheConfig{ SyncAdd: dcc.SyncAdd, DataCache: dCache, FdCache: fCache, BufPool: bufPool, Direct: dcc.Direct, }, ) } // Resolve resolves a layer based on the passed layer blob 
information. func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor, esgzOpts ...metadata.Option) (_ Layer, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Wait if resolving this layer is already running. The result // can hopefully get from the cache. r.resolveLock.Lock(name) defer r.resolveLock.Unlock(name) ctx = log.WithLogger(ctx, log.G(ctx).WithField("src", name)) // First, try to retrieve this layer from the underlying cache. r.layerCacheMu.Lock() c, done, ok := r.layerCache.Get(name) r.layerCacheMu.Unlock() if ok { if l := c.(*layer); l.Check() == nil { log.G(ctx).Debugf("hit layer cache %q", name) return &layerRef{l, done}, nil } // Cached layer is invalid done() r.layerCacheMu.Lock() r.layerCache.Remove(name) r.layerCacheMu.Unlock() } log.G(ctx).Debugf("resolving") // Resolve the blob. blobR, err := r.resolveBlob(ctx, hosts, refspec, desc) if err != nil { return nil, fmt.Errorf("failed to resolve the blob: %w", err) } defer func() { if retErr != nil { blobR.done() } }() fsCache, err := newCache(filepath.Join(r.rootDir, "fscache"), r.config.FSCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create fs cache: %w", err) } defer func() { if retErr != nil { fsCache.Close() } }() // Get a reader for stargz archive. // Each file's read operation is a prioritized task and all background tasks // will be stopped during the execution so this can avoid being disturbed for // NW traffic by background tasks. 
sr := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (n int, err error) { r.backgroundTaskManager.DoPrioritizedTask() defer r.backgroundTaskManager.DonePrioritizedTask() return blobR.ReadAt(p, offset) }), 0, blobR.Size()) // define telemetry hooks to measure latency metrics inside estargz package telemetry := metadata.Telemetry{ GetFooterLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzFooterGet, desc.Digest, start) }, GetTocLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzTocGet, desc.Digest, start) }, DeserializeTocLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.DeserializeTocJSON, desc.Digest, start) }, } additionalDecompressors := []metadata.Decompressor{new(zstdchunked.Decompressor)} if r.additionalDecompressors != nil { additionalDecompressors = append(additionalDecompressors, r.additionalDecompressors(ctx, hosts, refspec, desc)...) } meta, err := r.metadataStore(sr, append(esgzOpts, metadata.WithTelemetry(&telemetry), metadata.WithDecompressors(additionalDecompressors...))...) if err != nil { return nil, err } vr, err := reader.NewReader(meta, fsCache, desc.Digest) if err != nil { return nil, fmt.Errorf("failed to read layer: %w", err) } // Combine layer information together and cache it. l := newLayer(r, desc, blobR, vr) r.layerCacheMu.Lock() cachedL, done2, added := r.layerCache.Add(name, l) r.layerCacheMu.Unlock() if !added { l.close() // layer already exists in the cache. discrad this. } log.G(ctx).Debugf("resolved") return &layerRef{cachedL.(*layer), done2}, nil } // resolveBlob resolves a blob based on the passed layer blob information. func (r *Resolver) resolveBlob(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (_ *blobRef, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Try to retrieve the blob from the underlying cache. 
r.blobCacheMu.Lock() c, done, ok := r.blobCache.Get(name) r.blobCacheMu.Unlock() if ok { if blob := c.(remote.Blob); blob.Check() == nil { return &blobRef{blob, done}, nil } // invalid blob. discard this. done() r.blobCacheMu.Lock() r.blobCache.Remove(name) r.blobCacheMu.Unlock() } httpCache, err := newCache(filepath.Join(r.rootDir, "httpcache"), r.config.HTTPCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create http cache: %w", err) } defer func() { if retErr != nil { httpCache.Close() } }() // Resolve the blob and cache the result. b, err := r.resolver.Resolve(ctx, hosts, refspec, desc, httpCache) if err != nil { return nil, fmt.Errorf("failed to resolve the source: %w", err) } r.blobCacheMu.Lock() cachedB, done, added := r.blobCache.Add(name, b) r.blobCacheMu.Unlock() if !added { b.Close() // blob already exists in the cache. discard this. } return &blobRef{cachedB.(remote.Blob), done}, nil } func newLayer( resolver *Resolver, desc ocispec.Descriptor, blob *blobRef, vr *reader.VerifiableReader, ) *layer { return &layer{ resolver: resolver, desc: desc, blob: blob, verifiableReader: vr, prefetchWaiter: newWaiter(), } } type layer struct { resolver *Resolver desc ocispec.Descriptor blob *blobRef verifiableReader *reader.VerifiableReader prefetchWaiter *waiter prefetchSize int64 prefetchSizeMu sync.Mutex r reader.Reader closed bool closedMu sync.Mutex prefetchOnce sync.Once backgroundFetchOnce sync.Once } func (l *layer) Info() Info { var readTime time.Time if l.r != nil { readTime = l.r.LastOnDemandReadTime() } return Info{ Digest: l.desc.Digest, Size: l.blob.Size(), FetchedSize: l.blob.FetchedSize(), PrefetchSize: l.prefetchedSize(), ReadTime: readTime, } } func (l *layer) prefetchedSize() int64 { l.prefetchSizeMu.Lock() sz := l.prefetchSize l.prefetchSizeMu.Unlock() return sz } func (l *layer) Check() error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Check() } func (l *layer) Refresh(ctx 
context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Refresh(ctx, hosts, refspec, desc) } func (l *layer) Verify(tocDigest digest.Digest) (err error) { if l.isClosed() { return fmt.Errorf("layer is already closed") } if l.r != nil { return nil } l.r, err = l.verifiableReader.VerifyTOC(tocDigest) return } func (l *layer) SkipVerify() { if l.r != nil { return } l.r = l.verifiableReader.SkipVerify() } func (l *layer) Prefetch(prefetchSize int64) (err error) { l.prefetchOnce.Do(func() { ctx := context.Background() l.resolver.backgroundTaskManager.DoPrioritizedTask() defer l.resolver.backgroundTaskManager.DonePrioritizedTask() err = l.prefetch(ctx, prefetchSize) if err != nil { log.G(ctx).WithError(err).Warnf("failed to prefetch layer=%v", l.desc.Digest) return } log.G(ctx).Debug("completed to prefetch") }) return } func (l *layer) prefetch(ctx context.Context, prefetchSize int64) error { defer l.prefetchWaiter.done() // Notify the completion // Measuring the total time to complete prefetch (use defer func() because l.Info().PrefetchSize is set later) start := time.Now() defer func() { commonmetrics.WriteLatencyWithBytesLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchTotal, start, commonmetrics.PrefetchSize, l.prefetchedSize()) }() if l.isClosed() { return fmt.Errorf("layer is already closed") } rootID := l.verifiableReader.Metadata().RootID() if _, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.NoPrefetchLandmark); err == nil { // do not prefetch this layer return nil } else if id, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.PrefetchLandmark); err == nil { offset, err := l.verifiableReader.Metadata().GetOffset(id) if err != nil { return fmt.Errorf("failed to get offset of prefetch landmark: %w", err) } // override the prefetch size with optimized value prefetchSize = offset } else if prefetchSize > 
l.blob.Size() { // adjust prefetch size not to exceed the whole layer size prefetchSize = l.blob.Size() } // Fetch the target range downloadStart := time.Now() err := l.blob.Cache(0, prefetchSize) commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDownload, downloadStart) // time to download prefetch data if err != nil { return fmt.Errorf("failed to prefetch layer: %w", err) } // Set prefetch size for metrics after prefetch completed l.prefetchSizeMu.Lock() l.prefetchSize = prefetchSize l.prefetchSizeMu.Unlock() // Cache uncompressed contents of the prefetched range decompressStart := time.Now() err = l.verifiableReader.Cache(reader.WithFilter(func(offset int64) bool { return offset < prefetchSize // Cache only prefetch target })) commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDecompress, decompressStart) // time to decompress prefetch data if err != nil { return fmt.Errorf("failed to cache prefetched layer: %w", err) } return nil } func (l *layer) WaitForPrefetchCompletion() error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.prefetchWaiter.wait(l.resolver.prefetchTimeout) } func (l *layer) BackgroundFetch() (err error) { l.backgroundFetchOnce.Do(func() { ctx := context.Background() err = l.backgroundFetch(ctx) if err != nil { log.G(ctx).WithError(err).Warnf("failed to fetch whole layer=%v", l.desc.Digest) return } log.G(ctx).Debug("completed to fetch all layer data in background") }) return } func (l *layer) backgroundFetch(ctx context.Context) error { defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchTotal, time.Now()) if l.isClosed() { return fmt.Errorf("layer is already closed") } br := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (retN int, retErr error) { l.resolver.backgroundTaskManager.InvokeBackgroundTask(func(ctx context.Context) { // Measuring the time to download background fetch data (in milliseconds) defer 
commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.BackgroundFetchDownload, l.Info().Digest, time.Now()) // time to download background fetch data retN, retErr = l.blob.ReadAt( p, offset, remote.WithContext(ctx), // Make cancellable remote.WithCacheOpts(cache.Direct()), // Do not pollute mem cache ) }, 120*time.Second) return }), 0, l.blob.Size()) defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchDecompress, time.Now()) // time to decompress background fetch data (in milliseconds) return l.verifiableReader.Cache( reader.WithReader(br), // Read contents in background reader.WithCacheOpts(cache.Direct()), // Do not pollute mem cache ) } func (l *layerRef) Done() { l.done() } func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) { if l.isClosed() { return nil, fmt.Errorf("layer is already closed") } if l.r == nil { return nil, fmt.Errorf("layer hasn't been verified yet") } return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType) } func (l *layer) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) { return l.blob.ReadAt(p, offset, opts...) } func (l *layer) close() error { l.closedMu.Lock() defer l.closedMu.Unlock() if l.closed { return nil } l.closed = true defer l.blob.done() // Close reader first, then close the blob l.verifiableReader.Close() if l.r != nil { return l.r.Close() } return nil } func (l *layer)
() bool { l.closedMu.Lock() closed := l.closed l.closedMu.Unlock() return closed } // blobRef is a reference to the blob in the cache. Calling `done` decreases the reference counter // of this blob in the underlying cache. When nobody refers to the blob in the cache, resources bound // to this blob will be discarded. type blobRef struct { remote.Blob done func() } // layerRef is a reference to the layer in the cache. Calling `Done` or `done` decreases the // reference counter of this blob in the underlying cache. When nobody refers to the layer in the // cache, resources bound to this layer will be discarded. type layerRef struct { *layer done func() } func newWaiter() *waiter { return &waiter{ completionCond: sync.NewCond(&sync.Mutex{}), } } type waiter struct { isDone bool isDoneMu sync.Mutex completionCond *sync.Cond } func (w *waiter) done() { w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() } func (w *waiter) wait(timeout time.Duration) error { wait := func() <-chan struct{} { ch := make(chan struct{}) go func() { w.isDoneMu.Lock() isDone := w.isDone w.isDoneMu.Unlock() w.completionCond.L.Lock() if !isDone { w.completionCond.Wait() } w.completionCond.L.Unlock() ch <- struct{}{} }() return ch } select { case <-time.After(timeout): w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() return fmt.Errorf("timeout(%v)", timeout) case <-wait(): return nil } } type readerAtFunc func([]byte, int64) (int, error) func (f readerAtFunc) ReadAt(p []byte, offset int64) (int, error) { return f(p, offset) }
isClosed
identifier_name
layer.go
/* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package layer import ( "bytes" "context" "fmt" "io" "os" "path/filepath" "sync" "time" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" "github.com/containerd/stargz-snapshotter/cache" "github.com/containerd/stargz-snapshotter/estargz" "github.com/containerd/stargz-snapshotter/estargz/zstdchunked" "github.com/containerd/stargz-snapshotter/fs/config" commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common" "github.com/containerd/stargz-snapshotter/fs/reader" "github.com/containerd/stargz-snapshotter/fs/remote" "github.com/containerd/stargz-snapshotter/fs/source" "github.com/containerd/stargz-snapshotter/metadata" "github.com/containerd/stargz-snapshotter/task" "github.com/containerd/stargz-snapshotter/util/cacheutil" "github.com/containerd/stargz-snapshotter/util/namedmutex" fusefs "github.com/hanwen/go-fuse/v2/fs" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) const ( defaultResolveResultEntryTTLSec = 120 defaultMaxLRUCacheEntry = 10 defaultMaxCacheFds = 10 defaultPrefetchTimeoutSec = 10 memoryCacheType = "memory" ) // Layer represents a layer. type Layer interface { // Info returns the information of this layer. 
Info() Info // RootNode returns the root node of this layer. RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) // Check checks if the layer is still connectable. Check() error // Refresh refreshes the layer connection. Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error // Verify verifies this layer using the passed TOC Digest. // Nop if Verify() or SkipVerify() was already called. Verify(tocDigest digest.Digest) (err error) // SkipVerify skips verification for this layer. // Nop if Verify() or SkipVerify() was already called. SkipVerify() // Prefetch prefetches the specified size. If the layer is eStargz and contains landmark files, // the range indicated by these files is respected. Prefetch(prefetchSize int64) error // ReadAt reads this layer. ReadAt([]byte, int64, ...remote.Option) (int, error) // WaitForPrefetchCompletion waits untils Prefetch completes. WaitForPrefetchCompletion() error // BackgroundFetch fetches the entire layer contents to the cache. // Fetching contents is done as a background task. BackgroundFetch() error // Done releases the reference to this layer. The resources related to this layer will be // discarded sooner or later. Queries after calling this function won't be serviced. Done() } // Info is the current status of a layer. type Info struct { Digest digest.Digest Size int64 // layer size in bytes FetchedSize int64 // layer fetched size in bytes PrefetchSize int64 // layer prefetch size in bytes ReadTime time.Time // last time the layer was read } // Resolver resolves the layer location and provieds the handler of that layer. 
type Resolver struct { rootDir string resolver *remote.Resolver prefetchTimeout time.Duration layerCache *cacheutil.TTLCache layerCacheMu sync.Mutex blobCache *cacheutil.TTLCache blobCacheMu sync.Mutex backgroundTaskManager *task.BackgroundTaskManager resolveLock *namedmutex.NamedMutex config config.Config metadataStore metadata.Store overlayOpaqueType OverlayOpaqueType additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor } // NewResolver returns a new layer resolver. func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, cfg config.Config, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store, overlayOpaqueType OverlayOpaqueType, additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor) (*Resolver, error) { resolveResultEntryTTL := time.Duration(cfg.ResolveResultEntryTTLSec) * time.Second if resolveResultEntryTTL == 0 { resolveResultEntryTTL = defaultResolveResultEntryTTLSec * time.Second } prefetchTimeout := time.Duration(cfg.PrefetchTimeoutSec) * time.Second if prefetchTimeout == 0 { prefetchTimeout = defaultPrefetchTimeoutSec * time.Second } // layerCache caches resolved layers for future use. This is useful in a use-case where // the filesystem resolves and caches all layers in an image (not only queried one) in parallel, // before they are actually queried. layerCache := cacheutil.NewTTLCache(resolveResultEntryTTL) layerCache.OnEvicted = func(key string, value interface{}) { if err := value.(*layer).close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up layer") return } logrus.WithField("key", key).Debugf("cleaned up layer") } // blobCache caches resolved blobs for futural use. This is especially useful when a layer // isn't eStargz/stargz (the *layer object won't be created/cached in this case). 
blobCache := cacheutil.NewTTLCache(resolveResultEntryTTL) blobCache.OnEvicted = func(key string, value interface{}) { if err := value.(remote.Blob).Close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up blob") return } logrus.WithField("key", key).Debugf("cleaned up blob") } if err := os.MkdirAll(root, 0700); err != nil { return nil, err } return &Resolver{ rootDir: root, resolver: remote.NewResolver(cfg.BlobConfig, resolveHandlers), layerCache: layerCache, blobCache: blobCache, prefetchTimeout: prefetchTimeout, backgroundTaskManager: backgroundTaskManager, config: cfg, resolveLock: new(namedmutex.NamedMutex), metadataStore: metadataStore, overlayOpaqueType: overlayOpaqueType, additionalDecompressors: additionalDecompressors, }, nil } func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache, error) { if cacheType == memoryCacheType { return cache.NewMemoryCache(), nil } dcc := cfg.DirectoryCacheConfig maxDataEntry := dcc.MaxLRUCacheEntry if maxDataEntry == 0 { maxDataEntry = defaultMaxLRUCacheEntry } maxFdEntry := dcc.MaxCacheFds if maxFdEntry == 0 { maxFdEntry = defaultMaxCacheFds } bufPool := &sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry) dCache.OnEvicted = func(key string, value interface{}) { value.(*bytes.Buffer).Reset() bufPool.Put(value) } fCache.OnEvicted = func(key string, value interface{}) { value.(*os.File).Close() } // create a cache on an unique directory if err := os.MkdirAll(root, 0700); err != nil { return nil, err } cachePath, err := os.MkdirTemp(root, "") if err != nil { return nil, fmt.Errorf("failed to initialize directory cache: %w", err) } return cache.NewDirectoryCache( cachePath, cache.DirectoryCacheConfig{ SyncAdd: dcc.SyncAdd, DataCache: dCache, FdCache: fCache, BufPool: bufPool, Direct: dcc.Direct, }, ) } // Resolve resolves a layer based on the passed layer blob 
information. func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor, esgzOpts ...metadata.Option) (_ Layer, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Wait if resolving this layer is already running. The result // can hopefully get from the cache. r.resolveLock.Lock(name) defer r.resolveLock.Unlock(name) ctx = log.WithLogger(ctx, log.G(ctx).WithField("src", name)) // First, try to retrieve this layer from the underlying cache. r.layerCacheMu.Lock() c, done, ok := r.layerCache.Get(name) r.layerCacheMu.Unlock() if ok { if l := c.(*layer); l.Check() == nil { log.G(ctx).Debugf("hit layer cache %q", name) return &layerRef{l, done}, nil } // Cached layer is invalid done() r.layerCacheMu.Lock() r.layerCache.Remove(name) r.layerCacheMu.Unlock() } log.G(ctx).Debugf("resolving") // Resolve the blob. blobR, err := r.resolveBlob(ctx, hosts, refspec, desc) if err != nil { return nil, fmt.Errorf("failed to resolve the blob: %w", err) } defer func() { if retErr != nil { blobR.done() } }() fsCache, err := newCache(filepath.Join(r.rootDir, "fscache"), r.config.FSCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create fs cache: %w", err) } defer func() { if retErr != nil { fsCache.Close() } }() // Get a reader for stargz archive. // Each file's read operation is a prioritized task and all background tasks // will be stopped during the execution so this can avoid being disturbed for // NW traffic by background tasks. 
sr := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (n int, err error) { r.backgroundTaskManager.DoPrioritizedTask() defer r.backgroundTaskManager.DonePrioritizedTask() return blobR.ReadAt(p, offset) }), 0, blobR.Size()) // define telemetry hooks to measure latency metrics inside estargz package telemetry := metadata.Telemetry{ GetFooterLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzFooterGet, desc.Digest, start) }, GetTocLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzTocGet, desc.Digest, start) }, DeserializeTocLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.DeserializeTocJSON, desc.Digest, start) }, } additionalDecompressors := []metadata.Decompressor{new(zstdchunked.Decompressor)} if r.additionalDecompressors != nil { additionalDecompressors = append(additionalDecompressors, r.additionalDecompressors(ctx, hosts, refspec, desc)...) } meta, err := r.metadataStore(sr, append(esgzOpts, metadata.WithTelemetry(&telemetry), metadata.WithDecompressors(additionalDecompressors...))...) if err != nil { return nil, err } vr, err := reader.NewReader(meta, fsCache, desc.Digest) if err != nil { return nil, fmt.Errorf("failed to read layer: %w", err) } // Combine layer information together and cache it. l := newLayer(r, desc, blobR, vr) r.layerCacheMu.Lock() cachedL, done2, added := r.layerCache.Add(name, l) r.layerCacheMu.Unlock() if !added { l.close() // layer already exists in the cache. discrad this. } log.G(ctx).Debugf("resolved") return &layerRef{cachedL.(*layer), done2}, nil } // resolveBlob resolves a blob based on the passed layer blob information. func (r *Resolver) resolveBlob(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (_ *blobRef, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Try to retrieve the blob from the underlying cache. 
r.blobCacheMu.Lock() c, done, ok := r.blobCache.Get(name) r.blobCacheMu.Unlock() if ok { if blob := c.(remote.Blob); blob.Check() == nil { return &blobRef{blob, done}, nil } // invalid blob. discard this. done() r.blobCacheMu.Lock() r.blobCache.Remove(name) r.blobCacheMu.Unlock() } httpCache, err := newCache(filepath.Join(r.rootDir, "httpcache"), r.config.HTTPCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create http cache: %w", err) } defer func() { if retErr != nil { httpCache.Close() } }() // Resolve the blob and cache the result. b, err := r.resolver.Resolve(ctx, hosts, refspec, desc, httpCache) if err != nil { return nil, fmt.Errorf("failed to resolve the source: %w", err) } r.blobCacheMu.Lock() cachedB, done, added := r.blobCache.Add(name, b) r.blobCacheMu.Unlock() if !added { b.Close() // blob already exists in the cache. discard this. } return &blobRef{cachedB.(remote.Blob), done}, nil } func newLayer( resolver *Resolver, desc ocispec.Descriptor, blob *blobRef, vr *reader.VerifiableReader, ) *layer { return &layer{ resolver: resolver, desc: desc, blob: blob, verifiableReader: vr, prefetchWaiter: newWaiter(), } } type layer struct { resolver *Resolver desc ocispec.Descriptor blob *blobRef verifiableReader *reader.VerifiableReader prefetchWaiter *waiter prefetchSize int64 prefetchSizeMu sync.Mutex r reader.Reader closed bool closedMu sync.Mutex prefetchOnce sync.Once backgroundFetchOnce sync.Once } func (l *layer) Info() Info { var readTime time.Time if l.r != nil { readTime = l.r.LastOnDemandReadTime() } return Info{ Digest: l.desc.Digest, Size: l.blob.Size(), FetchedSize: l.blob.FetchedSize(), PrefetchSize: l.prefetchedSize(), ReadTime: readTime, } } func (l *layer) prefetchedSize() int64 { l.prefetchSizeMu.Lock() sz := l.prefetchSize l.prefetchSizeMu.Unlock() return sz } func (l *layer) Check() error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Check() } func (l *layer) Refresh(ctx 
context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Refresh(ctx, hosts, refspec, desc) } func (l *layer) Verify(tocDigest digest.Digest) (err error) { if l.isClosed() { return fmt.Errorf("layer is already closed") } if l.r != nil { return nil } l.r, err = l.verifiableReader.VerifyTOC(tocDigest) return } func (l *layer) SkipVerify() { if l.r != nil { return } l.r = l.verifiableReader.SkipVerify() } func (l *layer) Prefetch(prefetchSize int64) (err error) { l.prefetchOnce.Do(func() { ctx := context.Background() l.resolver.backgroundTaskManager.DoPrioritizedTask() defer l.resolver.backgroundTaskManager.DonePrioritizedTask() err = l.prefetch(ctx, prefetchSize) if err != nil { log.G(ctx).WithError(err).Warnf("failed to prefetch layer=%v", l.desc.Digest) return } log.G(ctx).Debug("completed to prefetch") }) return } func (l *layer) prefetch(ctx context.Context, prefetchSize int64) error { defer l.prefetchWaiter.done() // Notify the completion // Measuring the total time to complete prefetch (use defer func() because l.Info().PrefetchSize is set later) start := time.Now() defer func() { commonmetrics.WriteLatencyWithBytesLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchTotal, start, commonmetrics.PrefetchSize, l.prefetchedSize()) }() if l.isClosed() { return fmt.Errorf("layer is already closed") } rootID := l.verifiableReader.Metadata().RootID() if _, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.NoPrefetchLandmark); err == nil { // do not prefetch this layer return nil } else if id, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.PrefetchLandmark); err == nil { offset, err := l.verifiableReader.Metadata().GetOffset(id) if err != nil { return fmt.Errorf("failed to get offset of prefetch landmark: %w", err) } // override the prefetch size with optimized value prefetchSize = offset } else if prefetchSize > 
l.blob.Size() { // adjust prefetch size not to exceed the whole layer size prefetchSize = l.blob.Size() } // Fetch the target range downloadStart := time.Now() err := l.blob.Cache(0, prefetchSize) commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDownload, downloadStart) // time to download prefetch data if err != nil { return fmt.Errorf("failed to prefetch layer: %w", err) } // Set prefetch size for metrics after prefetch completed l.prefetchSizeMu.Lock() l.prefetchSize = prefetchSize l.prefetchSizeMu.Unlock() // Cache uncompressed contents of the prefetched range decompressStart := time.Now() err = l.verifiableReader.Cache(reader.WithFilter(func(offset int64) bool { return offset < prefetchSize // Cache only prefetch target })) commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDecompress, decompressStart) // time to decompress prefetch data if err != nil { return fmt.Errorf("failed to cache prefetched layer: %w", err) } return nil } func (l *layer) WaitForPrefetchCompletion() error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.prefetchWaiter.wait(l.resolver.prefetchTimeout) } func (l *layer) BackgroundFetch() (err error) { l.backgroundFetchOnce.Do(func() { ctx := context.Background() err = l.backgroundFetch(ctx) if err != nil { log.G(ctx).WithError(err).Warnf("failed to fetch whole layer=%v", l.desc.Digest) return } log.G(ctx).Debug("completed to fetch all layer data in background") }) return } func (l *layer) backgroundFetch(ctx context.Context) error { defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchTotal, time.Now()) if l.isClosed() { return fmt.Errorf("layer is already closed") } br := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (retN int, retErr error) { l.resolver.backgroundTaskManager.InvokeBackgroundTask(func(ctx context.Context) { // Measuring the time to download background fetch data (in milliseconds) defer 
commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.BackgroundFetchDownload, l.Info().Digest, time.Now()) // time to download background fetch data retN, retErr = l.blob.ReadAt( p, offset, remote.WithContext(ctx), // Make cancellable remote.WithCacheOpts(cache.Direct()), // Do not pollute mem cache ) }, 120*time.Second) return }), 0, l.blob.Size()) defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchDecompress, time.Now()) // time to decompress background fetch data (in milliseconds) return l.verifiableReader.Cache( reader.WithReader(br), // Read contents in background reader.WithCacheOpts(cache.Direct()), // Do not pollute mem cache ) } func (l *layerRef) Done() { l.done() } func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) { if l.isClosed() { return nil, fmt.Errorf("layer is already closed") } if l.r == nil { return nil, fmt.Errorf("layer hasn't been verified yet") } return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType) } func (l *layer) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) { return l.blob.ReadAt(p, offset, opts...) } func (l *layer) close() error { l.closedMu.Lock() defer l.closedMu.Unlock() if l.closed { return nil } l.closed = true defer l.blob.done() // Close reader first, then close the blob l.verifiableReader.Close() if l.r != nil { return l.r.Close() } return nil } func (l *layer) isClosed() bool { l.closedMu.Lock() closed := l.closed l.closedMu.Unlock() return closed } // blobRef is a reference to the blob in the cache. Calling `done` decreases the reference counter // of this blob in the underlying cache. When nobody refers to the blob in the cache, resources bound // to this blob will be discarded. type blobRef struct { remote.Blob done func() } // layerRef is a reference to the layer in the cache. Calling `Done` or `done` decreases the // reference counter of this blob in the underlying cache. 
When nobody refers to the layer in the // cache, resources bound to this layer will be discarded. type layerRef struct { *layer done func() } func newWaiter() *waiter { return &waiter{ completionCond: sync.NewCond(&sync.Mutex{}), } } type waiter struct { isDone bool isDoneMu sync.Mutex completionCond *sync.Cond } func (w *waiter) done()
func (w *waiter) wait(timeout time.Duration) error { wait := func() <-chan struct{} { ch := make(chan struct{}) go func() { w.isDoneMu.Lock() isDone := w.isDone w.isDoneMu.Unlock() w.completionCond.L.Lock() if !isDone { w.completionCond.Wait() } w.completionCond.L.Unlock() ch <- struct{}{} }() return ch } select { case <-time.After(timeout): w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() return fmt.Errorf("timeout(%v)", timeout) case <-wait(): return nil } } type readerAtFunc func([]byte, int64) (int, error) func (f readerAtFunc) ReadAt(p []byte, offset int64) (int, error) { return f(p, offset) }
{ w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() }
identifier_body
layer.go
/* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package layer import ( "bytes" "context" "fmt" "io" "os" "path/filepath" "sync" "time" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" "github.com/containerd/stargz-snapshotter/cache" "github.com/containerd/stargz-snapshotter/estargz" "github.com/containerd/stargz-snapshotter/estargz/zstdchunked" "github.com/containerd/stargz-snapshotter/fs/config" commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common" "github.com/containerd/stargz-snapshotter/fs/reader" "github.com/containerd/stargz-snapshotter/fs/remote" "github.com/containerd/stargz-snapshotter/fs/source" "github.com/containerd/stargz-snapshotter/metadata" "github.com/containerd/stargz-snapshotter/task" "github.com/containerd/stargz-snapshotter/util/cacheutil" "github.com/containerd/stargz-snapshotter/util/namedmutex" fusefs "github.com/hanwen/go-fuse/v2/fs" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) const ( defaultResolveResultEntryTTLSec = 120 defaultMaxLRUCacheEntry = 10 defaultMaxCacheFds = 10 defaultPrefetchTimeoutSec = 10 memoryCacheType = "memory" ) // Layer represents a layer. type Layer interface { // Info returns the information of this layer. 
Info() Info // RootNode returns the root node of this layer. RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) // Check checks if the layer is still connectable. Check() error // Refresh refreshes the layer connection. Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error // Verify verifies this layer using the passed TOC Digest. // Nop if Verify() or SkipVerify() was already called. Verify(tocDigest digest.Digest) (err error) // SkipVerify skips verification for this layer. // Nop if Verify() or SkipVerify() was already called. SkipVerify() // Prefetch prefetches the specified size. If the layer is eStargz and contains landmark files, // the range indicated by these files is respected. Prefetch(prefetchSize int64) error // ReadAt reads this layer. ReadAt([]byte, int64, ...remote.Option) (int, error) // WaitForPrefetchCompletion waits untils Prefetch completes. WaitForPrefetchCompletion() error // BackgroundFetch fetches the entire layer contents to the cache. // Fetching contents is done as a background task. BackgroundFetch() error // Done releases the reference to this layer. The resources related to this layer will be // discarded sooner or later. Queries after calling this function won't be serviced. Done() } // Info is the current status of a layer. type Info struct { Digest digest.Digest Size int64 // layer size in bytes FetchedSize int64 // layer fetched size in bytes PrefetchSize int64 // layer prefetch size in bytes ReadTime time.Time // last time the layer was read } // Resolver resolves the layer location and provieds the handler of that layer. 
type Resolver struct { rootDir string resolver *remote.Resolver prefetchTimeout time.Duration layerCache *cacheutil.TTLCache layerCacheMu sync.Mutex blobCache *cacheutil.TTLCache blobCacheMu sync.Mutex backgroundTaskManager *task.BackgroundTaskManager resolveLock *namedmutex.NamedMutex config config.Config metadataStore metadata.Store overlayOpaqueType OverlayOpaqueType additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor } // NewResolver returns a new layer resolver. func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, cfg config.Config, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store, overlayOpaqueType OverlayOpaqueType, additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor) (*Resolver, error) { resolveResultEntryTTL := time.Duration(cfg.ResolveResultEntryTTLSec) * time.Second if resolveResultEntryTTL == 0 { resolveResultEntryTTL = defaultResolveResultEntryTTLSec * time.Second } prefetchTimeout := time.Duration(cfg.PrefetchTimeoutSec) * time.Second if prefetchTimeout == 0 { prefetchTimeout = defaultPrefetchTimeoutSec * time.Second } // layerCache caches resolved layers for future use. This is useful in a use-case where // the filesystem resolves and caches all layers in an image (not only queried one) in parallel, // before they are actually queried. layerCache := cacheutil.NewTTLCache(resolveResultEntryTTL) layerCache.OnEvicted = func(key string, value interface{}) { if err := value.(*layer).close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up layer") return } logrus.WithField("key", key).Debugf("cleaned up layer") } // blobCache caches resolved blobs for futural use. This is especially useful when a layer // isn't eStargz/stargz (the *layer object won't be created/cached in this case). 
blobCache := cacheutil.NewTTLCache(resolveResultEntryTTL) blobCache.OnEvicted = func(key string, value interface{}) { if err := value.(remote.Blob).Close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up blob") return } logrus.WithField("key", key).Debugf("cleaned up blob") } if err := os.MkdirAll(root, 0700); err != nil { return nil, err } return &Resolver{ rootDir: root, resolver: remote.NewResolver(cfg.BlobConfig, resolveHandlers), layerCache: layerCache, blobCache: blobCache, prefetchTimeout: prefetchTimeout, backgroundTaskManager: backgroundTaskManager, config: cfg, resolveLock: new(namedmutex.NamedMutex), metadataStore: metadataStore, overlayOpaqueType: overlayOpaqueType, additionalDecompressors: additionalDecompressors, }, nil } func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache, error) { if cacheType == memoryCacheType { return cache.NewMemoryCache(), nil } dcc := cfg.DirectoryCacheConfig maxDataEntry := dcc.MaxLRUCacheEntry if maxDataEntry == 0 { maxDataEntry = defaultMaxLRUCacheEntry } maxFdEntry := dcc.MaxCacheFds if maxFdEntry == 0 { maxFdEntry = defaultMaxCacheFds } bufPool := &sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry) dCache.OnEvicted = func(key string, value interface{}) { value.(*bytes.Buffer).Reset() bufPool.Put(value) } fCache.OnEvicted = func(key string, value interface{}) { value.(*os.File).Close() } // create a cache on an unique directory if err := os.MkdirAll(root, 0700); err != nil { return nil, err } cachePath, err := os.MkdirTemp(root, "") if err != nil { return nil, fmt.Errorf("failed to initialize directory cache: %w", err) } return cache.NewDirectoryCache( cachePath, cache.DirectoryCacheConfig{ SyncAdd: dcc.SyncAdd, DataCache: dCache, FdCache: fCache, BufPool: bufPool, Direct: dcc.Direct, }, ) } // Resolve resolves a layer based on the passed layer blob 
information. func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor, esgzOpts ...metadata.Option) (_ Layer, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Wait if resolving this layer is already running. The result // can hopefully get from the cache. r.resolveLock.Lock(name) defer r.resolveLock.Unlock(name) ctx = log.WithLogger(ctx, log.G(ctx).WithField("src", name)) // First, try to retrieve this layer from the underlying cache. r.layerCacheMu.Lock() c, done, ok := r.layerCache.Get(name) r.layerCacheMu.Unlock() if ok { if l := c.(*layer); l.Check() == nil { log.G(ctx).Debugf("hit layer cache %q", name) return &layerRef{l, done}, nil } // Cached layer is invalid done() r.layerCacheMu.Lock() r.layerCache.Remove(name) r.layerCacheMu.Unlock() } log.G(ctx).Debugf("resolving") // Resolve the blob. blobR, err := r.resolveBlob(ctx, hosts, refspec, desc) if err != nil { return nil, fmt.Errorf("failed to resolve the blob: %w", err) } defer func() { if retErr != nil { blobR.done() } }() fsCache, err := newCache(filepath.Join(r.rootDir, "fscache"), r.config.FSCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create fs cache: %w", err) } defer func() { if retErr != nil { fsCache.Close() } }() // Get a reader for stargz archive. // Each file's read operation is a prioritized task and all background tasks // will be stopped during the execution so this can avoid being disturbed for // NW traffic by background tasks. 
sr := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (n int, err error) { r.backgroundTaskManager.DoPrioritizedTask() defer r.backgroundTaskManager.DonePrioritizedTask() return blobR.ReadAt(p, offset) }), 0, blobR.Size()) // define telemetry hooks to measure latency metrics inside estargz package telemetry := metadata.Telemetry{ GetFooterLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzFooterGet, desc.Digest, start) }, GetTocLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzTocGet, desc.Digest, start) }, DeserializeTocLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.DeserializeTocJSON, desc.Digest, start) }, } additionalDecompressors := []metadata.Decompressor{new(zstdchunked.Decompressor)} if r.additionalDecompressors != nil { additionalDecompressors = append(additionalDecompressors, r.additionalDecompressors(ctx, hosts, refspec, desc)...) } meta, err := r.metadataStore(sr, append(esgzOpts, metadata.WithTelemetry(&telemetry), metadata.WithDecompressors(additionalDecompressors...))...) if err != nil { return nil, err } vr, err := reader.NewReader(meta, fsCache, desc.Digest) if err != nil { return nil, fmt.Errorf("failed to read layer: %w", err) } // Combine layer information together and cache it. l := newLayer(r, desc, blobR, vr) r.layerCacheMu.Lock() cachedL, done2, added := r.layerCache.Add(name, l) r.layerCacheMu.Unlock() if !added { l.close() // layer already exists in the cache. discrad this. } log.G(ctx).Debugf("resolved") return &layerRef{cachedL.(*layer), done2}, nil } // resolveBlob resolves a blob based on the passed layer blob information. func (r *Resolver) resolveBlob(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (_ *blobRef, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Try to retrieve the blob from the underlying cache. 
r.blobCacheMu.Lock() c, done, ok := r.blobCache.Get(name) r.blobCacheMu.Unlock() if ok { if blob := c.(remote.Blob); blob.Check() == nil { return &blobRef{blob, done}, nil } // invalid blob. discard this. done() r.blobCacheMu.Lock() r.blobCache.Remove(name) r.blobCacheMu.Unlock() } httpCache, err := newCache(filepath.Join(r.rootDir, "httpcache"), r.config.HTTPCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create http cache: %w", err) } defer func() { if retErr != nil { httpCache.Close() } }() // Resolve the blob and cache the result. b, err := r.resolver.Resolve(ctx, hosts, refspec, desc, httpCache) if err != nil { return nil, fmt.Errorf("failed to resolve the source: %w", err) } r.blobCacheMu.Lock() cachedB, done, added := r.blobCache.Add(name, b) r.blobCacheMu.Unlock() if !added { b.Close() // blob already exists in the cache. discard this. } return &blobRef{cachedB.(remote.Blob), done}, nil } func newLayer( resolver *Resolver, desc ocispec.Descriptor, blob *blobRef, vr *reader.VerifiableReader, ) *layer { return &layer{ resolver: resolver, desc: desc, blob: blob, verifiableReader: vr, prefetchWaiter: newWaiter(), } } type layer struct { resolver *Resolver desc ocispec.Descriptor blob *blobRef verifiableReader *reader.VerifiableReader prefetchWaiter *waiter prefetchSize int64 prefetchSizeMu sync.Mutex r reader.Reader closed bool closedMu sync.Mutex prefetchOnce sync.Once backgroundFetchOnce sync.Once } func (l *layer) Info() Info { var readTime time.Time if l.r != nil { readTime = l.r.LastOnDemandReadTime() } return Info{ Digest: l.desc.Digest, Size: l.blob.Size(), FetchedSize: l.blob.FetchedSize(), PrefetchSize: l.prefetchedSize(), ReadTime: readTime, } } func (l *layer) prefetchedSize() int64 { l.prefetchSizeMu.Lock() sz := l.prefetchSize l.prefetchSizeMu.Unlock() return sz } func (l *layer) Check() error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Check() } func (l *layer) Refresh(ctx 
context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Refresh(ctx, hosts, refspec, desc) } func (l *layer) Verify(tocDigest digest.Digest) (err error) { if l.isClosed() { return fmt.Errorf("layer is already closed") } if l.r != nil { return nil } l.r, err = l.verifiableReader.VerifyTOC(tocDigest) return } func (l *layer) SkipVerify() { if l.r != nil
l.r = l.verifiableReader.SkipVerify() } func (l *layer) Prefetch(prefetchSize int64) (err error) { l.prefetchOnce.Do(func() { ctx := context.Background() l.resolver.backgroundTaskManager.DoPrioritizedTask() defer l.resolver.backgroundTaskManager.DonePrioritizedTask() err = l.prefetch(ctx, prefetchSize) if err != nil { log.G(ctx).WithError(err).Warnf("failed to prefetch layer=%v", l.desc.Digest) return } log.G(ctx).Debug("completed to prefetch") }) return } func (l *layer) prefetch(ctx context.Context, prefetchSize int64) error { defer l.prefetchWaiter.done() // Notify the completion // Measuring the total time to complete prefetch (use defer func() because l.Info().PrefetchSize is set later) start := time.Now() defer func() { commonmetrics.WriteLatencyWithBytesLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchTotal, start, commonmetrics.PrefetchSize, l.prefetchedSize()) }() if l.isClosed() { return fmt.Errorf("layer is already closed") } rootID := l.verifiableReader.Metadata().RootID() if _, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.NoPrefetchLandmark); err == nil { // do not prefetch this layer return nil } else if id, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.PrefetchLandmark); err == nil { offset, err := l.verifiableReader.Metadata().GetOffset(id) if err != nil { return fmt.Errorf("failed to get offset of prefetch landmark: %w", err) } // override the prefetch size with optimized value prefetchSize = offset } else if prefetchSize > l.blob.Size() { // adjust prefetch size not to exceed the whole layer size prefetchSize = l.blob.Size() } // Fetch the target range downloadStart := time.Now() err := l.blob.Cache(0, prefetchSize) commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDownload, downloadStart) // time to download prefetch data if err != nil { return fmt.Errorf("failed to prefetch layer: %w", err) } // Set prefetch size for metrics after prefetch completed l.prefetchSizeMu.Lock() 
l.prefetchSize = prefetchSize l.prefetchSizeMu.Unlock() // Cache uncompressed contents of the prefetched range decompressStart := time.Now() err = l.verifiableReader.Cache(reader.WithFilter(func(offset int64) bool { return offset < prefetchSize // Cache only prefetch target })) commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDecompress, decompressStart) // time to decompress prefetch data if err != nil { return fmt.Errorf("failed to cache prefetched layer: %w", err) } return nil } func (l *layer) WaitForPrefetchCompletion() error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.prefetchWaiter.wait(l.resolver.prefetchTimeout) } func (l *layer) BackgroundFetch() (err error) { l.backgroundFetchOnce.Do(func() { ctx := context.Background() err = l.backgroundFetch(ctx) if err != nil { log.G(ctx).WithError(err).Warnf("failed to fetch whole layer=%v", l.desc.Digest) return } log.G(ctx).Debug("completed to fetch all layer data in background") }) return } func (l *layer) backgroundFetch(ctx context.Context) error { defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchTotal, time.Now()) if l.isClosed() { return fmt.Errorf("layer is already closed") } br := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (retN int, retErr error) { l.resolver.backgroundTaskManager.InvokeBackgroundTask(func(ctx context.Context) { // Measuring the time to download background fetch data (in milliseconds) defer commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.BackgroundFetchDownload, l.Info().Digest, time.Now()) // time to download background fetch data retN, retErr = l.blob.ReadAt( p, offset, remote.WithContext(ctx), // Make cancellable remote.WithCacheOpts(cache.Direct()), // Do not pollute mem cache ) }, 120*time.Second) return }), 0, l.blob.Size()) defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchDecompress, time.Now()) // time to decompress 
background fetch data (in milliseconds) return l.verifiableReader.Cache( reader.WithReader(br), // Read contents in background reader.WithCacheOpts(cache.Direct()), // Do not pollute mem cache ) } func (l *layerRef) Done() { l.done() } func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) { if l.isClosed() { return nil, fmt.Errorf("layer is already closed") } if l.r == nil { return nil, fmt.Errorf("layer hasn't been verified yet") } return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType) } func (l *layer) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) { return l.blob.ReadAt(p, offset, opts...) } func (l *layer) close() error { l.closedMu.Lock() defer l.closedMu.Unlock() if l.closed { return nil } l.closed = true defer l.blob.done() // Close reader first, then close the blob l.verifiableReader.Close() if l.r != nil { return l.r.Close() } return nil } func (l *layer) isClosed() bool { l.closedMu.Lock() closed := l.closed l.closedMu.Unlock() return closed } // blobRef is a reference to the blob in the cache. Calling `done` decreases the reference counter // of this blob in the underlying cache. When nobody refers to the blob in the cache, resources bound // to this blob will be discarded. type blobRef struct { remote.Blob done func() } // layerRef is a reference to the layer in the cache. Calling `Done` or `done` decreases the // reference counter of this blob in the underlying cache. When nobody refers to the layer in the // cache, resources bound to this layer will be discarded. 
type layerRef struct { *layer done func() } func newWaiter() *waiter { return &waiter{ completionCond: sync.NewCond(&sync.Mutex{}), } } type waiter struct { isDone bool isDoneMu sync.Mutex completionCond *sync.Cond } func (w *waiter) done() { w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() } func (w *waiter) wait(timeout time.Duration) error { wait := func() <-chan struct{} { ch := make(chan struct{}) go func() { w.isDoneMu.Lock() isDone := w.isDone w.isDoneMu.Unlock() w.completionCond.L.Lock() if !isDone { w.completionCond.Wait() } w.completionCond.L.Unlock() ch <- struct{}{} }() return ch } select { case <-time.After(timeout): w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() return fmt.Errorf("timeout(%v)", timeout) case <-wait(): return nil } } type readerAtFunc func([]byte, int64) (int, error) func (f readerAtFunc) ReadAt(p []byte, offset int64) (int, error) { return f(p, offset) }
{ return }
conditional_block
layer.go
/* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package layer import ( "bytes" "context" "fmt" "io" "os" "path/filepath" "sync" "time" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" "github.com/containerd/stargz-snapshotter/cache" "github.com/containerd/stargz-snapshotter/estargz" "github.com/containerd/stargz-snapshotter/estargz/zstdchunked" "github.com/containerd/stargz-snapshotter/fs/config" commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common" "github.com/containerd/stargz-snapshotter/fs/reader" "github.com/containerd/stargz-snapshotter/fs/remote" "github.com/containerd/stargz-snapshotter/fs/source" "github.com/containerd/stargz-snapshotter/metadata" "github.com/containerd/stargz-snapshotter/task" "github.com/containerd/stargz-snapshotter/util/cacheutil" "github.com/containerd/stargz-snapshotter/util/namedmutex" fusefs "github.com/hanwen/go-fuse/v2/fs" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) const ( defaultResolveResultEntryTTLSec = 120 defaultMaxLRUCacheEntry = 10 defaultMaxCacheFds = 10 defaultPrefetchTimeoutSec = 10 memoryCacheType = "memory" ) // Layer represents a layer. type Layer interface { // Info returns the information of this layer. 
Info() Info // RootNode returns the root node of this layer. RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) // Check checks if the layer is still connectable. Check() error // Refresh refreshes the layer connection. Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error // Verify verifies this layer using the passed TOC Digest. // Nop if Verify() or SkipVerify() was already called. Verify(tocDigest digest.Digest) (err error) // SkipVerify skips verification for this layer. // Nop if Verify() or SkipVerify() was already called. SkipVerify() // Prefetch prefetches the specified size. If the layer is eStargz and contains landmark files, // the range indicated by these files is respected. Prefetch(prefetchSize int64) error // ReadAt reads this layer. ReadAt([]byte, int64, ...remote.Option) (int, error) // WaitForPrefetchCompletion waits untils Prefetch completes. WaitForPrefetchCompletion() error // BackgroundFetch fetches the entire layer contents to the cache. // Fetching contents is done as a background task. BackgroundFetch() error // Done releases the reference to this layer. The resources related to this layer will be // discarded sooner or later. Queries after calling this function won't be serviced. Done() } // Info is the current status of a layer. type Info struct { Digest digest.Digest Size int64 // layer size in bytes FetchedSize int64 // layer fetched size in bytes PrefetchSize int64 // layer prefetch size in bytes ReadTime time.Time // last time the layer was read } // Resolver resolves the layer location and provieds the handler of that layer. 
type Resolver struct { rootDir string resolver *remote.Resolver prefetchTimeout time.Duration layerCache *cacheutil.TTLCache layerCacheMu sync.Mutex blobCache *cacheutil.TTLCache blobCacheMu sync.Mutex backgroundTaskManager *task.BackgroundTaskManager resolveLock *namedmutex.NamedMutex config config.Config metadataStore metadata.Store overlayOpaqueType OverlayOpaqueType additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor } // NewResolver returns a new layer resolver. func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, cfg config.Config, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store, overlayOpaqueType OverlayOpaqueType, additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor) (*Resolver, error) { resolveResultEntryTTL := time.Duration(cfg.ResolveResultEntryTTLSec) * time.Second if resolveResultEntryTTL == 0 { resolveResultEntryTTL = defaultResolveResultEntryTTLSec * time.Second } prefetchTimeout := time.Duration(cfg.PrefetchTimeoutSec) * time.Second if prefetchTimeout == 0 { prefetchTimeout = defaultPrefetchTimeoutSec * time.Second } // layerCache caches resolved layers for future use. This is useful in a use-case where // the filesystem resolves and caches all layers in an image (not only queried one) in parallel, // before they are actually queried. layerCache := cacheutil.NewTTLCache(resolveResultEntryTTL) layerCache.OnEvicted = func(key string, value interface{}) { if err := value.(*layer).close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up layer") return } logrus.WithField("key", key).Debugf("cleaned up layer") } // blobCache caches resolved blobs for futural use. This is especially useful when a layer // isn't eStargz/stargz (the *layer object won't be created/cached in this case). 
blobCache := cacheutil.NewTTLCache(resolveResultEntryTTL) blobCache.OnEvicted = func(key string, value interface{}) { if err := value.(remote.Blob).Close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up blob") return } logrus.WithField("key", key).Debugf("cleaned up blob") } if err := os.MkdirAll(root, 0700); err != nil { return nil, err } return &Resolver{ rootDir: root, resolver: remote.NewResolver(cfg.BlobConfig, resolveHandlers), layerCache: layerCache, blobCache: blobCache, prefetchTimeout: prefetchTimeout, backgroundTaskManager: backgroundTaskManager, config: cfg, resolveLock: new(namedmutex.NamedMutex), metadataStore: metadataStore, overlayOpaqueType: overlayOpaqueType, additionalDecompressors: additionalDecompressors, }, nil } func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache, error) { if cacheType == memoryCacheType { return cache.NewMemoryCache(), nil } dcc := cfg.DirectoryCacheConfig maxDataEntry := dcc.MaxLRUCacheEntry if maxDataEntry == 0 { maxDataEntry = defaultMaxLRUCacheEntry } maxFdEntry := dcc.MaxCacheFds if maxFdEntry == 0 { maxFdEntry = defaultMaxCacheFds } bufPool := &sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry) dCache.OnEvicted = func(key string, value interface{}) { value.(*bytes.Buffer).Reset() bufPool.Put(value) } fCache.OnEvicted = func(key string, value interface{}) { value.(*os.File).Close() } // create a cache on an unique directory if err := os.MkdirAll(root, 0700); err != nil { return nil, err } cachePath, err := os.MkdirTemp(root, "") if err != nil { return nil, fmt.Errorf("failed to initialize directory cache: %w", err) } return cache.NewDirectoryCache( cachePath, cache.DirectoryCacheConfig{ SyncAdd: dcc.SyncAdd, DataCache: dCache, FdCache: fCache, BufPool: bufPool, Direct: dcc.Direct, }, ) } // Resolve resolves a layer based on the passed layer blob 
information. func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor, esgzOpts ...metadata.Option) (_ Layer, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Wait if resolving this layer is already running. The result // can hopefully get from the cache. r.resolveLock.Lock(name) defer r.resolveLock.Unlock(name) ctx = log.WithLogger(ctx, log.G(ctx).WithField("src", name)) // First, try to retrieve this layer from the underlying cache. r.layerCacheMu.Lock() c, done, ok := r.layerCache.Get(name) r.layerCacheMu.Unlock() if ok { if l := c.(*layer); l.Check() == nil { log.G(ctx).Debugf("hit layer cache %q", name) return &layerRef{l, done}, nil } // Cached layer is invalid done() r.layerCacheMu.Lock() r.layerCache.Remove(name)
} log.G(ctx).Debugf("resolving") // Resolve the blob. blobR, err := r.resolveBlob(ctx, hosts, refspec, desc) if err != nil { return nil, fmt.Errorf("failed to resolve the blob: %w", err) } defer func() { if retErr != nil { blobR.done() } }() fsCache, err := newCache(filepath.Join(r.rootDir, "fscache"), r.config.FSCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create fs cache: %w", err) } defer func() { if retErr != nil { fsCache.Close() } }() // Get a reader for stargz archive. // Each file's read operation is a prioritized task and all background tasks // will be stopped during the execution so this can avoid being disturbed for // NW traffic by background tasks. sr := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (n int, err error) { r.backgroundTaskManager.DoPrioritizedTask() defer r.backgroundTaskManager.DonePrioritizedTask() return blobR.ReadAt(p, offset) }), 0, blobR.Size()) // define telemetry hooks to measure latency metrics inside estargz package telemetry := metadata.Telemetry{ GetFooterLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzFooterGet, desc.Digest, start) }, GetTocLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzTocGet, desc.Digest, start) }, DeserializeTocLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.DeserializeTocJSON, desc.Digest, start) }, } additionalDecompressors := []metadata.Decompressor{new(zstdchunked.Decompressor)} if r.additionalDecompressors != nil { additionalDecompressors = append(additionalDecompressors, r.additionalDecompressors(ctx, hosts, refspec, desc)...) } meta, err := r.metadataStore(sr, append(esgzOpts, metadata.WithTelemetry(&telemetry), metadata.WithDecompressors(additionalDecompressors...))...) 
if err != nil { return nil, err } vr, err := reader.NewReader(meta, fsCache, desc.Digest) if err != nil { return nil, fmt.Errorf("failed to read layer: %w", err) } // Combine layer information together and cache it. l := newLayer(r, desc, blobR, vr) r.layerCacheMu.Lock() cachedL, done2, added := r.layerCache.Add(name, l) r.layerCacheMu.Unlock() if !added { l.close() // layer already exists in the cache. discrad this. } log.G(ctx).Debugf("resolved") return &layerRef{cachedL.(*layer), done2}, nil } // resolveBlob resolves a blob based on the passed layer blob information. func (r *Resolver) resolveBlob(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (_ *blobRef, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Try to retrieve the blob from the underlying cache. r.blobCacheMu.Lock() c, done, ok := r.blobCache.Get(name) r.blobCacheMu.Unlock() if ok { if blob := c.(remote.Blob); blob.Check() == nil { return &blobRef{blob, done}, nil } // invalid blob. discard this. done() r.blobCacheMu.Lock() r.blobCache.Remove(name) r.blobCacheMu.Unlock() } httpCache, err := newCache(filepath.Join(r.rootDir, "httpcache"), r.config.HTTPCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create http cache: %w", err) } defer func() { if retErr != nil { httpCache.Close() } }() // Resolve the blob and cache the result. b, err := r.resolver.Resolve(ctx, hosts, refspec, desc, httpCache) if err != nil { return nil, fmt.Errorf("failed to resolve the source: %w", err) } r.blobCacheMu.Lock() cachedB, done, added := r.blobCache.Add(name, b) r.blobCacheMu.Unlock() if !added { b.Close() // blob already exists in the cache. discard this. 
} return &blobRef{cachedB.(remote.Blob), done}, nil } func newLayer( resolver *Resolver, desc ocispec.Descriptor, blob *blobRef, vr *reader.VerifiableReader, ) *layer { return &layer{ resolver: resolver, desc: desc, blob: blob, verifiableReader: vr, prefetchWaiter: newWaiter(), } } type layer struct { resolver *Resolver desc ocispec.Descriptor blob *blobRef verifiableReader *reader.VerifiableReader prefetchWaiter *waiter prefetchSize int64 prefetchSizeMu sync.Mutex r reader.Reader closed bool closedMu sync.Mutex prefetchOnce sync.Once backgroundFetchOnce sync.Once } func (l *layer) Info() Info { var readTime time.Time if l.r != nil { readTime = l.r.LastOnDemandReadTime() } return Info{ Digest: l.desc.Digest, Size: l.blob.Size(), FetchedSize: l.blob.FetchedSize(), PrefetchSize: l.prefetchedSize(), ReadTime: readTime, } } func (l *layer) prefetchedSize() int64 { l.prefetchSizeMu.Lock() sz := l.prefetchSize l.prefetchSizeMu.Unlock() return sz } func (l *layer) Check() error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Check() } func (l *layer) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Refresh(ctx, hosts, refspec, desc) } func (l *layer) Verify(tocDigest digest.Digest) (err error) { if l.isClosed() { return fmt.Errorf("layer is already closed") } if l.r != nil { return nil } l.r, err = l.verifiableReader.VerifyTOC(tocDigest) return } func (l *layer) SkipVerify() { if l.r != nil { return } l.r = l.verifiableReader.SkipVerify() } func (l *layer) Prefetch(prefetchSize int64) (err error) { l.prefetchOnce.Do(func() { ctx := context.Background() l.resolver.backgroundTaskManager.DoPrioritizedTask() defer l.resolver.backgroundTaskManager.DonePrioritizedTask() err = l.prefetch(ctx, prefetchSize) if err != nil { log.G(ctx).WithError(err).Warnf("failed to prefetch layer=%v", l.desc.Digest) return 
} log.G(ctx).Debug("completed to prefetch") }) return } func (l *layer) prefetch(ctx context.Context, prefetchSize int64) error { defer l.prefetchWaiter.done() // Notify the completion // Measuring the total time to complete prefetch (use defer func() because l.Info().PrefetchSize is set later) start := time.Now() defer func() { commonmetrics.WriteLatencyWithBytesLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchTotal, start, commonmetrics.PrefetchSize, l.prefetchedSize()) }() if l.isClosed() { return fmt.Errorf("layer is already closed") } rootID := l.verifiableReader.Metadata().RootID() if _, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.NoPrefetchLandmark); err == nil { // do not prefetch this layer return nil } else if id, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.PrefetchLandmark); err == nil { offset, err := l.verifiableReader.Metadata().GetOffset(id) if err != nil { return fmt.Errorf("failed to get offset of prefetch landmark: %w", err) } // override the prefetch size with optimized value prefetchSize = offset } else if prefetchSize > l.blob.Size() { // adjust prefetch size not to exceed the whole layer size prefetchSize = l.blob.Size() } // Fetch the target range downloadStart := time.Now() err := l.blob.Cache(0, prefetchSize) commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDownload, downloadStart) // time to download prefetch data if err != nil { return fmt.Errorf("failed to prefetch layer: %w", err) } // Set prefetch size for metrics after prefetch completed l.prefetchSizeMu.Lock() l.prefetchSize = prefetchSize l.prefetchSizeMu.Unlock() // Cache uncompressed contents of the prefetched range decompressStart := time.Now() err = l.verifiableReader.Cache(reader.WithFilter(func(offset int64) bool { return offset < prefetchSize // Cache only prefetch target })) commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDecompress, decompressStart) // time to decompress 
prefetch data if err != nil { return fmt.Errorf("failed to cache prefetched layer: %w", err) } return nil } func (l *layer) WaitForPrefetchCompletion() error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.prefetchWaiter.wait(l.resolver.prefetchTimeout) } func (l *layer) BackgroundFetch() (err error) { l.backgroundFetchOnce.Do(func() { ctx := context.Background() err = l.backgroundFetch(ctx) if err != nil { log.G(ctx).WithError(err).Warnf("failed to fetch whole layer=%v", l.desc.Digest) return } log.G(ctx).Debug("completed to fetch all layer data in background") }) return } func (l *layer) backgroundFetch(ctx context.Context) error { defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchTotal, time.Now()) if l.isClosed() { return fmt.Errorf("layer is already closed") } br := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (retN int, retErr error) { l.resolver.backgroundTaskManager.InvokeBackgroundTask(func(ctx context.Context) { // Measuring the time to download background fetch data (in milliseconds) defer commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.BackgroundFetchDownload, l.Info().Digest, time.Now()) // time to download background fetch data retN, retErr = l.blob.ReadAt( p, offset, remote.WithContext(ctx), // Make cancellable remote.WithCacheOpts(cache.Direct()), // Do not pollute mem cache ) }, 120*time.Second) return }), 0, l.blob.Size()) defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchDecompress, time.Now()) // time to decompress background fetch data (in milliseconds) return l.verifiableReader.Cache( reader.WithReader(br), // Read contents in background reader.WithCacheOpts(cache.Direct()), // Do not pollute mem cache ) } func (l *layerRef) Done() { l.done() } func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) { if l.isClosed() { return nil, fmt.Errorf("layer is already closed") } if l.r == nil { return 
nil, fmt.Errorf("layer hasn't been verified yet") } return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType) } func (l *layer) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) { return l.blob.ReadAt(p, offset, opts...) } func (l *layer) close() error { l.closedMu.Lock() defer l.closedMu.Unlock() if l.closed { return nil } l.closed = true defer l.blob.done() // Close reader first, then close the blob l.verifiableReader.Close() if l.r != nil { return l.r.Close() } return nil } func (l *layer) isClosed() bool { l.closedMu.Lock() closed := l.closed l.closedMu.Unlock() return closed } // blobRef is a reference to the blob in the cache. Calling `done` decreases the reference counter // of this blob in the underlying cache. When nobody refers to the blob in the cache, resources bound // to this blob will be discarded. type blobRef struct { remote.Blob done func() } // layerRef is a reference to the layer in the cache. Calling `Done` or `done` decreases the // reference counter of this blob in the underlying cache. When nobody refers to the layer in the // cache, resources bound to this layer will be discarded. 
type layerRef struct { *layer done func() } func newWaiter() *waiter { return &waiter{ completionCond: sync.NewCond(&sync.Mutex{}), } } type waiter struct { isDone bool isDoneMu sync.Mutex completionCond *sync.Cond } func (w *waiter) done() { w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() } func (w *waiter) wait(timeout time.Duration) error { wait := func() <-chan struct{} { ch := make(chan struct{}) go func() { w.isDoneMu.Lock() isDone := w.isDone w.isDoneMu.Unlock() w.completionCond.L.Lock() if !isDone { w.completionCond.Wait() } w.completionCond.L.Unlock() ch <- struct{}{} }() return ch } select { case <-time.After(timeout): w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() return fmt.Errorf("timeout(%v)", timeout) case <-wait(): return nil } } type readerAtFunc func([]byte, int64) (int, error) func (f readerAtFunc) ReadAt(p []byte, offset int64) (int, error) { return f(p, offset) }
r.layerCacheMu.Unlock()
random_line_split
app.py
from data.db_functions import DBFunctions from functions.email_functions import send_mail from data.create_recommender import get_beer_columns, melt_user_item_matrix import numpy as np import pandas as pd import streamlit as st from streamlit.hashing import _CodeHasher from streamlit.report_thread import get_report_ctx from streamlit.server.server import Server from time import sleep from turicreate import load_model, SFrame pd.options.mode.chained_assignment = None st.set_page_config(layout="wide") def main(): state = _get_state() pages = { "Pesquisa": display_pesquisa, "Recomendações": display_sugestoes, } st.sidebar.title(":bookmark_tabs: MENU") page = st.sidebar.selectbox( "", tuple(pages.keys()) ) # Display the selected page with the session state pages[page](state) # Mandatory to avoid rollbacks with widgets, must be called at the end of your app state.sync() @st.cache def get_beer_list(): db = DBFunctions() return db.get_df_from_query('beer_list') def display_pesquisa(state): st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True) st.markdown( '<style>div[role="radiogroup"] > :first-child{display: none !important;}</style>', unsafe_allow_html=True ) st.image('fig/terabeer_banner.jpeg') st.markdown(''' ## Olá, que bom que você veio! O TeraBeer é um sistema de recomendação de cervejas artesanais brasileiras baseado no seu paladar e no uso de Inteligência Artificial. Antes de mais nada, confirme que você tem mais de 18 anos: ''') if st.checkbox('Sim, tenho mais de 18 anos, internet!', False): st.text("") st.markdown("![Sei...](https://media.giphy.com/media/VhLc1Mb9HlPo2Jo2ZG/giphy.gif)") st.text("") st.markdown(''' ## :pencil: **PESQUISA** Agora responda as duas perguntas a seguir para gerar as suas recomendações. ''') options = ['', 'Gosto', 'Indiferente', 'Não gosto', 'Desconheço'] st.markdown(''' ### QUAL A SUA OPINIÃO SOBRE OS **ALIMENTOS E BEBIDAS** ABAIXO? 
''') st.text("") taste_questions = { # Key matches column names used in training, value is displayed in forms 'Alimento Chocolate amargo': 'Chocolate 70% cacau', 'Alimento Beringela': 'Beringela', 'Alimento Folhas escuras': 'Folhas escuras', 'Alimento Mel': 'Mel', 'Alimento Chocolate ao leite': 'Chocolate ao leite', 'Alimento Oreo': "Cookies & Cream", 'Alimento Salgadinho': 'Batata chips', 'Alimento Tomate': 'Tomate', 'Alimento Margherita': 'Margarita', 'Alimento Limonada': 'Limonada', 'Alimento Laranja': 'Laranja', 'Alimento Maracujá': 'Maracujá', 'Alimento Tangerina': 'Mexerica/tangerina', 'Alimento Pimentas': 'Pimenta', 'Alimento Cravo': 'Cravo', 'Alimento Banana': 'Banana', 'Alimento Gengibre': 'Gengibre', 'Alimento Canela': 'Canela', 'Alimento Bacon': 'Bacon', 'Alimento Café': 'Café sem açúcar' } feat_paladar = {} for feature_name, question in taste_questions.items(): feat_paladar[feature_name] = st.radio(question, options, index=1) st.text("") st.markdown('### QUAL A SUA OPINIÃO SOBRE OS SEGUINTES **ESTILOS DE CERVEJA**?') st.text("") beer_questions = { 'Cerveja Pilsen': 'Pilsen/Lager', 'Cerveja Blonde': 'Golden Ale/Blonde Ale', 'Cerveja Trigo': 'Trigo (Weiss)', 'Cerveja APA': 'American Pale Ale (APA)', 'Cerveja IPA': 'India Pale Ale (IPA)', 'Cerveja Session IPA': 'Session IPA', 'Cerveja NEIPA': 'New England IPA/Juice IPA', 'Cerveja Porter': 'Porter/Stout', 'Cerveja Malzbier': 'Dunkel/Malzbier', 'Cerveja Witbier': 'Witbier', 'Cerveja Sour': 'Fruit Beer/Sour', 'Cerveja RIS': 'Russian Imperial Stout/Pastry Stout', 'Cerveja Lambic': 'Lambic' } for feature_name, question in beer_questions.items(): feat_paladar[feature_name] = st.radio(question, options, index=4) st.text("") st.text("") exclude_known = st.checkbox('Desejo receber recomendações somente de estilos que eu não conheço', True) df_paladar = pd.DataFrame([feat_paladar], index=[-1]) # User-item matrix preference_map = { "Gosto": 1, "Não gosto": 0, "Indiferente": 0.5, "Desconheço": np.nan } 
df_paladar.replace(preference_map, inplace=True) melt_df = melt_user_item_matrix(df_paladar) new_observation_data = melt_df # st.dataframe(new_observation_data) recommendable_beers = get_beer_columns(df_paladar) recommendable_beers.remove('Cerveja Pilsen') if not exclude_known: # Exclude beers user doesn't like at all if known beers can be recommended dislike_beers = melt_df[melt_df['rating'] < 1]['product'].to_list() for dislike_beer in dislike_beers: if dislike_beer in recommendable_beers: recommendable_beers.remove(dislike_beer) st.text("") st.text("") st.text("") if st.button('Gerar recomendações'): model = load_model('data/recommending_system') if len(recommendable_beers) == 0: st.error('Não temos nenhuma cerveja para te recomendar :/') else: with st.spinner(text='Aguarde um instante enquanto analisamos as suas respostas...'): sleep(4) # Pretend making recommendations takes a while. Actually they are pretty fast recommendations = model.recommend( users=[-1], k=3, items=recommendable_beers, new_observation_data=SFrame(new_observation_data), exclude_known=exclude_known, ).to_dataframe() # st.dataframe(recommendations) if recommendations.empty and exclude_known: st.error('Você conhece muitas cervejas ein?! Que tal desmarcar a caixa acima?') else: st.success('Pronto! Selecione no menu à esquerda a página Recomendações.') sleep(3) state.recommendations, state.paladar = recommendations, df_paladar def display_sugestoes(state): st.title(':beers: CERVEJAS RECOMENDADAS') st.markdown(''' Estas são as cervejas artesanais brasileiras **mais recomendadas para você**. Ao final, você poderá enviar a lista de cervejas para o seu e-mail. 
''') recommendations, df_paladar = state.recommendations, state.paladar # st.dataframe(df_paladar) # st.dataframe(recommendations) if not isinstance(recommendations, pd.DataFrame): st.error('Sua sessão expirou, responda novamente o formulário para ver as suas recomendações.') else: rename_beer_styles = { 'Cerveja Blonde': 'Blonde Ale', 'Cerveja Trigo': 'Weiss (Trigo)', 'Cerveja APA': 'American Pale Ale', 'Cerveja IPA': 'India Pale Ale', 'Cerveja Session IPA': 'Session IPA', 'Cerveja NEIPA': 'New England IPA', 'Cerveja Porter': 'Porter/Stout', 'Cerveja Malzbier': 'Dunkel/Malzbier', 'Cerveja Witbier': 'Witbier', 'Cerveja Sour': 'Sour/Fruit', 'Cerveja RIS': 'Russian Imperial Stout', 'Cerveja Lambic': 'Lambic' } recommendations.replace({'product': rename_beer_styles}, inplace=True) with st.spinner('Buscando cervejas...'): df_cervejas = get_beer_list() recommended_labels = pd.merge(recommendations, df_cervejas, left_on='product', right_on='terabeer_style') recommended_labels.sort_values(by=['score', 'ratings_avg'], ascending=[False, False]) # st.dataframe(recommended_labels) origins = recommended_labels['origin_state'].unique().tolist() origin_filter = st.multiselect("Filtrar por estado:", origins, default=origins) filtered_labels = recommended_labels[recommended_labels['origin_state'].isin(origin_filter)] max_beers = st.slider('Número máximo de rótulos por estilo', 1, 5, 3) df_style_1 = filtered_labels[filtered_labels['rank'] == 1] df_style_2 = filtered_labels[filtered_labels['rank'] == 2] df_style_3 = filtered_labels[filtered_labels['rank'] == 3] markdown_list = [] image_list = [] for df_style in [df_style_1, df_style_2, df_style_3]: if not df_style.empty: df_style.reset_index(drop=True, inplace=True) style_name = df_style['terabeer_style'][0] style_rank = df_style['rank'][0] style_score = df_style['score'][0] style_description = df_style['style_description'][0] style_harmonization = df_style['harmonization'][0] if style_harmonization: harmonization_line = f'<br><br> 
<b>Harmoniza bem com</b>: {style_harmonization}' else: harmonization_line = '' style_markdown = f""" <div> <br> <h2> Estilo {style_rank}: <b>{style_name}</b> ({style_score:.1%} recomendado para você) </h2> <br> <p> <b>Descrição</b>: {style_description} {harmonization_line} </p> <br> </div> """ st.markdown(style_markdown, unsafe_allow_html=True) markdown_list.append(style_markdown) for index, row in df_style.iloc[0:max_beers, :].iterrows(): beer = row['name'] brewery = row['brand'] abv = row['abv'] ibu = row['ibu'] avg_rating = row['ratings_avg'] count_ratings = int(row['ratings_count']) figure = row['figure'] ratings_source = row['ratings_source'] ratings_url = row['ratings_url'] origin_state = row['origin_state'] offer_url = row['offer_url'] discount_coupon = row['discount_coupon'] column1, column2 = st.beta_columns((1, 4)) with column1: # Column with beer labels try: st.image(f'fig/{figure}', use_column_width=True) image_list.append(f'fig/{figure}') markdown_list.append( f""" <br> <div> <img src="cid:image{len(image_list)}" alt="Logo" style="width:200px;height:200px;"> </div> """ ) except FileNotFoundError: st.image('fig/placeholder-image.jpg', use_column_width=True) image_list.append('fig/placeholder-image.jpg') markdown_list.append( f""" <br> <div> <img src="cid:image{len(image_list)}" alt="Logo" style="width:200px;height:200px;"> </div> """ ) with column2: # Column with beer characteristics ratings_source_url = f'<a href="{ratings_url}" target="_blank">{ratings_source}</a>' ratings_line = f'{avg_rating:.3} ({count_ratings} avaliações no {ratings_source_url})' ibu_line = f'{int(ibu)} unidades de amargor' if ibu > 0 else 'Indisponível' discount_phrase = f'(Cupom de desconto: {discount_coupon})' if discount_coupon else '' offer_line = f'<b><a href="{offer_url}" target="_blank">Quero!</a></b> {discount_phrase}' beer_markdown = f""" <div> <h3>{beer} - {brewery}</h3> <p> <b>Origem</b>: {origin_state}<br> <b>Nota média</b>: {ratings_line}<br> <b>ABV</b>: {abv}% 
álcool <br> <b>IBU</b>: {ibu_line} <br> {offer_line} </p> </div> """ st.markdown(beer_markdown, unsafe_allow_html=True) markdown_list.append(beer_markdown) st.text("") st.text("") st.markdown("### :mailbox: Para receber a lista acima no seu e-mail, digite-o abaixo e aperte enter:") email = st.text_input('') if email: st.markdown("### Qual seu nome?") name = st.text_input(' ') accept_beer_offers = st.checkbox( 'Aceito receber novidades do TeraBeer.', True ) allow_data_usage = st.checkbox( 'Permito que utilizem minhas respostas para melhorar recomendações futuras.', True ) st.text("") if st.button('Enviar recomendações por email'): with st.spinner(text='Enviando...'):
n, hash_funcs): """Initialize SessionState instance.""" self.__dict__["_state"] = { "data": {}, "hash": None, "hasher": _CodeHasher(hash_funcs), "is_rerun": False, "session": session, } def __call__(self, **kwargs): """Initialize state data once.""" for item, value in kwargs.items(): if item not in self._state["data"]: self._state["data"][item] = value def __getitem__(self, item): """Return a saved state value, None if item is undefined.""" return self._state["data"].get(item, None) def __getattr__(self, item): """Return a saved state value, None if item is undefined.""" return self._state["data"].get(item, None) def __setitem__(self, item, value): """Set state value.""" self._state["data"][item] = value def __setattr__(self, item, value): """Set state value.""" self._state["data"][item] = value def clear(self): """Clear session state and request a rerun.""" self._state["data"].clear() self._state["session"].request_rerun() def sync(self): """Rerun the app with all state values up to date from the beginning to fix rollbacks.""" # Ensure to rerun only once to avoid infinite loops # caused by a constantly changing state value at each run. 
# # Example: state.value += 1 if self._state["is_rerun"]: self._state["is_rerun"] = False elif self._state["hash"] is not None: if self._state["hash"] != self._state["hasher"].to_bytes(self._state["data"], None): self._state["is_rerun"] = True self._state["session"].request_rerun() self._state["hash"] = self._state["hasher"].to_bytes(self._state["data"], None) def _get_session(): session_id = get_report_ctx().session_id session_info = Server.get_current()._get_session_info(session_id) if session_info is None: raise RuntimeError("Couldn't get your Streamlit Session object.") return session_info.session def _get_state(hash_funcs=None): session = _get_session() if not hasattr(session, "_custom_session_state"): session._custom_session_state = _SessionState(session, hash_funcs) return session._custom_session_state if __name__ == "__main__": main()
send_mail(email, name, markdown_list, image_list) st.success('Enviado! Confira sua caixa de entrada e lixo eletrônico.') if accept_beer_offers or allow_data_usage: # Try to send answers to database db = DBFunctions() try: db.send_answers_to_db( email=email, name=name, recommendations=recommendations, df_paladar=df_paladar, accept_beer_offers=accept_beer_offers, allow_data_usage=allow_data_usage, ) except KeyError: pass class _SessionState: def __init__(self, sessio
conditional_block
app.py
from data.db_functions import DBFunctions from functions.email_functions import send_mail from data.create_recommender import get_beer_columns, melt_user_item_matrix import numpy as np import pandas as pd import streamlit as st from streamlit.hashing import _CodeHasher from streamlit.report_thread import get_report_ctx from streamlit.server.server import Server from time import sleep from turicreate import load_model, SFrame pd.options.mode.chained_assignment = None st.set_page_config(layout="wide") def main(): state = _get_state() pages = { "Pesquisa": display_pesquisa, "Recomendações": display_sugestoes, } st.sidebar.title(":bookmark_tabs: MENU") page = st.sidebar.selectbox( "", tuple(pages.keys()) ) # Display the selected page with the session state pages[page](state) # Mandatory to avoid rollbacks with widgets, must be called at the end of your app state.sync() @st.cache def get_beer_list(): db = DBFunctions() return db.get_df_from_query('beer_list') def display_pesquisa(state): st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True) st.markdown( '<style>div[role="radiogroup"] > :first-child{display: none !important;}</style>', unsafe_allow_html=True ) st.image('fig/terabeer_banner.jpeg') st.markdown(''' ## Olá, que bom que você veio! O TeraBeer é um sistema de recomendação de cervejas artesanais brasileiras baseado no seu paladar e no uso de Inteligência Artificial. Antes de mais nada, confirme que você tem mais de 18 anos: ''') if st.checkbox('Sim, tenho mais de 18 anos, internet!', False): st.text("") st.markdown("![Sei...](https://media.giphy.com/media/VhLc1Mb9HlPo2Jo2ZG/giphy.gif)") st.text("") st.markdown(''' ## :pencil: **PESQUISA** Agora responda as duas perguntas a seguir para gerar as suas recomendações. ''') options = ['', 'Gosto', 'Indiferente', 'Não gosto', 'Desconheço'] st.markdown(''' ### QUAL A SUA OPINIÃO SOBRE OS **ALIMENTOS E BEBIDAS** ABAIXO? 
''') st.text("") taste_questions = { # Key matches column names used in training, value is displayed in forms 'Alimento Chocolate amargo': 'Chocolate 70% cacau', 'Alimento Beringela': 'Beringela', 'Alimento Folhas escuras': 'Folhas escuras', 'Alimento Mel': 'Mel', 'Alimento Chocolate ao leite': 'Chocolate ao leite', 'Alimento Oreo': "Cookies & Cream", 'Alimento Salgadinho': 'Batata chips', 'Alimento Tomate': 'Tomate', 'Alimento Margherita': 'Margarita', 'Alimento Limonada': 'Limonada', 'Alimento Laranja': 'Laranja', 'Alimento Maracujá': 'Maracujá', 'Alimento Tangerina': 'Mexerica/tangerina', 'Alimento Pimentas': 'Pimenta', 'Alimento Cravo': 'Cravo', 'Alimento Banana': 'Banana', 'Alimento Gengibre': 'Gengibre', 'Alimento Canela': 'Canela', 'Alimento Bacon': 'Bacon', 'Alimento Café': 'Café sem açúcar' } feat_paladar = {} for feature_name, question in taste_questions.items(): feat_paladar[feature_name] = st.radio(question, options, index=1) st.text("") st.markdown('### QUAL A SUA OPINIÃO SOBRE OS SEGUINTES **ESTILOS DE CERVEJA**?') st.text("") beer_questions = { 'Cerveja Pilsen': 'Pilsen/Lager', 'Cerveja Blonde': 'Golden Ale/Blonde Ale', 'Cerveja Trigo': 'Trigo (Weiss)', 'Cerveja APA': 'American Pale Ale (APA)', 'Cerveja IPA': 'India Pale Ale (IPA)', 'Cerveja Session IPA': 'Session IPA', 'Cerveja NEIPA': 'New England IPA/Juice IPA', 'Cerveja Porter': 'Porter/Stout', 'Cerveja Malzbier': 'Dunkel/Malzbier', 'Cerveja Witbier': 'Witbier', 'Cerveja Sour': 'Fruit Beer/Sour', 'Cerveja RIS': 'Russian Imperial Stout/Pastry Stout', 'Cerveja Lambic': 'Lambic' } for feature_name, question in beer_questions.items(): feat_paladar[feature_name] = st.radio(question, options, index=4) st.text("") st.text("") exclude_known = st.checkbox('Desejo receber recomendações somente de estilos que eu não conheço', True) df_paladar = pd.DataFrame([feat_paladar], index=[-1]) # User-item matrix preference_map = { "Gosto": 1, "Não gosto": 0, "Indiferente": 0.5, "Desconheço": np.nan } 
df_paladar.replace(preference_map, inplace=True) melt_df = melt_user_item_matrix(df_paladar) new_observation_data = melt_df # st.dataframe(new_observation_data) recommendable_beers = get_beer_columns(df_paladar) recommendable_beers.remove('Cerveja Pilsen') if not exclude_known: # Exclude beers user doesn't like at all if known beers can be recommended dislike_beers = melt_df[melt_df['rating'] < 1]['product'].to_list() for dislike_beer in dislike_beers: if dislike_beer in recommendable_beers: recommendable_beers.remove(dislike_beer) st.text("") st.text("") st.text("") if st.button('Gerar recomendações'): model = load_model('data/recommending_system') if len(recommendable_beers) == 0: st.error('Não temos nenhuma cerveja para te recomendar :/') else: with st.spinner(text='Aguarde um instante enquanto analisamos as suas respostas...'): sleep(4) # Pretend making recommendations takes a while. Actually they are pretty fast recommendations = model.recommend( users=[-1], k=3, items=recommendable_beers, new_observation_data=SFrame(new_observation_data), exclude_known=exclude_known, ).to_dataframe() # st.dataframe(recommendations) if recommendations.empty and exclude_known: st.error('Você conhece muitas cervejas ein?! Que tal desmarcar a caixa acima?') else: st.success('Pronto! Selecione no menu à esquerda a página Recomendações.') sleep(3) state.recommendations, state.paladar = recommendations, df_paladar def display_sugestoes(state): st.title(':beers: CERVEJAS RECOMENDADAS') st.markdown(''' Estas são as cervejas artesanais brasileiras **mais recomendadas para você**. Ao final, você poderá enviar a lista de cervejas para o seu e-mail. 
''') recommendations, df_paladar = state.recommendations, state.paladar # st.dataframe(df_paladar) # st.dataframe(recommendations) if not isinstance(recommendations, pd.DataFrame): st.error('Sua sessão expirou, responda novamente o formulário para ver as suas recomendações.') else: rename_beer_styles = { 'Cerveja Blonde': 'Blonde Ale', 'Cerveja Trigo': 'Weiss (Trigo)', 'Cerveja APA': 'American Pale Ale', 'Cerveja IPA': 'India Pale Ale', 'Cerveja Session IPA': 'Session IPA', 'Cerveja NEIPA': 'New England IPA', 'Cerveja Porter': 'Porter/Stout', 'Cerveja Malzbier': 'Dunkel/Malzbier', 'Cerveja Witbier': 'Witbier', 'Cerveja Sour': 'Sour/Fruit', 'Cerveja RIS': 'Russian Imperial Stout', 'Cerveja Lambic': 'Lambic' } recommendations.replace({'product': rename_beer_styles}, inplace=True) with st.spinner('Buscando cervejas...'): df_cervejas = get_beer_list() recommended_labels = pd.merge(recommendations, df_cervejas, left_on='product', right_on='terabeer_style') recommended_labels.sort_values(by=['score', 'ratings_avg'], ascending=[False, False]) # st.dataframe(recommended_labels) origins = recommended_labels['origin_state'].unique().tolist() origin_filter = st.multiselect("Filtrar por estado:", origins, default=origins) filtered_labels = recommended_labels[recommended_labels['origin_state'].isin(origin_filter)] max_beers = st.slider('Número máximo de rótulos por estilo', 1, 5, 3) df_style_1 = filtered_labels[filtered_labels['rank'] == 1] df_style_2 = filtered_labels[filtered_labels['rank'] == 2] df_style_3 = filtered_labels[filtered_labels['rank'] == 3] markdown_list = [] image_list = [] for df_style in [df_style_1, df_style_2, df_style_3]: if not df_style.empty: df_style.reset_index(drop=True, inplace=True) style_name = df_style['terabeer_style'][0] style_rank = df_style['rank'][0] style_score = df_style['score'][0] style_description = df_style['style_description'][0] style_harmonization = df_style['harmonization'][0] if style_harmonization: harmonization_line = f'<br><br> 
<b>Harmoniza bem com</b>: {style_harmonization}' else: harmonization_line = '' style_markdown = f""" <div> <br> <h2> Estilo {style_rank}: <b>{style_name}</b> ({style_score:.1%} recomendado para você) </h2> <br> <p> <b>Descrição</b>: {style_description} {harmonization_line} </p> <br> </div> """ st.markdown(style_markdown, unsafe_allow_html=True) markdown_list.append(style_markdown) for index, row in df_style.iloc[0:max_beers, :].iterrows(): beer = row['name'] brewery = row['brand'] abv = row['abv'] ibu = row['ibu'] avg_rating = row['ratings_avg'] count_ratings = int(row['ratings_count']) figure = row['figure'] ratings_source = row['ratings_source'] ratings_url = row['ratings_url'] origin_state = row['origin_state'] offer_url = row['offer_url'] discount_coupon = row['discount_coupon'] column1, column2 = st.beta_columns((1, 4)) with column1: # Column with beer labels try: st.image(f'fig/{figure}', use_column_width=True) image_list.append(f'fig/{figure}') markdown_list.append( f""" <br> <div> <img src="cid:image{len(image_list)}" alt="Logo" style="width:200px;height:200px;"> </div> """ ) except FileNotFoundError: st.image('fig/placeholder-image.jpg', use_column_width=True) image_list.append('fig/placeholder-image.jpg') markdown_list.append( f""" <br> <div> <img src="cid:image{len(image_list)}" alt="Logo" style="width:200px;height:200px;"> </div> """ ) with column2: # Column with beer characteristics ratings_source_url = f'<a href="{ratings_url}" target="_blank">{ratings_source}</a>' ratings_line = f'{avg_rating:.3} ({count_ratings} avaliações no {ratings_source_url})' ibu_line = f'{int(ibu)} unidades de amargor' if ibu > 0 else 'Indisponível' discount_phrase = f'(Cupom de desconto: {discount_coupon})' if discount_coupon else '' offer_line = f'<b><a href="{offer_url}" target="_blank">Quero!</a></b> {discount_phrase}' beer_markdown = f""" <div> <h3>{beer} - {brewery}</h3> <p> <b>Origem</b>: {origin_state}<br> <b>Nota média</b>: {ratings_line}<br> <b>ABV</b>: {abv}% 
álcool <br> <b>IBU</b>: {ibu_line} <br> {offer_line} </p> </div> """ st.markdown(beer_markdown, unsafe_allow_html=True) markdown_list.append(beer_markdown) st.text("") st.text("") st.markdown("### :mailbox: Para receber a lista acima no seu e-mail, digite-o abaixo e aperte enter:") email = st.text_input('') if email: st.markdown("### Qual seu nome?") name = st.text_input(' ') accept_beer_offers = st.checkbox( 'Aceito receber novidades do TeraBeer.', True ) allow_data_usage = st.checkbox( 'Permito que utilizem minhas respostas para melhorar recomendações futuras.', True ) st.text("") if st.button('Enviar recomendações por email'): with st.spinner(text='Enviando...'): send_mail(email, name, markdown_list, image_list) st.success('Enviado! Confira sua caixa de entrada e lixo eletrônico.') if accept_beer_offers or allow_data_usage: # Try to send answers to database db = DBFunctions() try: db.send_answers_to_db( email=email, name=name, recommendations=recommendations, df_paladar=df_paladar, accept_beer_offers=accept_beer_offers, allow_data_usage=allow_data_usage, ) except KeyError: pass class _SessionState: def __init__(self, session, hash_funcs): """Initialize SessionState instance.""" self.__dict__["_state"] = { "data": {}, "hash": None, "hasher": _CodeHasher(hash_funcs), "is_rerun": False, "session": session, } def __call__(self, **kwargs): """Initialize state data once.""" for item, value in kwargs.items(): if item not in self._state["data"]: self._state["data"][item] = value def __getitem__(self, item): """Return a saved state value, None if item is undefined.""" return self._state["data"].get(item, None) def __getattr__(self, item): """Return a saved state value, None if item is undefined.""" return self._state["data"].get(item, None) def __setitem__(self, item, value): """Set state value.""" self._state["data"][item] = value def __setattr__(self, item, value): """Set state value.""" self._state["data"][item] = value def clear(self): """Clear session state and 
request a rerun.""" self._state["data"].clear() self._state["session"].request_rerun() def sync(self): """Rerun the app with all state values up to date from the
).session_id session_info = Server.get_current()._get_session_info(session_id) if session_info is None: raise RuntimeError("Couldn't get your Streamlit Session object.") return session_info.session def _get_state(hash_funcs=None): session = _get_session() if not hasattr(session, "_custom_session_state"): session._custom_session_state = _SessionState(session, hash_funcs) return session._custom_session_state if __name__ == "__main__": main()
beginning to fix rollbacks.""" # Ensure to rerun only once to avoid infinite loops # caused by a constantly changing state value at each run. # # Example: state.value += 1 if self._state["is_rerun"]: self._state["is_rerun"] = False elif self._state["hash"] is not None: if self._state["hash"] != self._state["hasher"].to_bytes(self._state["data"], None): self._state["is_rerun"] = True self._state["session"].request_rerun() self._state["hash"] = self._state["hasher"].to_bytes(self._state["data"], None) def _get_session(): session_id = get_report_ctx(
identifier_body
app.py
from data.db_functions import DBFunctions from functions.email_functions import send_mail from data.create_recommender import get_beer_columns, melt_user_item_matrix import numpy as np import pandas as pd import streamlit as st from streamlit.hashing import _CodeHasher from streamlit.report_thread import get_report_ctx from streamlit.server.server import Server from time import sleep from turicreate import load_model, SFrame pd.options.mode.chained_assignment = None st.set_page_config(layout="wide") def main(): state = _get_state() pages = { "Pesquisa": display_pesquisa, "Recomendações": display_sugestoes, } st.sidebar.title(":bookmark_tabs: MENU") page = st.sidebar.selectbox( "", tuple(pages.keys()) ) # Display the selected page with the session state pages[page](state) # Mandatory to avoid rollbacks with widgets, must be called at the end of your app state.sync() @st.cache def get_beer_list(): db = DBFunctions() return db.get_df_from_query('beer_list') def display_pesquisa(state): st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True) st.markdown( '<style>div[role="radiogroup"] > :first-child{display: none !important;}</style>', unsafe_allow_html=True ) st.image('fig/terabeer_banner.jpeg') st.markdown(''' ## Olá, que bom que você veio! O TeraBeer é um sistema de recomendação de cervejas artesanais brasileiras baseado no seu paladar e no uso de Inteligência Artificial. Antes de mais nada, confirme que você tem mais de 18 anos: ''') if st.checkbox('Sim, tenho mais de 18 anos, internet!', False): st.text("") st.markdown("![Sei...](https://media.giphy.com/media/VhLc1Mb9HlPo2Jo2ZG/giphy.gif)") st.text("") st.markdown(''' ## :pencil: **PESQUISA** Agora responda as duas perguntas a seguir para gerar as suas recomendações. ''') options = ['', 'Gosto', 'Indiferente', 'Não gosto', 'Desconheço'] st.markdown(''' ### QUAL A SUA OPINIÃO SOBRE OS **ALIMENTOS E BEBIDAS** ABAIXO? 
''') st.text("") taste_questions = { # Key matches column names used in training, value is displayed in forms 'Alimento Chocolate amargo': 'Chocolate 70% cacau', 'Alimento Beringela': 'Beringela', 'Alimento Folhas escuras': 'Folhas escuras', 'Alimento Mel': 'Mel', 'Alimento Chocolate ao leite': 'Chocolate ao leite', 'Alimento Oreo': "Cookies & Cream", 'Alimento Salgadinho': 'Batata chips', 'Alimento Tomate': 'Tomate', 'Alimento Margherita': 'Margarita', 'Alimento Limonada': 'Limonada', 'Alimento Laranja': 'Laranja', 'Alimento Maracujá': 'Maracujá', 'Alimento Tangerina': 'Mexerica/tangerina', 'Alimento Pimentas': 'Pimenta', 'Alimento Cravo': 'Cravo', 'Alimento Banana': 'Banana', 'Alimento Gengibre': 'Gengibre', 'Alimento Canela': 'Canela', 'Alimento Bacon': 'Bacon', 'Alimento Café': 'Café sem açúcar' } feat_paladar = {} for feature_name, question in taste_questions.items(): feat_paladar[feature_name] = st.radio(question, options, index=1) st.text("") st.markdown('### QUAL A SUA OPINIÃO SOBRE OS SEGUINTES **ESTILOS DE CERVEJA**?') st.text("") beer_questions = { 'Cerveja Pilsen': 'Pilsen/Lager', 'Cerveja Blonde': 'Golden Ale/Blonde Ale', 'Cerveja Trigo': 'Trigo (Weiss)', 'Cerveja APA': 'American Pale Ale (APA)', 'Cerveja IPA': 'India Pale Ale (IPA)', 'Cerveja Session IPA': 'Session IPA', 'Cerveja NEIPA': 'New England IPA/Juice IPA', 'Cerveja Porter': 'Porter/Stout', 'Cerveja Malzbier': 'Dunkel/Malzbier', 'Cerveja Witbier': 'Witbier', 'Cerveja Sour': 'Fruit Beer/Sour', 'Cerveja RIS': 'Russian Imperial Stout/Pastry Stout', 'Cerveja Lambic': 'Lambic' } for feature_name, question in beer_questions.items(): feat_paladar[feature_name] = st.radio(question, options, index=4) st.text("") st.text("") exclude_known = st.checkbox('Desejo receber recomendações somente de estilos que eu não conheço', True) df_paladar = pd.DataFrame([feat_paladar], index=[-1]) # User-item matrix preference_map = { "Gosto": 1, "Não gosto": 0, "Indiferente": 0.5, "Desconheço": np.nan } 
df_paladar.replace(preference_map, inplace=True) melt_df = melt_user_item_matrix(df_paladar) new_observation_data = melt_df # st.dataframe(new_observation_data) recommendable_beers = get_beer_columns(df_paladar) recommendable_beers.remove('Cerveja Pilsen') if not exclude_known: # Exclude beers user doesn't like at all if known beers can be recommended dislike_beers = melt_df[melt_df['rating'] < 1]['product'].to_list() for dislike_beer in dislike_beers: if dislike_beer in recommendable_beers: recommendable_beers.remove(dislike_beer) st.text("") st.text("") st.text("") if st.button('Gerar recomendações'): model = load_model('data/recommending_system') if len(recommendable_beers) == 0: st.error('Não temos nenhuma cerveja para te recomendar :/') else: with st.spinner(text='Aguarde um instante enquanto analisamos as suas respostas...'): sleep(4) # Pretend making recommendations takes a while. Actually they are pretty fast recommendations = model.recommend( users=[-1], k=3, items=recommendable_beers, new_observation_data=SFrame(new_observation_data), exclude_known=exclude_known, ).to_dataframe() # st.dataframe(recommendations) if recommendations.empty and exclude_known: st.error('Você conhece muitas cervejas ein?! Que tal desmarcar a caixa acima?') else: st.success('Pronto! Selecione no menu à esquerda a página Recomendações.') sleep(3) state.recommendations, state.paladar = recommendations, df_paladar def display_sugestoes(state): st.title(':beers: CERVEJAS RECOMENDADAS') st.markdown(''' Estas são as cervejas artesanais brasileiras **mais recomendadas para você**. Ao final, você poderá enviar a lista de cervejas para o seu e-mail. 
''') recommendations, df_paladar = state.recommendations, state.paladar # st.dataframe(df_paladar) # st.dataframe(recommendations) if not isinstance(recommendations, pd.DataFrame): st.error('Sua sessão expirou, responda novamente o formulário para ver as suas recomendações.') else: rename_beer_styles = { 'Cerveja Blonde': 'Blonde Ale', 'Cerveja Trigo': 'Weiss (Trigo)', 'Cerveja APA': 'American Pale Ale', 'Cerveja IPA': 'India Pale Ale', 'Cerveja Session IPA': 'Session IPA', 'Cerveja NEIPA': 'New England IPA', 'Cerveja Porter': 'Porter/Stout', 'Cerveja Malzbier': 'Dunkel/Malzbier', 'Cerveja Witbier': 'Witbier', 'Cerveja Sour': 'Sour/Fruit', 'Cerveja RIS': 'Russian Imperial Stout', 'Cerveja Lambic': 'Lambic' } recommendations.replace({'product': rename_beer_styles}, inplace=True) with st.spinner('Buscando cervejas...'): df_cervejas = get_beer_list() recommended_labels = pd.merge(recommendations, df_cervejas, left_on='product', right_on='terabeer_style') recommended_labels.sort_values(by=['score', 'ratings_avg'], ascending=[False, False]) # st.dataframe(recommended_labels) origins = recommended_labels['origin_state'].unique().tolist() origin_filter = st.multiselect("Filtrar por estado:", origins, default=origins) filtered_labels = recommended_labels[recommended_labels['origin_state'].isin(origin_filter)] max_beers = st.slider('Número máximo de rótulos por estilo', 1, 5, 3) df_style_1 = filtered_labels[filtered_labels['rank'] == 1] df_style_2 = filtered_labels[filtered_labels['rank'] == 2] df_style_3 = filtered_labels[filtered_labels['rank'] == 3] markdown_list = [] image_list = [] for df_style in [df_style_1, df_style_2, df_style_3]: if not df_style.empty: df_style.reset_index(drop=True, inplace=True) style_name = df_style['terabeer_style'][0] style_rank = df_style['rank'][0] style_score = df_style['score'][0] style_description = df_style['style_description'][0] style_harmonization = df_style['harmonization'][0] if style_harmonization: harmonization_line = f'<br><br> 
<b>Harmoniza bem com</b>: {style_harmonization}' else: harmonization_line = '' style_markdown = f""" <div> <br> <h2> Estilo {style_rank}: <b>{style_name}</b> ({style_score:.1%} recomendado para você) </h2> <br> <p> <b>Descrição</b>: {style_description} {harmonization_line} </p> <br> </div> """ st.markdown(style_markdown, unsafe_allow_html=True) markdown_list.append(style_markdown) for index, row in df_style.iloc[0:max_beers, :].iterrows(): beer = row['name'] brewery = row['brand'] abv = row['abv'] ibu = row['ibu'] avg_rating = row['ratings_avg'] count_ratings = int(row['ratings_count']) figure = row['figure'] ratings_source = row['ratings_source'] ratings_url = row['ratings_url'] origin_state = row['origin_state'] offer_url = row['offer_url'] discount_coupon = row['discount_coupon'] column1, column2 = st.beta_columns((1, 4)) with column1: # Column with beer labels try: st.image(f'fig/{figure}', use_column_width=True) image_list.append(f'fig/{figure}') markdown_list.append( f""" <br> <div> <img src="cid:image{len(image_list)}" alt="Logo" style="width:200px;height:200px;"> </div> """ ) except FileNotFoundError: st.image('fig/placeholder-image.jpg', use_column_width=True) image_list.append('fig/placeholder-image.jpg') markdown_list.append( f""" <br> <div> <img src="cid:image{len(image_list)}" alt="Logo" style="width:200px;height:200px;"> </div> """ ) with column2: # Column with beer characteristics ratings_source_url = f'<a href="{ratings_url}" target="_blank">{ratings_source}</a>' ratings_line = f'{avg_rating:.3} ({count_ratings} avaliações no {ratings_source_url})' ibu_line = f'{int(ibu)} unidades de amargor' if ibu > 0 else 'Indisponível' discount_phrase = f'(Cupom de desconto: {discount_coupon})' if discount_coupon else '' offer_line = f'<b><a href="{offer_url}" target="_blank">Quero!</a></b> {discount_phrase}' beer_markdown = f""" <div> <h3>{beer} - {brewery}</h3> <p> <b>Origem</b>: {origin_state}<br> <b>Nota média</b>: {ratings_line}<br> <b>ABV</b>: {abv}% 
álcool <br> <b>IBU</b>: {ibu_line} <br> {offer_line} </p> </div> """ st.markdown(beer_markdown, unsafe_allow_html=True) markdown_list.append(beer_markdown) st.text("") st.text("") st.markdown("### :mailbox: Para receber a lista acima no seu e-mail, digite-o abaixo e aperte enter:") email = st.text_input('') if email: st.markdown("### Qual seu nome?") name = st.text_input(' ') accept_beer_offers = st.checkbox( 'Aceito receber novidades do TeraBeer.', True ) allow_data_usage = st.checkbox( 'Permito que utilizem minhas respostas para melhorar recomendações futuras.', True ) st.text("") if st.button('Enviar recomendações por email'): with st.spinner(text='Enviando...'): send_mail(email, name, markdown_list, image_list) st.success('Enviado! Confira sua caixa de entrada e lixo eletrônico.') if accept_beer_offers or allow_data_usage: # Try to send answers to database db = DBFunctions() try: db.send_answers_to_db( email=email, name=name, recommendations=recommendations, df_paladar=df_paladar, accept_beer_offers=accept_beer_offers, allow_data_usage=allow_data_usage, ) except KeyError: pass class _SessionState: def __init__(self, session, hash_funcs): """Initialize SessionState instance.""" self.__dict__["_state"] = { "data": {}, "hash": None, "hasher": _CodeHasher(hash_funcs), "is_rerun": False, "session": session, } def __call__(self, **kwargs): """Initialize state data once.""" for item, value in kwargs.items(): if item not in self._state["data"]: self._state["data"][item] = value def __getitem__(self, item): """Return a saved state value, None if item is undefined.""" return self._state["data"].get(item, None) def __getattr__(self, item): """Return a saved state value, None if item is undefined.""" return self._state["data"].get(item, None) def __setitem__(self, item, value): """Set state value.""" self._state["data"][item] = value def __setattr__(self, item, value): """Set state value.""" self._state["data"][item] = value def clear(self): """Clear session state and 
request a rerun.""" self._state["data"].clear() self._state["session"].request_rerun() def sync(self): """Rerun the app with all state values up to date from the beginning to fix rollbacks.""" # Ensure to rerun only once to avoid infinite loops # caused by a constantly changing state value at each run. # # Example: state.value += 1 if self._state["is_rerun"]: self._state["is_rerun"] = False elif self._state["hash"] is not None: if self._state["hash"] != self._state["hasher"].to_bytes(self._state["data"], None): self._state["is_rerun"] = True self._state["session"].request_rerun() self._state["hash"] = self._state["hasher"].to_bytes(self._state["data"], None) def _get_session(): session_id = get_report_ctx().session_
ion_info = Server.get_current()._get_session_info(session_id) if session_info is None: raise RuntimeError("Couldn't get your Streamlit Session object.") return session_info.session def _get_state(hash_funcs=None): session = _get_session() if not hasattr(session, "_custom_session_state"): session._custom_session_state = _SessionState(session, hash_funcs) return session._custom_session_state if __name__ == "__main__": main()
id sess
identifier_name
app.py
from data.db_functions import DBFunctions from functions.email_functions import send_mail from data.create_recommender import get_beer_columns, melt_user_item_matrix import numpy as np import pandas as pd import streamlit as st from streamlit.hashing import _CodeHasher from streamlit.report_thread import get_report_ctx from streamlit.server.server import Server from time import sleep from turicreate import load_model, SFrame pd.options.mode.chained_assignment = None st.set_page_config(layout="wide") def main(): state = _get_state() pages = { "Pesquisa": display_pesquisa, "Recomendações": display_sugestoes, } st.sidebar.title(":bookmark_tabs: MENU") page = st.sidebar.selectbox( "", tuple(pages.keys()) ) # Display the selected page with the session state pages[page](state) # Mandatory to avoid rollbacks with widgets, must be called at the end of your app state.sync() @st.cache def get_beer_list(): db = DBFunctions() return db.get_df_from_query('beer_list') def display_pesquisa(state): st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True) st.markdown( '<style>div[role="radiogroup"] > :first-child{display: none !important;}</style>', unsafe_allow_html=True ) st.image('fig/terabeer_banner.jpeg') st.markdown(''' ## Olá, que bom que você veio! O TeraBeer é um sistema de recomendação de cervejas artesanais brasileiras baseado no seu paladar e no uso de Inteligência Artificial. Antes de mais nada, confirme que você tem mais de 18 anos: ''') if st.checkbox('Sim, tenho mais de 18 anos, internet!', False): st.text("") st.markdown("![Sei...](https://media.giphy.com/media/VhLc1Mb9HlPo2Jo2ZG/giphy.gif)") st.text("") st.markdown(''' ## :pencil: **PESQUISA** Agora responda as duas perguntas a seguir para gerar as suas recomendações. ''') options = ['', 'Gosto', 'Indiferente', 'Não gosto', 'Desconheço'] st.markdown(''' ### QUAL A SUA OPINIÃO SOBRE OS **ALIMENTOS E BEBIDAS** ABAIXO? 
''') st.text("") taste_questions = { # Key matches column names used in training, value is displayed in forms 'Alimento Chocolate amargo': 'Chocolate 70% cacau', 'Alimento Beringela': 'Beringela', 'Alimento Folhas escuras': 'Folhas escuras', 'Alimento Mel': 'Mel', 'Alimento Chocolate ao leite': 'Chocolate ao leite', 'Alimento Oreo': "Cookies & Cream", 'Alimento Salgadinho': 'Batata chips', 'Alimento Tomate': 'Tomate', 'Alimento Margherita': 'Margarita', 'Alimento Limonada': 'Limonada', 'Alimento Laranja': 'Laranja', 'Alimento Maracujá': 'Maracujá', 'Alimento Tangerina': 'Mexerica/tangerina', 'Alimento Pimentas': 'Pimenta', 'Alimento Cravo': 'Cravo', 'Alimento Banana': 'Banana', 'Alimento Gengibre': 'Gengibre', 'Alimento Canela': 'Canela', 'Alimento Bacon': 'Bacon', 'Alimento Café': 'Café sem açúcar' } feat_paladar = {} for feature_name, question in taste_questions.items(): feat_paladar[feature_name] = st.radio(question, options, index=1) st.text("") st.markdown('### QUAL A SUA OPINIÃO SOBRE OS SEGUINTES **ESTILOS DE CERVEJA**?') st.text("") beer_questions = { 'Cerveja Pilsen': 'Pilsen/Lager', 'Cerveja Blonde': 'Golden Ale/Blonde Ale', 'Cerveja Trigo': 'Trigo (Weiss)', 'Cerveja APA': 'American Pale Ale (APA)', 'Cerveja IPA': 'India Pale Ale (IPA)', 'Cerveja Session IPA': 'Session IPA', 'Cerveja NEIPA': 'New England IPA/Juice IPA', 'Cerveja Porter': 'Porter/Stout', 'Cerveja Malzbier': 'Dunkel/Malzbier', 'Cerveja Witbier': 'Witbier', 'Cerveja Sour': 'Fruit Beer/Sour', 'Cerveja RIS': 'Russian Imperial Stout/Pastry Stout', 'Cerveja Lambic': 'Lambic' } for feature_name, question in beer_questions.items(): feat_paladar[feature_name] = st.radio(question, options, index=4) st.text("") st.text("") exclude_known = st.checkbox('Desejo receber recomendações somente de estilos que eu não conheço', True) df_paladar = pd.DataFrame([feat_paladar], index=[-1]) # User-item matrix preference_map = { "Gosto": 1, "Não gosto": 0, "Indiferente": 0.5, "Desconheço": np.nan } 
df_paladar.replace(preference_map, inplace=True) melt_df = melt_user_item_matrix(df_paladar) new_observation_data = melt_df # st.dataframe(new_observation_data) recommendable_beers = get_beer_columns(df_paladar) recommendable_beers.remove('Cerveja Pilsen') if not exclude_known: # Exclude beers user doesn't like at all if known beers can be recommended dislike_beers = melt_df[melt_df['rating'] < 1]['product'].to_list() for dislike_beer in dislike_beers: if dislike_beer in recommendable_beers: recommendable_beers.remove(dislike_beer) st.text("") st.text("") st.text("") if st.button('Gerar recomendações'): model = load_model('data/recommending_system') if len(recommendable_beers) == 0: st.error('Não temos nenhuma cerveja para te recomendar :/') else: with st.spinner(text='Aguarde um instante enquanto analisamos as suas respostas...'): sleep(4) # Pretend making recommendations takes a while. Actually they are pretty fast recommendations = model.recommend( users=[-1], k=3, items=recommendable_beers, new_observation_data=SFrame(new_observation_data), exclude_known=exclude_known, ).to_dataframe() # st.dataframe(recommendations) if recommendations.empty and exclude_known: st.error('Você conhece muitas cervejas ein?! Que tal desmarcar a caixa acima?') else: st.success('Pronto! Selecione no menu à esquerda a página Recomendações.') sleep(3) state.recommendations, state.paladar = recommendations, df_paladar def display_sugestoes(state): st.title(':beers: CERVEJAS RECOMENDADAS') st.markdown(''' Estas são as cervejas artesanais brasileiras **mais recomendadas para você**. Ao final, você poderá enviar a lista de cervejas para o seu e-mail. 
''') recommendations, df_paladar = state.recommendations, state.paladar # st.dataframe(df_paladar) # st.dataframe(recommendations) if not isinstance(recommendations, pd.DataFrame): st.error('Sua sessão expirou, responda novamente o formulário para ver as suas recomendações.') else: rename_beer_styles = { 'Cerveja Blonde': 'Blonde Ale', 'Cerveja Trigo': 'Weiss (Trigo)', 'Cerveja APA': 'American Pale Ale', 'Cerveja IPA': 'India Pale Ale', 'Cerveja Session IPA': 'Session IPA', 'Cerveja NEIPA': 'New England IPA', 'Cerveja Porter': 'Porter/Stout', 'Cerveja Malzbier': 'Dunkel/Malzbier', 'Cerveja Witbier': 'Witbier', 'Cerveja Sour': 'Sour/Fruit', 'Cerveja RIS': 'Russian Imperial Stout', 'Cerveja Lambic': 'Lambic' } recommendations.replace({'product': rename_beer_styles}, inplace=True) with st.spinner('Buscando cervejas...'): df_cervejas = get_beer_list() recommended_labels = pd.merge(recommendations, df_cervejas, left_on='product', right_on='terabeer_style') recommended_labels.sort_values(by=['score', 'ratings_avg'], ascending=[False, False]) # st.dataframe(recommended_labels) origins = recommended_labels['origin_state'].unique().tolist() origin_filter = st.multiselect("Filtrar por estado:", origins, default=origins) filtered_labels = recommended_labels[recommended_labels['origin_state'].isin(origin_filter)] max_beers = st.slider('Número máximo de rótulos por estilo', 1, 5, 3) df_style_1 = filtered_labels[filtered_labels['rank'] == 1] df_style_2 = filtered_labels[filtered_labels['rank'] == 2] df_style_3 = filtered_labels[filtered_labels['rank'] == 3] markdown_list = [] image_list = [] for df_style in [df_style_1, df_style_2, df_style_3]: if not df_style.empty: df_style.reset_index(drop=True, inplace=True) style_name = df_style['terabeer_style'][0] style_rank = df_style['rank'][0] style_score = df_style['score'][0] style_description = df_style['style_description'][0] style_harmonization = df_style['harmonization'][0] if style_harmonization: harmonization_line = f'<br><br> 
<b>Harmoniza bem com</b>: {style_harmonization}' else: harmonization_line = '' style_markdown = f""" <div> <br> <h2> Estilo {style_rank}: <b>{style_name}</b> ({style_score:.1%} recomendado para você) </h2> <br> <p> <b>Descrição</b>: {style_description} {harmonization_line} </p> <br> </div> """ st.markdown(style_markdown, unsafe_allow_html=True) markdown_list.append(style_markdown) for index, row in df_style.iloc[0:max_beers, :].iterrows(): beer = row['name'] brewery = row['brand'] abv = row['abv'] ibu = row['ibu'] avg_rating = row['ratings_avg'] count_ratings = int(row['ratings_count']) figure = row['figure'] ratings_source = row['ratings_source'] ratings_url = row['ratings_url'] origin_state = row['origin_state'] offer_url = row['offer_url'] discount_coupon = row['discount_coupon'] column1, column2 = st.beta_columns((1, 4)) with column1: # Column with beer labels try: st.image(f'fig/{figure}', use_column_width=True) image_list.append(f'fig/{figure}') markdown_list.append( f""" <br> <div> <img src="cid:image{len(image_list)}" alt="Logo" style="width:200px;height:200px;"> </div> """ ) except FileNotFoundError: st.image('fig/placeholder-image.jpg', use_column_width=True) image_list.append('fig/placeholder-image.jpg') markdown_list.append( f""" <br> <div> <img src="cid:image{len(image_list)}" alt="Logo" style="width:200px;height:200px;"> </div> """ ) with column2: # Column with beer characteristics ratings_source_url = f'<a href="{ratings_url}" target="_blank">{ratings_source}</a>' ratings_line = f'{avg_rating:.3} ({count_ratings} avaliações no {ratings_source_url})' ibu_line = f'{int(ibu)} unidades de amargor' if ibu > 0 else 'Indisponível' discount_phrase = f'(Cupom de desconto: {discount_coupon})' if discount_coupon else '' offer_line = f'<b><a href="{offer_url}" target="_blank">Quero!</a></b> {discount_phrase}' beer_markdown = f""" <div> <h3>{beer} - {brewery}</h3> <p> <b>Origem</b>: {origin_state}<br> <b>Nota média</b>: {ratings_line}<br> <b>ABV</b>: {abv}% 
álcool <br> <b>IBU</b>: {ibu_line} <br> {offer_line} </p> </div> """ st.markdown(beer_markdown, unsafe_allow_html=True) markdown_list.append(beer_markdown) st.text("") st.text("") st.markdown("### :mailbox: Para receber a lista acima no seu e-mail, digite-o abaixo e aperte enter:") email = st.text_input('') if email: st.markdown("### Qual seu nome?") name = st.text_input(' ') accept_beer_offers = st.checkbox( 'Aceito receber novidades do TeraBeer.', True ) allow_data_usage = st.checkbox( 'Permito que utilizem minhas respostas para melhorar recomendações futuras.', True ) st.text("") if st.button('Enviar recomendações por email'): with st.spinner(text='Enviando...'): send_mail(email, name, markdown_list, image_list) st.success('Enviado! Confira sua caixa de entrada e lixo eletrônico.') if accept_beer_offers or allow_data_usage: # Try to send answers to database db = DBFunctions() try: db.send_answers_to_db( email=email, name=name, recommendations=recommendations, df_paladar=df_paladar, accept_beer_offers=accept_beer_offers, allow_data_usage=allow_data_usage, ) except KeyError: pass class _SessionState: def __init__(self, session, hash_funcs): """Initialize SessionState instance.""" self.__dict__["_state"] = { "data": {}, "hash": None,
def __call__(self, **kwargs): """Initialize state data once.""" for item, value in kwargs.items(): if item not in self._state["data"]: self._state["data"][item] = value def __getitem__(self, item): """Return a saved state value, None if item is undefined.""" return self._state["data"].get(item, None) def __getattr__(self, item): """Return a saved state value, None if item is undefined.""" return self._state["data"].get(item, None) def __setitem__(self, item, value): """Set state value.""" self._state["data"][item] = value def __setattr__(self, item, value): """Set state value.""" self._state["data"][item] = value def clear(self): """Clear session state and request a rerun.""" self._state["data"].clear() self._state["session"].request_rerun() def sync(self): """Rerun the app with all state values up to date from the beginning to fix rollbacks.""" # Ensure to rerun only once to avoid infinite loops # caused by a constantly changing state value at each run. # # Example: state.value += 1 if self._state["is_rerun"]: self._state["is_rerun"] = False elif self._state["hash"] is not None: if self._state["hash"] != self._state["hasher"].to_bytes(self._state["data"], None): self._state["is_rerun"] = True self._state["session"].request_rerun() self._state["hash"] = self._state["hasher"].to_bytes(self._state["data"], None) def _get_session(): session_id = get_report_ctx().session_id session_info = Server.get_current()._get_session_info(session_id) if session_info is None: raise RuntimeError("Couldn't get your Streamlit Session object.") return session_info.session def _get_state(hash_funcs=None): session = _get_session() if not hasattr(session, "_custom_session_state"): session._custom_session_state = _SessionState(session, hash_funcs) return session._custom_session_state if __name__ == "__main__": main()
"hasher": _CodeHasher(hash_funcs), "is_rerun": False, "session": session, }
random_line_split
packet_decoder.py
#!/usr/bin/python import struct import sys #TODO: Review which of these we actually need SERVER_TO_CLIENT = 0x01 CLIENT_TO_SERVER = 0x02 PROTOCOL_VERSION = 28 class Packet: def __init__(self, packet_type=0, **data): self.direction = 0 if type(packet_type) == str: packet_type = dict((v,k) for k,v in names.items())[packet_type] # Reverse lookup self.ident = packet_type self.data = data def name(self): return names[self.ident] def __str__(self): from_to = {CLIENT_TO_SERVER: "to server", SERVER_TO_CLIENT: "from server"}[self.direction] return "%s packet %s: %s" % (self.name(), from_to, repr(self.data)) def
(self): p = Packet() p.direction = self.direction p.ident = self.ident p.data = self.data.copy() return p SLOT_EXTRA_DATA_IDS = [ 0x103, 0x105, 0x15A, 0x167, 0x10C, 0x10D, 0x10E, 0x10F, 0x122, 0x110, 0x111, 0x112, 0x113, 0x123, 0x10B, 0x100, 0x101, 0x102, 0x124, 0x114, 0x115, 0x116, 0x117, 0x125, 0x11B, 0x11C, 0x11D, 0x11E, 0x126, 0x12A, 0x12B, 0x12C, 0x12D, 0x12E, 0x12F, 0x130, 0x131, 0x132, 0x133, 0x134, 0x135, 0x136, 0x137, 0x138, 0x139, 0x13A, 0x13B, 0x13C, 0x13D ] data_types = { "ubyte": ('B', 1), "byte": ('b', 1), "bool": ('?', 1), "short": ('h', 2), "float": ('f', 4), "int": ('i', 4), "double": ('d', 8), "long": ('q', 8) } names = { 0x00: "Keep-alive", 0x01: "Login request", 0x02: "Handshake", 0x03: "Chat message", 0x04: "Time update", 0x05: "Entity Equipment", 0x06: "Spawn position", 0x07: "Use entity", 0x08: "Update health", 0x09: "Respawn", 0x0A: "Player", 0x0B: "Player position", 0x0C: "Player look", 0x0D: "Player position & look", 0x0E: "Player digging", 0x0F: "Player block placement", 0x10: "Holding change", 0x11: "Use bed", 0x12: "Animation", 0x13: "Entity action", 0x14: "Named entity spawn", 0x15: "Pickup spawn", 0x16: "Collect item", 0x17: "Add object or vehicle", 0x18: "Mob spawn", 0x19: "Entity: painting", 0x1A: "Experience Orb", 0x1B: "Stance update (DEPRECATED)", 0x1C: "Entity velocity", 0x1D: "Destroy entity", 0x1E: "Entity", 0x1F: "Entity relative move", 0x20: "Entity look", 0x21: "Entity look and relative move", 0x22: "Entity teleport", 0x23: "Entity head look", 0x26: "Entity status", 0x27: "Attach entity", 0x28: "Entity metadata", 0x29: "Entity effect", 0x2A: "Remove entity effect", 0x2B: "Experience", 0x32: "Pre-chunk", 0x33: "Map chunk", 0x34: "Multi-block change", 0x35: "Block change", 0x36: "Block action", 0x3C: "Explosion", 0x3D: "Sound effect", 0x46: "New or invalid state", 0x47: "Thunderbolt", 0x64: "Open window", 0x65: "Close window", 0x66: "Window click", 0x67: "Set slot", 0x68: "Window items", 0x69: "Update progress bar", 0x6A: 
"Transaction", 0x6B: "Creative inventory action", 0x6C: "Enchant Item", 0x82: "Update sign", 0x83: "Map data", 0x84: "Update tile entity", 0xCA: "Player Abilities", 0xC8: "Increment statistic", 0xC9: "Player List Item", 0xFA: "Plugin message", 0xFE: "Server list ping", 0xFF: "Disconnect" } structs = { #Keep-alive 0x00: ("int", "keep_alive_id"), #Login request 0x01: { CLIENT_TO_SERVER: ( ("int", "protocol_version"), ("string16", "username"), ("string16", "level type"), ("int", "server_mode"), ("int", "dimension"), ("byte", "difficulty"), ("ubyte", "world_height"), ("ubyte", "max_players")), SERVER_TO_CLIENT: ( ("int", "entity_id"), ("string16", "unknown"), ("string16", "level type"), ("int", "server_mode"), ("int", "dimension"), ("byte", "difficulty"), ("ubyte", "world_height"), ("ubyte", "max_players"))}, #Handshake 0x02: { CLIENT_TO_SERVER: ("string16", "username"), SERVER_TO_CLIENT: ("string16", "connection_hash")}, #Chat message 0x03: ("string16", "text"), #Time update 0x04: ("long", "time"), #Entity Equipment 0x05: ( ("int", "entity_id"), ("short", "slot"), ("short", "item_id"), ("short", "damage")), #Spawn position 0x06: ( ("int", "x"), ("int", "y"), ("int", "z")), #Use entity 0x07: ( ("int", "subject_entity_id"), ("int", "object_entity_id"), ("bool", "left_click")), #Update health 0x08: ( ("short", "health"), ("short", "food"), ("float", "food_saturation")), #Respawn 0x09: ( ("int", "dimension"), ("byte", "difficulty"), ("byte", "server_mode"), ("short", "world_height"), ("string16", "level_type")), #Player 0x0A: ("bool", "on_ground"), #Player position 0x0B: ( ("double", "x"), ("double", "y"), ("double", "stance"), ("double", "z"), ("bool", "on_ground")), #Player look 0x0C: ( ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground")), #Player position & look 0x0D: { CLIENT_TO_SERVER: ( ("double", "x"), ("double", "y"), ("double", "stance"), ("double", "z"), ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground")), SERVER_TO_CLIENT: ( ("double", "x"), 
("double", "stance"), ("double", "y"), ("double", "z"), ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground"))}, #Player digging 0x0E: ( ("byte", "status"), ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "face")), #Player block placement 0x0F: ( ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "direction"), ("slot", "slot")), #Holding change 0x10: ("short", "slot"), #Use bed 0x11: ( ("int", "entity_id"), ("byte", "in_bed"), ("int", "x"), ("byte", "y"), ("int", "z")), #Animation 0x12: ( ("int", "entity_id"), ("byte", "animation")), #Entity action 0x13: ( ("int", "entity_id"), ("byte", "action")), #Named entity spawn 0x14: ( ("int", "entity_id"), ("string16", "player_name"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "rotation"), ("byte", "pitch"), ("short", "current_item")), #Pickup spawn 0x15: ( ("int", "entity_id"), ("short", "item"), ("byte", "count"), ("short", "metadata"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "rotation"), ("byte", "pitch"), ("byte", "roll")), #Collect item 0x16: ( ("int", "subject_entity_id"), ("int", "object_entity_id")), #Add object or vehicle 0x17: ( ("int", "entity_id"), ("byte", "type"), ("int", "x"), ("int", "y"), ("int", "z"), ("int", "unknown")), #Mob spawn 0x18: ( ("int", "entity_id"), ("byte", "type"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "yaw"), ("byte", "pitch"), ("byte", "head yaw"), ("metadata", "metadata")), #Entity: painting 0x19: ( ("int", "entity_id"), ("string16", "title"), ("int", "x"), ("int", "y"), ("int", "z"), ("int", "direction")), #Experience Orb 0x1A: ( ("int", "entity_id"), ("int", "x"), ("int", "y"), ("int", "z"), ("short", "count")), #Stance update 0x1B: ( ("float", "unknown1"), ("float", "unknown2"), ("float", "unknown3"), ("float", "unknown4"), ("bool", "unknown5"), ("bool", "unknown6")), #Entity velocity 0x1C: ( ("int", "entity_id"), ("short", "x_velocity"), ("short", "y_velocity"), ("short", "z_velocity")), #Destroy entity 0x1D: ("int", "entity_id"), #Entity 
0x1E: ("int", "entity_id"), #Entity relative move 0x1F: ( ("int", "entity_id"), ("byte", "x_change"), ("byte", "y_change"), ("byte", "z_change")), #Entity look 0x20: ( ("int", "entity_id"), ("byte", "yaw"), ("byte", "pitch")), #Entity look and relative move 0x21: ( ("int", "entity_id"), ("byte", "x_change"), ("byte", "y_change"), ("byte", "z_change"), ("byte", "yaw"), ("byte", "pitch")), #Entity teleport 0x22: ( ("int", "entity_id"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "yaw"), ("byte", "pitch")), # Entity head look 0x23: ( ("int", "entity_id"), ("byte", "head yaw")), #Entity status 0x26: ( ("int", "entity_id"), ("byte", "status")), #Attach entity 0x27: ( ("int", "subject_entity_id"), ("int", "object_entity_id")), #Entity metadata 0x28: ( ("int", "entity_id"), ("metadata", "metadata")), # Entity effect 0x29: ( ("int", "entity_id"), ("byte", "effect_id"), ("byte", "amplifier"), ("short", "duration")), # remove entity effect 0x2A: ( ("int", "entity_id"), ("byte", "effect_id")), # Experience 0x2B: ( ("float", "experience_bar"), ("short", "level"), ("short", "total_experience")), #Pre-chunk 0x32: ( ("int", "x"), ("int", "z"), ("bool", "load")), #Map chunk 0x33: ( ("int", "x"), ("int", "z"), ("bool", "contiguous"), ("short", "bitmap"), ("short", "add_bitmap"), ("int", "data_size"), ("int", "unknown")), #Multi-block change 0x34: ( ("int", "x_chunk"), ("int", "z_chunk"), ("short", "record_count"), ("int", "data_size")), #Block change 0x35: ( ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "id"), ("byte", "metadata")), #Block action 0x36: ( ("int", "x"), ("short", "y"), ("int", "z"), ("byte", "type_state"), ("byte", "pitch_direction")), #Explosion 0x3C: ( ("double", "x"), ("double", "y"), ("double", "z"), ("float", "unknown"), ("int", "data_size")), #Sound effect 0x3D: ( ("int", "effect_id"), ("int", "x"), ("byte", "y"), ("int", "z"), ("int", "extra")), #New or invalid state 0x46: ( ("byte", "reason"), ("byte", "gamemode")), #Thunderbolt 0x47: ( ("int", 
"entity_id"), ("bool", "unknown"), ("int", "x"), ("int", "y"), ("int", "z")), #Open window 0x64: ( ("byte", "window_id"), ("byte", "inventory_type"), ("string16", "window_title"), ("byte", "slots_count")), #Close window 0x65: ("byte", "window_id"), #Window click 0x66: ( ("byte", "window_id"), ("short", "slot"), ("byte", "right_click"), ("short", "transaction_id"), ("bool", "shift"), ("slot", "slot_data")), #Set slot 0x67: ( ("byte", "window_id"), ("short", "slot"), ("slot", "slot_data")), #Window items 0x68: ( ("byte", "window_id"), ("short", "data_size")), #Update progress bar 0x69: ( ("byte", "window_id"), ("short", "progress_bar_type"), ("short", "progress")), #Transaction 0x6A: ( ("byte", "window_id"), ("short", "transaction_id"), ("bool", "accepted")), # Creative Inventory Action 0x6B: ( ("short", "slot"), ("slot", "slot_data")), # Enchant Item 0x6C: ( ("byte", "window_id"), ("byte", "enchantment")), #Update sign 0x82: ( ("int", "x"), ("short", "y"), ("int", "z"), ("string16", "line_1"), ("string16", "line_2"), ("string16", "line_3"), ("string16", "line_4")), #Map data 0x83: ( ("short", "unknown1"), ("short", "map_id"), ("ubyte", "data_size")), #Update Tile Entity 0x84: ( ("int", "x"), ("short", "y"), ("int", "z"), ("byte", "action"), ("int", "custom1"), ("int", "custom2"), ("int", "custom3")), # Player Abilities 0xCA: ( ("bool", "invulnerable"), ("bool", "flying"), ("bool", "can_fly"), ("bool", "instant_destroy")), #Increment statistic 0xC8: ( ("int", "statistic_id"), ("byte", "amount")), # Player List Item 0xC9: ( ("string16", "player_name"), ("bool", "online"), ("short", "ping")), #Server list ping 0xFE: (), #Disconnect 0xFF: ("string16", "reason")} class PacketDecoder: def __init__(self, to_server): self.buff = '' self.error_count = 0 self.node = CLIENT_TO_SERVER if to_server else SERVER_TO_CLIENT self.iPacketCounter = 0 def get_struct(self, packet): """Reads ident and direction from packet, and returns the associated struct description from structs 
global. Normalises return to be a ((str, str), ...)""" o = structs[packet.ident] if isinstance(o, dict): o = o[packet.direction] if len(o) and not isinstance(o[0], tuple): o = (o), return o def pack(self, data_type, data): if data_type in data_types: format = data_types[data_type] return self.pack_real(format[0], data) if data_type == "string8": return self.pack("short", len(data)) + data if data_type == "string16": return self.pack("short", len(data)) + data.encode('utf-16be') if data_type == "slot": o = self.pack('short', data['id']) if data['id'] > 0: o += self.pack('byte', data['amount']) o += self.pack('short', data['damage']) if 'extra' in data: nbtdata = data['extra'] if nbtdata is None: o += self.pack('short', -1) else: nbt_len = len(nbtdata) o += self.pack('short', nbt_len) o += nbtdata return o if data_type == "metadata": o = '' for mtype, val in data: mtype2 = mtype >> 5 o += self.pack('byte', mtype) if mtype2 == 0: o += self.pack('byte', val) if mtype2 == 1: o += self.pack('short', val) if mtype2 == 2: o += self.pack('int', val) if mtype2 == 3: o += self.pack('float', val) if mtype2 == 4: o += self.pack('string16', val) if mtype2 == 5: o += self.pack('short', val['id']) o += self.pack('byte', val['count']) o += self.pack('short', val['damage']) if mtype2 == 6: for i in range(3): o += self.pack('int', val[i]) o += self.pack('byte', 127) return o def unpack(self, data_type): """Reads buff (consuming bytes) and returns the unpacked value according to the given type.""" if data_type in data_types: format = data_types[data_type] return self.unpack_real(format[0], format[1]) if data_type == "string8": length = self.unpack('short') if length < 0: raise Exception("Negative length for string") if len(self.buff) < length: raise IncompleteData() string = self.buff[:length] self.buff = self.buff[length:] return string if data_type == "string16": length = self.unpack('short') if length < 0: raise Exception("Negative length for string") if len(self.buff) < 2*length: 
raise IncompleteData() string = self.buff[:2*length].decode('utf-16be') self.buff = self.buff[2*length:] return string if data_type == "slot": o = {} o["id"] = self.unpack('short') if o["id"] > 0: o["amount"] = self.unpack('byte') o["damage"] = self.unpack('short') if o["id"] in SLOT_EXTRA_DATA_IDS: extra_len = self.unpack('short') if extra_len <= 0: o["extra"] = None else: if len(self.buff) < extra_len: raise IncompleteData() extra_buff = self.buff[:extra_len] self.buff = self.buff[extra_len:] o["extra"] = extra_buff return o if data_type == "metadata": #[(17, 0), (0, 0), (16, -1)] o = [] mtype = self.unpack('byte') while mtype != 127: mtype2 = mtype >> 5 t = 0 if mtype2 == 0: t = self.unpack('byte') if mtype2 == 1: t = self.unpack('short') if mtype2 == 2: t = self.unpack('int') if mtype2 == 3: t = self.unpack('float') if mtype2 == 4: t = self.unpack('string16') if mtype2 == 5: t = {} t["id"] = self.unpack('short') t["count"] = self.unpack('byte') t["damage"] = self.unpack('short') if mtype2 == 6: t = [] for i in range(3): s = self.unpack('int') t.append(s) t = (mtype, t) o.append(t) mtype = self.unpack('byte') return o def unpack_real(self, data_type, length): """A helper function for unpack(), it handles any data type that is understood by the struct module.""" if len(self.buff) < length: raise IncompleteData() o = struct.unpack_from('!'+data_type, self.buff)[0] self.buff = self.buff[length:] return o def pack_real(self, data_type, data): return struct.pack('!'+data_type, data) def unpack_array(self, data_type, count): a = [] for i in range(count): a.append(self.unpack(data_type)) return a def pack_array(self, data_type, data): o = '' for d in data: o += self.pack(data_type, d) return o def unpack_array_fast(self, data_type, count): data_type = data_types[data_type] if len(self.buff) < count*data_type[1]: raise IncompleteData() o = struct.unpack_from(data_type[0]*count, self.buff) self.buff = self.buff[count*data_type[1]:] return o def pack_array_fast(self, 
data_type, data): data_type = data_types[data_type] return struct.pack(data_type[0]*len(data), *data) def read_packet(self): """Reads the bytestring in self.buff, and returns the first packet contained within it. Sets self.buff to remaining bytestring. If packet is incomplete, returns None. But may raise if it thinks a real malformed packet has been recieved. """ #self.debug("READ BUFFER SIZE: %d" % len(self.buff)) backup = self.buff[:] packet = Packet() try: packet.direction = self.node packet.ident = self.unpack('ubyte') #Defined structs from huge dict for datatype, name in self.get_struct(packet): # this populates packet.data with {name: value} packet.data[name] = self.unpack(datatype) # I believe the following are packet-type specific fixes for variable-length packets. #0x17 if packet.ident == 0x17: if packet.data['unknown'] > 0: packet.data['x2'] = self.unpack('short') packet.data['y2'] = self.unpack('short') packet.data['z2'] = self.unpack('short') #0x33 if packet.ident in (0x33, 0x34): packet.data['data'] = self.unpack_array_fast('byte', packet.data['data_size']) del packet.data["data_size"] # #0x34 # if packet.ident == 0x34: # coords = self.unpack_array_fast('short', packet.data['data_size']) # btype = self.unpack_array_fast('byte', packet.data['data_size']) # metadata = self.unpack_array_fast('byte', packet.data['data_size']) # packet.data["blocks"] = [] # for i in zip(coords, btype, metadata): # block = {} # block["x"] = i[0] >> 12 # block["z"] = 0x0F & i[0] >> 8 # block["y"] = 0xFF & i[0] # block["type"] = i[1] # block["metadata"] = i[2] # packet.data["blocks"].append(block) # del packet.data["data_size"] #0x3C if packet.ident == 0x3C: records = self.unpack_array_fast('byte', packet.data['data_size']*3) i = 0 packet.data["blocks"] = [] while i < packet.data['data_size']*3: packet.data["blocks"].append(dict(zip(('x','y','z'), records[i:i+3]))) i+=3 del packet.data["data_size"] #0x68 if packet.ident == 0x68: packet.data["slots_data"] = 
self.unpack_array('slot', packet.data["data_size"]) del packet.data["data_size"] #0x82: if packet.ident == 0x82: packet.data["text"] = [] for i in range(4): packet.data["text"].append(packet.data["line_%s" % (i+1)]) #0x83 if packet.ident == 0x83: packet.data["data"] = self.unpack_array_fast('byte', packet.data['data_size']) del packet.data["data_size"] # Sets packet.original to the byte string that the packet was decoded from. packet.original = backup[:len(backup) - len(self.buff)] return packet except IncompleteData: self.buff = backup return None except Exception, ex: self.buff = backup ex.args += (self.buff[20:],) raise def encode_packet(self, packet): """Takes a packet, and returns the encoded bytestring representing it.""" try: output = self.pack('ubyte', packet.ident) append = '' #0x17 if packet.ident == 0x17: if packet.data['unknown'] > 0: for i in ('x2','y2','z2'): append += self.pack('short', packet.data[i]) #0x33 if packet.ident in (0x33, 0x34): packet.data['data_size'] = len(packet.data['data']) append += self.pack_array_fast('byte', packet.data['data']) # #0x34 # if packet.ident == 0x34: # coords = [] # btypes = [] # metadata = [] # for i in packet.data['blocks']: # coords.append(i['x'] << 12 | i['z'] << 8 | i['y']) # btypes.append(i['type']) # metadata.append(i['metadata']) # # packet.data['data_size'] = len(coords) # append += self.pack_array_fast('short', coords) # append += self.pack_array_fast('byte', btypes) # append += self.pack_array_fast('byte', metadata) #0x3C if packet.ident == 0x3C: array = [] for i in packet.data['blocks']: array += [i['x'], i['y'], i['z']] packet.data['data_size'] = len(packet.data['blocks']) append += self.pack_array_fast('byte', array) #0x68 if packet.ident == 0x68: packet.data['data_size'] = len(packet.data['slots_data']) append += self.pack_array('slot', packet.data['slots_data']) #0x82: Sign if packet.ident == 0x82: for i in range(4): packet.data["line_%s" % (i+1)] = packet.data["text"][i] #0x83 if packet.ident == 
0x83: packet.data['data_size'] = len(packet.data['data']) append += self.pack_array_fast('byte', packet.data['data']) for i in self.get_struct(packet): output += self.pack(i[0], packet.data[i[1]]) output += append return output except Exception: raise def stateless_unpack(buff, to_server): """A wrapper about the normal objects, that lets you unpack encoded packets easily. Returns (packet, remaining_buff), where remaining_buff is the given buffer without the bytes eaten by the packet. If no more packets can be read from buff, returns (None, buff). """ decoder = PacketDecoder(to_server) decoder.buff = buff packet = decoder.read_packet() return packet, decoder.buff def stateless_pack(packet, to_server): """A wrapper about the normal objects, that lets you pack decoded packets easily. Returns the bytestring that represents the packet.""" decoder = PacketDecoder(to_server) return decoder.encode_packet(packet) class IncompleteData(Exception): pass
copy
identifier_name
packet_decoder.py
#!/usr/bin/python import struct import sys #TODO: Review which of these we actually need SERVER_TO_CLIENT = 0x01 CLIENT_TO_SERVER = 0x02 PROTOCOL_VERSION = 28 class Packet: def __init__(self, packet_type=0, **data): self.direction = 0 if type(packet_type) == str: packet_type = dict((v,k) for k,v in names.items())[packet_type] # Reverse lookup self.ident = packet_type self.data = data def name(self): return names[self.ident] def __str__(self): from_to = {CLIENT_TO_SERVER: "to server", SERVER_TO_CLIENT: "from server"}[self.direction] return "%s packet %s: %s" % (self.name(), from_to, repr(self.data)) def copy(self): p = Packet() p.direction = self.direction p.ident = self.ident p.data = self.data.copy() return p SLOT_EXTRA_DATA_IDS = [ 0x103, 0x105, 0x15A, 0x167, 0x10C, 0x10D, 0x10E, 0x10F, 0x122, 0x110, 0x111, 0x112, 0x113, 0x123, 0x10B, 0x100, 0x101, 0x102, 0x124, 0x114, 0x115, 0x116, 0x117, 0x125, 0x11B, 0x11C, 0x11D, 0x11E, 0x126, 0x12A, 0x12B, 0x12C, 0x12D, 0x12E, 0x12F, 0x130, 0x131, 0x132, 0x133, 0x134, 0x135, 0x136, 0x137, 0x138, 0x139, 0x13A, 0x13B, 0x13C, 0x13D ] data_types = { "ubyte": ('B', 1), "byte": ('b', 1), "bool": ('?', 1), "short": ('h', 2), "float": ('f', 4), "int": ('i', 4), "double": ('d', 8), "long": ('q', 8) } names = { 0x00: "Keep-alive", 0x01: "Login request", 0x02: "Handshake", 0x03: "Chat message", 0x04: "Time update", 0x05: "Entity Equipment", 0x06: "Spawn position", 0x07: "Use entity", 0x08: "Update health", 0x09: "Respawn", 0x0A: "Player", 0x0B: "Player position", 0x0C: "Player look", 0x0D: "Player position & look", 0x0E: "Player digging", 0x0F: "Player block placement", 0x10: "Holding change", 0x11: "Use bed", 0x12: "Animation", 0x13: "Entity action", 0x14: "Named entity spawn", 0x15: "Pickup spawn", 0x16: "Collect item", 0x17: "Add object or vehicle", 0x18: "Mob spawn", 0x19: "Entity: painting", 0x1A: "Experience Orb", 0x1B: "Stance update (DEPRECATED)", 0x1C: "Entity velocity", 0x1D: "Destroy entity", 0x1E: "Entity", 0x1F: "Entity 
relative move", 0x20: "Entity look", 0x21: "Entity look and relative move", 0x22: "Entity teleport", 0x23: "Entity head look", 0x26: "Entity status", 0x27: "Attach entity", 0x28: "Entity metadata", 0x29: "Entity effect", 0x2A: "Remove entity effect", 0x2B: "Experience", 0x32: "Pre-chunk", 0x33: "Map chunk", 0x34: "Multi-block change", 0x35: "Block change", 0x36: "Block action", 0x3C: "Explosion", 0x3D: "Sound effect", 0x46: "New or invalid state", 0x47: "Thunderbolt", 0x64: "Open window", 0x65: "Close window", 0x66: "Window click", 0x67: "Set slot", 0x68: "Window items", 0x69: "Update progress bar", 0x6A: "Transaction", 0x6B: "Creative inventory action", 0x6C: "Enchant Item", 0x82: "Update sign", 0x83: "Map data", 0x84: "Update tile entity", 0xCA: "Player Abilities", 0xC8: "Increment statistic", 0xC9: "Player List Item", 0xFA: "Plugin message", 0xFE: "Server list ping", 0xFF: "Disconnect" } structs = { #Keep-alive 0x00: ("int", "keep_alive_id"), #Login request 0x01: { CLIENT_TO_SERVER: ( ("int", "protocol_version"), ("string16", "username"), ("string16", "level type"), ("int", "server_mode"), ("int", "dimension"), ("byte", "difficulty"), ("ubyte", "world_height"), ("ubyte", "max_players")), SERVER_TO_CLIENT: ( ("int", "entity_id"), ("string16", "unknown"), ("string16", "level type"), ("int", "server_mode"), ("int", "dimension"), ("byte", "difficulty"), ("ubyte", "world_height"), ("ubyte", "max_players"))}, #Handshake 0x02: { CLIENT_TO_SERVER: ("string16", "username"), SERVER_TO_CLIENT: ("string16", "connection_hash")}, #Chat message 0x03: ("string16", "text"), #Time update 0x04: ("long", "time"), #Entity Equipment 0x05: ( ("int", "entity_id"), ("short", "slot"), ("short", "item_id"), ("short", "damage")), #Spawn position 0x06: ( ("int", "x"), ("int", "y"), ("int", "z")), #Use entity 0x07: ( ("int", "subject_entity_id"), ("int", "object_entity_id"), ("bool", "left_click")), #Update health 0x08: ( ("short", "health"), ("short", "food"), ("float", "food_saturation")), 
#Respawn 0x09: ( ("int", "dimension"), ("byte", "difficulty"), ("byte", "server_mode"), ("short", "world_height"), ("string16", "level_type")), #Player 0x0A: ("bool", "on_ground"), #Player position 0x0B: ( ("double", "x"), ("double", "y"), ("double", "stance"), ("double", "z"), ("bool", "on_ground")), #Player look 0x0C: ( ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground")), #Player position & look 0x0D: { CLIENT_TO_SERVER: ( ("double", "x"), ("double", "y"), ("double", "stance"), ("double", "z"), ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground")), SERVER_TO_CLIENT: ( ("double", "x"), ("double", "stance"), ("double", "y"), ("double", "z"), ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground"))}, #Player digging 0x0E: ( ("byte", "status"), ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "face")), #Player block placement 0x0F: ( ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "direction"), ("slot", "slot")), #Holding change 0x10: ("short", "slot"), #Use bed 0x11: ( ("int", "entity_id"), ("byte", "in_bed"), ("int", "x"), ("byte", "y"), ("int", "z")), #Animation 0x12: ( ("int", "entity_id"), ("byte", "animation")), #Entity action 0x13: ( ("int", "entity_id"), ("byte", "action")), #Named entity spawn 0x14: ( ("int", "entity_id"), ("string16", "player_name"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "rotation"), ("byte", "pitch"), ("short", "current_item")), #Pickup spawn 0x15: ( ("int", "entity_id"), ("short", "item"), ("byte", "count"), ("short", "metadata"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "rotation"), ("byte", "pitch"), ("byte", "roll")), #Collect item 0x16: ( ("int", "subject_entity_id"), ("int", "object_entity_id")), #Add object or vehicle 0x17: ( ("int", "entity_id"), ("byte", "type"), ("int", "x"), ("int", "y"), ("int", "z"), ("int", "unknown")), #Mob spawn 0x18: ( ("int", "entity_id"), ("byte", "type"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "yaw"), ("byte", "pitch"), ("byte", "head yaw"), 
("metadata", "metadata")), #Entity: painting 0x19: ( ("int", "entity_id"), ("string16", "title"), ("int", "x"), ("int", "y"), ("int", "z"), ("int", "direction")), #Experience Orb 0x1A: ( ("int", "entity_id"), ("int", "x"), ("int", "y"), ("int", "z"), ("short", "count")), #Stance update 0x1B: ( ("float", "unknown1"), ("float", "unknown2"), ("float", "unknown3"), ("float", "unknown4"), ("bool", "unknown5"), ("bool", "unknown6")), #Entity velocity 0x1C: ( ("int", "entity_id"), ("short", "x_velocity"), ("short", "y_velocity"), ("short", "z_velocity")), #Destroy entity 0x1D: ("int", "entity_id"), #Entity 0x1E: ("int", "entity_id"), #Entity relative move 0x1F: ( ("int", "entity_id"), ("byte", "x_change"), ("byte", "y_change"), ("byte", "z_change")), #Entity look 0x20: ( ("int", "entity_id"), ("byte", "yaw"), ("byte", "pitch")), #Entity look and relative move 0x21: ( ("int", "entity_id"), ("byte", "x_change"), ("byte", "y_change"), ("byte", "z_change"), ("byte", "yaw"), ("byte", "pitch")), #Entity teleport 0x22: ( ("int", "entity_id"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "yaw"), ("byte", "pitch")), # Entity head look 0x23: ( ("int", "entity_id"), ("byte", "head yaw")), #Entity status 0x26: ( ("int", "entity_id"), ("byte", "status")), #Attach entity 0x27: ( ("int", "subject_entity_id"), ("int", "object_entity_id")), #Entity metadata 0x28: ( ("int", "entity_id"), ("metadata", "metadata")), # Entity effect 0x29: ( ("int", "entity_id"), ("byte", "effect_id"), ("byte", "amplifier"), ("short", "duration")), # remove entity effect 0x2A: ( ("int", "entity_id"), ("byte", "effect_id")), # Experience 0x2B: ( ("float", "experience_bar"), ("short", "level"), ("short", "total_experience")), #Pre-chunk 0x32: ( ("int", "x"), ("int", "z"), ("bool", "load")), #Map chunk 0x33: ( ("int", "x"), ("int", "z"), ("bool", "contiguous"), ("short", "bitmap"), ("short", "add_bitmap"), ("int", "data_size"), ("int", "unknown")), #Multi-block change 0x34: ( ("int", "x_chunk"), ("int", 
"z_chunk"), ("short", "record_count"), ("int", "data_size")), #Block change 0x35: ( ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "id"), ("byte", "metadata")), #Block action 0x36: ( ("int", "x"), ("short", "y"), ("int", "z"), ("byte", "type_state"), ("byte", "pitch_direction")), #Explosion 0x3C: ( ("double", "x"), ("double", "y"), ("double", "z"), ("float", "unknown"), ("int", "data_size")), #Sound effect 0x3D: ( ("int", "effect_id"), ("int", "x"), ("byte", "y"), ("int", "z"), ("int", "extra")), #New or invalid state 0x46: ( ("byte", "reason"), ("byte", "gamemode")), #Thunderbolt 0x47: ( ("int", "entity_id"), ("bool", "unknown"), ("int", "x"), ("int", "y"), ("int", "z")), #Open window 0x64: ( ("byte", "window_id"), ("byte", "inventory_type"), ("string16", "window_title"), ("byte", "slots_count")), #Close window 0x65: ("byte", "window_id"), #Window click 0x66: ( ("byte", "window_id"), ("short", "slot"), ("byte", "right_click"), ("short", "transaction_id"), ("bool", "shift"), ("slot", "slot_data")), #Set slot 0x67: ( ("byte", "window_id"), ("short", "slot"), ("slot", "slot_data")), #Window items 0x68: ( ("byte", "window_id"), ("short", "data_size")), #Update progress bar 0x69: ( ("byte", "window_id"), ("short", "progress_bar_type"), ("short", "progress")), #Transaction 0x6A: ( ("byte", "window_id"), ("short", "transaction_id"), ("bool", "accepted")), # Creative Inventory Action 0x6B: ( ("short", "slot"), ("slot", "slot_data")), # Enchant Item 0x6C: ( ("byte", "window_id"), ("byte", "enchantment")), #Update sign 0x82: ( ("int", "x"), ("short", "y"), ("int", "z"), ("string16", "line_1"), ("string16", "line_2"), ("string16", "line_3"), ("string16", "line_4")), #Map data 0x83: ( ("short", "unknown1"), ("short", "map_id"), ("ubyte", "data_size")), #Update Tile Entity 0x84: ( ("int", "x"), ("short", "y"), ("int", "z"), ("byte", "action"), ("int", "custom1"), ("int", "custom2"), ("int", "custom3")), # Player Abilities 0xCA: ( ("bool", "invulnerable"), ("bool", 
"flying"), ("bool", "can_fly"), ("bool", "instant_destroy")), #Increment statistic 0xC8: ( ("int", "statistic_id"), ("byte", "amount")), # Player List Item 0xC9: ( ("string16", "player_name"), ("bool", "online"), ("short", "ping")), #Server list ping 0xFE: (), #Disconnect 0xFF: ("string16", "reason")} class PacketDecoder: def __init__(self, to_server): self.buff = '' self.error_count = 0 self.node = CLIENT_TO_SERVER if to_server else SERVER_TO_CLIENT self.iPacketCounter = 0 def get_struct(self, packet): """Reads ident and direction from packet, and returns the associated struct description from structs global. Normalises return to be a ((str, str), ...)""" o = structs[packet.ident] if isinstance(o, dict): o = o[packet.direction] if len(o) and not isinstance(o[0], tuple): o = (o), return o def pack(self, data_type, data): if data_type in data_types: format = data_types[data_type] return self.pack_real(format[0], data) if data_type == "string8": return self.pack("short", len(data)) + data if data_type == "string16": return self.pack("short", len(data)) + data.encode('utf-16be') if data_type == "slot": o = self.pack('short', data['id']) if data['id'] > 0: o += self.pack('byte', data['amount']) o += self.pack('short', data['damage']) if 'extra' in data: nbtdata = data['extra'] if nbtdata is None: o += self.pack('short', -1) else: nbt_len = len(nbtdata) o += self.pack('short', nbt_len) o += nbtdata return o if data_type == "metadata": o = '' for mtype, val in data: mtype2 = mtype >> 5 o += self.pack('byte', mtype) if mtype2 == 0: o += self.pack('byte', val) if mtype2 == 1: o += self.pack('short', val) if mtype2 == 2: o += self.pack('int', val) if mtype2 == 3: o += self.pack('float', val) if mtype2 == 4: o += self.pack('string16', val) if mtype2 == 5: o += self.pack('short', val['id']) o += self.pack('byte', val['count']) o += self.pack('short', val['damage']) if mtype2 == 6: for i in range(3): o += self.pack('int', val[i]) o += self.pack('byte', 127) return o def 
unpack(self, data_type): """Reads buff (consuming bytes) and returns the unpacked value according to the given type.""" if data_type in data_types: format = data_types[data_type] return self.unpack_real(format[0], format[1]) if data_type == "string8": length = self.unpack('short') if length < 0: raise Exception("Negative length for string") if len(self.buff) < length: raise IncompleteData() string = self.buff[:length] self.buff = self.buff[length:] return string if data_type == "string16": length = self.unpack('short') if length < 0: raise Exception("Negative length for string") if len(self.buff) < 2*length: raise IncompleteData() string = self.buff[:2*length].decode('utf-16be') self.buff = self.buff[2*length:] return string if data_type == "slot": o = {} o["id"] = self.unpack('short') if o["id"] > 0: o["amount"] = self.unpack('byte') o["damage"] = self.unpack('short') if o["id"] in SLOT_EXTRA_DATA_IDS: extra_len = self.unpack('short') if extra_len <= 0: o["extra"] = None else: if len(self.buff) < extra_len: raise IncompleteData() extra_buff = self.buff[:extra_len] self.buff = self.buff[extra_len:] o["extra"] = extra_buff return o if data_type == "metadata": #[(17, 0), (0, 0), (16, -1)] o = [] mtype = self.unpack('byte') while mtype != 127: mtype2 = mtype >> 5 t = 0 if mtype2 == 0: t = self.unpack('byte') if mtype2 == 1: t = self.unpack('short') if mtype2 == 2: t = self.unpack('int') if mtype2 == 3: t = self.unpack('float') if mtype2 == 4: t = self.unpack('string16') if mtype2 == 5: t = {} t["id"] = self.unpack('short') t["count"] = self.unpack('byte') t["damage"] = self.unpack('short') if mtype2 == 6: t = [] for i in range(3): s = self.unpack('int') t.append(s) t = (mtype, t) o.append(t) mtype = self.unpack('byte') return o def unpack_real(self, data_type, length): """A helper function for unpack(), it handles any data type that is understood by the struct module.""" if len(self.buff) < length: raise IncompleteData() o = struct.unpack_from('!'+data_type, 
self.buff)[0] self.buff = self.buff[length:] return o def pack_real(self, data_type, data): return struct.pack('!'+data_type, data) def unpack_array(self, data_type, count): a = [] for i in range(count): a.append(self.unpack(data_type)) return a def pack_array(self, data_type, data): o = '' for d in data: o += self.pack(data_type, d) return o def unpack_array_fast(self, data_type, count): data_type = data_types[data_type] if len(self.buff) < count*data_type[1]: raise IncompleteData() o = struct.unpack_from(data_type[0]*count, self.buff) self.buff = self.buff[count*data_type[1]:] return o def pack_array_fast(self, data_type, data): data_type = data_types[data_type] return struct.pack(data_type[0]*len(data), *data) def read_packet(self): """Reads the bytestring in self.buff, and returns the first packet contained within it. Sets self.buff to remaining bytestring. If packet is incomplete, returns None. But may raise if it thinks a real malformed packet has been recieved. """ #self.debug("READ BUFFER SIZE: %d" % len(self.buff)) backup = self.buff[:] packet = Packet() try: packet.direction = self.node packet.ident = self.unpack('ubyte') #Defined structs from huge dict for datatype, name in self.get_struct(packet): # this populates packet.data with {name: value} packet.data[name] = self.unpack(datatype) # I believe the following are packet-type specific fixes for variable-length packets. 
#0x17 if packet.ident == 0x17: if packet.data['unknown'] > 0: packet.data['x2'] = self.unpack('short') packet.data['y2'] = self.unpack('short') packet.data['z2'] = self.unpack('short') #0x33 if packet.ident in (0x33, 0x34): packet.data['data'] = self.unpack_array_fast('byte', packet.data['data_size']) del packet.data["data_size"] # #0x34 # if packet.ident == 0x34: # coords = self.unpack_array_fast('short', packet.data['data_size']) # btype = self.unpack_array_fast('byte', packet.data['data_size']) # metadata = self.unpack_array_fast('byte', packet.data['data_size']) # packet.data["blocks"] = [] # for i in zip(coords, btype, metadata): # block = {} # block["x"] = i[0] >> 12 # block["z"] = 0x0F & i[0] >> 8 # block["y"] = 0xFF & i[0] # block["type"] = i[1] # block["metadata"] = i[2] # packet.data["blocks"].append(block) # del packet.data["data_size"] #0x3C if packet.ident == 0x3C: records = self.unpack_array_fast('byte', packet.data['data_size']*3) i = 0 packet.data["blocks"] = [] while i < packet.data['data_size']*3: packet.data["blocks"].append(dict(zip(('x','y','z'), records[i:i+3]))) i+=3 del packet.data["data_size"] #0x68 if packet.ident == 0x68: packet.data["slots_data"] = self.unpack_array('slot', packet.data["data_size"]) del packet.data["data_size"] #0x82: if packet.ident == 0x82: packet.data["text"] = [] for i in range(4): packet.data["text"].append(packet.data["line_%s" % (i+1)]) #0x83 if packet.ident == 0x83: packet.data["data"] = self.unpack_array_fast('byte', packet.data['data_size']) del packet.data["data_size"] # Sets packet.original to the byte string that the packet was decoded from. 
packet.original = backup[:len(backup) - len(self.buff)] return packet except IncompleteData: self.buff = backup return None except Exception, ex: self.buff = backup ex.args += (self.buff[20:],) raise def encode_packet(self, packet): """Takes a packet, and returns the encoded bytestring representing it.""" try: output = self.pack('ubyte', packet.ident) append = '' #0x17 if packet.ident == 0x17: if packet.data['unknown'] > 0: for i in ('x2','y2','z2'): append += self.pack('short', packet.data[i]) #0x33 if packet.ident in (0x33, 0x34): packet.data['data_size'] = len(packet.data['data']) append += self.pack_array_fast('byte', packet.data['data']) # #0x34 # if packet.ident == 0x34: # coords = [] # btypes = [] # metadata = [] # for i in packet.data['blocks']: # coords.append(i['x'] << 12 | i['z'] << 8 | i['y']) # btypes.append(i['type']) # metadata.append(i['metadata']) # # packet.data['data_size'] = len(coords) # append += self.pack_array_fast('short', coords) # append += self.pack_array_fast('byte', btypes) # append += self.pack_array_fast('byte', metadata) #0x3C if packet.ident == 0x3C: array = [] for i in packet.data['blocks']: array += [i['x'], i['y'], i['z']] packet.data['data_size'] = len(packet.data['blocks']) append += self.pack_array_fast('byte', array) #0x68 if packet.ident == 0x68: packet.data['data_size'] = len(packet.data['slots_data']) append += self.pack_array('slot', packet.data['slots_data']) #0x82: Sign if packet.ident == 0x82: for i in range(4): packet.data["line_%s" % (i+1)] = packet.data["text"][i] #0x83 if packet.ident == 0x83: packet.data['data_size'] = len(packet.data['data']) append += self.pack_array_fast('byte', packet.data['data']) for i in self.get_struct(packet): output += self.pack(i[0], packet.data[i[1]]) output += append return output except Exception: raise def stateless_unpack(buff, to_server): """A wrapper about the normal objects, that lets you unpack encoded packets easily. 
Returns (packet, remaining_buff), where remaining_buff is the given buffer without the bytes eaten by the packet. If no more packets can be read from buff, returns (None, buff). """ decoder = PacketDecoder(to_server) decoder.buff = buff packet = decoder.read_packet() return packet, decoder.buff def stateless_pack(packet, to_server):
class IncompleteData(Exception): pass
"""A wrapper about the normal objects, that lets you pack decoded packets easily. Returns the bytestring that represents the packet.""" decoder = PacketDecoder(to_server) return decoder.encode_packet(packet)
# NOTE(review): the two stray tokens that stood here -- "identifier_body"
# (a fill-in-the-middle dataset tag) and "packet_decoder.py" (the source
# filename column) -- are dataset-extraction residue, not program code.
# Everything after this marker is a second, duplicated copy of
# packet_decoder.py.
#!/usr/bin/python import struct import sys #TODO: Review which of these we actually need SERVER_TO_CLIENT = 0x01 CLIENT_TO_SERVER = 0x02 PROTOCOL_VERSION = 28 class Packet: def __init__(self, packet_type=0, **data): self.direction = 0 if type(packet_type) == str: packet_type = dict((v,k) for k,v in names.items())[packet_type] # Reverse lookup self.ident = packet_type self.data = data def name(self): return names[self.ident] def __str__(self): from_to = {CLIENT_TO_SERVER: "to server", SERVER_TO_CLIENT: "from server"}[self.direction] return "%s packet %s: %s" % (self.name(), from_to, repr(self.data)) def copy(self): p = Packet() p.direction = self.direction p.ident = self.ident p.data = self.data.copy() return p SLOT_EXTRA_DATA_IDS = [ 0x103, 0x105, 0x15A, 0x167, 0x10C, 0x10D, 0x10E, 0x10F, 0x122, 0x110, 0x111, 0x112, 0x113, 0x123, 0x10B, 0x100, 0x101, 0x102, 0x124, 0x114, 0x115, 0x116, 0x117, 0x125, 0x11B, 0x11C, 0x11D, 0x11E, 0x126, 0x12A, 0x12B, 0x12C, 0x12D, 0x12E, 0x12F, 0x130, 0x131, 0x132, 0x133, 0x134, 0x135, 0x136, 0x137, 0x138, 0x139, 0x13A, 0x13B, 0x13C, 0x13D ] data_types = { "ubyte": ('B', 1), "byte": ('b', 1), "bool": ('?', 1), "short": ('h', 2), "float": ('f', 4), "int": ('i', 4), "double": ('d', 8), "long": ('q', 8) } names = { 0x00: "Keep-alive", 0x01: "Login request", 0x02: "Handshake", 0x03: "Chat message", 0x04: "Time update", 0x05: "Entity Equipment", 0x06: "Spawn position", 0x07: "Use entity", 0x08: "Update health", 0x09: "Respawn", 0x0A: "Player", 0x0B: "Player position", 0x0C: "Player look", 0x0D: "Player position & look", 0x0E: "Player digging", 0x0F: "Player block placement", 0x10: "Holding change", 0x11: "Use bed", 0x12: "Animation", 0x13: "Entity action", 0x14: "Named entity spawn", 0x15: "Pickup spawn", 0x16: "Collect item", 0x17: "Add object or vehicle", 0x18: "Mob spawn", 0x19: "Entity: painting", 0x1A: "Experience Orb", 0x1B: "Stance update (DEPRECATED)", 0x1C: "Entity velocity", 0x1D: "Destroy entity", 0x1E: "Entity", 0x1F: "Entity 
relative move", 0x20: "Entity look", 0x21: "Entity look and relative move", 0x22: "Entity teleport", 0x23: "Entity head look", 0x26: "Entity status", 0x27: "Attach entity", 0x28: "Entity metadata", 0x29: "Entity effect", 0x2A: "Remove entity effect", 0x2B: "Experience", 0x32: "Pre-chunk", 0x33: "Map chunk", 0x34: "Multi-block change", 0x35: "Block change", 0x36: "Block action", 0x3C: "Explosion", 0x3D: "Sound effect", 0x46: "New or invalid state", 0x47: "Thunderbolt", 0x64: "Open window", 0x65: "Close window", 0x66: "Window click", 0x67: "Set slot", 0x68: "Window items", 0x69: "Update progress bar", 0x6A: "Transaction", 0x6B: "Creative inventory action", 0x6C: "Enchant Item", 0x82: "Update sign", 0x83: "Map data", 0x84: "Update tile entity", 0xCA: "Player Abilities", 0xC8: "Increment statistic", 0xC9: "Player List Item", 0xFA: "Plugin message", 0xFE: "Server list ping", 0xFF: "Disconnect" } structs = { #Keep-alive 0x00: ("int", "keep_alive_id"), #Login request 0x01: { CLIENT_TO_SERVER: ( ("int", "protocol_version"), ("string16", "username"), ("string16", "level type"), ("int", "server_mode"), ("int", "dimension"), ("byte", "difficulty"), ("ubyte", "world_height"), ("ubyte", "max_players")), SERVER_TO_CLIENT: ( ("int", "entity_id"), ("string16", "unknown"), ("string16", "level type"), ("int", "server_mode"), ("int", "dimension"), ("byte", "difficulty"), ("ubyte", "world_height"), ("ubyte", "max_players"))}, #Handshake 0x02: { CLIENT_TO_SERVER: ("string16", "username"), SERVER_TO_CLIENT: ("string16", "connection_hash")}, #Chat message 0x03: ("string16", "text"), #Time update 0x04: ("long", "time"), #Entity Equipment 0x05: ( ("int", "entity_id"), ("short", "slot"), ("short", "item_id"), ("short", "damage")), #Spawn position 0x06: ( ("int", "x"), ("int", "y"), ("int", "z")), #Use entity 0x07: ( ("int", "subject_entity_id"), ("int", "object_entity_id"), ("bool", "left_click")), #Update health 0x08: ( ("short", "health"), ("short", "food"), ("float", "food_saturation")), 
#Respawn 0x09: ( ("int", "dimension"), ("byte", "difficulty"), ("byte", "server_mode"), ("short", "world_height"), ("string16", "level_type")), #Player 0x0A: ("bool", "on_ground"), #Player position 0x0B: ( ("double", "x"), ("double", "y"), ("double", "stance"), ("double", "z"), ("bool", "on_ground")), #Player look 0x0C: ( ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground")), #Player position & look 0x0D: { CLIENT_TO_SERVER: ( ("double", "x"), ("double", "y"), ("double", "stance"), ("double", "z"), ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground")), SERVER_TO_CLIENT: ( ("double", "x"), ("double", "stance"), ("double", "y"), ("double", "z"), ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground"))}, #Player digging 0x0E: ( ("byte", "status"), ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "face")), #Player block placement 0x0F: ( ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "direction"), ("slot", "slot")), #Holding change 0x10: ("short", "slot"), #Use bed 0x11: ( ("int", "entity_id"), ("byte", "in_bed"), ("int", "x"), ("byte", "y"), ("int", "z")), #Animation 0x12: ( ("int", "entity_id"), ("byte", "animation")), #Entity action 0x13: ( ("int", "entity_id"), ("byte", "action")), #Named entity spawn 0x14: ( ("int", "entity_id"), ("string16", "player_name"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "rotation"), ("byte", "pitch"), ("short", "current_item")), #Pickup spawn 0x15: ( ("int", "entity_id"), ("short", "item"), ("byte", "count"), ("short", "metadata"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "rotation"), ("byte", "pitch"), ("byte", "roll")), #Collect item 0x16: ( ("int", "subject_entity_id"), ("int", "object_entity_id")), #Add object or vehicle 0x17: ( ("int", "entity_id"), ("byte", "type"), ("int", "x"), ("int", "y"), ("int", "z"), ("int", "unknown")), #Mob spawn 0x18: ( ("int", "entity_id"), ("byte", "type"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "yaw"), ("byte", "pitch"), ("byte", "head yaw"), 
("metadata", "metadata")), #Entity: painting 0x19: ( ("int", "entity_id"), ("string16", "title"), ("int", "x"), ("int", "y"), ("int", "z"), ("int", "direction")), #Experience Orb 0x1A: ( ("int", "entity_id"), ("int", "x"), ("int", "y"), ("int", "z"), ("short", "count")), #Stance update 0x1B: ( ("float", "unknown1"), ("float", "unknown2"), ("float", "unknown3"), ("float", "unknown4"), ("bool", "unknown5"), ("bool", "unknown6")), #Entity velocity 0x1C: ( ("int", "entity_id"), ("short", "x_velocity"), ("short", "y_velocity"), ("short", "z_velocity")), #Destroy entity 0x1D: ("int", "entity_id"), #Entity 0x1E: ("int", "entity_id"), #Entity relative move 0x1F: ( ("int", "entity_id"), ("byte", "x_change"), ("byte", "y_change"), ("byte", "z_change")), #Entity look 0x20: ( ("int", "entity_id"), ("byte", "yaw"), ("byte", "pitch")), #Entity look and relative move 0x21: ( ("int", "entity_id"), ("byte", "x_change"), ("byte", "y_change"), ("byte", "z_change"), ("byte", "yaw"), ("byte", "pitch")), #Entity teleport 0x22: ( ("int", "entity_id"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "yaw"), ("byte", "pitch")), # Entity head look 0x23: ( ("int", "entity_id"), ("byte", "head yaw")), #Entity status 0x26: ( ("int", "entity_id"), ("byte", "status")), #Attach entity 0x27: ( ("int", "subject_entity_id"), ("int", "object_entity_id")), #Entity metadata 0x28: ( ("int", "entity_id"), ("metadata", "metadata")), # Entity effect 0x29: ( ("int", "entity_id"), ("byte", "effect_id"), ("byte", "amplifier"), ("short", "duration")), # remove entity effect 0x2A: ( ("int", "entity_id"), ("byte", "effect_id")), # Experience 0x2B: ( ("float", "experience_bar"), ("short", "level"), ("short", "total_experience")), #Pre-chunk 0x32: ( ("int", "x"), ("int", "z"), ("bool", "load")), #Map chunk 0x33: ( ("int", "x"), ("int", "z"), ("bool", "contiguous"), ("short", "bitmap"), ("short", "add_bitmap"), ("int", "data_size"), ("int", "unknown")), #Multi-block change 0x34: ( ("int", "x_chunk"), ("int", 
"z_chunk"), ("short", "record_count"), ("int", "data_size")), #Block change 0x35: ( ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "id"), ("byte", "metadata")), #Block action 0x36: ( ("int", "x"), ("short", "y"), ("int", "z"), ("byte", "type_state"), ("byte", "pitch_direction")), #Explosion 0x3C: ( ("double", "x"), ("double", "y"), ("double", "z"), ("float", "unknown"), ("int", "data_size")), #Sound effect 0x3D: ( ("int", "effect_id"), ("int", "x"), ("byte", "y"), ("int", "z"), ("int", "extra")), #New or invalid state 0x46: ( ("byte", "reason"), ("byte", "gamemode")), #Thunderbolt 0x47: ( ("int", "entity_id"), ("bool", "unknown"), ("int", "x"), ("int", "y"), ("int", "z")), #Open window 0x64: ( ("byte", "window_id"), ("byte", "inventory_type"), ("string16", "window_title"), ("byte", "slots_count")), #Close window 0x65: ("byte", "window_id"), #Window click 0x66: ( ("byte", "window_id"), ("short", "slot"), ("byte", "right_click"), ("short", "transaction_id"), ("bool", "shift"), ("slot", "slot_data")), #Set slot 0x67: ( ("byte", "window_id"), ("short", "slot"), ("slot", "slot_data")), #Window items 0x68: ( ("byte", "window_id"), ("short", "data_size")), #Update progress bar 0x69: ( ("byte", "window_id"), ("short", "progress_bar_type"), ("short", "progress")), #Transaction 0x6A: ( ("byte", "window_id"), ("short", "transaction_id"), ("bool", "accepted")), # Creative Inventory Action 0x6B: ( ("short", "slot"), ("slot", "slot_data")), # Enchant Item 0x6C: ( ("byte", "window_id"), ("byte", "enchantment")), #Update sign 0x82: ( ("int", "x"), ("short", "y"), ("int", "z"), ("string16", "line_1"), ("string16", "line_2"), ("string16", "line_3"), ("string16", "line_4")), #Map data 0x83: ( ("short", "unknown1"), ("short", "map_id"), ("ubyte", "data_size")), #Update Tile Entity 0x84: ( ("int", "x"), ("short", "y"), ("int", "z"), ("byte", "action"), ("int", "custom1"), ("int", "custom2"), ("int", "custom3")), # Player Abilities 0xCA: ( ("bool", "invulnerable"), ("bool", 
"flying"), ("bool", "can_fly"), ("bool", "instant_destroy")), #Increment statistic 0xC8: ( ("int", "statistic_id"), ("byte", "amount")), # Player List Item 0xC9: ( ("string16", "player_name"), ("bool", "online"), ("short", "ping")), #Server list ping 0xFE: (), #Disconnect 0xFF: ("string16", "reason")} class PacketDecoder: def __init__(self, to_server): self.buff = '' self.error_count = 0 self.node = CLIENT_TO_SERVER if to_server else SERVER_TO_CLIENT self.iPacketCounter = 0 def get_struct(self, packet): """Reads ident and direction from packet, and returns the associated struct description from structs global. Normalises return to be a ((str, str), ...)""" o = structs[packet.ident] if isinstance(o, dict): o = o[packet.direction] if len(o) and not isinstance(o[0], tuple): o = (o), return o def pack(self, data_type, data): if data_type in data_types: format = data_types[data_type] return self.pack_real(format[0], data) if data_type == "string8": return self.pack("short", len(data)) + data if data_type == "string16": return self.pack("short", len(data)) + data.encode('utf-16be') if data_type == "slot": o = self.pack('short', data['id']) if data['id'] > 0: o += self.pack('byte', data['amount']) o += self.pack('short', data['damage']) if 'extra' in data: nbtdata = data['extra'] if nbtdata is None: o += self.pack('short', -1) else: nbt_len = len(nbtdata) o += self.pack('short', nbt_len) o += nbtdata return o if data_type == "metadata": o = '' for mtype, val in data: mtype2 = mtype >> 5 o += self.pack('byte', mtype) if mtype2 == 0:
if mtype2 == 1: o += self.pack('short', val) if mtype2 == 2: o += self.pack('int', val) if mtype2 == 3: o += self.pack('float', val) if mtype2 == 4: o += self.pack('string16', val) if mtype2 == 5: o += self.pack('short', val['id']) o += self.pack('byte', val['count']) o += self.pack('short', val['damage']) if mtype2 == 6: for i in range(3): o += self.pack('int', val[i]) o += self.pack('byte', 127) return o def unpack(self, data_type): """Reads buff (consuming bytes) and returns the unpacked value according to the given type.""" if data_type in data_types: format = data_types[data_type] return self.unpack_real(format[0], format[1]) if data_type == "string8": length = self.unpack('short') if length < 0: raise Exception("Negative length for string") if len(self.buff) < length: raise IncompleteData() string = self.buff[:length] self.buff = self.buff[length:] return string if data_type == "string16": length = self.unpack('short') if length < 0: raise Exception("Negative length for string") if len(self.buff) < 2*length: raise IncompleteData() string = self.buff[:2*length].decode('utf-16be') self.buff = self.buff[2*length:] return string if data_type == "slot": o = {} o["id"] = self.unpack('short') if o["id"] > 0: o["amount"] = self.unpack('byte') o["damage"] = self.unpack('short') if o["id"] in SLOT_EXTRA_DATA_IDS: extra_len = self.unpack('short') if extra_len <= 0: o["extra"] = None else: if len(self.buff) < extra_len: raise IncompleteData() extra_buff = self.buff[:extra_len] self.buff = self.buff[extra_len:] o["extra"] = extra_buff return o if data_type == "metadata": #[(17, 0), (0, 0), (16, -1)] o = [] mtype = self.unpack('byte') while mtype != 127: mtype2 = mtype >> 5 t = 0 if mtype2 == 0: t = self.unpack('byte') if mtype2 == 1: t = self.unpack('short') if mtype2 == 2: t = self.unpack('int') if mtype2 == 3: t = self.unpack('float') if mtype2 == 4: t = self.unpack('string16') if mtype2 == 5: t = {} t["id"] = self.unpack('short') t["count"] = self.unpack('byte') 
t["damage"] = self.unpack('short') if mtype2 == 6: t = [] for i in range(3): s = self.unpack('int') t.append(s) t = (mtype, t) o.append(t) mtype = self.unpack('byte') return o def unpack_real(self, data_type, length): """A helper function for unpack(), it handles any data type that is understood by the struct module.""" if len(self.buff) < length: raise IncompleteData() o = struct.unpack_from('!'+data_type, self.buff)[0] self.buff = self.buff[length:] return o def pack_real(self, data_type, data): return struct.pack('!'+data_type, data) def unpack_array(self, data_type, count): a = [] for i in range(count): a.append(self.unpack(data_type)) return a def pack_array(self, data_type, data): o = '' for d in data: o += self.pack(data_type, d) return o def unpack_array_fast(self, data_type, count): data_type = data_types[data_type] if len(self.buff) < count*data_type[1]: raise IncompleteData() o = struct.unpack_from(data_type[0]*count, self.buff) self.buff = self.buff[count*data_type[1]:] return o def pack_array_fast(self, data_type, data): data_type = data_types[data_type] return struct.pack(data_type[0]*len(data), *data) def read_packet(self): """Reads the bytestring in self.buff, and returns the first packet contained within it. Sets self.buff to remaining bytestring. If packet is incomplete, returns None. But may raise if it thinks a real malformed packet has been recieved. """ #self.debug("READ BUFFER SIZE: %d" % len(self.buff)) backup = self.buff[:] packet = Packet() try: packet.direction = self.node packet.ident = self.unpack('ubyte') #Defined structs from huge dict for datatype, name in self.get_struct(packet): # this populates packet.data with {name: value} packet.data[name] = self.unpack(datatype) # I believe the following are packet-type specific fixes for variable-length packets. 
#0x17 if packet.ident == 0x17: if packet.data['unknown'] > 0: packet.data['x2'] = self.unpack('short') packet.data['y2'] = self.unpack('short') packet.data['z2'] = self.unpack('short') #0x33 if packet.ident in (0x33, 0x34): packet.data['data'] = self.unpack_array_fast('byte', packet.data['data_size']) del packet.data["data_size"] # #0x34 # if packet.ident == 0x34: # coords = self.unpack_array_fast('short', packet.data['data_size']) # btype = self.unpack_array_fast('byte', packet.data['data_size']) # metadata = self.unpack_array_fast('byte', packet.data['data_size']) # packet.data["blocks"] = [] # for i in zip(coords, btype, metadata): # block = {} # block["x"] = i[0] >> 12 # block["z"] = 0x0F & i[0] >> 8 # block["y"] = 0xFF & i[0] # block["type"] = i[1] # block["metadata"] = i[2] # packet.data["blocks"].append(block) # del packet.data["data_size"] #0x3C if packet.ident == 0x3C: records = self.unpack_array_fast('byte', packet.data['data_size']*3) i = 0 packet.data["blocks"] = [] while i < packet.data['data_size']*3: packet.data["blocks"].append(dict(zip(('x','y','z'), records[i:i+3]))) i+=3 del packet.data["data_size"] #0x68 if packet.ident == 0x68: packet.data["slots_data"] = self.unpack_array('slot', packet.data["data_size"]) del packet.data["data_size"] #0x82: if packet.ident == 0x82: packet.data["text"] = [] for i in range(4): packet.data["text"].append(packet.data["line_%s" % (i+1)]) #0x83 if packet.ident == 0x83: packet.data["data"] = self.unpack_array_fast('byte', packet.data['data_size']) del packet.data["data_size"] # Sets packet.original to the byte string that the packet was decoded from. 
packet.original = backup[:len(backup) - len(self.buff)] return packet except IncompleteData: self.buff = backup return None except Exception, ex: self.buff = backup ex.args += (self.buff[20:],) raise def encode_packet(self, packet): """Takes a packet, and returns the encoded bytestring representing it.""" try: output = self.pack('ubyte', packet.ident) append = '' #0x17 if packet.ident == 0x17: if packet.data['unknown'] > 0: for i in ('x2','y2','z2'): append += self.pack('short', packet.data[i]) #0x33 if packet.ident in (0x33, 0x34): packet.data['data_size'] = len(packet.data['data']) append += self.pack_array_fast('byte', packet.data['data']) # #0x34 # if packet.ident == 0x34: # coords = [] # btypes = [] # metadata = [] # for i in packet.data['blocks']: # coords.append(i['x'] << 12 | i['z'] << 8 | i['y']) # btypes.append(i['type']) # metadata.append(i['metadata']) # # packet.data['data_size'] = len(coords) # append += self.pack_array_fast('short', coords) # append += self.pack_array_fast('byte', btypes) # append += self.pack_array_fast('byte', metadata) #0x3C if packet.ident == 0x3C: array = [] for i in packet.data['blocks']: array += [i['x'], i['y'], i['z']] packet.data['data_size'] = len(packet.data['blocks']) append += self.pack_array_fast('byte', array) #0x68 if packet.ident == 0x68: packet.data['data_size'] = len(packet.data['slots_data']) append += self.pack_array('slot', packet.data['slots_data']) #0x82: Sign if packet.ident == 0x82: for i in range(4): packet.data["line_%s" % (i+1)] = packet.data["text"][i] #0x83 if packet.ident == 0x83: packet.data['data_size'] = len(packet.data['data']) append += self.pack_array_fast('byte', packet.data['data']) for i in self.get_struct(packet): output += self.pack(i[0], packet.data[i[1]]) output += append return output except Exception: raise def stateless_unpack(buff, to_server): """A wrapper about the normal objects, that lets you unpack encoded packets easily. 
Returns (packet, remaining_buff), where remaining_buff is the given buffer without the bytes eaten by the packet. If no more packets can be read from buff, returns (None, buff). """ decoder = PacketDecoder(to_server) decoder.buff = buff packet = decoder.read_packet() return packet, decoder.buff def stateless_pack(packet, to_server): """A wrapper about the normal objects, that lets you pack decoded packets easily. Returns the bytestring that represents the packet.""" decoder = PacketDecoder(to_server) return decoder.encode_packet(packet) class IncompleteData(Exception): pass
o += self.pack('byte', val)
conditional_block
packet_decoder.py
#!/usr/bin/python import struct import sys #TODO: Review which of these we actually need SERVER_TO_CLIENT = 0x01 CLIENT_TO_SERVER = 0x02 PROTOCOL_VERSION = 28 class Packet: def __init__(self, packet_type=0, **data): self.direction = 0 if type(packet_type) == str: packet_type = dict((v,k) for k,v in names.items())[packet_type] # Reverse lookup self.ident = packet_type self.data = data def name(self): return names[self.ident] def __str__(self): from_to = {CLIENT_TO_SERVER: "to server", SERVER_TO_CLIENT: "from server"}[self.direction] return "%s packet %s: %s" % (self.name(), from_to, repr(self.data)) def copy(self): p = Packet() p.direction = self.direction p.ident = self.ident p.data = self.data.copy() return p SLOT_EXTRA_DATA_IDS = [ 0x103, 0x105, 0x15A, 0x167, 0x10C, 0x10D, 0x10E, 0x10F, 0x122, 0x110, 0x111, 0x112, 0x113, 0x123, 0x10B, 0x100, 0x101, 0x102, 0x124, 0x114, 0x115, 0x116, 0x117, 0x125, 0x11B, 0x11C, 0x11D, 0x11E, 0x126, 0x12A, 0x12B, 0x12C, 0x12D, 0x12E, 0x12F, 0x130, 0x131, 0x132, 0x133, 0x134, 0x135, 0x136, 0x137, 0x138, 0x139, 0x13A, 0x13B, 0x13C, 0x13D ] data_types = { "ubyte": ('B', 1), "byte": ('b', 1), "bool": ('?', 1), "short": ('h', 2), "float": ('f', 4), "int": ('i', 4), "double": ('d', 8), "long": ('q', 8) } names = { 0x00: "Keep-alive", 0x01: "Login request", 0x02: "Handshake", 0x03: "Chat message", 0x04: "Time update", 0x05: "Entity Equipment", 0x06: "Spawn position", 0x07: "Use entity", 0x08: "Update health", 0x09: "Respawn", 0x0A: "Player", 0x0B: "Player position", 0x0C: "Player look", 0x0D: "Player position & look", 0x0E: "Player digging", 0x0F: "Player block placement", 0x10: "Holding change", 0x11: "Use bed", 0x12: "Animation", 0x13: "Entity action", 0x14: "Named entity spawn", 0x15: "Pickup spawn", 0x16: "Collect item", 0x17: "Add object or vehicle", 0x18: "Mob spawn", 0x19: "Entity: painting", 0x1A: "Experience Orb", 0x1B: "Stance update (DEPRECATED)", 0x1C: "Entity velocity", 0x1D: "Destroy entity", 0x1E: "Entity", 0x1F: "Entity 
relative move", 0x20: "Entity look", 0x21: "Entity look and relative move", 0x22: "Entity teleport", 0x23: "Entity head look", 0x26: "Entity status", 0x27: "Attach entity", 0x28: "Entity metadata", 0x29: "Entity effect", 0x2A: "Remove entity effect", 0x2B: "Experience", 0x32: "Pre-chunk", 0x33: "Map chunk", 0x34: "Multi-block change", 0x35: "Block change", 0x36: "Block action", 0x3C: "Explosion", 0x3D: "Sound effect", 0x46: "New or invalid state", 0x47: "Thunderbolt", 0x64: "Open window", 0x65: "Close window", 0x66: "Window click", 0x67: "Set slot", 0x68: "Window items", 0x69: "Update progress bar", 0x6A: "Transaction", 0x6B: "Creative inventory action", 0x6C: "Enchant Item", 0x82: "Update sign", 0x83: "Map data", 0x84: "Update tile entity", 0xCA: "Player Abilities", 0xC8: "Increment statistic", 0xC9: "Player List Item", 0xFA: "Plugin message", 0xFE: "Server list ping", 0xFF: "Disconnect" } structs = { #Keep-alive 0x00: ("int", "keep_alive_id"), #Login request 0x01: { CLIENT_TO_SERVER: ( ("int", "protocol_version"), ("string16", "username"), ("string16", "level type"), ("int", "server_mode"), ("int", "dimension"), ("byte", "difficulty"), ("ubyte", "world_height"), ("ubyte", "max_players")), SERVER_TO_CLIENT: ( ("int", "entity_id"), ("string16", "unknown"), ("string16", "level type"), ("int", "server_mode"), ("int", "dimension"), ("byte", "difficulty"), ("ubyte", "world_height"), ("ubyte", "max_players"))}, #Handshake 0x02: { CLIENT_TO_SERVER: ("string16", "username"), SERVER_TO_CLIENT: ("string16", "connection_hash")}, #Chat message 0x03: ("string16", "text"), #Time update 0x04: ("long", "time"), #Entity Equipment 0x05: ( ("int", "entity_id"), ("short", "slot"), ("short", "item_id"), ("short", "damage")), #Spawn position 0x06: ( ("int", "x"), ("int", "y"), ("int", "z")), #Use entity 0x07: ( ("int", "subject_entity_id"), ("int", "object_entity_id"), ("bool", "left_click")), #Update health 0x08: ( ("short", "health"), ("short", "food"), ("float", "food_saturation")), 
#Respawn 0x09: ( ("int", "dimension"), ("byte", "difficulty"), ("byte", "server_mode"), ("short", "world_height"), ("string16", "level_type")), #Player 0x0A: ("bool", "on_ground"), #Player position 0x0B: ( ("double", "x"), ("double", "y"), ("double", "stance"), ("double", "z"), ("bool", "on_ground")), #Player look 0x0C: ( ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground")), #Player position & look 0x0D: { CLIENT_TO_SERVER: ( ("double", "x"), ("double", "y"), ("double", "stance"), ("double", "z"), ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground")), SERVER_TO_CLIENT: ( ("double", "x"), ("double", "stance"), ("double", "y"), ("double", "z"), ("float", "yaw"), ("float", "pitch"), ("bool", "on_ground"))}, #Player digging 0x0E: ( ("byte", "status"), ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "face")), #Player block placement 0x0F: ( ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "direction"), ("slot", "slot")), #Holding change 0x10: ("short", "slot"), #Use bed 0x11: ( ("int", "entity_id"), ("byte", "in_bed"), ("int", "x"), ("byte", "y"), ("int", "z")), #Animation 0x12: ( ("int", "entity_id"), ("byte", "animation")), #Entity action 0x13: ( ("int", "entity_id"), ("byte", "action")), #Named entity spawn 0x14: ( ("int", "entity_id"), ("string16", "player_name"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "rotation"), ("byte", "pitch"), ("short", "current_item")), #Pickup spawn 0x15: ( ("int", "entity_id"), ("short", "item"), ("byte", "count"), ("short", "metadata"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "rotation"), ("byte", "pitch"), ("byte", "roll")), #Collect item 0x16: ( ("int", "subject_entity_id"), ("int", "object_entity_id")), #Add object or vehicle 0x17: ( ("int", "entity_id"), ("byte", "type"), ("int", "x"), ("int", "y"), ("int", "z"), ("int", "unknown")), #Mob spawn 0x18: ( ("int", "entity_id"), ("byte", "type"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "yaw"), ("byte", "pitch"), ("byte", "head yaw"), 
("metadata", "metadata")), #Entity: painting 0x19: ( ("int", "entity_id"), ("string16", "title"), ("int", "x"), ("int", "y"), ("int", "z"), ("int", "direction")), #Experience Orb 0x1A: ( ("int", "entity_id"), ("int", "x"), ("int", "y"), ("int", "z"), ("short", "count")), #Stance update 0x1B: ( ("float", "unknown1"), ("float", "unknown2"), ("float", "unknown3"), ("float", "unknown4"), ("bool", "unknown5"), ("bool", "unknown6")), #Entity velocity 0x1C: ( ("int", "entity_id"), ("short", "x_velocity"), ("short", "y_velocity"), ("short", "z_velocity")), #Destroy entity 0x1D: ("int", "entity_id"), #Entity 0x1E: ("int", "entity_id"), #Entity relative move 0x1F: ( ("int", "entity_id"), ("byte", "x_change"), ("byte", "y_change"), ("byte", "z_change")), #Entity look 0x20: ( ("int", "entity_id"), ("byte", "yaw"), ("byte", "pitch")), #Entity look and relative move 0x21: ( ("int", "entity_id"), ("byte", "x_change"), ("byte", "y_change"), ("byte", "z_change"), ("byte", "yaw"), ("byte", "pitch")), #Entity teleport 0x22: ( ("int", "entity_id"), ("int", "x"), ("int", "y"), ("int", "z"), ("byte", "yaw"), ("byte", "pitch")), # Entity head look 0x23: ( ("int", "entity_id"), ("byte", "head yaw")), #Entity status 0x26: ( ("int", "entity_id"), ("byte", "status")), #Attach entity 0x27: ( ("int", "subject_entity_id"), ("int", "object_entity_id")), #Entity metadata 0x28: ( ("int", "entity_id"), ("metadata", "metadata")), # Entity effect 0x29: ( ("int", "entity_id"), ("byte", "effect_id"), ("byte", "amplifier"), ("short", "duration")), # remove entity effect 0x2A: ( ("int", "entity_id"), ("byte", "effect_id")), # Experience 0x2B: ( ("float", "experience_bar"), ("short", "level"), ("short", "total_experience")), #Pre-chunk 0x32: ( ("int", "x"), ("int", "z"), ("bool", "load")), #Map chunk 0x33: ( ("int", "x"), ("int", "z"), ("bool", "contiguous"), ("short", "bitmap"), ("short", "add_bitmap"), ("int", "data_size"), ("int", "unknown")), #Multi-block change 0x34: ( ("int", "x_chunk"), ("int", 
"z_chunk"), ("short", "record_count"), ("int", "data_size")), #Block change 0x35: ( ("int", "x"), ("byte", "y"), ("int", "z"), ("byte", "id"), ("byte", "metadata")), #Block action 0x36: ( ("int", "x"), ("short", "y"), ("int", "z"), ("byte", "type_state"), ("byte", "pitch_direction")), #Explosion 0x3C: ( ("double", "x"), ("double", "y"), ("double", "z"), ("float", "unknown"), ("int", "data_size")), #Sound effect 0x3D: ( ("int", "effect_id"), ("int", "x"), ("byte", "y"), ("int", "z"), ("int", "extra")), #New or invalid state 0x46: ( ("byte", "reason"), ("byte", "gamemode")), #Thunderbolt 0x47: ( ("int", "entity_id"), ("bool", "unknown"), ("int", "x"), ("int", "y"), ("int", "z")), #Open window 0x64: ( ("byte", "window_id"), ("byte", "inventory_type"), ("string16", "window_title"), ("byte", "slots_count")), #Close window 0x65: ("byte", "window_id"), #Window click 0x66: ( ("byte", "window_id"), ("short", "slot"), ("byte", "right_click"), ("short", "transaction_id"), ("bool", "shift"), ("slot", "slot_data")), #Set slot 0x67: ( ("byte", "window_id"), ("short", "slot"), ("slot", "slot_data")), #Window items 0x68: ( ("byte", "window_id"), ("short", "data_size")), #Update progress bar 0x69: ( ("byte", "window_id"), ("short", "progress_bar_type"), ("short", "progress")), #Transaction 0x6A: ( ("byte", "window_id"), ("short", "transaction_id"), ("bool", "accepted")), # Creative Inventory Action 0x6B: ( ("short", "slot"), ("slot", "slot_data")), # Enchant Item 0x6C: ( ("byte", "window_id"), ("byte", "enchantment")), #Update sign 0x82: ( ("int", "x"), ("short", "y"), ("int", "z"), ("string16", "line_1"), ("string16", "line_2"), ("string16", "line_3"), ("string16", "line_4")), #Map data 0x83: ( ("short", "unknown1"), ("short", "map_id"), ("ubyte", "data_size")), #Update Tile Entity 0x84: ( ("int", "x"), ("short", "y"), ("int", "z"), ("byte", "action"), ("int", "custom1"), ("int", "custom2"), ("int", "custom3")), # Player Abilities 0xCA: ( ("bool", "invulnerable"), ("bool", 
"flying"), ("bool", "can_fly"), ("bool", "instant_destroy")), #Increment statistic 0xC8: ( ("int", "statistic_id"), ("byte", "amount")), # Player List Item 0xC9: ( ("string16", "player_name"), ("bool", "online"), ("short", "ping")), #Server list ping 0xFE: (), #Disconnect 0xFF: ("string16", "reason")} class PacketDecoder: def __init__(self, to_server): self.buff = '' self.error_count = 0 self.node = CLIENT_TO_SERVER if to_server else SERVER_TO_CLIENT self.iPacketCounter = 0 def get_struct(self, packet): """Reads ident and direction from packet, and returns the associated struct description from structs global. Normalises return to be a ((str, str), ...)""" o = structs[packet.ident] if isinstance(o, dict): o = o[packet.direction] if len(o) and not isinstance(o[0], tuple): o = (o), return o def pack(self, data_type, data): if data_type in data_types: format = data_types[data_type] return self.pack_real(format[0], data) if data_type == "string8": return self.pack("short", len(data)) + data if data_type == "string16": return self.pack("short", len(data)) + data.encode('utf-16be') if data_type == "slot": o = self.pack('short', data['id']) if data['id'] > 0: o += self.pack('byte', data['amount']) o += self.pack('short', data['damage']) if 'extra' in data: nbtdata = data['extra'] if nbtdata is None: o += self.pack('short', -1) else: nbt_len = len(nbtdata) o += self.pack('short', nbt_len) o += nbtdata return o if data_type == "metadata": o = '' for mtype, val in data: mtype2 = mtype >> 5 o += self.pack('byte', mtype) if mtype2 == 0: o += self.pack('byte', val) if mtype2 == 1: o += self.pack('short', val) if mtype2 == 2: o += self.pack('int', val) if mtype2 == 3: o += self.pack('float', val) if mtype2 == 4: o += self.pack('string16', val) if mtype2 == 5: o += self.pack('short', val['id']) o += self.pack('byte', val['count']) o += self.pack('short', val['damage']) if mtype2 == 6: for i in range(3): o += self.pack('int', val[i]) o += self.pack('byte', 127) return o def 
unpack(self, data_type): """Reads buff (consuming bytes) and returns the unpacked value according to the given type.""" if data_type in data_types: format = data_types[data_type] return self.unpack_real(format[0], format[1]) if data_type == "string8": length = self.unpack('short') if length < 0: raise Exception("Negative length for string") if len(self.buff) < length: raise IncompleteData() string = self.buff[:length] self.buff = self.buff[length:] return string if data_type == "string16": length = self.unpack('short') if length < 0: raise Exception("Negative length for string") if len(self.buff) < 2*length: raise IncompleteData() string = self.buff[:2*length].decode('utf-16be') self.buff = self.buff[2*length:] return string if data_type == "slot": o = {} o["id"] = self.unpack('short') if o["id"] > 0: o["amount"] = self.unpack('byte') o["damage"] = self.unpack('short') if o["id"] in SLOT_EXTRA_DATA_IDS: extra_len = self.unpack('short') if extra_len <= 0: o["extra"] = None else: if len(self.buff) < extra_len: raise IncompleteData() extra_buff = self.buff[:extra_len] self.buff = self.buff[extra_len:] o["extra"] = extra_buff return o if data_type == "metadata": #[(17, 0), (0, 0), (16, -1)] o = [] mtype = self.unpack('byte') while mtype != 127: mtype2 = mtype >> 5 t = 0 if mtype2 == 0: t = self.unpack('byte') if mtype2 == 1: t = self.unpack('short') if mtype2 == 2: t = self.unpack('int') if mtype2 == 3: t = self.unpack('float') if mtype2 == 4: t = self.unpack('string16') if mtype2 == 5: t = {} t["id"] = self.unpack('short') t["count"] = self.unpack('byte') t["damage"] = self.unpack('short') if mtype2 == 6: t = [] for i in range(3): s = self.unpack('int') t.append(s) t = (mtype, t) o.append(t) mtype = self.unpack('byte') return o def unpack_real(self, data_type, length): """A helper function for unpack(), it handles any data type that is understood by the struct module.""" if len(self.buff) < length: raise IncompleteData() o = struct.unpack_from('!'+data_type, 
self.buff)[0] self.buff = self.buff[length:] return o def pack_real(self, data_type, data): return struct.pack('!'+data_type, data) def unpack_array(self, data_type, count): a = [] for i in range(count): a.append(self.unpack(data_type)) return a def pack_array(self, data_type, data): o = '' for d in data: o += self.pack(data_type, d) return o def unpack_array_fast(self, data_type, count): data_type = data_types[data_type] if len(self.buff) < count*data_type[1]: raise IncompleteData() o = struct.unpack_from(data_type[0]*count, self.buff) self.buff = self.buff[count*data_type[1]:] return o def pack_array_fast(self, data_type, data): data_type = data_types[data_type] return struct.pack(data_type[0]*len(data), *data) def read_packet(self): """Reads the bytestring in self.buff, and returns the first packet contained within it. Sets self.buff to remaining bytestring. If packet is incomplete, returns None. But may raise if it thinks a real malformed packet has been recieved. """ #self.debug("READ BUFFER SIZE: %d" % len(self.buff)) backup = self.buff[:] packet = Packet() try: packet.direction = self.node packet.ident = self.unpack('ubyte') #Defined structs from huge dict for datatype, name in self.get_struct(packet): # this populates packet.data with {name: value} packet.data[name] = self.unpack(datatype) # I believe the following are packet-type specific fixes for variable-length packets. 
#0x17 if packet.ident == 0x17: if packet.data['unknown'] > 0: packet.data['x2'] = self.unpack('short') packet.data['y2'] = self.unpack('short') packet.data['z2'] = self.unpack('short') #0x33 if packet.ident in (0x33, 0x34): packet.data['data'] = self.unpack_array_fast('byte', packet.data['data_size']) del packet.data["data_size"] # #0x34 # if packet.ident == 0x34: # coords = self.unpack_array_fast('short', packet.data['data_size']) # btype = self.unpack_array_fast('byte', packet.data['data_size']) # metadata = self.unpack_array_fast('byte', packet.data['data_size']) # packet.data["blocks"] = [] # for i in zip(coords, btype, metadata): # block = {} # block["x"] = i[0] >> 12 # block["z"] = 0x0F & i[0] >> 8 # block["y"] = 0xFF & i[0] # block["type"] = i[1] # block["metadata"] = i[2] # packet.data["blocks"].append(block) # del packet.data["data_size"] #0x3C if packet.ident == 0x3C: records = self.unpack_array_fast('byte', packet.data['data_size']*3) i = 0 packet.data["blocks"] = [] while i < packet.data['data_size']*3: packet.data["blocks"].append(dict(zip(('x','y','z'), records[i:i+3]))) i+=3 del packet.data["data_size"] #0x68 if packet.ident == 0x68: packet.data["slots_data"] = self.unpack_array('slot', packet.data["data_size"]) del packet.data["data_size"] #0x82: if packet.ident == 0x82: packet.data["text"] = [] for i in range(4): packet.data["text"].append(packet.data["line_%s" % (i+1)]) #0x83 if packet.ident == 0x83: packet.data["data"] = self.unpack_array_fast('byte', packet.data['data_size']) del packet.data["data_size"] # Sets packet.original to the byte string that the packet was decoded from. 
packet.original = backup[:len(backup) - len(self.buff)] return packet except IncompleteData: self.buff = backup return None except Exception, ex: self.buff = backup ex.args += (self.buff[20:],) raise def encode_packet(self, packet): """Takes a packet, and returns the encoded bytestring representing it.""" try: output = self.pack('ubyte', packet.ident) append = '' #0x17 if packet.ident == 0x17: if packet.data['unknown'] > 0: for i in ('x2','y2','z2'): append += self.pack('short', packet.data[i]) #0x33 if packet.ident in (0x33, 0x34): packet.data['data_size'] = len(packet.data['data']) append += self.pack_array_fast('byte', packet.data['data']) # #0x34 # if packet.ident == 0x34:
# btypes.append(i['type']) # metadata.append(i['metadata']) # # packet.data['data_size'] = len(coords) # append += self.pack_array_fast('short', coords) # append += self.pack_array_fast('byte', btypes) # append += self.pack_array_fast('byte', metadata) #0x3C if packet.ident == 0x3C: array = [] for i in packet.data['blocks']: array += [i['x'], i['y'], i['z']] packet.data['data_size'] = len(packet.data['blocks']) append += self.pack_array_fast('byte', array) #0x68 if packet.ident == 0x68: packet.data['data_size'] = len(packet.data['slots_data']) append += self.pack_array('slot', packet.data['slots_data']) #0x82: Sign if packet.ident == 0x82: for i in range(4): packet.data["line_%s" % (i+1)] = packet.data["text"][i] #0x83 if packet.ident == 0x83: packet.data['data_size'] = len(packet.data['data']) append += self.pack_array_fast('byte', packet.data['data']) for i in self.get_struct(packet): output += self.pack(i[0], packet.data[i[1]]) output += append return output except Exception: raise def stateless_unpack(buff, to_server): """A wrapper about the normal objects, that lets you unpack encoded packets easily. Returns (packet, remaining_buff), where remaining_buff is the given buffer without the bytes eaten by the packet. If no more packets can be read from buff, returns (None, buff). """ decoder = PacketDecoder(to_server) decoder.buff = buff packet = decoder.read_packet() return packet, decoder.buff def stateless_pack(packet, to_server): """A wrapper about the normal objects, that lets you pack decoded packets easily. Returns the bytestring that represents the packet.""" decoder = PacketDecoder(to_server) return decoder.encode_packet(packet) class IncompleteData(Exception): pass
# coords = [] # btypes = [] # metadata = [] # for i in packet.data['blocks']: # coords.append(i['x'] << 12 | i['z'] << 8 | i['y'])
random_line_split
lib.rs
//! Async concurrent hashmap built on top of [dashmap](https://docs.rs/dashmap/). //! //! # Wait //! [`WaitMap`](crate::WaitMap) is a concurrent hashmap with an asynchronous `wait` operation. //! ``` //! # extern crate async_std; //! # extern crate waitmap; //! # use async_std::main; //! # use waitmap::WaitMap; //! # #[async_std::main] //! # async fn main() -> std::io::Result<()> { //! let map: WaitMap<String, i32> = WaitMap::new(); //! # map.insert(String::from("Rosa Luxemburg"), 1); //! //! // This will wait until a value is put under the key "Rosa Luxemburg" //! if let Some(value) = map.wait("Rosa Luxemburg").await { //! // ... //! } //! # Ok(()) //! # } //! ``` //! //! Waits are cancellable. Cancelled waits evaluate to `None`. //! ``` //! # extern crate async_std; //! # extern crate waitmap; //! # use async_std::{main, task}; //! # use std::time::Duration; //! # use std::sync::Arc; //! # use waitmap::WaitMap; //! # #[async_std::main] //! # async fn main() -> std::io::Result<()> { //! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new()); //! let map1 = map.clone(); //! //! let handle = task::spawn(async move { //! let result = map.wait("Voltairine de Cleyre").await; //! assert!(result.is_none()); //! }); //! //! task::spawn(async move { //! task::sleep(Duration::from_millis(100)).await; // avoid deadlock //! map1.cancel("Voltairine de Cleyre"); //! }); //! //! task::block_on(handle); //! # Ok(()) //! # } //! ``` mod wait; mod waker_set; use std::borrow::Borrow; use std::collections::hash_map::RandomState; use std::future::Future; use std::hash::{BuildHasher, Hash}; use std::mem; use dashmap::mapref::entry::Entry::*; use dashmap::mapref::one; use dashmap::DashMap; use wait::{Wait, WaitMut}; use waker_set::WakerSet; use WaitEntry::*; /// An asynchronous concurrent hashmap. pub struct WaitMap<K, V, S = RandomState> { map: DashMap<K, WaitEntry<V>, S>, } impl<K: Hash + Eq, V> WaitMap<K, V> { /// Make a new `WaitMap` using the default hasher. 
pub fn new() -> WaitMap<K, V> { WaitMap { map: DashMap::with_hasher(RandomState::default()), } } } impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> { /// Make a new `WaitMap` using a custom hasher. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::main; /// # use waitmap::WaitMap; /// use std::collections::hash_map::RandomState; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new()); /// # Ok(()) /// # } /// ``` pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> { WaitMap { map: DashMap::with_hasher(hasher), } } /// Inserts a key-value pair into the map. /// /// If the map did not have this key present, `None` is returned. /// /// If there are any pending `wait` calls for this key, they are woken up. /// /// If the map did have this key present, the value is updated and the old value is returned. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::{main, sync::Arc, prelude::*}; /// # use waitmap::WaitMap; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new()); /// /// let insert_fut = async { map.insert("hi".to_string(), 0) }; /// let wait_fut = map.wait("hi"); /// /// let (insert_res, wait_res) = insert_fut.join(wait_fut).await; /// assert!(insert_res.is_none()); /// assert!(wait_res.is_some()); /// # Ok(()) /// # } /// ``` pub fn insert(&self, key: K, value: V) -> Option<V> { match self.map.entry(key) { Occupied(mut entry) => { match mem::replace(entry.get_mut(), Filled(value)) { Waiting(wakers) => { drop(entry); // drop early to release lock before waking other tasks wakers.wake(); None } Filled(value) => Some(value), } } Vacant(slot) => { slot.insert(Filled(value)); None } } } pub fn get<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>> where K: Borrow<Q>, { Some(Ref { inner: 
self.map.get(key)?, }) } pub fn get_mut<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>> where K: Borrow<Q>, { Some(RefMut { inner: self.map.get_mut(key)?, }) } pub fn wait<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>( &'a self, qey: &'b Q, ) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f where K: Borrow<Q> + From<&'b Q>, { let key = K::from(qey); self.map.entry(key).or_insert(Waiting(WakerSet::new())); Wait::new(&self.map, qey) } pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>( &'a self, qey: &'b Q, ) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f where K: Borrow<Q> + From<&'b Q>, { let key = K::from(qey); self.map.entry(key).or_insert(Waiting(WakerSet::new())); WaitMut::new(&self.map, qey) } pub fn cancel<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool where K: Borrow<Q>, { if let Some((_, entry)) = self.map.remove_if( key, |_, entry| { if let Waiting(_) = entry { true } else { false } }, ) { if let Waiting(wakers) = entry { wakers.wake(); } true } else { false } } pub fn remove<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool where K: Borrow<Q>, { if let Some((_, entry)) = self.map.remove(key) { if let Waiting(wakers) = entry { wakers.wake(); } true } else { false } } pub fn clear(&self) { self.map.retain(|_, v| { if let Waiting(wakers) = v { mem::replace(wakers, WakerSet::new()).wake(); } false }); } pub fn clear_not_waiting(&self) { self.map .retain(|_, v| if let Waiting(_) = v { true } else { false }); } pub fn len(&self) -> usize { self.map.len() } /// Cancels all outstanding `waits` on the map. 
/// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::{main, stream, prelude::*}; /// # use waitmap::WaitMap; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<String, i32> = WaitMap::new(); /// let mut waitstream = /// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]); /// /// map.cancel_all(); /// /// let mut num_cancelled = 0; /// while let Some(wait_fut) = waitstream.next().await { /// assert!(wait_fut.await.is_none()); /// num_cancelled += 1; /// } /// /// assert!(num_cancelled == 3); /// # Ok(()) /// # } /// ``` pub fn cancel_all(&self) { self.map.retain(|_, entry| { if let Waiting(wakers) = entry { // NB: In theory, there is a deadlock risk: if a task is awoken before the // retain is completed, it may see a waiting entry with an empty waker set, // rather than a missing entry. // // However, this is prevented by the memory guards already present in DashMap. // No other task will be able to view this entry until the guard on this shard // has been dropped, which will not occur until this shard's unretained members // have actually been removed. mem::replace(wakers, WakerSet::new()).wake(); false } else { true } }) } } enum WaitEntry<V> { Waiting(WakerSet), Filled(V), } /// A shared reference to a `WaitMap` key-value pair. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::main; /// # use waitmap::{Ref, WaitMap}; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<String, i32> = WaitMap::new();
/// assert!(*kv.key() == emma); /// assert!(*kv.value() == 0); /// assert!(kv.pair() == (&"Emma Goldman".to_string(), &0)); /// # Ok(()) /// # } /// ``` pub struct Ref<'a, K, V, S> { inner: one::Ref<'a, K, WaitEntry<V>, S>, } impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> { pub fn key(&self) -> &K { self.inner.key() } pub fn value(&self) -> &V { match self.inner.value() { Filled(value) => value, _ => panic!(), } } pub fn pair(&self) -> (&K, &V) { (self.key(), self.value()) } } /// An exclusive reference to a `WaitMap` key-value pair. pub struct RefMut<'a, K, V, S> { inner: one::RefMut<'a, K, WaitEntry<V>, S>, } impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> { pub fn key(&self) -> &K { self.inner.key() } pub fn value(&self) -> &V { match self.inner.value() { Filled(value) => value, _ => panic!(), } } pub fn value_mut(&mut self) -> &mut V { match self.inner.value_mut() { Filled(value) => value, _ => panic!(), } } pub fn pair(&self) -> (&K, &V) { (self.key(), self.value()) } pub fn pair_mut(&mut self) -> (&K, &mut V) { match self.inner.pair_mut() { (key, Filled(value)) => (key, value), _ => panic!(), } } }
/// let emma = "Emma Goldman".to_string(); /// /// map.insert(emma.clone(), 0); /// let kv: Ref<String, i32, _> = map.get(&emma).unwrap(); ///
random_line_split
lib.rs
//! Async concurrent hashmap built on top of [dashmap](https://docs.rs/dashmap/). //! //! # Wait //! [`WaitMap`](crate::WaitMap) is a concurrent hashmap with an asynchronous `wait` operation. //! ``` //! # extern crate async_std; //! # extern crate waitmap; //! # use async_std::main; //! # use waitmap::WaitMap; //! # #[async_std::main] //! # async fn main() -> std::io::Result<()> { //! let map: WaitMap<String, i32> = WaitMap::new(); //! # map.insert(String::from("Rosa Luxemburg"), 1); //! //! // This will wait until a value is put under the key "Rosa Luxemburg" //! if let Some(value) = map.wait("Rosa Luxemburg").await { //! // ... //! } //! # Ok(()) //! # } //! ``` //! //! Waits are cancellable. Cancelled waits evaluate to `None`. //! ``` //! # extern crate async_std; //! # extern crate waitmap; //! # use async_std::{main, task}; //! # use std::time::Duration; //! # use std::sync::Arc; //! # use waitmap::WaitMap; //! # #[async_std::main] //! # async fn main() -> std::io::Result<()> { //! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new()); //! let map1 = map.clone(); //! //! let handle = task::spawn(async move { //! let result = map.wait("Voltairine de Cleyre").await; //! assert!(result.is_none()); //! }); //! //! task::spawn(async move { //! task::sleep(Duration::from_millis(100)).await; // avoid deadlock //! map1.cancel("Voltairine de Cleyre"); //! }); //! //! task::block_on(handle); //! # Ok(()) //! # } //! ``` mod wait; mod waker_set; use std::borrow::Borrow; use std::collections::hash_map::RandomState; use std::future::Future; use std::hash::{BuildHasher, Hash}; use std::mem; use dashmap::mapref::entry::Entry::*; use dashmap::mapref::one; use dashmap::DashMap; use wait::{Wait, WaitMut}; use waker_set::WakerSet; use WaitEntry::*; /// An asynchronous concurrent hashmap. pub struct WaitMap<K, V, S = RandomState> { map: DashMap<K, WaitEntry<V>, S>, } impl<K: Hash + Eq, V> WaitMap<K, V> { /// Make a new `WaitMap` using the default hasher. 
pub fn new() -> WaitMap<K, V> { WaitMap { map: DashMap::with_hasher(RandomState::default()), } } } impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> { /// Make a new `WaitMap` using a custom hasher. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::main; /// # use waitmap::WaitMap; /// use std::collections::hash_map::RandomState; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new()); /// # Ok(()) /// # } /// ``` pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> { WaitMap { map: DashMap::with_hasher(hasher), } } /// Inserts a key-value pair into the map. /// /// If the map did not have this key present, `None` is returned. /// /// If there are any pending `wait` calls for this key, they are woken up. /// /// If the map did have this key present, the value is updated and the old value is returned. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::{main, sync::Arc, prelude::*}; /// # use waitmap::WaitMap; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new()); /// /// let insert_fut = async { map.insert("hi".to_string(), 0) }; /// let wait_fut = map.wait("hi"); /// /// let (insert_res, wait_res) = insert_fut.join(wait_fut).await; /// assert!(insert_res.is_none()); /// assert!(wait_res.is_some()); /// # Ok(()) /// # } /// ``` pub fn insert(&self, key: K, value: V) -> Option<V> { match self.map.entry(key) { Occupied(mut entry) => { match mem::replace(entry.get_mut(), Filled(value)) { Waiting(wakers) => { drop(entry); // drop early to release lock before waking other tasks wakers.wake(); None } Filled(value) => Some(value), } } Vacant(slot) => { slot.insert(Filled(value)); None } } } pub fn get<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>> where K: Borrow<Q>, { Some(Ref { inner: 
self.map.get(key)?, }) } pub fn get_mut<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>> where K: Borrow<Q>, { Some(RefMut { inner: self.map.get_mut(key)?, }) } pub fn wait<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>( &'a self, qey: &'b Q, ) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f where K: Borrow<Q> + From<&'b Q>, { let key = K::from(qey); self.map.entry(key).or_insert(Waiting(WakerSet::new())); Wait::new(&self.map, qey) } pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>( &'a self, qey: &'b Q, ) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f where K: Borrow<Q> + From<&'b Q>, { let key = K::from(qey); self.map.entry(key).or_insert(Waiting(WakerSet::new())); WaitMut::new(&self.map, qey) } pub fn cancel<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool where K: Borrow<Q>, { if let Some((_, entry)) = self.map.remove_if( key, |_, entry| { if let Waiting(_) = entry { true } else { false } }, ) { if let Waiting(wakers) = entry { wakers.wake(); } true } else { false } } pub fn remove<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool where K: Borrow<Q>, { if let Some((_, entry)) = self.map.remove(key) { if let Waiting(wakers) = entry { wakers.wake(); } true } else { false } } pub fn clear(&self) { self.map.retain(|_, v| { if let Waiting(wakers) = v { mem::replace(wakers, WakerSet::new()).wake(); } false }); } pub fn clear_not_waiting(&self) { self.map .retain(|_, v| if let Waiting(_) = v { true } else { false }); } pub fn len(&self) -> usize { self.map.len() } /// Cancels all outstanding `waits` on the map. 
/// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::{main, stream, prelude::*}; /// # use waitmap::WaitMap; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<String, i32> = WaitMap::new(); /// let mut waitstream = /// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]); /// /// map.cancel_all(); /// /// let mut num_cancelled = 0; /// while let Some(wait_fut) = waitstream.next().await { /// assert!(wait_fut.await.is_none()); /// num_cancelled += 1; /// } /// /// assert!(num_cancelled == 3); /// # Ok(()) /// # } /// ``` pub fn cancel_all(&self)
} enum WaitEntry<V> { Waiting(WakerSet), Filled(V), } /// A shared reference to a `WaitMap` key-value pair. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::main; /// # use waitmap::{Ref, WaitMap}; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<String, i32> = WaitMap::new(); /// let emma = "Emma Goldman".to_string(); /// /// map.insert(emma.clone(), 0); /// let kv: Ref<String, i32, _> = map.get(&emma).unwrap(); /// /// assert!(*kv.key() == emma); /// assert!(*kv.value() == 0); /// assert!(kv.pair() == (&"Emma Goldman".to_string(), &0)); /// # Ok(()) /// # } /// ``` pub struct Ref<'a, K, V, S> { inner: one::Ref<'a, K, WaitEntry<V>, S>, } impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> { pub fn key(&self) -> &K { self.inner.key() } pub fn value(&self) -> &V { match self.inner.value() { Filled(value) => value, _ => panic!(), } } pub fn pair(&self) -> (&K, &V) { (self.key(), self.value()) } } /// An exclusive reference to a `WaitMap` key-value pair. pub struct RefMut<'a, K, V, S> { inner: one::RefMut<'a, K, WaitEntry<V>, S>, } impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> { pub fn key(&self) -> &K { self.inner.key() } pub fn value(&self) -> &V { match self.inner.value() { Filled(value) => value, _ => panic!(), } } pub fn value_mut(&mut self) -> &mut V { match self.inner.value_mut() { Filled(value) => value, _ => panic!(), } } pub fn pair(&self) -> (&K, &V) { (self.key(), self.value()) } pub fn pair_mut(&mut self) -> (&K, &mut V) { match self.inner.pair_mut() { (key, Filled(value)) => (key, value), _ => panic!(), } } }
{ self.map.retain(|_, entry| { if let Waiting(wakers) = entry { // NB: In theory, there is a deadlock risk: if a task is awoken before the // retain is completed, it may see a waiting entry with an empty waker set, // rather than a missing entry. // // However, this is prevented by the memory guards already present in DashMap. // No other task will be able to view this entry until the guard on this shard // has been dropped, which will not occur until this shard's unretained members // have actually been removed. mem::replace(wakers, WakerSet::new()).wake(); false } else { true } }) }
identifier_body
lib.rs
//! Async concurrent hashmap built on top of [dashmap](https://docs.rs/dashmap/). //! //! # Wait //! [`WaitMap`](crate::WaitMap) is a concurrent hashmap with an asynchronous `wait` operation. //! ``` //! # extern crate async_std; //! # extern crate waitmap; //! # use async_std::main; //! # use waitmap::WaitMap; //! # #[async_std::main] //! # async fn main() -> std::io::Result<()> { //! let map: WaitMap<String, i32> = WaitMap::new(); //! # map.insert(String::from("Rosa Luxemburg"), 1); //! //! // This will wait until a value is put under the key "Rosa Luxemburg" //! if let Some(value) = map.wait("Rosa Luxemburg").await { //! // ... //! } //! # Ok(()) //! # } //! ``` //! //! Waits are cancellable. Cancelled waits evaluate to `None`. //! ``` //! # extern crate async_std; //! # extern crate waitmap; //! # use async_std::{main, task}; //! # use std::time::Duration; //! # use std::sync::Arc; //! # use waitmap::WaitMap; //! # #[async_std::main] //! # async fn main() -> std::io::Result<()> { //! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new()); //! let map1 = map.clone(); //! //! let handle = task::spawn(async move { //! let result = map.wait("Voltairine de Cleyre").await; //! assert!(result.is_none()); //! }); //! //! task::spawn(async move { //! task::sleep(Duration::from_millis(100)).await; // avoid deadlock //! map1.cancel("Voltairine de Cleyre"); //! }); //! //! task::block_on(handle); //! # Ok(()) //! # } //! ``` mod wait; mod waker_set; use std::borrow::Borrow; use std::collections::hash_map::RandomState; use std::future::Future; use std::hash::{BuildHasher, Hash}; use std::mem; use dashmap::mapref::entry::Entry::*; use dashmap::mapref::one; use dashmap::DashMap; use wait::{Wait, WaitMut}; use waker_set::WakerSet; use WaitEntry::*; /// An asynchronous concurrent hashmap. pub struct WaitMap<K, V, S = RandomState> { map: DashMap<K, WaitEntry<V>, S>, } impl<K: Hash + Eq, V> WaitMap<K, V> { /// Make a new `WaitMap` using the default hasher. 
pub fn new() -> WaitMap<K, V> { WaitMap { map: DashMap::with_hasher(RandomState::default()), } } } impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> { /// Make a new `WaitMap` using a custom hasher. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::main; /// # use waitmap::WaitMap; /// use std::collections::hash_map::RandomState; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new()); /// # Ok(()) /// # } /// ``` pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> { WaitMap { map: DashMap::with_hasher(hasher), } } /// Inserts a key-value pair into the map. /// /// If the map did not have this key present, `None` is returned. /// /// If there are any pending `wait` calls for this key, they are woken up. /// /// If the map did have this key present, the value is updated and the old value is returned. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::{main, sync::Arc, prelude::*}; /// # use waitmap::WaitMap; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new()); /// /// let insert_fut = async { map.insert("hi".to_string(), 0) }; /// let wait_fut = map.wait("hi"); /// /// let (insert_res, wait_res) = insert_fut.join(wait_fut).await; /// assert!(insert_res.is_none()); /// assert!(wait_res.is_some()); /// # Ok(()) /// # } /// ``` pub fn insert(&self, key: K, value: V) -> Option<V> { match self.map.entry(key) { Occupied(mut entry) => { match mem::replace(entry.get_mut(), Filled(value)) { Waiting(wakers) => { drop(entry); // drop early to release lock before waking other tasks wakers.wake(); None } Filled(value) => Some(value), } } Vacant(slot) => { slot.insert(Filled(value)); None } } } pub fn get<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>> where K: Borrow<Q>, { Some(Ref { inner: 
self.map.get(key)?, }) } pub fn get_mut<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>> where K: Borrow<Q>, { Some(RefMut { inner: self.map.get_mut(key)?, }) } pub fn wait<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>( &'a self, qey: &'b Q, ) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f where K: Borrow<Q> + From<&'b Q>, { let key = K::from(qey); self.map.entry(key).or_insert(Waiting(WakerSet::new())); Wait::new(&self.map, qey) } pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>( &'a self, qey: &'b Q, ) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f where K: Borrow<Q> + From<&'b Q>, { let key = K::from(qey); self.map.entry(key).or_insert(Waiting(WakerSet::new())); WaitMut::new(&self.map, qey) } pub fn cancel<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool where K: Borrow<Q>, { if let Some((_, entry)) = self.map.remove_if( key, |_, entry| { if let Waiting(_) = entry { true } else { false } }, ) { if let Waiting(wakers) = entry { wakers.wake(); } true } else { false } } pub fn remove<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool where K: Borrow<Q>, { if let Some((_, entry)) = self.map.remove(key) { if let Waiting(wakers) = entry { wakers.wake(); } true } else { false } } pub fn clear(&self) { self.map.retain(|_, v| { if let Waiting(wakers) = v { mem::replace(wakers, WakerSet::new()).wake(); } false }); } pub fn clear_not_waiting(&self) { self.map .retain(|_, v| if let Waiting(_) = v { true } else { false }); } pub fn len(&self) -> usize { self.map.len() } /// Cancels all outstanding `waits` on the map. 
/// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::{main, stream, prelude::*}; /// # use waitmap::WaitMap; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<String, i32> = WaitMap::new(); /// let mut waitstream = /// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]); /// /// map.cancel_all(); /// /// let mut num_cancelled = 0; /// while let Some(wait_fut) = waitstream.next().await { /// assert!(wait_fut.await.is_none()); /// num_cancelled += 1; /// } /// /// assert!(num_cancelled == 3); /// # Ok(()) /// # } /// ``` pub fn cancel_all(&self) { self.map.retain(|_, entry| { if let Waiting(wakers) = entry { // NB: In theory, there is a deadlock risk: if a task is awoken before the // retain is completed, it may see a waiting entry with an empty waker set, // rather than a missing entry. // // However, this is prevented by the memory guards already present in DashMap. // No other task will be able to view this entry until the guard on this shard // has been dropped, which will not occur until this shard's unretained members // have actually been removed. mem::replace(wakers, WakerSet::new()).wake(); false } else { true } }) } } enum WaitEntry<V> { Waiting(WakerSet), Filled(V), } /// A shared reference to a `WaitMap` key-value pair. 
/// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::main; /// # use waitmap::{Ref, WaitMap}; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<String, i32> = WaitMap::new(); /// let emma = "Emma Goldman".to_string(); /// /// map.insert(emma.clone(), 0); /// let kv: Ref<String, i32, _> = map.get(&emma).unwrap(); /// /// assert!(*kv.key() == emma); /// assert!(*kv.value() == 0); /// assert!(kv.pair() == (&"Emma Goldman".to_string(), &0)); /// # Ok(()) /// # } /// ``` pub struct Ref<'a, K, V, S> { inner: one::Ref<'a, K, WaitEntry<V>, S>, } impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> { pub fn key(&self) -> &K { self.inner.key() } pub fn value(&self) -> &V { match self.inner.value() { Filled(value) => value, _ => panic!(), } } pub fn pair(&self) -> (&K, &V) { (self.key(), self.value()) } } /// An exclusive reference to a `WaitMap` key-value pair. pub struct RefMut<'a, K, V, S> { inner: one::RefMut<'a, K, WaitEntry<V>, S>, } impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> { pub fn key(&self) -> &K { self.inner.key() } pub fn value(&self) -> &V { match self.inner.value() { Filled(value) => value, _ => panic!(), } } pub fn
(&mut self) -> &mut V { match self.inner.value_mut() { Filled(value) => value, _ => panic!(), } } pub fn pair(&self) -> (&K, &V) { (self.key(), self.value()) } pub fn pair_mut(&mut self) -> (&K, &mut V) { match self.inner.pair_mut() { (key, Filled(value)) => (key, value), _ => panic!(), } } }
value_mut
identifier_name
lib.rs
//! Async concurrent hashmap built on top of [dashmap](https://docs.rs/dashmap/). //! //! # Wait //! [`WaitMap`](crate::WaitMap) is a concurrent hashmap with an asynchronous `wait` operation. //! ``` //! # extern crate async_std; //! # extern crate waitmap; //! # use async_std::main; //! # use waitmap::WaitMap; //! # #[async_std::main] //! # async fn main() -> std::io::Result<()> { //! let map: WaitMap<String, i32> = WaitMap::new(); //! # map.insert(String::from("Rosa Luxemburg"), 1); //! //! // This will wait until a value is put under the key "Rosa Luxemburg" //! if let Some(value) = map.wait("Rosa Luxemburg").await { //! // ... //! } //! # Ok(()) //! # } //! ``` //! //! Waits are cancellable. Cancelled waits evaluate to `None`. //! ``` //! # extern crate async_std; //! # extern crate waitmap; //! # use async_std::{main, task}; //! # use std::time::Duration; //! # use std::sync::Arc; //! # use waitmap::WaitMap; //! # #[async_std::main] //! # async fn main() -> std::io::Result<()> { //! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new()); //! let map1 = map.clone(); //! //! let handle = task::spawn(async move { //! let result = map.wait("Voltairine de Cleyre").await; //! assert!(result.is_none()); //! }); //! //! task::spawn(async move { //! task::sleep(Duration::from_millis(100)).await; // avoid deadlock //! map1.cancel("Voltairine de Cleyre"); //! }); //! //! task::block_on(handle); //! # Ok(()) //! # } //! ``` mod wait; mod waker_set; use std::borrow::Borrow; use std::collections::hash_map::RandomState; use std::future::Future; use std::hash::{BuildHasher, Hash}; use std::mem; use dashmap::mapref::entry::Entry::*; use dashmap::mapref::one; use dashmap::DashMap; use wait::{Wait, WaitMut}; use waker_set::WakerSet; use WaitEntry::*; /// An asynchronous concurrent hashmap. pub struct WaitMap<K, V, S = RandomState> { map: DashMap<K, WaitEntry<V>, S>, } impl<K: Hash + Eq, V> WaitMap<K, V> { /// Make a new `WaitMap` using the default hasher. 
pub fn new() -> WaitMap<K, V> { WaitMap { map: DashMap::with_hasher(RandomState::default()), } } } impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> { /// Make a new `WaitMap` using a custom hasher. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::main; /// # use waitmap::WaitMap; /// use std::collections::hash_map::RandomState; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new()); /// # Ok(()) /// # } /// ``` pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> { WaitMap { map: DashMap::with_hasher(hasher), } } /// Inserts a key-value pair into the map. /// /// If the map did not have this key present, `None` is returned. /// /// If there are any pending `wait` calls for this key, they are woken up. /// /// If the map did have this key present, the value is updated and the old value is returned. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::{main, sync::Arc, prelude::*}; /// # use waitmap::WaitMap; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new()); /// /// let insert_fut = async { map.insert("hi".to_string(), 0) }; /// let wait_fut = map.wait("hi"); /// /// let (insert_res, wait_res) = insert_fut.join(wait_fut).await; /// assert!(insert_res.is_none()); /// assert!(wait_res.is_some()); /// # Ok(()) /// # } /// ``` pub fn insert(&self, key: K, value: V) -> Option<V> { match self.map.entry(key) { Occupied(mut entry) => { match mem::replace(entry.get_mut(), Filled(value)) { Waiting(wakers) => { drop(entry); // drop early to release lock before waking other tasks wakers.wake(); None } Filled(value) => Some(value), } } Vacant(slot) => { slot.insert(Filled(value)); None } } } pub fn get<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>> where K: Borrow<Q>, { Some(Ref { inner: 
self.map.get(key)?, }) } pub fn get_mut<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>> where K: Borrow<Q>, { Some(RefMut { inner: self.map.get_mut(key)?, }) } pub fn wait<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>( &'a self, qey: &'b Q, ) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f where K: Borrow<Q> + From<&'b Q>, { let key = K::from(qey); self.map.entry(key).or_insert(Waiting(WakerSet::new())); Wait::new(&self.map, qey) } pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>( &'a self, qey: &'b Q, ) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f where K: Borrow<Q> + From<&'b Q>, { let key = K::from(qey); self.map.entry(key).or_insert(Waiting(WakerSet::new())); WaitMut::new(&self.map, qey) } pub fn cancel<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool where K: Borrow<Q>, { if let Some((_, entry)) = self.map.remove_if( key, |_, entry| { if let Waiting(_) = entry { true } else { false } }, ) { if let Waiting(wakers) = entry { wakers.wake(); } true } else { false } } pub fn remove<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool where K: Borrow<Q>, { if let Some((_, entry)) = self.map.remove(key) { if let Waiting(wakers) = entry { wakers.wake(); } true } else { false } } pub fn clear(&self) { self.map.retain(|_, v| { if let Waiting(wakers) = v { mem::replace(wakers, WakerSet::new()).wake(); } false }); } pub fn clear_not_waiting(&self) { self.map .retain(|_, v| if let Waiting(_) = v
else { false }); } pub fn len(&self) -> usize { self.map.len() } /// Cancels all outstanding `waits` on the map. /// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::{main, stream, prelude::*}; /// # use waitmap::WaitMap; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<String, i32> = WaitMap::new(); /// let mut waitstream = /// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]); /// /// map.cancel_all(); /// /// let mut num_cancelled = 0; /// while let Some(wait_fut) = waitstream.next().await { /// assert!(wait_fut.await.is_none()); /// num_cancelled += 1; /// } /// /// assert!(num_cancelled == 3); /// # Ok(()) /// # } /// ``` pub fn cancel_all(&self) { self.map.retain(|_, entry| { if let Waiting(wakers) = entry { // NB: In theory, there is a deadlock risk: if a task is awoken before the // retain is completed, it may see a waiting entry with an empty waker set, // rather than a missing entry. // // However, this is prevented by the memory guards already present in DashMap. // No other task will be able to view this entry until the guard on this shard // has been dropped, which will not occur until this shard's unretained members // have actually been removed. mem::replace(wakers, WakerSet::new()).wake(); false } else { true } }) } } enum WaitEntry<V> { Waiting(WakerSet), Filled(V), } /// A shared reference to a `WaitMap` key-value pair. 
/// ``` /// # extern crate async_std; /// # extern crate waitmap; /// # use async_std::main; /// # use waitmap::{Ref, WaitMap}; /// # #[async_std::main] /// # async fn main() -> std::io::Result<()> { /// let map: WaitMap<String, i32> = WaitMap::new(); /// let emma = "Emma Goldman".to_string(); /// /// map.insert(emma.clone(), 0); /// let kv: Ref<String, i32, _> = map.get(&emma).unwrap(); /// /// assert!(*kv.key() == emma); /// assert!(*kv.value() == 0); /// assert!(kv.pair() == (&"Emma Goldman".to_string(), &0)); /// # Ok(()) /// # } /// ``` pub struct Ref<'a, K, V, S> { inner: one::Ref<'a, K, WaitEntry<V>, S>, } impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> { pub fn key(&self) -> &K { self.inner.key() } pub fn value(&self) -> &V { match self.inner.value() { Filled(value) => value, _ => panic!(), } } pub fn pair(&self) -> (&K, &V) { (self.key(), self.value()) } } /// An exclusive reference to a `WaitMap` key-value pair. pub struct RefMut<'a, K, V, S> { inner: one::RefMut<'a, K, WaitEntry<V>, S>, } impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> { pub fn key(&self) -> &K { self.inner.key() } pub fn value(&self) -> &V { match self.inner.value() { Filled(value) => value, _ => panic!(), } } pub fn value_mut(&mut self) -> &mut V { match self.inner.value_mut() { Filled(value) => value, _ => panic!(), } } pub fn pair(&self) -> (&K, &V) { (self.key(), self.value()) } pub fn pair_mut(&mut self) -> (&K, &mut V) { match self.inner.pair_mut() { (key, Filled(value)) => (key, value), _ => panic!(), } } }
{ true }
conditional_block
controller_test.go
package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "net/http" "net/http/httptest" "os" "strings" "testing" "github.com/flynn/go-sql" "github.com/flynn/rpcplus" "github.com/go-martini/martini" tu "github.com/flynn/flynn-controller/testutils" ct "github.com/flynn/flynn-controller/types" "github.com/flynn/flynn-controller/utils" _ "github.com/flynn/pq" . "github.com/titanous/gocheck" ) // Hook gocheck up to the "go test" runner func Test(t *testing.T) { TestingT(t) } type S struct { cc *tu.FakeCluster srv *httptest.Server m *martini.Martini } var _ = Suite(&S{}) func (s *S) SetUpSuite(c *C) { dbname := "controllertest" if os.Getenv("PGDATABASE") != "" { dbname = os.Getenv("PGDATABASE") } else { os.Setenv("PGDATABASE", dbname) } if os.Getenv("PGSSLMODE") == "" { os.Setenv("PGSSLMODE", "disable") } db, err := sql.Open("postgres", "dbname=postgres") if err != nil { c.Fatal(err) } if _, err := db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s", dbname)); err != nil { c.Fatal(err) } if _, err := db.Exec(fmt.Sprintf("CREATE DATABASE %s", dbname)); err != nil { c.Fatal(err) } db.Close() dsn := fmt.Sprintf("dbname=%s", dbname) db, err = sql.Open("postgres", dsn) if err != nil { c.Fatal(err) } if err = migrateDB(db); err != nil { c.Fatal(err) } dbw := testDBWrapper{DB: db, dsn: dsn} s.cc = tu.NewFakeCluster() handler, m := appHandler(handlerConfig{db: dbw, cc: s.cc, sc: newFakeRouter(), key: "test"}) s.m = m s.srv = httptest.NewServer(handler) } type testDBWrapper struct { *sql.DB dsn string } func (w testDBWrapper) DSN() string { return w.dsn } func (w testDBWrapper) Database() *sql.DB { return w.DB } var authKey = "test" func (s *S) send(method, path string, in, out interface{}) (*http.Response, error) { buf, err := json.Marshal(in) if err != nil { return nil, err } req, err := http.NewRequest(method, s.srv.URL+path, bytes.NewBuffer(buf)) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") req.SetBasicAuth("", authKey) res, err := 
http.DefaultClient.Do(req) if err != nil { return nil, err } if out != nil && res.StatusCode == 200 { defer res.Body.Close() return res, json.NewDecoder(res.Body).Decode(out) } return res, nil } func (s *S) body(res *http.Response) (string, error) { defer res.Body.Close() buf, err := ioutil.ReadAll(res.Body) if err != nil { return "", err } return string(buf), nil } func (s *S) Post(path string, in, out interface{}) (*http.Response, error) { return s.send("POST", path, in, out) } func (s *S) Put(path string, in, out interface{}) (*http.Response, error) { return s.send("PUT", path, in, out) } func (s *S) Get(path string, data interface{}) (*http.Response, error) { req, err := http.NewRequest("GET", s.srv.URL+path, nil) if err != nil { return nil, err } req.SetBasicAuth("", authKey) res, err := http.DefaultClient.Do(req) defer res.Body.Close() if res.StatusCode != http.StatusOK { return res, fmt.Errorf("Unexpected status code %d", res.StatusCode) } return res, json.NewDecoder(res.Body).Decode(data) } func (s *S) Delete(path string) (*http.Response, error) { req, err := http.NewRequest("DELETE", s.srv.URL+path, nil) if err != nil { return nil, err } req.SetBasicAuth("", authKey) return http.DefaultClient.Do(req) } func (s *S) TestBadAuth(c *C) { res, err := http.Get(s.srv.URL + "/apps") c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, 401) req, err := http.NewRequest("GET", s.srv.URL+"/apps", nil) c.Assert(err, IsNil) req.SetBasicAuth("", authKey+"wrong") res, err = http.DefaultClient.Do(req) c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, 401) _, err = rpcplus.DialHTTP("tcp", s.srv.Listener.Addr().String()) c.Assert(err, Not(IsNil)) } func (s *S) createTestApp(c *C, in *ct.App) *ct.App { out := &ct.App{} res, err := s.Post("/apps", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateApp(c *C) { // app with no name returns 400 res, err := s.Post("/apps", &ct.App{}, 
&ct.App{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 400) body, err := s.body(res) c.Assert(err, IsNil) c.Assert(body, Equals, `{"field":"name","message":"must not be blank"}`) for i, id := range []string{"", utils.UUID()} { name := fmt.Sprintf("create-app-%d", i) app := s.createTestApp(c, &ct.App{ID: id, Name: name, Protected: true, Meta: map[string]string{"foo": "bar"}}) c.Assert(app.Name, Equals, name) c.Assert(app.ID, Not(Equals), "") if id != "" { c.Assert(app.ID, Equals, id) } c.Assert(app.Protected, Equals, true) c.Assert(app.Meta["foo"], Equals, "bar") gotApp := &ct.App{} res, err := s.Get("/apps/"+app.ID, gotApp) c.Assert(err, IsNil) c.Assert(gotApp, DeepEquals, app) res, err = s.Get("/apps/"+app.Name, gotApp) c.Assert(err, IsNil) c.Assert(gotApp, DeepEquals, app) res, err = s.Get("/apps/fail"+app.ID, gotApp) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestUpdateApp(c *C) { meta := map[string]string{"foo": "bar"} app := s.createTestApp(c, &ct.App{Name: "update-app", Meta: meta}) c.Assert(app.Protected, Equals, false) c.Assert(app.Meta, DeepEquals, meta) gotApp := &ct.App{} res, err := s.Post("/apps/"+app.Name, map[string]bool{"protected": true}, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, true) c.Assert(gotApp.Meta, DeepEquals, meta) meta = map[string]string{"foo": "baz", "bar": "foo"} res, err = s.Post("/apps/"+app.ID, map[string]interface{}{"protected": false, "meta": meta}, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, false) c.Assert(gotApp.Meta, DeepEquals, meta) res, err = s.Get("/apps/"+app.ID, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, false) c.Assert(gotApp.Meta, DeepEquals, meta) } func (s *S) TestDeleteApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "delete-app"}) path := "/apps/" + app.ID res, err := s.Delete(path) c.Assert(err, IsNil) 
c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, app) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestRecreateApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "recreate-app"}) // Post a duplicate res, err := s.Post("/apps", app, &ct.App{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error // Delete the original path := "/apps/" + app.ID res, err = s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) // Create the same key app = s.createTestApp(c, &ct.App{Name: "recreate-app"}) c.Assert(app.Name, Equals, "recreate-app") } func (s *S) TestProtectedApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "protected-app", Protected: true}) release := s.createTestRelease(c, &ct.Release{ Processes: map[string]ct.ProcessType{"web": {}, "worker": {}}, }) path := formationPath(app.ID, release.ID) for _, t := range []struct { procs map[string]int status int }{ {nil, 400}, {map[string]int{"web": 1}, 400}, {map[string]int{"worker": 1, "web": 0}, 400}, {map[string]int{"worker": 1, "web": 1}, 200}, } { res, err := s.Put(path, &ct.Formation{Processes: t.procs}, nil) c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, t.status) } } func (s *S) createTestArtifact(c *C, in *ct.Artifact) *ct.Artifact { out := &ct.Artifact{} res, err := s.Post("/artifacts", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateArtifact(c *C) { for i, id := range []string{"", utils.UUID()} { in := &ct.Artifact{ ID: id, Type: "docker-image", URI: fmt.Sprintf("docker://flynn/host?id=adsf%d", i), } out := s.createTestArtifact(c, in) c.Assert(out.Type, Equals, in.Type) c.Assert(out.URI, Equals, in.URI) c.Assert(out.ID, Not(Equals), "") if id != "" { c.Assert(out.ID, Equals, id) } gotArtifact := &ct.Artifact{} res, err := s.Get("/artifacts/"+out.ID, gotArtifact) c.Assert(err, IsNil) c.Assert(gotArtifact, DeepEquals, out) res, err = 
s.Get("/artifacts/fail"+out.ID, gotArtifact) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release { if in.ArtifactID == "" { in.ArtifactID = s.createTestArtifact(c, &ct.Artifact{}).ID } out := &ct.Release{} res, err := s.Post("/releases", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) createTestKey(c *C, in *ct.Key) *ct.Key { out := &ct.Key{} res, err := s.Post("/keys", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateRelease(c *C) { for _, id := range []string{"", utils.UUID()} { in := &ct.Release{ID: id} out := s.createTestRelease(c, in) c.Assert(out.ArtifactID, Equals, in.ArtifactID) if id != "" { c.Assert(out.ID, Equals, id) } gotRelease := &ct.Release{} res, err := s.Get("/releases/"+out.ID, gotRelease) c.Assert(err, IsNil)
res, err = s.Get("/releases/fail"+out.ID, gotRelease) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestCreateFormation(c *C) { for i, useName := range []bool{false, true} { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("create-formation-%d", i)}) in := &ct.Formation{ReleaseID: release.ID, AppID: app.ID, Processes: map[string]int{"web": 1}} if useName { in.AppID = app.Name } out := s.createTestFormation(c, in) c.Assert(out.AppID, Equals, app.ID) c.Assert(out.ReleaseID, Equals, release.ID) c.Assert(out.Processes["web"], Equals, 1) gotFormation := &ct.Formation{} var path string if useName { path = formationPath(app.Name, release.ID) } else { path = formationPath(app.ID, release.ID) } res, err := s.Get(path, gotFormation) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotFormation, DeepEquals, out) res, err = s.Get(path+"fail", gotFormation) c.Assert(res.StatusCode, Equals, 404, Commentf("path:%s formation:", path+"fail")) } } func (s *S) createTestFormation(c *C, formation *ct.Formation) *ct.Formation { path := formationPath(formation.AppID, formation.ReleaseID) formation.AppID = "" formation.ReleaseID = "" out := &ct.Formation{} res, err := s.Put(path, formation, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func formationPath(appID, releaseID string) string { return "/apps/" + appID + "/formations/" + releaseID } func (s *S) TestDeleteFormation(c *C) { for i, useName := range []bool{false, true} { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("delete-formation-%d", i)}) out := s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID}) var path string if useName { path = formationPath(app.Name, release.ID) } else { path = formationPath(app.ID, release.ID) } res, err := s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, out) 
c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestCreateKey(c *C) { in := &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5r1JfsAYIFi86KBa7C5nqKo+BLMJk29+5GsjelgBnCmn4J/QxOrVtovNcntoRLUCRwoHEMHzs3Tc6+PdswIxpX1l3YC78kgdJe6LVb962xUgP6xuxauBNRO7tnh9aPGyLbjl9j7qZAcn2/ansG1GBVoX1GSB58iBsVDH18DdVzlGwrR4OeNLmRQj8kuJEuKOoKEkW55CektcXjV08K3QSQID7aRNHgDpGGgp6XDi0GhIMsuDUGHAdPGZnqYZlxuUFaCW2hK6i1UkwnQCCEv/9IUFl2/aqVep2iX/ynrIaIsNKm16o0ooZ1gCHJEuUKRPUXhZUXqkRXqqHd3a4CUhH jonathan@titanous.com"} out := s.createTestKey(c, in) c.Assert(out.ID, Equals, "7ab054ff4a2009fadc67e1f8b380dbee") c.Assert(out.Key, Equals, in.Key[:strings.LastIndex(in.Key, " ")]) c.Assert(out.Comment, Equals, "jonathan@titanous.com") gotKey := &ct.Key{} path := "/keys/" + out.ID res, err := s.Get(path, gotKey) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotKey, DeepEquals, out) res, err = s.Get(path+"fail", gotKey) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestDeleteKey(c *C) { key := s.createTestKey(c, &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJv/RsyRxiSAh7cU236LOCZ3vD9PO87Fi32QbojQxuGDotmk65fN6WUuL7DQjzUnWkFRu4w/svmb+9MuYK0L2b4Kc1rKXBYaytzWqGtv2VaAFObth40AlNr0V26hcTcBNQQPa23Z8LwQNgELn2b/o2CK+Pie1UbE5lHg8R+pm03cI7fYPB0jA6LIS+IVKHslVhjzxtN49xm9W0DiCxouHZEl+Fd5asgtg10HN7CV5l2+ZFyrPAkxkQrzWpkUMgfvU+xFamyczzBKMT0fTYo+TUM3w3w3njJvqXdHjo3anrUF65rSFxfeNkXoe/NQDdvWu+XBfEypWv25hlQv91JI0N"}) path := "/keys/" + key.ID res, err := s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, key) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestRecreateKey(c *C) { key := &ct.Key{Key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC3I4gHed4RioRMoJTFdVYp9S6QhHUtMe2cdQAmaN5lVuAaEe9GmJ/wtD4pd7sCpw9daCVOD/WWKCDunrwiEwMNzZKPFQPRfrGAgpCdweD+mk62n/DuaeKJFcfB4C/iLqUrYQ9q0QNnokchI4Ts/CaWoesJOQsbtxDwxcaOlYA/Yq/nY/RA3aK0ZfZqngrOjNRuvhnNFeCF94w2CwwX9ley+PtL0LSWOK2F9D/VEAoRMY89av6WQEoho3vLH7PIOP4OKdla7ezxP9nU14MN4PSv2yUS15mZ14SkA3EF+xmO0QXYUcUi4v5UxkBpoRYNAh32KMMD70pXPRCmWvZ5pRrH lewis@lmars.net"} originalKey := s.createTestKey(c, key) c.Assert(originalKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559") c.Assert(originalKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")]) c.Assert(originalKey.Comment, Equals, "lewis@lmars.net") // Post a duplicate res, err := s.Post("/keys", key, &ct.Key{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error // Delete the original path := "/keys/" + originalKey.ID res, err = s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) // Create the same key newKey := s.createTestKey(c, key) c.Assert(newKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559") c.Assert(newKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")]) c.Assert(newKey.Comment, Equals, "lewis@lmars.net") } func (s *S) TestAppList(c *C) { s.createTestApp(c, &ct.App{Name: "list-test"}) var list []ct.App res, err := s.Get("/apps", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestReleaseList(c *C) { s.createTestRelease(c, &ct.Release{}) var list []ct.Release res, err := s.Get("/releases", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestKeyList(c *C) { s.createTestKey(c, &ct.Key{Key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCqE9AJti/17eigkIhA7+6TF9rdTVxjPv80UxIT6ELaNPHegqib5m94Wab4UoZAGtBPLKJs9o8LRO3H29X5q5eXCU5mwx4qQhcMEYkILWj0Y1T39Xi2RI3jiWcTsphAAYmy+uT2Nt740OK1FaQxfdzYx4cjsjtb8L82e35BkJE2TdjXWkeHxZWDZxMlZXme56jTNsqB2OuC0gfbAbrjSCkolvK1RJbBZSSBgKQrYXiyYjjLfcw2O0ZAKPBeS8ckVf6PO8s/+azZzJZ0Kl7YGHYEX3xRi6sJS0gsI4Y6+sddT1zT5kh0Bg3C8cKnZ1NiVXLH0pPKz68PhjWhwpOVUehD"}) var list []ct.Key res, err := s.Get("/keys", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") for _, k := range list { s.Delete("/keys/" + k.ID) } res, err = s.Get("/keys", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(list, HasLen, 0) } func (s *S) TestArtifactList(c *C) { s.createTestArtifact(c, &ct.Artifact{}) var list []ct.Artifact res, err := s.Get("/artifacts", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestFormationList(c *C) { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: "formation-list"}) s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID}) var list []ct.Formation path := "/apps/" + app.ID + "/formations" res, err := s.Get(path, &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ReleaseID, Not(Equals), "") for _, f := range list { s.Delete(formationPath(f.AppID, f.ReleaseID)) } res, err = s.Get(path, &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(list, HasLen, 0) } func (s *S) setAppRelease(c *C, appID, id string) *ct.Release { out := &ct.Release{} res, err := s.Put("/apps/"+appID+"/release", &ct.Release{ID: id}, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestSetAppRelease(c *C) { release := s.createTestRelease(c, &ct.Release{}) app := 
s.createTestApp(c, &ct.App{Name: "set-release"}) out := s.setAppRelease(c, app.ID, release.ID) c.Assert(out, DeepEquals, release) gotRelease := &ct.Release{} res, err := s.Get("/apps/"+app.ID+"/release", gotRelease) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotRelease, DeepEquals, release) res, err = s.Get("/apps/"+app.Name+"/release", gotRelease) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotRelease, DeepEquals, release) var formations []ct.Formation formationsPath := "/apps/" + app.ID + "/formations" res, err = s.Get(formationsPath, &formations) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(formations, HasLen, 0) s.createTestFormation(c, &ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: map[string]int{"web": 1}}) newRelease := s.createTestRelease(c, &ct.Release{}) s.setAppRelease(c, app.ID, newRelease.ID) res, err = s.Get(formationsPath, &formations) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(formations, HasLen, 1) c.Assert(formations[0].ReleaseID, Equals, newRelease.ID) } func (s *S) createTestProvider(c *C, provider *ct.Provider) *ct.Provider { out := &ct.Provider{} res, err := s.Post("/providers", provider, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateProvider(c *C) { provider := s.createTestProvider(c, &ct.Provider{URL: "https://example.com", Name: "foo"}) c.Assert(provider.Name, Equals, "foo") c.Assert(provider.URL, Equals, "https://example.com") c.Assert(provider.ID, Not(Equals), "") gotProvider := &ct.Provider{} res, err := s.Get("/providers/"+provider.ID, gotProvider) c.Assert(err, IsNil) c.Assert(gotProvider, DeepEquals, provider) res, err = s.Get("/providers/"+provider.Name, gotProvider) c.Assert(err, IsNil) c.Assert(gotProvider, DeepEquals, provider) res, err = s.Get("/apps/fail"+provider.ID, gotProvider) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestProviderList(c *C) { 
s.createTestProvider(c, &ct.Provider{URL: "https://example.org", Name: "list-test"}) var list []ct.Provider res, err := s.Get("/providers", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") }
c.Assert(gotRelease, DeepEquals, out)
random_line_split
controller_test.go
package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "net/http" "net/http/httptest" "os" "strings" "testing" "github.com/flynn/go-sql" "github.com/flynn/rpcplus" "github.com/go-martini/martini" tu "github.com/flynn/flynn-controller/testutils" ct "github.com/flynn/flynn-controller/types" "github.com/flynn/flynn-controller/utils" _ "github.com/flynn/pq" . "github.com/titanous/gocheck" ) // Hook gocheck up to the "go test" runner func Test(t *testing.T) { TestingT(t) } type S struct { cc *tu.FakeCluster srv *httptest.Server m *martini.Martini } var _ = Suite(&S{}) func (s *S) SetUpSuite(c *C) { dbname := "controllertest" if os.Getenv("PGDATABASE") != "" { dbname = os.Getenv("PGDATABASE") } else { os.Setenv("PGDATABASE", dbname) } if os.Getenv("PGSSLMODE") == "" { os.Setenv("PGSSLMODE", "disable") } db, err := sql.Open("postgres", "dbname=postgres") if err != nil { c.Fatal(err) } if _, err := db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s", dbname)); err != nil { c.Fatal(err) } if _, err := db.Exec(fmt.Sprintf("CREATE DATABASE %s", dbname)); err != nil { c.Fatal(err) } db.Close() dsn := fmt.Sprintf("dbname=%s", dbname) db, err = sql.Open("postgres", dsn) if err != nil { c.Fatal(err) } if err = migrateDB(db); err != nil { c.Fatal(err) } dbw := testDBWrapper{DB: db, dsn: dsn} s.cc = tu.NewFakeCluster() handler, m := appHandler(handlerConfig{db: dbw, cc: s.cc, sc: newFakeRouter(), key: "test"}) s.m = m s.srv = httptest.NewServer(handler) } type testDBWrapper struct { *sql.DB dsn string } func (w testDBWrapper) DSN() string { return w.dsn } func (w testDBWrapper) Database() *sql.DB { return w.DB } var authKey = "test" func (s *S) send(method, path string, in, out interface{}) (*http.Response, error) { buf, err := json.Marshal(in) if err != nil { return nil, err } req, err := http.NewRequest(method, s.srv.URL+path, bytes.NewBuffer(buf)) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") req.SetBasicAuth("", authKey) res, err := 
http.DefaultClient.Do(req) if err != nil { return nil, err } if out != nil && res.StatusCode == 200 { defer res.Body.Close() return res, json.NewDecoder(res.Body).Decode(out) } return res, nil } func (s *S) body(res *http.Response) (string, error) { defer res.Body.Close() buf, err := ioutil.ReadAll(res.Body) if err != nil { return "", err } return string(buf), nil } func (s *S) Post(path string, in, out interface{}) (*http.Response, error) { return s.send("POST", path, in, out) } func (s *S) Put(path string, in, out interface{}) (*http.Response, error) { return s.send("PUT", path, in, out) } func (s *S) Get(path string, data interface{}) (*http.Response, error) { req, err := http.NewRequest("GET", s.srv.URL+path, nil) if err != nil { return nil, err } req.SetBasicAuth("", authKey) res, err := http.DefaultClient.Do(req) defer res.Body.Close() if res.StatusCode != http.StatusOK { return res, fmt.Errorf("Unexpected status code %d", res.StatusCode) } return res, json.NewDecoder(res.Body).Decode(data) } func (s *S) Delete(path string) (*http.Response, error) { req, err := http.NewRequest("DELETE", s.srv.URL+path, nil) if err != nil { return nil, err } req.SetBasicAuth("", authKey) return http.DefaultClient.Do(req) } func (s *S) TestBadAuth(c *C) { res, err := http.Get(s.srv.URL + "/apps") c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, 401) req, err := http.NewRequest("GET", s.srv.URL+"/apps", nil) c.Assert(err, IsNil) req.SetBasicAuth("", authKey+"wrong") res, err = http.DefaultClient.Do(req) c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, 401) _, err = rpcplus.DialHTTP("tcp", s.srv.Listener.Addr().String()) c.Assert(err, Not(IsNil)) } func (s *S) createTestApp(c *C, in *ct.App) *ct.App { out := &ct.App{} res, err := s.Post("/apps", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateApp(c *C) { // app with no name returns 400 res, err := s.Post("/apps", &ct.App{}, 
&ct.App{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 400) body, err := s.body(res) c.Assert(err, IsNil) c.Assert(body, Equals, `{"field":"name","message":"must not be blank"}`) for i, id := range []string{"", utils.UUID()} { name := fmt.Sprintf("create-app-%d", i) app := s.createTestApp(c, &ct.App{ID: id, Name: name, Protected: true, Meta: map[string]string{"foo": "bar"}}) c.Assert(app.Name, Equals, name) c.Assert(app.ID, Not(Equals), "") if id != "" { c.Assert(app.ID, Equals, id) } c.Assert(app.Protected, Equals, true) c.Assert(app.Meta["foo"], Equals, "bar") gotApp := &ct.App{} res, err := s.Get("/apps/"+app.ID, gotApp) c.Assert(err, IsNil) c.Assert(gotApp, DeepEquals, app) res, err = s.Get("/apps/"+app.Name, gotApp) c.Assert(err, IsNil) c.Assert(gotApp, DeepEquals, app) res, err = s.Get("/apps/fail"+app.ID, gotApp) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestUpdateApp(c *C) { meta := map[string]string{"foo": "bar"} app := s.createTestApp(c, &ct.App{Name: "update-app", Meta: meta}) c.Assert(app.Protected, Equals, false) c.Assert(app.Meta, DeepEquals, meta) gotApp := &ct.App{} res, err := s.Post("/apps/"+app.Name, map[string]bool{"protected": true}, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, true) c.Assert(gotApp.Meta, DeepEquals, meta) meta = map[string]string{"foo": "baz", "bar": "foo"} res, err = s.Post("/apps/"+app.ID, map[string]interface{}{"protected": false, "meta": meta}, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, false) c.Assert(gotApp.Meta, DeepEquals, meta) res, err = s.Get("/apps/"+app.ID, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, false) c.Assert(gotApp.Meta, DeepEquals, meta) } func (s *S) TestDeleteApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "delete-app"}) path := "/apps/" + app.ID res, err := s.Delete(path) c.Assert(err, IsNil) 
c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, app) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestRecreateApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "recreate-app"}) // Post a duplicate res, err := s.Post("/apps", app, &ct.App{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error // Delete the original path := "/apps/" + app.ID res, err = s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) // Create the same key app = s.createTestApp(c, &ct.App{Name: "recreate-app"}) c.Assert(app.Name, Equals, "recreate-app") } func (s *S) TestProtectedApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "protected-app", Protected: true}) release := s.createTestRelease(c, &ct.Release{ Processes: map[string]ct.ProcessType{"web": {}, "worker": {}}, }) path := formationPath(app.ID, release.ID) for _, t := range []struct { procs map[string]int status int }{ {nil, 400}, {map[string]int{"web": 1}, 400}, {map[string]int{"worker": 1, "web": 0}, 400}, {map[string]int{"worker": 1, "web": 1}, 200}, } { res, err := s.Put(path, &ct.Formation{Processes: t.procs}, nil) c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, t.status) } } func (s *S) createTestArtifact(c *C, in *ct.Artifact) *ct.Artifact { out := &ct.Artifact{} res, err := s.Post("/artifacts", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateArtifact(c *C) { for i, id := range []string{"", utils.UUID()} { in := &ct.Artifact{ ID: id, Type: "docker-image", URI: fmt.Sprintf("docker://flynn/host?id=adsf%d", i), } out := s.createTestArtifact(c, in) c.Assert(out.Type, Equals, in.Type) c.Assert(out.URI, Equals, in.URI) c.Assert(out.ID, Not(Equals), "") if id != "" { c.Assert(out.ID, Equals, id) } gotArtifact := &ct.Artifact{} res, err := s.Get("/artifacts/"+out.ID, gotArtifact) c.Assert(err, IsNil) c.Assert(gotArtifact, DeepEquals, out) res, err = 
s.Get("/artifacts/fail"+out.ID, gotArtifact) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release { if in.ArtifactID == "" { in.ArtifactID = s.createTestArtifact(c, &ct.Artifact{}).ID } out := &ct.Release{} res, err := s.Post("/releases", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) createTestKey(c *C, in *ct.Key) *ct.Key { out := &ct.Key{} res, err := s.Post("/keys", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateRelease(c *C) { for _, id := range []string{"", utils.UUID()} { in := &ct.Release{ID: id} out := s.createTestRelease(c, in) c.Assert(out.ArtifactID, Equals, in.ArtifactID) if id != "" { c.Assert(out.ID, Equals, id) } gotRelease := &ct.Release{} res, err := s.Get("/releases/"+out.ID, gotRelease) c.Assert(err, IsNil) c.Assert(gotRelease, DeepEquals, out) res, err = s.Get("/releases/fail"+out.ID, gotRelease) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestCreateFormation(c *C) { for i, useName := range []bool{false, true} { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("create-formation-%d", i)}) in := &ct.Formation{ReleaseID: release.ID, AppID: app.ID, Processes: map[string]int{"web": 1}} if useName { in.AppID = app.Name } out := s.createTestFormation(c, in) c.Assert(out.AppID, Equals, app.ID) c.Assert(out.ReleaseID, Equals, release.ID) c.Assert(out.Processes["web"], Equals, 1) gotFormation := &ct.Formation{} var path string if useName { path = formationPath(app.Name, release.ID) } else { path = formationPath(app.ID, release.ID) } res, err := s.Get(path, gotFormation) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotFormation, DeepEquals, out) res, err = s.Get(path+"fail", gotFormation) c.Assert(res.StatusCode, Equals, 404, Commentf("path:%s formation:", path+"fail")) } } func (s *S) createTestFormation(c *C, 
formation *ct.Formation) *ct.Formation { path := formationPath(formation.AppID, formation.ReleaseID) formation.AppID = "" formation.ReleaseID = "" out := &ct.Formation{} res, err := s.Put(path, formation, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func formationPath(appID, releaseID string) string { return "/apps/" + appID + "/formations/" + releaseID } func (s *S) TestDeleteFormation(c *C) { for i, useName := range []bool{false, true}
} func (s *S) TestCreateKey(c *C) { in := &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5r1JfsAYIFi86KBa7C5nqKo+BLMJk29+5GsjelgBnCmn4J/QxOrVtovNcntoRLUCRwoHEMHzs3Tc6+PdswIxpX1l3YC78kgdJe6LVb962xUgP6xuxauBNRO7tnh9aPGyLbjl9j7qZAcn2/ansG1GBVoX1GSB58iBsVDH18DdVzlGwrR4OeNLmRQj8kuJEuKOoKEkW55CektcXjV08K3QSQID7aRNHgDpGGgp6XDi0GhIMsuDUGHAdPGZnqYZlxuUFaCW2hK6i1UkwnQCCEv/9IUFl2/aqVep2iX/ynrIaIsNKm16o0ooZ1gCHJEuUKRPUXhZUXqkRXqqHd3a4CUhH jonathan@titanous.com"} out := s.createTestKey(c, in) c.Assert(out.ID, Equals, "7ab054ff4a2009fadc67e1f8b380dbee") c.Assert(out.Key, Equals, in.Key[:strings.LastIndex(in.Key, " ")]) c.Assert(out.Comment, Equals, "jonathan@titanous.com") gotKey := &ct.Key{} path := "/keys/" + out.ID res, err := s.Get(path, gotKey) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotKey, DeepEquals, out) res, err = s.Get(path+"fail", gotKey) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestDeleteKey(c *C) { key := s.createTestKey(c, &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJv/RsyRxiSAh7cU236LOCZ3vD9PO87Fi32QbojQxuGDotmk65fN6WUuL7DQjzUnWkFRu4w/svmb+9MuYK0L2b4Kc1rKXBYaytzWqGtv2VaAFObth40AlNr0V26hcTcBNQQPa23Z8LwQNgELn2b/o2CK+Pie1UbE5lHg8R+pm03cI7fYPB0jA6LIS+IVKHslVhjzxtN49xm9W0DiCxouHZEl+Fd5asgtg10HN7CV5l2+ZFyrPAkxkQrzWpkUMgfvU+xFamyczzBKMT0fTYo+TUM3w3w3njJvqXdHjo3anrUF65rSFxfeNkXoe/NQDdvWu+XBfEypWv25hlQv91JI0N"}) path := "/keys/" + key.ID res, err := s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, key) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestRecreateKey(c *C) { key := &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3I4gHed4RioRMoJTFdVYp9S6QhHUtMe2cdQAmaN5lVuAaEe9GmJ/wtD4pd7sCpw9daCVOD/WWKCDunrwiEwMNzZKPFQPRfrGAgpCdweD+mk62n/DuaeKJFcfB4C/iLqUrYQ9q0QNnokchI4Ts/CaWoesJOQsbtxDwxcaOlYA/Yq/nY/RA3aK0ZfZqngrOjNRuvhnNFeCF94w2CwwX9ley+PtL0LSWOK2F9D/VEAoRMY89av6WQEoho3vLH7PIOP4OKdla7ezxP9nU14MN4PSv2yUS15mZ14SkA3EF+xmO0QXYUcUi4v5UxkBpoRYNAh32KMMD70pXPRCmWvZ5pRrH 
lewis@lmars.net"} originalKey := s.createTestKey(c, key) c.Assert(originalKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559") c.Assert(originalKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")]) c.Assert(originalKey.Comment, Equals, "lewis@lmars.net") // Post a duplicate res, err := s.Post("/keys", key, &ct.Key{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error // Delete the original path := "/keys/" + originalKey.ID res, err = s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) // Create the same key newKey := s.createTestKey(c, key) c.Assert(newKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559") c.Assert(newKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")]) c.Assert(newKey.Comment, Equals, "lewis@lmars.net") } func (s *S) TestAppList(c *C) { s.createTestApp(c, &ct.App{Name: "list-test"}) var list []ct.App res, err := s.Get("/apps", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestReleaseList(c *C) { s.createTestRelease(c, &ct.Release{}) var list []ct.Release res, err := s.Get("/releases", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestKeyList(c *C) { s.createTestKey(c, &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqE9AJti/17eigkIhA7+6TF9rdTVxjPv80UxIT6ELaNPHegqib5m94Wab4UoZAGtBPLKJs9o8LRO3H29X5q5eXCU5mwx4qQhcMEYkILWj0Y1T39Xi2RI3jiWcTsphAAYmy+uT2Nt740OK1FaQxfdzYx4cjsjtb8L82e35BkJE2TdjXWkeHxZWDZxMlZXme56jTNsqB2OuC0gfbAbrjSCkolvK1RJbBZSSBgKQrYXiyYjjLfcw2O0ZAKPBeS8ckVf6PO8s/+azZzJZ0Kl7YGHYEX3xRi6sJS0gsI4Y6+sddT1zT5kh0Bg3C8cKnZ1NiVXLH0pPKz68PhjWhwpOVUehD"}) var list []ct.Key res, err := s.Get("/keys", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") for _, 
k := range list { s.Delete("/keys/" + k.ID) } res, err = s.Get("/keys", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(list, HasLen, 0) } func (s *S) TestArtifactList(c *C) { s.createTestArtifact(c, &ct.Artifact{}) var list []ct.Artifact res, err := s.Get("/artifacts", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestFormationList(c *C) { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: "formation-list"}) s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID}) var list []ct.Formation path := "/apps/" + app.ID + "/formations" res, err := s.Get(path, &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ReleaseID, Not(Equals), "") for _, f := range list { s.Delete(formationPath(f.AppID, f.ReleaseID)) } res, err = s.Get(path, &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(list, HasLen, 0) } func (s *S) setAppRelease(c *C, appID, id string) *ct.Release { out := &ct.Release{} res, err := s.Put("/apps/"+appID+"/release", &ct.Release{ID: id}, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestSetAppRelease(c *C) { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: "set-release"}) out := s.setAppRelease(c, app.ID, release.ID) c.Assert(out, DeepEquals, release) gotRelease := &ct.Release{} res, err := s.Get("/apps/"+app.ID+"/release", gotRelease) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotRelease, DeepEquals, release) res, err = s.Get("/apps/"+app.Name+"/release", gotRelease) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotRelease, DeepEquals, release) var formations []ct.Formation formationsPath := "/apps/" + app.ID + "/formations" res, err = 
s.Get(formationsPath, &formations) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(formations, HasLen, 0) s.createTestFormation(c, &ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: map[string]int{"web": 1}}) newRelease := s.createTestRelease(c, &ct.Release{}) s.setAppRelease(c, app.ID, newRelease.ID) res, err = s.Get(formationsPath, &formations) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(formations, HasLen, 1) c.Assert(formations[0].ReleaseID, Equals, newRelease.ID) } func (s *S) createTestProvider(c *C, provider *ct.Provider) *ct.Provider { out := &ct.Provider{} res, err := s.Post("/providers", provider, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateProvider(c *C) { provider := s.createTestProvider(c, &ct.Provider{URL: "https://example.com", Name: "foo"}) c.Assert(provider.Name, Equals, "foo") c.Assert(provider.URL, Equals, "https://example.com") c.Assert(provider.ID, Not(Equals), "") gotProvider := &ct.Provider{} res, err := s.Get("/providers/"+provider.ID, gotProvider) c.Assert(err, IsNil) c.Assert(gotProvider, DeepEquals, provider) res, err = s.Get("/providers/"+provider.Name, gotProvider) c.Assert(err, IsNil) c.Assert(gotProvider, DeepEquals, provider) res, err = s.Get("/apps/fail"+provider.ID, gotProvider) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestProviderList(c *C) { s.createTestProvider(c, &ct.Provider{URL: "https://example.org", Name: "list-test"}) var list []ct.Provider res, err := s.Get("/providers", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") }
{ release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("delete-formation-%d", i)}) out := s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID}) var path string if useName { path = formationPath(app.Name, release.ID) } else { path = formationPath(app.ID, release.ID) } res, err := s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, out) c.Assert(res.StatusCode, Equals, 404) }
conditional_block
controller_test.go
package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "net/http" "net/http/httptest" "os" "strings" "testing" "github.com/flynn/go-sql" "github.com/flynn/rpcplus" "github.com/go-martini/martini" tu "github.com/flynn/flynn-controller/testutils" ct "github.com/flynn/flynn-controller/types" "github.com/flynn/flynn-controller/utils" _ "github.com/flynn/pq" . "github.com/titanous/gocheck" ) // Hook gocheck up to the "go test" runner func Test(t *testing.T) { TestingT(t) } type S struct { cc *tu.FakeCluster srv *httptest.Server m *martini.Martini } var _ = Suite(&S{}) func (s *S) SetUpSuite(c *C) { dbname := "controllertest" if os.Getenv("PGDATABASE") != "" { dbname = os.Getenv("PGDATABASE") } else { os.Setenv("PGDATABASE", dbname) } if os.Getenv("PGSSLMODE") == "" { os.Setenv("PGSSLMODE", "disable") } db, err := sql.Open("postgres", "dbname=postgres") if err != nil { c.Fatal(err) } if _, err := db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s", dbname)); err != nil { c.Fatal(err) } if _, err := db.Exec(fmt.Sprintf("CREATE DATABASE %s", dbname)); err != nil { c.Fatal(err) } db.Close() dsn := fmt.Sprintf("dbname=%s", dbname) db, err = sql.Open("postgres", dsn) if err != nil { c.Fatal(err) } if err = migrateDB(db); err != nil { c.Fatal(err) } dbw := testDBWrapper{DB: db, dsn: dsn} s.cc = tu.NewFakeCluster() handler, m := appHandler(handlerConfig{db: dbw, cc: s.cc, sc: newFakeRouter(), key: "test"}) s.m = m s.srv = httptest.NewServer(handler) } type testDBWrapper struct { *sql.DB dsn string } func (w testDBWrapper) DSN() string { return w.dsn } func (w testDBWrapper) Database() *sql.DB { return w.DB } var authKey = "test" func (s *S) send(method, path string, in, out interface{}) (*http.Response, error) { buf, err := json.Marshal(in) if err != nil { return nil, err } req, err := http.NewRequest(method, s.srv.URL+path, bytes.NewBuffer(buf)) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") req.SetBasicAuth("", authKey) res, err := 
http.DefaultClient.Do(req) if err != nil { return nil, err } if out != nil && res.StatusCode == 200 { defer res.Body.Close() return res, json.NewDecoder(res.Body).Decode(out) } return res, nil } func (s *S) body(res *http.Response) (string, error) { defer res.Body.Close() buf, err := ioutil.ReadAll(res.Body) if err != nil { return "", err } return string(buf), nil } func (s *S) Post(path string, in, out interface{}) (*http.Response, error) { return s.send("POST", path, in, out) } func (s *S) Put(path string, in, out interface{}) (*http.Response, error) { return s.send("PUT", path, in, out) } func (s *S) Get(path string, data interface{}) (*http.Response, error) { req, err := http.NewRequest("GET", s.srv.URL+path, nil) if err != nil { return nil, err } req.SetBasicAuth("", authKey) res, err := http.DefaultClient.Do(req) defer res.Body.Close() if res.StatusCode != http.StatusOK { return res, fmt.Errorf("Unexpected status code %d", res.StatusCode) } return res, json.NewDecoder(res.Body).Decode(data) } func (s *S) Delete(path string) (*http.Response, error) { req, err := http.NewRequest("DELETE", s.srv.URL+path, nil) if err != nil { return nil, err } req.SetBasicAuth("", authKey) return http.DefaultClient.Do(req) } func (s *S) TestBadAuth(c *C) { res, err := http.Get(s.srv.URL + "/apps") c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, 401) req, err := http.NewRequest("GET", s.srv.URL+"/apps", nil) c.Assert(err, IsNil) req.SetBasicAuth("", authKey+"wrong") res, err = http.DefaultClient.Do(req) c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, 401) _, err = rpcplus.DialHTTP("tcp", s.srv.Listener.Addr().String()) c.Assert(err, Not(IsNil)) } func (s *S) createTestApp(c *C, in *ct.App) *ct.App { out := &ct.App{} res, err := s.Post("/apps", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateApp(c *C) { // app with no name returns 400 res, err := s.Post("/apps", &ct.App{}, 
&ct.App{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 400) body, err := s.body(res) c.Assert(err, IsNil) c.Assert(body, Equals, `{"field":"name","message":"must not be blank"}`) for i, id := range []string{"", utils.UUID()} { name := fmt.Sprintf("create-app-%d", i) app := s.createTestApp(c, &ct.App{ID: id, Name: name, Protected: true, Meta: map[string]string{"foo": "bar"}}) c.Assert(app.Name, Equals, name) c.Assert(app.ID, Not(Equals), "") if id != "" { c.Assert(app.ID, Equals, id) } c.Assert(app.Protected, Equals, true) c.Assert(app.Meta["foo"], Equals, "bar") gotApp := &ct.App{} res, err := s.Get("/apps/"+app.ID, gotApp) c.Assert(err, IsNil) c.Assert(gotApp, DeepEquals, app) res, err = s.Get("/apps/"+app.Name, gotApp) c.Assert(err, IsNil) c.Assert(gotApp, DeepEquals, app) res, err = s.Get("/apps/fail"+app.ID, gotApp) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestUpdateApp(c *C) { meta := map[string]string{"foo": "bar"} app := s.createTestApp(c, &ct.App{Name: "update-app", Meta: meta}) c.Assert(app.Protected, Equals, false) c.Assert(app.Meta, DeepEquals, meta) gotApp := &ct.App{} res, err := s.Post("/apps/"+app.Name, map[string]bool{"protected": true}, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, true) c.Assert(gotApp.Meta, DeepEquals, meta) meta = map[string]string{"foo": "baz", "bar": "foo"} res, err = s.Post("/apps/"+app.ID, map[string]interface{}{"protected": false, "meta": meta}, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, false) c.Assert(gotApp.Meta, DeepEquals, meta) res, err = s.Get("/apps/"+app.ID, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, false) c.Assert(gotApp.Meta, DeepEquals, meta) } func (s *S) TestDeleteApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "delete-app"}) path := "/apps/" + app.ID res, err := s.Delete(path) c.Assert(err, IsNil) 
c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, app) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestRecreateApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "recreate-app"}) // Post a duplicate res, err := s.Post("/apps", app, &ct.App{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error // Delete the original path := "/apps/" + app.ID res, err = s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) // Create the same key app = s.createTestApp(c, &ct.App{Name: "recreate-app"}) c.Assert(app.Name, Equals, "recreate-app") } func (s *S) TestProtectedApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "protected-app", Protected: true}) release := s.createTestRelease(c, &ct.Release{ Processes: map[string]ct.ProcessType{"web": {}, "worker": {}}, }) path := formationPath(app.ID, release.ID) for _, t := range []struct { procs map[string]int status int }{ {nil, 400}, {map[string]int{"web": 1}, 400}, {map[string]int{"worker": 1, "web": 0}, 400}, {map[string]int{"worker": 1, "web": 1}, 200}, } { res, err := s.Put(path, &ct.Formation{Processes: t.procs}, nil) c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, t.status) } } func (s *S) createTestArtifact(c *C, in *ct.Artifact) *ct.Artifact { out := &ct.Artifact{} res, err := s.Post("/artifacts", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateArtifact(c *C) { for i, id := range []string{"", utils.UUID()} { in := &ct.Artifact{ ID: id, Type: "docker-image", URI: fmt.Sprintf("docker://flynn/host?id=adsf%d", i), } out := s.createTestArtifact(c, in) c.Assert(out.Type, Equals, in.Type) c.Assert(out.URI, Equals, in.URI) c.Assert(out.ID, Not(Equals), "") if id != "" { c.Assert(out.ID, Equals, id) } gotArtifact := &ct.Artifact{} res, err := s.Get("/artifacts/"+out.ID, gotArtifact) c.Assert(err, IsNil) c.Assert(gotArtifact, DeepEquals, out) res, err = 
s.Get("/artifacts/fail"+out.ID, gotArtifact) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release { if in.ArtifactID == "" { in.ArtifactID = s.createTestArtifact(c, &ct.Artifact{}).ID } out := &ct.Release{} res, err := s.Post("/releases", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) createTestKey(c *C, in *ct.Key) *ct.Key { out := &ct.Key{} res, err := s.Post("/keys", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateRelease(c *C) { for _, id := range []string{"", utils.UUID()} { in := &ct.Release{ID: id} out := s.createTestRelease(c, in) c.Assert(out.ArtifactID, Equals, in.ArtifactID) if id != "" { c.Assert(out.ID, Equals, id) } gotRelease := &ct.Release{} res, err := s.Get("/releases/"+out.ID, gotRelease) c.Assert(err, IsNil) c.Assert(gotRelease, DeepEquals, out) res, err = s.Get("/releases/fail"+out.ID, gotRelease) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestCreateFormation(c *C) { for i, useName := range []bool{false, true} { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("create-formation-%d", i)}) in := &ct.Formation{ReleaseID: release.ID, AppID: app.ID, Processes: map[string]int{"web": 1}} if useName { in.AppID = app.Name } out := s.createTestFormation(c, in) c.Assert(out.AppID, Equals, app.ID) c.Assert(out.ReleaseID, Equals, release.ID) c.Assert(out.Processes["web"], Equals, 1) gotFormation := &ct.Formation{} var path string if useName { path = formationPath(app.Name, release.ID) } else { path = formationPath(app.ID, release.ID) } res, err := s.Get(path, gotFormation) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotFormation, DeepEquals, out) res, err = s.Get(path+"fail", gotFormation) c.Assert(res.StatusCode, Equals, 404, Commentf("path:%s formation:", path+"fail")) } } func (s *S) createTestFormation(c *C, 
formation *ct.Formation) *ct.Formation { path := formationPath(formation.AppID, formation.ReleaseID) formation.AppID = "" formation.ReleaseID = "" out := &ct.Formation{} res, err := s.Put(path, formation, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func formationPath(appID, releaseID string) string { return "/apps/" + appID + "/formations/" + releaseID } func (s *S) TestDeleteFormation(c *C) { for i, useName := range []bool{false, true} { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("delete-formation-%d", i)}) out := s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID}) var path string if useName { path = formationPath(app.Name, release.ID) } else { path = formationPath(app.ID, release.ID) } res, err := s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, out) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestCreateKey(c *C) { in := &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5r1JfsAYIFi86KBa7C5nqKo+BLMJk29+5GsjelgBnCmn4J/QxOrVtovNcntoRLUCRwoHEMHzs3Tc6+PdswIxpX1l3YC78kgdJe6LVb962xUgP6xuxauBNRO7tnh9aPGyLbjl9j7qZAcn2/ansG1GBVoX1GSB58iBsVDH18DdVzlGwrR4OeNLmRQj8kuJEuKOoKEkW55CektcXjV08K3QSQID7aRNHgDpGGgp6XDi0GhIMsuDUGHAdPGZnqYZlxuUFaCW2hK6i1UkwnQCCEv/9IUFl2/aqVep2iX/ynrIaIsNKm16o0ooZ1gCHJEuUKRPUXhZUXqkRXqqHd3a4CUhH jonathan@titanous.com"} out := s.createTestKey(c, in) c.Assert(out.ID, Equals, "7ab054ff4a2009fadc67e1f8b380dbee") c.Assert(out.Key, Equals, in.Key[:strings.LastIndex(in.Key, " ")]) c.Assert(out.Comment, Equals, "jonathan@titanous.com") gotKey := &ct.Key{} path := "/keys/" + out.ID res, err := s.Get(path, gotKey) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotKey, DeepEquals, out) res, err = s.Get(path+"fail", gotKey) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestDeleteKey(c *C) { key := s.createTestKey(c, &ct.Key{Key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDJv/RsyRxiSAh7cU236LOCZ3vD9PO87Fi32QbojQxuGDotmk65fN6WUuL7DQjzUnWkFRu4w/svmb+9MuYK0L2b4Kc1rKXBYaytzWqGtv2VaAFObth40AlNr0V26hcTcBNQQPa23Z8LwQNgELn2b/o2CK+Pie1UbE5lHg8R+pm03cI7fYPB0jA6LIS+IVKHslVhjzxtN49xm9W0DiCxouHZEl+Fd5asgtg10HN7CV5l2+ZFyrPAkxkQrzWpkUMgfvU+xFamyczzBKMT0fTYo+TUM3w3w3njJvqXdHjo3anrUF65rSFxfeNkXoe/NQDdvWu+XBfEypWv25hlQv91JI0N"}) path := "/keys/" + key.ID res, err := s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, key) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestRecreateKey(c *C) { key := &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3I4gHed4RioRMoJTFdVYp9S6QhHUtMe2cdQAmaN5lVuAaEe9GmJ/wtD4pd7sCpw9daCVOD/WWKCDunrwiEwMNzZKPFQPRfrGAgpCdweD+mk62n/DuaeKJFcfB4C/iLqUrYQ9q0QNnokchI4Ts/CaWoesJOQsbtxDwxcaOlYA/Yq/nY/RA3aK0ZfZqngrOjNRuvhnNFeCF94w2CwwX9ley+PtL0LSWOK2F9D/VEAoRMY89av6WQEoho3vLH7PIOP4OKdla7ezxP9nU14MN4PSv2yUS15mZ14SkA3EF+xmO0QXYUcUi4v5UxkBpoRYNAh32KMMD70pXPRCmWvZ5pRrH lewis@lmars.net"} originalKey := s.createTestKey(c, key) c.Assert(originalKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559") c.Assert(originalKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")]) c.Assert(originalKey.Comment, Equals, "lewis@lmars.net") // Post a duplicate res, err := s.Post("/keys", key, &ct.Key{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error // Delete the original path := "/keys/" + originalKey.ID res, err = s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) // Create the same key newKey := s.createTestKey(c, key) c.Assert(newKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559") c.Assert(newKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")]) c.Assert(newKey.Comment, Equals, "lewis@lmars.net") } func (s *S) TestAppList(c *C)
func (s *S) TestReleaseList(c *C) { s.createTestRelease(c, &ct.Release{}) var list []ct.Release res, err := s.Get("/releases", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestKeyList(c *C) { s.createTestKey(c, &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqE9AJti/17eigkIhA7+6TF9rdTVxjPv80UxIT6ELaNPHegqib5m94Wab4UoZAGtBPLKJs9o8LRO3H29X5q5eXCU5mwx4qQhcMEYkILWj0Y1T39Xi2RI3jiWcTsphAAYmy+uT2Nt740OK1FaQxfdzYx4cjsjtb8L82e35BkJE2TdjXWkeHxZWDZxMlZXme56jTNsqB2OuC0gfbAbrjSCkolvK1RJbBZSSBgKQrYXiyYjjLfcw2O0ZAKPBeS8ckVf6PO8s/+azZzJZ0Kl7YGHYEX3xRi6sJS0gsI4Y6+sddT1zT5kh0Bg3C8cKnZ1NiVXLH0pPKz68PhjWhwpOVUehD"}) var list []ct.Key res, err := s.Get("/keys", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") for _, k := range list { s.Delete("/keys/" + k.ID) } res, err = s.Get("/keys", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(list, HasLen, 0) } func (s *S) TestArtifactList(c *C) { s.createTestArtifact(c, &ct.Artifact{}) var list []ct.Artifact res, err := s.Get("/artifacts", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestFormationList(c *C) { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: "formation-list"}) s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID}) var list []ct.Formation path := "/apps/" + app.ID + "/formations" res, err := s.Get(path, &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ReleaseID, Not(Equals), "") for _, f := range list { s.Delete(formationPath(f.AppID, f.ReleaseID)) } res, err = s.Get(path, &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(list, 
HasLen, 0) } func (s *S) setAppRelease(c *C, appID, id string) *ct.Release { out := &ct.Release{} res, err := s.Put("/apps/"+appID+"/release", &ct.Release{ID: id}, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestSetAppRelease(c *C) { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: "set-release"}) out := s.setAppRelease(c, app.ID, release.ID) c.Assert(out, DeepEquals, release) gotRelease := &ct.Release{} res, err := s.Get("/apps/"+app.ID+"/release", gotRelease) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotRelease, DeepEquals, release) res, err = s.Get("/apps/"+app.Name+"/release", gotRelease) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotRelease, DeepEquals, release) var formations []ct.Formation formationsPath := "/apps/" + app.ID + "/formations" res, err = s.Get(formationsPath, &formations) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(formations, HasLen, 0) s.createTestFormation(c, &ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: map[string]int{"web": 1}}) newRelease := s.createTestRelease(c, &ct.Release{}) s.setAppRelease(c, app.ID, newRelease.ID) res, err = s.Get(formationsPath, &formations) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(formations, HasLen, 1) c.Assert(formations[0].ReleaseID, Equals, newRelease.ID) } func (s *S) createTestProvider(c *C, provider *ct.Provider) *ct.Provider { out := &ct.Provider{} res, err := s.Post("/providers", provider, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateProvider(c *C) { provider := s.createTestProvider(c, &ct.Provider{URL: "https://example.com", Name: "foo"}) c.Assert(provider.Name, Equals, "foo") c.Assert(provider.URL, Equals, "https://example.com") c.Assert(provider.ID, Not(Equals), "") gotProvider := &ct.Provider{} res, err := s.Get("/providers/"+provider.ID, 
gotProvider) c.Assert(err, IsNil) c.Assert(gotProvider, DeepEquals, provider) res, err = s.Get("/providers/"+provider.Name, gotProvider) c.Assert(err, IsNil) c.Assert(gotProvider, DeepEquals, provider) res, err = s.Get("/apps/fail"+provider.ID, gotProvider) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestProviderList(c *C) { s.createTestProvider(c, &ct.Provider{URL: "https://example.org", Name: "list-test"}) var list []ct.Provider res, err := s.Get("/providers", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") }
{ s.createTestApp(c, &ct.App{Name: "list-test"}) var list []ct.App res, err := s.Get("/apps", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") }
identifier_body
controller_test.go
package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "net/http" "net/http/httptest" "os" "strings" "testing" "github.com/flynn/go-sql" "github.com/flynn/rpcplus" "github.com/go-martini/martini" tu "github.com/flynn/flynn-controller/testutils" ct "github.com/flynn/flynn-controller/types" "github.com/flynn/flynn-controller/utils" _ "github.com/flynn/pq" . "github.com/titanous/gocheck" ) // Hook gocheck up to the "go test" runner func Test(t *testing.T) { TestingT(t) } type S struct { cc *tu.FakeCluster srv *httptest.Server m *martini.Martini } var _ = Suite(&S{}) func (s *S) SetUpSuite(c *C) { dbname := "controllertest" if os.Getenv("PGDATABASE") != "" { dbname = os.Getenv("PGDATABASE") } else { os.Setenv("PGDATABASE", dbname) } if os.Getenv("PGSSLMODE") == "" { os.Setenv("PGSSLMODE", "disable") } db, err := sql.Open("postgres", "dbname=postgres") if err != nil { c.Fatal(err) } if _, err := db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s", dbname)); err != nil { c.Fatal(err) } if _, err := db.Exec(fmt.Sprintf("CREATE DATABASE %s", dbname)); err != nil { c.Fatal(err) } db.Close() dsn := fmt.Sprintf("dbname=%s", dbname) db, err = sql.Open("postgres", dsn) if err != nil { c.Fatal(err) } if err = migrateDB(db); err != nil { c.Fatal(err) } dbw := testDBWrapper{DB: db, dsn: dsn} s.cc = tu.NewFakeCluster() handler, m := appHandler(handlerConfig{db: dbw, cc: s.cc, sc: newFakeRouter(), key: "test"}) s.m = m s.srv = httptest.NewServer(handler) } type testDBWrapper struct { *sql.DB dsn string } func (w testDBWrapper) DSN() string { return w.dsn } func (w testDBWrapper) Database() *sql.DB { return w.DB } var authKey = "test" func (s *S) send(method, path string, in, out interface{}) (*http.Response, error) { buf, err := json.Marshal(in) if err != nil { return nil, err } req, err := http.NewRequest(method, s.srv.URL+path, bytes.NewBuffer(buf)) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") req.SetBasicAuth("", authKey) res, err := 
http.DefaultClient.Do(req) if err != nil { return nil, err } if out != nil && res.StatusCode == 200 { defer res.Body.Close() return res, json.NewDecoder(res.Body).Decode(out) } return res, nil } func (s *S) body(res *http.Response) (string, error) { defer res.Body.Close() buf, err := ioutil.ReadAll(res.Body) if err != nil { return "", err } return string(buf), nil } func (s *S) Post(path string, in, out interface{}) (*http.Response, error) { return s.send("POST", path, in, out) } func (s *S) Put(path string, in, out interface{}) (*http.Response, error) { return s.send("PUT", path, in, out) } func (s *S) Get(path string, data interface{}) (*http.Response, error) { req, err := http.NewRequest("GET", s.srv.URL+path, nil) if err != nil { return nil, err } req.SetBasicAuth("", authKey) res, err := http.DefaultClient.Do(req) defer res.Body.Close() if res.StatusCode != http.StatusOK { return res, fmt.Errorf("Unexpected status code %d", res.StatusCode) } return res, json.NewDecoder(res.Body).Decode(data) } func (s *S) Delete(path string) (*http.Response, error) { req, err := http.NewRequest("DELETE", s.srv.URL+path, nil) if err != nil { return nil, err } req.SetBasicAuth("", authKey) return http.DefaultClient.Do(req) } func (s *S) TestBadAuth(c *C) { res, err := http.Get(s.srv.URL + "/apps") c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, 401) req, err := http.NewRequest("GET", s.srv.URL+"/apps", nil) c.Assert(err, IsNil) req.SetBasicAuth("", authKey+"wrong") res, err = http.DefaultClient.Do(req) c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, 401) _, err = rpcplus.DialHTTP("tcp", s.srv.Listener.Addr().String()) c.Assert(err, Not(IsNil)) } func (s *S) createTestApp(c *C, in *ct.App) *ct.App { out := &ct.App{} res, err := s.Post("/apps", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateApp(c *C) { // app with no name returns 400 res, err := s.Post("/apps", &ct.App{}, 
&ct.App{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 400) body, err := s.body(res) c.Assert(err, IsNil) c.Assert(body, Equals, `{"field":"name","message":"must not be blank"}`) for i, id := range []string{"", utils.UUID()} { name := fmt.Sprintf("create-app-%d", i) app := s.createTestApp(c, &ct.App{ID: id, Name: name, Protected: true, Meta: map[string]string{"foo": "bar"}}) c.Assert(app.Name, Equals, name) c.Assert(app.ID, Not(Equals), "") if id != "" { c.Assert(app.ID, Equals, id) } c.Assert(app.Protected, Equals, true) c.Assert(app.Meta["foo"], Equals, "bar") gotApp := &ct.App{} res, err := s.Get("/apps/"+app.ID, gotApp) c.Assert(err, IsNil) c.Assert(gotApp, DeepEquals, app) res, err = s.Get("/apps/"+app.Name, gotApp) c.Assert(err, IsNil) c.Assert(gotApp, DeepEquals, app) res, err = s.Get("/apps/fail"+app.ID, gotApp) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestUpdateApp(c *C) { meta := map[string]string{"foo": "bar"} app := s.createTestApp(c, &ct.App{Name: "update-app", Meta: meta}) c.Assert(app.Protected, Equals, false) c.Assert(app.Meta, DeepEquals, meta) gotApp := &ct.App{} res, err := s.Post("/apps/"+app.Name, map[string]bool{"protected": true}, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, true) c.Assert(gotApp.Meta, DeepEquals, meta) meta = map[string]string{"foo": "baz", "bar": "foo"} res, err = s.Post("/apps/"+app.ID, map[string]interface{}{"protected": false, "meta": meta}, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, false) c.Assert(gotApp.Meta, DeepEquals, meta) res, err = s.Get("/apps/"+app.ID, gotApp) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotApp.Protected, Equals, false) c.Assert(gotApp.Meta, DeepEquals, meta) } func (s *S) TestDeleteApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "delete-app"}) path := "/apps/" + app.ID res, err := s.Delete(path) c.Assert(err, IsNil) 
c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, app) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestRecreateApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "recreate-app"}) // Post a duplicate res, err := s.Post("/apps", app, &ct.App{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error // Delete the original path := "/apps/" + app.ID res, err = s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) // Create the same key app = s.createTestApp(c, &ct.App{Name: "recreate-app"}) c.Assert(app.Name, Equals, "recreate-app") } func (s *S) TestProtectedApp(c *C) { app := s.createTestApp(c, &ct.App{Name: "protected-app", Protected: true}) release := s.createTestRelease(c, &ct.Release{ Processes: map[string]ct.ProcessType{"web": {}, "worker": {}}, }) path := formationPath(app.ID, release.ID) for _, t := range []struct { procs map[string]int status int }{ {nil, 400}, {map[string]int{"web": 1}, 400}, {map[string]int{"worker": 1, "web": 0}, 400}, {map[string]int{"worker": 1, "web": 1}, 200}, } { res, err := s.Put(path, &ct.Formation{Processes: t.procs}, nil) c.Assert(err, IsNil) res.Body.Close() c.Assert(res.StatusCode, Equals, t.status) } } func (s *S) createTestArtifact(c *C, in *ct.Artifact) *ct.Artifact { out := &ct.Artifact{} res, err := s.Post("/artifacts", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateArtifact(c *C) { for i, id := range []string{"", utils.UUID()} { in := &ct.Artifact{ ID: id, Type: "docker-image", URI: fmt.Sprintf("docker://flynn/host?id=adsf%d", i), } out := s.createTestArtifact(c, in) c.Assert(out.Type, Equals, in.Type) c.Assert(out.URI, Equals, in.URI) c.Assert(out.ID, Not(Equals), "") if id != "" { c.Assert(out.ID, Equals, id) } gotArtifact := &ct.Artifact{} res, err := s.Get("/artifacts/"+out.ID, gotArtifact) c.Assert(err, IsNil) c.Assert(gotArtifact, DeepEquals, out) res, err = 
s.Get("/artifacts/fail"+out.ID, gotArtifact) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release { if in.ArtifactID == "" { in.ArtifactID = s.createTestArtifact(c, &ct.Artifact{}).ID } out := &ct.Release{} res, err := s.Post("/releases", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) createTestKey(c *C, in *ct.Key) *ct.Key { out := &ct.Key{} res, err := s.Post("/keys", in, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateRelease(c *C) { for _, id := range []string{"", utils.UUID()} { in := &ct.Release{ID: id} out := s.createTestRelease(c, in) c.Assert(out.ArtifactID, Equals, in.ArtifactID) if id != "" { c.Assert(out.ID, Equals, id) } gotRelease := &ct.Release{} res, err := s.Get("/releases/"+out.ID, gotRelease) c.Assert(err, IsNil) c.Assert(gotRelease, DeepEquals, out) res, err = s.Get("/releases/fail"+out.ID, gotRelease) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestCreateFormation(c *C) { for i, useName := range []bool{false, true} { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("create-formation-%d", i)}) in := &ct.Formation{ReleaseID: release.ID, AppID: app.ID, Processes: map[string]int{"web": 1}} if useName { in.AppID = app.Name } out := s.createTestFormation(c, in) c.Assert(out.AppID, Equals, app.ID) c.Assert(out.ReleaseID, Equals, release.ID) c.Assert(out.Processes["web"], Equals, 1) gotFormation := &ct.Formation{} var path string if useName { path = formationPath(app.Name, release.ID) } else { path = formationPath(app.ID, release.ID) } res, err := s.Get(path, gotFormation) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotFormation, DeepEquals, out) res, err = s.Get(path+"fail", gotFormation) c.Assert(res.StatusCode, Equals, 404, Commentf("path:%s formation:", path+"fail")) } } func (s *S) createTestFormation(c *C, 
formation *ct.Formation) *ct.Formation { path := formationPath(formation.AppID, formation.ReleaseID) formation.AppID = "" formation.ReleaseID = "" out := &ct.Formation{} res, err := s.Put(path, formation, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func
(appID, releaseID string) string { return "/apps/" + appID + "/formations/" + releaseID } func (s *S) TestDeleteFormation(c *C) { for i, useName := range []bool{false, true} { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("delete-formation-%d", i)}) out := s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID}) var path string if useName { path = formationPath(app.Name, release.ID) } else { path = formationPath(app.ID, release.ID) } res, err := s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, out) c.Assert(res.StatusCode, Equals, 404) } } func (s *S) TestCreateKey(c *C) { in := &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5r1JfsAYIFi86KBa7C5nqKo+BLMJk29+5GsjelgBnCmn4J/QxOrVtovNcntoRLUCRwoHEMHzs3Tc6+PdswIxpX1l3YC78kgdJe6LVb962xUgP6xuxauBNRO7tnh9aPGyLbjl9j7qZAcn2/ansG1GBVoX1GSB58iBsVDH18DdVzlGwrR4OeNLmRQj8kuJEuKOoKEkW55CektcXjV08K3QSQID7aRNHgDpGGgp6XDi0GhIMsuDUGHAdPGZnqYZlxuUFaCW2hK6i1UkwnQCCEv/9IUFl2/aqVep2iX/ynrIaIsNKm16o0ooZ1gCHJEuUKRPUXhZUXqkRXqqHd3a4CUhH jonathan@titanous.com"} out := s.createTestKey(c, in) c.Assert(out.ID, Equals, "7ab054ff4a2009fadc67e1f8b380dbee") c.Assert(out.Key, Equals, in.Key[:strings.LastIndex(in.Key, " ")]) c.Assert(out.Comment, Equals, "jonathan@titanous.com") gotKey := &ct.Key{} path := "/keys/" + out.ID res, err := s.Get(path, gotKey) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotKey, DeepEquals, out) res, err = s.Get(path+"fail", gotKey) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestDeleteKey(c *C) { key := s.createTestKey(c, &ct.Key{Key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDJv/RsyRxiSAh7cU236LOCZ3vD9PO87Fi32QbojQxuGDotmk65fN6WUuL7DQjzUnWkFRu4w/svmb+9MuYK0L2b4Kc1rKXBYaytzWqGtv2VaAFObth40AlNr0V26hcTcBNQQPa23Z8LwQNgELn2b/o2CK+Pie1UbE5lHg8R+pm03cI7fYPB0jA6LIS+IVKHslVhjzxtN49xm9W0DiCxouHZEl+Fd5asgtg10HN7CV5l2+ZFyrPAkxkQrzWpkUMgfvU+xFamyczzBKMT0fTYo+TUM3w3w3njJvqXdHjo3anrUF65rSFxfeNkXoe/NQDdvWu+XBfEypWv25hlQv91JI0N"}) path := "/keys/" + key.ID res, err := s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) res, err = s.Get(path, key) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestRecreateKey(c *C) { key := &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3I4gHed4RioRMoJTFdVYp9S6QhHUtMe2cdQAmaN5lVuAaEe9GmJ/wtD4pd7sCpw9daCVOD/WWKCDunrwiEwMNzZKPFQPRfrGAgpCdweD+mk62n/DuaeKJFcfB4C/iLqUrYQ9q0QNnokchI4Ts/CaWoesJOQsbtxDwxcaOlYA/Yq/nY/RA3aK0ZfZqngrOjNRuvhnNFeCF94w2CwwX9ley+PtL0LSWOK2F9D/VEAoRMY89av6WQEoho3vLH7PIOP4OKdla7ezxP9nU14MN4PSv2yUS15mZ14SkA3EF+xmO0QXYUcUi4v5UxkBpoRYNAh32KMMD70pXPRCmWvZ5pRrH lewis@lmars.net"} originalKey := s.createTestKey(c, key) c.Assert(originalKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559") c.Assert(originalKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")]) c.Assert(originalKey.Comment, Equals, "lewis@lmars.net") // Post a duplicate res, err := s.Post("/keys", key, &ct.Key{}) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error // Delete the original path := "/keys/" + originalKey.ID res, err = s.Delete(path) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) // Create the same key newKey := s.createTestKey(c, key) c.Assert(newKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559") c.Assert(newKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")]) c.Assert(newKey.Comment, Equals, "lewis@lmars.net") } func (s *S) TestAppList(c *C) { s.createTestApp(c, &ct.App{Name: "list-test"}) var list []ct.App res, err := s.Get("/apps", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 
200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestReleaseList(c *C) { s.createTestRelease(c, &ct.Release{}) var list []ct.Release res, err := s.Get("/releases", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestKeyList(c *C) { s.createTestKey(c, &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqE9AJti/17eigkIhA7+6TF9rdTVxjPv80UxIT6ELaNPHegqib5m94Wab4UoZAGtBPLKJs9o8LRO3H29X5q5eXCU5mwx4qQhcMEYkILWj0Y1T39Xi2RI3jiWcTsphAAYmy+uT2Nt740OK1FaQxfdzYx4cjsjtb8L82e35BkJE2TdjXWkeHxZWDZxMlZXme56jTNsqB2OuC0gfbAbrjSCkolvK1RJbBZSSBgKQrYXiyYjjLfcw2O0ZAKPBeS8ckVf6PO8s/+azZzJZ0Kl7YGHYEX3xRi6sJS0gsI4Y6+sddT1zT5kh0Bg3C8cKnZ1NiVXLH0pPKz68PhjWhwpOVUehD"}) var list []ct.Key res, err := s.Get("/keys", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") for _, k := range list { s.Delete("/keys/" + k.ID) } res, err = s.Get("/keys", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(list, HasLen, 0) } func (s *S) TestArtifactList(c *C) { s.createTestArtifact(c, &ct.Artifact{}) var list []ct.Artifact res, err := s.Get("/artifacts", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") } func (s *S) TestFormationList(c *C) { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: "formation-list"}) s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID}) var list []ct.Formation path := "/apps/" + app.ID + "/formations" res, err := s.Get(path, &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ReleaseID, Not(Equals), "") for _, f := range list { s.Delete(formationPath(f.AppID, f.ReleaseID)) } res, err = s.Get(path, 
&list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(list, HasLen, 0) } func (s *S) setAppRelease(c *C, appID, id string) *ct.Release { out := &ct.Release{} res, err := s.Put("/apps/"+appID+"/release", &ct.Release{ID: id}, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestSetAppRelease(c *C) { release := s.createTestRelease(c, &ct.Release{}) app := s.createTestApp(c, &ct.App{Name: "set-release"}) out := s.setAppRelease(c, app.ID, release.ID) c.Assert(out, DeepEquals, release) gotRelease := &ct.Release{} res, err := s.Get("/apps/"+app.ID+"/release", gotRelease) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotRelease, DeepEquals, release) res, err = s.Get("/apps/"+app.Name+"/release", gotRelease) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(gotRelease, DeepEquals, release) var formations []ct.Formation formationsPath := "/apps/" + app.ID + "/formations" res, err = s.Get(formationsPath, &formations) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(formations, HasLen, 0) s.createTestFormation(c, &ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: map[string]int{"web": 1}}) newRelease := s.createTestRelease(c, &ct.Release{}) s.setAppRelease(c, app.ID, newRelease.ID) res, err = s.Get(formationsPath, &formations) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(formations, HasLen, 1) c.Assert(formations[0].ReleaseID, Equals, newRelease.ID) } func (s *S) createTestProvider(c *C, provider *ct.Provider) *ct.Provider { out := &ct.Provider{} res, err := s.Post("/providers", provider, out) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) return out } func (s *S) TestCreateProvider(c *C) { provider := s.createTestProvider(c, &ct.Provider{URL: "https://example.com", Name: "foo"}) c.Assert(provider.Name, Equals, "foo") c.Assert(provider.URL, Equals, "https://example.com") c.Assert(provider.ID, Not(Equals), "") 
gotProvider := &ct.Provider{} res, err := s.Get("/providers/"+provider.ID, gotProvider) c.Assert(err, IsNil) c.Assert(gotProvider, DeepEquals, provider) res, err = s.Get("/providers/"+provider.Name, gotProvider) c.Assert(err, IsNil) c.Assert(gotProvider, DeepEquals, provider) res, err = s.Get("/apps/fail"+provider.ID, gotProvider) c.Assert(res.StatusCode, Equals, 404) } func (s *S) TestProviderList(c *C) { s.createTestProvider(c, &ct.Provider{URL: "https://example.org", Name: "list-test"}) var list []ct.Provider res, err := s.Get("/providers", &list) c.Assert(err, IsNil) c.Assert(res.StatusCode, Equals, 200) c.Assert(len(list) > 0, Equals, true) c.Assert(list[0].ID, Not(Equals), "") }
formationPath
identifier_name
colores-empresa.component.ts
import { Component, OnInit, Inject } from '@angular/core'; import { FormControl, Validators, FormGroup } from '@angular/forms'; import { MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog'; import { ToastrService } from 'ngx-toastr'; import { Router } from '@angular/router'; import { Location } from '@angular/common'; import * as moment from 'moment'; import pdfMake from 'pdfmake/build/pdfmake'; import pdfFonts from 'pdfmake/build/vfs_fonts'; pdfMake.vfs = pdfFonts.pdfMake.vfs; import { EmpresaService } from 'src/app/servicios/catalogos/catEmpresa/empresa.service'; import { EmpleadoService } from 'src/app/servicios/empleado/empleadoRegistro/empleado.service'; import { ThemePalette } from '@angular/material/core'; import { ProgressSpinnerMode } from '@angular/material/progress-spinner'; import { MatRadioChange } from '@angular/material/radio'; @Component({ selector: 'app-colores-empresa', templateUrl: './colores-empresa.component.html', styleUrls: ['./colores-empresa.component.css'] })
export class ColoresEmpresaComponent implements OnInit { selec1 = false; selec2 = false; selec3 = false; selec4 = false; ingresarOtro = false; verOtro: any; fraseReporte = new FormControl(''); nuevaF = new FormControl(''); public fraseForm = new FormGroup({ fraseReporteF: this.fraseReporte, nuevaForm: this.nuevaF }); principal = new FormControl(''); secundario = new FormControl(''); public coloresForm = new FormGroup({ color_p: this.principal, color_s: this.secundario }); idEmpleado: number; empleado: any = []; p_color: any; s_color: any; frase: any; verColores: boolean = false; verFrase: boolean = false; /** * Variables progress spinner */ color: ThemePalette = 'primary'; mode: ProgressSpinnerMode = 'indeterminate'; value = 10; habilitarprogress: boolean = false; constructor( private rest: EmpresaService, public restE: EmpleadoService, private toastr: ToastrService, public router: Router, public location: Location, public dialogRef: MatDialogRef<ColoresEmpresaComponent>, @Inject(MAT_DIALOG_DATA) public data: any ) { this.idEmpleado = parseInt(localStorage.getItem('empleado')); } ngOnInit(): void { this.VerFormularios(); this.obtenerColores(); this.ObtenerEmpleados(this.idEmpleado); this.ObtenerLogo(); } VerFormularios() { if (this.data.ventana === 'colores') { this.verColores = true; } else { this.verFrase = true; this.ImprimirFrase(); } } // Método para ver la información del empleado ObtenerEmpleados(idemploy: any) { this.empleado = []; this.restE.getOneEmpleadoRest(idemploy).subscribe(data => { this.empleado = data; }) } // Método para obtener el logo de la empresa logo: any = String; ObtenerLogo() { this.rest.LogoEmpresaImagenBase64(localStorage.getItem('empresa')).subscribe(res => { this.logo = 'data:image/jpeg;base64,' + res.imagen; }); } CambiarColores() { this.habilitarprogress = true; let datos = { color_p: this.p_color, color_s: this.s_color, id: this.data.datos.id } this.rest.ActualizarColores(datos).subscribe(data => { this.toastr.success('Nuevos 
colores registrados exitosamente', '', { timeOut: 6000, }); this.obtenerColores(); this.dialogRef.close({ actualizar: true }); this.habilitarprogress = false; }) } empresas: any = []; obtenerColores() { this.empresas = []; this.rest.ConsultarDatosEmpresa(this.data.datos.id).subscribe(res => { this.empresas = res; this.p_color = this.empresas[0].color_p; this.s_color = this.empresas[0].color_s; this.frase = this.empresas[0].marca_agua; }); } ImprimirFrase() { if (this.data.datos.marca_agua === 'FullTime') { this.selec1 = true; this.fraseForm.patchValue({ fraseReporteF: 'Fulltime' }); } else if (this.data.datos.marca_agua === 'Confidencial') { this.selec2 = true; this.fraseForm.patchValue({ fraseReporteF: 'Confidencial' }); } else if (this.data.datos.marca_agua === '') { this.selec3 = true; this.fraseForm.patchValue({ fraseReporteF: '' }); } else { this.selec4 = true; this.fraseForm.patchValue({ fraseReporteF: 'Otro' }); } } IngresarFrase() { this.verOtro = { 'visibility': 'visible' }; this.ingresarOtro = true; } CambiarFrase(ob: MatRadioChange) { this.verOtro = { 'visibility': 'hidden' }; this.ingresarOtro = false; this.fraseForm.patchValue({ nuevaForm: '' }) this.frase = ob.value } VerArchivo(form) { if (form.fraseReporteF === 'Otro') { if (form.nuevaForm != '') { this.frase = form.nuevaForm; this.generarPdf('open'); } else { this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', { timeOut: 6000, }); } } else { this.generarPdf('open'); } } ActualizarFrase() { this.habilitarprogress = true; let datos = { marca_agua: this.frase, id: this.data.datos.id } this.rest.ActualizarMarcaAgua(datos).subscribe(data => { this.toastr.success('Nueva frase registrada exitosamente', '', { timeOut: 6000, }); this.obtenerColores(); this.dialogRef.close({ actualizar: true }); this.habilitarprogress = false; }) } GuardarFrase(form) { if (form.fraseReporteF === 'Otro') { if (form.nuevaForm != '') { this.frase = form.nuevaForm; 
this.ActualizarFrase(); } else { this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', { timeOut: 6000, }); } } else { this.ActualizarFrase(); } } /****************************************************************************************************** * MÉTODO PARA EXPORTAR A PDF ******************************************************************************************************/ generarPdf(action = 'open') { const documentDefinition = this.getDocumentDefinicion(); switch (action) { case 'open': pdfMake.createPdf(documentDefinition).open(); break; case 'print': pdfMake.createPdf(documentDefinition).print(); break; case 'download': pdfMake.createPdf(documentDefinition).download(); break; default: pdfMake.createPdf(documentDefinition).open(); break; } } getDocumentDefinicion() { sessionStorage.setItem('Empresas', this.empresas); return { // Encabezado de la página pageOrientation: 'landscape', watermark: { text: this.frase, color: 'blue', opacity: 0.1, bold: true, italics: false }, header: { text: 'Impreso por: ' + this.empleado[0].nombre + ' ' + this.empleado[0].apellido, margin: 5, fontSize: 9, opacity: 0.3, alignment: 'right' }, // Pie de página footer: function (currentPage: any, pageCount: any, fecha: any, hora: any) { var h = new Date(); var f = moment(); fecha = f.format('YYYY-MM-DD'); // Formato de hora actual if (h.getMinutes() < 10) { var time = h.getHours() + ':0' + h.getMinutes(); } else { var time = h.getHours() + ':' + h.getMinutes(); } return { margin: 10, columns: [ 'Fecha: ' + fecha + ' Hora: ' + time, , { text: [ { text: '© Pag ' + currentPage.toString() + ' of ' + pageCount, alignment: 'right', color: 'blue', opacity: 0.5 } ], } ], fontSize: 10, color: '#A4B8FF', } }, content: [ { image: this.logo, width: 150, margin: [10, -25, 0, 5] }, { text: 'Prueba de Colores', bold: true, fontSize: 20, alignment: 'center', margin: [0, -30, 0, 10] }, this.presentarDataPDFEmpresas(), ], styles: { tableHeader: { 
fontSize: 12, bold: true, alignment: 'center', fillColor: this.p_color }, tableHeaderS: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.s_color }, itemsTable: { fontSize: 10 }, itemsTableC: { fontSize: 10, alignment: 'center' }, } }; } presentarDataPDFEmpresas() { return { columns: [ { width: '*', text: '' }, { width: 'auto', table: { widths: [30, 'auto', 'auto', '*', '*', 'auto', 'auto', '*', '*'], body: [ [ { text: 'Id', style: 'tableHeader' }, { text: 'Nombre', style: 'tableHeader' }, { text: 'RUC', style: 'tableHeader' }, { text: 'Dirección', style: 'tableHeader' }, { text: 'Teléfono', style: 'tableHeader' }, { text: 'Correo', style: 'tableHeader' }, { text: 'Tipo de Empresa', style: 'tableHeader' }, { text: 'Representante', style: 'tableHeader' }, { text: 'Resumen', style: 'tableHeaderS' } ], ...this.empresas.map(obj => { return [ { text: obj.id, style: 'itemsTableC' }, { text: obj.nombre, style: 'itemsTable' }, { text: obj.ruc, style: 'itemsTableC' }, { text: obj.direccion, style: 'itemsTable' }, { text: obj.telefono, style: 'itemsTableC' }, { text: obj.correo, style: 'itemsTable' }, { text: obj.tipo_empresa, style: 'itemsTable' }, { text: obj.representante, style: 'itemsTable' }, { text: 'Generalidades', style: 'itemsTable' }, ]; }) ] } }, { width: '*', text: '' }, ] }; } cerrarVentana() { this.dialogRef.close({ actualizar: false }); } }
random_line_split
colores-empresa.component.ts
import { Component, OnInit, Inject } from '@angular/core'; import { FormControl, Validators, FormGroup } from '@angular/forms'; import { MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog'; import { ToastrService } from 'ngx-toastr'; import { Router } from '@angular/router'; import { Location } from '@angular/common'; import * as moment from 'moment'; import pdfMake from 'pdfmake/build/pdfmake'; import pdfFonts from 'pdfmake/build/vfs_fonts'; pdfMake.vfs = pdfFonts.pdfMake.vfs; import { EmpresaService } from 'src/app/servicios/catalogos/catEmpresa/empresa.service'; import { EmpleadoService } from 'src/app/servicios/empleado/empleadoRegistro/empleado.service'; import { ThemePalette } from '@angular/material/core'; import { ProgressSpinnerMode } from '@angular/material/progress-spinner'; import { MatRadioChange } from '@angular/material/radio'; @Component({ selector: 'app-colores-empresa', templateUrl: './colores-empresa.component.html', styleUrls: ['./colores-empresa.component.css'] }) export class ColoresEmpresaComponent implements OnInit { selec1 = false; selec2 = false; selec3 = false; selec4 = false; ingresarOtro = false; verOtro: any; fraseReporte = new FormControl(''); nuevaF = new FormControl(''); public fraseForm = new FormGroup({ fraseReporteF: this.fraseReporte, nuevaForm: this.nuevaF }); principal = new FormControl(''); secundario = new FormControl(''); public coloresForm = new FormGroup({ color_p: this.principal, color_s: this.secundario }); idEmpleado: number; empleado: any = []; p_color: any; s_color: any; frase: any; verColores: boolean = false; verFrase: boolean = false; /** * Variables progress spinner */ color: ThemePalette = 'primary'; mode: ProgressSpinnerMode = 'indeterminate'; value = 10; habilitarprogress: boolean = false; constructor( private rest: EmpresaService, public restE: EmpleadoService, private toastr: ToastrService, public router: Router, public location: Location, public dialogRef: MatDialogRef<ColoresEmpresaComponent>, 
@Inject(MAT_DIALOG_DATA) public data: any )
ngOnInit(): void { this.VerFormularios(); this.obtenerColores(); this.ObtenerEmpleados(this.idEmpleado); this.ObtenerLogo(); } VerFormularios() { if (this.data.ventana === 'colores') { this.verColores = true; } else { this.verFrase = true; this.ImprimirFrase(); } } // Método para ver la información del empleado ObtenerEmpleados(idemploy: any) { this.empleado = []; this.restE.getOneEmpleadoRest(idemploy).subscribe(data => { this.empleado = data; }) } // Método para obtener el logo de la empresa logo: any = String; ObtenerLogo() { this.rest.LogoEmpresaImagenBase64(localStorage.getItem('empresa')).subscribe(res => { this.logo = 'data:image/jpeg;base64,' + res.imagen; }); } CambiarColores() { this.habilitarprogress = true; let datos = { color_p: this.p_color, color_s: this.s_color, id: this.data.datos.id } this.rest.ActualizarColores(datos).subscribe(data => { this.toastr.success('Nuevos colores registrados exitosamente', '', { timeOut: 6000, }); this.obtenerColores(); this.dialogRef.close({ actualizar: true }); this.habilitarprogress = false; }) } empresas: any = []; obtenerColores() { this.empresas = []; this.rest.ConsultarDatosEmpresa(this.data.datos.id).subscribe(res => { this.empresas = res; this.p_color = this.empresas[0].color_p; this.s_color = this.empresas[0].color_s; this.frase = this.empresas[0].marca_agua; }); } ImprimirFrase() { if (this.data.datos.marca_agua === 'FullTime') { this.selec1 = true; this.fraseForm.patchValue({ fraseReporteF: 'Fulltime' }); } else if (this.data.datos.marca_agua === 'Confidencial') { this.selec2 = true; this.fraseForm.patchValue({ fraseReporteF: 'Confidencial' }); } else if (this.data.datos.marca_agua === '') { this.selec3 = true; this.fraseForm.patchValue({ fraseReporteF: '' }); } else { this.selec4 = true; this.fraseForm.patchValue({ fraseReporteF: 'Otro' }); } } IngresarFrase() { this.verOtro = { 'visibility': 'visible' }; this.ingresarOtro = true; } CambiarFrase(ob: MatRadioChange) { this.verOtro = { 'visibility': 'hidden' 
}; this.ingresarOtro = false; this.fraseForm.patchValue({ nuevaForm: '' }) this.frase = ob.value } VerArchivo(form) { if (form.fraseReporteF === 'Otro') { if (form.nuevaForm != '') { this.frase = form.nuevaForm; this.generarPdf('open'); } else { this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', { timeOut: 6000, }); } } else { this.generarPdf('open'); } } ActualizarFrase() { this.habilitarprogress = true; let datos = { marca_agua: this.frase, id: this.data.datos.id } this.rest.ActualizarMarcaAgua(datos).subscribe(data => { this.toastr.success('Nueva frase registrada exitosamente', '', { timeOut: 6000, }); this.obtenerColores(); this.dialogRef.close({ actualizar: true }); this.habilitarprogress = false; }) } GuardarFrase(form) { if (form.fraseReporteF === 'Otro') { if (form.nuevaForm != '') { this.frase = form.nuevaForm; this.ActualizarFrase(); } else { this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', { timeOut: 6000, }); } } else { this.ActualizarFrase(); } } /****************************************************************************************************** * MÉTODO PARA EXPORTAR A PDF ******************************************************************************************************/ generarPdf(action = 'open') { const documentDefinition = this.getDocumentDefinicion(); switch (action) { case 'open': pdfMake.createPdf(documentDefinition).open(); break; case 'print': pdfMake.createPdf(documentDefinition).print(); break; case 'download': pdfMake.createPdf(documentDefinition).download(); break; default: pdfMake.createPdf(documentDefinition).open(); break; } } getDocumentDefinicion() { sessionStorage.setItem('Empresas', this.empresas); return { // Encabezado de la página pageOrientation: 'landscape', watermark: { text: this.frase, color: 'blue', opacity: 0.1, bold: true, italics: false }, header: { text: 'Impreso por: ' + this.empleado[0].nombre + ' ' + 
this.empleado[0].apellido, margin: 5, fontSize: 9, opacity: 0.3, alignment: 'right' }, // Pie de página footer: function (currentPage: any, pageCount: any, fecha: any, hora: any) { var h = new Date(); var f = moment(); fecha = f.format('YYYY-MM-DD'); // Formato de hora actual if (h.getMinutes() < 10) { var time = h.getHours() + ':0' + h.getMinutes(); } else { var time = h.getHours() + ':' + h.getMinutes(); } return { margin: 10, columns: [ 'Fecha: ' + fecha + ' Hora: ' + time, , { text: [ { text: '© Pag ' + currentPage.toString() + ' of ' + pageCount, alignment: 'right', color: 'blue', opacity: 0.5 } ], } ], fontSize: 10, color: '#A4B8FF', } }, content: [ { image: this.logo, width: 150, margin: [10, -25, 0, 5] }, { text: 'Prueba de Colores', bold: true, fontSize: 20, alignment: 'center', margin: [0, -30, 0, 10] }, this.presentarDataPDFEmpresas(), ], styles: { tableHeader: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.p_color }, tableHeaderS: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.s_color }, itemsTable: { fontSize: 10 }, itemsTableC: { fontSize: 10, alignment: 'center' }, } }; } presentarDataPDFEmpresas() { return { columns: [ { width: '*', text: '' }, { width: 'auto', table: { widths: [30, 'auto', 'auto', '*', '*', 'auto', 'auto', '*', '*'], body: [ [ { text: 'Id', style: 'tableHeader' }, { text: 'Nombre', style: 'tableHeader' }, { text: 'RUC', style: 'tableHeader' }, { text: 'Dirección', style: 'tableHeader' }, { text: 'Teléfono', style: 'tableHeader' }, { text: 'Correo', style: 'tableHeader' }, { text: 'Tipo de Empresa', style: 'tableHeader' }, { text: 'Representante', style: 'tableHeader' }, { text: 'Resumen', style: 'tableHeaderS' } ], ...this.empresas.map(obj => { return [ { text: obj.id, style: 'itemsTableC' }, { text: obj.nombre, style: 'itemsTable' }, { text: obj.ruc, style: 'itemsTableC' }, { text: obj.direccion, style: 'itemsTable' }, { text: obj.telefono, style: 'itemsTableC' }, { text: obj.correo, style: 
'itemsTable' }, { text: obj.tipo_empresa, style: 'itemsTable' }, { text: obj.representante, style: 'itemsTable' }, { text: 'Generalidades', style: 'itemsTable' }, ]; }) ] } }, { width: '*', text: '' }, ] }; } cerrarVentana() { this.dialogRef.close({ actualizar: false }); } }
{ this.idEmpleado = parseInt(localStorage.getItem('empleado')); }
identifier_body
colores-empresa.component.ts
import { Component, OnInit, Inject } from '@angular/core'; import { FormControl, Validators, FormGroup } from '@angular/forms'; import { MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog'; import { ToastrService } from 'ngx-toastr'; import { Router } from '@angular/router'; import { Location } from '@angular/common'; import * as moment from 'moment'; import pdfMake from 'pdfmake/build/pdfmake'; import pdfFonts from 'pdfmake/build/vfs_fonts'; pdfMake.vfs = pdfFonts.pdfMake.vfs; import { EmpresaService } from 'src/app/servicios/catalogos/catEmpresa/empresa.service'; import { EmpleadoService } from 'src/app/servicios/empleado/empleadoRegistro/empleado.service'; import { ThemePalette } from '@angular/material/core'; import { ProgressSpinnerMode } from '@angular/material/progress-spinner'; import { MatRadioChange } from '@angular/material/radio'; @Component({ selector: 'app-colores-empresa', templateUrl: './colores-empresa.component.html', styleUrls: ['./colores-empresa.component.css'] }) export class ColoresEmpresaComponent implements OnInit { selec1 = false; selec2 = false; selec3 = false; selec4 = false; ingresarOtro = false; verOtro: any; fraseReporte = new FormControl(''); nuevaF = new FormControl(''); public fraseForm = new FormGroup({ fraseReporteF: this.fraseReporte, nuevaForm: this.nuevaF }); principal = new FormControl(''); secundario = new FormControl(''); public coloresForm = new FormGroup({ color_p: this.principal, color_s: this.secundario }); idEmpleado: number; empleado: any = []; p_color: any; s_color: any; frase: any; verColores: boolean = false; verFrase: boolean = false; /** * Variables progress spinner */ color: ThemePalette = 'primary'; mode: ProgressSpinnerMode = 'indeterminate'; value = 10; habilitarprogress: boolean = false; constructor( private rest: EmpresaService, public restE: EmpleadoService, private toastr: ToastrService, public router: Router, public location: Location, public dialogRef: MatDialogRef<ColoresEmpresaComponent>, 
@Inject(MAT_DIALOG_DATA) public data: any ) { this.idEmpleado = parseInt(localStorage.getItem('empleado')); } ngOnInit(): void { this.VerFormularios(); this.obtenerColores(); this.ObtenerEmpleados(this.idEmpleado); this.ObtenerLogo(); } VerFormularios() { if (this.data.ventana === 'colores') { this.verColores = true; } else { this.verFrase = true; this.ImprimirFrase(); } } // Método para ver la información del empleado ObtenerEmpleados(idemploy: any) { this.empleado = []; this.restE.getOneEmpleadoRest(idemploy).subscribe(data => { this.empleado = data; }) } // Método para obtener el logo de la empresa logo: any = String; ObtenerLogo() { this.rest.LogoEmpresaImagenBase64(localStorage.getItem('empresa')).subscribe(res => { this.logo = 'data:image/jpeg;base64,' + res.imagen; }); } CambiarColores() { this.habilitarprogress = true; let datos = { color_p: this.p_color, color_s: this.s_color, id: this.data.datos.id } this.rest.ActualizarColores(datos).subscribe(data => { this.toastr.success('Nuevos colores registrados exitosamente', '', { timeOut: 6000, }); this.obtenerColores(); this.dialogRef.close({ actualizar: true }); this.habilitarprogress = false; }) } empresas: any = []; obtenerColores() { this.empresas = []; this.rest.ConsultarDatosEmpresa(this.data.datos.id).subscribe(res => { this.empresas = res; this.p_color = this.empresas[0].color_p; this.s_color = this.empresas[0].color_s; this.frase = this.empresas[0].marca_agua; }); } ImprimirFrase() { if (this.data.datos.marca_agua === 'FullTime') { this.selec1 = true; this.fraseForm.patchValue({ fraseReporteF: 'Fulltime' }); } else if (this.data.datos.marca_agua === 'Confidencial') { this.selec2 = true; this.fraseForm.patchValue({ fraseReporteF: 'Confidencial' }); } else if (this.data.datos.marca_agua === '') { this.selec3 = true; this.fraseForm.patchValue({ fraseReporteF: '' }); } else { this.selec4 = true; this.fraseForm.patchValue({ fraseReporteF: 'Otro' }); } } IngresarFrase() { this.verOtro = { 'visibility': 
'visible' }; this.ingresarOtro = true; } CambiarFrase(ob: MatRadioChange) { this.verOtro = { 'visibility': 'hidden' }; this.ingresarOtro = false; this.fraseForm.patchValue({ nuevaForm: '' }) this.frase = ob.value } VerArchivo(form) { if (form.fraseReporteF === 'Otro') { if (form.nuevaForm != '') { this.frase = form.nuevaForm; this.generarPdf('open'); } else { this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', { timeOut: 6000, }); } } else { this.generarPdf('open'); } } ActualizarFrase() { this.habilitarprogress = true; let datos = { marca_agua: this.frase, id: this.data.datos.id } this.rest.ActualizarMarcaAgua(datos).subscribe(data => { this.toastr.success('Nueva frase registrada exitosamente', '', { timeOut: 6000, }); this.obtenerColores(); this.dialogRef.close({ actualizar: true }); this.habilitarprogress = false; }) } GuardarFrase(form) { if (form.fraseReporteF === 'Otro') { if (form.nuevaForm != '') { this.frase = form.nuevaForm; this.ActualizarFrase(); } else { this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', { timeOut: 6000, }); } } else { this.ActualizarFrase(); } } /****************************************************************************************************** * MÉTODO PARA EXPORTAR A PDF ******************************************************************************************************/ generarPdf(action = 'open') { const documentDefinition = this.getDocumentDefinicion(); switch (action) { case 'open': pdfMake.createPdf(documentDefinition).open(); break; case 'print': pdfMake.createPdf(documentDefinition).print(); break; case 'download': pdfMake.createPdf(documentDefinition).download(); break; default: pdfMake.createPdf(documentDefinition).open(); break; } } getDocumentDefinicion() { sessionStorage.setItem('Empresas', this.empresas); return { // Encabezado de la página pageOrientation: 'landscape', watermark: { text: this.frase, color: 'blue', opacity: 
0.1, bold: true, italics: false }, header: { text: 'Impreso por: ' + this.empleado[0].nombre + ' ' + this.empleado[0].apellido, margin: 5, fontSize: 9, opacity: 0.3, alignment: 'right' }, // Pie de página footer: function (currentPage: any, pageCount: any, fecha: any, hora: any) { var h = new Date(); var f = moment(); fecha = f.format('YYYY-MM-DD'); // Formato de hora actual if (h.getMinutes() < 10) { var time = h.getHours() + ':0' + h.getMinutes(); } else { var time = h.getHours() + ':' + h.getMinutes(); } return { margin: 10, columns: [ 'Fecha: ' + fecha + ' Hora: ' + time, , { text: [ { text: '© Pag ' + currentPage.toString() + ' of ' + pageCount, alignment: 'right', color: 'blue', opacity: 0.5 } ], } ], fontSize: 10, color: '#A4B8FF', } }, content: [ { image: this.logo, width: 150, margin: [10, -25, 0, 5] }, { text: 'Prueba de Colores', bold: true, fontSize: 20, alignment: 'center', margin: [0, -30, 0, 10] }, this.presentarDataPDFEmpresas(), ], styles: { tableHeader: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.p_color }, tableHeaderS: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.s_color }, itemsTable: { fontSize: 10 }, itemsTableC: { fontSize: 10, alignment: 'center' }, } }; } presentarDataPDFEmpresas() { return { columns: [ { width: '*', text: '' }, { width: 'auto', table: { widths: [30, 'auto', 'auto', '*', '*', 'auto', 'auto', '*', '*'], body: [ [ { text: 'Id', style: 'tableHeader' }, { text: 'Nombre', style: 'tableHeader' }, { text: 'RUC', style: 'tableHeader' }, { text: 'Dirección', style: 'tableHeader' }, { text: 'Teléfono', style: 'tableHeader' }, { text: 'Correo', style: 'tableHeader' }, { text: 'Tipo de Empresa', style: 'tableHeader' }, { text: 'Representante', style: 'tableHeader' }, { text: 'Resumen', style: 'tableHeaderS' } ], ...this.empresas.map(obj => { return [ { text: obj.id, style: 'itemsTableC' }, { text: obj.nombre, style: 'itemsTable' }, { text: obj.ruc, style: 'itemsTableC' }, { text: obj.direccion, 
style: 'itemsTable' }, { text: obj.telefono, style: 'itemsTableC' }, { text: obj.correo, style: 'itemsTable' }, { text: obj.tipo_empresa, style: 'itemsTable' }, { text: obj.representante, style: 'itemsTable' }, { text: 'Generalidades', style: 'itemsTable' }, ]; }) ] } }, { width: '*', text: '' }, ] }; } cerrarVen
this.dialogRef.close({ actualizar: false }); } }
tana() {
identifier_name
SuggestionManagerFRS.py
#This class implements methods which measure friendship between two users and make appropriate suggestion lists of users #to add in chat sessions and add as friends #The friendship is measured based on no of interactions between users against time #Therefore following class implements a scoring model to measure friend-ship between two user nodes #Inorder to measure the friendship it uses # ~ No of chats between user # ~ Duration of relationships from DataAccess import FriendshipManagerFRS from DataAccess import UserManagerFRS import operator import json import random class SuggestionManagerFRS(object): def __init__(self):
def makeFriendshipScore(self,email,friend): """ score friendship """ # * the total score will be out of 100 % # * weight for chat frequencey is 60% # * weight for duration from last chat 40% friendMgr =FriendshipManagerFRS() array = friendMgr.selectRelationship(email,friend) c=0 try: if len(array)==5: c+=1 #print "\n Id:"+str(c) #print"User: %s"% str(i['email']) #print "Dur:%s days from last chat" % str(i['duration']) #print "Chats: %s chats"% str(i['chats']) total=-1 chatVoteScore=self.scoreChatsNVotes(array[0],array[2],array[3]) durationScore=self.scoreDur(array[1]) total =self.calculateTotalScore(chatVoteScore,durationScore) #deduct / add by votes #print "chat score %s"% str(chatScore) #print "duration score %s" % str(durationScore) #print "total score %s"%str(float(total)/float(100)) "return score" return float(total)/100.0 except Exception, e: print str(e.message) def scoreChatsNVotes(self,chats,pvotes,nvotes): "Score Chats represents the affnity score" # 0 : 0 # >250 : 60 # 0-25 : 10 #25-50 : 20 #50-150 :30 #150-250:40 #250-500:50 chats =int(chats) pvotes= int(pvotes) nvotes = int(nvotes) if chats == 0: #no chats no marks if pvotes>nvotes:#if pvotes>=nvotes socre:+5 only ;else return 0 return 5 else:return 0 if 500<chats: # chats more than 250 full marks if nvotes>pvotes :#chats ?votes-5only return 55 else:return 60 score=0 if 0<chats and chats <= 25: score= 10 elif 25<chats and chats<=50: score= 20 elif chats<50 and chats<=150: score= 30 elif 150<chats and chats<=250: score= 40 elif 250<chats and chats<=500: score =50 score=self.voteHandler(score,pvotes,nvotes) return score #score for votes def voteHandler(self,score,pv,nv): pv =int(pv) nv= int(nv) if score>=5: if pv> nv: score+=5 elif pv<nv: score-=5 return score #score Duration def scoreDur(self,dur): "duration represents time decay" dur =int(dur) if 730 <dur: #more than 2 years return 0 if dur ==0: # today return 40 if 0<dur and dur<=182: #less than 6 months return 30 elif 182<dur and 365>=dur: # 6 month 
- 1 year return 20 elif 365<dur and 730>=dur:# 1 year - 2 years return 10 #calculate cumulative score def calculateTotalScore(self,chat,duration): if chat!=None and duration != None: return chat+ duration else: if chat ==None: chat =0 if duration == None: duration = 0 return duration + chat #sort the 8 product category values of user in decending order def sortUserPreferences(self,user): userMgr =UserManagerFRS() categories = userMgr.getCategoryExp(user) #sort list if len(categories)!=0: categories=sorted(categories.items(), key=operator.itemgetter(1)) return categories #get maximum category out of given list of product categories def getMaxCategory(self,categories): categories = dict(categories) maxIndex=max(categories.items(), key=operator.itemgetter(1))[0] # index of max. maxValue =max(categories.items(), key=operator.itemgetter(1))[1] # max. value maxCat ={maxIndex:maxValue} return maxCat #update all the relationship strength of user def upgradeRelationshipStrength(self,user): fMgr = FriendshipManagerFRS() user =str(user) allFriends = fMgr.selectAllFriends(user) for friend in allFriends: score=0 try: email = str(friend['email']) score =self.makeFriendshipScore(user,email) #calculate friendship score fMgr.upgradeRelationship(user,email,"strength",score) except Exception,e: print str(e.message)+" -2" continue finally:print email+"-> STR:"+str(score) #refine selected list of users for chat def refineChatList(self,user,catId): categoryKey ="cat"+catId friendMgr = FriendshipManagerFRS() uMgr =UserManagerFRS() #sorted on expereince about product category expFriends = friendMgr.selectFriendsForChatOnExp(user,categoryKey) print "sorted friendlist from highest experience about product category: \n \n"+str( expFriends) #sorted on relationship strength closeFriends = friendMgr.selectAllFriends(user) print "\n\nsorted friendlist from highest relationship strength :\n \n"+str(closeFriends) #merge the lists mixList=self.mixLists(closeFriends,expFriends) #perpare final list 
finalList =[] for item in mixList: friend={'friend':str(item)} finalList.append(friend) return finalList #merge two user lists with same length and remove reducdency def mixLists(self,closeFriends,expFriends): finalList=[] for OutItem in closeFriends: chk =finalList.count(OutItem['email']) if chk==0: finalList.append(OutItem['email']) else:continue for InItem in expFriends: chkIn =finalList.count(InItem['email']) if chkIn ==0: if OutItem!=InItem: finalList.append(InItem['email']) break else:continue return finalList #suggest New Friends for User def suggestNewFriends(self,email): #get favourite product category fMgr =FriendshipManagerFRS() categories =self.sortUserPreferences(email) maxCat = self.getMaxCategory(categories) print "product category favourations of user: "+str(email)+" are :\n"+str(categories) print "most favourite category of user is : cat-"+str(maxCat) print '\ntherefore the friends of friends of user are suggested sorted according to this category value' key =0 value=0 for index in maxCat: key = index value = maxCat[index] break category= "cat"+str(key) #select the friends of friends with favouration to the same category(sorted desc) which user node does candidates =fMgr.selectFriendsForFriendship(email,category) #dispatch #for user in candidates: # print str(user['email'])+"->"+str(user[category]) return candidates #replace existing relationship with random values #def replaceRandomValuesInRel(self,fEmail,sEmail): # #generating values # fMgr = FriendshipManagerFRS() # dates = fMgr.getRandomDate() # chats = int(random.uniform(0,200)) # nVotes = int(random.uniform(0,200)) # pVotes = int(random.uniform(0,200)) # score= 0 # try: # #update relationship # fMgr.upgradeRelationship(fEmail,sEmail,"chats",str(chats)) # fMgr.upgradeRelationship(fEmail,sEmail,"nVotes",str(nVotes)) # fMgr.upgradeRelationship(fEmail,sEmail,"pVotes",str(pVotes)) # fMgr.upgradeRelationship(fEmail,sEmail,"started",str(dates[0])) # 
fMgr.upgradeRelationship(fEmail,sEmail,"triggered",str(dates[1])) # fMgr.upgradeRelationship(fEmail,sEmail,"duration",str(dates[2])) # except Exception ,e: # print e.message # return False # finally: # #calculate and save frienship score # score = self.makeFriendshipScore(fEmail,sEmail) # print "Strength"+str(score) # fMgr.upgradeRelationship(fEmail,sEmail,"strength",str(score)) #build friend of friend network #users from uid 1-104 will be linked with users between uid 200-250 #def buildFriendsOfFriendNetwork(self): # email = "kalana331@gmail.com" # uMgr =UserManagerFRS() # fMgr =FriendshipManagerFRS() # friends = fMgr.selectAllFriends(email) # for friend in friends: # fEmail= str( friend['email']) # fUid = uMgr.getUserId(fEmail) # if fUid!=0: # #pick random user # randId= random.uniform(200,250) # randId=str(int(randId)) # uEmail =str(uMgr.getUserEmail(randId)) # fMgr.makeNewFriendship(fEmail,uEmail) # self.replaceRandomValuesInRel(fEmail,uEmail) # print str(fUid)+"->"+str(randId)+";"+str(fEmail)+"->"+str(uEmail) #remove freind of friend network # all relationships from uid 1-103 linked with users between uid 200-250 get removed #def destructFriendOfFriendNetwork(self): # uMgr = UserManagerFRS() # #create user ids # for userId in range(200,251): # userId =str(userId) # uEmail = str(uMgr.getUserEmail(userId)) # print uEmail # try: # status=False # status = uMgr.removeAllRels(uEmail) # except Exception,e: # print e.message # print uEmail+" failed" # continue # finally:print status
SuggestionManagerFRS =self
identifier_body
SuggestionManagerFRS.py
#This class implements methods which measure friendship between two users and make appropriate suggestion lists of users #to add in chat sessions and add as friends #The friendship is measured based on no of interactions between users against time #Therefore following class implements a scoring model to measure friend-ship between two user nodes #Inorder to measure the friendship it uses # ~ No of chats between user # ~ Duration of relationships from DataAccess import FriendshipManagerFRS from DataAccess import UserManagerFRS import operator import json import random class
(object): def __init__(self): SuggestionManagerFRS =self def makeFriendshipScore(self,email,friend): """ score friendship """ # * the total score will be out of 100 % # * weight for chat frequencey is 60% # * weight for duration from last chat 40% friendMgr =FriendshipManagerFRS() array = friendMgr.selectRelationship(email,friend) c=0 try: if len(array)==5: c+=1 #print "\n Id:"+str(c) #print"User: %s"% str(i['email']) #print "Dur:%s days from last chat" % str(i['duration']) #print "Chats: %s chats"% str(i['chats']) total=-1 chatVoteScore=self.scoreChatsNVotes(array[0],array[2],array[3]) durationScore=self.scoreDur(array[1]) total =self.calculateTotalScore(chatVoteScore,durationScore) #deduct / add by votes #print "chat score %s"% str(chatScore) #print "duration score %s" % str(durationScore) #print "total score %s"%str(float(total)/float(100)) "return score" return float(total)/100.0 except Exception, e: print str(e.message) def scoreChatsNVotes(self,chats,pvotes,nvotes): "Score Chats represents the affnity score" # 0 : 0 # >250 : 60 # 0-25 : 10 #25-50 : 20 #50-150 :30 #150-250:40 #250-500:50 chats =int(chats) pvotes= int(pvotes) nvotes = int(nvotes) if chats == 0: #no chats no marks if pvotes>nvotes:#if pvotes>=nvotes socre:+5 only ;else return 0 return 5 else:return 0 if 500<chats: # chats more than 250 full marks if nvotes>pvotes :#chats ?votes-5only return 55 else:return 60 score=0 if 0<chats and chats <= 25: score= 10 elif 25<chats and chats<=50: score= 20 elif chats<50 and chats<=150: score= 30 elif 150<chats and chats<=250: score= 40 elif 250<chats and chats<=500: score =50 score=self.voteHandler(score,pvotes,nvotes) return score #score for votes def voteHandler(self,score,pv,nv): pv =int(pv) nv= int(nv) if score>=5: if pv> nv: score+=5 elif pv<nv: score-=5 return score #score Duration def scoreDur(self,dur): "duration represents time decay" dur =int(dur) if 730 <dur: #more than 2 years return 0 if dur ==0: # today return 40 if 0<dur and dur<=182: #less than 
6 months return 30 elif 182<dur and 365>=dur: # 6 month - 1 year return 20 elif 365<dur and 730>=dur:# 1 year - 2 years return 10 #calculate cumulative score def calculateTotalScore(self,chat,duration): if chat!=None and duration != None: return chat+ duration else: if chat ==None: chat =0 if duration == None: duration = 0 return duration + chat #sort the 8 product category values of user in decending order def sortUserPreferences(self,user): userMgr =UserManagerFRS() categories = userMgr.getCategoryExp(user) #sort list if len(categories)!=0: categories=sorted(categories.items(), key=operator.itemgetter(1)) return categories #get maximum category out of given list of product categories def getMaxCategory(self,categories): categories = dict(categories) maxIndex=max(categories.items(), key=operator.itemgetter(1))[0] # index of max. maxValue =max(categories.items(), key=operator.itemgetter(1))[1] # max. value maxCat ={maxIndex:maxValue} return maxCat #update all the relationship strength of user def upgradeRelationshipStrength(self,user): fMgr = FriendshipManagerFRS() user =str(user) allFriends = fMgr.selectAllFriends(user) for friend in allFriends: score=0 try: email = str(friend['email']) score =self.makeFriendshipScore(user,email) #calculate friendship score fMgr.upgradeRelationship(user,email,"strength",score) except Exception,e: print str(e.message)+" -2" continue finally:print email+"-> STR:"+str(score) #refine selected list of users for chat def refineChatList(self,user,catId): categoryKey ="cat"+catId friendMgr = FriendshipManagerFRS() uMgr =UserManagerFRS() #sorted on expereince about product category expFriends = friendMgr.selectFriendsForChatOnExp(user,categoryKey) print "sorted friendlist from highest experience about product category: \n \n"+str( expFriends) #sorted on relationship strength closeFriends = friendMgr.selectAllFriends(user) print "\n\nsorted friendlist from highest relationship strength :\n \n"+str(closeFriends) #merge the lists 
mixList=self.mixLists(closeFriends,expFriends) #perpare final list finalList =[] for item in mixList: friend={'friend':str(item)} finalList.append(friend) return finalList #merge two user lists with same length and remove reducdency def mixLists(self,closeFriends,expFriends): finalList=[] for OutItem in closeFriends: chk =finalList.count(OutItem['email']) if chk==0: finalList.append(OutItem['email']) else:continue for InItem in expFriends: chkIn =finalList.count(InItem['email']) if chkIn ==0: if OutItem!=InItem: finalList.append(InItem['email']) break else:continue return finalList #suggest New Friends for User def suggestNewFriends(self,email): #get favourite product category fMgr =FriendshipManagerFRS() categories =self.sortUserPreferences(email) maxCat = self.getMaxCategory(categories) print "product category favourations of user: "+str(email)+" are :\n"+str(categories) print "most favourite category of user is : cat-"+str(maxCat) print '\ntherefore the friends of friends of user are suggested sorted according to this category value' key =0 value=0 for index in maxCat: key = index value = maxCat[index] break category= "cat"+str(key) #select the friends of friends with favouration to the same category(sorted desc) which user node does candidates =fMgr.selectFriendsForFriendship(email,category) #dispatch #for user in candidates: # print str(user['email'])+"->"+str(user[category]) return candidates #replace existing relationship with random values #def replaceRandomValuesInRel(self,fEmail,sEmail): # #generating values # fMgr = FriendshipManagerFRS() # dates = fMgr.getRandomDate() # chats = int(random.uniform(0,200)) # nVotes = int(random.uniform(0,200)) # pVotes = int(random.uniform(0,200)) # score= 0 # try: # #update relationship # fMgr.upgradeRelationship(fEmail,sEmail,"chats",str(chats)) # fMgr.upgradeRelationship(fEmail,sEmail,"nVotes",str(nVotes)) # fMgr.upgradeRelationship(fEmail,sEmail,"pVotes",str(pVotes)) # 
fMgr.upgradeRelationship(fEmail,sEmail,"started",str(dates[0])) # fMgr.upgradeRelationship(fEmail,sEmail,"triggered",str(dates[1])) # fMgr.upgradeRelationship(fEmail,sEmail,"duration",str(dates[2])) # except Exception ,e: # print e.message # return False # finally: # #calculate and save frienship score # score = self.makeFriendshipScore(fEmail,sEmail) # print "Strength"+str(score) # fMgr.upgradeRelationship(fEmail,sEmail,"strength",str(score)) #build friend of friend network #users from uid 1-104 will be linked with users between uid 200-250 #def buildFriendsOfFriendNetwork(self): # email = "kalana331@gmail.com" # uMgr =UserManagerFRS() # fMgr =FriendshipManagerFRS() # friends = fMgr.selectAllFriends(email) # for friend in friends: # fEmail= str( friend['email']) # fUid = uMgr.getUserId(fEmail) # if fUid!=0: # #pick random user # randId= random.uniform(200,250) # randId=str(int(randId)) # uEmail =str(uMgr.getUserEmail(randId)) # fMgr.makeNewFriendship(fEmail,uEmail) # self.replaceRandomValuesInRel(fEmail,uEmail) # print str(fUid)+"->"+str(randId)+";"+str(fEmail)+"->"+str(uEmail) #remove freind of friend network # all relationships from uid 1-103 linked with users between uid 200-250 get removed #def destructFriendOfFriendNetwork(self): # uMgr = UserManagerFRS() # #create user ids # for userId in range(200,251): # userId =str(userId) # uEmail = str(uMgr.getUserEmail(userId)) # print uEmail # try: # status=False # status = uMgr.removeAllRels(uEmail) # except Exception,e: # print e.message # print uEmail+" failed" # continue # finally:print status
SuggestionManagerFRS
identifier_name
SuggestionManagerFRS.py
#This class implements methods which measure friendship between two users and make appropriate suggestion lists of users #to add in chat sessions and add as friends #The friendship is measured based on no of interactions between users against time #Therefore following class implements a scoring model to measure friend-ship between two user nodes #Inorder to measure the friendship it uses # ~ No of chats between user # ~ Duration of relationships from DataAccess import FriendshipManagerFRS from DataAccess import UserManagerFRS import operator import json import random class SuggestionManagerFRS(object): def __init__(self): SuggestionManagerFRS =self def makeFriendshipScore(self,email,friend): """ score friendship """ # * the total score will be out of 100 % # * weight for chat frequencey is 60% # * weight for duration from last chat 40% friendMgr =FriendshipManagerFRS() array = friendMgr.selectRelationship(email,friend) c=0 try: if len(array)==5: c+=1 #print "\n Id:"+str(c) #print"User: %s"% str(i['email']) #print "Dur:%s days from last chat" % str(i['duration']) #print "Chats: %s chats"% str(i['chats']) total=-1 chatVoteScore=self.scoreChatsNVotes(array[0],array[2],array[3]) durationScore=self.scoreDur(array[1]) total =self.calculateTotalScore(chatVoteScore,durationScore) #deduct / add by votes #print "chat score %s"% str(chatScore) #print "duration score %s" % str(durationScore) #print "total score %s"%str(float(total)/float(100)) "return score" return float(total)/100.0 except Exception, e: print str(e.message) def scoreChatsNVotes(self,chats,pvotes,nvotes): "Score Chats represents the affnity score" # 0 : 0 # >250 : 60 # 0-25 : 10 #25-50 : 20 #50-150 :30 #150-250:40 #250-500:50 chats =int(chats) pvotes= int(pvotes) nvotes = int(nvotes) if chats == 0: #no chats no marks if pvotes>nvotes:#if pvotes>=nvotes socre:+5 only ;else return 0 return 5 else:return 0 if 500<chats: # chats more than 250 full marks if nvotes>pvotes :#chats ?votes-5only return 55 else:return 
60 score=0 if 0<chats and chats <= 25: score= 10 elif 25<chats and chats<=50: score= 20 elif chats<50 and chats<=150: score= 30 elif 150<chats and chats<=250: score= 40 elif 250<chats and chats<=500: score =50 score=self.voteHandler(score,pvotes,nvotes) return score #score for votes def voteHandler(self,score,pv,nv): pv =int(pv) nv= int(nv) if score>=5: if pv> nv: score+=5 elif pv<nv:
return score #score Duration def scoreDur(self,dur): "duration represents time decay" dur =int(dur) if 730 <dur: #more than 2 years return 0 if dur ==0: # today return 40 if 0<dur and dur<=182: #less than 6 months return 30 elif 182<dur and 365>=dur: # 6 month - 1 year return 20 elif 365<dur and 730>=dur:# 1 year - 2 years return 10 #calculate cumulative score def calculateTotalScore(self,chat,duration): if chat!=None and duration != None: return chat+ duration else: if chat ==None: chat =0 if duration == None: duration = 0 return duration + chat #sort the 8 product category values of user in decending order def sortUserPreferences(self,user): userMgr =UserManagerFRS() categories = userMgr.getCategoryExp(user) #sort list if len(categories)!=0: categories=sorted(categories.items(), key=operator.itemgetter(1)) return categories #get maximum category out of given list of product categories def getMaxCategory(self,categories): categories = dict(categories) maxIndex=max(categories.items(), key=operator.itemgetter(1))[0] # index of max. maxValue =max(categories.items(), key=operator.itemgetter(1))[1] # max. 
value maxCat ={maxIndex:maxValue} return maxCat #update all the relationship strength of user def upgradeRelationshipStrength(self,user): fMgr = FriendshipManagerFRS() user =str(user) allFriends = fMgr.selectAllFriends(user) for friend in allFriends: score=0 try: email = str(friend['email']) score =self.makeFriendshipScore(user,email) #calculate friendship score fMgr.upgradeRelationship(user,email,"strength",score) except Exception,e: print str(e.message)+" -2" continue finally:print email+"-> STR:"+str(score) #refine selected list of users for chat def refineChatList(self,user,catId): categoryKey ="cat"+catId friendMgr = FriendshipManagerFRS() uMgr =UserManagerFRS() #sorted on expereince about product category expFriends = friendMgr.selectFriendsForChatOnExp(user,categoryKey) print "sorted friendlist from highest experience about product category: \n \n"+str( expFriends) #sorted on relationship strength closeFriends = friendMgr.selectAllFriends(user) print "\n\nsorted friendlist from highest relationship strength :\n \n"+str(closeFriends) #merge the lists mixList=self.mixLists(closeFriends,expFriends) #perpare final list finalList =[] for item in mixList: friend={'friend':str(item)} finalList.append(friend) return finalList #merge two user lists with same length and remove reducdency def mixLists(self,closeFriends,expFriends): finalList=[] for OutItem in closeFriends: chk =finalList.count(OutItem['email']) if chk==0: finalList.append(OutItem['email']) else:continue for InItem in expFriends: chkIn =finalList.count(InItem['email']) if chkIn ==0: if OutItem!=InItem: finalList.append(InItem['email']) break else:continue return finalList #suggest New Friends for User def suggestNewFriends(self,email): #get favourite product category fMgr =FriendshipManagerFRS() categories =self.sortUserPreferences(email) maxCat = self.getMaxCategory(categories) print "product category favourations of user: "+str(email)+" are :\n"+str(categories) print "most favourite category of user 
is : cat-"+str(maxCat) print '\ntherefore the friends of friends of user are suggested sorted according to this category value' key =0 value=0 for index in maxCat: key = index value = maxCat[index] break category= "cat"+str(key) #select the friends of friends with favouration to the same category(sorted desc) which user node does candidates =fMgr.selectFriendsForFriendship(email,category) #dispatch #for user in candidates: # print str(user['email'])+"->"+str(user[category]) return candidates #replace existing relationship with random values #def replaceRandomValuesInRel(self,fEmail,sEmail): # #generating values # fMgr = FriendshipManagerFRS() # dates = fMgr.getRandomDate() # chats = int(random.uniform(0,200)) # nVotes = int(random.uniform(0,200)) # pVotes = int(random.uniform(0,200)) # score= 0 # try: # #update relationship # fMgr.upgradeRelationship(fEmail,sEmail,"chats",str(chats)) # fMgr.upgradeRelationship(fEmail,sEmail,"nVotes",str(nVotes)) # fMgr.upgradeRelationship(fEmail,sEmail,"pVotes",str(pVotes)) # fMgr.upgradeRelationship(fEmail,sEmail,"started",str(dates[0])) # fMgr.upgradeRelationship(fEmail,sEmail,"triggered",str(dates[1])) # fMgr.upgradeRelationship(fEmail,sEmail,"duration",str(dates[2])) # except Exception ,e: # print e.message # return False # finally: # #calculate and save frienship score # score = self.makeFriendshipScore(fEmail,sEmail) # print "Strength"+str(score) # fMgr.upgradeRelationship(fEmail,sEmail,"strength",str(score)) #build friend of friend network #users from uid 1-104 will be linked with users between uid 200-250 #def buildFriendsOfFriendNetwork(self): # email = "kalana331@gmail.com" # uMgr =UserManagerFRS() # fMgr =FriendshipManagerFRS() # friends = fMgr.selectAllFriends(email) # for friend in friends: # fEmail= str( friend['email']) # fUid = uMgr.getUserId(fEmail) # if fUid!=0: # #pick random user # randId= random.uniform(200,250) # randId=str(int(randId)) # uEmail =str(uMgr.getUserEmail(randId)) # 
fMgr.makeNewFriendship(fEmail,uEmail) # self.replaceRandomValuesInRel(fEmail,uEmail) # print str(fUid)+"->"+str(randId)+";"+str(fEmail)+"->"+str(uEmail) #remove freind of friend network # all relationships from uid 1-103 linked with users between uid 200-250 get removed #def destructFriendOfFriendNetwork(self): # uMgr = UserManagerFRS() # #create user ids # for userId in range(200,251): # userId =str(userId) # uEmail = str(uMgr.getUserEmail(userId)) # print uEmail # try: # status=False # status = uMgr.removeAllRels(uEmail) # except Exception,e: # print e.message # print uEmail+" failed" # continue # finally:print status
score-=5
conditional_block
SuggestionManagerFRS.py
#This class implements methods which measure friendship between two users and make appropriate suggestion lists of users #to add in chat sessions and add as friends #The friendship is measured based on no of interactions between users against time #Therefore following class implements a scoring model to measure friend-ship between two user nodes #Inorder to measure the friendship it uses # ~ No of chats between user # ~ Duration of relationships from DataAccess import FriendshipManagerFRS from DataAccess import UserManagerFRS import operator import json import random class SuggestionManagerFRS(object): def __init__(self): SuggestionManagerFRS =self def makeFriendshipScore(self,email,friend): """ score friendship """ # * the total score will be out of 100 % # * weight for chat frequencey is 60% # * weight for duration from last chat 40% friendMgr =FriendshipManagerFRS() array = friendMgr.selectRelationship(email,friend) c=0 try: if len(array)==5: c+=1 #print "\n Id:"+str(c) #print"User: %s"% str(i['email']) #print "Dur:%s days from last chat" % str(i['duration']) #print "Chats: %s chats"% str(i['chats']) total=-1 chatVoteScore=self.scoreChatsNVotes(array[0],array[2],array[3]) durationScore=self.scoreDur(array[1]) total =self.calculateTotalScore(chatVoteScore,durationScore) #deduct / add by votes #print "chat score %s"% str(chatScore) #print "duration score %s" % str(durationScore) #print "total score %s"%str(float(total)/float(100)) "return score" return float(total)/100.0 except Exception, e: print str(e.message) def scoreChatsNVotes(self,chats,pvotes,nvotes): "Score Chats represents the affnity score" # 0 : 0 # >250 : 60 # 0-25 : 10 #25-50 : 20 #50-150 :30 #150-250:40 #250-500:50 chats =int(chats) pvotes= int(pvotes) nvotes = int(nvotes) if chats == 0: #no chats no marks if pvotes>nvotes:#if pvotes>=nvotes socre:+5 only ;else return 0 return 5 else:return 0 if 500<chats: # chats more than 250 full marks if nvotes>pvotes :#chats ?votes-5only return 55 else:return 
60 score=0 if 0<chats and chats <= 25: score= 10 elif 25<chats and chats<=50: score= 20 elif chats<50 and chats<=150: score= 30 elif 150<chats and chats<=250: score= 40 elif 250<chats and chats<=500: score =50 score=self.voteHandler(score,pvotes,nvotes) return score #score for votes def voteHandler(self,score,pv,nv): pv =int(pv) nv= int(nv) if score>=5: if pv> nv: score+=5 elif pv<nv: score-=5 return score #score Duration def scoreDur(self,dur): "duration represents time decay" dur =int(dur) if 730 <dur: #more than 2 years return 0 if dur ==0: # today return 40 if 0<dur and dur<=182: #less than 6 months return 30 elif 182<dur and 365>=dur: # 6 month - 1 year return 20 elif 365<dur and 730>=dur:# 1 year - 2 years return 10 #calculate cumulative score def calculateTotalScore(self,chat,duration): if chat!=None and duration != None: return chat+ duration else: if chat ==None: chat =0 if duration == None: duration = 0 return duration + chat #sort the 8 product category values of user in decending order def sortUserPreferences(self,user): userMgr =UserManagerFRS() categories = userMgr.getCategoryExp(user) #sort list if len(categories)!=0: categories=sorted(categories.items(), key=operator.itemgetter(1)) return categories #get maximum category out of given list of product categories def getMaxCategory(self,categories): categories = dict(categories) maxIndex=max(categories.items(), key=operator.itemgetter(1))[0] # index of max. maxValue =max(categories.items(), key=operator.itemgetter(1))[1] # max. value
#update all the relationship strength of user def upgradeRelationshipStrength(self,user): fMgr = FriendshipManagerFRS() user =str(user) allFriends = fMgr.selectAllFriends(user) for friend in allFriends: score=0 try: email = str(friend['email']) score =self.makeFriendshipScore(user,email) #calculate friendship score fMgr.upgradeRelationship(user,email,"strength",score) except Exception,e: print str(e.message)+" -2" continue finally:print email+"-> STR:"+str(score) #refine selected list of users for chat def refineChatList(self,user,catId): categoryKey ="cat"+catId friendMgr = FriendshipManagerFRS() uMgr =UserManagerFRS() #sorted on expereince about product category expFriends = friendMgr.selectFriendsForChatOnExp(user,categoryKey) print "sorted friendlist from highest experience about product category: \n \n"+str( expFriends) #sorted on relationship strength closeFriends = friendMgr.selectAllFriends(user) print "\n\nsorted friendlist from highest relationship strength :\n \n"+str(closeFriends) #merge the lists mixList=self.mixLists(closeFriends,expFriends) #perpare final list finalList =[] for item in mixList: friend={'friend':str(item)} finalList.append(friend) return finalList #merge two user lists with same length and remove reducdency def mixLists(self,closeFriends,expFriends): finalList=[] for OutItem in closeFriends: chk =finalList.count(OutItem['email']) if chk==0: finalList.append(OutItem['email']) else:continue for InItem in expFriends: chkIn =finalList.count(InItem['email']) if chkIn ==0: if OutItem!=InItem: finalList.append(InItem['email']) break else:continue return finalList #suggest New Friends for User def suggestNewFriends(self,email): #get favourite product category fMgr =FriendshipManagerFRS() categories =self.sortUserPreferences(email) maxCat = self.getMaxCategory(categories) print "product category favourations of user: "+str(email)+" are :\n"+str(categories) print "most favourite category of user is : cat-"+str(maxCat) print '\ntherefore the 
friends of friends of user are suggested sorted according to this category value' key =0 value=0 for index in maxCat: key = index value = maxCat[index] break category= "cat"+str(key) #select the friends of friends with favouration to the same category(sorted desc) which user node does candidates =fMgr.selectFriendsForFriendship(email,category) #dispatch #for user in candidates: # print str(user['email'])+"->"+str(user[category]) return candidates #replace existing relationship with random values #def replaceRandomValuesInRel(self,fEmail,sEmail): # #generating values # fMgr = FriendshipManagerFRS() # dates = fMgr.getRandomDate() # chats = int(random.uniform(0,200)) # nVotes = int(random.uniform(0,200)) # pVotes = int(random.uniform(0,200)) # score= 0 # try: # #update relationship # fMgr.upgradeRelationship(fEmail,sEmail,"chats",str(chats)) # fMgr.upgradeRelationship(fEmail,sEmail,"nVotes",str(nVotes)) # fMgr.upgradeRelationship(fEmail,sEmail,"pVotes",str(pVotes)) # fMgr.upgradeRelationship(fEmail,sEmail,"started",str(dates[0])) # fMgr.upgradeRelationship(fEmail,sEmail,"triggered",str(dates[1])) # fMgr.upgradeRelationship(fEmail,sEmail,"duration",str(dates[2])) # except Exception ,e: # print e.message # return False # finally: # #calculate and save frienship score # score = self.makeFriendshipScore(fEmail,sEmail) # print "Strength"+str(score) # fMgr.upgradeRelationship(fEmail,sEmail,"strength",str(score)) #build friend of friend network #users from uid 1-104 will be linked with users between uid 200-250 #def buildFriendsOfFriendNetwork(self): # email = "kalana331@gmail.com" # uMgr =UserManagerFRS() # fMgr =FriendshipManagerFRS() # friends = fMgr.selectAllFriends(email) # for friend in friends: # fEmail= str( friend['email']) # fUid = uMgr.getUserId(fEmail) # if fUid!=0: # #pick random user # randId= random.uniform(200,250) # randId=str(int(randId)) # uEmail =str(uMgr.getUserEmail(randId)) # fMgr.makeNewFriendship(fEmail,uEmail) # 
self.replaceRandomValuesInRel(fEmail,uEmail) # print str(fUid)+"->"+str(randId)+";"+str(fEmail)+"->"+str(uEmail) #remove freind of friend network # all relationships from uid 1-103 linked with users between uid 200-250 get removed #def destructFriendOfFriendNetwork(self): # uMgr = UserManagerFRS() # #create user ids # for userId in range(200,251): # userId =str(userId) # uEmail = str(uMgr.getUserEmail(userId)) # print uEmail # try: # status=False # status = uMgr.removeAllRels(uEmail) # except Exception,e: # print e.message # print uEmail+" failed" # continue # finally:print status
maxCat ={maxIndex:maxValue} return maxCat
random_line_split
proxy.go
// Copyright 2021 BoCloud // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package proxy import ( "context" "reflect" "sync" "time" "github.com/go-logr/logr" "github.com/jjeffery/stringset" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" "github.com/fabedge/fabedge/pkg/operator/predicates" ) const ( LabelServiceName = "kubernetes.io/service-name" LabelHostname = "kubernetes.io/hostname" ) // type shortcuts type ( EndpointSlice = discoveryv1.EndpointSlice Result = reconcile.Result ObjectKey = client.ObjectKey ) type Config struct { Manager manager.Manager // the namespace where agent and configmap are created AgentNamespace string IPVSScheduler string // the interval to check if agent load balance rules is consistent with configmap CheckInterval time.Duration } // proxy keep proxy rules configmap for each service which has edge endpoints. // An edge-endpoint is an endpoint which has corresponding pod on a edge node. 
type proxy struct { mu sync.Mutex serviceMap ServiceMap endpointSliceMap EndpointSliceMap nodeSet EdgeNodeSet // the namespace where agent and configmap are created namespace string keeper *loadBalanceConfigKeeper checkInterval time.Duration client client.Client log logr.Logger } func AddToManager(cnf Config) error { mgr := cnf.Manager keeper := &loadBalanceConfigKeeper{ namespace: cnf.AgentNamespace, interval: 5 * time.Second, nodeSet: make(EdgeNodeSet), ipvsScheduler: cnf.IPVSScheduler, client: mgr.GetClient(), log: mgr.GetLogger().WithName("load-balance-keeper"), } proxy := &proxy{ serviceMap: make(ServiceMap), endpointSliceMap: make(EndpointSliceMap), nodeSet: make(EdgeNodeSet), keeper: keeper, checkInterval: cnf.CheckInterval, log: mgr.GetLogger().WithName("fab-proxy"), client: mgr.GetClient(), } if err := mgr.Add(manager.RunnableFunc(keeper.Start)); err != nil { return err } if err := mgr.Add(manager.RunnableFunc(proxy.startCheckLoadBalanceRules)); err != nil { return err } err := addController( "proxy-endpointslice", mgr, proxy.OnEndpointSliceUpdate, &EndpointSlice{}, ) if err != nil { return err } err = addController( "proxy-node", mgr, proxy.onNodeUpdate, &corev1.Node{}, predicates.EdgeNodePredicate(), ) if err != nil { return err } return addController("proxy-service", mgr, proxy.OnServiceUpdate, &corev1.Service{}, ) } func addController(name string, mgr manager.Manager, reconciler reconcile.Func, watchObj client.Object, predicates ...predicate.Predicate) error { c, err := controller.New( name, mgr, controller.Options{ Reconciler: reconciler, }, ) if err != nil { return err } return c.Watch( &source.Kind{Type: watchObj}, &handler.EnqueueRequestForObject{}, predicates..., ) } func (p *proxy) onNodeUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var node corev1.Node if err := p.client.Get(ctx, request.NamespacedName, &node); err != nil { if errors.IsNotFound(err) { 
p.removeNode(request.Name) return Result{}, nil } log.Error(err, "failed to get node") return Result{}, err } if node.DeletionTimestamp != nil { p.removeNode(request.Name) return Result{}, nil } p.addNode(request.Name) return Result{}, nil } func (p *proxy) addNode(name string) { p.mu.Lock() defer p.mu.Unlock() if _, exists := p.nodeSet[name]; !exists { p.nodeSet[name] = newEdgeNode(name) } } func (p *proxy) removeNode(name string) { p.mu.Lock() defer p.mu.Unlock() delete(p.nodeSet, name) } func (p *proxy) OnServiceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var service corev1.Service if err := p.client.Get(ctx, request.NamespacedName, &service); err != nil { log.Error(err, "failed to get service") if errors.IsNotFound(err) { log.Info("service is deleted, cleanup service and endpoints") p.cleanupService(request.NamespacedName) return Result{}, nil } return Result{}, err } // if service is updated to a invalid service, we take it as deleted and cleanup related resources if p.shouldSkipService(&service) { log.Info("service has no ClusterIP, skip it", "service", service) p.cleanupService(request.NamespacedName) return Result{}, nil } changed := p.syncServiceInfoFromService(request.NamespacedName, &service) if changed { p.syncServiceChangesToAgentByKey(request.NamespacedName) } return Result{}, nil } // syncServiceInfoFromService only sync clusterIP, sessionAffinity and StickyMaxAgeSeconds as needed // if these are the same, just skip synchronizing func (p *proxy) syncServiceInfoFromService(key ObjectKey, svc *corev1.Service) bool { p.mu.Lock() defer p.mu.Unlock() oldService := p.serviceMap[key] newService := makeServiceInfo(svc) if oldService.ClusterIP == newService.ClusterIP && oldService.SessionAffinity == newService.SessionAffinity && oldService.StickyMaxAgeSeconds == newService.StickyMaxAgeSeconds { return false } oldService.ClusterIP = newService.ClusterIP 
oldService.SessionAffinity = newService.SessionAffinity oldService.StickyMaxAgeSeconds = newService.StickyMaxAgeSeconds if oldService.EndpointMap == nil { oldService.EndpointMap = make(map[Port]EndpointSet) } if oldService.EndpointToNodes == nil { oldService.EndpointToNodes = make(map[Endpoint]NodeName) } p.serviceMap[key] = oldService return true } func (p *proxy) cleanupService(serviceKey ObjectKey) { p.mu.Lock() defer p.mu.Unlock() serviceInfo, exists := p.serviceMap[serviceKey] if !exists { return } // cleanup endpoints in related edge node for port := range serviceInfo.EndpointMap { spn := ServicePortName{NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol} for _, nodeName := range serviceInfo.EndpointToNodes { node, exists := p.nodeSet[nodeName] if !exists { continue } delete(node.ServicePortMap, spn) delete(node.EndpointMap, spn) p.nodeSet[nodeName] = node } } p.syncServiceChangesToAgent(serviceInfo) delete(p.serviceMap, serviceKey) } func (p *proxy) syncServiceChangesToAgentByKey(key ObjectKey) { p.mu.Lock() defer p.mu.Unlock() serviceInfo, ok := p.serviceMap[key] if !ok { return } p.syncServiceChangesToAgent(serviceInfo) } func (p *proxy) syncServiceChangesToAgent(serviceInfo ServiceInfo) { for _, name := range serviceInfo.EndpointToNodes { node, ok := p.nodeSet[name] if !ok { continue } p.keeper.AddNode(node) } } func (p *proxy) OnEndpointSliceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var es EndpointSlice err := p.client.Get(ctx, request.NamespacedName, &es) if err != nil { if errors.IsNotFound(err) { log.Info("endpointslice is deleted, cleanup related endpoints") p.cleanupEndpointsOfEndpointSlice(request.NamespacedName) return reconcile.Result{}, nil } log.Error(err, "failed to get endpointslice") return reconcile.Result{}, err } if es.DeletionTimestamp != nil { log.Info("endpointslice is terminating, cleanup related endpoints") 
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName) return reconcile.Result{}, nil } serviceName := getServiceName(es.Labels) if serviceName == "" { log.Info("no service name found in endpointslice", "endpointslice", es) return Result{}, nil } var ( service corev1.Service serviceKey = ObjectKey{Name: serviceName, Namespace: request.Namespace} ) if err = p.client.Get(ctx, serviceKey, &service); err != nil { log.Error(err, "failed to get service") // if service is not found, we don't handle this endpointslice if errors.IsNotFound(err) { log.Info("Corresponding service is not found, cleanup service and endpoints", "serviceKey", serviceKey) p.cleanupService(serviceKey) return Result{}, nil } return Result{}, err } if p.shouldSkipService(&service) { log.Info("service has no ClusterIP, skip it", "service", service) p.cleanupService(serviceKey) return Result{}, nil } serviceChanged := p.syncServiceInfoFromService(serviceKey, &service) p.syncServiceEndpointsFromEndpointSlice(p.makeEndpointSliceInfo(&es), serviceChanged) return Result{}, nil } func (p *proxy) syncServiceEndpointsFromEndpointSlice(newES EndpointSliceInfo, serviceChanged bool) { p.mu.Lock() defer p.mu.Unlock() key, serviceKey := newES.ObjectKey, newES.ServiceKey oldES := p.endpointSliceMap[key] isSame := reflect.DeepEqual(oldES, newES) if isSame { return } serviceInfo := p.serviceMap[serviceKey] // collect node which has endpoints changes changedNodeNames := stringset.New() // add new endpoints for port := range newES.Ports { servicePortName := ServicePortName{ NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol, } endpointSet := serviceInfo.EndpointMap[port] for _, ep := range newES.Endpoints { endpoint := Endpoint{ IP: ep.IP, Port: port.Port, } endpointSet.Add(endpoint) serviceInfo.EndpointToNodes[endpoint] = ep.NodeName p.addServicePortToNode(ep.NodeName, servicePortName, ServicePort{ ClusterIP: serviceInfo.ClusterIP, Port: port.Port, Protocol: port.Protocol, SessionAffinity: 
serviceInfo.SessionAffinity, StickyMaxAgeSeconds: serviceInfo.StickyMaxAgeSeconds, }) added := p.addEndpointToNode(ep.NodeName, servicePortName, endpoint) if serviceChanged || added { changedNodeNames.Add(ep.NodeName) } } serviceInfo.EndpointMap[port] = endpointSet } // remove old endpoints for port := range oldES.Ports { _, exists := newES.Ports[port] portRemoved := !exists servicePortName := ServicePortName{ NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol, } endpointSet := serviceInfo.EndpointMap[port] for _, ep := range oldES.Endpoints { _, exist := newES.Endpoints[ep.IP] endpointRemoved := !exist if portRemoved || endpointRemoved { endpoint := Endpoint{ IP: ep.IP, Port: port.Port, } endpointSet.Remove(endpoint) delete(serviceInfo.EndpointToNodes, endpoint) p.removeEndpointFromNode(ep.NodeName, servicePortName, endpoint) changedNodeNames.Add(ep.NodeName) } if portRemoved { p.removeServicePortFromNode(ep.NodeName, servicePortName) } } if len(endpointSet) == 0 { delete(serviceInfo.EndpointMap, port) } else { serviceInfo.EndpointMap[port] = endpointSet } } p.endpointSliceMap[key] = newES p.serviceMap[serviceKey] = serviceInfo for nodeName := range changedNodeNames { node, ok := p.nodeSet[nodeName] if !ok { continue } p.keeper.AddNode(node) } } func (p *proxy) cleanupEndpointsOfEndpointSlice(key ObjectKey) { es, ok := p.getEndpointSliceInfo(key) if !ok { return } // no matter what caused cleanup, we take current endpointslice which // has empty ports and endpoints as deleted, es.Ports = make(map[Port]Empty) es.Endpoints = make(map[string]EndpointInfo) p.syncServiceEndpointsFromEndpointSlice(es, false) p.mu.Lock() delete(p.endpointSliceMap, key) p.mu.Unlock() } func (p *proxy) startCheckLoadBalanceRules(ctx context.Context) error { tick := time.NewTicker(p.checkInterval) for { select { case <-tick.C: for _, node := range p.nodeSet { p.keeper.AddNodeIfNotPresent(node) } case <-ctx.Done(): return nil } } } func (p *proxy) addEndpointToNode(nodeName 
string, spn ServicePortName, endpoint Endpoint) bool
func (p *proxy) removeEndpointFromNode(nodeName string, spn ServicePortName, endpoint Endpoint) { node, ok := p.nodeSet[nodeName] if !ok { return } eps := node.EndpointMap[spn] eps.Remove(endpoint) if len(eps) == 0 { delete(node.EndpointMap, spn) } p.nodeSet[nodeName] = node } func (p *proxy) addServicePortToNode(nodeName string, spn ServicePortName, servicePort ServicePort) { node, ok := p.nodeSet[nodeName] if !ok { node = newEdgeNode(nodeName) } node.ServicePortMap[spn] = servicePort p.nodeSet[nodeName] = node } func (p *proxy) removeServicePortFromNode(nodeName string, spn ServicePortName) { node, ok := p.nodeSet[nodeName] if !ok { return } delete(node.ServicePortMap, spn) p.nodeSet[nodeName] = node } func (p *proxy) makeEndpointSliceInfo(es *EndpointSlice) EndpointSliceInfo { info := EndpointSliceInfo{ ObjectKey: ObjectKey{ Name: es.Name, Namespace: es.Namespace, }, ServiceKey: ObjectKey{ Name: getServiceName(es.Labels), Namespace: es.Namespace, }, Ports: make(map[Port]Empty), Endpoints: make(map[string]EndpointInfo), } for _, port := range es.Ports { p := Port{ Port: *port.Port, Protocol: *port.Protocol, } info.Ports[p] = Empty{} } for _, ep := range es.Endpoints { nodeName := getHostname(&ep) if _, exists := p.nodeSet[nodeName]; !exists { continue } // 在边缘场景, endpoint的稳定性有些问题, 会导致conditions.Ready的状态反复变化 // 暂时原因不明,所以我们不考虑这个问题 // todo: 处理网络抖动导致的endpoint不稳定情况 info.Endpoints[ep.Addresses[0]] = EndpointInfo{ IP: ep.Addresses[0], NodeName: nodeName, } } return info } func (p *proxy) getEndpointSliceInfo(key ObjectKey) (EndpointSliceInfo, bool) { p.mu.Lock() defer p.mu.Unlock() es, ok := p.endpointSliceMap[key] return es, ok } func (p *proxy) shouldSkipService(svc *corev1.Service) bool { if svc.Spec.Type != corev1.ServiceTypeClusterIP { return true } if svc.Spec.ClusterIP == corev1.ClusterIPNone || svc.Spec.ClusterIP == "" { return true } if svc.Spec.Selector == nil || len(svc.Spec.Selector) == 0 { return true } return false } func makeServiceInfo(svc 
*corev1.Service) ServiceInfo { var stickyMaxAgeSeconds int32 if svc.Spec.SessionAffinity == corev1.ServiceAffinityClientIP { // Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP stickyMaxAgeSeconds = *svc.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds } return ServiceInfo{ ClusterIP: svc.Spec.ClusterIP, SessionAffinity: svc.Spec.SessionAffinity, StickyMaxAgeSeconds: stickyMaxAgeSeconds, } } func getValueByKey(data map[string]string, key string) string { if data == nil { return "" } return data[key] } func getServiceName(data map[string]string) string { return getValueByKey(data, LabelServiceName) } func getHostname(endpoint *discoveryv1.Endpoint) string { if endpoint.NodeName != nil && *endpoint.NodeName != "" { return *endpoint.NodeName } return getValueByKey(endpoint.Topology, LabelHostname) } func newEdgeNode(name string) EdgeNode { return EdgeNode{ Name: name, ServicePortMap: make(map[ServicePortName]ServicePort), EndpointMap: make(map[ServicePortName]EndpointSet), } }
{ node, ok := p.nodeSet[nodeName] if !ok { node = newEdgeNode(nodeName) } endpointSet := node.EndpointMap[spn] if endpointSet.Contains(endpoint) { return false } endpointSet.Add(endpoint) node.EndpointMap[spn] = endpointSet p.nodeSet[nodeName] = node return true }
identifier_body
proxy.go
// Copyright 2021 BoCloud // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package proxy import ( "context" "reflect" "sync" "time" "github.com/go-logr/logr" "github.com/jjeffery/stringset" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" "github.com/fabedge/fabedge/pkg/operator/predicates" ) const ( LabelServiceName = "kubernetes.io/service-name" LabelHostname = "kubernetes.io/hostname" ) // type shortcuts type ( EndpointSlice = discoveryv1.EndpointSlice Result = reconcile.Result ObjectKey = client.ObjectKey ) type Config struct { Manager manager.Manager // the namespace where agent and configmap are created AgentNamespace string IPVSScheduler string // the interval to check if agent load balance rules is consistent with configmap CheckInterval time.Duration } // proxy keep proxy rules configmap for each service which has edge endpoints. // An edge-endpoint is an endpoint which has corresponding pod on a edge node. 
type proxy struct { mu sync.Mutex serviceMap ServiceMap endpointSliceMap EndpointSliceMap nodeSet EdgeNodeSet // the namespace where agent and configmap are created namespace string keeper *loadBalanceConfigKeeper checkInterval time.Duration client client.Client log logr.Logger } func AddToManager(cnf Config) error { mgr := cnf.Manager keeper := &loadBalanceConfigKeeper{ namespace: cnf.AgentNamespace, interval: 5 * time.Second, nodeSet: make(EdgeNodeSet), ipvsScheduler: cnf.IPVSScheduler, client: mgr.GetClient(), log: mgr.GetLogger().WithName("load-balance-keeper"), } proxy := &proxy{ serviceMap: make(ServiceMap), endpointSliceMap: make(EndpointSliceMap), nodeSet: make(EdgeNodeSet), keeper: keeper, checkInterval: cnf.CheckInterval, log: mgr.GetLogger().WithName("fab-proxy"), client: mgr.GetClient(), } if err := mgr.Add(manager.RunnableFunc(keeper.Start)); err != nil { return err } if err := mgr.Add(manager.RunnableFunc(proxy.startCheckLoadBalanceRules)); err != nil { return err } err := addController( "proxy-endpointslice", mgr, proxy.OnEndpointSliceUpdate, &EndpointSlice{}, ) if err != nil { return err } err = addController( "proxy-node", mgr, proxy.onNodeUpdate, &corev1.Node{}, predicates.EdgeNodePredicate(), ) if err != nil { return err } return addController("proxy-service", mgr, proxy.OnServiceUpdate, &corev1.Service{}, ) } func addController(name string, mgr manager.Manager, reconciler reconcile.Func, watchObj client.Object, predicates ...predicate.Predicate) error { c, err := controller.New( name, mgr, controller.Options{ Reconciler: reconciler, }, ) if err != nil { return err } return c.Watch( &source.Kind{Type: watchObj}, &handler.EnqueueRequestForObject{}, predicates..., ) } func (p *proxy) onNodeUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var node corev1.Node if err := p.client.Get(ctx, request.NamespacedName, &node); err != nil { if errors.IsNotFound(err) { 
p.removeNode(request.Name) return Result{}, nil } log.Error(err, "failed to get node") return Result{}, err } if node.DeletionTimestamp != nil { p.removeNode(request.Name) return Result{}, nil } p.addNode(request.Name) return Result{}, nil } func (p *proxy) addNode(name string) { p.mu.Lock() defer p.mu.Unlock() if _, exists := p.nodeSet[name]; !exists { p.nodeSet[name] = newEdgeNode(name) } } func (p *proxy) removeNode(name string) { p.mu.Lock() defer p.mu.Unlock() delete(p.nodeSet, name) } func (p *proxy) OnServiceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var service corev1.Service if err := p.client.Get(ctx, request.NamespacedName, &service); err != nil { log.Error(err, "failed to get service") if errors.IsNotFound(err) { log.Info("service is deleted, cleanup service and endpoints") p.cleanupService(request.NamespacedName) return Result{}, nil } return Result{}, err } // if service is updated to a invalid service, we take it as deleted and cleanup related resources if p.shouldSkipService(&service) { log.Info("service has no ClusterIP, skip it", "service", service) p.cleanupService(request.NamespacedName) return Result{}, nil } changed := p.syncServiceInfoFromService(request.NamespacedName, &service) if changed { p.syncServiceChangesToAgentByKey(request.NamespacedName) } return Result{}, nil } // syncServiceInfoFromService only sync clusterIP, sessionAffinity and StickyMaxAgeSeconds as needed // if these are the same, just skip synchronizing func (p *proxy) syncServiceInfoFromService(key ObjectKey, svc *corev1.Service) bool { p.mu.Lock() defer p.mu.Unlock() oldService := p.serviceMap[key] newService := makeServiceInfo(svc) if oldService.ClusterIP == newService.ClusterIP && oldService.SessionAffinity == newService.SessionAffinity && oldService.StickyMaxAgeSeconds == newService.StickyMaxAgeSeconds { return false } oldService.ClusterIP = newService.ClusterIP 
oldService.SessionAffinity = newService.SessionAffinity oldService.StickyMaxAgeSeconds = newService.StickyMaxAgeSeconds if oldService.EndpointMap == nil { oldService.EndpointMap = make(map[Port]EndpointSet) } if oldService.EndpointToNodes == nil { oldService.EndpointToNodes = make(map[Endpoint]NodeName) } p.serviceMap[key] = oldService return true } func (p *proxy) cleanupService(serviceKey ObjectKey) { p.mu.Lock() defer p.mu.Unlock() serviceInfo, exists := p.serviceMap[serviceKey] if !exists { return } // cleanup endpoints in related edge node for port := range serviceInfo.EndpointMap { spn := ServicePortName{NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol} for _, nodeName := range serviceInfo.EndpointToNodes { node, exists := p.nodeSet[nodeName] if !exists { continue } delete(node.ServicePortMap, spn) delete(node.EndpointMap, spn) p.nodeSet[nodeName] = node } } p.syncServiceChangesToAgent(serviceInfo) delete(p.serviceMap, serviceKey) } func (p *proxy) syncServiceChangesToAgentByKey(key ObjectKey) { p.mu.Lock() defer p.mu.Unlock() serviceInfo, ok := p.serviceMap[key] if !ok { return } p.syncServiceChangesToAgent(serviceInfo) } func (p *proxy) syncServiceChangesToAgent(serviceInfo ServiceInfo) { for _, name := range serviceInfo.EndpointToNodes { node, ok := p.nodeSet[name] if !ok { continue } p.keeper.AddNode(node) } } func (p *proxy) OnEndpointSliceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var es EndpointSlice err := p.client.Get(ctx, request.NamespacedName, &es) if err != nil { if errors.IsNotFound(err) { log.Info("endpointslice is deleted, cleanup related endpoints") p.cleanupEndpointsOfEndpointSlice(request.NamespacedName) return reconcile.Result{}, nil } log.Error(err, "failed to get endpointslice") return reconcile.Result{}, err } if es.DeletionTimestamp != nil { log.Info("endpointslice is terminating, cleanup related endpoints") 
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName) return reconcile.Result{}, nil } serviceName := getServiceName(es.Labels) if serviceName == "" { log.Info("no service name found in endpointslice", "endpointslice", es) return Result{}, nil } var ( service corev1.Service serviceKey = ObjectKey{Name: serviceName, Namespace: request.Namespace} ) if err = p.client.Get(ctx, serviceKey, &service); err != nil { log.Error(err, "failed to get service") // if service is not found, we don't handle this endpointslice if errors.IsNotFound(err) { log.Info("Corresponding service is not found, cleanup service and endpoints", "serviceKey", serviceKey) p.cleanupService(serviceKey) return Result{}, nil } return Result{}, err } if p.shouldSkipService(&service) { log.Info("service has no ClusterIP, skip it", "service", service) p.cleanupService(serviceKey) return Result{}, nil } serviceChanged := p.syncServiceInfoFromService(serviceKey, &service) p.syncServiceEndpointsFromEndpointSlice(p.makeEndpointSliceInfo(&es), serviceChanged) return Result{}, nil } func (p *proxy) syncServiceEndpointsFromEndpointSlice(newES EndpointSliceInfo, serviceChanged bool) { p.mu.Lock() defer p.mu.Unlock() key, serviceKey := newES.ObjectKey, newES.ServiceKey oldES := p.endpointSliceMap[key] isSame := reflect.DeepEqual(oldES, newES) if isSame { return } serviceInfo := p.serviceMap[serviceKey] // collect node which has endpoints changes changedNodeNames := stringset.New() // add new endpoints for port := range newES.Ports { servicePortName := ServicePortName{ NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol, } endpointSet := serviceInfo.EndpointMap[port] for _, ep := range newES.Endpoints { endpoint := Endpoint{ IP: ep.IP, Port: port.Port, } endpointSet.Add(endpoint) serviceInfo.EndpointToNodes[endpoint] = ep.NodeName p.addServicePortToNode(ep.NodeName, servicePortName, ServicePort{ ClusterIP: serviceInfo.ClusterIP, Port: port.Port, Protocol: port.Protocol, SessionAffinity: 
serviceInfo.SessionAffinity, StickyMaxAgeSeconds: serviceInfo.StickyMaxAgeSeconds, }) added := p.addEndpointToNode(ep.NodeName, servicePortName, endpoint) if serviceChanged || added { changedNodeNames.Add(ep.NodeName) } } serviceInfo.EndpointMap[port] = endpointSet } // remove old endpoints for port := range oldES.Ports { _, exists := newES.Ports[port] portRemoved := !exists servicePortName := ServicePortName{ NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol, } endpointSet := serviceInfo.EndpointMap[port] for _, ep := range oldES.Endpoints { _, exist := newES.Endpoints[ep.IP] endpointRemoved := !exist if portRemoved || endpointRemoved { endpoint := Endpoint{ IP: ep.IP, Port: port.Port, } endpointSet.Remove(endpoint) delete(serviceInfo.EndpointToNodes, endpoint) p.removeEndpointFromNode(ep.NodeName, servicePortName, endpoint) changedNodeNames.Add(ep.NodeName) } if portRemoved { p.removeServicePortFromNode(ep.NodeName, servicePortName) } } if len(endpointSet) == 0 { delete(serviceInfo.EndpointMap, port) } else { serviceInfo.EndpointMap[port] = endpointSet } } p.endpointSliceMap[key] = newES p.serviceMap[serviceKey] = serviceInfo for nodeName := range changedNodeNames { node, ok := p.nodeSet[nodeName] if !ok { continue } p.keeper.AddNode(node) } } func (p *proxy) cleanupEndpointsOfEndpointSlice(key ObjectKey) { es, ok := p.getEndpointSliceInfo(key) if !ok { return } // no matter what caused cleanup, we take current endpointslice which // has empty ports and endpoints as deleted, es.Ports = make(map[Port]Empty) es.Endpoints = make(map[string]EndpointInfo) p.syncServiceEndpointsFromEndpointSlice(es, false) p.mu.Lock() delete(p.endpointSliceMap, key) p.mu.Unlock() } func (p *proxy) startCheckLoadBalanceRules(ctx context.Context) error { tick := time.NewTicker(p.checkInterval) for { select { case <-tick.C: for _, node := range p.nodeSet { p.keeper.AddNodeIfNotPresent(node) } case <-ctx.Done(): return nil } } } func (p *proxy) addEndpointToNode(nodeName 
string, spn ServicePortName, endpoint Endpoint) bool { node, ok := p.nodeSet[nodeName] if !ok { node = newEdgeNode(nodeName) } endpointSet := node.EndpointMap[spn] if endpointSet.Contains(endpoint) { return false } endpointSet.Add(endpoint) node.EndpointMap[spn] = endpointSet p.nodeSet[nodeName] = node return true } func (p *proxy) removeEndpointFromNode(nodeName string, spn ServicePortName, endpoint Endpoint) { node, ok := p.nodeSet[nodeName] if !ok { return } eps := node.EndpointMap[spn] eps.Remove(endpoint) if len(eps) == 0 { delete(node.EndpointMap, spn) } p.nodeSet[nodeName] = node } func (p *proxy) addServicePortToNode(nodeName string, spn ServicePortName, servicePort ServicePort) { node, ok := p.nodeSet[nodeName] if !ok { node = newEdgeNode(nodeName) } node.ServicePortMap[spn] = servicePort p.nodeSet[nodeName] = node } func (p *proxy) removeServicePortFromNode(nodeName string, spn ServicePortName) { node, ok := p.nodeSet[nodeName] if !ok { return } delete(node.ServicePortMap, spn) p.nodeSet[nodeName] = node } func (p *proxy) makeEndpointSliceInfo(es *EndpointSlice) EndpointSliceInfo { info := EndpointSliceInfo{ ObjectKey: ObjectKey{ Name: es.Name, Namespace: es.Namespace, }, ServiceKey: ObjectKey{ Name: getServiceName(es.Labels), Namespace: es.Namespace, }, Ports: make(map[Port]Empty), Endpoints: make(map[string]EndpointInfo), } for _, port := range es.Ports { p := Port{ Port: *port.Port, Protocol: *port.Protocol, } info.Ports[p] = Empty{} } for _, ep := range es.Endpoints { nodeName := getHostname(&ep) if _, exists := p.nodeSet[nodeName]; !exists { continue } // 在边缘场景, endpoint的稳定性有些问题, 会导致conditions.Ready的状态反复变化 // 暂时原因不明,所以我们不考虑这个问题 // todo: 处理网络抖动导致的endpoint不稳定情况 info.Endpoints[ep.Addresses[0]] = EndpointInfo{ IP: ep.Addresses[0], NodeName: nodeName, } } return info } func (p *proxy) getEndpointSliceInfo(key ObjectKey) (EndpointSliceInfo, bool) { p.mu.Lock() defer p.mu.Unlock() es, ok := p.endpointSliceMap[key] return es, ok }
} if svc.Spec.ClusterIP == corev1.ClusterIPNone || svc.Spec.ClusterIP == "" { return true } if svc.Spec.Selector == nil || len(svc.Spec.Selector) == 0 { return true } return false } func makeServiceInfo(svc *corev1.Service) ServiceInfo { var stickyMaxAgeSeconds int32 if svc.Spec.SessionAffinity == corev1.ServiceAffinityClientIP { // Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP stickyMaxAgeSeconds = *svc.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds } return ServiceInfo{ ClusterIP: svc.Spec.ClusterIP, SessionAffinity: svc.Spec.SessionAffinity, StickyMaxAgeSeconds: stickyMaxAgeSeconds, } } func getValueByKey(data map[string]string, key string) string { if data == nil { return "" } return data[key] } func getServiceName(data map[string]string) string { return getValueByKey(data, LabelServiceName) } func getHostname(endpoint *discoveryv1.Endpoint) string { if endpoint.NodeName != nil && *endpoint.NodeName != "" { return *endpoint.NodeName } return getValueByKey(endpoint.Topology, LabelHostname) } func newEdgeNode(name string) EdgeNode { return EdgeNode{ Name: name, ServicePortMap: make(map[ServicePortName]ServicePort), EndpointMap: make(map[ServicePortName]EndpointSet), } }
func (p *proxy) shouldSkipService(svc *corev1.Service) bool { if svc.Spec.Type != corev1.ServiceTypeClusterIP { return true
random_line_split
proxy.go
// Copyright 2021 BoCloud // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package proxy import ( "context" "reflect" "sync" "time" "github.com/go-logr/logr" "github.com/jjeffery/stringset" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" "github.com/fabedge/fabedge/pkg/operator/predicates" ) const ( LabelServiceName = "kubernetes.io/service-name" LabelHostname = "kubernetes.io/hostname" ) // type shortcuts type ( EndpointSlice = discoveryv1.EndpointSlice Result = reconcile.Result ObjectKey = client.ObjectKey ) type Config struct { Manager manager.Manager // the namespace where agent and configmap are created AgentNamespace string IPVSScheduler string // the interval to check if agent load balance rules is consistent with configmap CheckInterval time.Duration } // proxy keep proxy rules configmap for each service which has edge endpoints. // An edge-endpoint is an endpoint which has corresponding pod on a edge node. 
type proxy struct { mu sync.Mutex serviceMap ServiceMap endpointSliceMap EndpointSliceMap nodeSet EdgeNodeSet // the namespace where agent and configmap are created namespace string keeper *loadBalanceConfigKeeper checkInterval time.Duration client client.Client log logr.Logger } func AddToManager(cnf Config) error { mgr := cnf.Manager keeper := &loadBalanceConfigKeeper{ namespace: cnf.AgentNamespace, interval: 5 * time.Second, nodeSet: make(EdgeNodeSet), ipvsScheduler: cnf.IPVSScheduler, client: mgr.GetClient(), log: mgr.GetLogger().WithName("load-balance-keeper"), } proxy := &proxy{ serviceMap: make(ServiceMap), endpointSliceMap: make(EndpointSliceMap), nodeSet: make(EdgeNodeSet), keeper: keeper, checkInterval: cnf.CheckInterval, log: mgr.GetLogger().WithName("fab-proxy"), client: mgr.GetClient(), } if err := mgr.Add(manager.RunnableFunc(keeper.Start)); err != nil { return err } if err := mgr.Add(manager.RunnableFunc(proxy.startCheckLoadBalanceRules)); err != nil { return err } err := addController( "proxy-endpointslice", mgr, proxy.OnEndpointSliceUpdate, &EndpointSlice{}, ) if err != nil { return err } err = addController( "proxy-node", mgr, proxy.onNodeUpdate, &corev1.Node{}, predicates.EdgeNodePredicate(), ) if err != nil { return err } return addController("proxy-service", mgr, proxy.OnServiceUpdate, &corev1.Service{}, ) } func addController(name string, mgr manager.Manager, reconciler reconcile.Func, watchObj client.Object, predicates ...predicate.Predicate) error { c, err := controller.New( name, mgr, controller.Options{ Reconciler: reconciler, }, ) if err != nil { return err } return c.Watch( &source.Kind{Type: watchObj}, &handler.EnqueueRequestForObject{}, predicates..., ) } func (p *proxy) onNodeUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var node corev1.Node if err := p.client.Get(ctx, request.NamespacedName, &node); err != nil { if errors.IsNotFound(err) { 
p.removeNode(request.Name) return Result{}, nil } log.Error(err, "failed to get node") return Result{}, err } if node.DeletionTimestamp != nil { p.removeNode(request.Name) return Result{}, nil } p.addNode(request.Name) return Result{}, nil } func (p *proxy) addNode(name string) { p.mu.Lock() defer p.mu.Unlock() if _, exists := p.nodeSet[name]; !exists { p.nodeSet[name] = newEdgeNode(name) } } func (p *proxy) removeNode(name string) { p.mu.Lock() defer p.mu.Unlock() delete(p.nodeSet, name) } func (p *proxy) OnServiceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var service corev1.Service if err := p.client.Get(ctx, request.NamespacedName, &service); err != nil { log.Error(err, "failed to get service") if errors.IsNotFound(err) { log.Info("service is deleted, cleanup service and endpoints") p.cleanupService(request.NamespacedName) return Result{}, nil } return Result{}, err } // if service is updated to a invalid service, we take it as deleted and cleanup related resources if p.shouldSkipService(&service) { log.Info("service has no ClusterIP, skip it", "service", service) p.cleanupService(request.NamespacedName) return Result{}, nil } changed := p.syncServiceInfoFromService(request.NamespacedName, &service) if changed { p.syncServiceChangesToAgentByKey(request.NamespacedName) } return Result{}, nil } // syncServiceInfoFromService only sync clusterIP, sessionAffinity and StickyMaxAgeSeconds as needed // if these are the same, just skip synchronizing func (p *proxy) syncServiceInfoFromService(key ObjectKey, svc *corev1.Service) bool { p.mu.Lock() defer p.mu.Unlock() oldService := p.serviceMap[key] newService := makeServiceInfo(svc) if oldService.ClusterIP == newService.ClusterIP && oldService.SessionAffinity == newService.SessionAffinity && oldService.StickyMaxAgeSeconds == newService.StickyMaxAgeSeconds { return false } oldService.ClusterIP = newService.ClusterIP 
oldService.SessionAffinity = newService.SessionAffinity oldService.StickyMaxAgeSeconds = newService.StickyMaxAgeSeconds if oldService.EndpointMap == nil { oldService.EndpointMap = make(map[Port]EndpointSet) } if oldService.EndpointToNodes == nil { oldService.EndpointToNodes = make(map[Endpoint]NodeName) } p.serviceMap[key] = oldService return true } func (p *proxy) cleanupService(serviceKey ObjectKey) { p.mu.Lock() defer p.mu.Unlock() serviceInfo, exists := p.serviceMap[serviceKey] if !exists { return } // cleanup endpoints in related edge node for port := range serviceInfo.EndpointMap { spn := ServicePortName{NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol} for _, nodeName := range serviceInfo.EndpointToNodes { node, exists := p.nodeSet[nodeName] if !exists { continue } delete(node.ServicePortMap, spn) delete(node.EndpointMap, spn) p.nodeSet[nodeName] = node } } p.syncServiceChangesToAgent(serviceInfo) delete(p.serviceMap, serviceKey) } func (p *proxy) syncServiceChangesToAgentByKey(key ObjectKey) { p.mu.Lock() defer p.mu.Unlock() serviceInfo, ok := p.serviceMap[key] if !ok { return } p.syncServiceChangesToAgent(serviceInfo) } func (p *proxy) syncServiceChangesToAgent(serviceInfo ServiceInfo) { for _, name := range serviceInfo.EndpointToNodes { node, ok := p.nodeSet[name] if !ok { continue } p.keeper.AddNode(node) } } func (p *proxy) OnEndpointSliceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var es EndpointSlice err := p.client.Get(ctx, request.NamespacedName, &es) if err != nil { if errors.IsNotFound(err) { log.Info("endpointslice is deleted, cleanup related endpoints") p.cleanupEndpointsOfEndpointSlice(request.NamespacedName) return reconcile.Result{}, nil } log.Error(err, "failed to get endpointslice") return reconcile.Result{}, err } if es.DeletionTimestamp != nil { log.Info("endpointslice is terminating, cleanup related endpoints") 
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName) return reconcile.Result{}, nil } serviceName := getServiceName(es.Labels) if serviceName == "" { log.Info("no service name found in endpointslice", "endpointslice", es) return Result{}, nil } var ( service corev1.Service serviceKey = ObjectKey{Name: serviceName, Namespace: request.Namespace} ) if err = p.client.Get(ctx, serviceKey, &service); err != nil { log.Error(err, "failed to get service") // if service is not found, we don't handle this endpointslice if errors.IsNotFound(err) { log.Info("Corresponding service is not found, cleanup service and endpoints", "serviceKey", serviceKey) p.cleanupService(serviceKey) return Result{}, nil } return Result{}, err } if p.shouldSkipService(&service) { log.Info("service has no ClusterIP, skip it", "service", service) p.cleanupService(serviceKey) return Result{}, nil } serviceChanged := p.syncServiceInfoFromService(serviceKey, &service) p.syncServiceEndpointsFromEndpointSlice(p.makeEndpointSliceInfo(&es), serviceChanged) return Result{}, nil } func (p *proxy) syncServiceEndpointsFromEndpointSlice(newES EndpointSliceInfo, serviceChanged bool) { p.mu.Lock() defer p.mu.Unlock() key, serviceKey := newES.ObjectKey, newES.ServiceKey oldES := p.endpointSliceMap[key] isSame := reflect.DeepEqual(oldES, newES) if isSame { return } serviceInfo := p.serviceMap[serviceKey] // collect node which has endpoints changes changedNodeNames := stringset.New() // add new endpoints for port := range newES.Ports { servicePortName := ServicePortName{ NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol, } endpointSet := serviceInfo.EndpointMap[port] for _, ep := range newES.Endpoints { endpoint := Endpoint{ IP: ep.IP, Port: port.Port, } endpointSet.Add(endpoint) serviceInfo.EndpointToNodes[endpoint] = ep.NodeName p.addServicePortToNode(ep.NodeName, servicePortName, ServicePort{ ClusterIP: serviceInfo.ClusterIP, Port: port.Port, Protocol: port.Protocol, SessionAffinity: 
serviceInfo.SessionAffinity, StickyMaxAgeSeconds: serviceInfo.StickyMaxAgeSeconds, }) added := p.addEndpointToNode(ep.NodeName, servicePortName, endpoint) if serviceChanged || added { changedNodeNames.Add(ep.NodeName) } } serviceInfo.EndpointMap[port] = endpointSet } // remove old endpoints for port := range oldES.Ports { _, exists := newES.Ports[port] portRemoved := !exists servicePortName := ServicePortName{ NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol, } endpointSet := serviceInfo.EndpointMap[port] for _, ep := range oldES.Endpoints { _, exist := newES.Endpoints[ep.IP] endpointRemoved := !exist if portRemoved || endpointRemoved { endpoint := Endpoint{ IP: ep.IP, Port: port.Port, } endpointSet.Remove(endpoint) delete(serviceInfo.EndpointToNodes, endpoint) p.removeEndpointFromNode(ep.NodeName, servicePortName, endpoint) changedNodeNames.Add(ep.NodeName) } if portRemoved { p.removeServicePortFromNode(ep.NodeName, servicePortName) } } if len(endpointSet) == 0 { delete(serviceInfo.EndpointMap, port) } else { serviceInfo.EndpointMap[port] = endpointSet } } p.endpointSliceMap[key] = newES p.serviceMap[serviceKey] = serviceInfo for nodeName := range changedNodeNames { node, ok := p.nodeSet[nodeName] if !ok { continue } p.keeper.AddNode(node) } } func (p *proxy) cleanupEndpointsOfEndpointSlice(key ObjectKey) { es, ok := p.getEndpointSliceInfo(key) if !ok { return } // no matter what caused cleanup, we take current endpointslice which // has empty ports and endpoints as deleted, es.Ports = make(map[Port]Empty) es.Endpoints = make(map[string]EndpointInfo) p.syncServiceEndpointsFromEndpointSlice(es, false) p.mu.Lock() delete(p.endpointSliceMap, key) p.mu.Unlock() } func (p *proxy) startCheckLoadBalanceRules(ctx context.Context) error { tick := time.NewTicker(p.checkInterval) for { select { case <-tick.C: for _, node := range p.nodeSet { p.keeper.AddNodeIfNotPresent(node) } case <-ctx.Done(): return nil } } } func (p *proxy) addEndpointToNode(nodeName 
string, spn ServicePortName, endpoint Endpoint) bool { node, ok := p.nodeSet[nodeName] if !ok { node = newEdgeNode(nodeName) } endpointSet := node.EndpointMap[spn] if endpointSet.Contains(endpoint) { return false } endpointSet.Add(endpoint) node.EndpointMap[spn] = endpointSet p.nodeSet[nodeName] = node return true } func (p *proxy) removeEndpointFromNode(nodeName string, spn ServicePortName, endpoint Endpoint) { node, ok := p.nodeSet[nodeName] if !ok { return } eps := node.EndpointMap[spn] eps.Remove(endpoint) if len(eps) == 0 { delete(node.EndpointMap, spn) } p.nodeSet[nodeName] = node } func (p *proxy) addServicePortToNode(nodeName string, spn ServicePortName, servicePort ServicePort) { node, ok := p.nodeSet[nodeName] if !ok { node = newEdgeNode(nodeName) } node.ServicePortMap[spn] = servicePort p.nodeSet[nodeName] = node } func (p *proxy) removeServicePortFromNode(nodeName string, spn ServicePortName) { node, ok := p.nodeSet[nodeName] if !ok { return } delete(node.ServicePortMap, spn) p.nodeSet[nodeName] = node } func (p *proxy) makeEndpointSliceInfo(es *EndpointSlice) EndpointSliceInfo { info := EndpointSliceInfo{ ObjectKey: ObjectKey{ Name: es.Name, Namespace: es.Namespace, }, ServiceKey: ObjectKey{ Name: getServiceName(es.Labels), Namespace: es.Namespace, }, Ports: make(map[Port]Empty), Endpoints: make(map[string]EndpointInfo), } for _, port := range es.Ports { p := Port{ Port: *port.Port, Protocol: *port.Protocol, } info.Ports[p] = Empty{} } for _, ep := range es.Endpoints { nodeName := getHostname(&ep) if _, exists := p.nodeSet[nodeName]; !exists { continue } // 在边缘场景, endpoint的稳定性有些问题, 会导致conditions.Ready的状态反复变化 // 暂时原因不明,所以我们不考虑这个问题 // todo: 处理网络抖动导致的endpoint不稳定情况 info.Endpoints[ep.Addresses[0]] = EndpointInfo{ IP: ep.Addresses[0], NodeName: nodeName, } } return info } func (p *proxy) getEndpointSliceInfo(key ObjectKey) (EndpointSliceInfo, bool) { p.mu.Lock() defer p.mu.Unlock() es, ok := p.endpointSliceMap[key] return es, ok } func (p *proxy) 
shouldSkipService(svc *corev1.Service) bool { if svc.Spec.Type != corev1.ServiceTypeClusterIP { return true } if svc.Spec.ClusterIP == corev1.ClusterIPNone || svc.Spec.ClusterIP == "" { return true } if svc.Spec.Selector == nil || len(svc.Spec.Selector) == 0 { return true } return false } func makeServiceInfo(svc *corev1.Service) ServiceInfo { var stickyMaxAgeSeconds int32 if svc.Spec.SessionAffinity
iceAffinityClientIP { // Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP stickyMaxAgeSeconds = *svc.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds } return ServiceInfo{ ClusterIP: svc.Spec.ClusterIP, SessionAffinity: svc.Spec.SessionAffinity, StickyMaxAgeSeconds: stickyMaxAgeSeconds, } } func getValueByKey(data map[string]string, key string) string { if data == nil { return "" } return data[key] } func getServiceName(data map[string]string) string { return getValueByKey(data, LabelServiceName) } func getHostname(endpoint *discoveryv1.Endpoint) string { if endpoint.NodeName != nil && *endpoint.NodeName != "" { return *endpoint.NodeName } return getValueByKey(endpoint.Topology, LabelHostname) } func newEdgeNode(name string) EdgeNode { return EdgeNode{ Name: name, ServicePortMap: make(map[ServicePortName]ServicePort), EndpointMap: make(map[ServicePortName]EndpointSet), } }
== corev1.Serv
identifier_name
proxy.go
// Copyright 2021 BoCloud // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package proxy import ( "context" "reflect" "sync" "time" "github.com/go-logr/logr" "github.com/jjeffery/stringset" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" "github.com/fabedge/fabedge/pkg/operator/predicates" ) const ( LabelServiceName = "kubernetes.io/service-name" LabelHostname = "kubernetes.io/hostname" ) // type shortcuts type ( EndpointSlice = discoveryv1.EndpointSlice Result = reconcile.Result ObjectKey = client.ObjectKey ) type Config struct { Manager manager.Manager // the namespace where agent and configmap are created AgentNamespace string IPVSScheduler string // the interval to check if agent load balance rules is consistent with configmap CheckInterval time.Duration } // proxy keep proxy rules configmap for each service which has edge endpoints. // An edge-endpoint is an endpoint which has corresponding pod on a edge node. 
type proxy struct { mu sync.Mutex serviceMap ServiceMap endpointSliceMap EndpointSliceMap nodeSet EdgeNodeSet // the namespace where agent and configmap are created namespace string keeper *loadBalanceConfigKeeper checkInterval time.Duration client client.Client log logr.Logger } func AddToManager(cnf Config) error { mgr := cnf.Manager keeper := &loadBalanceConfigKeeper{ namespace: cnf.AgentNamespace, interval: 5 * time.Second, nodeSet: make(EdgeNodeSet), ipvsScheduler: cnf.IPVSScheduler, client: mgr.GetClient(), log: mgr.GetLogger().WithName("load-balance-keeper"), } proxy := &proxy{ serviceMap: make(ServiceMap), endpointSliceMap: make(EndpointSliceMap), nodeSet: make(EdgeNodeSet), keeper: keeper, checkInterval: cnf.CheckInterval, log: mgr.GetLogger().WithName("fab-proxy"), client: mgr.GetClient(), } if err := mgr.Add(manager.RunnableFunc(keeper.Start)); err != nil { return err } if err := mgr.Add(manager.RunnableFunc(proxy.startCheckLoadBalanceRules)); err != nil { return err } err := addController( "proxy-endpointslice", mgr, proxy.OnEndpointSliceUpdate, &EndpointSlice{}, ) if err != nil { return err } err = addController( "proxy-node", mgr, proxy.onNodeUpdate, &corev1.Node{}, predicates.EdgeNodePredicate(), ) if err != nil { return err } return addController("proxy-service", mgr, proxy.OnServiceUpdate, &corev1.Service{}, ) } func addController(name string, mgr manager.Manager, reconciler reconcile.Func, watchObj client.Object, predicates ...predicate.Predicate) error { c, err := controller.New( name, mgr, controller.Options{ Reconciler: reconciler, }, ) if err != nil { return err } return c.Watch( &source.Kind{Type: watchObj}, &handler.EnqueueRequestForObject{}, predicates..., ) } func (p *proxy) onNodeUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var node corev1.Node if err := p.client.Get(ctx, request.NamespacedName, &node); err != nil { if errors.IsNotFound(err) { 
p.removeNode(request.Name) return Result{}, nil } log.Error(err, "failed to get node") return Result{}, err } if node.DeletionTimestamp != nil { p.removeNode(request.Name) return Result{}, nil } p.addNode(request.Name) return Result{}, nil } func (p *proxy) addNode(name string) { p.mu.Lock() defer p.mu.Unlock() if _, exists := p.nodeSet[name]; !exists { p.nodeSet[name] = newEdgeNode(name) } } func (p *proxy) removeNode(name string) { p.mu.Lock() defer p.mu.Unlock() delete(p.nodeSet, name) } func (p *proxy) OnServiceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var service corev1.Service if err := p.client.Get(ctx, request.NamespacedName, &service); err != nil { log.Error(err, "failed to get service") if errors.IsNotFound(err) { log.Info("service is deleted, cleanup service and endpoints") p.cleanupService(request.NamespacedName) return Result{}, nil } return Result{}, err } // if service is updated to a invalid service, we take it as deleted and cleanup related resources if p.shouldSkipService(&service) { log.Info("service has no ClusterIP, skip it", "service", service) p.cleanupService(request.NamespacedName) return Result{}, nil } changed := p.syncServiceInfoFromService(request.NamespacedName, &service) if changed { p.syncServiceChangesToAgentByKey(request.NamespacedName) } return Result{}, nil } // syncServiceInfoFromService only sync clusterIP, sessionAffinity and StickyMaxAgeSeconds as needed // if these are the same, just skip synchronizing func (p *proxy) syncServiceInfoFromService(key ObjectKey, svc *corev1.Service) bool { p.mu.Lock() defer p.mu.Unlock() oldService := p.serviceMap[key] newService := makeServiceInfo(svc) if oldService.ClusterIP == newService.ClusterIP && oldService.SessionAffinity == newService.SessionAffinity && oldService.StickyMaxAgeSeconds == newService.StickyMaxAgeSeconds { return false } oldService.ClusterIP = newService.ClusterIP 
oldService.SessionAffinity = newService.SessionAffinity oldService.StickyMaxAgeSeconds = newService.StickyMaxAgeSeconds if oldService.EndpointMap == nil { oldService.EndpointMap = make(map[Port]EndpointSet) } if oldService.EndpointToNodes == nil { oldService.EndpointToNodes = make(map[Endpoint]NodeName) } p.serviceMap[key] = oldService return true } func (p *proxy) cleanupService(serviceKey ObjectKey) { p.mu.Lock() defer p.mu.Unlock() serviceInfo, exists := p.serviceMap[serviceKey] if !exists { return } // cleanup endpoints in related edge node for port := range serviceInfo.EndpointMap { spn := ServicePortName{NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol} for _, nodeName := range serviceInfo.EndpointToNodes { node, exists := p.nodeSet[nodeName] if !exists { continue } delete(node.ServicePortMap, spn) delete(node.EndpointMap, spn) p.nodeSet[nodeName] = node } } p.syncServiceChangesToAgent(serviceInfo) delete(p.serviceMap, serviceKey) } func (p *proxy) syncServiceChangesToAgentByKey(key ObjectKey) { p.mu.Lock() defer p.mu.Unlock() serviceInfo, ok := p.serviceMap[key] if !ok { return } p.syncServiceChangesToAgent(serviceInfo) } func (p *proxy) syncServiceChangesToAgent(serviceInfo ServiceInfo) { for _, name := range serviceInfo.EndpointToNodes { node, ok := p.nodeSet[name] if !ok { continue } p.keeper.AddNode(node) } } func (p *proxy) OnEndpointSliceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := p.log.WithValues("request", request) var es EndpointSlice err := p.client.Get(ctx, request.NamespacedName, &es) if err != nil { if errors.IsNotFound(err) { log.Info("endpointslice is deleted, cleanup related endpoints") p.cleanupEndpointsOfEndpointSlice(request.NamespacedName) return reconcile.Result{}, nil } log.Error(err, "failed to get endpointslice") return reconcile.Result{}, err } if es.DeletionTimestamp != nil { log.Info("endpointslice is terminating, cleanup related endpoints") 
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName) return reconcile.Result{}, nil } serviceName := getServiceName(es.Labels) if serviceName == "" { log.Info("no service name found in endpointslice", "endpointslice", es) return Result{}, nil } var ( service corev1.Service serviceKey = ObjectKey{Name: serviceName, Namespace: request.Namespace} ) if err = p.client.Get(ctx, serviceKey, &service); err != nil { log.Error(err, "failed to get service") // if service is not found, we don't handle this endpointslice if errors.IsNotFound(err) { log.Info("Corresponding service is not found, cleanup service and endpoints", "serviceKey", serviceKey) p.cleanupService(serviceKey) return Result{}, nil } return Result{}, err } if p.shouldSkipService(&service) { log.Info("service has no ClusterIP, skip it", "service", service) p.cleanupService(serviceKey) return Result{}, nil } serviceChanged := p.syncServiceInfoFromService(serviceKey, &service) p.syncServiceEndpointsFromEndpointSlice(p.makeEndpointSliceInfo(&es), serviceChanged) return Result{}, nil } func (p *proxy) syncServiceEndpointsFromEndpointSlice(newES EndpointSliceInfo, serviceChanged bool) { p.mu.Lock() defer p.mu.Unlock() key, serviceKey := newES.ObjectKey, newES.ServiceKey oldES := p.endpointSliceMap[key] isSame := reflect.DeepEqual(oldES, newES) if isSame { return } serviceInfo := p.serviceMap[serviceKey] // collect node which has endpoints changes changedNodeNames := stringset.New() // add new endpoints for port := range newES.Ports { servicePortName := ServicePortName{ NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol, } endpointSet := serviceInfo.EndpointMap[port] for _, ep := range newES.Endpoints { endpoint := Endpoint{ IP: ep.IP, Port: port.Port, } endpointSet.Add(endpoint) serviceInfo.EndpointToNodes[endpoint] = ep.NodeName p.addServicePortToNode(ep.NodeName, servicePortName, ServicePort{ ClusterIP: serviceInfo.ClusterIP, Port: port.Port, Protocol: port.Protocol, SessionAffinity: 
serviceInfo.SessionAffinity, StickyMaxAgeSeconds: serviceInfo.StickyMaxAgeSeconds, }) added := p.addEndpointToNode(ep.NodeName, servicePortName, endpoint) if serviceChanged || added { changedNodeNames.Add(ep.NodeName) } } serviceInfo.EndpointMap[port] = endpointSet } // remove old endpoints for port := range oldES.Ports { _, exists := newES.Ports[port] portRemoved := !exists servicePortName := ServicePortName{ NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol, } endpointSet := serviceInfo.EndpointMap[port] for _, ep := range oldES.Endpoints { _, exist := newES.Endpoints[ep.IP] endpointRemoved := !exist if portRemoved || endpointRemoved { endpoint := Endpoint{ IP: ep.IP, Port: port.Port, } endpointSet.Remove(endpoint) delete(serviceInfo.EndpointToNodes, endpoint) p.removeEndpointFromNode(ep.NodeName, servicePortName, endpoint) changedNodeNames.Add(ep.NodeName) } if portRemoved { p.removeServicePortFromNode(ep.NodeName, servicePortName) } } if len(endpointSet) == 0 { delete(serviceInfo.EndpointMap, port) } else { serviceInfo.EndpointMap[port] = endpointSet } } p.endpointSliceMap[key] = newES p.serviceMap[serviceKey] = serviceInfo for nodeName := range changedNodeNames { node, ok := p.nodeSet[nodeName] if !ok { continue } p.keeper.AddNode(node) } } func (p *proxy) cleanupEndpointsOfEndpointSlice(key ObjectKey) { es, ok := p.getEndpointSliceInfo(key) if !ok { return } // no matter what caused cleanup, we take current endpointslice which // has empty ports and endpoints as deleted, es.Ports = make(map[Port]Empty) es.Endpoints = make(map[string]EndpointInfo) p.syncServiceEndpointsFromEndpointSlice(es, false) p.mu.Lock() delete(p.endpointSliceMap, key) p.mu.Unlock() } func (p *proxy) startCheckLoadBalanceRules(ctx context.Context) error { tick := time.NewTicker(p.checkInterval) for { select { case <-tick.C: for _, node := range p.nodeSet { p.keeper.AddNodeIfNotPresent(node) } case <-ctx.Done(): return nil } } } func (p *proxy) addEndpointToNode(nodeName 
string, spn ServicePortName, endpoint Endpoint) bool { node, ok := p.nodeSet[nodeName] if !ok { node = newEdgeNode(nodeName) } endpointSet := node.EndpointMap[spn] if endpointSet.Contains(endpoint) { return false } endpointSet.Add(endpoint) node.EndpointMap[spn] = endpointSet p.nodeSet[nodeName] = node return true } func (p *proxy) removeEndpointFromNode(nodeName string, spn ServicePortName, endpoint Endpoint) { node, ok := p.nodeSet[nodeName] if !ok { return } eps := node.EndpointMap[spn] eps.Remove(endpoint) if len(eps) == 0 { delete(node.EndpointMap, spn) } p.nodeSet[nodeName] = node } func (p *proxy) addServicePortToNode(nodeName string, spn ServicePortName, servicePort ServicePort) { node, ok := p.nodeSet[nodeName] if !ok { node = newEdgeNode(nodeName) } node.ServicePortMap[spn] = servicePort p.nodeSet[nodeName] = node } func (p *proxy) removeServicePortFromNode(nodeName string, spn ServicePortName) { node, ok := p.nodeSet[nodeName] if !ok { return } delete(node.ServicePortMap, spn) p.nodeSet[nodeName] = node } func (p *proxy) makeEndpointSliceInfo(es *EndpointSlice) EndpointSliceInfo { info := EndpointSliceInfo{ ObjectKey: ObjectKey{ Name: es.Name, Namespace: es.Namespace, }, ServiceKey: ObjectKey{ Name: getServiceName(es.Labels), Namespace: es.Namespace, }, Ports: make(map[Port]Empty), Endpoints: make(map[string]EndpointInfo), } for _, port := range es.Ports { p := Port{ Port: *port.Port, Protocol: *port.Protocol, } info.Ports[p] = Empty{} } for _, ep := range es.Endpoints { nodeName := getHostname(&ep) if _, exists := p.nodeSet[nodeName]; !exists { continue } // 在边缘场景, endpoint的稳定性有些问题, 会导致conditions.Ready的状态反复变化 // 暂时原因不明,所以我们不考虑这个问题 // todo: 处理网络抖动导致的endpoint不稳定情况 info.Endpoints[ep.Addresses[0]] = EndpointInfo{ IP: ep.Addresses[0], NodeName: nodeName, } } return info } func (p *proxy) getEndpointSliceInfo(key ObjectKey) (EndpointSliceInfo, bool) { p.mu.Lock() defer p.mu.Unlock() es, ok := p.endpointSliceMap[key] return es, ok } func (p *proxy) 
shouldSkipService(svc *corev1.Service) bool { if svc.Spec.Type != corev1.ServiceTypeClusterIP { return true } if svc.Spec.ClusterIP == corev1.ClusterIPNone || svc.Spec.ClusterIP == "" { return true } if svc.Spec.Selector == nil || len(svc.Spec.Selector) == 0 { return true } return fa
rviceInfo(svc *corev1.Service) ServiceInfo { var stickyMaxAgeSeconds int32 if svc.Spec.SessionAffinity == corev1.ServiceAffinityClientIP { // Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP stickyMaxAgeSeconds = *svc.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds } return ServiceInfo{ ClusterIP: svc.Spec.ClusterIP, SessionAffinity: svc.Spec.SessionAffinity, StickyMaxAgeSeconds: stickyMaxAgeSeconds, } } func getValueByKey(data map[string]string, key string) string { if data == nil { return "" } return data[key] } func getServiceName(data map[string]string) string { return getValueByKey(data, LabelServiceName) } func getHostname(endpoint *discoveryv1.Endpoint) string { if endpoint.NodeName != nil && *endpoint.NodeName != "" { return *endpoint.NodeName } return getValueByKey(endpoint.Topology, LabelHostname) } func newEdgeNode(name string) EdgeNode { return EdgeNode{ Name: name, ServicePortMap: make(map[ServicePortName]ServicePort), EndpointMap: make(map[ServicePortName]EndpointSet), } }
lse } func makeSe
conditional_block
maimemo_client.rs
use crate::client::*; use crate::config::*; use chrono::Local; use cookie_store::CookieStore; use reqwest::Client; use scraper::{Html, Selector}; use serde::{Deserialize, Serialize}; use std::fmt; /// notepad包含必要的header info和内容detail #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Notepad {
brief: String, created_time: Option<String>, updated_time: Option<String>, contents: Option<String>, } impl Notepad { pub fn get_notepad_id(&self) -> &str { &self.notepad_id } pub fn set_contents(&mut self, contents: Option<String>) { self.contents = contents; } pub fn get_contents(&self) -> Option<&str> { self.contents.as_deref() } pub fn get_contents_mut(&mut self) -> Option<&mut String> { self.contents.as_mut() } } impl fmt::Display for Notepad { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut temp = self.clone(); // 仅输出第一行 与 total length let contents = temp.contents.as_mut().unwrap(); let total_len = contents.len(); contents.drain(contents.find("\n").unwrap_or(total_len)..); contents.push_str("... total length: "); contents.push_str(&total_len.to_string()); write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap()) } } #[derive(Debug, Serialize, Deserialize)] struct ResponseResult { error: String, valid: i32, total: usize, notepad: Option<Vec<Notepad>>, } /// maimemo提供一些访问操作。 pub struct MaimemoClient { client: Client, config: AppConfig, cookie_store: CookieStore, user_token_name: String, } impl std::ops::Drop for MaimemoClient { /// 在退出时保存cookie store fn drop(&mut self) { if let Some(path) = self.config.get_cookie_path() { if let Err(e) = save_cookie_store(path, &self.cookie_store) { error!("save cookie store failed: {}", e); } } } } impl MaimemoClient { /// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。 pub fn new(config: AppConfig) -> Result<Self, String> { let cookie_store = build_cookie_store(config.get_cookie_path())?; Ok(Self { client: build_general_client()?, config, cookie_store: cookie_store, user_token_name: "userToken".to_string(), }) } pub fn get_user_token_val(&self) -> Option<&str> { self.cookie_store .get("www.maimemo.com", "/", &self.user_token_name) .map(|c| c.value()) } pub fn has_logged(&self) -> bool { self.get_user_token_val().is_some() } /// 登录并更新config.cookies pub async fn login(&mut self) -> 
Result<(), String> { let req_name = "login"; let form = [ ("email", self.config.get_username()), ("password", self.config.get_password()), ]; let resp = send_request( &self.config, &self.client, &self.cookie_store, req_name, |url| url.to_string(), Some(&form), ) .await?; // login failed // Check if the user token exists update_set_cookies(&mut self.cookie_store, &resp); if !self.has_logged() { error!( "update cookie store failed. not found cookie: [{}] in cookie_store", self.user_token_name ); Err("login failed. not found cookie store".to_string()) } else { debug!("login successful"); Ok(()) } } /// 提供完整的notepad list调用get_notepad_list与get_notepad_contents pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> { let mut notepads = self.get_notepad_list().await?; for notepad in &mut notepads { let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?; notepad.set_contents(Some(contents)); } Ok(notepads) } /// 获取notepad list pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-search"; // ?token={user_token} let url_handler = |url: &str| { let user_token = self.get_user_token_val().expect("not found user token"); url.to_string() + user_token }; let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1}); let resp = send_request( &self.config, &self.client, &self.cookie_store, req_name, url_handler, Some(&payload), ) .await?; let result = resp .json::<ResponseResult>() .await .map_err(|e| format!("{:?}", e))?; if let Some(notepad) = result.notepad { debug!("got notepad list. 
len: {}", notepad.len()); Ok(notepad) } else { error!("get notepad failed: {:?}", result); Err("get notepad failed".to_string()) } } /// 获取notepad中单词文本 pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-detail"; let url_handler = |url: &str| url.to_string() + notepad_id; let resp = send_request_nobody( &self.config, &self.client, &self.cookie_store, req_name, url_handler, ) .await?; Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?) } /// 刷新下载notepad对应的captcha返回文件全路径。 pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "service-captcha"; let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string(); let resp = send_request_nobody( &self.config, &self.client, &self.cookie_store, req_name, url_handler, ) .await .map_err(|e| format!("{:?}", e))?; let contents = resp .bytes() .await .map(|body| body.to_vec()) .map_err(|e| format!("{:?}", e))?; Ok(contents) } /// 保存notepad /// /// 注意:maimemo要求先获取验证码,再保存。并且要求是同一机器发送的。在win host浏览器刷新验证码, /// 但在wsl2 保存则不会生效,很可能是对比的发送的数据包是否来自同一机器 pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-save"; if notepad.contents.is_none() { return Err("notepad contents is none".to_string()); } // form let mut form = std::collections::HashMap::new(); form.insert("id".to_string(), notepad.notepad_id); form.insert("title".to_string(), notepad.title); form.insert("brief".to_string(), notepad.brief); form.insert("content".to_string(), notepad.contents.unwrap()); form.insert( "is_private".to_string(), (notepad.is_private == 1).to_string(), ); form.insert("captcha".to_string(), captcha); let form = form .iter() .map(|(key, val)| (key.as_str(), 
val.as_str())) .collect::<Vec<_>>(); #[derive(Debug, Serialize, Deserialize)] struct RespResult { valid: i8, #[serde(rename = "errorCode")] error: Option<String>, } let result: RespResult = send_request( &self.config, &self.client, &self.cookie_store, req_name, |url| url.to_string(), Some(&form), ) .await? .json::<RespResult>() .await .map_err(|e| format!("{:?}", e))?; if let Some(e) = &result.error { error!("save notepad failed: {:?}", result); return Err(format!("save notepad failed: {}", e)); } debug!("save_notepad successful"); Ok(()) } /// 从response html body中取出单词文本 fn parse_notepad_text(html: &str) -> Result<String, String> { if html.is_empty() { return Err("html is empty".to_string()); } debug!("parsing notepad html"); let id = "#content"; let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?; let document = Html::parse_document(html); document .select(&id_selector) .next() .map(|e| e.inner_html()) .ok_or_else(|| { error!("not found element {} in html: \n{}", id, html); format!("not found element {} in html", id) }) } } #[cfg(test)] mod tests { use super::*; const CONFIG_PATH: &str = "config.yml"; #[tokio::test] async fn try_login() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; client.login().await.map_err(|e| format!("{:?}", e))?; Ok(()) } #[tokio::test] async fn get_notepad_list() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let notepads = client.get_notepad_list().await?; assert!(notepads.len() > 0); Ok(()) } #[tokio::test] async fn get_notepad_contents() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let 
notepads = client.get_notepad_list().await?; // for notepad in notepads { let contents = client.get_notepad_contents(&notepads[0].notepad_id).await?; assert!(contents.len() > 0); assert!(contents.contains("\n")); // } Ok(()) } #[allow(dead_code)] fn init_log() { pretty_env_logger::formatted_builder() // .format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args())) .filter_module("dict", log::LevelFilter::Trace) .init(); } #[tokio::test] async fn refresh_captcha() -> Result<(), String> { let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let data = client.refresh_captcha().await?; assert!(data.len() > 0); // assert!(path.is_file()); // let new_file = std::fs::read(path).map_err(|e| format!("{:?}", e))?; // if let Ok(old_file) = old_file { // assert_ne!(old_file, new_file); // } Ok(()) } }
is_private: u8, notepad_id: String, title: String,
random_line_split
maimemo_client.rs
use crate::client::*; use crate::config::*; use chrono::Local; use cookie_store::CookieStore; use reqwest::Client; use scraper::{Html, Selector}; use serde::{Deserialize, Serialize}; use std::fmt; /// notepad包含必要的header info和内容detail #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Notepad { is_private: u8, notepad_id: String, title: String, brief: String, created_time: Option<String>, updated_time: Option<String>, contents: Option<String>, } impl Notepad { pub fn get_notepad_id(&self) -> &str { &self.notepad_id } pub fn set_contents(&mut self, contents: Option<String>) { self.contents = contents; } pub fn get_contents(&self) -> Option<&str> { self.contents.as_deref() } pub fn get_contents_mut(&mut self) -> Option<&mut String> { self.contents.as_mut() } } impl fmt::Display for Notepad { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut temp = self.clone(); // 仅输出第一行 与 total length let contents = temp.contents.as_mut().unwrap(); let total_len = contents.len(); contents.drain(contents.find("\n").unwrap_or(total_len)..); contents.push_str("... 
total length: "); contents.push_str(&total_len.to_string()); write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap()) } } #[derive(Debug, Serialize, Deserialize)] struct ResponseResult { error: String, valid: i32, total: usize, notepad: Option<Vec<Notepad>>, } /// maimemo提供一些访问操作。 pub struct MaimemoClient { client: Client, config: AppConfig, cookie_store: CookieStore, user_token_name: String, } impl std::ops::Drop for MaimemoClient { /// 在退出时保存cookie store fn drop(&mut self) { if let Some(path) = self.config.get_cookie_path() { if let Err(e) = save_cookie_store(path, &self.cookie_store) { error!("save cookie store failed: {}", e); } } } } impl MaimemoClient { /// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。 pub fn new(config: AppConfig) -> Result<Self, String> { let cookie_store = build_cookie_store(config.get_cookie_path())?; Ok(Self { client: build_general_client()?, config, cookie_store: cookie_store, user_token_name: "userToken".to_string(), }) } pub fn get_user_token_val(&self) -> Option<&str> { self.cookie_store .get("www.maimemo.com", "/", &self.user_token_name) .map(|c| c.value()) } pub fn has_logged(&self) -> bool { self.get_user_token_val().is_some() } /// 登录并更新config.cookies pub async fn login(&mut self) -> Result<(), String> { let req_name = "login"; let form = [ ("email", self.config.get_username()), ("password", self.config.get_password()), ]; let resp = send_request( &self.config, &self.client, &self.cookie_store, req_name, |url| url.to_string(), Some(&form), ) .await?; // login failed // Check if the user token exists update_set_cookies(&mut self.cookie_store, &resp); if !self.has_logged() { error!( "update cookie store failed. not found cookie: [{}] in cookie_store", self.user_token_name ); Err("login failed. 
not found cookie store".to_string()) } else { debug!("login successful"); Ok(()) } } /// 提供完整的notepad list调用get_notepad_list与get_notepad_contents pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> { let mut notepads = self.get_notepad_list().await?; for notepad in &mut notepads { let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?; notepad.set_contents(Some(contents)); } Ok(notepads) } /// 获取notepad list pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-search"; // ?token={user_token} let url_handler = |url: &str| { let user_token = self.get_user_token_val().expect("not found user token"); url.to_string() + user_token }; let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1}); let resp = send_request( &self.config, &self.client, &self.cookie_store, req_name, url_handler, Some(&payload), ) .await?; let result = resp .json::<ResponseResult>() .await .map_err(|e| format!("{:?}", e))?; if let Some(notepad) = result.notepad { debug!("got notepad list. len: {}", notepad.len()); Ok(notepad) } else { error!("get notepad failed: {:?}", result); Err("get notepad failed".to_string()) } } /// 获取notepad中单词文本 pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-detail"; let url_handler = |url: &str| url.to_string() + notepad_id; let resp = send_request_nobody( &self.config, &self.client, &self.cookie_store, req_name, url_handler, ) .await?; Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?) 
} /// 刷新下载notepad对应的captcha返回文件全路径。 pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "service-captcha"; let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string(); let resp = send_request_nobody( &self.config, &self.client, &self.cookie_store, req_name, url_handler, ) .await .map_err(|e| format!("{:?}", e))?; let contents = resp .bytes() .await .map(|body| body.to_vec()) .map_err(|e| format!("{:?}", e))?; Ok(contents) } /// 保存notepad /// /// 注意:maimemo要求先获取验证码,再保存。并且要求是同一机器发送的。在win host浏览器刷新验证码, /// 但在wsl2 保存则不会生效,很可能是对比的发送的数据包是否来自同一机器 pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-save"; if notepad.contents.is_none() { return Err("notepad contents is none".to_string()); } // form let mut form = std::collections::HashMap::new(); form.insert("id".to_string(), notepad.notepad_id); form.insert("title".to_string(), notepad.title); form.insert("brief".to_string(), notepad.brief); form.insert("content".to_string(), notepad.contents.unwrap()); form.insert( "is_private".to_string(), (notepad.is_private == 1).to_string(), ); form.insert("captcha".to_string(), captcha); let form = form .iter() .map(|(key, val)| (key.as_str(), val.as_str())) .collect::<Vec<_>>(); #[derive(Debug, Serialize, Deserialize)] struct RespResult { valid: i8, #[serde(rename = "errorCode")] error: Option<String>, } let result: RespResult = send_request( &self.config, &self.client, &self.cookie_store, req_name, |url| url.to_string(), Some(&form), ) .await? 
.json::<RespResult>() .await .map_err(|e| format!("{:?}", e))?; if let Some(e) = &result.error { error!("save notepad failed: {:?}", result); return Err(format!("save notepad failed: {}", e)); } debug!("save_notepad successful"); Ok(()) } /// 从response html body中取出单词文本 fn parse_notepad_text(html: &str) -> Result<String, String> { if html.is_empty() { return Err("html is empty".to_string()); } debug!("parsing notepad html"); let id = "#content"; let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?; let document = Html::parse_document(html); document .select(&id
gin().await.map_err(|e| format!("{:?}", e))?; Ok(()) } #[tokio::test] async fn get_notepad_list() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let notepads = client.get_notepad_list().await?; assert!(notepads.len() > 0); Ok(()) } #[tokio::test] async fn get_notepad_contents() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let notepads = client.get_notepad_list().await?; // for notepad in notepads { let contents = client.get_notepad_contents(&notepads[0].notepad_id).await?; assert!(contents.len() > 0); assert!(contents.contains("\n")); // } Ok(()) } #[allow(dead_code)] fn init_log() { pretty_env_logger::formatted_builder() // .format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args())) .filter_module("dict", log::LevelFilter::Trace) .init(); } #[tokio::test] async fn refresh_captcha() -> Result<(), String> { let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let data = client.refresh_captcha().await?; assert!(data.len() > 0); // assert!(path.is_file()); // let new_file = std::fs::read(path).map_err(|e| format!("{:?}", e))?; // if let Ok(old_file) = old_file { // assert_ne!(old_file, new_file); // } Ok(()) } }
_selector) .next() .map(|e| e.inner_html()) .ok_or_else(|| { error!("not found element {} in html: \n{}", id, html); format!("not found element {} in html", id) }) } } #[cfg(test)] mod tests { use super::*; const CONFIG_PATH: &str = "config.yml"; #[tokio::test] async fn try_login() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; client.lo
identifier_body
maimemo_client.rs
use crate::client::*; use crate::config::*; use chrono::Local; use cookie_store::CookieStore; use reqwest::Client; use scraper::{Html, Selector}; use serde::{Deserialize, Serialize}; use std::fmt; /// notepad包含必要的header info和内容detail #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Notepad { is_private: u8, notepad_id: String, title: String, brief: String, created_time: Option<String>, updated_time: Option<String>, contents: Option<String>, } impl Notepad { pub fn get_notepad_id(&self) -> &str { &self.notepad_id } pub fn set_contents(&mut self, contents: Option<String>) { self.contents = contents; } pub fn get_contents(&self) -> Option<&str> { self.contents.as_deref() } pub fn get_contents_mut(&mut self) -> Option<&mut String> { self.contents.as_mut() } } impl fmt::Display for Notepad { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut temp = self.clone(); // 仅输出第一行 与 total length let contents = temp.contents.as_mut().unwrap(); let total_len = contents.len(); contents.drain(contents.find("\n").unwrap_or(total_len)..); contents.push_str("... 
total length: "); contents.push_str(&total_len.to_string()); write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap()) } } #[derive(Debug, Serialize, Deserialize)] struct ResponseResult { error: String, valid: i32, total: usize, notepad: Option<Vec<Notepad>>, } /// maimemo提供一些访问操作。 pub struct MaimemoClient { client: Client, config: AppConfig, cookie_store: CookieStore, user_token_name: String, } impl std::ops::Drop for MaimemoClient { /// 在退出时保存cookie store fn drop(&mut self) { if let Some(path) = self.config.get_cookie_path() { if let Err(e) = save_cookie_store(path, &self.cookie_store) { error!("save cookie store failed: {}", e); } } } } impl MaimemoClient { /// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。 pub fn new(config: AppConfig) -> Result<Self, String> { let cookie_store = build_cookie_store(config.get_cookie_path())?; Ok(Self { client: build_general_client()?, config, cookie_store: cookie_store, user_token_name: "userToken".to_string(), }) } pub fn get_user_token_val(&self) -> Option<&str> { self.cookie_store .get("www.maimemo.com", "/", &self.user_token_name) .map(|c| c.value()) } pub fn has_logged(&self) -> bool { self.get_user_token_val().is_some() } /// 登录并更新config.cookies pub async fn login(&mut self) -> Result<(), String> { let req_name = "login"; let form = [ ("email", self.config.get_username()), ("password", self.config.get_password()), ]; let resp = send_request( &self.config, &self.client, &self.cookie_store, req_name, |url| url.to_string(), Some(&form), ) .await?; // login failed // Check if the user token exists update_set_cookies(&mut self.cookie_store, &resp); if !self.has_logged() { error!( "update cookie store failed. not found cookie: [{}] in cookie_store", self.user_token_name ); Err("login failed. 
not found cookie store".to_string()) } else { debug!("login successful"); Ok(()) } } /// 提供完整的notepad list调用get_notepad_list与get_notepad_contents pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> { let mut notepads = self.get_notepad_list().await?; for notepad in &mut notepads { let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?; notepad.set_contents(Some(contents)); } Ok(notepads) } /// 获取notepad list pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-search"; // ?token={user_
ser_token = self.get_user_token_val().expect("not found user token"); url.to_string() + user_token }; let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1}); let resp = send_request( &self.config, &self.client, &self.cookie_store, req_name, url_handler, Some(&payload), ) .await?; let result = resp .json::<ResponseResult>() .await .map_err(|e| format!("{:?}", e))?; if let Some(notepad) = result.notepad { debug!("got notepad list. len: {}", notepad.len()); Ok(notepad) } else { error!("get notepad failed: {:?}", result); Err("get notepad failed".to_string()) } } /// 获取notepad中单词文本 pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-detail"; let url_handler = |url: &str| url.to_string() + notepad_id; let resp = send_request_nobody( &self.config, &self.client, &self.cookie_store, req_name, url_handler, ) .await?; Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?) 
} /// 刷新下载notepad对应的captcha返回文件全路径。 pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "service-captcha"; let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string(); let resp = send_request_nobody( &self.config, &self.client, &self.cookie_store, req_name, url_handler, ) .await .map_err(|e| format!("{:?}", e))?; let contents = resp .bytes() .await .map(|body| body.to_vec()) .map_err(|e| format!("{:?}", e))?; Ok(contents) } /// 保存notepad /// /// 注意:maimemo要求先获取验证码,再保存。并且要求是同一机器发送的。在win host浏览器刷新验证码, /// 但在wsl2 保存则不会生效,很可能是对比的发送的数据包是否来自同一机器 pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-save"; if notepad.contents.is_none() { return Err("notepad contents is none".to_string()); } // form let mut form = std::collections::HashMap::new(); form.insert("id".to_string(), notepad.notepad_id); form.insert("title".to_string(), notepad.title); form.insert("brief".to_string(), notepad.brief); form.insert("content".to_string(), notepad.contents.unwrap()); form.insert( "is_private".to_string(), (notepad.is_private == 1).to_string(), ); form.insert("captcha".to_string(), captcha); let form = form .iter() .map(|(key, val)| (key.as_str(), val.as_str())) .collect::<Vec<_>>(); #[derive(Debug, Serialize, Deserialize)] struct RespResult { valid: i8, #[serde(rename = "errorCode")] error: Option<String>, } let result: RespResult = send_request( &self.config, &self.client, &self.cookie_store, req_name, |url| url.to_string(), Some(&form), ) .await? 
.json::<RespResult>() .await .map_err(|e| format!("{:?}", e))?; if let Some(e) = &result.error { error!("save notepad failed: {:?}", result); return Err(format!("save notepad failed: {}", e)); } debug!("save_notepad successful"); Ok(()) } /// 从response html body中取出单词文本 fn parse_notepad_text(html: &str) -> Result<String, String> { if html.is_empty() { return Err("html is empty".to_string()); } debug!("parsing notepad html"); let id = "#content"; let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?; let document = Html::parse_document(html); document .select(&id_selector) .next() .map(|e| e.inner_html()) .ok_or_else(|| { error!("not found element {} in html: \n{}", id, html); format!("not found element {} in html", id) }) } } #[cfg(test)] mod tests { use super::*; const CONFIG_PATH: &str = "config.yml"; #[tokio::test] async fn try_login() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; client.login().await.map_err(|e| format!("{:?}", e))?; Ok(()) } #[tokio::test] async fn get_notepad_list() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let notepads = client.get_notepad_list().await?; assert!(notepads.len() > 0); Ok(()) } #[tokio::test] async fn get_notepad_contents() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let notepads = client.get_notepad_list().await?; // for notepad in notepads { let contents = client.get_notepad_contents(&notepads[0].notepad_id).await?; assert!(contents.len() > 0); assert!(contents.contains("\n")); // } Ok(()) } #[allow(dead_code)] fn init_log() { pretty_env_logger::formatted_builder() // 
.format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args())) .filter_module("dict", log::LevelFilter::Trace) .init(); } #[tokio::test] async fn refresh_captcha() -> Result<(), String> { let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let data = client.refresh_captcha().await?; assert!(data.len() > 0); // assert!(path.is_file()); // let new_file = std::fs::read(path).map_err(|e| format!("{:?}", e))?; // if let Ok(old_file) = old_file { // assert_ne!(old_file, new_file); // } Ok(()) } }
token} let url_handler = |url: &str| { let u
conditional_block
maimemo_client.rs
use crate::client::*; use crate::config::*; use chrono::Local; use cookie_store::CookieStore; use reqwest::Client; use scraper::{Html, Selector}; use serde::{Deserialize, Serialize}; use std::fmt; /// notepad包含必要的header info和内容detail #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Notepad { is_private: u8, notepad_id: String, title: String, brief: String, created_time: Option<String>, updated_time: Option<String>, contents: Option<String>, } impl Notepad { pub fn get_notepad_id(&self) -> &str { &self.notepad_id } pub fn set_contents(&mut self, contents: Option<String>) { self.contents = contents; } pub fn get_contents(&self) -> Option<&str> { self.contents.as_deref() } pub fn get_contents_mut(&mut self) -> Option<&mut String> { self.contents.as_mut() } } impl fmt::Display for Notepad { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut temp = self.clone(); // 仅输出第一行 与 total length let contents = temp.contents.as_mut().unwrap(); let total_len = contents.len(); contents.drain(contents.find("\n").unwrap_or(total_len)..); contents.push_str("... 
total length: "); contents.push_str(&total_len.to_string()); write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap()) } } #[derive(Debug, Serialize, Deserialize)] struct ResponseResult { error: String, valid: i32, total: usize, notepad: Option<Vec<Notepad>>, } /// maimemo提供一些访问操作。 pub struct MaimemoClient { client: Client, config: AppConfig, cookie_store: CookieStore, user_token_name: String, } impl std::ops::Drop for MaimemoClient { /// 在退出时保存cookie store fn drop(&mut self) { if let Some(path) = self.config.get_cookie_path() { if let Err(e) = save_cookie_store(path, &self.cookie_store) { error!("save cookie store failed: {}", e); } } } } impl MaimemoClient { /// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。 pub fn new(config: AppConfig) -> Result<Self, String> { let cookie_store = build_cookie_store(config.get_cookie_path())?; Ok(Self { client: build_general_client()?, config, cookie_store: cookie_store, user_token_name: "userToken".to_string(), }) } pub fn get_user_token_val(&self) -> Option<&str> { self.cookie_store .get("www.maimemo.com", "/", &self.user_token_name) .map(|c| c.value()) } pub fn has_logged(&self) -> bool { self.get_user_token_val().is_some() } /// 登录并更新config.cookies pub async fn login(&mut self) -> Result<(), String> { let req_name = "login"; let form = [ ("email", self.config.get_username()), ("password", self.config.get_password()), ]; let resp = send_request( &self.config, &self.client, &self.cookie_store, req_name, |url| url.to_string(), Some(&form), ) .await?; // login failed // Check if the user token exists update_set_cookies(&mut self.cookie_store, &resp); if !self.has_logged() { error!( "update cookie store failed. not found cookie: [{}] in cookie_store", self.user_token_name ); Err("login failed. 
not found cookie store".to_string()) } else { debug!("login successful"); Ok(()) } } /// 提供完整的notepad list调用get_notepad_list与get_notepad_contents pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> { let mut notepads = self.get_notepad_list().await?; for notepad in &mut notepads { let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?; notepad.set_contents(Some(contents)); } Ok(notepads) } /// 获取notepad list pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-search"; // ?token={user_token} let url_handler = |url: &str| { let user_token = self.get_user_token_val().expect("not found user token"); url.to_string() + user_token }; let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1}); let resp = send_request( &self.config, &self.client, &self.cookie_store, req_name, url_handler, Some(&payload), ) .await?; let result = resp .json::<ResponseResult>() .await .map_err(|e| format!("{:?}", e))?; if let Some(notepad) = result.notepad { debug!("got notepad list. len: {}", notepad.len()); Ok(notepad) } else { error!("get notepad failed: {:?}", result); Err("get notepad failed".to_string()) } } /// 获取notepad中单词文本 pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-detail"; let url_handler = |url: &str| url.to_string() + notepad_id; let resp = send_request_nobody( &self.config, &self.client, &self.cookie_store, req_name, url_handler, ) .await?; Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?) 
} /// 刷新下载notepad对应的captcha返回文件全路径。 pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "service-captcha"; let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string(); let resp = send_request_nobody( &self.config, &self.client, &self.cookie_store, req_name, url_handler, ) .await .map_err(|e| format!("{:?}", e))?; let contents = resp .bytes() .await .map(|body| body.to_vec()) .map_err(|e| format!("{:?}", e))?; Ok(contents) } /// 保存notepad /// /// 注意:maimemo要求先获取验证码,再保存。并且要求是同一机器发送的。在win host浏览器刷新验证码, /// 但在wsl2 保存则不会生效,很可能是对比的发送的数据包是否来自同一机器 pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> { if !self.has_logged() { return Err("not logged in".to_string()); } let req_name = "notepad-save"; if notepad.contents.is_none() { return Err("notepad contents is none".to_string()); } // form let mut form = std::collections::HashMap::new(); form.insert("id".to_string(), notepad.notepad_id); form.insert("title".to_string(), notepad.title); form.insert("brief".to_string(), notepad.brief); form.insert("content".to_string(), notepad.contents.unwrap()); form.insert( "is_private".to_string(), (notepad.is_private == 1).to_string(), ); form.insert("captcha".to_string(), captcha); let form = form .iter() .map(|(key, val)| (key.as_str(), val.as_str())) .collect::<Vec<_>>(); #[derive(Debug, Serialize, Deserialize)] struct RespResult { valid: i8, #[serde(rename = "errorCode")] error: Option<String>, } let result: RespResult = send_request( &self.config, &self.client, &self.cookie_store, req_name, |url| url.to_string(), Some(&form), ) .await? 
.json::<RespResult>() .await .map_err(|e| format!("{:?}", e))?; if let Some(e) = &result.error { error!("save notepad failed: {:?}", result); return Err(format!("save notepad failed: {}", e)); } debug!("save_notepad successful"); Ok(()) } /// 从response html body中取出单词文本 fn parse_notepad_text(html: &str) -> Result<String, String> { if html.is_empty() { return Err("html is empty".to_string()); } debug!("parsing notepad html"); let id = "#content"; let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?; let document = Html::parse_document(html); document .select(&id_selector) .next() .map(|e| e.inner_html()) .ok_or_else(|| { error!("not found element {} in html: \n{}", id, html); format!("not found element {} in html", id) }) } } #[cfg(test)] mod tests { use super::*; const CONFIG_PATH: &str = "config.yml"; #[tokio::test] async fn try_login() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; client.login().await.map_err(|e| format!("{:?}", e))?; Ok(()) } #[tokio::test] async fn get_notepad_list() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let notepads = client.get_notepad_list().await
t!(notepads.len() > 0); Ok(()) } #[tokio::test] async fn get_notepad_contents() -> Result<(), String> { init_log(); let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let notepads = client.get_notepad_list().await?; // for notepad in notepads { let contents = client.get_notepad_contents(&notepads[0].notepad_id).await?; assert!(contents.len() > 0); assert!(contents.contains("\n")); // } Ok(()) } #[allow(dead_code)] fn init_log() { pretty_env_logger::formatted_builder() // .format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args())) .filter_module("dict", log::LevelFilter::Trace) .init(); } #[tokio::test] async fn refresh_captcha() -> Result<(), String> { let config = Config::from_yaml_file(CONFIG_PATH).unwrap(); let mut client = MaimemoClient::new(config.maimemo.unwrap())?; if !client.has_logged() { client.login().await?; } let data = client.refresh_captcha().await?; assert!(data.len() > 0); // assert!(path.is_file()); // let new_file = std::fs::read(path).map_err(|e| format!("{:?}", e))?; // if let Ok(old_file) = old_file { // assert_ne!(old_file, new_file); // } Ok(()) } }
?; asser
identifier_name
note.py
import threading import time from DataBase import database from source import source from threadings import Tnuls from threads import qthreadt import threading import crawler ustr="" strl="" nustr="" bool=True urllist="" def bigin_click(self): # starttime=time.time(); #记录开始时间 if self.bool==True: threads = [] #创建一个线程列表,用于存放需要执行的子线程 t1 = threading.Thread(target=self.threadcl) #创建第一个子线程,子线程的任务是调用task1函数,注意函数名后不能 threads.append(t1)#将这个子线程添加到线程列表中 t1.setDaemon(True) t1.start() self.bool=False # for t in threads: #遍历线程列表 # t.setDaemon(True) #将线程声明为守护线程,必须在start() 方法调用之前设置,如果不设置为守护线程程序会被无限挂起 # t.start() #启动子线程 def show_click(self): self.strl=self.comboBox.currentText() self.textEdit_get.setText(self.strl) self.textEdit_getstr.setText(self.ustr) def crawling(self, url): cl=crawler.crawler() cl.gethtml(url) self.urllist=cl.geturllist() nexturllist=cl.getnexturl() self.getnexturl(nexturllist, cl) for i in range(len(self.urllist)): ul=self.urllist[i] self.ustr=self.ustr+str(i)+"、"+ul[1]+" :"+ul[0]+"\n\n" #for ur in self.urllist: # cl.gethtml(ur[0]) #sl=cl.getstrlist() # self.strl=self.strl+sl def getnexturl(self, nexturllist, cl): for i in range(len(nexturllist)): nul=nexturllist[i] self.nustr=self.nustr+nul[1]+nul[0]+"\n" cl.gethtml("http://gz.58.com"+nul[0]) uls=cl.geturllist() if cl.isend(): if i==(len(nexturllist)-1): cl.gethtml("http://gz.58.com"+nul[0]) nus=cl.getnexturl() del nus[0:(i+1)] self.getnexturl(nus, cl) self.urllist=self.urllist+uls def threadcl(self): url="http://gz.58.com/tech/?key=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&cmcskey=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&final=1&jump=2&specialtype=gls&canclequery=isbiz%3D0&sourcetype=4" self.crawling(url) '''#时针 transform.translate(50,50) transform.rotate(hour_angle) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(Qt.NoPen) painter.setBrush(QBrush(Qt.darkRed)) painter.drawPolygon(QPolygonF(hourPoints)) transform.reset() #分针 transform.translate(50,50) 
transform.rotate(minite_angle) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setBrush(QBrush(Qt.darkGreen)) painter.drawPolygon(QPolygonF(minPoints)) transform.reset() #秒针 transform.translate(50,50) transform.rotate(-53)#second_angle transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(QPen(Qt.darkCyan,1)) painter.drawLine(50,50,90,20) ''' class Window(QMainWindow): def __init__(self): super(Window, self).__init__() self.initUI() def initUI(self): self.setGeometry(100, 35, 1200, 670) def paintEvent(self,event): source.ui.groupBox_show.close() pt=QPainter(self) pt.begin(self) #self.drawRect(pt) self.drawclock(pt) pt.end() def drawRect(self, pt): pen1=QPen(QColor(225, 225, 225, 225)) rec=QRect(500, 500,500, 500) pt.setPen(pen1) pt.drawRect(rec) pt.setBrush(QColor(0, 0, 0, 255)) pt.drawRect(300, 300, 300, 600) def drawclock(self, painter): painter.setRenderHint(QPainter.Antialiasing) #设置表盘中的文字字体 font=QFont("Times",6) fm=QFontMetrics(font) fontRect=fm.boundingRect("99")#获取绘制字体的矩形范围 #分针坐标点 minPoints=[QPointF(50,25), QPointF(48,50), QPointF(52,50)] #时钟坐标点 hourPoints=[QPointF(50,35), QPointF(48,50), QPointF(52,50)] side=min(self.width(),self.height()) painter.setViewport((2*self.width())/5,self.height()/16, (4*side)/7, (4*side)/7)#始终处于窗口中心位置显示 #设置QPainter的坐标系统,无论窗体大小如何变化, #窗体左上坐标为(0,0),右下坐标为(100,100), #因此窗体中心坐标为(50,50) painter.setWindow(0,0,100,100) #绘制表盘,使用环形渐变色 niceBlue=QColor(150,150,200) haloGrident=QRadialGradient(50,50,50,50,50) haloGrident.setColorAt(0.0,Qt.lightGray) haloGrident.setColorAt(0.5,Qt.darkGray) haloGrident.setColorAt(0.9,Qt.white) haloGrident.setColorAt(1.0,niceBlue) painter.setBrush(haloGrident) painter.setPen(QPen(Qt.darkGray,1)) painter.drawEllipse(0,0,100,100) transform=QTransform() #绘制时钟为0的字,以及刻度 painter.setPen(QPen(Qt.black,1.5)) fontRect.moveCenter(QPoint(50,10+fontRect.height()/2)) painter.setFont(font) painter.drawLine(50,2,50,8)# 
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"0") for i in range(1,12): transform.translate(50, 50) transform.rotate(30) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.drawLine(50,2,50,8) painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"%d" % i) transform.reset() #绘制分钟刻度线 painter.setPen(QPen(Qt.blue,1)) for i in range(1,60): transform.translate(50,50) transform.rotate(6) transform.translate(-50,-50) if i%5!=0: painter.setWorldTransform(transform) painter.drawLine(50,2,50,5) transform.reset() #获取当前时间 currentTime=QTime().currentTime() #hour=currentTime.hour() if currentTime.hour()<12 else currentTime.hour()-12 #minite=currentTime.minute() second=currentTime.second() #获取所需旋转角度 #hour_angle=hour*30.0+(minite/60.0)*30.0 #minite_angle=(minite/60.0)*360.0 second_angle=second*6.0-53 source.ui.textEdit_get.setText(str(second)) self.draw_line(painter, transform, second_angle) self.draw_line(painter, transform) def draw_line(self, painter, transform, angle=-53): #秒针 transform.reset() transform.translate(50,50) transform.rotate(angle)#second_angle transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(QPen(Qt.darkCyan,1)) painter.drawLine(50,50,90,20) def drawRect(self, pt): pen1=QPen(QColor(225, 225, 225, 225)) rec=QRect(500, 500,500, 500) pt.setPen(pen1) pt.drawRect(rec) pt.setBrush(QColor(0, 0, 0, 255)) pt.drawRect(300, 300, 300, 600) #**********************************************************************8 from PyQt5 import QtCore, QtGui, QtWidgets from source import source from threadings import Tnuls import threading import time from ui_paint import Window def bigin_click(self): if not source.isbigan: if self.comboBox_2.currentText ()=="58同城": t=Tnuls(0) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="中华英才网": t=Tnuls(1) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="智联招聘": t=Tnuls(2) t.start() source.isbigan=True if self.comboBox_2.currentText 
()=="猎聘猎头网": t=Tnuls(3) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="卓博人才网": t=Tnuls(4) t.start() source.isbigan=True if self.c
t.start() source.isbigan=True if source.isbigan: t1 = threading.Thread(target=self.threadcl) #创建第一个子线程,子线程的任务 t1.setDaemon(True) t1.start() MainWindow.update() def show_click(self): MainWindow.getdata() MainWindow.setUpdatesEnabled(False); MainWindow.setUpdatesEnabled(True); MainWindow.repaint(); if len(MainWindow.bestlist)>0: bestlist=MainWindow.bestlist antext=source.Eanalyze[self.comboBox.currentText ()] analyze_text="" for i in range(len(antext)): analyze_text=analyze_text+antext[i] if i<len(bestlist): analyze_text=analyze_text+bestlist[i] self.textEdit_get.setText(analyze_text) def threadcl(self): #database.open()#打开数据库 source.open_txt() ''' for i in range(1, 2):#len(source.urllist) t=Tnuls(i) t.start() time.sleep(0.5) while source.threadnum>800: time.sleep(0.3) ''' while source.isbigan: time.sleep(1) if source.threadnum<1: source.isgetweb=True source.isbigan=False # database.close()#关闭数据库 source.close_txt() source.copy_txt() #------------------------------------------------------------------------------------------ self.pushButton_bigin.clicked.connect(MainWindow.bigin_click) self.pushButton_show.clicked.connect(MainWindow.show_click) self.pushButton_back.clicked.connect(MainWindow.ui_reshow) self.pushButton.clicked.connect(MainWindow.deep_ay) self.pushButton_2.clicked.connect(MainWindow.duibi_hx) self.pushButton_3.clicked.connect(MainWindow.duibi_zx) self.pushButton_4.clicked.connect(MainWindow.duibi_nl) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) MainWindow =Window() #QtWidgets.QMainWindow() source.ui = Ui_MainWindow() source.ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_())
omboBox_2.currentText ()=="前程无忧": t=Tnuls(5)
conditional_block
note.py
import threading import time from DataBase import database from source import source from threadings import Tnuls from threads import qthreadt import threading import crawler ustr="" strl="" nustr="" bool=True urllist="" def bigin_click(self): # starttime=time.time(); #记录开始时间 if self.bool==True: threads = [] #创建一个线程列表,用于存放需要执行的子线程 t1 = threading.Thread(target=self.threadcl) #创建第一个子线程,子线程的任务是调用task1函数,注意函数名后不能 threads.append(t1)#将这个子线程添加到线程列表中 t1.setDaemon(True) t1.start() self.bool=False # for t in threads: #遍历线程列表 # t.setDaemon(True) #将线程声明为守护线程,必须在start() 方法调用之前设置,如果不设置为守护线程程序会被无限挂起 # t.start() #启动子线程 def show_click(self): self.strl=self.comboBox.currentText() self.textEdit_get.setText(self.strl) self.textEdit_getstr.setText(self.ustr) def crawling(self, url): cl=crawler.crawler() cl.gethtml(url) self.urllist=cl.geturllist() nexturllist=cl.getnexturl() self.getnexturl(nexturllist, cl) for i in range(len(self.urllist)): ul=self.urllist[i] self.ustr=self.ustr+str(i)+"、"+ul[1]+" :"+ul[0]+"\n\n" #for ur in self.urllist: # cl.gethtml(ur[0]) #sl=cl.getstrlist() # self.strl=self.strl+sl def getnexturl(self, nexturllist, cl): for i in range(len(nexturllist)): nul=nexturllist[i] self.nustr=self.nustr+nul[1]+nul[0]+"\n" cl.gethtml("http://gz.58.com"+nul[0]) uls=cl.geturllist() if cl.isend(): if i==(len(nexturllist)-1): cl.gethtml("http://gz.58.com"+nul[0]) nus=cl.getnexturl() del nus[0:(i+1)] self.getnexturl(nus, cl) self.urllist=self.urllist+uls def threadcl(self): url="http://gz.58.com/tech/?key=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&cmcskey=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&final=1&jump=2&specialtype=gls&canclequery=isbiz%3D0&sourcetype=4" self.crawling(url) '''#时针 transform.translate(50,50) transform.rotate(hour_angle) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(Qt.NoPen) painter.setBrush(QBrush(Qt.darkRed)) painter.drawPolygon(QPolygonF(hourPoints)) transform.reset() #分针 transform.translate(50,50) 
transform.rotate(minite_angle) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setBrush(QBrush(Qt.darkGreen)) painter.drawPolygon(QPolygonF(minPoints)) transform.reset() #秒针 transform.translate(50,50) transform.rotate(-53)#second_angle transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(QPen(Qt.darkCyan,1)) painter.drawLine(50,50,90,20) ''' class Window(QMainWindow): def __init__(self): super(Window, self).__init__() self.initUI() def initUI(self): self.setGeometry(100, 35, 1200, 670) def paintEvent(self,event): source.ui.groupBox_show.close() pt=QPainter(self) pt.begi
#self.drawRect(pt) self.drawclock(pt) pt.end() def drawRect(self, pt): pen1=QPen(QColor(225, 225, 225, 225)) rec=QRect(500, 500,500, 500) pt.setPen(pen1) pt.drawRect(rec) pt.setBrush(QColor(0, 0, 0, 255)) pt.drawRect(300, 300, 300, 600) def drawclock(self, painter): painter.setRenderHint(QPainter.Antialiasing) #设置表盘中的文字字体 font=QFont("Times",6) fm=QFontMetrics(font) fontRect=fm.boundingRect("99")#获取绘制字体的矩形范围 #分针坐标点 minPoints=[QPointF(50,25), QPointF(48,50), QPointF(52,50)] #时钟坐标点 hourPoints=[QPointF(50,35), QPointF(48,50), QPointF(52,50)] side=min(self.width(),self.height()) painter.setViewport((2*self.width())/5,self.height()/16, (4*side)/7, (4*side)/7)#始终处于窗口中心位置显示 #设置QPainter的坐标系统,无论窗体大小如何变化, #窗体左上坐标为(0,0),右下坐标为(100,100), #因此窗体中心坐标为(50,50) painter.setWindow(0,0,100,100) #绘制表盘,使用环形渐变色 niceBlue=QColor(150,150,200) haloGrident=QRadialGradient(50,50,50,50,50) haloGrident.setColorAt(0.0,Qt.lightGray) haloGrident.setColorAt(0.5,Qt.darkGray) haloGrident.setColorAt(0.9,Qt.white) haloGrident.setColorAt(1.0,niceBlue) painter.setBrush(haloGrident) painter.setPen(QPen(Qt.darkGray,1)) painter.drawEllipse(0,0,100,100) transform=QTransform() #绘制时钟为0的字,以及刻度 painter.setPen(QPen(Qt.black,1.5)) fontRect.moveCenter(QPoint(50,10+fontRect.height()/2)) painter.setFont(font) painter.drawLine(50,2,50,8)# painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"0") for i in range(1,12): transform.translate(50, 50) transform.rotate(30) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.drawLine(50,2,50,8) painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"%d" % i) transform.reset() #绘制分钟刻度线 painter.setPen(QPen(Qt.blue,1)) for i in range(1,60): transform.translate(50,50) transform.rotate(6) transform.translate(-50,-50) if i%5!=0: painter.setWorldTransform(transform) painter.drawLine(50,2,50,5) transform.reset() #获取当前时间 currentTime=QTime().currentTime() #hour=currentTime.hour() if currentTime.hour()<12 else currentTime.hour()-12 
#minite=currentTime.minute() second=currentTime.second() #获取所需旋转角度 #hour_angle=hour*30.0+(minite/60.0)*30.0 #minite_angle=(minite/60.0)*360.0 second_angle=second*6.0-53 source.ui.textEdit_get.setText(str(second)) self.draw_line(painter, transform, second_angle) self.draw_line(painter, transform) def draw_line(self, painter, transform, angle=-53): #秒针 transform.reset() transform.translate(50,50) transform.rotate(angle)#second_angle transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(QPen(Qt.darkCyan,1)) painter.drawLine(50,50,90,20) def drawRect(self, pt): pen1=QPen(QColor(225, 225, 225, 225)) rec=QRect(500, 500,500, 500) pt.setPen(pen1) pt.drawRect(rec) pt.setBrush(QColor(0, 0, 0, 255)) pt.drawRect(300, 300, 300, 600) #**********************************************************************8 from PyQt5 import QtCore, QtGui, QtWidgets from source import source from threadings import Tnuls import threading import time from ui_paint import Window def bigin_click(self): if not source.isbigan: if self.comboBox_2.currentText ()=="58同城": t=Tnuls(0) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="中华英才网": t=Tnuls(1) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="智联招聘": t=Tnuls(2) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="猎聘猎头网": t=Tnuls(3) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="卓博人才网": t=Tnuls(4) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="前程无忧": t=Tnuls(5) t.start() source.isbigan=True if source.isbigan: t1 = threading.Thread(target=self.threadcl) #创建第一个子线程,子线程的任务 t1.setDaemon(True) t1.start() MainWindow.update() def show_click(self): MainWindow.getdata() MainWindow.setUpdatesEnabled(False); MainWindow.setUpdatesEnabled(True); MainWindow.repaint(); if len(MainWindow.bestlist)>0: bestlist=MainWindow.bestlist antext=source.Eanalyze[self.comboBox.currentText ()] analyze_text="" for i in range(len(antext)): analyze_text=analyze_text+antext[i] 
if i<len(bestlist): analyze_text=analyze_text+bestlist[i] self.textEdit_get.setText(analyze_text) def threadcl(self): #database.open()#打开数据库 source.open_txt() ''' for i in range(1, 2):#len(source.urllist) t=Tnuls(i) t.start() time.sleep(0.5) while source.threadnum>800: time.sleep(0.3) ''' while source.isbigan: time.sleep(1) if source.threadnum<1: source.isgetweb=True source.isbigan=False # database.close()#关闭数据库 source.close_txt() source.copy_txt() #------------------------------------------------------------------------------------------ self.pushButton_bigin.clicked.connect(MainWindow.bigin_click) self.pushButton_show.clicked.connect(MainWindow.show_click) self.pushButton_back.clicked.connect(MainWindow.ui_reshow) self.pushButton.clicked.connect(MainWindow.deep_ay) self.pushButton_2.clicked.connect(MainWindow.duibi_hx) self.pushButton_3.clicked.connect(MainWindow.duibi_zx) self.pushButton_4.clicked.connect(MainWindow.duibi_nl) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) MainWindow =Window() #QtWidgets.QMainWindow() source.ui = Ui_MainWindow() source.ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_())
n(self)
identifier_name
note.py
import threading import time from DataBase import database from source import source from threadings import Tnuls from threads import qthreadt import threading import crawler ustr="" strl="" nustr="" bool=True urllist="" def bigin_click(self): # starttime=time.time(); #记录开始时间 if self.bool==True: threads = [] #创建一个线程列表,用于存放需要执行的子线程 t1 = threading.Thread(target=self.threadcl) #创建第一个子线程,子线程的任务是调用task1函数,注意函数名后不能 threads.append(t1)#将这个子线程添加到线程列表中 t1.setDaemon(True) t1.start() self.bool=False # for t in threads: #遍历线程列表 # t.setDaemon(True) #将线程声明为守护线程,必须在start() 方法调用之前设置,如果不设置为守护线程程序会被无限挂起 # t.start() #启动子线程 def show_click(self): self.strl=self.comboBox.currentText() self.textEdit_get.setText(self.strl) self.textEdit_getstr.setText(self.ustr) def crawling(self, url): cl=crawler.crawler() cl.gethtml(url) self.urllist=cl.geturllist() nexturllist=cl.getnexturl() self.getnexturl(nexturllist, cl) for i in range(len(self.urllist)): ul=self.urllist[i] self.ustr=self.ustr+str(i)+"、"+ul[1]+" :"+ul[0]+"\n\n" #for ur in self.urllist: # cl.gethtml(ur[0]) #sl=cl.getstrlist() # self.strl=self.strl+sl def getnexturl(self, nexturllist, cl): for i in range(len(nexturllist)): nul=nexturllist[i] self.nustr=self.nustr+nul[1]+nul[0]+"\n" cl.gethtml("http://gz.58.com"+nul[0]) uls=cl.geturllist() if cl.isend(): if i==(len(nexturllist)-1): cl.gethtml("http://gz.58.com"+nul[0]) nus=cl.getnexturl() del nus[0:(i+1)] self.getnexturl(nus, cl) self.urllist=self.urllist+uls def threadcl(self): url="http://gz.58.com/tech/?key=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&cmcskey=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&final=1&jump=2&specialtype=gls&canclequery=isbiz%3D0&sourcetype=4" self.crawling(url) '''#时针 transform.translate(50,50) transform.rotate(hour_angle) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(Qt.NoPen) painter.setBrush(QBrush(Qt.darkRed)) painter.drawPolygon(QPolygonF(hourPoints)) transform.reset() #分针 transform.translate(50,50) 
transform.rotate(minite_angle) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setBrush(QBrush(Qt.darkGreen)) painter.drawPolygon(QPolygonF(minPoints)) transform.reset() #秒针 transform.translate(50,50) transform.rotate(-53)#second_angle transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(QPen(Qt.darkCyan,1)) painter.drawLine(50,50,90,20) ''' class Window(QMainWindow): def __init__(self): super(Window, self).__init__() self.initUI() def initUI(self): self.setGeometry(100, 35, 1200, 670) def paintEvent(self,event): source.ui.groupBox_show.close() pt=QPainter(self) pt.begin(self) #self.drawRect(pt) self.drawclock(pt) pt.end() def drawRect(self, pt): pen1=QPen(QColor(225, 225, 225, 225)) rec=QRect(500, 500,500, 500) pt.setPen(pen1) pt.drawRect(rec) pt.setBrush(QColor(0, 0, 0, 255)) pt.drawRect(300, 300, 300, 600) def drawclock(self, painter): painter.setRenderHint(QPainter.Antialiasing) #设置表盘中的文字字体 font=QFont("Times",6) fm=QFontMetrics(font) fontRect=fm.boundingRect("99")#获取绘制字体的矩形范围 #分针坐标点 minPoints=[QPointF(50,25), QPointF(48,50), QPointF(52,50)] #时钟坐标点 hourPoints=[QPointF(50,35), QPointF(48,50), QPointF(52,50)] side=min(self.width(),self.height()) painter.setViewport((2*self.width())/5,self.height()/16, (4*side)/7, (4*side)/7)#始终处于窗口中心位置显示 #设置QPainter的坐标系统,无论窗体大小如何变化, #窗体左上坐标为(0,0),右下坐标为(100,100), #因此窗体中心坐标为(50,50) painter.setWindow(0,0,100,100) #绘制表盘,使用环形渐变色 niceBlue=QColor(150,150,200) haloGrident=QRadialGradient(50,50,50,50,50) haloGrident.setColorAt(0.0,Qt.lightGray) haloGrident.setColorAt(0.5,Qt.darkGray) haloGrident.setColorAt(0.9,Qt.white) haloGrident.setColorAt(1.0,niceBlue) painter.setBrush(haloGrident) painter.setPen(QPen(Qt.darkGray,1)) painter.drawEllipse(0,0,100,100) transform=QTransform() #绘制时钟为0的字,以及刻度 painter.setPen(QPen(Qt.black,1.5)) fontRect.moveCenter(QPoint(50,10+fontRect.height()/2)) painter.setFont(font) painter.drawLine(50,2,50,8)# 
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"0") for i in range(1,12): transform.translate(50, 50) transform.rotate(30) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.drawLine(50,2,50,8) painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"%d" % i) transform.reset() #绘制分钟刻度线 painter.setPen(QPen(Qt.blue,1)) for i in range(1,60): transform.translate(50,50) transform.rotate(6) transform.translate(-50,-50) if i%5!=0: painter.setWorldTransform(transform) painter.drawLine(50,2,50,5) transform.reset() #获取当前时间 currentTime=QTime().currentTime() #hour=currentTime.hour() if currentTime.hour()<12 else currentTime.hour()-12 #minite=currentTime.minute() second=currentTime.second() #获取所需旋转角度 #hour_angle=hour*30.0+(minite/60.0)*30.0 #minite_angle=(minite/60.0)*360.0 second_angle=second*6.0-53 source.ui.textEdit_get.setText(str(second)) self.draw_line(painter, transform, second_angle) self.draw_line(painter, transform) def draw_line(self, painter, transform, angle=-53): #秒针 transform.reset() transform.translate(50,50) transform.rotate(angle)#second_angle transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(QPen(Qt.darkCyan,1)) painter.drawLine(50,50,90,20) def drawRect(self, pt): pen1=QPen(QColor(225, 225, 225, 225)) rec=QRect(500, 500,500, 500) pt.setPen(pen1) pt.drawRect(rec) pt.setBrush(QColor(0, 0, 0, 255)) pt.drawRect(300, 300, 300, 600)
import threading import time from ui_paint import Window def bigin_click(self): if not source.isbigan: if self.comboBox_2.currentText ()=="58同城": t=Tnuls(0) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="中华英才网": t=Tnuls(1) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="智联招聘": t=Tnuls(2) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="猎聘猎头网": t=Tnuls(3) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="卓博人才网": t=Tnuls(4) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="前程无忧": t=Tnuls(5) t.start() source.isbigan=True if source.isbigan: t1 = threading.Thread(target=self.threadcl) #创建第一个子线程,子线程的任务 t1.setDaemon(True) t1.start() MainWindow.update() def show_click(self): MainWindow.getdata() MainWindow.setUpdatesEnabled(False); MainWindow.setUpdatesEnabled(True); MainWindow.repaint(); if len(MainWindow.bestlist)>0: bestlist=MainWindow.bestlist antext=source.Eanalyze[self.comboBox.currentText ()] analyze_text="" for i in range(len(antext)): analyze_text=analyze_text+antext[i] if i<len(bestlist): analyze_text=analyze_text+bestlist[i] self.textEdit_get.setText(analyze_text) def threadcl(self): #database.open()#打开数据库 source.open_txt() ''' for i in range(1, 2):#len(source.urllist) t=Tnuls(i) t.start() time.sleep(0.5) while source.threadnum>800: time.sleep(0.3) ''' while source.isbigan: time.sleep(1) if source.threadnum<1: source.isgetweb=True source.isbigan=False # database.close()#关闭数据库 source.close_txt() source.copy_txt() #------------------------------------------------------------------------------------------ self.pushButton_bigin.clicked.connect(MainWindow.bigin_click) self.pushButton_show.clicked.connect(MainWindow.show_click) self.pushButton_back.clicked.connect(MainWindow.ui_reshow) self.pushButton.clicked.connect(MainWindow.deep_ay) self.pushButton_2.clicked.connect(MainWindow.duibi_hx) self.pushButton_3.clicked.connect(MainWindow.duibi_zx) 
self.pushButton_4.clicked.connect(MainWindow.duibi_nl) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) MainWindow =Window() #QtWidgets.QMainWindow() source.ui = Ui_MainWindow() source.ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_())
#**********************************************************************8 from PyQt5 import QtCore, QtGui, QtWidgets from source import source from threadings import Tnuls
random_line_split
note.py
import threading import time from DataBase import database from source import source from threadings import Tnuls from threads import qthreadt import threading import crawler ustr="" strl="" nustr="" bool=True urllist="" def bigin_click(self): # starttime=time.time(); #记录开始时间 if self.bool==True: threads = [] #创建一个线程列表,用于存放需要执行的子线程 t1 = threading.Thread(target=self.threadcl) #创建第一个子线程,子线程的任务是调用task1函数,注意函数名后不能 threads.append(t1)#将这个子线程添加到线程列表中 t1.setDaemon(True) t1.start() self.bool=False # for t in threads: #遍历线程列表 # t.setDaemon(True) #将线程声明为守护线程,必须在start() 方法调用之前设置,如果不设置为守护线程程序会被无限挂起 # t.start() #启动子线程 def show_click(self): self.strl=self.comboBox.currentText() self.textEdit_get.setText(self.strl) self.textEdit_getstr.setText(self.ustr) def crawling(self, url): cl=crawler.crawler() cl.gethtml(url) self.urllist=cl.geturllist() nexturllist=cl.getnexturl() self.getnexturl(nexturllist, cl) for i in range(len(self.urllist)): ul=self.urllist[i] self.ustr=self.ustr+str(i)+"、"+ul[1]+" :"+ul[0]+"\n\n" #for ur in self.urllist: # cl.gethtml(ur[0]) #sl=cl.getstrlist() # self.strl=self.strl+sl def getnexturl(self, nexturllist, cl): for i in range(len(nexturllist)): nul=nexturllist[i] self.nustr=self.nustr+nul[1]+nul[0]+"\n" cl.gethtml("http://gz.58.com"+nul[0]) uls=cl.geturllist() if cl.isend(): if i==(len(nexturllist)-1): cl.gethtml("http://gz.58.com"+nul[0]) nus=cl.getnexturl() del nus[0:(i+1)] self.getnexturl(nus, cl) self.urllist=self.urllist+uls def threadcl(self): url="http://gz.58.com/tech/?key=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&cmcskey=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&final=1&jump=2&specialtype=gls&canclequery=isbiz%3D0&sourcetype=4" self.crawling(url) '''#时针 transform.translate(50,50) transform.rotate(hour_angle) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(Qt.NoPen) painter.setBrush(QBrush(Qt.darkRed)) painter.drawPolygon(QPolygonF(hourPoints)) transform.reset() #分针 transform.translate(50,50) 
transform.rotate(minite_angle) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setBrush(QBrush(Qt.darkGreen)) painter.drawPolygon(QPolygonF(minPoints)) transform.reset() #秒针 transform.translate(50,50) transform.rotate(-53)#second_angle transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(QPen(Qt.darkCyan,1)) painter.drawLine(50,50,90,20) ''' class Window(QMainWindow): def __init__(self): super(Window, self).__init__() self.initUI() def initUI(self): self.setGeometry(100, 35, 1200, 670) def paintEvent(self,event): source.ui.groupBox_show.close() pt=QPainter(self) pt.begin(self) #self.drawRect(pt) self.drawclock(pt) pt.end() def drawRect(self, pt): pen1=QPen(QColor(225, 225, 225, 225)) rec=QRect(500, 500,500, 500) pt.setPen(pen1) pt.drawRect(rec) pt.setBrush(QColor(0, 0, 0, 255)) pt.drawRect(300, 300, 300, 600) def drawclock(self, painter): painter.se
minPoints=[QPointF(50,25), QPointF(48,50), QPointF(52,50)] #时钟坐标点 hourPoints=[QPointF(50,35), QPointF(48,50), QPointF(52,50)] side=min(self.width(),self.height()) painter.setViewport((2*self.width())/5,self.height()/16, (4*side)/7, (4*side)/7)#始终处于窗口中心位置显示 #设置QPainter的坐标系统,无论窗体大小如何变化, #窗体左上坐标为(0,0),右下坐标为(100,100), #因此窗体中心坐标为(50,50) painter.setWindow(0,0,100,100) #绘制表盘,使用环形渐变色 niceBlue=QColor(150,150,200) haloGrident=QRadialGradient(50,50,50,50,50) haloGrident.setColorAt(0.0,Qt.lightGray) haloGrident.setColorAt(0.5,Qt.darkGray) haloGrident.setColorAt(0.9,Qt.white) haloGrident.setColorAt(1.0,niceBlue) painter.setBrush(haloGrident) painter.setPen(QPen(Qt.darkGray,1)) painter.drawEllipse(0,0,100,100) transform=QTransform() #绘制时钟为0的字,以及刻度 painter.setPen(QPen(Qt.black,1.5)) fontRect.moveCenter(QPoint(50,10+fontRect.height()/2)) painter.setFont(font) painter.drawLine(50,2,50,8)# painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"0") for i in range(1,12): transform.translate(50, 50) transform.rotate(30) transform.translate(-50,-50) painter.setWorldTransform(transform) painter.drawLine(50,2,50,8) painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"%d" % i) transform.reset() #绘制分钟刻度线 painter.setPen(QPen(Qt.blue,1)) for i in range(1,60): transform.translate(50,50) transform.rotate(6) transform.translate(-50,-50) if i%5!=0: painter.setWorldTransform(transform) painter.drawLine(50,2,50,5) transform.reset() #获取当前时间 currentTime=QTime().currentTime() #hour=currentTime.hour() if currentTime.hour()<12 else currentTime.hour()-12 #minite=currentTime.minute() second=currentTime.second() #获取所需旋转角度 #hour_angle=hour*30.0+(minite/60.0)*30.0 #minite_angle=(minite/60.0)*360.0 second_angle=second*6.0-53 source.ui.textEdit_get.setText(str(second)) self.draw_line(painter, transform, second_angle) self.draw_line(painter, transform) def draw_line(self, painter, transform, angle=-53): #秒针 transform.reset() transform.translate(50,50) transform.rotate(angle)#second_angle 
transform.translate(-50,-50) painter.setWorldTransform(transform) painter.setPen(QPen(Qt.darkCyan,1)) painter.drawLine(50,50,90,20) def drawRect(self, pt): pen1=QPen(QColor(225, 225, 225, 225)) rec=QRect(500, 500,500, 500) pt.setPen(pen1) pt.drawRect(rec) pt.setBrush(QColor(0, 0, 0, 255)) pt.drawRect(300, 300, 300, 600) #**********************************************************************8 from PyQt5 import QtCore, QtGui, QtWidgets from source import source from threadings import Tnuls import threading import time from ui_paint import Window def bigin_click(self): if not source.isbigan: if self.comboBox_2.currentText ()=="58同城": t=Tnuls(0) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="中华英才网": t=Tnuls(1) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="智联招聘": t=Tnuls(2) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="猎聘猎头网": t=Tnuls(3) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="卓博人才网": t=Tnuls(4) t.start() source.isbigan=True if self.comboBox_2.currentText ()=="前程无忧": t=Tnuls(5) t.start() source.isbigan=True if source.isbigan: t1 = threading.Thread(target=self.threadcl) #创建第一个子线程,子线程的任务 t1.setDaemon(True) t1.start() MainWindow.update() def show_click(self): MainWindow.getdata() MainWindow.setUpdatesEnabled(False); MainWindow.setUpdatesEnabled(True); MainWindow.repaint(); if len(MainWindow.bestlist)>0: bestlist=MainWindow.bestlist antext=source.Eanalyze[self.comboBox.currentText ()] analyze_text="" for i in range(len(antext)): analyze_text=analyze_text+antext[i] if i<len(bestlist): analyze_text=analyze_text+bestlist[i] self.textEdit_get.setText(analyze_text) def threadcl(self): #database.open()#打开数据库 source.open_txt() ''' for i in range(1, 2):#len(source.urllist) t=Tnuls(i) t.start() time.sleep(0.5) while source.threadnum>800: time.sleep(0.3) ''' while source.isbigan: time.sleep(1) if source.threadnum<1: source.isgetweb=True source.isbigan=False # database.close()#关闭数据库 source.close_txt() 
source.copy_txt() #------------------------------------------------------------------------------------------ self.pushButton_bigin.clicked.connect(MainWindow.bigin_click) self.pushButton_show.clicked.connect(MainWindow.show_click) self.pushButton_back.clicked.connect(MainWindow.ui_reshow) self.pushButton.clicked.connect(MainWindow.deep_ay) self.pushButton_2.clicked.connect(MainWindow.duibi_hx) self.pushButton_3.clicked.connect(MainWindow.duibi_zx) self.pushButton_4.clicked.connect(MainWindow.duibi_nl) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) MainWindow =Window() #QtWidgets.QMainWindow() source.ui = Ui_MainWindow() source.ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_())
tRenderHint(QPainter.Antialiasing) #设置表盘中的文字字体 font=QFont("Times",6) fm=QFontMetrics(font) fontRect=fm.boundingRect("99")#获取绘制字体的矩形范围 #分针坐标点
identifier_body
app.js
"use strict"; /* global process */ /* global __dirname */ /******************************************************************************* * Copyright (c) 2015 IBM Corp. * * All rights reserved. * * Contributors: * David Huffman - Initial implementation *******************************************************************************/ ///////////////////////////////////////// ///////////// Setup Node.js ///////////// ///////////////////////////////////////// var express = require('express'); var session = require('express-session'); var compression = require('compression'); var serve_static = require('serve-static'); var path = require('path'); var morgan = require('morgan'); var cookieParser = require('cookie-parser'); var bodyParser = require('body-parser'); var http = require('http'); var app = express(); var url = require('url'); var async = require('async'); var setup = require('./setup'); var cors = require("cors"); var fs = require("fs"); //// Set Server Parameters //// var host = setup.SERVER.HOST; var port = setup.SERVER.PORT; //////// Pathing and Module Setup //////// app.set('views', path.join(__dirname, 'views')); app.set('view engine', 'jade'); app.engine('.html', require('jade').__express); app.use(compression()); app.use(morgan('dev')); app.use(bodyParser.json()); app.use(bodyParser.urlencoded()); app.use(cookieParser()); app.use('/cc/summary', serve_static(path.join(__dirname, 'cc_summaries')) ); //for chaincode investigator //app.use( serve_static(path.join(__dirname, 'public'), {maxAge: '1d', setHeaders: setCustomCC}) ); //1 day cache app.use( serve_static(path.join(__dirname, 'public')) ); app.use(session({secret:'Somethignsomething1234!test', resave:true, saveUninitialized:true})); function setCustomCC(res, path) { if (serve_static.mime.lookup(path) === 'image/jpeg') res.setHeader('Cache-Control', 'public, max-age=2592000'); //30 days cache else if (serve_static.mime.lookup(path) === 'image/png') res.setHeader('Cache-Control', 'public, 
max-age=2592000'); else if (serve_static.mime.lookup(path) === 'image/x-icon') res.setHeader('Cache-Control', 'public, max-age=2592000'); } // Enable CORS preflight across the board. app.options('*', cors()); app.use(cors()); /////////// Configure Webserver /////////// app.use(function(req, res, next){ var keys; console.log('silly', '------------------------------------------ incoming request ------------------------------------------'); console.log('info', 'New ' + req.method + ' request for', req.url); req.bag = {}; //create my object for my stuff req.session.count = eval(req.session.count) + 1; req.bag.session = req.session; var url_parts = url.parse(req.url, true); req.parameters = url_parts.query; keys = Object.keys(req.parameters); if(req.parameters && keys.length > 0) console.log({parameters: req.parameters}); //print request parameters keys = Object.keys(req.body); if (req.body && keys.length > 0) console.log({body: req.body}); //print request body next(); }); //// Router //// app.use('/', require('./routes/site_router')); //////////////////////////////////////////// ////////////// Error Handling ////////////// //////////////////////////////////////////// app.use(function(req, res, next) { var err = new Error('Not Found'); err.status = 404; next(err); }); app.use(function(err, req, res, next) { // = development error handler, print stack trace console.log("Error Handeler -", req.url); var errorCode = err.status || 500; res.status(errorCode); req.bag.error = {msg:err.stack, status:errorCode}; if(req.bag.error.status == 404) req.bag.error.msg = "Sorry, I cannot locate that file"; res.render('template/error', {bag:req.bag}); }); // ============================================================================================================================ // Launch Webserver // ============================================================================================================================ var server = http.createServer(app).listen(port, function() 
{}); process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0"; server.timeout = 240000; // Ta-da. console.log('info', '------------------------------------------ Server Up - ' + host + ':' + port + ' ------------------------------------------'); if(process.env.PRODUCTION) console.log('Running using Production settings'); else console.log('Running using Developer settings'); // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // Warning // ============================================================================================================================ // ============================================================================================================================ // Entering // ============================================================================================================================ // ============================================================================================================================ // Test Area // ============================================================================================================================ var app1 = require('./utils/ws_app1'); var app2 = 
require('./utils/ws_app2'); var ws = require('ws'); var wss = {}; var Obc1 = require('./utils/obc-js/index'); var obc = new Obc1(); // ================================== // load peers manually or from VCAP, VCAP will overwrite hardcoded list! // ================================== var peers = [ { "discovery_host": "169.44.38.124", "discovery_port": "32826", "api_host": "169.44.38.124", "api_port": "32827", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp1", "api_url": "http://169.44.38.124:32827" }, { "discovery_host": "169.44.38.120", "discovery_port": "32804", "api_host": "169.44.38.120", "api_port": "32805", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp3", "api_url": "http://169.44.38.120:32805" }, { "discovery_host": "169.44.38.114", "discovery_port": "32810", "api_host": "169.44.38.114", "api_port": "32811", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp4", "api_url": "http://169.44.38.114:32811" }, { "discovery_host": "169.44.38.102", "discovery_port": "32820", "api_host": "169.44.38.102", "api_port": "32821", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp5", "api_url": "http://169.44.38.102:32821" }, { "discovery_host": "169.44.38.102", "discovery_port": "32818", "api_host": "169.44.38.102", "api_port": "32819", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp2", "api_url": "http://169.44.38.102:32819" } ]; console.log('loading hardcoded peers'); if(process.env.VCAP_SERVICES){ //load from vcap, search for service, 1 of the 3 should be found... 
var servicesObject = JSON.parse(process.env.VCAP_SERVICES); for(var i in servicesObject){ if(i.indexOf('ibm-blockchain') >= 0){ //looks close enough if(servicesObject[i][0].credentials && servicesObject[i][0].credentials.peers){ console.log('overwritting peers, loading from a vcap service: ', i); peers = servicesObject[i][0].credentials.peers; break; } } } } obc.network(peers); //setup network connection for rest endpoint // ================================== // configure obc-js sdk // ================================== var options = { zip_url: 'https://github.com/dshuffma-ibm/simplestuff/archive/master.zip', git_dir: 'simplestuff-master/phase2', //subdirectroy name of chaincode after unzipped git_url: 'https://github.com/dshuffma-ibm/simplestuff/phase2', //git clone http url //hashed cc name from prev deployment deployed_name: '4a237d1e7be8bb2fe61a9f00b7200c1f9a16f77ec2dc4045a540fd84da2327a80975d66394add22961544ea07dae943a1941f175d547b554a0b5d5d2fa8d7c93' }; if(process.env.VCAP_SERVICES){ console.log('\n[!] looks like you are in bluemix, I am going to clear out the deploy_name so that it deploys new cc.\n[!] hope that is ok budddy\n'); options.deployed_name = ""; } obc.load(options, cb_ready); //parse/load chaincode function cb_ready(err, cc){ //response has chaincode functions app1.setup(obc, cc); app2.setup(obc, cc); if(cc.details.deployed_name === ""){ //decide if i need to deploy cc.deploy('init', ['99'], './cc_summaries', cb_deployed); } else{ console.log('chaincode summary file indicates chaincode has been previously deployed'); cb_deployed(); } } // ============================================================================================================================ // WebSocket Communication Madness // ============================================================================================================================ function cb_deployed()
/* ignore this code - 2/1/2016 var ws_cons = []; function broadcast(data){ for(var i in ws_cons){ try{ console.log('sending', i);//, ws); ws_cons[i].send(JSON.stringify(data)); } catch(e){ console.log('error ws', e); } } }*/ /* CCI improvements - [x] simpilify chaincode.json, remove discovery and api_url - [x] save chaincode.json as cc_<hash>.json - [x] have GET API that retruns all cc_<hash>.json file names - [x] have GET API that returns the cc_<hash>.json file - [x] allow cci to take in <hash> as url parameter - [ ] deploy on CCI actually runs through flow - [ ] - load spin icon - poll on new chaincode.json file name API - finally fade spin and rebuild UI from file - (depending on how this works maybe remove HTML5 local storage) */
{ console.log('starting websocket'); obc.save('./cc_summaries'); //save it here for chaincode investigator wss = new ws.Server({server: server}); //start the websocket now wss.on('connection', function connection(ws) { //ws_cons.push(ws); ws.on('message', function incoming(message) { console.log('received ws msg:', message); var data = JSON.parse(message); app1.process_msg(ws, data); app2.process_msg(ws, data); }); ws.on('close', function(){ app2.close(); //close peridic poll that phase 2 does }); }); }
identifier_body
app.js
"use strict"; /* global process */ /* global __dirname */ /******************************************************************************* * Copyright (c) 2015 IBM Corp. * * All rights reserved. * * Contributors: * David Huffman - Initial implementation *******************************************************************************/ ///////////////////////////////////////// ///////////// Setup Node.js ///////////// ///////////////////////////////////////// var express = require('express'); var session = require('express-session'); var compression = require('compression'); var serve_static = require('serve-static'); var path = require('path'); var morgan = require('morgan'); var cookieParser = require('cookie-parser'); var bodyParser = require('body-parser'); var http = require('http'); var app = express(); var url = require('url'); var async = require('async'); var setup = require('./setup'); var cors = require("cors"); var fs = require("fs"); //// Set Server Parameters //// var host = setup.SERVER.HOST; var port = setup.SERVER.PORT; //////// Pathing and Module Setup //////// app.set('views', path.join(__dirname, 'views')); app.set('view engine', 'jade'); app.engine('.html', require('jade').__express); app.use(compression()); app.use(morgan('dev')); app.use(bodyParser.json()); app.use(bodyParser.urlencoded()); app.use(cookieParser()); app.use('/cc/summary', serve_static(path.join(__dirname, 'cc_summaries')) ); //for chaincode investigator //app.use( serve_static(path.join(__dirname, 'public'), {maxAge: '1d', setHeaders: setCustomCC}) ); //1 day cache app.use( serve_static(path.join(__dirname, 'public')) ); app.use(session({secret:'Somethignsomething1234!test', resave:true, saveUninitialized:true})); function setCustomCC(res, path) { if (serve_static.mime.lookup(path) === 'image/jpeg') res.setHeader('Cache-Control', 'public, max-age=2592000'); //30 days cache else if (serve_static.mime.lookup(path) === 'image/png') res.setHeader('Cache-Control', 'public, 
max-age=2592000'); else if (serve_static.mime.lookup(path) === 'image/x-icon') res.setHeader('Cache-Control', 'public, max-age=2592000'); } // Enable CORS preflight across the board. app.options('*', cors()); app.use(cors()); /////////// Configure Webserver /////////// app.use(function(req, res, next){ var keys; console.log('silly', '------------------------------------------ incoming request ------------------------------------------'); console.log('info', 'New ' + req.method + ' request for', req.url); req.bag = {}; //create my object for my stuff req.session.count = eval(req.session.count) + 1; req.bag.session = req.session; var url_parts = url.parse(req.url, true); req.parameters = url_parts.query; keys = Object.keys(req.parameters); if(req.parameters && keys.length > 0) console.log({parameters: req.parameters}); //print request parameters keys = Object.keys(req.body); if (req.body && keys.length > 0) console.log({body: req.body}); //print request body next(); }); //// Router //// app.use('/', require('./routes/site_router')); //////////////////////////////////////////// ////////////// Error Handling ////////////// //////////////////////////////////////////// app.use(function(req, res, next) { var err = new Error('Not Found'); err.status = 404; next(err); }); app.use(function(err, req, res, next) { // = development error handler, print stack trace console.log("Error Handeler -", req.url); var errorCode = err.status || 500; res.status(errorCode); req.bag.error = {msg:err.stack, status:errorCode}; if(req.bag.error.status == 404) req.bag.error.msg = "Sorry, I cannot locate that file"; res.render('template/error', {bag:req.bag}); }); // ============================================================================================================================ // Launch Webserver // ============================================================================================================================ var server = http.createServer(app).listen(port, function() 
{}); process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0"; server.timeout = 240000; // Ta-da. console.log('info', '------------------------------------------ Server Up - ' + host + ':' + port + ' ------------------------------------------'); if(process.env.PRODUCTION) console.log('Running using Production settings'); else console.log('Running using Developer settings'); // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // Warning // ============================================================================================================================ // ============================================================================================================================ // Entering // ============================================================================================================================ // ============================================================================================================================ // Test Area // ============================================================================================================================ var app1 = require('./utils/ws_app1'); var app2 = 
require('./utils/ws_app2'); var ws = require('ws'); var wss = {}; var Obc1 = require('./utils/obc-js/index'); var obc = new Obc1(); // ================================== // load peers manually or from VCAP, VCAP will overwrite hardcoded list! // ================================== var peers = [ { "discovery_host": "169.44.38.124", "discovery_port": "32826", "api_host": "169.44.38.124", "api_port": "32827", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp1", "api_url": "http://169.44.38.124:32827" }, { "discovery_host": "169.44.38.120", "discovery_port": "32804", "api_host": "169.44.38.120", "api_port": "32805", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp3", "api_url": "http://169.44.38.120:32805" }, { "discovery_host": "169.44.38.114", "discovery_port": "32810", "api_host": "169.44.38.114", "api_port": "32811", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp4", "api_url": "http://169.44.38.114:32811" }, { "discovery_host": "169.44.38.102", "discovery_port": "32820", "api_host": "169.44.38.102", "api_port": "32821", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp5", "api_url": "http://169.44.38.102:32821" }, { "discovery_host": "169.44.38.102", "discovery_port": "32818", "api_host": "169.44.38.102", "api_port": "32819", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp2", "api_url": "http://169.44.38.102:32819" } ]; console.log('loading hardcoded peers'); if(process.env.VCAP_SERVICES){ //load from vcap, search for service, 1 of the 3 should be found... 
var servicesObject = JSON.parse(process.env.VCAP_SERVICES); for(var i in servicesObject){ if(i.indexOf('ibm-blockchain') >= 0){ //looks close enough if(servicesObject[i][0].credentials && servicesObject[i][0].credentials.peers){ console.log('overwritting peers, loading from a vcap service: ', i); peers = servicesObject[i][0].credentials.peers; break; } } } } obc.network(peers); //setup network connection for rest endpoint // ================================== // configure obc-js sdk // ================================== var options = { zip_url: 'https://github.com/dshuffma-ibm/simplestuff/archive/master.zip', git_dir: 'simplestuff-master/phase2', //subdirectroy name of chaincode after unzipped git_url: 'https://github.com/dshuffma-ibm/simplestuff/phase2', //git clone http url //hashed cc name from prev deployment deployed_name: '4a237d1e7be8bb2fe61a9f00b7200c1f9a16f77ec2dc4045a540fd84da2327a80975d66394add22961544ea07dae943a1941f175d547b554a0b5d5d2fa8d7c93' }; if(process.env.VCAP_SERVICES)
obc.load(options, cb_ready); //parse/load chaincode function cb_ready(err, cc){ //response has chaincode functions app1.setup(obc, cc); app2.setup(obc, cc); if(cc.details.deployed_name === ""){ //decide if i need to deploy cc.deploy('init', ['99'], './cc_summaries', cb_deployed); } else{ console.log('chaincode summary file indicates chaincode has been previously deployed'); cb_deployed(); } } // ============================================================================================================================ // WebSocket Communication Madness // ============================================================================================================================ function cb_deployed(){ console.log('starting websocket'); obc.save('./cc_summaries'); //save it here for chaincode investigator wss = new ws.Server({server: server}); //start the websocket now wss.on('connection', function connection(ws) { //ws_cons.push(ws); ws.on('message', function incoming(message) { console.log('received ws msg:', message); var data = JSON.parse(message); app1.process_msg(ws, data); app2.process_msg(ws, data); }); ws.on('close', function(){ app2.close(); //close peridic poll that phase 2 does }); }); } /* ignore this code - 2/1/2016 var ws_cons = []; function broadcast(data){ for(var i in ws_cons){ try{ console.log('sending', i);//, ws); ws_cons[i].send(JSON.stringify(data)); } catch(e){ console.log('error ws', e); } } }*/ /* CCI improvements - [x] simpilify chaincode.json, remove discovery and api_url - [x] save chaincode.json as cc_<hash>.json - [x] have GET API that retruns all cc_<hash>.json file names - [x] have GET API that returns the cc_<hash>.json file - [x] allow cci to take in <hash> as url parameter - [ ] deploy on CCI actually runs through flow - [ ] - load spin icon - poll on new chaincode.json file name API - finally fade spin and rebuild UI from file - (depending on how this works maybe remove HTML5 local storage) */
{ console.log('\n[!] looks like you are in bluemix, I am going to clear out the deploy_name so that it deploys new cc.\n[!] hope that is ok budddy\n'); options.deployed_name = ""; }
conditional_block
app.js
"use strict"; /* global process */ /* global __dirname */ /******************************************************************************* * Copyright (c) 2015 IBM Corp. * * All rights reserved. * * Contributors: * David Huffman - Initial implementation *******************************************************************************/ ///////////////////////////////////////// ///////////// Setup Node.js ///////////// ///////////////////////////////////////// var express = require('express'); var session = require('express-session'); var compression = require('compression'); var serve_static = require('serve-static'); var path = require('path'); var morgan = require('morgan'); var cookieParser = require('cookie-parser'); var bodyParser = require('body-parser'); var http = require('http'); var app = express(); var url = require('url'); var async = require('async'); var setup = require('./setup'); var cors = require("cors"); var fs = require("fs"); //// Set Server Parameters //// var host = setup.SERVER.HOST; var port = setup.SERVER.PORT; //////// Pathing and Module Setup //////// app.set('views', path.join(__dirname, 'views')); app.set('view engine', 'jade'); app.engine('.html', require('jade').__express); app.use(compression()); app.use(morgan('dev')); app.use(bodyParser.json()); app.use(bodyParser.urlencoded()); app.use(cookieParser()); app.use('/cc/summary', serve_static(path.join(__dirname, 'cc_summaries')) ); //for chaincode investigator //app.use( serve_static(path.join(__dirname, 'public'), {maxAge: '1d', setHeaders: setCustomCC}) ); //1 day cache app.use( serve_static(path.join(__dirname, 'public')) ); app.use(session({secret:'Somethignsomething1234!test', resave:true, saveUninitialized:true})); function setCustomCC(res, path) { if (serve_static.mime.lookup(path) === 'image/jpeg') res.setHeader('Cache-Control', 'public, max-age=2592000'); //30 days cache else if (serve_static.mime.lookup(path) === 'image/png') res.setHeader('Cache-Control', 'public, 
max-age=2592000'); else if (serve_static.mime.lookup(path) === 'image/x-icon') res.setHeader('Cache-Control', 'public, max-age=2592000'); } // Enable CORS preflight across the board. app.options('*', cors()); app.use(cors()); /////////// Configure Webserver /////////// app.use(function(req, res, next){ var keys; console.log('silly', '------------------------------------------ incoming request ------------------------------------------'); console.log('info', 'New ' + req.method + ' request for', req.url); req.bag = {}; //create my object for my stuff req.session.count = eval(req.session.count) + 1; req.bag.session = req.session; var url_parts = url.parse(req.url, true); req.parameters = url_parts.query; keys = Object.keys(req.parameters); if(req.parameters && keys.length > 0) console.log({parameters: req.parameters}); //print request parameters keys = Object.keys(req.body); if (req.body && keys.length > 0) console.log({body: req.body}); //print request body next(); }); //// Router //// app.use('/', require('./routes/site_router')); //////////////////////////////////////////// ////////////// Error Handling ////////////// //////////////////////////////////////////// app.use(function(req, res, next) { var err = new Error('Not Found'); err.status = 404; next(err); }); app.use(function(err, req, res, next) { // = development error handler, print stack trace console.log("Error Handeler -", req.url); var errorCode = err.status || 500; res.status(errorCode); req.bag.error = {msg:err.stack, status:errorCode}; if(req.bag.error.status == 404) req.bag.error.msg = "Sorry, I cannot locate that file"; res.render('template/error', {bag:req.bag}); }); // ============================================================================================================================ // Launch Webserver // ============================================================================================================================ var server = http.createServer(app).listen(port, function() 
{}); process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0"; server.timeout = 240000; // Ta-da. console.log('info', '------------------------------------------ Server Up - ' + host + ':' + port + ' ------------------------------------------'); if(process.env.PRODUCTION) console.log('Running using Production settings'); else console.log('Running using Developer settings'); // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // Warning // ============================================================================================================================ // ============================================================================================================================ // Entering // ============================================================================================================================ // ============================================================================================================================ // Test Area // ============================================================================================================================ var app1 = require('./utils/ws_app1'); var app2 = 
require('./utils/ws_app2'); var ws = require('ws'); var wss = {}; var Obc1 = require('./utils/obc-js/index'); var obc = new Obc1(); // ================================== // load peers manually or from VCAP, VCAP will overwrite hardcoded list! // ================================== var peers = [ { "discovery_host": "169.44.38.124", "discovery_port": "32826", "api_host": "169.44.38.124", "api_port": "32827", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp1", "api_url": "http://169.44.38.124:32827" }, { "discovery_host": "169.44.38.120", "discovery_port": "32804", "api_host": "169.44.38.120", "api_port": "32805", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp3", "api_url": "http://169.44.38.120:32805" }, { "discovery_host": "169.44.38.114", "discovery_port": "32810", "api_host": "169.44.38.114", "api_port": "32811", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp4", "api_url": "http://169.44.38.114:32811" }, { "discovery_host": "169.44.38.102", "discovery_port": "32820", "api_host": "169.44.38.102", "api_port": "32821", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp5", "api_url": "http://169.44.38.102:32821" }, { "discovery_host": "169.44.38.102", "discovery_port": "32818", "api_host": "169.44.38.102", "api_port": "32819", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp2", "api_url": "http://169.44.38.102:32819" } ]; console.log('loading hardcoded peers'); if(process.env.VCAP_SERVICES){ //load from vcap, search for service, 1 of the 3 should be found... 
var servicesObject = JSON.parse(process.env.VCAP_SERVICES); for(var i in servicesObject){ if(i.indexOf('ibm-blockchain') >= 0){ //looks close enough if(servicesObject[i][0].credentials && servicesObject[i][0].credentials.peers){ console.log('overwritting peers, loading from a vcap service: ', i); peers = servicesObject[i][0].credentials.peers; break; } } } } obc.network(peers); //setup network connection for rest endpoint // ================================== // configure obc-js sdk // ================================== var options = { zip_url: 'https://github.com/dshuffma-ibm/simplestuff/archive/master.zip', git_dir: 'simplestuff-master/phase2', //subdirectroy name of chaincode after unzipped git_url: 'https://github.com/dshuffma-ibm/simplestuff/phase2', //git clone http url //hashed cc name from prev deployment deployed_name: '4a237d1e7be8bb2fe61a9f00b7200c1f9a16f77ec2dc4045a540fd84da2327a80975d66394add22961544ea07dae943a1941f175d547b554a0b5d5d2fa8d7c93' }; if(process.env.VCAP_SERVICES){ console.log('\n[!] looks like you are in bluemix, I am going to clear out the deploy_name so that it deploys new cc.\n[!] hope that is ok budddy\n'); options.deployed_name = ""; } obc.load(options, cb_ready); //parse/load chaincode function cb_ready(err, cc){ //response has chaincode functions app1.setup(obc, cc); app2.setup(obc, cc); if(cc.details.deployed_name === ""){ //decide if i need to deploy cc.deploy('init', ['99'], './cc_summaries', cb_deployed); } else{ console.log('chaincode summary file indicates chaincode has been previously deployed'); cb_deployed(); } } // ============================================================================================================================ // WebSocket Communication Madness // ============================================================================================================================ function
(){ console.log('starting websocket'); obc.save('./cc_summaries'); //save it here for chaincode investigator wss = new ws.Server({server: server}); //start the websocket now wss.on('connection', function connection(ws) { //ws_cons.push(ws); ws.on('message', function incoming(message) { console.log('received ws msg:', message); var data = JSON.parse(message); app1.process_msg(ws, data); app2.process_msg(ws, data); }); ws.on('close', function(){ app2.close(); //close peridic poll that phase 2 does }); }); } /* ignore this code - 2/1/2016 var ws_cons = []; function broadcast(data){ for(var i in ws_cons){ try{ console.log('sending', i);//, ws); ws_cons[i].send(JSON.stringify(data)); } catch(e){ console.log('error ws', e); } } }*/ /* CCI improvements - [x] simpilify chaincode.json, remove discovery and api_url - [x] save chaincode.json as cc_<hash>.json - [x] have GET API that retruns all cc_<hash>.json file names - [x] have GET API that returns the cc_<hash>.json file - [x] allow cci to take in <hash> as url parameter - [ ] deploy on CCI actually runs through flow - [ ] - load spin icon - poll on new chaincode.json file name API - finally fade spin and rebuild UI from file - (depending on how this works maybe remove HTML5 local storage) */
cb_deployed
identifier_name
app.js
"use strict"; /* global process */ /* global __dirname */ /******************************************************************************* * Copyright (c) 2015 IBM Corp. * * All rights reserved. * * Contributors: * David Huffman - Initial implementation *******************************************************************************/ ///////////////////////////////////////// ///////////// Setup Node.js ///////////// ///////////////////////////////////////// var express = require('express'); var session = require('express-session'); var compression = require('compression'); var serve_static = require('serve-static'); var path = require('path'); var morgan = require('morgan'); var cookieParser = require('cookie-parser'); var bodyParser = require('body-parser'); var http = require('http'); var app = express(); var url = require('url'); var async = require('async'); var setup = require('./setup'); var cors = require("cors"); var fs = require("fs"); //// Set Server Parameters //// var host = setup.SERVER.HOST; var port = setup.SERVER.PORT; //////// Pathing and Module Setup //////// app.set('views', path.join(__dirname, 'views')); app.set('view engine', 'jade'); app.engine('.html', require('jade').__express); app.use(compression()); app.use(morgan('dev')); app.use(bodyParser.json()); app.use(bodyParser.urlencoded()); app.use(cookieParser()); app.use('/cc/summary', serve_static(path.join(__dirname, 'cc_summaries')) ); //for chaincode investigator //app.use( serve_static(path.join(__dirname, 'public'), {maxAge: '1d', setHeaders: setCustomCC}) ); //1 day cache
else if (serve_static.mime.lookup(path) === 'image/x-icon') res.setHeader('Cache-Control', 'public, max-age=2592000'); } // Enable CORS preflight across the board. app.options('*', cors()); app.use(cors()); /////////// Configure Webserver /////////// app.use(function(req, res, next){ var keys; console.log('silly', '------------------------------------------ incoming request ------------------------------------------'); console.log('info', 'New ' + req.method + ' request for', req.url); req.bag = {}; //create my object for my stuff req.session.count = eval(req.session.count) + 1; req.bag.session = req.session; var url_parts = url.parse(req.url, true); req.parameters = url_parts.query; keys = Object.keys(req.parameters); if(req.parameters && keys.length > 0) console.log({parameters: req.parameters}); //print request parameters keys = Object.keys(req.body); if (req.body && keys.length > 0) console.log({body: req.body}); //print request body next(); }); //// Router //// app.use('/', require('./routes/site_router')); //////////////////////////////////////////// ////////////// Error Handling ////////////// //////////////////////////////////////////// app.use(function(req, res, next) { var err = new Error('Not Found'); err.status = 404; next(err); }); app.use(function(err, req, res, next) { // = development error handler, print stack trace console.log("Error Handeler -", req.url); var errorCode = err.status || 500; res.status(errorCode); req.bag.error = {msg:err.stack, status:errorCode}; if(req.bag.error.status == 404) req.bag.error.msg = "Sorry, I cannot locate that file"; res.render('template/error', {bag:req.bag}); }); // ============================================================================================================================ // Launch Webserver // ============================================================================================================================ var server = http.createServer(app).listen(port, function() {}); 
process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0"; server.timeout = 240000; // Ta-da. console.log('info', '------------------------------------------ Server Up - ' + host + ':' + port + ' ------------------------------------------'); if(process.env.PRODUCTION) console.log('Running using Production settings'); else console.log('Running using Developer settings'); // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // ============================================================================================================================ // Warning // ============================================================================================================================ // ============================================================================================================================ // Entering // ============================================================================================================================ // ============================================================================================================================ // Test Area // ============================================================================================================================ var app1 = require('./utils/ws_app1'); var app2 = 
require('./utils/ws_app2'); var ws = require('ws'); var wss = {}; var Obc1 = require('./utils/obc-js/index'); var obc = new Obc1(); // ================================== // load peers manually or from VCAP, VCAP will overwrite hardcoded list! // ================================== var peers = [ { "discovery_host": "169.44.38.124", "discovery_port": "32826", "api_host": "169.44.38.124", "api_port": "32827", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp1", "api_url": "http://169.44.38.124:32827" }, { "discovery_host": "169.44.38.120", "discovery_port": "32804", "api_host": "169.44.38.120", "api_port": "32805", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp3", "api_url": "http://169.44.38.120:32805" }, { "discovery_host": "169.44.38.114", "discovery_port": "32810", "api_host": "169.44.38.114", "api_port": "32811", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp4", "api_url": "http://169.44.38.114:32811" }, { "discovery_host": "169.44.38.102", "discovery_port": "32820", "api_host": "169.44.38.102", "api_port": "32821", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp5", "api_url": "http://169.44.38.102:32821" }, { "discovery_host": "169.44.38.102", "discovery_port": "32818", "api_host": "169.44.38.102", "api_port": "32819", "id": "41e46128-cbed-4cc9-a635-bc140226a309_vp2", "api_url": "http://169.44.38.102:32819" } ]; console.log('loading hardcoded peers'); if(process.env.VCAP_SERVICES){ //load from vcap, search for service, 1 of the 3 should be found... 
var servicesObject = JSON.parse(process.env.VCAP_SERVICES); for(var i in servicesObject){ if(i.indexOf('ibm-blockchain') >= 0){ //looks close enough if(servicesObject[i][0].credentials && servicesObject[i][0].credentials.peers){ console.log('overwritting peers, loading from a vcap service: ', i); peers = servicesObject[i][0].credentials.peers; break; } } } } obc.network(peers); //setup network connection for rest endpoint // ================================== // configure obc-js sdk // ================================== var options = { zip_url: 'https://github.com/dshuffma-ibm/simplestuff/archive/master.zip', git_dir: 'simplestuff-master/phase2', //subdirectroy name of chaincode after unzipped git_url: 'https://github.com/dshuffma-ibm/simplestuff/phase2', //git clone http url //hashed cc name from prev deployment deployed_name: '4a237d1e7be8bb2fe61a9f00b7200c1f9a16f77ec2dc4045a540fd84da2327a80975d66394add22961544ea07dae943a1941f175d547b554a0b5d5d2fa8d7c93' }; if(process.env.VCAP_SERVICES){ console.log('\n[!] looks like you are in bluemix, I am going to clear out the deploy_name so that it deploys new cc.\n[!] 
hope that is ok budddy\n'); options.deployed_name = ""; } obc.load(options, cb_ready); //parse/load chaincode function cb_ready(err, cc){ //response has chaincode functions app1.setup(obc, cc); app2.setup(obc, cc); if(cc.details.deployed_name === ""){ //decide if i need to deploy cc.deploy('init', ['99'], './cc_summaries', cb_deployed); } else{ console.log('chaincode summary file indicates chaincode has been previously deployed'); cb_deployed(); } } // ============================================================================================================================ // WebSocket Communication Madness // ============================================================================================================================ function cb_deployed(){ console.log('starting websocket'); obc.save('./cc_summaries'); //save it here for chaincode investigator wss = new ws.Server({server: server}); //start the websocket now wss.on('connection', function connection(ws) { //ws_cons.push(ws); ws.on('message', function incoming(message) { console.log('received ws msg:', message); var data = JSON.parse(message); app1.process_msg(ws, data); app2.process_msg(ws, data); }); ws.on('close', function(){ app2.close(); //close peridic poll that phase 2 does }); }); } /* ignore this code - 2/1/2016 var ws_cons = []; function broadcast(data){ for(var i in ws_cons){ try{ console.log('sending', i);//, ws); ws_cons[i].send(JSON.stringify(data)); } catch(e){ console.log('error ws', e); } } }*/ /* CCI improvements - [x] simpilify chaincode.json, remove discovery and api_url - [x] save chaincode.json as cc_<hash>.json - [x] have GET API that retruns all cc_<hash>.json file names - [x] have GET API that returns the cc_<hash>.json file - [x] allow cci to take in <hash> as url parameter - [ ] deploy on CCI actually runs through flow - [ ] - load spin icon - poll on new chaincode.json file name API - finally fade spin and rebuild UI from file - (depending on how this works maybe remove 
HTML5 local storage) */
app.use( serve_static(path.join(__dirname, 'public')) ); app.use(session({secret:'Somethignsomething1234!test', resave:true, saveUninitialized:true})); function setCustomCC(res, path) { if (serve_static.mime.lookup(path) === 'image/jpeg') res.setHeader('Cache-Control', 'public, max-age=2592000'); //30 days cache else if (serve_static.mime.lookup(path) === 'image/png') res.setHeader('Cache-Control', 'public, max-age=2592000');
random_line_split
process_packet.rs
use libc::size_t; use std::cell::RefCell; use std::net::{IpAddr,SocketAddr}; use std::os::raw::c_void; use std::panic; use std::rc::Rc; use std::slice; use std::str::FromStr; use time::precise_time_ns; use mio::PollOpt; use pnet::packet::Packet; use pnet::packet::ethernet::{EthernetPacket, EtherTypes}; use pnet::packet::ip::IpNextHeaderProtocols; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::tcp::{TcpPacket,TcpFlags}; use bufferable_ssl::BufferableSSL; use client_driver::SchedSessionTimeout; use bufferable_tcp::BufferableTCP; use elligator; use evented_ssl_eavesdropper::EventedSSLEavesdropper; use flow_tracker::{Flow,WscaleAndMSS}; use PerCoreGlobal; use session_error::SessionError; use session_id::SessionId; use tapdance_session::TapdanceSession; use util; const TLS_TYPE_APPLICATION_DATA: u8 = 0x17; const SQUID_PROXY_ADDR: &'static str = "127.0.0.1"; const SQUID_PROXY_PORT: u16 = 1234; const STREAM_TIMEOUT_NS: u64 = 120*1000*1000*1000; // 120 seconds // The jumping off point for all of our logic. This function inspects a packet // that has come in the tap interface. We do not yet have any idea if we care // about it; it might not even be TLS. It might not even be TCP! #[no_mangle] pub extern "C" fn rust_process_packet(ptr: *mut PerCoreGlobal, raw_ethframe: *mut c_void, frame_len: size_t) { let mut global = unsafe { &mut *ptr }; let rust_view_len = frame_len as usize; let rust_view = unsafe { slice::from_raw_parts_mut(raw_ethframe as *mut u8, frame_len as usize) }; global.stats.packets_this_period += 1; global.stats.bytes_this_period += rust_view_len as u64; let eth_pkt = match EthernetPacket::new(rust_view) { Some(pkt) => pkt, None => return, }; let eth_payload = eth_pkt.payload(); let ip_data = match eth_pkt.get_ethertype() { EtherTypes::Vlan => { if eth_payload[2] == 0x08 && eth_payload[3] == 0x00 { //let vlan_id: u16 = (eth_payload[0] as u16)*256 // + (eth_payload[1] as u16); &eth_payload[4..] 
} else { return } }, EtherTypes::Ipv4 => &eth_payload[0..], _ => return, }; match Ipv4Packet::new(ip_data) { Some(pkt) => global.process_ipv4_packet(pkt, rust_view_len), None => return, } } fn is_tls_app_pkt(tcp_pkt: &TcpPacket) -> bool { let payload = tcp_pkt.payload(); payload.len() > 5 && payload[0] == TLS_TYPE_APPLICATION_DATA } impl PerCoreGlobal { // frame_len is supposed to be the length of the whole Ethernet frame. We're // only passing it here for plumbing reasons, and just for stat reporting. fn process_ipv4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize) { // Ignore packets that aren't TCP if ip_pkt.get_next_level_protocol() != IpNextHeaderProtocols::Tcp { return; } let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) { Some(pkt) => pkt, None => return, }; self.stats.tcp_packets_this_period += 1; // Ignore packets that aren't -> 443. // libpnet getters all return host order. Ignore the "u16be" in their // docs; interactions with pnet are purely host order. if tcp_pkt.get_destination() != 443 { return; } self.stats.tls_packets_this_period += 1; // (HTTPS, really) self.stats.tls_bytes_this_period += frame_len as u64; self.process_tls_pkt(&ip_pkt, &tcp_pkt); } // Takes an IPv4 packet // Assumes (for now) that TLS records are in a single TCP packet // (no fragmentation). // Fragments could be stored in the flow_tracker if needed. pub fn process_tls_pkt(&mut self, ip_pkt: &Ipv4Packet, tcp_pkt: &TcpPacket) { let flow = Flow::new(ip_pkt, tcp_pkt); if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() { return; } let tcp_flags = tcp_pkt.get_flags(); if (tcp_flags & TcpFlags::SYN) != 0 && (tcp_flags & TcpFlags::ACK) == 0 { self.stats.port_443_syns_this_period += 1; self.flow_tracker.begin_tracking_flow(&flow, tcp_pkt.packet().to_vec()); return; } if !self.flow_tracker.tracking_at_all(&flow) { return; } // Note that FINs and RSTs are welcome in consume_tcp_pkt() as well. 
if !self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) { // EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow. self.flow_tracker.drop(&flow); } else if self.flow_tracker.is_td(&flow) { // Forward packets from established overt flows into the tun // interface, so that they'll reach forge_socket. self.forward_to_forge_socket(ip_pkt); if (tcp_flags & TcpFlags::FIN) != 0 { // This stream (overt flow) is ending. The client might come and // resume the TapDance session with a new stream, so leave the // overall session state intact. The is_hup event's processing // takes care of starting the BufferableSSL's cleanup. // FlowTracker::notice_fin() will schedule a RST to be sent to // the decoy server; forge_socket handles the FIN handshake. self.flow_tracker.notice_fin(&flow); } } else if (tcp_flags & TcpFlags::FIN) != 0 { // non-TD flow FINd => drop self.flow_tracker.drop(&flow); return; } if (tcp_flags & TcpFlags::RST) != 0 { // End connection, remove any relevant state. // TODO clean up TapDance session state, if any // (TODO i believe that the earlier forward_to_forge_socket would // cause a clien is_error event to fire, which would then clean up // the session. should confirm.) self.flow_tracker.drop(&flow); return; } // This is a non-RST/FIN packet of a flow we are tracking, but that is // not known to be TapDance. That means this might be a tag-bearing // first TLS app data packet: establish a TD session if so. if !self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) { // ...buuut don't bother checking these known-irrelevant addresses: // coming from U. Michigan (35.0.0.0/9) // going to Google CDN servers in Michigan (192.122.185.0/24) // coming from windyegret's internet connection (192.122.200.253) // coming from more of U. 
Michigan (141.212.0.0/14) let src = ip_pkt.get_source().octets(); let dest = ip_pkt.get_destination().octets(); if src[0] == 35 && (src[1] & 128 == 0) || dest[0] == 192 && dest[1] == 122 && dest[2] == 185 || src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 || src[0] == 141 && (src[2] & 252) == 212 || !self.try_establish_tapdance(&flow, tcp_pkt) { // No tag in first TLS app data packet ==> definitely not TD. self.flow_tracker.drop(&flow); } } } fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet) { let ip_len = ip_pkt.packet().len(); // TODO: see if the PI flag to the TUN interface can actually take care // of this for us let mut tun_pkt = Vec::with_capacity(ip_len+4); // These mystery bytes are a link-layer header; the kernel "receives" // tun packets as if they were really physically "received". Since they // weren't physically received, they do not have an Ethernet header. It // looks like the tun setup has its own type of header, rather than just // making up a fake Ethernet header. tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]); tun_pkt.extend_from_slice(ip_pkt.packet()); // Send into tun device (can fail, but these are best-effort IP packets) self.tun.send(tun_pkt).unwrap_or_else(|e|{ warn!("failed to send packet into tun: {}", e); 0}); self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64; } // Inspects a TLS app data packet for a TapDance tag. If found, establishes // the flow as a TapDance stream (and starts a new session, if 1st stream). // Returns true iff a TapDance stream was successfully established. 
fn try_establish_tapdance(&mut self, flow: &Flow, tcp_pkt: &TcpPacket) -> bool { let tag_payload = elligator::extract_telex_tag(&self.priv_key, &tcp_pkt.payload()); self.stats.elligator_this_period += 1; if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN + TAG_CLI_RND_LEN + TAG_CON_ID_LEN { return false; } if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS // Decoy will ACK current packet with this value. (Host-order). let expect_ack = tcp_pkt.get_sequence() .wrapping_add(tcp_pkt.payload().len() as u32); let wscale_and_mss = self.flow_tracker.mark_tapdance_flow(flow, expect_ack); self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss) } else
} pub fn establish_bidi(&mut self, tcp_pkt: &TcpPacket, flow: &Flow, tag_payload: &Vec<u8>, wscale_and_mss: WscaleAndMSS) -> bool { let (_, master_key, server_random, client_random, session_id) = parse_tag_payload(tag_payload); let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt); let mut client_ssl = BufferableSSL::new(session_id); let ssl_success = client_ssl.construct_forged_ssl( tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr, master_key, client_random, server_random); if ssl_success { let (is_a_reconnect, rc, cov_error) = self.create_or_recall_tapdance_session(session_id); let ref mut td = rc.borrow_mut(); let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone()); if !td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) { td.end_whole_session_error(SessionError::ClientProtocol); return false; } td.expect_bidi_reconnect = false; if let Some(cov_err) = cov_error { td.end_whole_session_error(cov_err); self.cli_ssl_driver .sessions_to_drop.push_back(td.session_id); } let src_oct1: u8 = ((flow.src_ip & 0xff000000u32) >> 24) as u8; let src_oct2: u8 = ((flow.src_ip & 0x00ff0000u32) >> 16) as u8; if is_a_reconnect { self.stats.reconns_this_period += 1; td.send_reconnect_to_client(); td.cov_read_cli_write(&mut self.cov_tcp_driver.rereg_queuer, false); if td.both_half_closed() { // if errored, must mark for drop self.cli_ssl_driver.sessions_to_drop .push_back(td.session_id); } } else { let decoy_ip_str = util::inet_htoa(flow.dst_ip); info!("newsession {} {}.{}.x.x:{} -> {}:{}", session_id, src_oct1, src_oct2, flow.src_port, decoy_ip_str, flow.dst_port); td.decoy_ip = decoy_ip_str; if self.overloaded_decoys.contains(&flow.dst_ip) { td.end_whole_session_error(SessionError::DecoyOverload); self.cli_ssl_driver.sessions_to_drop .push_back(td.session_id); } } self.cli_ssl_driver.stream_timeouts.push_back( SchedSessionTimeout { drop_time: precise_time_ns() + STREAM_TIMEOUT_NS, id: session_id, stream_count: td.cli_pair.stream_count() }); info!("newstream {} 
{}.{}.x.x:{} -> {}:{}", session_id, src_oct1, src_oct2, flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); true } else { error!("make_forged_tls() returned 0! Tagged TLS not picked up \ as a TapDance stream :("); false } } pub fn establish_upload_only(&mut self, tcp_pkt: &TcpPacket, flow: &Flow, tag_payload: &Vec<u8>) -> bool { let (_, master_key, server_random, client_random, session_id) = parse_tag_payload(tag_payload); let mut passive_ssl = EventedSSLEavesdropper::new(session_id); let ssl_success = passive_ssl.construct_eavesdropped_ssl( tcp_pkt, master_key, client_random, server_random); if ssl_success { if let Some(rc) = self.id2sess.get(&session_id) { let inserted_tok = self.cli_psv_driver.tok2sess.insert(rc.clone()); let ref mut td = rc.borrow_mut(); if !td.cli_pair.set_passive_uploader(passive_ssl, inserted_tok, &self.cli_psv_poll) { td.end_whole_session_error(SessionError::ClientProtocol); return false; } td.expect_uploader_reconnect = false; // TODO? self.stats.reconns_UPL_this_period += 1; // TODO? (goes thru bidi) td.send_UPL_reconnect_to_client(); self.flow_tracker.mark_passive_td(flow, rc.clone()); self.cli_ssl_driver.stream_timeouts.push_back( SchedSessionTimeout { drop_time: precise_time_ns() + STREAM_TIMEOUT_NS, id: session_id, stream_count: td.cli_pair.stream_count() }); report!("newuploader {} {}:{} -> {}:{}", session_id, util::inet_htoa(flow.src_ip), flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); true } else { error!("This new upload-only stream does not belong to an \ ongoing session. A session's first stream must be \ bidi. Session ID: {}", session_id); report!("newuploader {} {}:{} -> {}:{}", session_id, util::inet_htoa(flow.src_ip), flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); report!("error {} {}", session_id, SessionError::ClientProtocol.to_string()); // (passive_ssl goes out of scope, "deluploader") false } } else { error!("make_forged_memory_tls() returned 0! 
Tagged TLS not picked \ up as a passive TapDance stream :("); false } } // Lookup the ongoing session with ID session_id, if it exists. If it does // not, make a new one (including initiating the Squid TCP connection). // Returns: Bool is whether the session was already there. // Option<SessionError> is to be filled if session creation failed. fn create_or_recall_tapdance_session(&mut self, session_id: SessionId) -> (bool, Rc<RefCell<TapdanceSession>>, Option<SessionError>) { let ref mut cov_tcp_poll = self.cov_tcp_poll; let ref mut tok_map = self.cov_tcp_driver.tok2sess; let recalled = self.id2sess.contains_key(&session_id); let mut cov_err = None; let rc = self.id2sess.entry(session_id).or_insert_with(|| { let td_rc = Rc::new(RefCell::new(TapdanceSession::new(session_id))); // New proxy connection to local proxy. unwrap() relies on // SQUID_PROXY_ADDR being a valid constant. let dest = IpAddr::from_str(SQUID_PROXY_ADDR).unwrap(); let sock_addr = SocketAddr::new(dest, SQUID_PROXY_PORT); // NOTE: this mio version of TcpStream is nonblocking! if let Ok(sock) = ::mio::tcp::TcpStream::connect(&sock_addr) { let ref mut td = td_rc.borrow_mut(); td.cov.set_stream(BufferableTCP::new(sock)); let inserted_tok = tok_map.insert(td_rc.clone()); td.cov.register(cov_tcp_poll, inserted_tok.val(), util::all_unix_events(), PollOpt::edge()) .unwrap_or_else(|e|{error!("tcp_driver 1st reg: {}", e);}); td.cov.set_tok(inserted_tok); } else { // TODO: actually, we're more concerned with out-of-fds, which // is more like StationInternal. But, how to distinguish? cov_err = Some(SessionError::CovertStream); } td_rc }); (recalled, rc.clone(), cov_err) } } // impl PerCoreGlobal // These consts tie the slice indexing in establish_tapdance_stream_from_tag() // to the length check in try_establish_tapdance(). 
// Current tag payload format: //=============================================================== // 1 byte................flags // 48 bytes master_key // 32 bytes..............server_random // 32 bytes client_random // 16 bytes..............connection_id const TAG_FLAGS_LEN: usize = 1; const TAG_M_KEY_LEN: usize = 48; const TAG_SRV_RND_LEN: usize = 32; const TAG_CLI_RND_LEN: usize = 32; const TAG_CON_ID_LEN: usize = 16; // Assumes you will only call it after checking // if tag_payload.len() >= TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN + // TAG_CLI_RND_LEN + TAG_CON_ID_LEN fn parse_tag_payload(tag_payload: &Vec<u8>) -> (u8, &[u8], &[u8], &[u8], SessionId) { let mut offset = 0; let flags = tag_payload[offset]; offset += TAG_FLAGS_LEN; let master_key = &tag_payload[offset..offset+TAG_M_KEY_LEN]; offset += TAG_M_KEY_LEN; let server_random = &tag_payload[offset..offset+TAG_SRV_RND_LEN]; offset += TAG_SRV_RND_LEN; let client_random = &tag_payload[offset..offset+TAG_CLI_RND_LEN]; offset += TAG_CLI_RND_LEN; let session_id_slice = &tag_payload[offset..offset+TAG_CON_ID_LEN]; // (do `offset += TAG_CON_ID_LEN` here if you need to read further) let session_id = SessionId::new( array_ref![session_id_slice,0,TAG_CON_ID_LEN]); (flags, master_key, server_random, client_random, session_id) }
{ // upload-only eavesdropped TLS // (don't mark as TD in FlowTracker until you have the Rc<RefCell>) self.establish_upload_only(tcp_pkt, flow, &tag_payload) }
conditional_block
process_packet.rs
use libc::size_t; use std::cell::RefCell; use std::net::{IpAddr,SocketAddr}; use std::os::raw::c_void; use std::panic; use std::rc::Rc; use std::slice; use std::str::FromStr; use time::precise_time_ns; use mio::PollOpt; use pnet::packet::Packet; use pnet::packet::ethernet::{EthernetPacket, EtherTypes}; use pnet::packet::ip::IpNextHeaderProtocols; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::tcp::{TcpPacket,TcpFlags}; use bufferable_ssl::BufferableSSL; use client_driver::SchedSessionTimeout; use bufferable_tcp::BufferableTCP; use elligator; use evented_ssl_eavesdropper::EventedSSLEavesdropper; use flow_tracker::{Flow,WscaleAndMSS}; use PerCoreGlobal; use session_error::SessionError; use session_id::SessionId; use tapdance_session::TapdanceSession; use util; const TLS_TYPE_APPLICATION_DATA: u8 = 0x17; const SQUID_PROXY_ADDR: &'static str = "127.0.0.1"; const SQUID_PROXY_PORT: u16 = 1234; const STREAM_TIMEOUT_NS: u64 = 120*1000*1000*1000; // 120 seconds // The jumping off point for all of our logic. This function inspects a packet // that has come in the tap interface. We do not yet have any idea if we care // about it; it might not even be TLS. It might not even be TCP! #[no_mangle] pub extern "C" fn rust_process_packet(ptr: *mut PerCoreGlobal, raw_ethframe: *mut c_void, frame_len: size_t) { let mut global = unsafe { &mut *ptr }; let rust_view_len = frame_len as usize; let rust_view = unsafe { slice::from_raw_parts_mut(raw_ethframe as *mut u8, frame_len as usize) }; global.stats.packets_this_period += 1; global.stats.bytes_this_period += rust_view_len as u64; let eth_pkt = match EthernetPacket::new(rust_view) { Some(pkt) => pkt, None => return, }; let eth_payload = eth_pkt.payload(); let ip_data = match eth_pkt.get_ethertype() { EtherTypes::Vlan => { if eth_payload[2] == 0x08 && eth_payload[3] == 0x00 { //let vlan_id: u16 = (eth_payload[0] as u16)*256 // + (eth_payload[1] as u16); &eth_payload[4..] 
} else { return } }, EtherTypes::Ipv4 => &eth_payload[0..], _ => return, }; match Ipv4Packet::new(ip_data) { Some(pkt) => global.process_ipv4_packet(pkt, rust_view_len), None => return, } } fn is_tls_app_pkt(tcp_pkt: &TcpPacket) -> bool { let payload = tcp_pkt.payload(); payload.len() > 5 && payload[0] == TLS_TYPE_APPLICATION_DATA } impl PerCoreGlobal { // frame_len is supposed to be the length of the whole Ethernet frame. We're // only passing it here for plumbing reasons, and just for stat reporting. fn process_ipv4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize) { // Ignore packets that aren't TCP if ip_pkt.get_next_level_protocol() != IpNextHeaderProtocols::Tcp { return; } let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) { Some(pkt) => pkt, None => return, }; self.stats.tcp_packets_this_period += 1; // Ignore packets that aren't -> 443. // libpnet getters all return host order. Ignore the "u16be" in their // docs; interactions with pnet are purely host order. if tcp_pkt.get_destination() != 443 { return; } self.stats.tls_packets_this_period += 1; // (HTTPS, really) self.stats.tls_bytes_this_period += frame_len as u64; self.process_tls_pkt(&ip_pkt, &tcp_pkt); } // Takes an IPv4 packet // Assumes (for now) that TLS records are in a single TCP packet // (no fragmentation). // Fragments could be stored in the flow_tracker if needed. pub fn process_tls_pkt(&mut self, ip_pkt: &Ipv4Packet, tcp_pkt: &TcpPacket) { let flow = Flow::new(ip_pkt, tcp_pkt); if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() { return; } let tcp_flags = tcp_pkt.get_flags(); if (tcp_flags & TcpFlags::SYN) != 0 && (tcp_flags & TcpFlags::ACK) == 0 { self.stats.port_443_syns_this_period += 1; self.flow_tracker.begin_tracking_flow(&flow, tcp_pkt.packet().to_vec()); return; } if !self.flow_tracker.tracking_at_all(&flow) { return; } // Note that FINs and RSTs are welcome in consume_tcp_pkt() as well. 
if !self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) { // EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow. self.flow_tracker.drop(&flow); } else if self.flow_tracker.is_td(&flow) { // Forward packets from established overt flows into the tun // interface, so that they'll reach forge_socket. self.forward_to_forge_socket(ip_pkt); if (tcp_flags & TcpFlags::FIN) != 0 { // This stream (overt flow) is ending. The client might come and // resume the TapDance session with a new stream, so leave the // overall session state intact. The is_hup event's processing // takes care of starting the BufferableSSL's cleanup. // FlowTracker::notice_fin() will schedule a RST to be sent to // the decoy server; forge_socket handles the FIN handshake. self.flow_tracker.notice_fin(&flow); } } else if (tcp_flags & TcpFlags::FIN) != 0 { // non-TD flow FINd => drop self.flow_tracker.drop(&flow); return; } if (tcp_flags & TcpFlags::RST) != 0 { // End connection, remove any relevant state. // TODO clean up TapDance session state, if any // (TODO i believe that the earlier forward_to_forge_socket would // cause a clien is_error event to fire, which would then clean up // the session. should confirm.) self.flow_tracker.drop(&flow); return; } // This is a non-RST/FIN packet of a flow we are tracking, but that is // not known to be TapDance. That means this might be a tag-bearing // first TLS app data packet: establish a TD session if so. if !self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) { // ...buuut don't bother checking these known-irrelevant addresses: // coming from U. Michigan (35.0.0.0/9) // going to Google CDN servers in Michigan (192.122.185.0/24) // coming from windyegret's internet connection (192.122.200.253) // coming from more of U. 
Michigan (141.212.0.0/14) let src = ip_pkt.get_source().octets(); let dest = ip_pkt.get_destination().octets(); if src[0] == 35 && (src[1] & 128 == 0) || dest[0] == 192 && dest[1] == 122 && dest[2] == 185 || src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 || src[0] == 141 && (src[2] & 252) == 212 || !self.try_establish_tapdance(&flow, tcp_pkt) { // No tag in first TLS app data packet ==> definitely not TD. self.flow_tracker.drop(&flow); } } } fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet) { let ip_len = ip_pkt.packet().len(); // TODO: see if the PI flag to the TUN interface can actually take care // of this for us let mut tun_pkt = Vec::with_capacity(ip_len+4); // These mystery bytes are a link-layer header; the kernel "receives" // tun packets as if they were really physically "received". Since they // weren't physically received, they do not have an Ethernet header. It // looks like the tun setup has its own type of header, rather than just // making up a fake Ethernet header. tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]); tun_pkt.extend_from_slice(ip_pkt.packet()); // Send into tun device (can fail, but these are best-effort IP packets) self.tun.send(tun_pkt).unwrap_or_else(|e|{ warn!("failed to send packet into tun: {}", e); 0}); self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64; } // Inspects a TLS app data packet for a TapDance tag. If found, establishes // the flow as a TapDance stream (and starts a new session, if 1st stream). // Returns true iff a TapDance stream was successfully established. 
fn try_establish_tapdance(&mut self, flow: &Flow, tcp_pkt: &TcpPacket) -> bool { let tag_payload = elligator::extract_telex_tag(&self.priv_key, &tcp_pkt.payload()); self.stats.elligator_this_period += 1; if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN + TAG_CLI_RND_LEN + TAG_CON_ID_LEN { return false; } if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS // Decoy will ACK current packet with this value. (Host-order). let expect_ack = tcp_pkt.get_sequence() .wrapping_add(tcp_pkt.payload().len() as u32); let wscale_and_mss = self.flow_tracker.mark_tapdance_flow(flow, expect_ack); self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss) } else { // upload-only eavesdropped TLS // (don't mark as TD in FlowTracker until you have the Rc<RefCell>) self.establish_upload_only(tcp_pkt, flow, &tag_payload) } } pub fn establish_bidi(&mut self, tcp_pkt: &TcpPacket, flow: &Flow, tag_payload: &Vec<u8>, wscale_and_mss: WscaleAndMSS) -> bool
pub fn establish_upload_only(&mut self, tcp_pkt: &TcpPacket, flow: &Flow, tag_payload: &Vec<u8>) -> bool { let (_, master_key, server_random, client_random, session_id) = parse_tag_payload(tag_payload); let mut passive_ssl = EventedSSLEavesdropper::new(session_id); let ssl_success = passive_ssl.construct_eavesdropped_ssl( tcp_pkt, master_key, client_random, server_random); if ssl_success { if let Some(rc) = self.id2sess.get(&session_id) { let inserted_tok = self.cli_psv_driver.tok2sess.insert(rc.clone()); let ref mut td = rc.borrow_mut(); if !td.cli_pair.set_passive_uploader(passive_ssl, inserted_tok, &self.cli_psv_poll) { td.end_whole_session_error(SessionError::ClientProtocol); return false; } td.expect_uploader_reconnect = false; // TODO? self.stats.reconns_UPL_this_period += 1; // TODO? (goes thru bidi) td.send_UPL_reconnect_to_client(); self.flow_tracker.mark_passive_td(flow, rc.clone()); self.cli_ssl_driver.stream_timeouts.push_back( SchedSessionTimeout { drop_time: precise_time_ns() + STREAM_TIMEOUT_NS, id: session_id, stream_count: td.cli_pair.stream_count() }); report!("newuploader {} {}:{} -> {}:{}", session_id, util::inet_htoa(flow.src_ip), flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); true } else { error!("This new upload-only stream does not belong to an \ ongoing session. A session's first stream must be \ bidi. Session ID: {}", session_id); report!("newuploader {} {}:{} -> {}:{}", session_id, util::inet_htoa(flow.src_ip), flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); report!("error {} {}", session_id, SessionError::ClientProtocol.to_string()); // (passive_ssl goes out of scope, "deluploader") false } } else { error!("make_forged_memory_tls() returned 0! Tagged TLS not picked \ up as a passive TapDance stream :("); false } } // Lookup the ongoing session with ID session_id, if it exists. If it does // not, make a new one (including initiating the Squid TCP connection). 
// Returns: Bool is whether the session was already there. // Option<SessionError> is to be filled if session creation failed. fn create_or_recall_tapdance_session(&mut self, session_id: SessionId) -> (bool, Rc<RefCell<TapdanceSession>>, Option<SessionError>) { let ref mut cov_tcp_poll = self.cov_tcp_poll; let ref mut tok_map = self.cov_tcp_driver.tok2sess; let recalled = self.id2sess.contains_key(&session_id); let mut cov_err = None; let rc = self.id2sess.entry(session_id).or_insert_with(|| { let td_rc = Rc::new(RefCell::new(TapdanceSession::new(session_id))); // New proxy connection to local proxy. unwrap() relies on // SQUID_PROXY_ADDR being a valid constant. let dest = IpAddr::from_str(SQUID_PROXY_ADDR).unwrap(); let sock_addr = SocketAddr::new(dest, SQUID_PROXY_PORT); // NOTE: this mio version of TcpStream is nonblocking! if let Ok(sock) = ::mio::tcp::TcpStream::connect(&sock_addr) { let ref mut td = td_rc.borrow_mut(); td.cov.set_stream(BufferableTCP::new(sock)); let inserted_tok = tok_map.insert(td_rc.clone()); td.cov.register(cov_tcp_poll, inserted_tok.val(), util::all_unix_events(), PollOpt::edge()) .unwrap_or_else(|e|{error!("tcp_driver 1st reg: {}", e);}); td.cov.set_tok(inserted_tok); } else { // TODO: actually, we're more concerned with out-of-fds, which // is more like StationInternal. But, how to distinguish? cov_err = Some(SessionError::CovertStream); } td_rc }); (recalled, rc.clone(), cov_err) } } // impl PerCoreGlobal // These consts tie the slice indexing in establish_tapdance_stream_from_tag() // to the length check in try_establish_tapdance(). 
// Current tag payload format: //=============================================================== // 1 byte................flags // 48 bytes master_key // 32 bytes..............server_random // 32 bytes client_random // 16 bytes..............connection_id const TAG_FLAGS_LEN: usize = 1; const TAG_M_KEY_LEN: usize = 48; const TAG_SRV_RND_LEN: usize = 32; const TAG_CLI_RND_LEN: usize = 32; const TAG_CON_ID_LEN: usize = 16; // Assumes you will only call it after checking // if tag_payload.len() >= TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN + // TAG_CLI_RND_LEN + TAG_CON_ID_LEN fn parse_tag_payload(tag_payload: &Vec<u8>) -> (u8, &[u8], &[u8], &[u8], SessionId) { let mut offset = 0; let flags = tag_payload[offset]; offset += TAG_FLAGS_LEN; let master_key = &tag_payload[offset..offset+TAG_M_KEY_LEN]; offset += TAG_M_KEY_LEN; let server_random = &tag_payload[offset..offset+TAG_SRV_RND_LEN]; offset += TAG_SRV_RND_LEN; let client_random = &tag_payload[offset..offset+TAG_CLI_RND_LEN]; offset += TAG_CLI_RND_LEN; let session_id_slice = &tag_payload[offset..offset+TAG_CON_ID_LEN]; // (do `offset += TAG_CON_ID_LEN` here if you need to read further) let session_id = SessionId::new( array_ref![session_id_slice,0,TAG_CON_ID_LEN]); (flags, master_key, server_random, client_random, session_id) }
{ let (_, master_key, server_random, client_random, session_id) = parse_tag_payload(tag_payload); let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt); let mut client_ssl = BufferableSSL::new(session_id); let ssl_success = client_ssl.construct_forged_ssl( tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr, master_key, client_random, server_random); if ssl_success { let (is_a_reconnect, rc, cov_error) = self.create_or_recall_tapdance_session(session_id); let ref mut td = rc.borrow_mut(); let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone()); if !td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) { td.end_whole_session_error(SessionError::ClientProtocol); return false; } td.expect_bidi_reconnect = false; if let Some(cov_err) = cov_error { td.end_whole_session_error(cov_err); self.cli_ssl_driver .sessions_to_drop.push_back(td.session_id); } let src_oct1: u8 = ((flow.src_ip & 0xff000000u32) >> 24) as u8; let src_oct2: u8 = ((flow.src_ip & 0x00ff0000u32) >> 16) as u8; if is_a_reconnect { self.stats.reconns_this_period += 1; td.send_reconnect_to_client(); td.cov_read_cli_write(&mut self.cov_tcp_driver.rereg_queuer, false); if td.both_half_closed() { // if errored, must mark for drop self.cli_ssl_driver.sessions_to_drop .push_back(td.session_id); } } else { let decoy_ip_str = util::inet_htoa(flow.dst_ip); info!("newsession {} {}.{}.x.x:{} -> {}:{}", session_id, src_oct1, src_oct2, flow.src_port, decoy_ip_str, flow.dst_port); td.decoy_ip = decoy_ip_str; if self.overloaded_decoys.contains(&flow.dst_ip) { td.end_whole_session_error(SessionError::DecoyOverload); self.cli_ssl_driver.sessions_to_drop .push_back(td.session_id); } } self.cli_ssl_driver.stream_timeouts.push_back( SchedSessionTimeout { drop_time: precise_time_ns() + STREAM_TIMEOUT_NS, id: session_id, stream_count: td.cli_pair.stream_count() }); info!("newstream {} {}.{}.x.x:{} -> {}:{}", session_id, src_oct1, src_oct2, flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); true } else { 
error!("make_forged_tls() returned 0! Tagged TLS not picked up \ as a TapDance stream :("); false } }
identifier_body
process_packet.rs
use libc::size_t; use std::cell::RefCell; use std::net::{IpAddr,SocketAddr}; use std::os::raw::c_void; use std::panic; use std::rc::Rc; use std::slice; use std::str::FromStr; use time::precise_time_ns; use mio::PollOpt; use pnet::packet::Packet; use pnet::packet::ethernet::{EthernetPacket, EtherTypes}; use pnet::packet::ip::IpNextHeaderProtocols; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::tcp::{TcpPacket,TcpFlags}; use bufferable_ssl::BufferableSSL; use client_driver::SchedSessionTimeout; use bufferable_tcp::BufferableTCP; use elligator; use evented_ssl_eavesdropper::EventedSSLEavesdropper; use flow_tracker::{Flow,WscaleAndMSS}; use PerCoreGlobal; use session_error::SessionError; use session_id::SessionId; use tapdance_session::TapdanceSession; use util; const TLS_TYPE_APPLICATION_DATA: u8 = 0x17; const SQUID_PROXY_ADDR: &'static str = "127.0.0.1"; const SQUID_PROXY_PORT: u16 = 1234; const STREAM_TIMEOUT_NS: u64 = 120*1000*1000*1000; // 120 seconds // The jumping off point for all of our logic. This function inspects a packet // that has come in the tap interface. We do not yet have any idea if we care // about it; it might not even be TLS. It might not even be TCP! #[no_mangle] pub extern "C" fn rust_process_packet(ptr: *mut PerCoreGlobal, raw_ethframe: *mut c_void, frame_len: size_t) { let mut global = unsafe { &mut *ptr }; let rust_view_len = frame_len as usize; let rust_view = unsafe { slice::from_raw_parts_mut(raw_ethframe as *mut u8, frame_len as usize) }; global.stats.packets_this_period += 1; global.stats.bytes_this_period += rust_view_len as u64; let eth_pkt = match EthernetPacket::new(rust_view) { Some(pkt) => pkt, None => return, }; let eth_payload = eth_pkt.payload(); let ip_data = match eth_pkt.get_ethertype() { EtherTypes::Vlan => { if eth_payload[2] == 0x08 && eth_payload[3] == 0x00 { //let vlan_id: u16 = (eth_payload[0] as u16)*256
}, EtherTypes::Ipv4 => &eth_payload[0..], _ => return, }; match Ipv4Packet::new(ip_data) { Some(pkt) => global.process_ipv4_packet(pkt, rust_view_len), None => return, } } fn is_tls_app_pkt(tcp_pkt: &TcpPacket) -> bool { let payload = tcp_pkt.payload(); payload.len() > 5 && payload[0] == TLS_TYPE_APPLICATION_DATA } impl PerCoreGlobal { // frame_len is supposed to be the length of the whole Ethernet frame. We're // only passing it here for plumbing reasons, and just for stat reporting. fn process_ipv4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize) { // Ignore packets that aren't TCP if ip_pkt.get_next_level_protocol() != IpNextHeaderProtocols::Tcp { return; } let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) { Some(pkt) => pkt, None => return, }; self.stats.tcp_packets_this_period += 1; // Ignore packets that aren't -> 443. // libpnet getters all return host order. Ignore the "u16be" in their // docs; interactions with pnet are purely host order. if tcp_pkt.get_destination() != 443 { return; } self.stats.tls_packets_this_period += 1; // (HTTPS, really) self.stats.tls_bytes_this_period += frame_len as u64; self.process_tls_pkt(&ip_pkt, &tcp_pkt); } // Takes an IPv4 packet // Assumes (for now) that TLS records are in a single TCP packet // (no fragmentation). // Fragments could be stored in the flow_tracker if needed. pub fn process_tls_pkt(&mut self, ip_pkt: &Ipv4Packet, tcp_pkt: &TcpPacket) { let flow = Flow::new(ip_pkt, tcp_pkt); if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() { return; } let tcp_flags = tcp_pkt.get_flags(); if (tcp_flags & TcpFlags::SYN) != 0 && (tcp_flags & TcpFlags::ACK) == 0 { self.stats.port_443_syns_this_period += 1; self.flow_tracker.begin_tracking_flow(&flow, tcp_pkt.packet().to_vec()); return; } if !self.flow_tracker.tracking_at_all(&flow) { return; } // Note that FINs and RSTs are welcome in consume_tcp_pkt() as well. 
if !self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) { // EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow. self.flow_tracker.drop(&flow); } else if self.flow_tracker.is_td(&flow) { // Forward packets from established overt flows into the tun // interface, so that they'll reach forge_socket. self.forward_to_forge_socket(ip_pkt); if (tcp_flags & TcpFlags::FIN) != 0 { // This stream (overt flow) is ending. The client might come and // resume the TapDance session with a new stream, so leave the // overall session state intact. The is_hup event's processing // takes care of starting the BufferableSSL's cleanup. // FlowTracker::notice_fin() will schedule a RST to be sent to // the decoy server; forge_socket handles the FIN handshake. self.flow_tracker.notice_fin(&flow); } } else if (tcp_flags & TcpFlags::FIN) != 0 { // non-TD flow FINd => drop self.flow_tracker.drop(&flow); return; } if (tcp_flags & TcpFlags::RST) != 0 { // End connection, remove any relevant state. // TODO clean up TapDance session state, if any // (TODO i believe that the earlier forward_to_forge_socket would // cause a clien is_error event to fire, which would then clean up // the session. should confirm.) self.flow_tracker.drop(&flow); return; } // This is a non-RST/FIN packet of a flow we are tracking, but that is // not known to be TapDance. That means this might be a tag-bearing // first TLS app data packet: establish a TD session if so. if !self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) { // ...buuut don't bother checking these known-irrelevant addresses: // coming from U. Michigan (35.0.0.0/9) // going to Google CDN servers in Michigan (192.122.185.0/24) // coming from windyegret's internet connection (192.122.200.253) // coming from more of U. 
Michigan (141.212.0.0/14) let src = ip_pkt.get_source().octets(); let dest = ip_pkt.get_destination().octets(); if src[0] == 35 && (src[1] & 128 == 0) || dest[0] == 192 && dest[1] == 122 && dest[2] == 185 || src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 || src[0] == 141 && (src[2] & 252) == 212 || !self.try_establish_tapdance(&flow, tcp_pkt) { // No tag in first TLS app data packet ==> definitely not TD. self.flow_tracker.drop(&flow); } } } fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet) { let ip_len = ip_pkt.packet().len(); // TODO: see if the PI flag to the TUN interface can actually take care // of this for us let mut tun_pkt = Vec::with_capacity(ip_len+4); // These mystery bytes are a link-layer header; the kernel "receives" // tun packets as if they were really physically "received". Since they // weren't physically received, they do not have an Ethernet header. It // looks like the tun setup has its own type of header, rather than just // making up a fake Ethernet header. tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]); tun_pkt.extend_from_slice(ip_pkt.packet()); // Send into tun device (can fail, but these are best-effort IP packets) self.tun.send(tun_pkt).unwrap_or_else(|e|{ warn!("failed to send packet into tun: {}", e); 0}); self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64; } // Inspects a TLS app data packet for a TapDance tag. If found, establishes // the flow as a TapDance stream (and starts a new session, if 1st stream). // Returns true iff a TapDance stream was successfully established. 
fn try_establish_tapdance(&mut self, flow: &Flow, tcp_pkt: &TcpPacket) -> bool { let tag_payload = elligator::extract_telex_tag(&self.priv_key, &tcp_pkt.payload()); self.stats.elligator_this_period += 1; if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN + TAG_CLI_RND_LEN + TAG_CON_ID_LEN { return false; } if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS // Decoy will ACK current packet with this value. (Host-order). let expect_ack = tcp_pkt.get_sequence() .wrapping_add(tcp_pkt.payload().len() as u32); let wscale_and_mss = self.flow_tracker.mark_tapdance_flow(flow, expect_ack); self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss) } else { // upload-only eavesdropped TLS // (don't mark as TD in FlowTracker until you have the Rc<RefCell>) self.establish_upload_only(tcp_pkt, flow, &tag_payload) } } pub fn establish_bidi(&mut self, tcp_pkt: &TcpPacket, flow: &Flow, tag_payload: &Vec<u8>, wscale_and_mss: WscaleAndMSS) -> bool { let (_, master_key, server_random, client_random, session_id) = parse_tag_payload(tag_payload); let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt); let mut client_ssl = BufferableSSL::new(session_id); let ssl_success = client_ssl.construct_forged_ssl( tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr, master_key, client_random, server_random); if ssl_success { let (is_a_reconnect, rc, cov_error) = self.create_or_recall_tapdance_session(session_id); let ref mut td = rc.borrow_mut(); let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone()); if !td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) { td.end_whole_session_error(SessionError::ClientProtocol); return false; } td.expect_bidi_reconnect = false; if let Some(cov_err) = cov_error { td.end_whole_session_error(cov_err); self.cli_ssl_driver .sessions_to_drop.push_back(td.session_id); } let src_oct1: u8 = ((flow.src_ip & 0xff000000u32) >> 24) as u8; let src_oct2: u8 = ((flow.src_ip & 0x00ff0000u32) >> 16) as u8; if 
is_a_reconnect { self.stats.reconns_this_period += 1; td.send_reconnect_to_client(); td.cov_read_cli_write(&mut self.cov_tcp_driver.rereg_queuer, false); if td.both_half_closed() { // if errored, must mark for drop self.cli_ssl_driver.sessions_to_drop .push_back(td.session_id); } } else { let decoy_ip_str = util::inet_htoa(flow.dst_ip); info!("newsession {} {}.{}.x.x:{} -> {}:{}", session_id, src_oct1, src_oct2, flow.src_port, decoy_ip_str, flow.dst_port); td.decoy_ip = decoy_ip_str; if self.overloaded_decoys.contains(&flow.dst_ip) { td.end_whole_session_error(SessionError::DecoyOverload); self.cli_ssl_driver.sessions_to_drop .push_back(td.session_id); } } self.cli_ssl_driver.stream_timeouts.push_back( SchedSessionTimeout { drop_time: precise_time_ns() + STREAM_TIMEOUT_NS, id: session_id, stream_count: td.cli_pair.stream_count() }); info!("newstream {} {}.{}.x.x:{} -> {}:{}", session_id, src_oct1, src_oct2, flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); true } else { error!("make_forged_tls() returned 0! Tagged TLS not picked up \ as a TapDance stream :("); false } } pub fn establish_upload_only(&mut self, tcp_pkt: &TcpPacket, flow: &Flow, tag_payload: &Vec<u8>) -> bool { let (_, master_key, server_random, client_random, session_id) = parse_tag_payload(tag_payload); let mut passive_ssl = EventedSSLEavesdropper::new(session_id); let ssl_success = passive_ssl.construct_eavesdropped_ssl( tcp_pkt, master_key, client_random, server_random); if ssl_success { if let Some(rc) = self.id2sess.get(&session_id) { let inserted_tok = self.cli_psv_driver.tok2sess.insert(rc.clone()); let ref mut td = rc.borrow_mut(); if !td.cli_pair.set_passive_uploader(passive_ssl, inserted_tok, &self.cli_psv_poll) { td.end_whole_session_error(SessionError::ClientProtocol); return false; } td.expect_uploader_reconnect = false; // TODO? self.stats.reconns_UPL_this_period += 1; // TODO? 
(goes thru bidi) td.send_UPL_reconnect_to_client(); self.flow_tracker.mark_passive_td(flow, rc.clone()); self.cli_ssl_driver.stream_timeouts.push_back( SchedSessionTimeout { drop_time: precise_time_ns() + STREAM_TIMEOUT_NS, id: session_id, stream_count: td.cli_pair.stream_count() }); report!("newuploader {} {}:{} -> {}:{}", session_id, util::inet_htoa(flow.src_ip), flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); true } else { error!("This new upload-only stream does not belong to an \ ongoing session. A session's first stream must be \ bidi. Session ID: {}", session_id); report!("newuploader {} {}:{} -> {}:{}", session_id, util::inet_htoa(flow.src_ip), flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); report!("error {} {}", session_id, SessionError::ClientProtocol.to_string()); // (passive_ssl goes out of scope, "deluploader") false } } else { error!("make_forged_memory_tls() returned 0! Tagged TLS not picked \ up as a passive TapDance stream :("); false } } // Lookup the ongoing session with ID session_id, if it exists. If it does // not, make a new one (including initiating the Squid TCP connection). // Returns: Bool is whether the session was already there. // Option<SessionError> is to be filled if session creation failed. fn create_or_recall_tapdance_session(&mut self, session_id: SessionId) -> (bool, Rc<RefCell<TapdanceSession>>, Option<SessionError>) { let ref mut cov_tcp_poll = self.cov_tcp_poll; let ref mut tok_map = self.cov_tcp_driver.tok2sess; let recalled = self.id2sess.contains_key(&session_id); let mut cov_err = None; let rc = self.id2sess.entry(session_id).or_insert_with(|| { let td_rc = Rc::new(RefCell::new(TapdanceSession::new(session_id))); // New proxy connection to local proxy. unwrap() relies on // SQUID_PROXY_ADDR being a valid constant. let dest = IpAddr::from_str(SQUID_PROXY_ADDR).unwrap(); let sock_addr = SocketAddr::new(dest, SQUID_PROXY_PORT); // NOTE: this mio version of TcpStream is nonblocking! 
if let Ok(sock) = ::mio::tcp::TcpStream::connect(&sock_addr) { let ref mut td = td_rc.borrow_mut(); td.cov.set_stream(BufferableTCP::new(sock)); let inserted_tok = tok_map.insert(td_rc.clone()); td.cov.register(cov_tcp_poll, inserted_tok.val(), util::all_unix_events(), PollOpt::edge()) .unwrap_or_else(|e|{error!("tcp_driver 1st reg: {}", e);}); td.cov.set_tok(inserted_tok); } else { // TODO: actually, we're more concerned with out-of-fds, which // is more like StationInternal. But, how to distinguish? cov_err = Some(SessionError::CovertStream); } td_rc }); (recalled, rc.clone(), cov_err) } } // impl PerCoreGlobal // These consts tie the slice indexing in establish_tapdance_stream_from_tag() // to the length check in try_establish_tapdance(). // Current tag payload format: //=============================================================== // 1 byte................flags // 48 bytes master_key // 32 bytes..............server_random // 32 bytes client_random // 16 bytes..............connection_id const TAG_FLAGS_LEN: usize = 1; const TAG_M_KEY_LEN: usize = 48; const TAG_SRV_RND_LEN: usize = 32; const TAG_CLI_RND_LEN: usize = 32; const TAG_CON_ID_LEN: usize = 16; // Assumes you will only call it after checking // if tag_payload.len() >= TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN + // TAG_CLI_RND_LEN + TAG_CON_ID_LEN fn parse_tag_payload(tag_payload: &Vec<u8>) -> (u8, &[u8], &[u8], &[u8], SessionId) { let mut offset = 0; let flags = tag_payload[offset]; offset += TAG_FLAGS_LEN; let master_key = &tag_payload[offset..offset+TAG_M_KEY_LEN]; offset += TAG_M_KEY_LEN; let server_random = &tag_payload[offset..offset+TAG_SRV_RND_LEN]; offset += TAG_SRV_RND_LEN; let client_random = &tag_payload[offset..offset+TAG_CLI_RND_LEN]; offset += TAG_CLI_RND_LEN; let session_id_slice = &tag_payload[offset..offset+TAG_CON_ID_LEN]; // (do `offset += TAG_CON_ID_LEN` here if you need to read further) let session_id = SessionId::new( array_ref![session_id_slice,0,TAG_CON_ID_LEN]); (flags, 
master_key, server_random, client_random, session_id) }
// + (eth_payload[1] as u16); &eth_payload[4..] } else { return }
random_line_split
process_packet.rs
use libc::size_t; use std::cell::RefCell; use std::net::{IpAddr,SocketAddr}; use std::os::raw::c_void; use std::panic; use std::rc::Rc; use std::slice; use std::str::FromStr; use time::precise_time_ns; use mio::PollOpt; use pnet::packet::Packet; use pnet::packet::ethernet::{EthernetPacket, EtherTypes}; use pnet::packet::ip::IpNextHeaderProtocols; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::tcp::{TcpPacket,TcpFlags}; use bufferable_ssl::BufferableSSL; use client_driver::SchedSessionTimeout; use bufferable_tcp::BufferableTCP; use elligator; use evented_ssl_eavesdropper::EventedSSLEavesdropper; use flow_tracker::{Flow,WscaleAndMSS}; use PerCoreGlobal; use session_error::SessionError; use session_id::SessionId; use tapdance_session::TapdanceSession; use util; const TLS_TYPE_APPLICATION_DATA: u8 = 0x17; const SQUID_PROXY_ADDR: &'static str = "127.0.0.1"; const SQUID_PROXY_PORT: u16 = 1234; const STREAM_TIMEOUT_NS: u64 = 120*1000*1000*1000; // 120 seconds // The jumping off point for all of our logic. This function inspects a packet // that has come in the tap interface. We do not yet have any idea if we care // about it; it might not even be TLS. It might not even be TCP! #[no_mangle] pub extern "C" fn rust_process_packet(ptr: *mut PerCoreGlobal, raw_ethframe: *mut c_void, frame_len: size_t) { let mut global = unsafe { &mut *ptr }; let rust_view_len = frame_len as usize; let rust_view = unsafe { slice::from_raw_parts_mut(raw_ethframe as *mut u8, frame_len as usize) }; global.stats.packets_this_period += 1; global.stats.bytes_this_period += rust_view_len as u64; let eth_pkt = match EthernetPacket::new(rust_view) { Some(pkt) => pkt, None => return, }; let eth_payload = eth_pkt.payload(); let ip_data = match eth_pkt.get_ethertype() { EtherTypes::Vlan => { if eth_payload[2] == 0x08 && eth_payload[3] == 0x00 { //let vlan_id: u16 = (eth_payload[0] as u16)*256 // + (eth_payload[1] as u16); &eth_payload[4..] 
} else { return } }, EtherTypes::Ipv4 => &eth_payload[0..], _ => return, }; match Ipv4Packet::new(ip_data) { Some(pkt) => global.process_ipv4_packet(pkt, rust_view_len), None => return, } } fn is_tls_app_pkt(tcp_pkt: &TcpPacket) -> bool { let payload = tcp_pkt.payload(); payload.len() > 5 && payload[0] == TLS_TYPE_APPLICATION_DATA } impl PerCoreGlobal { // frame_len is supposed to be the length of the whole Ethernet frame. We're // only passing it here for plumbing reasons, and just for stat reporting. fn process_ipv4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize) { // Ignore packets that aren't TCP if ip_pkt.get_next_level_protocol() != IpNextHeaderProtocols::Tcp { return; } let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) { Some(pkt) => pkt, None => return, }; self.stats.tcp_packets_this_period += 1; // Ignore packets that aren't -> 443. // libpnet getters all return host order. Ignore the "u16be" in their // docs; interactions with pnet are purely host order. if tcp_pkt.get_destination() != 443 { return; } self.stats.tls_packets_this_period += 1; // (HTTPS, really) self.stats.tls_bytes_this_period += frame_len as u64; self.process_tls_pkt(&ip_pkt, &tcp_pkt); } // Takes an IPv4 packet // Assumes (for now) that TLS records are in a single TCP packet // (no fragmentation). // Fragments could be stored in the flow_tracker if needed. pub fn process_tls_pkt(&mut self, ip_pkt: &Ipv4Packet, tcp_pkt: &TcpPacket) { let flow = Flow::new(ip_pkt, tcp_pkt); if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() { return; } let tcp_flags = tcp_pkt.get_flags(); if (tcp_flags & TcpFlags::SYN) != 0 && (tcp_flags & TcpFlags::ACK) == 0 { self.stats.port_443_syns_this_period += 1; self.flow_tracker.begin_tracking_flow(&flow, tcp_pkt.packet().to_vec()); return; } if !self.flow_tracker.tracking_at_all(&flow) { return; } // Note that FINs and RSTs are welcome in consume_tcp_pkt() as well. 
if !self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) { // EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow. self.flow_tracker.drop(&flow); } else if self.flow_tracker.is_td(&flow) { // Forward packets from established overt flows into the tun // interface, so that they'll reach forge_socket. self.forward_to_forge_socket(ip_pkt); if (tcp_flags & TcpFlags::FIN) != 0 { // This stream (overt flow) is ending. The client might come and // resume the TapDance session with a new stream, so leave the // overall session state intact. The is_hup event's processing // takes care of starting the BufferableSSL's cleanup. // FlowTracker::notice_fin() will schedule a RST to be sent to // the decoy server; forge_socket handles the FIN handshake. self.flow_tracker.notice_fin(&flow); } } else if (tcp_flags & TcpFlags::FIN) != 0 { // non-TD flow FINd => drop self.flow_tracker.drop(&flow); return; } if (tcp_flags & TcpFlags::RST) != 0 { // End connection, remove any relevant state. // TODO clean up TapDance session state, if any // (TODO i believe that the earlier forward_to_forge_socket would // cause a clien is_error event to fire, which would then clean up // the session. should confirm.) self.flow_tracker.drop(&flow); return; } // This is a non-RST/FIN packet of a flow we are tracking, but that is // not known to be TapDance. That means this might be a tag-bearing // first TLS app data packet: establish a TD session if so. if !self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) { // ...buuut don't bother checking these known-irrelevant addresses: // coming from U. Michigan (35.0.0.0/9) // going to Google CDN servers in Michigan (192.122.185.0/24) // coming from windyegret's internet connection (192.122.200.253) // coming from more of U. 
Michigan (141.212.0.0/14) let src = ip_pkt.get_source().octets(); let dest = ip_pkt.get_destination().octets(); if src[0] == 35 && (src[1] & 128 == 0) || dest[0] == 192 && dest[1] == 122 && dest[2] == 185 || src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 || src[0] == 141 && (src[2] & 252) == 212 || !self.try_establish_tapdance(&flow, tcp_pkt) { // No tag in first TLS app data packet ==> definitely not TD. self.flow_tracker.drop(&flow); } } } fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet) { let ip_len = ip_pkt.packet().len(); // TODO: see if the PI flag to the TUN interface can actually take care // of this for us let mut tun_pkt = Vec::with_capacity(ip_len+4); // These mystery bytes are a link-layer header; the kernel "receives" // tun packets as if they were really physically "received". Since they // weren't physically received, they do not have an Ethernet header. It // looks like the tun setup has its own type of header, rather than just // making up a fake Ethernet header. tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]); tun_pkt.extend_from_slice(ip_pkt.packet()); // Send into tun device (can fail, but these are best-effort IP packets) self.tun.send(tun_pkt).unwrap_or_else(|e|{ warn!("failed to send packet into tun: {}", e); 0}); self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64; } // Inspects a TLS app data packet for a TapDance tag. If found, establishes // the flow as a TapDance stream (and starts a new session, if 1st stream). // Returns true iff a TapDance stream was successfully established. fn
(&mut self, flow: &Flow, tcp_pkt: &TcpPacket) -> bool { let tag_payload = elligator::extract_telex_tag(&self.priv_key, &tcp_pkt.payload()); self.stats.elligator_this_period += 1; if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN + TAG_CLI_RND_LEN + TAG_CON_ID_LEN { return false; } if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS // Decoy will ACK current packet with this value. (Host-order). let expect_ack = tcp_pkt.get_sequence() .wrapping_add(tcp_pkt.payload().len() as u32); let wscale_and_mss = self.flow_tracker.mark_tapdance_flow(flow, expect_ack); self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss) } else { // upload-only eavesdropped TLS // (don't mark as TD in FlowTracker until you have the Rc<RefCell>) self.establish_upload_only(tcp_pkt, flow, &tag_payload) } } pub fn establish_bidi(&mut self, tcp_pkt: &TcpPacket, flow: &Flow, tag_payload: &Vec<u8>, wscale_and_mss: WscaleAndMSS) -> bool { let (_, master_key, server_random, client_random, session_id) = parse_tag_payload(tag_payload); let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt); let mut client_ssl = BufferableSSL::new(session_id); let ssl_success = client_ssl.construct_forged_ssl( tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr, master_key, client_random, server_random); if ssl_success { let (is_a_reconnect, rc, cov_error) = self.create_or_recall_tapdance_session(session_id); let ref mut td = rc.borrow_mut(); let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone()); if !td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) { td.end_whole_session_error(SessionError::ClientProtocol); return false; } td.expect_bidi_reconnect = false; if let Some(cov_err) = cov_error { td.end_whole_session_error(cov_err); self.cli_ssl_driver .sessions_to_drop.push_back(td.session_id); } let src_oct1: u8 = ((flow.src_ip & 0xff000000u32) >> 24) as u8; let src_oct2: u8 = ((flow.src_ip & 0x00ff0000u32) >> 16) as u8; if is_a_reconnect { 
self.stats.reconns_this_period += 1; td.send_reconnect_to_client(); td.cov_read_cli_write(&mut self.cov_tcp_driver.rereg_queuer, false); if td.both_half_closed() { // if errored, must mark for drop self.cli_ssl_driver.sessions_to_drop .push_back(td.session_id); } } else { let decoy_ip_str = util::inet_htoa(flow.dst_ip); info!("newsession {} {}.{}.x.x:{} -> {}:{}", session_id, src_oct1, src_oct2, flow.src_port, decoy_ip_str, flow.dst_port); td.decoy_ip = decoy_ip_str; if self.overloaded_decoys.contains(&flow.dst_ip) { td.end_whole_session_error(SessionError::DecoyOverload); self.cli_ssl_driver.sessions_to_drop .push_back(td.session_id); } } self.cli_ssl_driver.stream_timeouts.push_back( SchedSessionTimeout { drop_time: precise_time_ns() + STREAM_TIMEOUT_NS, id: session_id, stream_count: td.cli_pair.stream_count() }); info!("newstream {} {}.{}.x.x:{} -> {}:{}", session_id, src_oct1, src_oct2, flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); true } else { error!("make_forged_tls() returned 0! Tagged TLS not picked up \ as a TapDance stream :("); false } } pub fn establish_upload_only(&mut self, tcp_pkt: &TcpPacket, flow: &Flow, tag_payload: &Vec<u8>) -> bool { let (_, master_key, server_random, client_random, session_id) = parse_tag_payload(tag_payload); let mut passive_ssl = EventedSSLEavesdropper::new(session_id); let ssl_success = passive_ssl.construct_eavesdropped_ssl( tcp_pkt, master_key, client_random, server_random); if ssl_success { if let Some(rc) = self.id2sess.get(&session_id) { let inserted_tok = self.cli_psv_driver.tok2sess.insert(rc.clone()); let ref mut td = rc.borrow_mut(); if !td.cli_pair.set_passive_uploader(passive_ssl, inserted_tok, &self.cli_psv_poll) { td.end_whole_session_error(SessionError::ClientProtocol); return false; } td.expect_uploader_reconnect = false; // TODO? self.stats.reconns_UPL_this_period += 1; // TODO? 
(goes thru bidi) td.send_UPL_reconnect_to_client(); self.flow_tracker.mark_passive_td(flow, rc.clone()); self.cli_ssl_driver.stream_timeouts.push_back( SchedSessionTimeout { drop_time: precise_time_ns() + STREAM_TIMEOUT_NS, id: session_id, stream_count: td.cli_pair.stream_count() }); report!("newuploader {} {}:{} -> {}:{}", session_id, util::inet_htoa(flow.src_ip), flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); true } else { error!("This new upload-only stream does not belong to an \ ongoing session. A session's first stream must be \ bidi. Session ID: {}", session_id); report!("newuploader {} {}:{} -> {}:{}", session_id, util::inet_htoa(flow.src_ip), flow.src_port, util::inet_htoa(flow.dst_ip), flow.dst_port); report!("error {} {}", session_id, SessionError::ClientProtocol.to_string()); // (passive_ssl goes out of scope, "deluploader") false } } else { error!("make_forged_memory_tls() returned 0! Tagged TLS not picked \ up as a passive TapDance stream :("); false } } // Lookup the ongoing session with ID session_id, if it exists. If it does // not, make a new one (including initiating the Squid TCP connection). // Returns: Bool is whether the session was already there. // Option<SessionError> is to be filled if session creation failed. fn create_or_recall_tapdance_session(&mut self, session_id: SessionId) -> (bool, Rc<RefCell<TapdanceSession>>, Option<SessionError>) { let ref mut cov_tcp_poll = self.cov_tcp_poll; let ref mut tok_map = self.cov_tcp_driver.tok2sess; let recalled = self.id2sess.contains_key(&session_id); let mut cov_err = None; let rc = self.id2sess.entry(session_id).or_insert_with(|| { let td_rc = Rc::new(RefCell::new(TapdanceSession::new(session_id))); // New proxy connection to local proxy. unwrap() relies on // SQUID_PROXY_ADDR being a valid constant. let dest = IpAddr::from_str(SQUID_PROXY_ADDR).unwrap(); let sock_addr = SocketAddr::new(dest, SQUID_PROXY_PORT); // NOTE: this mio version of TcpStream is nonblocking! 
if let Ok(sock) = ::mio::tcp::TcpStream::connect(&sock_addr) { let ref mut td = td_rc.borrow_mut(); td.cov.set_stream(BufferableTCP::new(sock)); let inserted_tok = tok_map.insert(td_rc.clone()); td.cov.register(cov_tcp_poll, inserted_tok.val(), util::all_unix_events(), PollOpt::edge()) .unwrap_or_else(|e|{error!("tcp_driver 1st reg: {}", e);}); td.cov.set_tok(inserted_tok); } else { // TODO: actually, we're more concerned with out-of-fds, which // is more like StationInternal. But, how to distinguish? cov_err = Some(SessionError::CovertStream); } td_rc }); (recalled, rc.clone(), cov_err) } } // impl PerCoreGlobal // These consts tie the slice indexing in establish_tapdance_stream_from_tag() // to the length check in try_establish_tapdance(). // Current tag payload format: //=============================================================== // 1 byte................flags // 48 bytes master_key // 32 bytes..............server_random // 32 bytes client_random // 16 bytes..............connection_id const TAG_FLAGS_LEN: usize = 1; const TAG_M_KEY_LEN: usize = 48; const TAG_SRV_RND_LEN: usize = 32; const TAG_CLI_RND_LEN: usize = 32; const TAG_CON_ID_LEN: usize = 16; // Assumes you will only call it after checking // if tag_payload.len() >= TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN + // TAG_CLI_RND_LEN + TAG_CON_ID_LEN fn parse_tag_payload(tag_payload: &Vec<u8>) -> (u8, &[u8], &[u8], &[u8], SessionId) { let mut offset = 0; let flags = tag_payload[offset]; offset += TAG_FLAGS_LEN; let master_key = &tag_payload[offset..offset+TAG_M_KEY_LEN]; offset += TAG_M_KEY_LEN; let server_random = &tag_payload[offset..offset+TAG_SRV_RND_LEN]; offset += TAG_SRV_RND_LEN; let client_random = &tag_payload[offset..offset+TAG_CLI_RND_LEN]; offset += TAG_CLI_RND_LEN; let session_id_slice = &tag_payload[offset..offset+TAG_CON_ID_LEN]; // (do `offset += TAG_CON_ID_LEN` here if you need to read further) let session_id = SessionId::new( array_ref![session_id_slice,0,TAG_CON_ID_LEN]); (flags, 
master_key, server_random, client_random, session_id) }
try_establish_tapdance
identifier_name
shard.go
// Licensed to LinDB under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. LinDB licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package tsdb import ( "context" "fmt" "io" "path/filepath" "strconv" "sync" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "github.com/lindb/lindb/constants" "github.com/lindb/lindb/kv" "github.com/lindb/lindb/monitoring" "github.com/lindb/lindb/pkg/logger" "github.com/lindb/lindb/pkg/option" "github.com/lindb/lindb/pkg/timeutil" "github.com/lindb/lindb/replication" pb "github.com/lindb/lindb/rpc/proto/field" "github.com/lindb/lindb/tsdb/indexdb" "github.com/lindb/lindb/tsdb/memdb" "github.com/lindb/lindb/tsdb/metadb" "github.com/lindb/lindb/tsdb/tblstore/invertedindex" ) //go:generate mockgen -source=./shard.go -destination=./shard_mock.go -package=tsdb // for testing var ( newReplicaSequenceFunc = newReplicaSequence newIntervalSegmentFunc = newIntervalSegment newKVStoreFunc = kv.NewStore newIndexDBFunc = indexdb.NewIndexDatabase newMemoryDBFunc = memdb.NewMemoryDatabase ) var ( writeMetricTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "shard_write_metric_duration", Help: "Write metric duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) buildIndexTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: 
"shard_build_index_duration", Help: "Build index duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) memFlushTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "shard_memory_database_flush_duration", Help: "Flush memory data duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) ) func init() { monitoring.StorageRegistry.MustRegister(buildIndexTimer) monitoring.StorageRegistry.MustRegister(writeMetricTimer) monitoring.StorageRegistry.MustRegister(memFlushTimer) } const ( replicaDir = "replica" segmentDir = "segment" indexParentDir = "index" forwardIndexDir = "forward" invertedIndexDir = "inverted" metaDir = "meta" tempDir = "temp" ) // Shard is a horizontal partition of metrics for LinDB. type Shard interface { // DatabaseName returns the database name DatabaseName() string // ShardID returns the shard id ShardID() int32 // ShardInfo returns the unique shard info ShardInfo() string // GetDataFamilies returns data family list by interval type and time range, return nil if not match GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily // MemoryDatabase returns memory database by given family time. MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) // IndexDatabase returns the index-database IndexDatabase() indexdb.IndexDatabase // Write writes the metric-point into memory-database. Write(metric *pb.Metric) error // GetOrCreateSequence gets the replica sequence by given remote peer if exist, else creates a new sequence GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) // Close releases shard's resource, such as flush data, spawned goroutines etc. 
io.Closer // Flush flushes index and memory data to disk Flush() error // NeedFlush checks if shard need to flush memory data NeedFlush() bool // IsFlushing checks if this shard is in flushing IsFlushing() bool // initIndexDatabase initializes index database initIndexDatabase() error } // shard implements Shard interface // directory tree: // xx/shard/1/ (path) // xx/shard/1/replica // xx/shard/1/temp/123213123131 // time of ns // xx/shard/1/meta/ // xx/shard/1/index/inverted/ // xx/shard/1/data/20191012/ // xx/shard/1/data/20191013/ type shard struct { databaseName string id int32 path string option option.DatabaseOption sequence ReplicaSequence families map[int64]memdb.MemoryDatabase // memory database for each family time indexDB indexdb.IndexDatabase metadata metadb.Metadata // write accept time range interval timeutil.Interval ahead timeutil.Interval behind timeutil.Interval // segments keeps all interval segments, // includes one smallest interval segment for writing data, and rollup interval segments segments map[timeutil.IntervalType]IntervalSegment segment IntervalSegment // smallest interval for writing data isFlushing atomic.Bool // restrict flusher concurrency flushCondition sync.WaitGroup // flush condition indexStore kv.Store // kv stores forwardFamily kv.Family // forward store invertedFamily kv.Family // inverted store rwMutex sync.RWMutex buildIndexTimer prometheus.Observer writeMetricTimer prometheus.Observer memFlushTimer prometheus.Observer } // newShard creates shard instance, if shard path exist then load shard data for init. // return error if fail. 
func newShard( db Database, shardID int32, shardPath string, option option.DatabaseOption, ) (Shard, error) { var err error if err = option.Validate(); err != nil { return nil, fmt.Errorf("engine option is invalid, err: %s", err) } var interval timeutil.Interval _ = interval.ValueOf(option.Interval) if err := mkDirIfNotExist(shardPath); err != nil { return nil, err } replicaSequence, err := newReplicaSequenceFunc(filepath.Join(shardPath, replicaDir)) if err != nil { return nil, err } shardIDStr := strconv.Itoa(int(shardID)) createdShard := &shard{ databaseName: db.Name(), id: shardID, path: shardPath, option: option, sequence: replicaSequence, families: make(map[int64]memdb.MemoryDatabase), metadata: db.Metadata(), interval: interval, segments: make(map[timeutil.IntervalType]IntervalSegment), isFlushing: *atomic.NewBool(false), buildIndexTimer: buildIndexTimer.WithLabelValues(db.Name(), shardIDStr), writeMetricTimer: writeMetricTimer.WithLabelValues(db.Name(), shardIDStr), memFlushTimer: memFlushTimer.WithLabelValues(db.Name(), shardIDStr), } // new segment for writing createdShard.segment, err = newIntervalSegmentFunc( interval, filepath.Join(shardPath, segmentDir, interval.Type().String())) if err != nil { return nil, err } _ = createdShard.ahead.ValueOf(option.Ahead) _ = createdShard.behind.ValueOf(option.Behind) // add writing segment into segment list createdShard.segments[interval.Type()] = createdShard.segment defer func() { if err != nil { if err := createdShard.Close(); err != nil { engineLogger.Error("close shard error when create shard fail", logger.String("shard", createdShard.path), logger.Error(err)) } } }() if err = createdShard.initIndexDatabase(); err != nil { return nil, fmt.Errorf("create index database for shard[%d] error: %s", shardID, err) } // add shard into global shard manager GetShardManager().AddShard(createdShard) return createdShard, nil } // DatabaseName returns the database name func (s *shard) DatabaseName() string { return 
s.databaseName } // ShardID returns the shard id func (s *shard) ShardID() int32 { return s.id } // ShardInfo returns the unique shard info func (s *shard) ShardInfo() string { return s.path } func (s *shard) GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) { return s.sequence.getOrCreateSequence(replicaPeer) } func (s *shard) IndexDatabase() indexdb.IndexDatabase { return s.indexDB } func (s *shard) GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily { segment, ok := s.segments[intervalType] if ok { return segment.getDataFamilies(timeRange) } return nil } // MemoryDatabase returns memory database by given family time. func (s *shard)
(familyTime int64) (memdb.MemoryDatabase, error) { var memDB memdb.MemoryDatabase s.rwMutex.RLock() defer s.rwMutex.RUnlock() memDB, ok := s.families[familyTime] if !ok { memDB, err := s.createMemoryDatabase() if err != nil { return nil, err } s.families[familyTime] = memDB } return memDB, nil } // Write writes the metric-point into memory-database. func (s *shard) Write(metric *pb.Metric) (err error) { if metric == nil { return constants.ErrNilMetric } if len(metric.Name) == 0 { return constants.ErrEmptyMetricName } if len(metric.Fields) == 0 { return constants.ErrEmptyField } timestamp := metric.Timestamp now := timeutil.Now() // check metric timestamp if in acceptable time range if (s.behind.Int64() > 0 && timestamp < now-s.behind.Int64()) || (s.ahead.Int64() > 0 && timestamp > now+s.ahead.Int64()) { return nil } ns := metric.Namespace if len(ns) == 0 { ns = constants.DefaultNamespace } metricID, err := s.metadata.MetadataDatabase().GenMetricID(ns, metric.Name) if err != nil { return err } var seriesID uint32 isCreated := false if len(metric.Tags) == 0 { // if metric without tags, uses default series id(0) seriesID = constants.SeriesIDWithoutTags } else { seriesID, isCreated, err = s.indexDB.GetOrCreateSeriesID(metricID, metric.TagsHash) if err != nil { return err } } if isCreated { // if series id is new, need build inverted index s.indexDB.BuildInvertIndex(ns, metric.Name, metric.Tags, seriesID) } buildIndexEnd := timeutil.Now() s.buildIndexTimer.Observe(float64(buildIndexEnd - now)) // calculate family start time and slot index intervalCalc := s.interval.Calculator() segmentTime := intervalCalc.CalcSegmentTime(timestamp) // day family := intervalCalc.CalcFamily(timestamp, segmentTime) // hours familyTime := intervalCalc.CalcFamilyStartTime(segmentTime, family) // family timestamp db, err := s.MemoryDatabase(familyTime) if err != nil { return err } // mark writing data db.AcquireWrite() // set write completed defer func() { db.CompleteWrite() 
s.writeMetricTimer.Observe(float64(timeutil.Now() - buildIndexEnd)) }() slotIndex := uint16(intervalCalc.CalcSlot(timestamp, familyTime, s.interval.Int64())) // slot offset of family // write metric point into memory db return db.Write(ns, metric.Name, metricID, seriesID, slotIndex, metric.Fields) } func (s *shard) Close() error { // wait previous flush job completed s.flushCondition.Wait() GetShardManager().RemoveShard(s) if s.indexDB != nil { if err := s.indexDB.Close(); err != nil { return err } } if s.indexStore != nil { if err := s.indexStore.Close(); err != nil { return err } } for _, family := range s.families { if err := s.flushMemoryDatabase(family); err != nil { return err } } s.ackReplicaSeq() return s.sequence.Close() } // IsFlushing checks if this shard is in flushing func (s *shard) IsFlushing() bool { return s.isFlushing.Load() } // NeedFlush checks if shard need to flush memory data func (s *shard) NeedFlush() bool { if s.IsFlushing() { return false } for _, memDB := range s.families { //TODO add time threshold??? return memDB.MemSize() > constants.ShardMemoryUsedThreshold } return false } // Flush flushes index and memory data to disk func (s *shard) Flush() (err error) { // another flush process is running if !s.isFlushing.CAS(false, true) { return nil } // 1. mark flush job doing s.flushCondition.Add(1) defer func() { //TODO add commit kv meta after ack successfully // mark flush job complete, notify s.flushCondition.Done() s.isFlushing.Store(false) }() //FIXME stone1100 // index flush if s.indexDB != nil { if err = s.indexDB.Flush(); err != nil { return err } } // flush memory database if need flush for _, memDB := range s.families { //TODO add time threshold??? 
if memDB.MemSize() > constants.ShardMemoryUsedThreshold { if err := s.flushMemoryDatabase(memDB); err != nil { return err } } } //FIXME(stone1100) need remove memory database if long time no data // finally, commit replica sequence s.ackReplicaSeq() return nil } // initIndexDatabase initializes the index database func (s *shard) initIndexDatabase() error { var err error storeOption := kv.DefaultStoreOption(filepath.Join(s.path, indexParentDir)) s.indexStore, err = newKVStoreFunc(storeOption.Path, storeOption) if err != nil { return err } s.forwardFamily, err = s.indexStore.CreateFamily( forwardIndexDir, kv.FamilyOption{ CompactThreshold: 0, Merger: string(invertedindex.SeriesForwardMerger)}) if err != nil { return err } s.invertedFamily, err = s.indexStore.CreateFamily( invertedIndexDir, kv.FamilyOption{ CompactThreshold: 0, Merger: string(invertedindex.SeriesInvertedMerger)}) if err != nil { return err } s.indexDB, err = newIndexDBFunc( context.TODO(), filepath.Join(s.path, metaDir), s.metadata, s.forwardFamily, s.invertedFamily) if err != nil { return err } return nil } // createMemoryDatabase creates a new memory database for writing data points func (s *shard) createMemoryDatabase() (memdb.MemoryDatabase, error) { return newMemoryDBFunc(memdb.MemoryDatabaseCfg{ Name: s.databaseName, Metadata: s.metadata, TempPath: filepath.Join(s.path, filepath.Join(tempDir, fmt.Sprintf("%d", timeutil.Now()))), }) } // flushMemoryDatabase flushes memory database to disk kv store func (s *shard) flushMemoryDatabase(memDB memdb.MemoryDatabase) error { startTime := timeutil.Now() defer s.memFlushTimer.Observe(float64(timeutil.Now() - startTime)) //FIXME(stone1100) //for _, familyTime := range memDB.Families() { // segmentName := s.interval.Calculator().GetSegment(familyTime) // segment, err := s.segment.GetOrCreateSegment(segmentName) // if err != nil { // return err // } // thisDataFamily, err := segment.GetDataFamily(familyTime) // if err != nil { // continue // } // // flush 
family data // if err := memDB.FlushFamilyTo( // metricsdata.NewFlusher(thisDataFamily.Family().NewFlusher()), familyTime); err != nil { // return err // } //} if err := memDB.Close(); err != nil { return err } return nil } // ackReplicaSeq commits the replica sequence // NOTICE: if fail, maybe data will write duplicate if system restart func (s *shard) ackReplicaSeq() { allHeads := s.sequence.getAllHeads() if err := s.sequence.ack(allHeads); err != nil { engineLogger.Error("ack replica sequence error", logger.String("shard", s.path), logger.Error(err)) } }
MemoryDatabase
identifier_name
shard.go
// Licensed to LinDB under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. LinDB licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package tsdb import ( "context" "fmt" "io" "path/filepath" "strconv" "sync" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "github.com/lindb/lindb/constants" "github.com/lindb/lindb/kv" "github.com/lindb/lindb/monitoring" "github.com/lindb/lindb/pkg/logger" "github.com/lindb/lindb/pkg/option" "github.com/lindb/lindb/pkg/timeutil" "github.com/lindb/lindb/replication" pb "github.com/lindb/lindb/rpc/proto/field" "github.com/lindb/lindb/tsdb/indexdb" "github.com/lindb/lindb/tsdb/memdb" "github.com/lindb/lindb/tsdb/metadb" "github.com/lindb/lindb/tsdb/tblstore/invertedindex" ) //go:generate mockgen -source=./shard.go -destination=./shard_mock.go -package=tsdb // for testing var ( newReplicaSequenceFunc = newReplicaSequence newIntervalSegmentFunc = newIntervalSegment newKVStoreFunc = kv.NewStore newIndexDBFunc = indexdb.NewIndexDatabase newMemoryDBFunc = memdb.NewMemoryDatabase ) var ( writeMetricTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "shard_write_metric_duration", Help: "Write metric duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) buildIndexTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: 
"shard_build_index_duration", Help: "Build index duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) memFlushTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "shard_memory_database_flush_duration", Help: "Flush memory data duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) ) func init() { monitoring.StorageRegistry.MustRegister(buildIndexTimer) monitoring.StorageRegistry.MustRegister(writeMetricTimer) monitoring.StorageRegistry.MustRegister(memFlushTimer) } const ( replicaDir = "replica" segmentDir = "segment" indexParentDir = "index" forwardIndexDir = "forward" invertedIndexDir = "inverted" metaDir = "meta" tempDir = "temp" ) // Shard is a horizontal partition of metrics for LinDB. type Shard interface { // DatabaseName returns the database name DatabaseName() string // ShardID returns the shard id ShardID() int32 // ShardInfo returns the unique shard info ShardInfo() string // GetDataFamilies returns data family list by interval type and time range, return nil if not match GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily // MemoryDatabase returns memory database by given family time. MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) // IndexDatabase returns the index-database IndexDatabase() indexdb.IndexDatabase // Write writes the metric-point into memory-database. Write(metric *pb.Metric) error // GetOrCreateSequence gets the replica sequence by given remote peer if exist, else creates a new sequence GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) // Close releases shard's resource, such as flush data, spawned goroutines etc. 
io.Closer // Flush flushes index and memory data to disk Flush() error // NeedFlush checks if shard need to flush memory data NeedFlush() bool // IsFlushing checks if this shard is in flushing IsFlushing() bool // initIndexDatabase initializes index database initIndexDatabase() error } // shard implements Shard interface // directory tree: // xx/shard/1/ (path) // xx/shard/1/replica // xx/shard/1/temp/123213123131 // time of ns // xx/shard/1/meta/
databaseName string id int32 path string option option.DatabaseOption sequence ReplicaSequence families map[int64]memdb.MemoryDatabase // memory database for each family time indexDB indexdb.IndexDatabase metadata metadb.Metadata // write accept time range interval timeutil.Interval ahead timeutil.Interval behind timeutil.Interval // segments keeps all interval segments, // includes one smallest interval segment for writing data, and rollup interval segments segments map[timeutil.IntervalType]IntervalSegment segment IntervalSegment // smallest interval for writing data isFlushing atomic.Bool // restrict flusher concurrency flushCondition sync.WaitGroup // flush condition indexStore kv.Store // kv stores forwardFamily kv.Family // forward store invertedFamily kv.Family // inverted store rwMutex sync.RWMutex buildIndexTimer prometheus.Observer writeMetricTimer prometheus.Observer memFlushTimer prometheus.Observer } // newShard creates shard instance, if shard path exist then load shard data for init. // return error if fail. 
func newShard( db Database, shardID int32, shardPath string, option option.DatabaseOption, ) (Shard, error) { var err error if err = option.Validate(); err != nil { return nil, fmt.Errorf("engine option is invalid, err: %s", err) } var interval timeutil.Interval _ = interval.ValueOf(option.Interval) if err := mkDirIfNotExist(shardPath); err != nil { return nil, err } replicaSequence, err := newReplicaSequenceFunc(filepath.Join(shardPath, replicaDir)) if err != nil { return nil, err } shardIDStr := strconv.Itoa(int(shardID)) createdShard := &shard{ databaseName: db.Name(), id: shardID, path: shardPath, option: option, sequence: replicaSequence, families: make(map[int64]memdb.MemoryDatabase), metadata: db.Metadata(), interval: interval, segments: make(map[timeutil.IntervalType]IntervalSegment), isFlushing: *atomic.NewBool(false), buildIndexTimer: buildIndexTimer.WithLabelValues(db.Name(), shardIDStr), writeMetricTimer: writeMetricTimer.WithLabelValues(db.Name(), shardIDStr), memFlushTimer: memFlushTimer.WithLabelValues(db.Name(), shardIDStr), } // new segment for writing createdShard.segment, err = newIntervalSegmentFunc( interval, filepath.Join(shardPath, segmentDir, interval.Type().String())) if err != nil { return nil, err } _ = createdShard.ahead.ValueOf(option.Ahead) _ = createdShard.behind.ValueOf(option.Behind) // add writing segment into segment list createdShard.segments[interval.Type()] = createdShard.segment defer func() { if err != nil { if err := createdShard.Close(); err != nil { engineLogger.Error("close shard error when create shard fail", logger.String("shard", createdShard.path), logger.Error(err)) } } }() if err = createdShard.initIndexDatabase(); err != nil { return nil, fmt.Errorf("create index database for shard[%d] error: %s", shardID, err) } // add shard into global shard manager GetShardManager().AddShard(createdShard) return createdShard, nil } // DatabaseName returns the database name func (s *shard) DatabaseName() string { return 
s.databaseName } // ShardID returns the shard id func (s *shard) ShardID() int32 { return s.id } // ShardInfo returns the unique shard info func (s *shard) ShardInfo() string { return s.path } func (s *shard) GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) { return s.sequence.getOrCreateSequence(replicaPeer) } func (s *shard) IndexDatabase() indexdb.IndexDatabase { return s.indexDB } func (s *shard) GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily { segment, ok := s.segments[intervalType] if ok { return segment.getDataFamilies(timeRange) } return nil } // MemoryDatabase returns memory database by given family time. func (s *shard) MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) { var memDB memdb.MemoryDatabase s.rwMutex.RLock() defer s.rwMutex.RUnlock() memDB, ok := s.families[familyTime] if !ok { memDB, err := s.createMemoryDatabase() if err != nil { return nil, err } s.families[familyTime] = memDB } return memDB, nil } // Write writes the metric-point into memory-database. 
func (s *shard) Write(metric *pb.Metric) (err error) { if metric == nil { return constants.ErrNilMetric } if len(metric.Name) == 0 { return constants.ErrEmptyMetricName } if len(metric.Fields) == 0 { return constants.ErrEmptyField } timestamp := metric.Timestamp now := timeutil.Now() // check metric timestamp if in acceptable time range if (s.behind.Int64() > 0 && timestamp < now-s.behind.Int64()) || (s.ahead.Int64() > 0 && timestamp > now+s.ahead.Int64()) { return nil } ns := metric.Namespace if len(ns) == 0 { ns = constants.DefaultNamespace } metricID, err := s.metadata.MetadataDatabase().GenMetricID(ns, metric.Name) if err != nil { return err } var seriesID uint32 isCreated := false if len(metric.Tags) == 0 { // if metric without tags, uses default series id(0) seriesID = constants.SeriesIDWithoutTags } else { seriesID, isCreated, err = s.indexDB.GetOrCreateSeriesID(metricID, metric.TagsHash) if err != nil { return err } } if isCreated { // if series id is new, need build inverted index s.indexDB.BuildInvertIndex(ns, metric.Name, metric.Tags, seriesID) } buildIndexEnd := timeutil.Now() s.buildIndexTimer.Observe(float64(buildIndexEnd - now)) // calculate family start time and slot index intervalCalc := s.interval.Calculator() segmentTime := intervalCalc.CalcSegmentTime(timestamp) // day family := intervalCalc.CalcFamily(timestamp, segmentTime) // hours familyTime := intervalCalc.CalcFamilyStartTime(segmentTime, family) // family timestamp db, err := s.MemoryDatabase(familyTime) if err != nil { return err } // mark writing data db.AcquireWrite() // set write completed defer func() { db.CompleteWrite() s.writeMetricTimer.Observe(float64(timeutil.Now() - buildIndexEnd)) }() slotIndex := uint16(intervalCalc.CalcSlot(timestamp, familyTime, s.interval.Int64())) // slot offset of family // write metric point into memory db return db.Write(ns, metric.Name, metricID, seriesID, slotIndex, metric.Fields) } func (s *shard) Close() error { // wait previous flush job completed 
s.flushCondition.Wait() GetShardManager().RemoveShard(s) if s.indexDB != nil { if err := s.indexDB.Close(); err != nil { return err } } if s.indexStore != nil { if err := s.indexStore.Close(); err != nil { return err } } for _, family := range s.families { if err := s.flushMemoryDatabase(family); err != nil { return err } } s.ackReplicaSeq() return s.sequence.Close() } // IsFlushing checks if this shard is in flushing func (s *shard) IsFlushing() bool { return s.isFlushing.Load() } // NeedFlush checks if shard need to flush memory data func (s *shard) NeedFlush() bool { if s.IsFlushing() { return false } for _, memDB := range s.families { //TODO add time threshold??? return memDB.MemSize() > constants.ShardMemoryUsedThreshold } return false } // Flush flushes index and memory data to disk func (s *shard) Flush() (err error) { // another flush process is running if !s.isFlushing.CAS(false, true) { return nil } // 1. mark flush job doing s.flushCondition.Add(1) defer func() { //TODO add commit kv meta after ack successfully // mark flush job complete, notify s.flushCondition.Done() s.isFlushing.Store(false) }() //FIXME stone1100 // index flush if s.indexDB != nil { if err = s.indexDB.Flush(); err != nil { return err } } // flush memory database if need flush for _, memDB := range s.families { //TODO add time threshold??? 
if memDB.MemSize() > constants.ShardMemoryUsedThreshold { if err := s.flushMemoryDatabase(memDB); err != nil { return err } } } //FIXME(stone1100) need remove memory database if long time no data // finally, commit replica sequence s.ackReplicaSeq() return nil } // initIndexDatabase initializes the index database func (s *shard) initIndexDatabase() error { var err error storeOption := kv.DefaultStoreOption(filepath.Join(s.path, indexParentDir)) s.indexStore, err = newKVStoreFunc(storeOption.Path, storeOption) if err != nil { return err } s.forwardFamily, err = s.indexStore.CreateFamily( forwardIndexDir, kv.FamilyOption{ CompactThreshold: 0, Merger: string(invertedindex.SeriesForwardMerger)}) if err != nil { return err } s.invertedFamily, err = s.indexStore.CreateFamily( invertedIndexDir, kv.FamilyOption{ CompactThreshold: 0, Merger: string(invertedindex.SeriesInvertedMerger)}) if err != nil { return err } s.indexDB, err = newIndexDBFunc( context.TODO(), filepath.Join(s.path, metaDir), s.metadata, s.forwardFamily, s.invertedFamily) if err != nil { return err } return nil } // createMemoryDatabase creates a new memory database for writing data points func (s *shard) createMemoryDatabase() (memdb.MemoryDatabase, error) { return newMemoryDBFunc(memdb.MemoryDatabaseCfg{ Name: s.databaseName, Metadata: s.metadata, TempPath: filepath.Join(s.path, filepath.Join(tempDir, fmt.Sprintf("%d", timeutil.Now()))), }) } // flushMemoryDatabase flushes memory database to disk kv store func (s *shard) flushMemoryDatabase(memDB memdb.MemoryDatabase) error { startTime := timeutil.Now() defer s.memFlushTimer.Observe(float64(timeutil.Now() - startTime)) //FIXME(stone1100) //for _, familyTime := range memDB.Families() { // segmentName := s.interval.Calculator().GetSegment(familyTime) // segment, err := s.segment.GetOrCreateSegment(segmentName) // if err != nil { // return err // } // thisDataFamily, err := segment.GetDataFamily(familyTime) // if err != nil { // continue // } // // flush 
family data // if err := memDB.FlushFamilyTo( // metricsdata.NewFlusher(thisDataFamily.Family().NewFlusher()), familyTime); err != nil { // return err // } //} if err := memDB.Close(); err != nil { return err } return nil } // ackReplicaSeq commits the replica sequence // NOTICE: if fail, maybe data will write duplicate if system restart func (s *shard) ackReplicaSeq() { allHeads := s.sequence.getAllHeads() if err := s.sequence.ack(allHeads); err != nil { engineLogger.Error("ack replica sequence error", logger.String("shard", s.path), logger.Error(err)) } }
// xx/shard/1/index/inverted/ // xx/shard/1/data/20191012/ // xx/shard/1/data/20191013/ type shard struct {
random_line_split
shard.go
// Licensed to LinDB under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. LinDB licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package tsdb import ( "context" "fmt" "io" "path/filepath" "strconv" "sync" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "github.com/lindb/lindb/constants" "github.com/lindb/lindb/kv" "github.com/lindb/lindb/monitoring" "github.com/lindb/lindb/pkg/logger" "github.com/lindb/lindb/pkg/option" "github.com/lindb/lindb/pkg/timeutil" "github.com/lindb/lindb/replication" pb "github.com/lindb/lindb/rpc/proto/field" "github.com/lindb/lindb/tsdb/indexdb" "github.com/lindb/lindb/tsdb/memdb" "github.com/lindb/lindb/tsdb/metadb" "github.com/lindb/lindb/tsdb/tblstore/invertedindex" ) //go:generate mockgen -source=./shard.go -destination=./shard_mock.go -package=tsdb // for testing var ( newReplicaSequenceFunc = newReplicaSequence newIntervalSegmentFunc = newIntervalSegment newKVStoreFunc = kv.NewStore newIndexDBFunc = indexdb.NewIndexDatabase newMemoryDBFunc = memdb.NewMemoryDatabase ) var ( writeMetricTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "shard_write_metric_duration", Help: "Write metric duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) buildIndexTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: 
"shard_build_index_duration", Help: "Build index duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) memFlushTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "shard_memory_database_flush_duration", Help: "Flush memory data duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) ) func init() { monitoring.StorageRegistry.MustRegister(buildIndexTimer) monitoring.StorageRegistry.MustRegister(writeMetricTimer) monitoring.StorageRegistry.MustRegister(memFlushTimer) } const ( replicaDir = "replica" segmentDir = "segment" indexParentDir = "index" forwardIndexDir = "forward" invertedIndexDir = "inverted" metaDir = "meta" tempDir = "temp" ) // Shard is a horizontal partition of metrics for LinDB. type Shard interface { // DatabaseName returns the database name DatabaseName() string // ShardID returns the shard id ShardID() int32 // ShardInfo returns the unique shard info ShardInfo() string // GetDataFamilies returns data family list by interval type and time range, return nil if not match GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily // MemoryDatabase returns memory database by given family time. MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) // IndexDatabase returns the index-database IndexDatabase() indexdb.IndexDatabase // Write writes the metric-point into memory-database. Write(metric *pb.Metric) error // GetOrCreateSequence gets the replica sequence by given remote peer if exist, else creates a new sequence GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) // Close releases shard's resource, such as flush data, spawned goroutines etc. 
io.Closer // Flush flushes index and memory data to disk Flush() error // NeedFlush checks if shard need to flush memory data NeedFlush() bool // IsFlushing checks if this shard is in flushing IsFlushing() bool // initIndexDatabase initializes index database initIndexDatabase() error } // shard implements Shard interface // directory tree: // xx/shard/1/ (path) // xx/shard/1/replica // xx/shard/1/temp/123213123131 // time of ns // xx/shard/1/meta/ // xx/shard/1/index/inverted/ // xx/shard/1/data/20191012/ // xx/shard/1/data/20191013/ type shard struct { databaseName string id int32 path string option option.DatabaseOption sequence ReplicaSequence families map[int64]memdb.MemoryDatabase // memory database for each family time indexDB indexdb.IndexDatabase metadata metadb.Metadata // write accept time range interval timeutil.Interval ahead timeutil.Interval behind timeutil.Interval // segments keeps all interval segments, // includes one smallest interval segment for writing data, and rollup interval segments segments map[timeutil.IntervalType]IntervalSegment segment IntervalSegment // smallest interval for writing data isFlushing atomic.Bool // restrict flusher concurrency flushCondition sync.WaitGroup // flush condition indexStore kv.Store // kv stores forwardFamily kv.Family // forward store invertedFamily kv.Family // inverted store rwMutex sync.RWMutex buildIndexTimer prometheus.Observer writeMetricTimer prometheus.Observer memFlushTimer prometheus.Observer } // newShard creates shard instance, if shard path exist then load shard data for init. // return error if fail. 
func newShard( db Database, shardID int32, shardPath string, option option.DatabaseOption, ) (Shard, error) { var err error if err = option.Validate(); err != nil { return nil, fmt.Errorf("engine option is invalid, err: %s", err) } var interval timeutil.Interval _ = interval.ValueOf(option.Interval) if err := mkDirIfNotExist(shardPath); err != nil { return nil, err } replicaSequence, err := newReplicaSequenceFunc(filepath.Join(shardPath, replicaDir)) if err != nil { return nil, err } shardIDStr := strconv.Itoa(int(shardID)) createdShard := &shard{ databaseName: db.Name(), id: shardID, path: shardPath, option: option, sequence: replicaSequence, families: make(map[int64]memdb.MemoryDatabase), metadata: db.Metadata(), interval: interval, segments: make(map[timeutil.IntervalType]IntervalSegment), isFlushing: *atomic.NewBool(false), buildIndexTimer: buildIndexTimer.WithLabelValues(db.Name(), shardIDStr), writeMetricTimer: writeMetricTimer.WithLabelValues(db.Name(), shardIDStr), memFlushTimer: memFlushTimer.WithLabelValues(db.Name(), shardIDStr), } // new segment for writing createdShard.segment, err = newIntervalSegmentFunc( interval, filepath.Join(shardPath, segmentDir, interval.Type().String())) if err != nil { return nil, err } _ = createdShard.ahead.ValueOf(option.Ahead) _ = createdShard.behind.ValueOf(option.Behind) // add writing segment into segment list createdShard.segments[interval.Type()] = createdShard.segment defer func() { if err != nil { if err := createdShard.Close(); err != nil { engineLogger.Error("close shard error when create shard fail", logger.String("shard", createdShard.path), logger.Error(err)) } } }() if err = createdShard.initIndexDatabase(); err != nil { return nil, fmt.Errorf("create index database for shard[%d] error: %s", shardID, err) } // add shard into global shard manager GetShardManager().AddShard(createdShard) return createdShard, nil } // DatabaseName returns the database name func (s *shard) DatabaseName() string { return 
s.databaseName } // ShardID returns the shard id func (s *shard) ShardID() int32 { return s.id } // ShardInfo returns the unique shard info func (s *shard) ShardInfo() string { return s.path } func (s *shard) GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) { return s.sequence.getOrCreateSequence(replicaPeer) } func (s *shard) IndexDatabase() indexdb.IndexDatabase { return s.indexDB } func (s *shard) GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily { segment, ok := s.segments[intervalType] if ok { return segment.getDataFamilies(timeRange) } return nil } // MemoryDatabase returns memory database by given family time. func (s *shard) MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) { var memDB memdb.MemoryDatabase s.rwMutex.RLock() defer s.rwMutex.RUnlock() memDB, ok := s.families[familyTime] if !ok { memDB, err := s.createMemoryDatabase() if err != nil { return nil, err } s.families[familyTime] = memDB } return memDB, nil } // Write writes the metric-point into memory-database. 
func (s *shard) Write(metric *pb.Metric) (err error) { if metric == nil { return constants.ErrNilMetric } if len(metric.Name) == 0 { return constants.ErrEmptyMetricName } if len(metric.Fields) == 0 { return constants.ErrEmptyField } timestamp := metric.Timestamp now := timeutil.Now() // check metric timestamp if in acceptable time range if (s.behind.Int64() > 0 && timestamp < now-s.behind.Int64()) || (s.ahead.Int64() > 0 && timestamp > now+s.ahead.Int64()) { return nil } ns := metric.Namespace if len(ns) == 0 { ns = constants.DefaultNamespace } metricID, err := s.metadata.MetadataDatabase().GenMetricID(ns, metric.Name) if err != nil { return err } var seriesID uint32 isCreated := false if len(metric.Tags) == 0 { // if metric without tags, uses default series id(0) seriesID = constants.SeriesIDWithoutTags } else { seriesID, isCreated, err = s.indexDB.GetOrCreateSeriesID(metricID, metric.TagsHash) if err != nil { return err } } if isCreated { // if series id is new, need build inverted index s.indexDB.BuildInvertIndex(ns, metric.Name, metric.Tags, seriesID) } buildIndexEnd := timeutil.Now() s.buildIndexTimer.Observe(float64(buildIndexEnd - now)) // calculate family start time and slot index intervalCalc := s.interval.Calculator() segmentTime := intervalCalc.CalcSegmentTime(timestamp) // day family := intervalCalc.CalcFamily(timestamp, segmentTime) // hours familyTime := intervalCalc.CalcFamilyStartTime(segmentTime, family) // family timestamp db, err := s.MemoryDatabase(familyTime) if err != nil { return err } // mark writing data db.AcquireWrite() // set write completed defer func() { db.CompleteWrite() s.writeMetricTimer.Observe(float64(timeutil.Now() - buildIndexEnd)) }() slotIndex := uint16(intervalCalc.CalcSlot(timestamp, familyTime, s.interval.Int64())) // slot offset of family // write metric point into memory db return db.Write(ns, metric.Name, metricID, seriesID, slotIndex, metric.Fields) } func (s *shard) Close() error { // wait previous flush job completed 
s.flushCondition.Wait() GetShardManager().RemoveShard(s) if s.indexDB != nil { if err := s.indexDB.Close(); err != nil { return err } } if s.indexStore != nil { if err := s.indexStore.Close(); err != nil { return err } } for _, family := range s.families { if err := s.flushMemoryDatabase(family); err != nil { return err } } s.ackReplicaSeq() return s.sequence.Close() } // IsFlushing checks if this shard is in flushing func (s *shard) IsFlushing() bool { return s.isFlushing.Load() } // NeedFlush checks if shard need to flush memory data func (s *shard) NeedFlush() bool { if s.IsFlushing() { return false } for _, memDB := range s.families { //TODO add time threshold??? return memDB.MemSize() > constants.ShardMemoryUsedThreshold } return false } // Flush flushes index and memory data to disk func (s *shard) Flush() (err error) { // another flush process is running if !s.isFlushing.CAS(false, true) { return nil } // 1. mark flush job doing s.flushCondition.Add(1) defer func() { //TODO add commit kv meta after ack successfully // mark flush job complete, notify s.flushCondition.Done() s.isFlushing.Store(false) }() //FIXME stone1100 // index flush if s.indexDB != nil { if err = s.indexDB.Flush(); err != nil { return err } } // flush memory database if need flush for _, memDB := range s.families { //TODO add time threshold??? if memDB.MemSize() > constants.ShardMemoryUsedThreshold { if err := s.flushMemoryDatabase(memDB); err != nil { return err } } } //FIXME(stone1100) need remove memory database if long time no data // finally, commit replica sequence s.ackReplicaSeq() return nil } // initIndexDatabase initializes the index database func (s *shard) initIndexDatabase() error
// createMemoryDatabase creates a new memory database for writing data points func (s *shard) createMemoryDatabase() (memdb.MemoryDatabase, error) { return newMemoryDBFunc(memdb.MemoryDatabaseCfg{ Name: s.databaseName, Metadata: s.metadata, TempPath: filepath.Join(s.path, filepath.Join(tempDir, fmt.Sprintf("%d", timeutil.Now()))), }) } // flushMemoryDatabase flushes memory database to disk kv store func (s *shard) flushMemoryDatabase(memDB memdb.MemoryDatabase) error { startTime := timeutil.Now() defer s.memFlushTimer.Observe(float64(timeutil.Now() - startTime)) //FIXME(stone1100) //for _, familyTime := range memDB.Families() { // segmentName := s.interval.Calculator().GetSegment(familyTime) // segment, err := s.segment.GetOrCreateSegment(segmentName) // if err != nil { // return err // } // thisDataFamily, err := segment.GetDataFamily(familyTime) // if err != nil { // continue // } // // flush family data // if err := memDB.FlushFamilyTo( // metricsdata.NewFlusher(thisDataFamily.Family().NewFlusher()), familyTime); err != nil { // return err // } //} if err := memDB.Close(); err != nil { return err } return nil } // ackReplicaSeq commits the replica sequence // NOTICE: if fail, maybe data will write duplicate if system restart func (s *shard) ackReplicaSeq() { allHeads := s.sequence.getAllHeads() if err := s.sequence.ack(allHeads); err != nil { engineLogger.Error("ack replica sequence error", logger.String("shard", s.path), logger.Error(err)) } }
{ var err error storeOption := kv.DefaultStoreOption(filepath.Join(s.path, indexParentDir)) s.indexStore, err = newKVStoreFunc(storeOption.Path, storeOption) if err != nil { return err } s.forwardFamily, err = s.indexStore.CreateFamily( forwardIndexDir, kv.FamilyOption{ CompactThreshold: 0, Merger: string(invertedindex.SeriesForwardMerger)}) if err != nil { return err } s.invertedFamily, err = s.indexStore.CreateFamily( invertedIndexDir, kv.FamilyOption{ CompactThreshold: 0, Merger: string(invertedindex.SeriesInvertedMerger)}) if err != nil { return err } s.indexDB, err = newIndexDBFunc( context.TODO(), filepath.Join(s.path, metaDir), s.metadata, s.forwardFamily, s.invertedFamily) if err != nil { return err } return nil }
identifier_body
shard.go
// Licensed to LinDB under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. LinDB licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package tsdb import ( "context" "fmt" "io" "path/filepath" "strconv" "sync" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "github.com/lindb/lindb/constants" "github.com/lindb/lindb/kv" "github.com/lindb/lindb/monitoring" "github.com/lindb/lindb/pkg/logger" "github.com/lindb/lindb/pkg/option" "github.com/lindb/lindb/pkg/timeutil" "github.com/lindb/lindb/replication" pb "github.com/lindb/lindb/rpc/proto/field" "github.com/lindb/lindb/tsdb/indexdb" "github.com/lindb/lindb/tsdb/memdb" "github.com/lindb/lindb/tsdb/metadb" "github.com/lindb/lindb/tsdb/tblstore/invertedindex" ) //go:generate mockgen -source=./shard.go -destination=./shard_mock.go -package=tsdb // for testing var ( newReplicaSequenceFunc = newReplicaSequence newIntervalSegmentFunc = newIntervalSegment newKVStoreFunc = kv.NewStore newIndexDBFunc = indexdb.NewIndexDatabase newMemoryDBFunc = memdb.NewMemoryDatabase ) var ( writeMetricTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "shard_write_metric_duration", Help: "Write metric duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) buildIndexTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: 
"shard_build_index_duration", Help: "Build index duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) memFlushTimer = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "shard_memory_database_flush_duration", Help: "Flush memory data duration(ms).", Buckets: monitoring.DefaultHistogramBuckets, }, []string{"db", "shard"}, ) ) func init() { monitoring.StorageRegistry.MustRegister(buildIndexTimer) monitoring.StorageRegistry.MustRegister(writeMetricTimer) monitoring.StorageRegistry.MustRegister(memFlushTimer) } const ( replicaDir = "replica" segmentDir = "segment" indexParentDir = "index" forwardIndexDir = "forward" invertedIndexDir = "inverted" metaDir = "meta" tempDir = "temp" ) // Shard is a horizontal partition of metrics for LinDB. type Shard interface { // DatabaseName returns the database name DatabaseName() string // ShardID returns the shard id ShardID() int32 // ShardInfo returns the unique shard info ShardInfo() string // GetDataFamilies returns data family list by interval type and time range, return nil if not match GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily // MemoryDatabase returns memory database by given family time. MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) // IndexDatabase returns the index-database IndexDatabase() indexdb.IndexDatabase // Write writes the metric-point into memory-database. Write(metric *pb.Metric) error // GetOrCreateSequence gets the replica sequence by given remote peer if exist, else creates a new sequence GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) // Close releases shard's resource, such as flush data, spawned goroutines etc. 
io.Closer // Flush flushes index and memory data to disk Flush() error // NeedFlush checks if shard need to flush memory data NeedFlush() bool // IsFlushing checks if this shard is in flushing IsFlushing() bool // initIndexDatabase initializes index database initIndexDatabase() error } // shard implements Shard interface // directory tree: // xx/shard/1/ (path) // xx/shard/1/replica // xx/shard/1/temp/123213123131 // time of ns // xx/shard/1/meta/ // xx/shard/1/index/inverted/ // xx/shard/1/data/20191012/ // xx/shard/1/data/20191013/ type shard struct { databaseName string id int32 path string option option.DatabaseOption sequence ReplicaSequence families map[int64]memdb.MemoryDatabase // memory database for each family time indexDB indexdb.IndexDatabase metadata metadb.Metadata // write accept time range interval timeutil.Interval ahead timeutil.Interval behind timeutil.Interval // segments keeps all interval segments, // includes one smallest interval segment for writing data, and rollup interval segments segments map[timeutil.IntervalType]IntervalSegment segment IntervalSegment // smallest interval for writing data isFlushing atomic.Bool // restrict flusher concurrency flushCondition sync.WaitGroup // flush condition indexStore kv.Store // kv stores forwardFamily kv.Family // forward store invertedFamily kv.Family // inverted store rwMutex sync.RWMutex buildIndexTimer prometheus.Observer writeMetricTimer prometheus.Observer memFlushTimer prometheus.Observer } // newShard creates shard instance, if shard path exist then load shard data for init. // return error if fail. 
func newShard( db Database, shardID int32, shardPath string, option option.DatabaseOption, ) (Shard, error) { var err error if err = option.Validate(); err != nil { return nil, fmt.Errorf("engine option is invalid, err: %s", err) } var interval timeutil.Interval _ = interval.ValueOf(option.Interval) if err := mkDirIfNotExist(shardPath); err != nil { return nil, err } replicaSequence, err := newReplicaSequenceFunc(filepath.Join(shardPath, replicaDir)) if err != nil { return nil, err } shardIDStr := strconv.Itoa(int(shardID)) createdShard := &shard{ databaseName: db.Name(), id: shardID, path: shardPath, option: option, sequence: replicaSequence, families: make(map[int64]memdb.MemoryDatabase), metadata: db.Metadata(), interval: interval, segments: make(map[timeutil.IntervalType]IntervalSegment), isFlushing: *atomic.NewBool(false), buildIndexTimer: buildIndexTimer.WithLabelValues(db.Name(), shardIDStr), writeMetricTimer: writeMetricTimer.WithLabelValues(db.Name(), shardIDStr), memFlushTimer: memFlushTimer.WithLabelValues(db.Name(), shardIDStr), } // new segment for writing createdShard.segment, err = newIntervalSegmentFunc( interval, filepath.Join(shardPath, segmentDir, interval.Type().String())) if err != nil { return nil, err } _ = createdShard.ahead.ValueOf(option.Ahead) _ = createdShard.behind.ValueOf(option.Behind) // add writing segment into segment list createdShard.segments[interval.Type()] = createdShard.segment defer func() { if err != nil { if err := createdShard.Close(); err != nil { engineLogger.Error("close shard error when create shard fail", logger.String("shard", createdShard.path), logger.Error(err)) } } }() if err = createdShard.initIndexDatabase(); err != nil { return nil, fmt.Errorf("create index database for shard[%d] error: %s", shardID, err) } // add shard into global shard manager GetShardManager().AddShard(createdShard) return createdShard, nil } // DatabaseName returns the database name func (s *shard) DatabaseName() string { return 
s.databaseName } // ShardID returns the shard id func (s *shard) ShardID() int32 { return s.id } // ShardInfo returns the unique shard info func (s *shard) ShardInfo() string { return s.path } func (s *shard) GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) { return s.sequence.getOrCreateSequence(replicaPeer) } func (s *shard) IndexDatabase() indexdb.IndexDatabase { return s.indexDB } func (s *shard) GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily { segment, ok := s.segments[intervalType] if ok { return segment.getDataFamilies(timeRange) } return nil } // MemoryDatabase returns memory database by given family time. func (s *shard) MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) { var memDB memdb.MemoryDatabase s.rwMutex.RLock() defer s.rwMutex.RUnlock() memDB, ok := s.families[familyTime] if !ok { memDB, err := s.createMemoryDatabase() if err != nil { return nil, err } s.families[familyTime] = memDB } return memDB, nil } // Write writes the metric-point into memory-database. 
func (s *shard) Write(metric *pb.Metric) (err error) { if metric == nil { return constants.ErrNilMetric } if len(metric.Name) == 0 { return constants.ErrEmptyMetricName } if len(metric.Fields) == 0 { return constants.ErrEmptyField } timestamp := metric.Timestamp now := timeutil.Now() // check metric timestamp if in acceptable time range if (s.behind.Int64() > 0 && timestamp < now-s.behind.Int64()) || (s.ahead.Int64() > 0 && timestamp > now+s.ahead.Int64()) { return nil } ns := metric.Namespace if len(ns) == 0 { ns = constants.DefaultNamespace } metricID, err := s.metadata.MetadataDatabase().GenMetricID(ns, metric.Name) if err != nil { return err } var seriesID uint32 isCreated := false if len(metric.Tags) == 0 { // if metric without tags, uses default series id(0) seriesID = constants.SeriesIDWithoutTags } else { seriesID, isCreated, err = s.indexDB.GetOrCreateSeriesID(metricID, metric.TagsHash) if err != nil { return err } } if isCreated { // if series id is new, need build inverted index s.indexDB.BuildInvertIndex(ns, metric.Name, metric.Tags, seriesID) } buildIndexEnd := timeutil.Now() s.buildIndexTimer.Observe(float64(buildIndexEnd - now)) // calculate family start time and slot index intervalCalc := s.interval.Calculator() segmentTime := intervalCalc.CalcSegmentTime(timestamp) // day family := intervalCalc.CalcFamily(timestamp, segmentTime) // hours familyTime := intervalCalc.CalcFamilyStartTime(segmentTime, family) // family timestamp db, err := s.MemoryDatabase(familyTime) if err != nil { return err } // mark writing data db.AcquireWrite() // set write completed defer func() { db.CompleteWrite() s.writeMetricTimer.Observe(float64(timeutil.Now() - buildIndexEnd)) }() slotIndex := uint16(intervalCalc.CalcSlot(timestamp, familyTime, s.interval.Int64())) // slot offset of family // write metric point into memory db return db.Write(ns, metric.Name, metricID, seriesID, slotIndex, metric.Fields) } func (s *shard) Close() error { // wait previous flush job completed 
s.flushCondition.Wait() GetShardManager().RemoveShard(s) if s.indexDB != nil { if err := s.indexDB.Close(); err != nil { return err } } if s.indexStore != nil { if err := s.indexStore.Close(); err != nil { return err } } for _, family := range s.families { if err := s.flushMemoryDatabase(family); err != nil
} s.ackReplicaSeq() return s.sequence.Close() } // IsFlushing checks if this shard is in flushing func (s *shard) IsFlushing() bool { return s.isFlushing.Load() } // NeedFlush checks if shard need to flush memory data func (s *shard) NeedFlush() bool { if s.IsFlushing() { return false } for _, memDB := range s.families { //TODO add time threshold??? return memDB.MemSize() > constants.ShardMemoryUsedThreshold } return false } // Flush flushes index and memory data to disk func (s *shard) Flush() (err error) { // another flush process is running if !s.isFlushing.CAS(false, true) { return nil } // 1. mark flush job doing s.flushCondition.Add(1) defer func() { //TODO add commit kv meta after ack successfully // mark flush job complete, notify s.flushCondition.Done() s.isFlushing.Store(false) }() //FIXME stone1100 // index flush if s.indexDB != nil { if err = s.indexDB.Flush(); err != nil { return err } } // flush memory database if need flush for _, memDB := range s.families { //TODO add time threshold??? 
if memDB.MemSize() > constants.ShardMemoryUsedThreshold { if err := s.flushMemoryDatabase(memDB); err != nil { return err } } } //FIXME(stone1100) need remove memory database if long time no data // finally, commit replica sequence s.ackReplicaSeq() return nil } // initIndexDatabase initializes the index database func (s *shard) initIndexDatabase() error { var err error storeOption := kv.DefaultStoreOption(filepath.Join(s.path, indexParentDir)) s.indexStore, err = newKVStoreFunc(storeOption.Path, storeOption) if err != nil { return err } s.forwardFamily, err = s.indexStore.CreateFamily( forwardIndexDir, kv.FamilyOption{ CompactThreshold: 0, Merger: string(invertedindex.SeriesForwardMerger)}) if err != nil { return err } s.invertedFamily, err = s.indexStore.CreateFamily( invertedIndexDir, kv.FamilyOption{ CompactThreshold: 0, Merger: string(invertedindex.SeriesInvertedMerger)}) if err != nil { return err } s.indexDB, err = newIndexDBFunc( context.TODO(), filepath.Join(s.path, metaDir), s.metadata, s.forwardFamily, s.invertedFamily) if err != nil { return err } return nil } // createMemoryDatabase creates a new memory database for writing data points func (s *shard) createMemoryDatabase() (memdb.MemoryDatabase, error) { return newMemoryDBFunc(memdb.MemoryDatabaseCfg{ Name: s.databaseName, Metadata: s.metadata, TempPath: filepath.Join(s.path, filepath.Join(tempDir, fmt.Sprintf("%d", timeutil.Now()))), }) } // flushMemoryDatabase flushes memory database to disk kv store func (s *shard) flushMemoryDatabase(memDB memdb.MemoryDatabase) error { startTime := timeutil.Now() defer s.memFlushTimer.Observe(float64(timeutil.Now() - startTime)) //FIXME(stone1100) //for _, familyTime := range memDB.Families() { // segmentName := s.interval.Calculator().GetSegment(familyTime) // segment, err := s.segment.GetOrCreateSegment(segmentName) // if err != nil { // return err // } // thisDataFamily, err := segment.GetDataFamily(familyTime) // if err != nil { // continue // } // // flush 
family data // if err := memDB.FlushFamilyTo( // metricsdata.NewFlusher(thisDataFamily.Family().NewFlusher()), familyTime); err != nil { // return err // } //} if err := memDB.Close(); err != nil { return err } return nil } // ackReplicaSeq commits the replica sequence // NOTICE: if fail, maybe data will write duplicate if system restart func (s *shard) ackReplicaSeq() { allHeads := s.sequence.getAllHeads() if err := s.sequence.ack(allHeads); err != nil { engineLogger.Error("ack replica sequence error", logger.String("shard", s.path), logger.Error(err)) } }
{ return err }
conditional_block
agent.rs
use crate::agent::Capabilities; use crate::experiments::{Assignee, Experiment}; use crate::prelude::*; use crate::results::{DatabaseDB, EncodingType, ProgressData}; use crate::server::api_types::{AgentConfig, ApiResponse}; use crate::server::auth::{auth_filter, AuthDetails, TokenType}; use crate::server::messages::Message; use crate::server::{Data, GithubData, HttpError}; use crossbeam_channel::Sender; use failure::Compat; use http::Response; use hyper::Body; use std::collections::HashMap; use std::sync::{Arc, Condvar, Mutex}; use warp::{self, Filter, Rejection}; #[derive(Deserialize)] #[serde(rename_all = "kebab-case")] pub struct ExperimentData<T> { experiment_name: String, #[serde(flatten)] data: T, } pub fn routes( data: Arc<Data>, mutex: Arc<Mutex<Data>>, github_data: Option<Arc<GithubData>>, ) -> impl Filter<Extract = (Response<Body>,), Error = Rejection> + Clone { let data_cloned = data.clone(); let data_filter = warp::any().map(move || data_cloned.clone()); let mutex_filter = warp::any().map(move || mutex.clone()); let github_data_filter = warp::any().map(move || github_data.clone()); let config = warp::post() .and(warp::path("config")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_config); let next_experiment = warp::post() .and(warp::path("next-experiment")) .and(warp::path::end()) .and(mutex_filter.clone()) .and(github_data_filter) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_next_experiment); let next_crate = warp::post() .and(warp::path("next-crate")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_next_crate); let record_progress = warp::post() .and(warp::path("record-progress")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_record_progress); let heartbeat = 
warp::post() .and(warp::path("heartbeat")) .and(warp::path::end()) .and(data_filter) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_heartbeat); let error = warp::post() .and(warp::path("error")) .and(warp::path::end()) .and(warp::body::json()) .and(mutex_filter) .and(auth_filter(data, TokenType::Agent)) .map(endpoint_error); warp::any() .and( config .or(next_experiment) .unify() .or(next_crate) .unify() .or(record_progress) .unify() .or(heartbeat) .unify() .or(error) .unify(), ) .map(handle_results) .recover(handle_errors) .unify() } fn endpoint_config( caps: Capabilities, data: Arc<Data>, auth: AuthDetails, ) -> Fallible<Response<Body>> { data.agents.add_capabilities(&auth.name, &caps)?; Ok(ApiResponse::Success { result: AgentConfig { agent_name: auth.name, crater_config: data.config.clone(), }, } .into_response()?) } fn endpoint_next_experiment( mutex: Arc<Mutex<Data>>, github_data: Option<Arc<GithubData>>, auth: AuthDetails, ) -> Fallible<Response<Body>> { //we need to make sure that Experiment::next executes uninterrupted let data = mutex.lock().unwrap(); let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?; let result = if let Some((new, ex)) = next { if new { if let Some(github_data) = github_data.as_ref() { if let Some(ref github_issue) = ex.github_issue { Message::new() .line( "construction", format!("Experiment **`{}`** is now **running**", ex.name,), ) .send(&github_issue.api_url, &data, github_data)?; } } } Some(ex) } else { None }; Ok(ApiResponse::Success { result }.into_response()?) } fn endpoint_next_crate( experiment: String, data: Arc<Data>, _auth: AuthDetails, ) -> Fallible<Response<Body>> { let result: Option<crate::crates::Crate> = if let Some(ex) = Experiment::get(&data.db, &experiment)? { let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?; if crates.is_empty() { None } else { Some(crates.remove(0)) } } else { None }; Ok(ApiResponse::Success { result }.into_response()?) 
} #[derive(Clone)] pub struct RecordProgressThread { // String is the worker name queue: Sender<(ExperimentData<ProgressData>, String)>, in_flight_requests: Arc<(Mutex<usize>, Condvar)>, } impl RecordProgressThread { pub fn new( db: crate::db::Database, metrics: crate::server::metrics::Metrics, ) -> RecordProgressThread { // 64 message queue, after which we start load shedding automatically. let (tx, rx) = crossbeam_channel::bounded(64); let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new())); let this = RecordProgressThread { queue: tx, in_flight_requests, }; let ret = this.clone(); std::thread::spawn(move || loop { // Panics should already be logged and otherwise there's not much we // can/should do. let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let (result, worker_name) = rx.recv().unwrap(); this.block_until_idle(); let start = std::time::Instant::now(); if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() { let db = DatabaseDB::new(&db); if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) { // Failing to record a result is basically fine -- this // just means that we'll have to re-try this job. log::error!("Failed to store result into database: {:?}", e); crate::utils::report_failure(&e); } metrics.record_completed_jobs( &worker_name, &ex.name, result.data.results.len() as i64, ); if let Err(e) = db.clear_stale_records() { // Not a hard failure. We can continue even if we failed // to clear records from already completed runs... log::error!("Failed to clear stale records: {:?}", e); crate::utils::report_failure(&e); } metrics .crater_endpoint_time .with_label_values(&["record_progress"]) .observe(start.elapsed().as_secs_f64()); } })); }); ret } pub fn block_until_idle(&self) { // Wait until there are zero in-flight requests. // // Note: We do **not** keep the lock here for the subsequent // computation. 
That means that if we ever observe zero, then we're // going to kick off the below computation; obviously requests may keep // coming in -- we don't want to block those requests. // // The expectation that we will see zero here also implies that // the server is *sometimes* idle (i.e., we are not constantly // processing requests at 100% load). It's not clear that's 100% // a valid assumption, but if we are at 100% load in terms of // requests coming in, that's a problem in and of itself (since // the majority of expected requests are record-progress, which // should be *very* fast now that the work for them is async and // offloaded to this thread). // // Ignore the mutex guard (see above). drop( self.in_flight_requests .1 .wait_while( self.in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()), |g| *g != 0, ) .unwrap_or_else(|g| g.into_inner()), ); } pub fn start_request(&self) -> RequestGuard { *self .in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()) += 1; RequestGuard { thread: self.clone(), } } } pub struct RequestGuard { thread: RecordProgressThread, } impl Drop for RequestGuard { fn drop(&mut self) { *self .thread .in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()) -= 1; self.thread.in_flight_requests.1.notify_one(); } } // This endpoint does not use the mutex data wrapper to exclude running in // parallel with other endpoints, which may mean that we (for example) are // recording results for an abort'd experiment. This should generally be fine -- // the database already has foreign_keys enabled and that should ensure // appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug // elsewhere, not here). // // In practice it's pretty likely that we won't fully run in parallel anyway, // but this lets some of the work proceed without the lock being held, which is // generally positive. 
fn endpoint_record_progress( result: ExperimentData<ProgressData>, data: Arc<Data>, auth: AuthDetails, ) -> Fallible<Response<Body>> { match data .record_progress_worker .queue .try_send((result, auth.name)) { Ok(()) => Ok(ApiResponse::Success { result: true }.into_response()?), Err(crossbeam_channel::TrySendError::Full(_)) => { data.metrics.crater_bounced_record_progress.inc_by(1); Ok(ApiResponse::<()>::SlowDown.into_response()?) } Err(crossbeam_channel::TrySendError::Disconnected(_)) => unreachable!(), } } fn endpoint_heartbeat(data: Arc<Data>, auth: AuthDetails) -> Fallible<Response<Body>> { if let Some(rev) = auth.git_revision { data.agents.set_git_revision(&auth.name, &rev)?; } data.agents.record_heartbeat(&auth.name)?; Ok(ApiResponse::Success { result: true }.into_response()?) } fn endpoint_error( error: ExperimentData<HashMap<String, String>>, mutex: Arc<Mutex<Data>>, auth: AuthDetails, ) -> Fallible<Response<Body>> { log::error!( "agent {} failed while running {}: {:?}", auth.name, error.experiment_name, error.data.get("error") ); let data = mutex.lock().unwrap(); let ex = Experiment::get(&data.db, &error.experiment_name)? .ok_or_else(|| err_msg("no experiment run by this agent"))?; data.metrics.record_error(&auth.name, &ex.name); Ok(ApiResponse::Success { result: true }.into_response()?) } fn handle_results(resp: Fallible<Response<Body>>) -> Response<Body> { match resp { Ok(resp) => resp, Err(err) => ApiResponse::internal_error(err.to_string()) .into_response() .unwrap(), } } async fn handle_errors(err: Rejection) -> Result<Response<Body>, Rejection>
{ let error = if let Some(compat) = err.find::<Compat<HttpError>>() { Some(*compat.get_ref()) } else if err.is_not_found() { Some(HttpError::NotFound) } else { None }; match error { Some(HttpError::NotFound) => Ok(ApiResponse::not_found().into_response().unwrap()), Some(HttpError::Forbidden) => Ok(ApiResponse::unauthorized().into_response().unwrap()), None => Err(err), } }
identifier_body
agent.rs
use crate::agent::Capabilities; use crate::experiments::{Assignee, Experiment}; use crate::prelude::*; use crate::results::{DatabaseDB, EncodingType, ProgressData}; use crate::server::api_types::{AgentConfig, ApiResponse}; use crate::server::auth::{auth_filter, AuthDetails, TokenType}; use crate::server::messages::Message; use crate::server::{Data, GithubData, HttpError}; use crossbeam_channel::Sender; use failure::Compat; use http::Response; use hyper::Body; use std::collections::HashMap; use std::sync::{Arc, Condvar, Mutex}; use warp::{self, Filter, Rejection}; #[derive(Deserialize)] #[serde(rename_all = "kebab-case")] pub struct ExperimentData<T> { experiment_name: String, #[serde(flatten)] data: T, } pub fn routes( data: Arc<Data>, mutex: Arc<Mutex<Data>>, github_data: Option<Arc<GithubData>>, ) -> impl Filter<Extract = (Response<Body>,), Error = Rejection> + Clone { let data_cloned = data.clone(); let data_filter = warp::any().map(move || data_cloned.clone()); let mutex_filter = warp::any().map(move || mutex.clone()); let github_data_filter = warp::any().map(move || github_data.clone()); let config = warp::post() .and(warp::path("config")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_config); let next_experiment = warp::post() .and(warp::path("next-experiment")) .and(warp::path::end()) .and(mutex_filter.clone()) .and(github_data_filter) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_next_experiment); let next_crate = warp::post() .and(warp::path("next-crate")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_next_crate); let record_progress = warp::post() .and(warp::path("record-progress")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_record_progress); let heartbeat = 
warp::post() .and(warp::path("heartbeat")) .and(warp::path::end()) .and(data_filter) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_heartbeat); let error = warp::post() .and(warp::path("error")) .and(warp::path::end()) .and(warp::body::json()) .and(mutex_filter) .and(auth_filter(data, TokenType::Agent)) .map(endpoint_error); warp::any() .and( config .or(next_experiment) .unify() .or(next_crate) .unify() .or(record_progress) .unify() .or(heartbeat) .unify() .or(error) .unify(), ) .map(handle_results) .recover(handle_errors) .unify() } fn endpoint_config( caps: Capabilities, data: Arc<Data>, auth: AuthDetails, ) -> Fallible<Response<Body>> { data.agents.add_capabilities(&auth.name, &caps)?; Ok(ApiResponse::Success { result: AgentConfig { agent_name: auth.name, crater_config: data.config.clone(), }, } .into_response()?) } fn endpoint_next_experiment( mutex: Arc<Mutex<Data>>, github_data: Option<Arc<GithubData>>, auth: AuthDetails, ) -> Fallible<Response<Body>> { //we need to make sure that Experiment::next executes uninterrupted let data = mutex.lock().unwrap(); let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?; let result = if let Some((new, ex)) = next { if new { if let Some(github_data) = github_data.as_ref() { if let Some(ref github_issue) = ex.github_issue { Message::new() .line( "construction", format!("Experiment **`{}`** is now **running**", ex.name,), ) .send(&github_issue.api_url, &data, github_data)?; } } } Some(ex) } else { None }; Ok(ApiResponse::Success { result }.into_response()?) } fn endpoint_next_crate( experiment: String, data: Arc<Data>, _auth: AuthDetails, ) -> Fallible<Response<Body>> { let result: Option<crate::crates::Crate> = if let Some(ex) = Experiment::get(&data.db, &experiment)? { let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?; if crates.is_empty() { None } else { Some(crates.remove(0)) } } else { None }; Ok(ApiResponse::Success { result }.into_response()?) 
} #[derive(Clone)] pub struct RecordProgressThread { // String is the worker name queue: Sender<(ExperimentData<ProgressData>, String)>, in_flight_requests: Arc<(Mutex<usize>, Condvar)>, } impl RecordProgressThread { pub fn new( db: crate::db::Database, metrics: crate::server::metrics::Metrics, ) -> RecordProgressThread { // 64 message queue, after which we start load shedding automatically. let (tx, rx) = crossbeam_channel::bounded(64); let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new())); let this = RecordProgressThread { queue: tx, in_flight_requests, }; let ret = this.clone(); std::thread::spawn(move || loop { // Panics should already be logged and otherwise there's not much we // can/should do. let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let (result, worker_name) = rx.recv().unwrap(); this.block_until_idle(); let start = std::time::Instant::now(); if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() { let db = DatabaseDB::new(&db); if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) { // Failing to record a result is basically fine -- this // just means that we'll have to re-try this job. log::error!("Failed to store result into database: {:?}", e); crate::utils::report_failure(&e); } metrics.record_completed_jobs( &worker_name, &ex.name, result.data.results.len() as i64, ); if let Err(e) = db.clear_stale_records() { // Not a hard failure. We can continue even if we failed // to clear records from already completed runs... log::error!("Failed to clear stale records: {:?}", e); crate::utils::report_failure(&e); } metrics .crater_endpoint_time .with_label_values(&["record_progress"]) .observe(start.elapsed().as_secs_f64()); } })); }); ret } pub fn block_until_idle(&self) { // Wait until there are zero in-flight requests. // // Note: We do **not** keep the lock here for the subsequent // computation. 
That means that if we ever observe zero, then we're // going to kick off the below computation; obviously requests may keep // coming in -- we don't want to block those requests. // // The expectation that we will see zero here also implies that // the server is *sometimes* idle (i.e., we are not constantly // processing requests at 100% load). It's not clear that's 100% // a valid assumption, but if we are at 100% load in terms of // requests coming in, that's a problem in and of itself (since // the majority of expected requests are record-progress, which // should be *very* fast now that the work for them is async and // offloaded to this thread). //
.1 .wait_while( self.in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()), |g| *g != 0, ) .unwrap_or_else(|g| g.into_inner()), ); } pub fn start_request(&self) -> RequestGuard { *self .in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()) += 1; RequestGuard { thread: self.clone(), } } } pub struct RequestGuard { thread: RecordProgressThread, } impl Drop for RequestGuard { fn drop(&mut self) { *self .thread .in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()) -= 1; self.thread.in_flight_requests.1.notify_one(); } } // This endpoint does not use the mutex data wrapper to exclude running in // parallel with other endpoints, which may mean that we (for example) are // recording results for an abort'd experiment. This should generally be fine -- // the database already has foreign_keys enabled and that should ensure // appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug // elsewhere, not here). // // In practice it's pretty likely that we won't fully run in parallel anyway, // but this lets some of the work proceed without the lock being held, which is // generally positive. fn endpoint_record_progress( result: ExperimentData<ProgressData>, data: Arc<Data>, auth: AuthDetails, ) -> Fallible<Response<Body>> { match data .record_progress_worker .queue .try_send((result, auth.name)) { Ok(()) => Ok(ApiResponse::Success { result: true }.into_response()?), Err(crossbeam_channel::TrySendError::Full(_)) => { data.metrics.crater_bounced_record_progress.inc_by(1); Ok(ApiResponse::<()>::SlowDown.into_response()?) } Err(crossbeam_channel::TrySendError::Disconnected(_)) => unreachable!(), } } fn endpoint_heartbeat(data: Arc<Data>, auth: AuthDetails) -> Fallible<Response<Body>> { if let Some(rev) = auth.git_revision { data.agents.set_git_revision(&auth.name, &rev)?; } data.agents.record_heartbeat(&auth.name)?; Ok(ApiResponse::Success { result: true }.into_response()?) 
} fn endpoint_error( error: ExperimentData<HashMap<String, String>>, mutex: Arc<Mutex<Data>>, auth: AuthDetails, ) -> Fallible<Response<Body>> { log::error!( "agent {} failed while running {}: {:?}", auth.name, error.experiment_name, error.data.get("error") ); let data = mutex.lock().unwrap(); let ex = Experiment::get(&data.db, &error.experiment_name)? .ok_or_else(|| err_msg("no experiment run by this agent"))?; data.metrics.record_error(&auth.name, &ex.name); Ok(ApiResponse::Success { result: true }.into_response()?) } fn handle_results(resp: Fallible<Response<Body>>) -> Response<Body> { match resp { Ok(resp) => resp, Err(err) => ApiResponse::internal_error(err.to_string()) .into_response() .unwrap(), } } async fn handle_errors(err: Rejection) -> Result<Response<Body>, Rejection> { let error = if let Some(compat) = err.find::<Compat<HttpError>>() { Some(*compat.get_ref()) } else if err.is_not_found() { Some(HttpError::NotFound) } else { None }; match error { Some(HttpError::NotFound) => Ok(ApiResponse::not_found().into_response().unwrap()), Some(HttpError::Forbidden) => Ok(ApiResponse::unauthorized().into_response().unwrap()), None => Err(err), } }
// Ignore the mutex guard (see above). drop( self.in_flight_requests
random_line_split
agent.rs
use crate::agent::Capabilities; use crate::experiments::{Assignee, Experiment}; use crate::prelude::*; use crate::results::{DatabaseDB, EncodingType, ProgressData}; use crate::server::api_types::{AgentConfig, ApiResponse}; use crate::server::auth::{auth_filter, AuthDetails, TokenType}; use crate::server::messages::Message; use crate::server::{Data, GithubData, HttpError}; use crossbeam_channel::Sender; use failure::Compat; use http::Response; use hyper::Body; use std::collections::HashMap; use std::sync::{Arc, Condvar, Mutex}; use warp::{self, Filter, Rejection}; #[derive(Deserialize)] #[serde(rename_all = "kebab-case")] pub struct ExperimentData<T> { experiment_name: String, #[serde(flatten)] data: T, } pub fn routes( data: Arc<Data>, mutex: Arc<Mutex<Data>>, github_data: Option<Arc<GithubData>>, ) -> impl Filter<Extract = (Response<Body>,), Error = Rejection> + Clone { let data_cloned = data.clone(); let data_filter = warp::any().map(move || data_cloned.clone()); let mutex_filter = warp::any().map(move || mutex.clone()); let github_data_filter = warp::any().map(move || github_data.clone()); let config = warp::post() .and(warp::path("config")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_config); let next_experiment = warp::post() .and(warp::path("next-experiment")) .and(warp::path::end()) .and(mutex_filter.clone()) .and(github_data_filter) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_next_experiment); let next_crate = warp::post() .and(warp::path("next-crate")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_next_crate); let record_progress = warp::post() .and(warp::path("record-progress")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_record_progress); let heartbeat = 
warp::post() .and(warp::path("heartbeat")) .and(warp::path::end()) .and(data_filter) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_heartbeat); let error = warp::post() .and(warp::path("error")) .and(warp::path::end()) .and(warp::body::json()) .and(mutex_filter) .and(auth_filter(data, TokenType::Agent)) .map(endpoint_error); warp::any() .and( config .or(next_experiment) .unify() .or(next_crate) .unify() .or(record_progress) .unify() .or(heartbeat) .unify() .or(error) .unify(), ) .map(handle_results) .recover(handle_errors) .unify() } fn endpoint_config( caps: Capabilities, data: Arc<Data>, auth: AuthDetails, ) -> Fallible<Response<Body>> { data.agents.add_capabilities(&auth.name, &caps)?; Ok(ApiResponse::Success { result: AgentConfig { agent_name: auth.name, crater_config: data.config.clone(), }, } .into_response()?) } fn endpoint_next_experiment( mutex: Arc<Mutex<Data>>, github_data: Option<Arc<GithubData>>, auth: AuthDetails, ) -> Fallible<Response<Body>> { //we need to make sure that Experiment::next executes uninterrupted let data = mutex.lock().unwrap(); let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?; let result = if let Some((new, ex)) = next { if new { if let Some(github_data) = github_data.as_ref() { if let Some(ref github_issue) = ex.github_issue { Message::new() .line( "construction", format!("Experiment **`{}`** is now **running**", ex.name,), ) .send(&github_issue.api_url, &data, github_data)?; } } } Some(ex) } else { None }; Ok(ApiResponse::Success { result }.into_response()?) } fn endpoint_next_crate( experiment: String, data: Arc<Data>, _auth: AuthDetails, ) -> Fallible<Response<Body>> { let result: Option<crate::crates::Crate> = if let Some(ex) = Experiment::get(&data.db, &experiment)? { let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?; if crates.is_empty() { None } else { Some(crates.remove(0)) } } else
; Ok(ApiResponse::Success { result }.into_response()?) } #[derive(Clone)] pub struct RecordProgressThread { // String is the worker name queue: Sender<(ExperimentData<ProgressData>, String)>, in_flight_requests: Arc<(Mutex<usize>, Condvar)>, } impl RecordProgressThread { pub fn new( db: crate::db::Database, metrics: crate::server::metrics::Metrics, ) -> RecordProgressThread { // 64 message queue, after which we start load shedding automatically. let (tx, rx) = crossbeam_channel::bounded(64); let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new())); let this = RecordProgressThread { queue: tx, in_flight_requests, }; let ret = this.clone(); std::thread::spawn(move || loop { // Panics should already be logged and otherwise there's not much we // can/should do. let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let (result, worker_name) = rx.recv().unwrap(); this.block_until_idle(); let start = std::time::Instant::now(); if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() { let db = DatabaseDB::new(&db); if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) { // Failing to record a result is basically fine -- this // just means that we'll have to re-try this job. log::error!("Failed to store result into database: {:?}", e); crate::utils::report_failure(&e); } metrics.record_completed_jobs( &worker_name, &ex.name, result.data.results.len() as i64, ); if let Err(e) = db.clear_stale_records() { // Not a hard failure. We can continue even if we failed // to clear records from already completed runs... log::error!("Failed to clear stale records: {:?}", e); crate::utils::report_failure(&e); } metrics .crater_endpoint_time .with_label_values(&["record_progress"]) .observe(start.elapsed().as_secs_f64()); } })); }); ret } pub fn block_until_idle(&self) { // Wait until there are zero in-flight requests. // // Note: We do **not** keep the lock here for the subsequent // computation. 
That means that if we ever observe zero, then we're // going to kick off the below computation; obviously requests may keep // coming in -- we don't want to block those requests. // // The expectation that we will see zero here also implies that // the server is *sometimes* idle (i.e., we are not constantly // processing requests at 100% load). It's not clear that's 100% // a valid assumption, but if we are at 100% load in terms of // requests coming in, that's a problem in and of itself (since // the majority of expected requests are record-progress, which // should be *very* fast now that the work for them is async and // offloaded to this thread). // // Ignore the mutex guard (see above). drop( self.in_flight_requests .1 .wait_while( self.in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()), |g| *g != 0, ) .unwrap_or_else(|g| g.into_inner()), ); } pub fn start_request(&self) -> RequestGuard { *self .in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()) += 1; RequestGuard { thread: self.clone(), } } } pub struct RequestGuard { thread: RecordProgressThread, } impl Drop for RequestGuard { fn drop(&mut self) { *self .thread .in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()) -= 1; self.thread.in_flight_requests.1.notify_one(); } } // This endpoint does not use the mutex data wrapper to exclude running in // parallel with other endpoints, which may mean that we (for example) are // recording results for an abort'd experiment. This should generally be fine -- // the database already has foreign_keys enabled and that should ensure // appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug // elsewhere, not here). // // In practice it's pretty likely that we won't fully run in parallel anyway, // but this lets some of the work proceed without the lock being held, which is // generally positive. 
fn endpoint_record_progress( result: ExperimentData<ProgressData>, data: Arc<Data>, auth: AuthDetails, ) -> Fallible<Response<Body>> { match data .record_progress_worker .queue .try_send((result, auth.name)) { Ok(()) => Ok(ApiResponse::Success { result: true }.into_response()?), Err(crossbeam_channel::TrySendError::Full(_)) => { data.metrics.crater_bounced_record_progress.inc_by(1); Ok(ApiResponse::<()>::SlowDown.into_response()?) } Err(crossbeam_channel::TrySendError::Disconnected(_)) => unreachable!(), } } fn endpoint_heartbeat(data: Arc<Data>, auth: AuthDetails) -> Fallible<Response<Body>> { if let Some(rev) = auth.git_revision { data.agents.set_git_revision(&auth.name, &rev)?; } data.agents.record_heartbeat(&auth.name)?; Ok(ApiResponse::Success { result: true }.into_response()?) } fn endpoint_error( error: ExperimentData<HashMap<String, String>>, mutex: Arc<Mutex<Data>>, auth: AuthDetails, ) -> Fallible<Response<Body>> { log::error!( "agent {} failed while running {}: {:?}", auth.name, error.experiment_name, error.data.get("error") ); let data = mutex.lock().unwrap(); let ex = Experiment::get(&data.db, &error.experiment_name)? .ok_or_else(|| err_msg("no experiment run by this agent"))?; data.metrics.record_error(&auth.name, &ex.name); Ok(ApiResponse::Success { result: true }.into_response()?) } fn handle_results(resp: Fallible<Response<Body>>) -> Response<Body> { match resp { Ok(resp) => resp, Err(err) => ApiResponse::internal_error(err.to_string()) .into_response() .unwrap(), } } async fn handle_errors(err: Rejection) -> Result<Response<Body>, Rejection> { let error = if let Some(compat) = err.find::<Compat<HttpError>>() { Some(*compat.get_ref()) } else if err.is_not_found() { Some(HttpError::NotFound) } else { None }; match error { Some(HttpError::NotFound) => Ok(ApiResponse::not_found().into_response().unwrap()), Some(HttpError::Forbidden) => Ok(ApiResponse::unauthorized().into_response().unwrap()), None => Err(err), } }
{ None }
conditional_block
agent.rs
use crate::agent::Capabilities; use crate::experiments::{Assignee, Experiment}; use crate::prelude::*; use crate::results::{DatabaseDB, EncodingType, ProgressData}; use crate::server::api_types::{AgentConfig, ApiResponse}; use crate::server::auth::{auth_filter, AuthDetails, TokenType}; use crate::server::messages::Message; use crate::server::{Data, GithubData, HttpError}; use crossbeam_channel::Sender; use failure::Compat; use http::Response; use hyper::Body; use std::collections::HashMap; use std::sync::{Arc, Condvar, Mutex}; use warp::{self, Filter, Rejection}; #[derive(Deserialize)] #[serde(rename_all = "kebab-case")] pub struct ExperimentData<T> { experiment_name: String, #[serde(flatten)] data: T, } pub fn routes( data: Arc<Data>, mutex: Arc<Mutex<Data>>, github_data: Option<Arc<GithubData>>, ) -> impl Filter<Extract = (Response<Body>,), Error = Rejection> + Clone { let data_cloned = data.clone(); let data_filter = warp::any().map(move || data_cloned.clone()); let mutex_filter = warp::any().map(move || mutex.clone()); let github_data_filter = warp::any().map(move || github_data.clone()); let config = warp::post() .and(warp::path("config")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_config); let next_experiment = warp::post() .and(warp::path("next-experiment")) .and(warp::path::end()) .and(mutex_filter.clone()) .and(github_data_filter) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_next_experiment); let next_crate = warp::post() .and(warp::path("next-crate")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_next_crate); let record_progress = warp::post() .and(warp::path("record-progress")) .and(warp::path::end()) .and(warp::body::json()) .and(data_filter.clone()) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_record_progress); let heartbeat = 
warp::post() .and(warp::path("heartbeat")) .and(warp::path::end()) .and(data_filter) .and(auth_filter(data.clone(), TokenType::Agent)) .map(endpoint_heartbeat); let error = warp::post() .and(warp::path("error")) .and(warp::path::end()) .and(warp::body::json()) .and(mutex_filter) .and(auth_filter(data, TokenType::Agent)) .map(endpoint_error); warp::any() .and( config .or(next_experiment) .unify() .or(next_crate) .unify() .or(record_progress) .unify() .or(heartbeat) .unify() .or(error) .unify(), ) .map(handle_results) .recover(handle_errors) .unify() } fn endpoint_config( caps: Capabilities, data: Arc<Data>, auth: AuthDetails, ) -> Fallible<Response<Body>> { data.agents.add_capabilities(&auth.name, &caps)?; Ok(ApiResponse::Success { result: AgentConfig { agent_name: auth.name, crater_config: data.config.clone(), }, } .into_response()?) } fn endpoint_next_experiment( mutex: Arc<Mutex<Data>>, github_data: Option<Arc<GithubData>>, auth: AuthDetails, ) -> Fallible<Response<Body>> { //we need to make sure that Experiment::next executes uninterrupted let data = mutex.lock().unwrap(); let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?; let result = if let Some((new, ex)) = next { if new { if let Some(github_data) = github_data.as_ref() { if let Some(ref github_issue) = ex.github_issue { Message::new() .line( "construction", format!("Experiment **`{}`** is now **running**", ex.name,), ) .send(&github_issue.api_url, &data, github_data)?; } } } Some(ex) } else { None }; Ok(ApiResponse::Success { result }.into_response()?) } fn endpoint_next_crate( experiment: String, data: Arc<Data>, _auth: AuthDetails, ) -> Fallible<Response<Body>> { let result: Option<crate::crates::Crate> = if let Some(ex) = Experiment::get(&data.db, &experiment)? { let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?; if crates.is_empty() { None } else { Some(crates.remove(0)) } } else { None }; Ok(ApiResponse::Success { result }.into_response()?) 
} #[derive(Clone)] pub struct RecordProgressThread { // String is the worker name queue: Sender<(ExperimentData<ProgressData>, String)>, in_flight_requests: Arc<(Mutex<usize>, Condvar)>, } impl RecordProgressThread { pub fn new( db: crate::db::Database, metrics: crate::server::metrics::Metrics, ) -> RecordProgressThread { // 64 message queue, after which we start load shedding automatically. let (tx, rx) = crossbeam_channel::bounded(64); let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new())); let this = RecordProgressThread { queue: tx, in_flight_requests, }; let ret = this.clone(); std::thread::spawn(move || loop { // Panics should already be logged and otherwise there's not much we // can/should do. let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let (result, worker_name) = rx.recv().unwrap(); this.block_until_idle(); let start = std::time::Instant::now(); if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() { let db = DatabaseDB::new(&db); if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) { // Failing to record a result is basically fine -- this // just means that we'll have to re-try this job. log::error!("Failed to store result into database: {:?}", e); crate::utils::report_failure(&e); } metrics.record_completed_jobs( &worker_name, &ex.name, result.data.results.len() as i64, ); if let Err(e) = db.clear_stale_records() { // Not a hard failure. We can continue even if we failed // to clear records from already completed runs... log::error!("Failed to clear stale records: {:?}", e); crate::utils::report_failure(&e); } metrics .crater_endpoint_time .with_label_values(&["record_progress"]) .observe(start.elapsed().as_secs_f64()); } })); }); ret } pub fn block_until_idle(&self) { // Wait until there are zero in-flight requests. // // Note: We do **not** keep the lock here for the subsequent // computation. 
That means that if we ever observe zero, then we're // going to kick off the below computation; obviously requests may keep // coming in -- we don't want to block those requests. // // The expectation that we will see zero here also implies that // the server is *sometimes* idle (i.e., we are not constantly // processing requests at 100% load). It's not clear that's 100% // a valid assumption, but if we are at 100% load in terms of // requests coming in, that's a problem in and of itself (since // the majority of expected requests are record-progress, which // should be *very* fast now that the work for them is async and // offloaded to this thread). // // Ignore the mutex guard (see above). drop( self.in_flight_requests .1 .wait_while( self.in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()), |g| *g != 0, ) .unwrap_or_else(|g| g.into_inner()), ); } pub fn start_request(&self) -> RequestGuard { *self .in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()) += 1; RequestGuard { thread: self.clone(), } } } pub struct
{ thread: RecordProgressThread, } impl Drop for RequestGuard { fn drop(&mut self) { *self .thread .in_flight_requests .0 .lock() .unwrap_or_else(|l| l.into_inner()) -= 1; self.thread.in_flight_requests.1.notify_one(); } } // This endpoint does not use the mutex data wrapper to exclude running in // parallel with other endpoints, which may mean that we (for example) are // recording results for an abort'd experiment. This should generally be fine -- // the database already has foreign_keys enabled and that should ensure // appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug // elsewhere, not here). // // In practice it's pretty likely that we won't fully run in parallel anyway, // but this lets some of the work proceed without the lock being held, which is // generally positive. fn endpoint_record_progress( result: ExperimentData<ProgressData>, data: Arc<Data>, auth: AuthDetails, ) -> Fallible<Response<Body>> { match data .record_progress_worker .queue .try_send((result, auth.name)) { Ok(()) => Ok(ApiResponse::Success { result: true }.into_response()?), Err(crossbeam_channel::TrySendError::Full(_)) => { data.metrics.crater_bounced_record_progress.inc_by(1); Ok(ApiResponse::<()>::SlowDown.into_response()?) } Err(crossbeam_channel::TrySendError::Disconnected(_)) => unreachable!(), } } fn endpoint_heartbeat(data: Arc<Data>, auth: AuthDetails) -> Fallible<Response<Body>> { if let Some(rev) = auth.git_revision { data.agents.set_git_revision(&auth.name, &rev)?; } data.agents.record_heartbeat(&auth.name)?; Ok(ApiResponse::Success { result: true }.into_response()?) } fn endpoint_error( error: ExperimentData<HashMap<String, String>>, mutex: Arc<Mutex<Data>>, auth: AuthDetails, ) -> Fallible<Response<Body>> { log::error!( "agent {} failed while running {}: {:?}", auth.name, error.experiment_name, error.data.get("error") ); let data = mutex.lock().unwrap(); let ex = Experiment::get(&data.db, &error.experiment_name)? 
.ok_or_else(|| err_msg("no experiment run by this agent"))?; data.metrics.record_error(&auth.name, &ex.name); Ok(ApiResponse::Success { result: true }.into_response()?) } fn handle_results(resp: Fallible<Response<Body>>) -> Response<Body> { match resp { Ok(resp) => resp, Err(err) => ApiResponse::internal_error(err.to_string()) .into_response() .unwrap(), } } async fn handle_errors(err: Rejection) -> Result<Response<Body>, Rejection> { let error = if let Some(compat) = err.find::<Compat<HttpError>>() { Some(*compat.get_ref()) } else if err.is_not_found() { Some(HttpError::NotFound) } else { None }; match error { Some(HttpError::NotFound) => Ok(ApiResponse::not_found().into_response().unwrap()), Some(HttpError::Forbidden) => Ok(ApiResponse::unauthorized().into_response().unwrap()), None => Err(err), } }
RequestGuard
identifier_name
message.rs
//! https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-marshaling use byteorder::{LittleEndian, BigEndian, ReadBytesExt, ByteOrder, WriteBytesExt}; use crate::names::{BusName, InterfaceName, ErrorName, MemberName}; use crate::writer::{DbusWriter, DbusWrite}; use crate::reader::{DbusReader, DbusRead}; use crate::type_system::{ObjectPath, Signature, UnixFd, Serial}; use std::io; #[cfg(test)] mod tests { fn reverse<T: Clone>(xs: &[T]) -> Vec<T> { let mut rev = vec!(); for x in xs.iter() { rev.insert(0, x.clone()) } rev } #[cfg(test)] mod tests { use crate::message::tests::reverse; quickcheck! { fn prop(xs: Vec<u32>) -> bool { xs == reverse(&reverse(&xs)) } } } } /// The maximum length of a message, including header, header alignment padding, /// and body is 2 to the 27th power or 134217728 (128 MiB). /// Implementations must not send or accept messages exceeding this size. const MAX_MESSAGE_SIZE: u32 = 2^27; /// A message consists of a header and a body. If you think of a message as a package, /// the header is the address, and the body contains the package contents. /// Both header and body use the D-Bus [type system](https://dbus.freedesktop.org/doc/dbus-specification.html#type-system) and format for serializing data. struct Message { /// The message delivery system uses the header information to figure out /// where to send the message and how to interpret it. header: Header, /// The body of the message is made up of zero or more arguments, /// which are typed values, such as an integer or a byte array. 
body: Body, } impl Message { fn write<T>(&self, writer:T) -> Result<(), io::Error> where T: io::Write { let mut writer = DbusWriter::new(writer); match self.header.endianess_flag { EndianessFlag::LittleEndian => { self.header.write::<T, LittleEndian>(&mut writer)?; self.body.write::<T, LittleEndian>(&mut writer)?; }, EndianessFlag::BigEndian => { self.header.write::<T, BigEndian>(&mut writer)?; self.body.write::<T, BigEndian>(&mut writer)?; }, }; Ok(()) } } /// Endianness flag; ASCII 'l' for little-endian or ASCII 'B' for big-endian. /// Both header and body are in this endianness. #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum EndianessFlag { LittleEndian, BigEndian, } impl DbusWrite for EndianessFlag { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { match self { EndianessFlag::LittleEndian => writer.write_u8(b'l'), EndianessFlag::BigEndian => writer.write_u8(b'B'), } } } impl DbusRead<EndianessFlag> for EndianessFlag { fn read<T1, T2>(&self, reader: &mut DbusReader<T1>) -> Result<EndianessFlag, io::Error> where T1: io::Read, T2: ByteOrder { match reader.read_u8()? { b'l' => Ok(EndianessFlag::LittleEndian), b'B' => Ok(EndianessFlag::BigEndian), x => { let str_err = format!("Invalid endianess `{}`", x); Err(io::Error::new(io::ErrorKind::InvalidData, str_err)) }, } } } /// Message type. Unknown types must be ignored. #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum MessageType { /// This is an invalid type. Invalid = 0, /// Method call. This message type may prompt a reply. MethodCall = 1, /// Method reply with returned data. MethodReturn = 2, /// Error reply. If the first argument exists /// and is a string, it is an error message. Error = 3, /// Signal emission. Signal = 4, } /// Major protocol version of the sending application. 
/// If the major protocol version of the receiving application does not match, /// the applications will not be able to communicate and the D-Bus connection must be disconnected. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct MajorProtocolVersion(pub u8); impl DbusWrite for MajorProtocolVersion { fn
<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { writer.write_u8(self.0) } } bitflags! { struct HeaderFlags: u8 { /// This message does not expect method return replies or error replies, /// even if it is of a type that can have a reply; the reply should be omitted. const NO_REPLY_EXPECTED = 0x1; /// The bus must not launch an owner for the destination name in response to this message. const NO_AUTO_START = 0x1; /// This flag may be set on a method call message to inform the receiving side that the caller /// is prepared to wait for interactive authorization, which might take a considerable time to complete. const ALLOW_INTERACTIVE_AUTHORIZATION = 0x4; } } /// The array at the end of the header contains header fields, /// where each field is a 1-byte field code followed by a field value. /// A header must contain the required header fields for its message type, /// and zero or more of any optional header fields. #[repr(u8)] #[derive(Copy, Clone, Debug, PartialEq, Eq)] enum HeaderFieldCode { /// Not a valid field name (error if it appears in a message) Invalid = 0, /// The object to send a call to, or the object a signal is emitted from. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Path = 1, /// The interface to invoke a method call on, or that a signal is emitted from. /// Required in `MessageType::Signal`. Interface = 2, /// The member, either the method name or signal name. /// This header field is controlled by the message sender. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Member = 3, /// The name of the error that occurred, for errors. /// Required in `MessageType::Error`. ErrorName = 4, /// The serial number of the message this message is a reply to. /// Required in `MessageType::Error` and `MessageType::MethodReturn`. ReplySerial = 5, /// The name of the connection this message is intended for. /// Optional. 
Destination = 6, /// Unique name of the sending connection. This field is usually only meaningful /// in combination with the message bus, but other servers may define their own meanings for it. /// Optional. Sender = 7, /// The signature of the message body. If omitted, it is assumed to be the empty signature "". /// Optional. Signature = 8, /// The number of Unix file descriptors that accompany the message. /// If omitted, it is assumed that no Unix file descriptors accompany the message. UnixFds = 9, } /// The array at the end of the header contains header fields, /// where each field is a 1-byte field code followed by a field value. /// A header must contain the required header fields for its message type, /// and zero or more of any optional header fields. /// #[repr(u8)] enum HeaderField { /// Not a valid field name (error if it appears in a message) Invalid, /// The object to send a call to, or the object a signal is emitted from. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Path(ObjectPath), /// The interface to invoke a method call on, or that a signal is emitted from. /// Required in `MessageType::Signal`. Interface(InterfaceName), /// The member, either the method name or signal name. /// This header field is controlled by the message sender. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Member(MemberName), /// The name of the error that occurred, for errors. /// Required in `MessageType::Error`. ErrorName(ErrorName), /// The serial number of the message this message is a reply to. /// Required in `MessageType::Error` and `MessageType::MethodReturn`. ReplySerial(Serial), /// The name of the connection this message is intended for. /// Optional. Destination(String), /// Unique name of the sending connection. This field is usually only meaningful /// in combination with the message bus, but other servers may define their own meanings for it. /// Optional. Sender(String), /// The signature of the message body. 
If omitted, it is assumed to be the empty signature "". /// Optional. Signature(Signature), /// The number of Unix file descriptors that accompany the message. /// If omitted, it is assumed that no Unix file descriptors accompany the message. UnixFds(u32), } impl DbusWrite for HeaderField { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { match self { HeaderField::Invalid => return Err(io::Error::new(io::ErrorKind::InvalidInput, "HeaderField::Invalid can not be marshaled!")), HeaderField::Path(object_path) => object_path.write::<_, T2>(writer)?, HeaderField::Interface(interface_name) => interface_name.write::<_, T2>(writer)?, HeaderField::Member(member_name) => member_name.write::<_, T2>(writer)?, HeaderField::ErrorName(error_name) => error_name.write::<_, T2>(writer)?, HeaderField::ReplySerial(serial) => serial.write::<_, T2>(writer)?, HeaderField::Destination(destination) => writer.write_string::<T2>(destination)?, HeaderField::Sender(sender) => writer.write_string::<T2>(sender)?, HeaderField::Signature(signature) => signature.write::<_, T2>(writer)?, HeaderField::UnixFds(fd) => writer.write_u32::<T2>(*fd)?, }; Ok(()) } } /// The length of the header must be a multiple of 8, allowing the body to begin on /// an 8-byte boundary when storing the entire message in a single buffer. /// If the header does not naturally end on an 8-byte boundary up to 7 bytes of /// nul-initialized alignment padding must be added. /// https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-header-fields struct Header { endianess_flag: EndianessFlag, /// Message type. Unknown types must be ignored. message_type: MessageType, /// Bitwise OR of flags. Unknown flags must be ignored. flags: HeaderFlags, /// Major protocol version of the sending application. 
/// If the major protocol version of the receiving application does not match, /// the applications will not be able to communicate and the D-Bus connection must be disconnected. major_protocol_version: MajorProtocolVersion, /// Length in bytes of the message body, starting from the end of the header. /// The header ends after its alignment padding to an 8-boundary. length_message_body: u32, /// The serial of this message, used as a cookie by the sender to identify /// the reply corresponding to this request. This must not be zero. serial: Serial, /// An array of zero or more header fields where the byte is the field code, /// and the variant is the field value. The message type determines which fields are required. header_fields: Vec<(HeaderFieldCode, HeaderField)>, } impl DbusWrite for Header { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { writer.write_u8(self.endianess_flag as u8)?; writer.write_u8(self.message_type as u8)?; writer.write_u8(self.flags.bits())?; writer.write_u8(self.major_protocol_version.0)?; writer.write_u32::<T2>(self.length_message_body)?; writer.write_u32::<T2>(self.serial.0)?; for (ref code, ref field) in self.header_fields.iter().by_ref() { writer.write_u8(code.clone() as u8)?; field.write::<T1, T2>(writer)?; } Ok(()) } } struct Body { } impl DbusWrite for Body { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { unimplemented!(); } }
write
identifier_name
message.rs
//! https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-marshaling use byteorder::{LittleEndian, BigEndian, ReadBytesExt, ByteOrder, WriteBytesExt}; use crate::names::{BusName, InterfaceName, ErrorName, MemberName}; use crate::writer::{DbusWriter, DbusWrite}; use crate::reader::{DbusReader, DbusRead}; use crate::type_system::{ObjectPath, Signature, UnixFd, Serial}; use std::io; #[cfg(test)] mod tests { fn reverse<T: Clone>(xs: &[T]) -> Vec<T> { let mut rev = vec!(); for x in xs.iter() { rev.insert(0, x.clone()) } rev } #[cfg(test)] mod tests { use crate::message::tests::reverse; quickcheck! { fn prop(xs: Vec<u32>) -> bool { xs == reverse(&reverse(&xs)) } } } } /// The maximum length of a message, including header, header alignment padding, /// and body is 2 to the 27th power or 134217728 (128 MiB). /// Implementations must not send or accept messages exceeding this size. const MAX_MESSAGE_SIZE: u32 = 2^27; /// A message consists of a header and a body. If you think of a message as a package, /// the header is the address, and the body contains the package contents. /// Both header and body use the D-Bus [type system](https://dbus.freedesktop.org/doc/dbus-specification.html#type-system) and format for serializing data.
/// The body of the message is made up of zero or more arguments, /// which are typed values, such as an integer or a byte array. body: Body, } impl Message { fn write<T>(&self, writer:T) -> Result<(), io::Error> where T: io::Write { let mut writer = DbusWriter::new(writer); match self.header.endianess_flag { EndianessFlag::LittleEndian => { self.header.write::<T, LittleEndian>(&mut writer)?; self.body.write::<T, LittleEndian>(&mut writer)?; }, EndianessFlag::BigEndian => { self.header.write::<T, BigEndian>(&mut writer)?; self.body.write::<T, BigEndian>(&mut writer)?; }, }; Ok(()) } } /// Endianness flag; ASCII 'l' for little-endian or ASCII 'B' for big-endian. /// Both header and body are in this endianness. #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum EndianessFlag { LittleEndian, BigEndian, } impl DbusWrite for EndianessFlag { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { match self { EndianessFlag::LittleEndian => writer.write_u8(b'l'), EndianessFlag::BigEndian => writer.write_u8(b'B'), } } } impl DbusRead<EndianessFlag> for EndianessFlag { fn read<T1, T2>(&self, reader: &mut DbusReader<T1>) -> Result<EndianessFlag, io::Error> where T1: io::Read, T2: ByteOrder { match reader.read_u8()? { b'l' => Ok(EndianessFlag::LittleEndian), b'B' => Ok(EndianessFlag::BigEndian), x => { let str_err = format!("Invalid endianess `{}`", x); Err(io::Error::new(io::ErrorKind::InvalidData, str_err)) }, } } } /// Message type. Unknown types must be ignored. #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum MessageType { /// This is an invalid type. Invalid = 0, /// Method call. This message type may prompt a reply. MethodCall = 1, /// Method reply with returned data. MethodReturn = 2, /// Error reply. If the first argument exists /// and is a string, it is an error message. Error = 3, /// Signal emission. Signal = 4, } /// Major protocol version of the sending application. 
/// If the major protocol version of the receiving application does not match, /// the applications will not be able to communicate and the D-Bus connection must be disconnected. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct MajorProtocolVersion(pub u8); impl DbusWrite for MajorProtocolVersion { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { writer.write_u8(self.0) } } bitflags! { struct HeaderFlags: u8 { /// This message does not expect method return replies or error replies, /// even if it is of a type that can have a reply; the reply should be omitted. const NO_REPLY_EXPECTED = 0x1; /// The bus must not launch an owner for the destination name in response to this message. const NO_AUTO_START = 0x1; /// This flag may be set on a method call message to inform the receiving side that the caller /// is prepared to wait for interactive authorization, which might take a considerable time to complete. const ALLOW_INTERACTIVE_AUTHORIZATION = 0x4; } } /// The array at the end of the header contains header fields, /// where each field is a 1-byte field code followed by a field value. /// A header must contain the required header fields for its message type, /// and zero or more of any optional header fields. #[repr(u8)] #[derive(Copy, Clone, Debug, PartialEq, Eq)] enum HeaderFieldCode { /// Not a valid field name (error if it appears in a message) Invalid = 0, /// The object to send a call to, or the object a signal is emitted from. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Path = 1, /// The interface to invoke a method call on, or that a signal is emitted from. /// Required in `MessageType::Signal`. Interface = 2, /// The member, either the method name or signal name. /// This header field is controlled by the message sender. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Member = 3, /// The name of the error that occurred, for errors. 
/// Required in `MessageType::Error`. ErrorName = 4, /// The serial number of the message this message is a reply to. /// Required in `MessageType::Error` and `MessageType::MethodReturn`. ReplySerial = 5, /// The name of the connection this message is intended for. /// Optional. Destination = 6, /// Unique name of the sending connection. This field is usually only meaningful /// in combination with the message bus, but other servers may define their own meanings for it. /// Optional. Sender = 7, /// The signature of the message body. If omitted, it is assumed to be the empty signature "". /// Optional. Signature = 8, /// The number of Unix file descriptors that accompany the message. /// If omitted, it is assumed that no Unix file descriptors accompany the message. UnixFds = 9, } /// The array at the end of the header contains header fields, /// where each field is a 1-byte field code followed by a field value. /// A header must contain the required header fields for its message type, /// and zero or more of any optional header fields. /// #[repr(u8)] enum HeaderField { /// Not a valid field name (error if it appears in a message) Invalid, /// The object to send a call to, or the object a signal is emitted from. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Path(ObjectPath), /// The interface to invoke a method call on, or that a signal is emitted from. /// Required in `MessageType::Signal`. Interface(InterfaceName), /// The member, either the method name or signal name. /// This header field is controlled by the message sender. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Member(MemberName), /// The name of the error that occurred, for errors. /// Required in `MessageType::Error`. ErrorName(ErrorName), /// The serial number of the message this message is a reply to. /// Required in `MessageType::Error` and `MessageType::MethodReturn`. ReplySerial(Serial), /// The name of the connection this message is intended for. 
/// Optional. Destination(String), /// Unique name of the sending connection. This field is usually only meaningful /// in combination with the message bus, but other servers may define their own meanings for it. /// Optional. Sender(String), /// The signature of the message body. If omitted, it is assumed to be the empty signature "". /// Optional. Signature(Signature), /// The number of Unix file descriptors that accompany the message. /// If omitted, it is assumed that no Unix file descriptors accompany the message. UnixFds(u32), } impl DbusWrite for HeaderField { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { match self { HeaderField::Invalid => return Err(io::Error::new(io::ErrorKind::InvalidInput, "HeaderField::Invalid can not be marshaled!")), HeaderField::Path(object_path) => object_path.write::<_, T2>(writer)?, HeaderField::Interface(interface_name) => interface_name.write::<_, T2>(writer)?, HeaderField::Member(member_name) => member_name.write::<_, T2>(writer)?, HeaderField::ErrorName(error_name) => error_name.write::<_, T2>(writer)?, HeaderField::ReplySerial(serial) => serial.write::<_, T2>(writer)?, HeaderField::Destination(destination) => writer.write_string::<T2>(destination)?, HeaderField::Sender(sender) => writer.write_string::<T2>(sender)?, HeaderField::Signature(signature) => signature.write::<_, T2>(writer)?, HeaderField::UnixFds(fd) => writer.write_u32::<T2>(*fd)?, }; Ok(()) } } /// The length of the header must be a multiple of 8, allowing the body to begin on /// an 8-byte boundary when storing the entire message in a single buffer. /// If the header does not naturally end on an 8-byte boundary up to 7 bytes of /// nul-initialized alignment padding must be added. /// https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-header-fields struct Header { endianess_flag: EndianessFlag, /// Message type. Unknown types must be ignored. 
message_type: MessageType, /// Bitwise OR of flags. Unknown flags must be ignored. flags: HeaderFlags, /// Major protocol version of the sending application. /// If the major protocol version of the receiving application does not match, /// the applications will not be able to communicate and the D-Bus connection must be disconnected. major_protocol_version: MajorProtocolVersion, /// Length in bytes of the message body, starting from the end of the header. /// The header ends after its alignment padding to an 8-boundary. length_message_body: u32, /// The serial of this message, used as a cookie by the sender to identify /// the reply corresponding to this request. This must not be zero. serial: Serial, /// An array of zero or more header fields where the byte is the field code, /// and the variant is the field value. The message type determines which fields are required. header_fields: Vec<(HeaderFieldCode, HeaderField)>, } impl DbusWrite for Header { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { writer.write_u8(self.endianess_flag as u8)?; writer.write_u8(self.message_type as u8)?; writer.write_u8(self.flags.bits())?; writer.write_u8(self.major_protocol_version.0)?; writer.write_u32::<T2>(self.length_message_body)?; writer.write_u32::<T2>(self.serial.0)?; for (ref code, ref field) in self.header_fields.iter().by_ref() { writer.write_u8(code.clone() as u8)?; field.write::<T1, T2>(writer)?; } Ok(()) } } struct Body { } impl DbusWrite for Body { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { unimplemented!(); } }
struct Message { /// The message delivery system uses the header information to figure out /// where to send the message and how to interpret it. header: Header,
random_line_split
message.rs
//! https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-marshaling use byteorder::{LittleEndian, BigEndian, ReadBytesExt, ByteOrder, WriteBytesExt}; use crate::names::{BusName, InterfaceName, ErrorName, MemberName}; use crate::writer::{DbusWriter, DbusWrite}; use crate::reader::{DbusReader, DbusRead}; use crate::type_system::{ObjectPath, Signature, UnixFd, Serial}; use std::io; #[cfg(test)] mod tests { fn reverse<T: Clone>(xs: &[T]) -> Vec<T> { let mut rev = vec!(); for x in xs.iter() { rev.insert(0, x.clone()) } rev } #[cfg(test)] mod tests { use crate::message::tests::reverse; quickcheck! { fn prop(xs: Vec<u32>) -> bool { xs == reverse(&reverse(&xs)) } } } } /// The maximum length of a message, including header, header alignment padding, /// and body is 2 to the 27th power or 134217728 (128 MiB). /// Implementations must not send or accept messages exceeding this size. const MAX_MESSAGE_SIZE: u32 = 2^27; /// A message consists of a header and a body. If you think of a message as a package, /// the header is the address, and the body contains the package contents. /// Both header and body use the D-Bus [type system](https://dbus.freedesktop.org/doc/dbus-specification.html#type-system) and format for serializing data. struct Message { /// The message delivery system uses the header information to figure out /// where to send the message and how to interpret it. header: Header, /// The body of the message is made up of zero or more arguments, /// which are typed values, such as an integer or a byte array. 
body: Body, } impl Message { fn write<T>(&self, writer:T) -> Result<(), io::Error> where T: io::Write { let mut writer = DbusWriter::new(writer); match self.header.endianess_flag { EndianessFlag::LittleEndian => { self.header.write::<T, LittleEndian>(&mut writer)?; self.body.write::<T, LittleEndian>(&mut writer)?; }, EndianessFlag::BigEndian => { self.header.write::<T, BigEndian>(&mut writer)?; self.body.write::<T, BigEndian>(&mut writer)?; }, }; Ok(()) } } /// Endianness flag; ASCII 'l' for little-endian or ASCII 'B' for big-endian. /// Both header and body are in this endianness. #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum EndianessFlag { LittleEndian, BigEndian, } impl DbusWrite for EndianessFlag { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { match self { EndianessFlag::LittleEndian => writer.write_u8(b'l'), EndianessFlag::BigEndian => writer.write_u8(b'B'), } } } impl DbusRead<EndianessFlag> for EndianessFlag { fn read<T1, T2>(&self, reader: &mut DbusReader<T1>) -> Result<EndianessFlag, io::Error> where T1: io::Read, T2: ByteOrder { match reader.read_u8()? { b'l' => Ok(EndianessFlag::LittleEndian), b'B' => Ok(EndianessFlag::BigEndian), x => { let str_err = format!("Invalid endianess `{}`", x); Err(io::Error::new(io::ErrorKind::InvalidData, str_err)) }, } } } /// Message type. Unknown types must be ignored. #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum MessageType { /// This is an invalid type. Invalid = 0, /// Method call. This message type may prompt a reply. MethodCall = 1, /// Method reply with returned data. MethodReturn = 2, /// Error reply. If the first argument exists /// and is a string, it is an error message. Error = 3, /// Signal emission. Signal = 4, } /// Major protocol version of the sending application. 
/// If the major protocol version of the receiving application does not match, /// the applications will not be able to communicate and the D-Bus connection must be disconnected. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct MajorProtocolVersion(pub u8); impl DbusWrite for MajorProtocolVersion { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { writer.write_u8(self.0) } } bitflags! { struct HeaderFlags: u8 { /// This message does not expect method return replies or error replies, /// even if it is of a type that can have a reply; the reply should be omitted. const NO_REPLY_EXPECTED = 0x1; /// The bus must not launch an owner for the destination name in response to this message. const NO_AUTO_START = 0x1; /// This flag may be set on a method call message to inform the receiving side that the caller /// is prepared to wait for interactive authorization, which might take a considerable time to complete. const ALLOW_INTERACTIVE_AUTHORIZATION = 0x4; } } /// The array at the end of the header contains header fields, /// where each field is a 1-byte field code followed by a field value. /// A header must contain the required header fields for its message type, /// and zero or more of any optional header fields. #[repr(u8)] #[derive(Copy, Clone, Debug, PartialEq, Eq)] enum HeaderFieldCode { /// Not a valid field name (error if it appears in a message) Invalid = 0, /// The object to send a call to, or the object a signal is emitted from. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Path = 1, /// The interface to invoke a method call on, or that a signal is emitted from. /// Required in `MessageType::Signal`. Interface = 2, /// The member, either the method name or signal name. /// This header field is controlled by the message sender. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Member = 3, /// The name of the error that occurred, for errors. 
/// Required in `MessageType::Error`. ErrorName = 4, /// The serial number of the message this message is a reply to. /// Required in `MessageType::Error` and `MessageType::MethodReturn`. ReplySerial = 5, /// The name of the connection this message is intended for. /// Optional. Destination = 6, /// Unique name of the sending connection. This field is usually only meaningful /// in combination with the message bus, but other servers may define their own meanings for it. /// Optional. Sender = 7, /// The signature of the message body. If omitted, it is assumed to be the empty signature "". /// Optional. Signature = 8, /// The number of Unix file descriptors that accompany the message. /// If omitted, it is assumed that no Unix file descriptors accompany the message. UnixFds = 9, } /// The array at the end of the header contains header fields, /// where each field is a 1-byte field code followed by a field value. /// A header must contain the required header fields for its message type, /// and zero or more of any optional header fields. /// #[repr(u8)] enum HeaderField { /// Not a valid field name (error if it appears in a message) Invalid, /// The object to send a call to, or the object a signal is emitted from. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Path(ObjectPath), /// The interface to invoke a method call on, or that a signal is emitted from. /// Required in `MessageType::Signal`. Interface(InterfaceName), /// The member, either the method name or signal name. /// This header field is controlled by the message sender. /// Required in `MessageType::MethodCall` and `MessageType::Signal`. Member(MemberName), /// The name of the error that occurred, for errors. /// Required in `MessageType::Error`. ErrorName(ErrorName), /// The serial number of the message this message is a reply to. /// Required in `MessageType::Error` and `MessageType::MethodReturn`. ReplySerial(Serial), /// The name of the connection this message is intended for. 
/// Optional. Destination(String), /// Unique name of the sending connection. This field is usually only meaningful /// in combination with the message bus, but other servers may define their own meanings for it. /// Optional. Sender(String), /// The signature of the message body. If omitted, it is assumed to be the empty signature "". /// Optional. Signature(Signature), /// The number of Unix file descriptors that accompany the message. /// If omitted, it is assumed that no Unix file descriptors accompany the message. UnixFds(u32), } impl DbusWrite for HeaderField { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { match self { HeaderField::Invalid => return Err(io::Error::new(io::ErrorKind::InvalidInput, "HeaderField::Invalid can not be marshaled!")), HeaderField::Path(object_path) => object_path.write::<_, T2>(writer)?, HeaderField::Interface(interface_name) => interface_name.write::<_, T2>(writer)?, HeaderField::Member(member_name) => member_name.write::<_, T2>(writer)?, HeaderField::ErrorName(error_name) => error_name.write::<_, T2>(writer)?, HeaderField::ReplySerial(serial) => serial.write::<_, T2>(writer)?, HeaderField::Destination(destination) => writer.write_string::<T2>(destination)?, HeaderField::Sender(sender) => writer.write_string::<T2>(sender)?, HeaderField::Signature(signature) => signature.write::<_, T2>(writer)?, HeaderField::UnixFds(fd) => writer.write_u32::<T2>(*fd)?, }; Ok(()) } } /// The length of the header must be a multiple of 8, allowing the body to begin on /// an 8-byte boundary when storing the entire message in a single buffer. /// If the header does not naturally end on an 8-byte boundary up to 7 bytes of /// nul-initialized alignment padding must be added. /// https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-header-fields struct Header { endianess_flag: EndianessFlag, /// Message type. Unknown types must be ignored. 
message_type: MessageType, /// Bitwise OR of flags. Unknown flags must be ignored. flags: HeaderFlags, /// Major protocol version of the sending application. /// If the major protocol version of the receiving application does not match, /// the applications will not be able to communicate and the D-Bus connection must be disconnected. major_protocol_version: MajorProtocolVersion, /// Length in bytes of the message body, starting from the end of the header. /// The header ends after its alignment padding to an 8-boundary. length_message_body: u32, /// The serial of this message, used as a cookie by the sender to identify /// the reply corresponding to this request. This must not be zero. serial: Serial, /// An array of zero or more header fields where the byte is the field code, /// and the variant is the field value. The message type determines which fields are required. header_fields: Vec<(HeaderFieldCode, HeaderField)>, } impl DbusWrite for Header { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder
} struct Body { } impl DbusWrite for Body { fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error> where T1: io::Write, T2: ByteOrder { unimplemented!(); } }
{ writer.write_u8(self.endianess_flag as u8)?; writer.write_u8(self.message_type as u8)?; writer.write_u8(self.flags.bits())?; writer.write_u8(self.major_protocol_version.0)?; writer.write_u32::<T2>(self.length_message_body)?; writer.write_u32::<T2>(self.serial.0)?; for (ref code, ref field) in self.header_fields.iter().by_ref() { writer.write_u8(code.clone() as u8)?; field.write::<T1, T2>(writer)?; } Ok(()) }
identifier_body
genotype.go
package genotype // // this package implements // import ( "bufio" "bytes" "compress/gzip" "encoding/gob" "fmt" "hash/fnv" "log" "os" "strings" "time" ) const ( NO_CGST = "NA" ) // QueryList contains indexed queries and the names of all sequences type QueryList struct { Index QueryIndex Names []string // list of query fasta names SeedSize int Cgst CGST } // QueryIndex maps hashes of kmers to all possible source locations type QueryIndex map[uint32][]QueryPos // QueryPos provides an index into a sequence name, and a position type QueryPos struct { Name int // array index to name of query Pos int // position of kmer in query Content string // entire query sequence } // GeneName names a short sequence type GeneName string // AlleleResult is a list of allele names type AlleleResult map[string]bool // GenomeAlleleResult maps gene to list of allele names type GenomeAlleleResult map[GeneName]AlleleResult // stringToHash returns a 32-bit hash of a string func stringToHash(s string) uint32 { h := fnv.New32a() h.Write([]byte(s)) return h.Sum32() } func logm(level string, msg string, verbose bool) { if verbose || level != "DEBUG" { fmt.Fprintf(os.Stderr, "%s: %s: %s\n", time.Now().String(), level, msg) } } func checkResult(e error) { if e != nil { log.Fatal(e) } } // joinAlleles takes a list of alleles and returns a comma separated list of them func joinAlleles(alleles AlleleResult) string { keys := make([]string, 0, len(alleles)) for k := range alleles { keys = append(keys, k) } return strings.Join(keys, ",") } // reverse complements a single nucleotide func reverse(in byte) byte { result, ok := REVERSE_MAP[in] if !ok { log.Fatal("failed to reverse complement") } return result } var REVERSE_MAP = map[byte]byte{'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'} // reverseComplement reverses and complements a string func reverseComplement(in *bytes.Buffer) []byte { var result []byte = make([]byte, in.Len(), in.Len()) for pos := in.Len() - 1; pos >= 0; pos-- { current, ok := in.ReadByte() 
checkResult(ok) result[pos] = reverse(current) } return result } // addSearchableSequence adds an allele sequence to the hash func addSearchableSequence(content string, sequence int, db QueryList) { // for an exact match we only have to hash the start of the query if len(content) < db.SeedSize
position := 0 kmer := content[position : position+db.SeedSize] kmerHash := stringToHash(kmer) entry := QueryPos{Name: sequence, Pos: position, Content: content} query, ok := db.Index[kmerHash] if !ok { query = make([]QueryPos, 0) } query = append(query, entry) db.Index[kmerHash] = query } // searchSequence iterates over content and populates result with genes and matching func searchSequence(content string, db QueryList, result GenomeAlleleResult, verbose bool, reverseComplement bool) { // populates result with a map from gene name to list of found alleles for position := 0; position <= len(content)-db.SeedSize; position += 1 { kmer := content[position : position+db.SeedSize] // kmer at curreent position in content kmerHash := stringToHash(kmer) query, ok := db.Index[kmerHash] if ok { // logm("DEBUG", fmt.Sprintf("found %v potential locations for %s", len(query), content), verbose) for _, candidate := range query { // check for a match if position-candidate.Pos >= 0 && position-candidate.Pos+len(candidate.Content) <= len(content) && content[position-candidate.Pos:position-candidate.Pos+len(candidate.Content)] == candidate.Content { // it's a match, split the sequence name into gene and allele geneAllele := strings.Split(db.Names[candidate.Name], "_") alleles, ok := result[GeneName(geneAllele[0])] if !ok { alleles = make(AlleleResult, 0) } alleles[geneAllele[1]] = true if reverseComplement { logm("DEBUG", fmt.Sprintf("%s found at reverse complement -%v (%v)", db.Names[candidate.Name], position-candidate.Pos, len(content)-len(candidate.Content)-position+candidate.Pos), verbose) } else { logm("DEBUG", fmt.Sprintf("%s found at %v", db.Names[candidate.Name], position-candidate.Pos), verbose) } result[GeneName(geneAllele[0])] = alleles } else { // logm("DEBUG", fmt.Sprintf("didn't match %s", candidate.Content), verbose) } } } else { // logm("DEBUG", fmt.Sprintf("didn't find hash for %s", content), verbose) } } } // IndexSequences generates an index from a gzipped list of 
alleles for genotyping func IndexSequences(sequences []string, cgstFilename string, seedSize int, verbose bool) QueryList { logm("INFO", fmt.Sprintf("processing with cgst file '%s', seed size %v: %v sequence file(s)", cgstFilename, seedSize, len(sequences)), verbose) var queryList QueryList queryList.SeedSize = seedSize queryList.Index = make(QueryIndex) queryList.Cgst = CreateCGST(cgstFilename, verbose) var content *bytes.Buffer lines := 0 sequenceCount := 0 for _, sequenceFilename := range sequences { logm("INFO", fmt.Sprintf("processing '%s'...", sequenceFilename), verbose) // open sequences (either .fa or .fa.gz) file for reading file, err := os.Open(sequenceFilename) checkResult(err) var scanner *bufio.Scanner if strings.HasSuffix(sequenceFilename, ".gz") { gr, err := gzip.NewReader(file) checkResult(err) scanner = bufio.NewScanner(gr) } else { scanner = bufio.NewScanner(file) } // index sequences for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, ">") { if content != nil { addSearchableSequence(content.String(), sequenceCount-1, queryList) } queryList.Names = append(queryList.Names, line[1:]) sequenceCount++ content = new(bytes.Buffer) } else { (*content).WriteString(line) } lines++ if lines%1000000 == 0 { logm("INFO", fmt.Sprintf("processing %s: %v lines %v sequences. %v kmers", sequenceFilename, lines, len(queryList.Names), len(queryList.Index)), false) } } addSearchableSequence(content.String(), sequenceCount-1, queryList) logm("INFO", fmt.Sprintf("processing '%s': done", sequenceFilename), verbose) file.Close() } logm("INFO", fmt.Sprintf("processing %v file(s): done. 
%v sequences", len(sequences), len(queryList.Names)), verbose) return queryList } // SaveIndex writes an indexed collection of indexes for use with the genotype command func SaveIndex(target string, source QueryList, verbose bool) { logm("INFO", fmt.Sprintf("saving index to %s...", target), verbose) file, err := os.Create(target) checkResult(err) defer file.Close() gr := gzip.NewWriter(file) defer gr.Close() encoder := gob.NewEncoder(gr) err = encoder.Encode(source.Names) checkResult(err) logm("INFO", fmt.Sprintf("%v sequence names saved", len(source.Names)), verbose) err = encoder.Encode(source.SeedSize) checkResult(err) err = encoder.Encode(source.Cgst) checkResult(err) // save the index, but go has a size limit indexSize := len(source.Index) err = encoder.Encode(indexSize) checkResult(err) logm("INFO", fmt.Sprintf("%v queries to save...", indexSize), verbose) count := 0 for key, value := range source.Index { err = encoder.Encode(key) checkResult(err) err = encoder.Encode(value) checkResult(err) count++ if count%10000 == 0 { logm("INFO", fmt.Sprintf("processing: saved %v items", count), false) } } logm("INFO", fmt.Sprintf("saving index to %s: done", target), verbose) } func LoadIndex(source string, verbose bool) QueryList { logm("INFO", fmt.Sprintf("loading index from %s...", source), verbose) var result QueryList // open fa.gz file file, err := os.Open(source) checkResult(err) defer file.Close() gr, err := gzip.NewReader(file) checkResult(err) defer gr.Close() decoder := gob.NewDecoder(gr) err = decoder.Decode(&result.Names) checkResult(err) logm("INFO", fmt.Sprintf("%v sequence names restored", len(result.Names)), verbose) err = decoder.Decode(&result.SeedSize) checkResult(err) err = decoder.Decode(&result.Cgst) checkResult(err) var indexSize int err = decoder.Decode(&indexSize) checkResult(err) result.Index = make(QueryIndex) count := 0 for i := 0; i < indexSize; i++ { var key uint32 var val []QueryPos err = decoder.Decode(&key) checkResult(err) err = 
decoder.Decode(&val) checkResult(err) result.Index[key] = val count++ if count%10000 == 0 { logm("INFO", fmt.Sprintf("processing: loaded %v items", count), false) } // logm("DEBUG", fmt.Sprintf("last key: %v, values: %v", key, len(val)), verbose) } logm("INFO", fmt.Sprintf("loading index from %s - loaded %v: done", source, len(result.Index)), verbose) return result } // FindAlleles finds alleles that match a genome and generates cgst information func FindAlleles(db QueryList, mismatches int, genomes []string, verbose bool) { logm("INFO", "find alleles...", verbose) // genotype each genome - list matching sequences fmt.Fprintf(os.Stdout, "Filename\tcgST\t%v\n", strings.Join(db.Cgst.GeneNames, "\t")) var lines int var sequenceCount int var content *bytes.Buffer for _, genomeFilename := range genomes { logm("INFO", fmt.Sprintf("processing genome: %s", genomeFilename), verbose) var result GenomeAlleleResult = make(GenomeAlleleResult) file, err := os.Open(genomeFilename) checkResult(err) defer file.Close() lines = 0 sequenceCount = 0 content = new(bytes.Buffer) r := bufio.NewReader(file) scanner := bufio.NewScanner(r) for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, ">") { if content != nil { searchSequence(content.String(), db, result, verbose, false) searchSequence(string(reverseComplement(content)), db, result, verbose, true) } sequenceCount++ content = new(bytes.Buffer) } else { (*content).WriteString(line) } lines++ } searchSequence(content.String(), db, result, verbose, false) searchSequence(string(reverseComplement(content)), db, result, verbose, true) logm("INFO", fmt.Sprintf("done genome: %s. %v lines. 
%v sequences.", genomeFilename, lines, sequenceCount), verbose) writeResults(genomeFilename, result, db.Cgst) } logm("INFO", "find alleles: done", verbose) } func writeResults(filename string, result GenomeAlleleResult, cgst CGST) { genomeAlleles := make([]string, 0, len(cgst.GeneNames)) for _, gene := range cgst.GeneNames { alleles, ok := result[GeneName(gene)] if ok { genomeAlleles = append(genomeAlleles, joinAlleles(alleles)) } else { genomeAlleles = append(genomeAlleles, "N") } } // see if it matches a cgstId alleles := strings.Join(genomeAlleles, "\t") /* cgstId, ok := cgst.FindFirst(genomeAlleles) if !ok { cgstId = NO_CGST } */ cgstId, matches := cgst.FindBest(genomeAlleles) if matches == len(alleles) { fmt.Fprintf(os.Stdout, "%s\t%s\t%s\n", filename, cgstId, alleles) // filename, cgST, alleles } else { fmt.Fprintf(os.Stdout, "%s\t%s (%v/%v)\t%s\n", filename, cgstId, matches, len(alleles), alleles) // filename, cgST, alleles } }
{ logm("WARN", fmt.Sprintf("sequence %v is length %v, shorter than seed size %v", sequence, len(content), db.SeedSize), false) return }
conditional_block
genotype.go
package genotype // // this package implements // import ( "bufio" "bytes" "compress/gzip" "encoding/gob" "fmt" "hash/fnv" "log" "os" "strings" "time" ) const ( NO_CGST = "NA" ) // QueryList contains indexed queries and the names of all sequences type QueryList struct { Index QueryIndex Names []string // list of query fasta names SeedSize int Cgst CGST } // QueryIndex maps hashes of kmers to all possible source locations type QueryIndex map[uint32][]QueryPos // QueryPos provides an index into a sequence name, and a position type QueryPos struct { Name int // array index to name of query Pos int // position of kmer in query Content string // entire query sequence } // GeneName names a short sequence type GeneName string // AlleleResult is a list of allele names type AlleleResult map[string]bool // GenomeAlleleResult maps gene to list of allele names type GenomeAlleleResult map[GeneName]AlleleResult // stringToHash returns a 32-bit hash of a string func stringToHash(s string) uint32 { h := fnv.New32a() h.Write([]byte(s)) return h.Sum32() } func logm(level string, msg string, verbose bool) { if verbose || level != "DEBUG" { fmt.Fprintf(os.Stderr, "%s: %s: %s\n", time.Now().String(), level, msg) } } func checkResult(e error) { if e != nil { log.Fatal(e) } } // joinAlleles takes a list of alleles and returns a comma separated list of them func joinAlleles(alleles AlleleResult) string { keys := make([]string, 0, len(alleles)) for k := range alleles { keys = append(keys, k) } return strings.Join(keys, ",") } // reverse complements a single nucleotide func reverse(in byte) byte { result, ok := REVERSE_MAP[in] if !ok { log.Fatal("failed to reverse complement") } return result } var REVERSE_MAP = map[byte]byte{'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'} // reverseComplement reverses and complements a string func reverseComplement(in *bytes.Buffer) []byte { var result []byte = make([]byte, in.Len(), in.Len()) for pos := in.Len() - 1; pos >= 0; pos-- { current, ok := in.ReadByte() 
checkResult(ok) result[pos] = reverse(current) } return result } // addSearchableSequence adds an allele sequence to the hash func addSearchableSequence(content string, sequence int, db QueryList)
// searchSequence iterates over content and populates result with genes and matching func searchSequence(content string, db QueryList, result GenomeAlleleResult, verbose bool, reverseComplement bool) { // populates result with a map from gene name to list of found alleles for position := 0; position <= len(content)-db.SeedSize; position += 1 { kmer := content[position : position+db.SeedSize] // kmer at curreent position in content kmerHash := stringToHash(kmer) query, ok := db.Index[kmerHash] if ok { // logm("DEBUG", fmt.Sprintf("found %v potential locations for %s", len(query), content), verbose) for _, candidate := range query { // check for a match if position-candidate.Pos >= 0 && position-candidate.Pos+len(candidate.Content) <= len(content) && content[position-candidate.Pos:position-candidate.Pos+len(candidate.Content)] == candidate.Content { // it's a match, split the sequence name into gene and allele geneAllele := strings.Split(db.Names[candidate.Name], "_") alleles, ok := result[GeneName(geneAllele[0])] if !ok { alleles = make(AlleleResult, 0) } alleles[geneAllele[1]] = true if reverseComplement { logm("DEBUG", fmt.Sprintf("%s found at reverse complement -%v (%v)", db.Names[candidate.Name], position-candidate.Pos, len(content)-len(candidate.Content)-position+candidate.Pos), verbose) } else { logm("DEBUG", fmt.Sprintf("%s found at %v", db.Names[candidate.Name], position-candidate.Pos), verbose) } result[GeneName(geneAllele[0])] = alleles } else { // logm("DEBUG", fmt.Sprintf("didn't match %s", candidate.Content), verbose) } } } else { // logm("DEBUG", fmt.Sprintf("didn't find hash for %s", content), verbose) } } } // IndexSequences generates an index from a gzipped list of alleles for genotyping func IndexSequences(sequences []string, cgstFilename string, seedSize int, verbose bool) QueryList { logm("INFO", fmt.Sprintf("processing with cgst file '%s', seed size %v: %v sequence file(s)", cgstFilename, seedSize, len(sequences)), verbose) var queryList 
QueryList queryList.SeedSize = seedSize queryList.Index = make(QueryIndex) queryList.Cgst = CreateCGST(cgstFilename, verbose) var content *bytes.Buffer lines := 0 sequenceCount := 0 for _, sequenceFilename := range sequences { logm("INFO", fmt.Sprintf("processing '%s'...", sequenceFilename), verbose) // open sequences (either .fa or .fa.gz) file for reading file, err := os.Open(sequenceFilename) checkResult(err) var scanner *bufio.Scanner if strings.HasSuffix(sequenceFilename, ".gz") { gr, err := gzip.NewReader(file) checkResult(err) scanner = bufio.NewScanner(gr) } else { scanner = bufio.NewScanner(file) } // index sequences for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, ">") { if content != nil { addSearchableSequence(content.String(), sequenceCount-1, queryList) } queryList.Names = append(queryList.Names, line[1:]) sequenceCount++ content = new(bytes.Buffer) } else { (*content).WriteString(line) } lines++ if lines%1000000 == 0 { logm("INFO", fmt.Sprintf("processing %s: %v lines %v sequences. %v kmers", sequenceFilename, lines, len(queryList.Names), len(queryList.Index)), false) } } addSearchableSequence(content.String(), sequenceCount-1, queryList) logm("INFO", fmt.Sprintf("processing '%s': done", sequenceFilename), verbose) file.Close() } logm("INFO", fmt.Sprintf("processing %v file(s): done. 
%v sequences", len(sequences), len(queryList.Names)), verbose) return queryList } // SaveIndex writes an indexed collection of indexes for use with the genotype command func SaveIndex(target string, source QueryList, verbose bool) { logm("INFO", fmt.Sprintf("saving index to %s...", target), verbose) file, err := os.Create(target) checkResult(err) defer file.Close() gr := gzip.NewWriter(file) defer gr.Close() encoder := gob.NewEncoder(gr) err = encoder.Encode(source.Names) checkResult(err) logm("INFO", fmt.Sprintf("%v sequence names saved", len(source.Names)), verbose) err = encoder.Encode(source.SeedSize) checkResult(err) err = encoder.Encode(source.Cgst) checkResult(err) // save the index, but go has a size limit indexSize := len(source.Index) err = encoder.Encode(indexSize) checkResult(err) logm("INFO", fmt.Sprintf("%v queries to save...", indexSize), verbose) count := 0 for key, value := range source.Index { err = encoder.Encode(key) checkResult(err) err = encoder.Encode(value) checkResult(err) count++ if count%10000 == 0 { logm("INFO", fmt.Sprintf("processing: saved %v items", count), false) } } logm("INFO", fmt.Sprintf("saving index to %s: done", target), verbose) } func LoadIndex(source string, verbose bool) QueryList { logm("INFO", fmt.Sprintf("loading index from %s...", source), verbose) var result QueryList // open fa.gz file file, err := os.Open(source) checkResult(err) defer file.Close() gr, err := gzip.NewReader(file) checkResult(err) defer gr.Close() decoder := gob.NewDecoder(gr) err = decoder.Decode(&result.Names) checkResult(err) logm("INFO", fmt.Sprintf("%v sequence names restored", len(result.Names)), verbose) err = decoder.Decode(&result.SeedSize) checkResult(err) err = decoder.Decode(&result.Cgst) checkResult(err) var indexSize int err = decoder.Decode(&indexSize) checkResult(err) result.Index = make(QueryIndex) count := 0 for i := 0; i < indexSize; i++ { var key uint32 var val []QueryPos err = decoder.Decode(&key) checkResult(err) err = 
decoder.Decode(&val) checkResult(err) result.Index[key] = val count++ if count%10000 == 0 { logm("INFO", fmt.Sprintf("processing: loaded %v items", count), false) } // logm("DEBUG", fmt.Sprintf("last key: %v, values: %v", key, len(val)), verbose) } logm("INFO", fmt.Sprintf("loading index from %s - loaded %v: done", source, len(result.Index)), verbose) return result } // FindAlleles finds alleles that match a genome and generates cgst information func FindAlleles(db QueryList, mismatches int, genomes []string, verbose bool) { logm("INFO", "find alleles...", verbose) // genotype each genome - list matching sequences fmt.Fprintf(os.Stdout, "Filename\tcgST\t%v\n", strings.Join(db.Cgst.GeneNames, "\t")) var lines int var sequenceCount int var content *bytes.Buffer for _, genomeFilename := range genomes { logm("INFO", fmt.Sprintf("processing genome: %s", genomeFilename), verbose) var result GenomeAlleleResult = make(GenomeAlleleResult) file, err := os.Open(genomeFilename) checkResult(err) defer file.Close() lines = 0 sequenceCount = 0 content = new(bytes.Buffer) r := bufio.NewReader(file) scanner := bufio.NewScanner(r) for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, ">") { if content != nil { searchSequence(content.String(), db, result, verbose, false) searchSequence(string(reverseComplement(content)), db, result, verbose, true) } sequenceCount++ content = new(bytes.Buffer) } else { (*content).WriteString(line) } lines++ } searchSequence(content.String(), db, result, verbose, false) searchSequence(string(reverseComplement(content)), db, result, verbose, true) logm("INFO", fmt.Sprintf("done genome: %s. %v lines. 
%v sequences.", genomeFilename, lines, sequenceCount), verbose) writeResults(genomeFilename, result, db.Cgst) } logm("INFO", "find alleles: done", verbose) } func writeResults(filename string, result GenomeAlleleResult, cgst CGST) { genomeAlleles := make([]string, 0, len(cgst.GeneNames)) for _, gene := range cgst.GeneNames { alleles, ok := result[GeneName(gene)] if ok { genomeAlleles = append(genomeAlleles, joinAlleles(alleles)) } else { genomeAlleles = append(genomeAlleles, "N") } } // see if it matches a cgstId alleles := strings.Join(genomeAlleles, "\t") /* cgstId, ok := cgst.FindFirst(genomeAlleles) if !ok { cgstId = NO_CGST } */ cgstId, matches := cgst.FindBest(genomeAlleles) if matches == len(alleles) { fmt.Fprintf(os.Stdout, "%s\t%s\t%s\n", filename, cgstId, alleles) // filename, cgST, alleles } else { fmt.Fprintf(os.Stdout, "%s\t%s (%v/%v)\t%s\n", filename, cgstId, matches, len(alleles), alleles) // filename, cgST, alleles } }
{ // for an exact match we only have to hash the start of the query if len(content) < db.SeedSize { logm("WARN", fmt.Sprintf("sequence %v is length %v, shorter than seed size %v", sequence, len(content), db.SeedSize), false) return } position := 0 kmer := content[position : position+db.SeedSize] kmerHash := stringToHash(kmer) entry := QueryPos{Name: sequence, Pos: position, Content: content} query, ok := db.Index[kmerHash] if !ok { query = make([]QueryPos, 0) } query = append(query, entry) db.Index[kmerHash] = query }
identifier_body
genotype.go
package genotype // // this package implements // import ( "bufio" "bytes" "compress/gzip" "encoding/gob" "fmt" "hash/fnv" "log" "os" "strings" "time" ) const ( NO_CGST = "NA" ) // QueryList contains indexed queries and the names of all sequences type QueryList struct { Index QueryIndex Names []string // list of query fasta names SeedSize int Cgst CGST } // QueryIndex maps hashes of kmers to all possible source locations type QueryIndex map[uint32][]QueryPos // QueryPos provides an index into a sequence name, and a position type QueryPos struct { Name int // array index to name of query Pos int // position of kmer in query Content string // entire query sequence } // GeneName names a short sequence type GeneName string // AlleleResult is a list of allele names type AlleleResult map[string]bool // GenomeAlleleResult maps gene to list of allele names type GenomeAlleleResult map[GeneName]AlleleResult // stringToHash returns a 32-bit hash of a string func stringToHash(s string) uint32 { h := fnv.New32a() h.Write([]byte(s)) return h.Sum32() } func
(level string, msg string, verbose bool) { if verbose || level != "DEBUG" { fmt.Fprintf(os.Stderr, "%s: %s: %s\n", time.Now().String(), level, msg) } } func checkResult(e error) { if e != nil { log.Fatal(e) } } // joinAlleles takes a list of alleles and returns a comma separated list of them func joinAlleles(alleles AlleleResult) string { keys := make([]string, 0, len(alleles)) for k := range alleles { keys = append(keys, k) } return strings.Join(keys, ",") } // reverse complements a single nucleotide func reverse(in byte) byte { result, ok := REVERSE_MAP[in] if !ok { log.Fatal("failed to reverse complement") } return result } var REVERSE_MAP = map[byte]byte{'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'} // reverseComplement reverses and complements a string func reverseComplement(in *bytes.Buffer) []byte { var result []byte = make([]byte, in.Len(), in.Len()) for pos := in.Len() - 1; pos >= 0; pos-- { current, ok := in.ReadByte() checkResult(ok) result[pos] = reverse(current) } return result } // addSearchableSequence adds an allele sequence to the hash func addSearchableSequence(content string, sequence int, db QueryList) { // for an exact match we only have to hash the start of the query if len(content) < db.SeedSize { logm("WARN", fmt.Sprintf("sequence %v is length %v, shorter than seed size %v", sequence, len(content), db.SeedSize), false) return } position := 0 kmer := content[position : position+db.SeedSize] kmerHash := stringToHash(kmer) entry := QueryPos{Name: sequence, Pos: position, Content: content} query, ok := db.Index[kmerHash] if !ok { query = make([]QueryPos, 0) } query = append(query, entry) db.Index[kmerHash] = query } // searchSequence iterates over content and populates result with genes and matching func searchSequence(content string, db QueryList, result GenomeAlleleResult, verbose bool, reverseComplement bool) { // populates result with a map from gene name to list of found alleles for position := 0; position <= len(content)-db.SeedSize; position += 
1 { kmer := content[position : position+db.SeedSize] // kmer at curreent position in content kmerHash := stringToHash(kmer) query, ok := db.Index[kmerHash] if ok { // logm("DEBUG", fmt.Sprintf("found %v potential locations for %s", len(query), content), verbose) for _, candidate := range query { // check for a match if position-candidate.Pos >= 0 && position-candidate.Pos+len(candidate.Content) <= len(content) && content[position-candidate.Pos:position-candidate.Pos+len(candidate.Content)] == candidate.Content { // it's a match, split the sequence name into gene and allele geneAllele := strings.Split(db.Names[candidate.Name], "_") alleles, ok := result[GeneName(geneAllele[0])] if !ok { alleles = make(AlleleResult, 0) } alleles[geneAllele[1]] = true if reverseComplement { logm("DEBUG", fmt.Sprintf("%s found at reverse complement -%v (%v)", db.Names[candidate.Name], position-candidate.Pos, len(content)-len(candidate.Content)-position+candidate.Pos), verbose) } else { logm("DEBUG", fmt.Sprintf("%s found at %v", db.Names[candidate.Name], position-candidate.Pos), verbose) } result[GeneName(geneAllele[0])] = alleles } else { // logm("DEBUG", fmt.Sprintf("didn't match %s", candidate.Content), verbose) } } } else { // logm("DEBUG", fmt.Sprintf("didn't find hash for %s", content), verbose) } } } // IndexSequences generates an index from a gzipped list of alleles for genotyping func IndexSequences(sequences []string, cgstFilename string, seedSize int, verbose bool) QueryList { logm("INFO", fmt.Sprintf("processing with cgst file '%s', seed size %v: %v sequence file(s)", cgstFilename, seedSize, len(sequences)), verbose) var queryList QueryList queryList.SeedSize = seedSize queryList.Index = make(QueryIndex) queryList.Cgst = CreateCGST(cgstFilename, verbose) var content *bytes.Buffer lines := 0 sequenceCount := 0 for _, sequenceFilename := range sequences { logm("INFO", fmt.Sprintf("processing '%s'...", sequenceFilename), verbose) // open sequences (either .fa or .fa.gz) file 
for reading file, err := os.Open(sequenceFilename) checkResult(err) var scanner *bufio.Scanner if strings.HasSuffix(sequenceFilename, ".gz") { gr, err := gzip.NewReader(file) checkResult(err) scanner = bufio.NewScanner(gr) } else { scanner = bufio.NewScanner(file) } // index sequences for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, ">") { if content != nil { addSearchableSequence(content.String(), sequenceCount-1, queryList) } queryList.Names = append(queryList.Names, line[1:]) sequenceCount++ content = new(bytes.Buffer) } else { (*content).WriteString(line) } lines++ if lines%1000000 == 0 { logm("INFO", fmt.Sprintf("processing %s: %v lines %v sequences. %v kmers", sequenceFilename, lines, len(queryList.Names), len(queryList.Index)), false) } } addSearchableSequence(content.String(), sequenceCount-1, queryList) logm("INFO", fmt.Sprintf("processing '%s': done", sequenceFilename), verbose) file.Close() } logm("INFO", fmt.Sprintf("processing %v file(s): done. %v sequences", len(sequences), len(queryList.Names)), verbose) return queryList } // SaveIndex writes an indexed collection of indexes for use with the genotype command func SaveIndex(target string, source QueryList, verbose bool) { logm("INFO", fmt.Sprintf("saving index to %s...", target), verbose) file, err := os.Create(target) checkResult(err) defer file.Close() gr := gzip.NewWriter(file) defer gr.Close() encoder := gob.NewEncoder(gr) err = encoder.Encode(source.Names) checkResult(err) logm("INFO", fmt.Sprintf("%v sequence names saved", len(source.Names)), verbose) err = encoder.Encode(source.SeedSize) checkResult(err) err = encoder.Encode(source.Cgst) checkResult(err) // save the index, but go has a size limit indexSize := len(source.Index) err = encoder.Encode(indexSize) checkResult(err) logm("INFO", fmt.Sprintf("%v queries to save...", indexSize), verbose) count := 0 for key, value := range source.Index { err = encoder.Encode(key) checkResult(err) err = encoder.Encode(value) 
checkResult(err) count++ if count%10000 == 0 { logm("INFO", fmt.Sprintf("processing: saved %v items", count), false) } } logm("INFO", fmt.Sprintf("saving index to %s: done", target), verbose) } func LoadIndex(source string, verbose bool) QueryList { logm("INFO", fmt.Sprintf("loading index from %s...", source), verbose) var result QueryList // open fa.gz file file, err := os.Open(source) checkResult(err) defer file.Close() gr, err := gzip.NewReader(file) checkResult(err) defer gr.Close() decoder := gob.NewDecoder(gr) err = decoder.Decode(&result.Names) checkResult(err) logm("INFO", fmt.Sprintf("%v sequence names restored", len(result.Names)), verbose) err = decoder.Decode(&result.SeedSize) checkResult(err) err = decoder.Decode(&result.Cgst) checkResult(err) var indexSize int err = decoder.Decode(&indexSize) checkResult(err) result.Index = make(QueryIndex) count := 0 for i := 0; i < indexSize; i++ { var key uint32 var val []QueryPos err = decoder.Decode(&key) checkResult(err) err = decoder.Decode(&val) checkResult(err) result.Index[key] = val count++ if count%10000 == 0 { logm("INFO", fmt.Sprintf("processing: loaded %v items", count), false) } // logm("DEBUG", fmt.Sprintf("last key: %v, values: %v", key, len(val)), verbose) } logm("INFO", fmt.Sprintf("loading index from %s - loaded %v: done", source, len(result.Index)), verbose) return result } // FindAlleles finds alleles that match a genome and generates cgst information func FindAlleles(db QueryList, mismatches int, genomes []string, verbose bool) { logm("INFO", "find alleles...", verbose) // genotype each genome - list matching sequences fmt.Fprintf(os.Stdout, "Filename\tcgST\t%v\n", strings.Join(db.Cgst.GeneNames, "\t")) var lines int var sequenceCount int var content *bytes.Buffer for _, genomeFilename := range genomes { logm("INFO", fmt.Sprintf("processing genome: %s", genomeFilename), verbose) var result GenomeAlleleResult = make(GenomeAlleleResult) file, err := os.Open(genomeFilename) checkResult(err) defer 
file.Close() lines = 0 sequenceCount = 0 content = new(bytes.Buffer) r := bufio.NewReader(file) scanner := bufio.NewScanner(r) for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, ">") { if content != nil { searchSequence(content.String(), db, result, verbose, false) searchSequence(string(reverseComplement(content)), db, result, verbose, true) } sequenceCount++ content = new(bytes.Buffer) } else { (*content).WriteString(line) } lines++ } searchSequence(content.String(), db, result, verbose, false) searchSequence(string(reverseComplement(content)), db, result, verbose, true) logm("INFO", fmt.Sprintf("done genome: %s. %v lines. %v sequences.", genomeFilename, lines, sequenceCount), verbose) writeResults(genomeFilename, result, db.Cgst) } logm("INFO", "find alleles: done", verbose) } func writeResults(filename string, result GenomeAlleleResult, cgst CGST) { genomeAlleles := make([]string, 0, len(cgst.GeneNames)) for _, gene := range cgst.GeneNames { alleles, ok := result[GeneName(gene)] if ok { genomeAlleles = append(genomeAlleles, joinAlleles(alleles)) } else { genomeAlleles = append(genomeAlleles, "N") } } // see if it matches a cgstId alleles := strings.Join(genomeAlleles, "\t") /* cgstId, ok := cgst.FindFirst(genomeAlleles) if !ok { cgstId = NO_CGST } */ cgstId, matches := cgst.FindBest(genomeAlleles) if matches == len(alleles) { fmt.Fprintf(os.Stdout, "%s\t%s\t%s\n", filename, cgstId, alleles) // filename, cgST, alleles } else { fmt.Fprintf(os.Stdout, "%s\t%s (%v/%v)\t%s\n", filename, cgstId, matches, len(alleles), alleles) // filename, cgST, alleles } }
logm
identifier_name
genotype.go
package genotype // // this package implements // import ( "bufio" "bytes" "compress/gzip" "encoding/gob" "fmt" "hash/fnv" "log" "os" "strings" "time" ) const ( NO_CGST = "NA" ) // QueryList contains indexed queries and the names of all sequences type QueryList struct { Index QueryIndex Names []string // list of query fasta names SeedSize int Cgst CGST } // QueryIndex maps hashes of kmers to all possible source locations type QueryIndex map[uint32][]QueryPos // QueryPos provides an index into a sequence name, and a position type QueryPos struct { Name int // array index to name of query Pos int // position of kmer in query Content string // entire query sequence } // GeneName names a short sequence type GeneName string // AlleleResult is a list of allele names type AlleleResult map[string]bool // GenomeAlleleResult maps gene to list of allele names type GenomeAlleleResult map[GeneName]AlleleResult // stringToHash returns a 32-bit hash of a string func stringToHash(s string) uint32 { h := fnv.New32a() h.Write([]byte(s)) return h.Sum32() } func logm(level string, msg string, verbose bool) { if verbose || level != "DEBUG" { fmt.Fprintf(os.Stderr, "%s: %s: %s\n", time.Now().String(), level, msg) } } func checkResult(e error) { if e != nil { log.Fatal(e) } } // joinAlleles takes a list of alleles and returns a comma separated list of them func joinAlleles(alleles AlleleResult) string { keys := make([]string, 0, len(alleles)) for k := range alleles { keys = append(keys, k) } return strings.Join(keys, ",") } // reverse complements a single nucleotide func reverse(in byte) byte { result, ok := REVERSE_MAP[in] if !ok { log.Fatal("failed to reverse complement") } return result } var REVERSE_MAP = map[byte]byte{'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'} // reverseComplement reverses and complements a string func reverseComplement(in *bytes.Buffer) []byte { var result []byte = make([]byte, in.Len(), in.Len()) for pos := in.Len() - 1; pos >= 0; pos-- { current, ok := in.ReadByte() 
checkResult(ok) result[pos] = reverse(current) } return result
} // addSearchableSequence adds an allele sequence to the hash func addSearchableSequence(content string, sequence int, db QueryList) { // for an exact match we only have to hash the start of the query if len(content) < db.SeedSize { logm("WARN", fmt.Sprintf("sequence %v is length %v, shorter than seed size %v", sequence, len(content), db.SeedSize), false) return } position := 0 kmer := content[position : position+db.SeedSize] kmerHash := stringToHash(kmer) entry := QueryPos{Name: sequence, Pos: position, Content: content} query, ok := db.Index[kmerHash] if !ok { query = make([]QueryPos, 0) } query = append(query, entry) db.Index[kmerHash] = query } // searchSequence iterates over content and populates result with genes and matching func searchSequence(content string, db QueryList, result GenomeAlleleResult, verbose bool, reverseComplement bool) { // populates result with a map from gene name to list of found alleles for position := 0; position <= len(content)-db.SeedSize; position += 1 { kmer := content[position : position+db.SeedSize] // kmer at curreent position in content kmerHash := stringToHash(kmer) query, ok := db.Index[kmerHash] if ok { // logm("DEBUG", fmt.Sprintf("found %v potential locations for %s", len(query), content), verbose) for _, candidate := range query { // check for a match if position-candidate.Pos >= 0 && position-candidate.Pos+len(candidate.Content) <= len(content) && content[position-candidate.Pos:position-candidate.Pos+len(candidate.Content)] == candidate.Content { // it's a match, split the sequence name into gene and allele geneAllele := strings.Split(db.Names[candidate.Name], "_") alleles, ok := result[GeneName(geneAllele[0])] if !ok { alleles = make(AlleleResult, 0) } alleles[geneAllele[1]] = true if reverseComplement { logm("DEBUG", fmt.Sprintf("%s found at reverse complement -%v (%v)", db.Names[candidate.Name], position-candidate.Pos, len(content)-len(candidate.Content)-position+candidate.Pos), verbose) } else { logm("DEBUG", 
fmt.Sprintf("%s found at %v", db.Names[candidate.Name], position-candidate.Pos), verbose) } result[GeneName(geneAllele[0])] = alleles } else { // logm("DEBUG", fmt.Sprintf("didn't match %s", candidate.Content), verbose) } } } else { // logm("DEBUG", fmt.Sprintf("didn't find hash for %s", content), verbose) } } } // IndexSequences generates an index from a gzipped list of alleles for genotyping func IndexSequences(sequences []string, cgstFilename string, seedSize int, verbose bool) QueryList { logm("INFO", fmt.Sprintf("processing with cgst file '%s', seed size %v: %v sequence file(s)", cgstFilename, seedSize, len(sequences)), verbose) var queryList QueryList queryList.SeedSize = seedSize queryList.Index = make(QueryIndex) queryList.Cgst = CreateCGST(cgstFilename, verbose) var content *bytes.Buffer lines := 0 sequenceCount := 0 for _, sequenceFilename := range sequences { logm("INFO", fmt.Sprintf("processing '%s'...", sequenceFilename), verbose) // open sequences (either .fa or .fa.gz) file for reading file, err := os.Open(sequenceFilename) checkResult(err) var scanner *bufio.Scanner if strings.HasSuffix(sequenceFilename, ".gz") { gr, err := gzip.NewReader(file) checkResult(err) scanner = bufio.NewScanner(gr) } else { scanner = bufio.NewScanner(file) } // index sequences for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, ">") { if content != nil { addSearchableSequence(content.String(), sequenceCount-1, queryList) } queryList.Names = append(queryList.Names, line[1:]) sequenceCount++ content = new(bytes.Buffer) } else { (*content).WriteString(line) } lines++ if lines%1000000 == 0 { logm("INFO", fmt.Sprintf("processing %s: %v lines %v sequences. %v kmers", sequenceFilename, lines, len(queryList.Names), len(queryList.Index)), false) } } addSearchableSequence(content.String(), sequenceCount-1, queryList) logm("INFO", fmt.Sprintf("processing '%s': done", sequenceFilename), verbose) file.Close() } logm("INFO", fmt.Sprintf("processing %v file(s): done. 
%v sequences", len(sequences), len(queryList.Names)), verbose) return queryList } // SaveIndex writes an indexed collection of indexes for use with the genotype command func SaveIndex(target string, source QueryList, verbose bool) { logm("INFO", fmt.Sprintf("saving index to %s...", target), verbose) file, err := os.Create(target) checkResult(err) defer file.Close() gr := gzip.NewWriter(file) defer gr.Close() encoder := gob.NewEncoder(gr) err = encoder.Encode(source.Names) checkResult(err) logm("INFO", fmt.Sprintf("%v sequence names saved", len(source.Names)), verbose) err = encoder.Encode(source.SeedSize) checkResult(err) err = encoder.Encode(source.Cgst) checkResult(err) // save the index, but go has a size limit indexSize := len(source.Index) err = encoder.Encode(indexSize) checkResult(err) logm("INFO", fmt.Sprintf("%v queries to save...", indexSize), verbose) count := 0 for key, value := range source.Index { err = encoder.Encode(key) checkResult(err) err = encoder.Encode(value) checkResult(err) count++ if count%10000 == 0 { logm("INFO", fmt.Sprintf("processing: saved %v items", count), false) } } logm("INFO", fmt.Sprintf("saving index to %s: done", target), verbose) } func LoadIndex(source string, verbose bool) QueryList { logm("INFO", fmt.Sprintf("loading index from %s...", source), verbose) var result QueryList // open fa.gz file file, err := os.Open(source) checkResult(err) defer file.Close() gr, err := gzip.NewReader(file) checkResult(err) defer gr.Close() decoder := gob.NewDecoder(gr) err = decoder.Decode(&result.Names) checkResult(err) logm("INFO", fmt.Sprintf("%v sequence names restored", len(result.Names)), verbose) err = decoder.Decode(&result.SeedSize) checkResult(err) err = decoder.Decode(&result.Cgst) checkResult(err) var indexSize int err = decoder.Decode(&indexSize) checkResult(err) result.Index = make(QueryIndex) count := 0 for i := 0; i < indexSize; i++ { var key uint32 var val []QueryPos err = decoder.Decode(&key) checkResult(err) err = 
decoder.Decode(&val) checkResult(err) result.Index[key] = val count++ if count%10000 == 0 { logm("INFO", fmt.Sprintf("processing: loaded %v items", count), false) } // logm("DEBUG", fmt.Sprintf("last key: %v, values: %v", key, len(val)), verbose) } logm("INFO", fmt.Sprintf("loading index from %s - loaded %v: done", source, len(result.Index)), verbose) return result } // FindAlleles finds alleles that match a genome and generates cgst information func FindAlleles(db QueryList, mismatches int, genomes []string, verbose bool) { logm("INFO", "find alleles...", verbose) // genotype each genome - list matching sequences fmt.Fprintf(os.Stdout, "Filename\tcgST\t%v\n", strings.Join(db.Cgst.GeneNames, "\t")) var lines int var sequenceCount int var content *bytes.Buffer for _, genomeFilename := range genomes { logm("INFO", fmt.Sprintf("processing genome: %s", genomeFilename), verbose) var result GenomeAlleleResult = make(GenomeAlleleResult) file, err := os.Open(genomeFilename) checkResult(err) defer file.Close() lines = 0 sequenceCount = 0 content = new(bytes.Buffer) r := bufio.NewReader(file) scanner := bufio.NewScanner(r) for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, ">") { if content != nil { searchSequence(content.String(), db, result, verbose, false) searchSequence(string(reverseComplement(content)), db, result, verbose, true) } sequenceCount++ content = new(bytes.Buffer) } else { (*content).WriteString(line) } lines++ } searchSequence(content.String(), db, result, verbose, false) searchSequence(string(reverseComplement(content)), db, result, verbose, true) logm("INFO", fmt.Sprintf("done genome: %s. %v lines. 
%v sequences.", genomeFilename, lines, sequenceCount), verbose) writeResults(genomeFilename, result, db.Cgst) } logm("INFO", "find alleles: done", verbose) } func writeResults(filename string, result GenomeAlleleResult, cgst CGST) { genomeAlleles := make([]string, 0, len(cgst.GeneNames)) for _, gene := range cgst.GeneNames { alleles, ok := result[GeneName(gene)] if ok { genomeAlleles = append(genomeAlleles, joinAlleles(alleles)) } else { genomeAlleles = append(genomeAlleles, "N") } } // see if it matches a cgstId alleles := strings.Join(genomeAlleles, "\t") /* cgstId, ok := cgst.FindFirst(genomeAlleles) if !ok { cgstId = NO_CGST } */ cgstId, matches := cgst.FindBest(genomeAlleles) if matches == len(alleles) { fmt.Fprintf(os.Stdout, "%s\t%s\t%s\n", filename, cgstId, alleles) // filename, cgST, alleles } else { fmt.Fprintf(os.Stdout, "%s\t%s (%v/%v)\t%s\n", filename, cgstId, matches, len(alleles), alleles) // filename, cgST, alleles } }
random_line_split
lib.rs
mod business_logic_layer; mod data_access_layer; mod entities; use crate::business_logic_layer as bll; pub use crate::data_access_layer::MAX_DATAGRAM_SIZE; use crate::data_access_layer::{TypedClientSocket, TypedServerSocket}; pub use crate::entities::Exception; use std::collections::HashMap; use std::net::{SocketAddr, ToSocketAddrs}; use std::time::Duration; #[derive(Debug)] ///Events from server. pub enum ServerEvent { ///Error on read data from socket. ExceptionOnRecv(Exception), ///Error on write data to socket. ExceptionOnSend((SocketAddr, Exception)), } pub type ContinueRunning = bool; ///Game to use with server must implement this trait. pub trait Game { /// delta_time: time elapsed from last call. /// command: ordered commands commands from server. /// from: Address of command sender. /// Returns bool value indicating /// should server continue running if false stops server. /// Called only when new commands come to server. /// Commands ordered and with some guarantees. fn handle_command( &mut self, delta_time: Duration, commands: Vec<Vec<u8>>, from: SocketAddr, ) -> ContinueRunning; ///Gets new state to send to client. /// delta_time: time elapsed throw last call. /// Returns bytes with new game state for client. /// Called once in about 30 milliseconds. /// Sends state only to clients connected to server. ///Ordered and without some guarantees. /// If returns empty Vec<u8> then server skips sending it and go to next iteration fn draw(&mut self, delta_time: Duration) -> Vec<u8>; ///Allow client with this IP Address work with server. /// If false server don't send new state to this client. /// Usually don't implement this method. Use default implementation. fn allow_connect(&mut self, _from: &SocketAddr) -> bool { true } ///Handles events from server. /// Returns bool value. /// If returns false stops server. /// Usually don't implement this method. Use default implementation. 
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning { true } ///Client to add to recv state from server. /// If returns not None then servers on draw sends new state to this client. /// If client with this IP Address already connected then nothing happens. /// Usually don't implement this method. Use default implementation. fn add_client(&mut self) -> Option<SocketAddr> { None } ///Disconnect this client from server and don't send new state to them. /// Usually don't implement this method. Use default implementation. fn remove_client(&mut self) -> Option<SocketAddr> { None } } /// Client used to communicate with [`GameServer`]. Must be singleton in your app. pub struct ClientSocket { socket: TypedClientSocket, client: bll::Client, } impl ClientSocket { ///Create new client and listen on port to recv packets from server_address and send its to them. pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> { Ok(ClientSocket { socket: TypedClientSocket::new(port, server_address)?, client: bll::Client::new(), }) } ///Send data to server /// Don't block current thread /// may wait up to 30 milliseconds if you send commands too often ///Commands ordered and with some guarantees. pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> { let command = self.client.send(command); self.socket.write(&command) } ///Reads data from server. /// Don't block current thread. /// Return [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available. ///Data ordered and without some guarantees. 
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> { let state = self.socket.read()?; let (state, lost) = self.client.recv(state)?; for command in lost { self.socket.write(&command)?; } Ok(state) } } struct ServerSocket { socket: TypedServerSocket, servers: HashMap<SocketAddr, bll::Server>, } impl ServerSocket { pub fn new(port: u16) -> Result<ServerSocket, Exception> { Ok(ServerSocket { socket: TypedServerSocket::new(port)?, servers: HashMap::new(), }) } pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> { let (command, from) = self.socket.read()?; self.add(&from); let command = self.servers.get_mut(&from).unwrap().recv(command)?; Ok((command, from)) } pub fn
(&mut self, client: &SocketAddr) { self.servers.remove(&client); } pub fn add(&mut self, client: &SocketAddr) { if !self.servers.contains_key(client) { self.servers.insert(client.clone(), bll::Server::new()); } } pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> { let mut exceptions = Vec::new(); for (a, s) in &mut self.servers { let _ = self .socket .write(a, &s.send(state.clone())) .map_err(|e| exceptions.push((*a, e))); } exceptions } } const DRAW_PERIOD_IN_MILLIS: u64 = 30; ///Game server to run [`Game`] pub struct GameServer<T: Game> { game: T, socket: ServerSocket, is_running: bool, draw_timer: bll::timer::WaitTimer, update_timer: bll::timer::ElapsedTimer, after_draw_elapsed_timer: bll::timer::ElapsedTimer, } impl<T: Game> GameServer<T> { ///Crates new server listening port pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> { Ok(GameServer { game, socket: ServerSocket::new(port)?, is_running: true, draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS), update_timer: bll::timer::ElapsedTimer::new(), after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(), }) } ///Runs game update - draw circle. /// Blocks current thread. 
pub fn run(&mut self) { while self.is_running { self.update(); self.draw() } } fn draw(&mut self) { if self.draw_timer.continue_execution() { let state = self.game.draw(self.after_draw_elapsed_timer.elapsed()); if state.is_empty() { return; } self.game.add_client().map(|a| self.socket.add(&a)); self.game.remove_client().map(|a| self.socket.remove(&a)); self.is_running &= self .socket .send_to_all(state) .into_iter() .map(|ex| { self.game .handle_server_event(ServerEvent::ExceptionOnSend(ex)) }) .all(|b| b); } } fn update(&mut self) { let _ = self .socket .recv() .map(|(commands, from)| { if self.game.allow_connect(&from) { self.is_running &= self.game .handle_command(self.update_timer.elapsed(), commands, from); } else { self.socket.remove(&from); } }) .map_err(|e| { self.is_running &= self .game .handle_server_event(ServerEvent::ExceptionOnRecv(e)) }); } } //trait Game { // fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>; //} // //struct GameProxy { // game: std::sync::Arc<std::sync::Mutex<Game>> //} // //impl GameProxy { // fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy { //// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap(); //// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap(); // GameProxy { game } // } // // fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> { // let mut game = self.game.lock().unwrap(); // game.update(delta_time, commands, from_address) // } //} ///// Client used to communicate with server. 
Must be singleton in your app //pub struct Client { // commands: mpsc::Sender<Vec<u8>>, // states: mpsc::Receiver<Vec<u8>>, //} // //impl Client { // ///Create new client and listen on port to recv packets from server_address and send its to them // pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> { // let mut client = crate::business_logic_layer::Client::new(port, server_address)?; // crate::data_access_layer::logger::init(LevelFilter::Info)?; // let (tx, rx) = Client::run_process(client); // Ok(Client { commands: tx, states: rx }) // } // // fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) { // let (tx1, rx1) = mpsc::channel(); // let (tx2, rx2) = mpsc::channel(); // thread::spawn(move || { // const SEND_TIMEOUT: time::Duration = time::Duration::from_millis(30); // let mut timer = time::Instant::now(); // loop { // if timer.elapsed() > SEND_TIMEOUT { // timer = time::Instant::now(); // match rx1.try_recv() { // Ok(b) => client.send(b).map_err(|e| error!("{}", e)), // Err(mpsc::TryRecvError::Disconnected) => break, // Err(e) => Err(error!("{}", e)), // }; // }; // client.recv() // .map_err(|e|error!("{}",e)) // .and_then(|b| tx2.send(b) // .map_err(|e|error!("{}",e))); // // } // }); // (tx1, rx2) // } // // ///Send data to server // /// Don't block current thread // pub fn send(&self, command: Vec<u8>) { // self.commands.send(command).map_err(|e| error!("{}", e)); // } // // ///Reads data fro server // /// Don't block current thread // /// Return None if there is no data available // pub fn recv(&self) -> Option<Vec<u8>> { // self.states.try_recv().ok() // } //} //#[cfg(test)] //mod tests { // #[test] // fn it_works() { // assert_eq!(1, 1); // } //}
remove
identifier_name
lib.rs
mod business_logic_layer; mod data_access_layer; mod entities; use crate::business_logic_layer as bll; pub use crate::data_access_layer::MAX_DATAGRAM_SIZE; use crate::data_access_layer::{TypedClientSocket, TypedServerSocket}; pub use crate::entities::Exception; use std::collections::HashMap; use std::net::{SocketAddr, ToSocketAddrs}; use std::time::Duration; #[derive(Debug)] ///Events from server. pub enum ServerEvent { ///Error on read data from socket. ExceptionOnRecv(Exception), ///Error on write data to socket. ExceptionOnSend((SocketAddr, Exception)), } pub type ContinueRunning = bool; ///Game to use with server must implement this trait. pub trait Game { /// delta_time: time elapsed from last call. /// command: ordered commands commands from server. /// from: Address of command sender. /// Returns bool value indicating /// should server continue running if false stops server. /// Called only when new commands come to server. /// Commands ordered and with some guarantees. fn handle_command( &mut self, delta_time: Duration, commands: Vec<Vec<u8>>, from: SocketAddr, ) -> ContinueRunning; ///Gets new state to send to client. /// delta_time: time elapsed throw last call. /// Returns bytes with new game state for client. /// Called once in about 30 milliseconds. /// Sends state only to clients connected to server. ///Ordered and without some guarantees. /// If returns empty Vec<u8> then server skips sending it and go to next iteration fn draw(&mut self, delta_time: Duration) -> Vec<u8>; ///Allow client with this IP Address work with server. /// If false server don't send new state to this client. /// Usually don't implement this method. Use default implementation. fn allow_connect(&mut self, _from: &SocketAddr) -> bool { true } ///Handles events from server. /// Returns bool value. /// If returns false stops server. /// Usually don't implement this method. Use default implementation. 
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning { true } ///Client to add to recv state from server. /// If returns not None then servers on draw sends new state to this client. /// If client with this IP Address already connected then nothing happens. /// Usually don't implement this method. Use default implementation. fn add_client(&mut self) -> Option<SocketAddr> { None } ///Disconnect this client from server and don't send new state to them. /// Usually don't implement this method. Use default implementation. fn remove_client(&mut self) -> Option<SocketAddr> { None } } /// Client used to communicate with [`GameServer`]. Must be singleton in your app. pub struct ClientSocket { socket: TypedClientSocket, client: bll::Client, } impl ClientSocket { ///Create new client and listen on port to recv packets from server_address and send its to them. pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> { Ok(ClientSocket { socket: TypedClientSocket::new(port, server_address)?, client: bll::Client::new(), }) } ///Send data to server /// Don't block current thread /// may wait up to 30 milliseconds if you send commands too often ///Commands ordered and with some guarantees. pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> { let command = self.client.send(command); self.socket.write(&command) } ///Reads data from server. /// Don't block current thread. /// Return [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available. ///Data ordered and without some guarantees. pub fn recv(&mut self) -> Result<Vec<u8>, Exception>
} struct ServerSocket { socket: TypedServerSocket, servers: HashMap<SocketAddr, bll::Server>, } impl ServerSocket { pub fn new(port: u16) -> Result<ServerSocket, Exception> { Ok(ServerSocket { socket: TypedServerSocket::new(port)?, servers: HashMap::new(), }) } pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> { let (command, from) = self.socket.read()?; self.add(&from); let command = self.servers.get_mut(&from).unwrap().recv(command)?; Ok((command, from)) } pub fn remove(&mut self, client: &SocketAddr) { self.servers.remove(&client); } pub fn add(&mut self, client: &SocketAddr) { if !self.servers.contains_key(client) { self.servers.insert(client.clone(), bll::Server::new()); } } pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> { let mut exceptions = Vec::new(); for (a, s) in &mut self.servers { let _ = self .socket .write(a, &s.send(state.clone())) .map_err(|e| exceptions.push((*a, e))); } exceptions } } const DRAW_PERIOD_IN_MILLIS: u64 = 30; ///Game server to run [`Game`] pub struct GameServer<T: Game> { game: T, socket: ServerSocket, is_running: bool, draw_timer: bll::timer::WaitTimer, update_timer: bll::timer::ElapsedTimer, after_draw_elapsed_timer: bll::timer::ElapsedTimer, } impl<T: Game> GameServer<T> { ///Crates new server listening port pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> { Ok(GameServer { game, socket: ServerSocket::new(port)?, is_running: true, draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS), update_timer: bll::timer::ElapsedTimer::new(), after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(), }) } ///Runs game update - draw circle. /// Blocks current thread. 
pub fn run(&mut self) { while self.is_running { self.update(); self.draw() } } fn draw(&mut self) { if self.draw_timer.continue_execution() { let state = self.game.draw(self.after_draw_elapsed_timer.elapsed()); if state.is_empty() { return; } self.game.add_client().map(|a| self.socket.add(&a)); self.game.remove_client().map(|a| self.socket.remove(&a)); self.is_running &= self .socket .send_to_all(state) .into_iter() .map(|ex| { self.game .handle_server_event(ServerEvent::ExceptionOnSend(ex)) }) .all(|b| b); } } fn update(&mut self) { let _ = self .socket .recv() .map(|(commands, from)| { if self.game.allow_connect(&from) { self.is_running &= self.game .handle_command(self.update_timer.elapsed(), commands, from); } else { self.socket.remove(&from); } }) .map_err(|e| { self.is_running &= self .game .handle_server_event(ServerEvent::ExceptionOnRecv(e)) }); } } //trait Game { // fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>; //} // //struct GameProxy { // game: std::sync::Arc<std::sync::Mutex<Game>> //} // //impl GameProxy { // fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy { //// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap(); //// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap(); // GameProxy { game } // } // // fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> { // let mut game = self.game.lock().unwrap(); // game.update(delta_time, commands, from_address) // } //} ///// Client used to communicate with server. 
Must be singleton in your app //pub struct Client { // commands: mpsc::Sender<Vec<u8>>, // states: mpsc::Receiver<Vec<u8>>, //} // //impl Client { // ///Create new client and listen on port to recv packets from server_address and send its to them // pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> { // let mut client = crate::business_logic_layer::Client::new(port, server_address)?; // crate::data_access_layer::logger::init(LevelFilter::Info)?; // let (tx, rx) = Client::run_process(client); // Ok(Client { commands: tx, states: rx }) // } // // fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) { // let (tx1, rx1) = mpsc::channel(); // let (tx2, rx2) = mpsc::channel(); // thread::spawn(move || { // const SEND_TIMEOUT: time::Duration = time::Duration::from_millis(30); // let mut timer = time::Instant::now(); // loop { // if timer.elapsed() > SEND_TIMEOUT { // timer = time::Instant::now(); // match rx1.try_recv() { // Ok(b) => client.send(b).map_err(|e| error!("{}", e)), // Err(mpsc::TryRecvError::Disconnected) => break, // Err(e) => Err(error!("{}", e)), // }; // }; // client.recv() // .map_err(|e|error!("{}",e)) // .and_then(|b| tx2.send(b) // .map_err(|e|error!("{}",e))); // // } // }); // (tx1, rx2) // } // // ///Send data to server // /// Don't block current thread // pub fn send(&self, command: Vec<u8>) { // self.commands.send(command).map_err(|e| error!("{}", e)); // } // // ///Reads data fro server // /// Don't block current thread // /// Return None if there is no data available // pub fn recv(&self) -> Option<Vec<u8>> { // self.states.try_recv().ok() // } //} //#[cfg(test)] //mod tests { // #[test] // fn it_works() { // assert_eq!(1, 1); // } //}
{ let state = self.socket.read()?; let (state, lost) = self.client.recv(state)?; for command in lost { self.socket.write(&command)?; } Ok(state) }
identifier_body
lib.rs
mod business_logic_layer; mod data_access_layer; mod entities; use crate::business_logic_layer as bll; pub use crate::data_access_layer::MAX_DATAGRAM_SIZE; use crate::data_access_layer::{TypedClientSocket, TypedServerSocket}; pub use crate::entities::Exception; use std::collections::HashMap; use std::net::{SocketAddr, ToSocketAddrs}; use std::time::Duration; #[derive(Debug)] ///Events from server. pub enum ServerEvent { ///Error on read data from socket. ExceptionOnRecv(Exception), ///Error on write data to socket. ExceptionOnSend((SocketAddr, Exception)), } pub type ContinueRunning = bool; ///Game to use with server must implement this trait. pub trait Game { /// delta_time: time elapsed from last call. /// command: ordered commands commands from server. /// from: Address of command sender. /// Returns bool value indicating /// should server continue running if false stops server. /// Called only when new commands come to server. /// Commands ordered and with some guarantees. fn handle_command( &mut self, delta_time: Duration, commands: Vec<Vec<u8>>, from: SocketAddr, ) -> ContinueRunning; ///Gets new state to send to client. /// delta_time: time elapsed throw last call. /// Returns bytes with new game state for client. /// Called once in about 30 milliseconds. /// Sends state only to clients connected to server. ///Ordered and without some guarantees. /// If returns empty Vec<u8> then server skips sending it and go to next iteration fn draw(&mut self, delta_time: Duration) -> Vec<u8>; ///Allow client with this IP Address work with server. /// If false server don't send new state to this client. /// Usually don't implement this method. Use default implementation. fn allow_connect(&mut self, _from: &SocketAddr) -> bool { true } ///Handles events from server. /// Returns bool value. /// If returns false stops server. /// Usually don't implement this method. Use default implementation. 
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning { true } ///Client to add to recv state from server. /// If returns not None then servers on draw sends new state to this client. /// If client with this IP Address already connected then nothing happens. /// Usually don't implement this method. Use default implementation. fn add_client(&mut self) -> Option<SocketAddr> { None } ///Disconnect this client from server and don't send new state to them. /// Usually don't implement this method. Use default implementation. fn remove_client(&mut self) -> Option<SocketAddr> { None } } /// Client used to communicate with [`GameServer`]. Must be singleton in your app. pub struct ClientSocket { socket: TypedClientSocket, client: bll::Client, } impl ClientSocket { ///Create new client and listen on port to recv packets from server_address and send its to them. pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> { Ok(ClientSocket { socket: TypedClientSocket::new(port, server_address)?, client: bll::Client::new(), }) } ///Send data to server /// Don't block current thread /// may wait up to 30 milliseconds if you send commands too often ///Commands ordered and with some guarantees. pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> { let command = self.client.send(command); self.socket.write(&command) } ///Reads data from server. /// Don't block current thread. /// Return [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available. ///Data ordered and without some guarantees. 
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> { let state = self.socket.read()?; let (state, lost) = self.client.recv(state)?; for command in lost { self.socket.write(&command)?; } Ok(state) } } struct ServerSocket { socket: TypedServerSocket, servers: HashMap<SocketAddr, bll::Server>, } impl ServerSocket { pub fn new(port: u16) -> Result<ServerSocket, Exception> { Ok(ServerSocket { socket: TypedServerSocket::new(port)?, servers: HashMap::new(), }) } pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> { let (command, from) = self.socket.read()?; self.add(&from); let command = self.servers.get_mut(&from).unwrap().recv(command)?; Ok((command, from)) } pub fn remove(&mut self, client: &SocketAddr) { self.servers.remove(&client); } pub fn add(&mut self, client: &SocketAddr) { if !self.servers.contains_key(client)
} pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> { let mut exceptions = Vec::new(); for (a, s) in &mut self.servers { let _ = self .socket .write(a, &s.send(state.clone())) .map_err(|e| exceptions.push((*a, e))); } exceptions } } const DRAW_PERIOD_IN_MILLIS: u64 = 30; ///Game server to run [`Game`] pub struct GameServer<T: Game> { game: T, socket: ServerSocket, is_running: bool, draw_timer: bll::timer::WaitTimer, update_timer: bll::timer::ElapsedTimer, after_draw_elapsed_timer: bll::timer::ElapsedTimer, } impl<T: Game> GameServer<T> { ///Crates new server listening port pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> { Ok(GameServer { game, socket: ServerSocket::new(port)?, is_running: true, draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS), update_timer: bll::timer::ElapsedTimer::new(), after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(), }) } ///Runs game update - draw circle. /// Blocks current thread. pub fn run(&mut self) { while self.is_running { self.update(); self.draw() } } fn draw(&mut self) { if self.draw_timer.continue_execution() { let state = self.game.draw(self.after_draw_elapsed_timer.elapsed()); if state.is_empty() { return; } self.game.add_client().map(|a| self.socket.add(&a)); self.game.remove_client().map(|a| self.socket.remove(&a)); self.is_running &= self .socket .send_to_all(state) .into_iter() .map(|ex| { self.game .handle_server_event(ServerEvent::ExceptionOnSend(ex)) }) .all(|b| b); } } fn update(&mut self) { let _ = self .socket .recv() .map(|(commands, from)| { if self.game.allow_connect(&from) { self.is_running &= self.game .handle_command(self.update_timer.elapsed(), commands, from); } else { self.socket.remove(&from); } }) .map_err(|e| { self.is_running &= self .game .handle_server_event(ServerEvent::ExceptionOnRecv(e)) }); } } //trait Game { // fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>; //} // 
//struct GameProxy { // game: std::sync::Arc<std::sync::Mutex<Game>> //} // //impl GameProxy { // fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy { //// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap(); //// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap(); // GameProxy { game } // } // // fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> { // let mut game = self.game.lock().unwrap(); // game.update(delta_time, commands, from_address) // } //} ///// Client used to communicate with server. Must be singleton in your app //pub struct Client { // commands: mpsc::Sender<Vec<u8>>, // states: mpsc::Receiver<Vec<u8>>, //} // //impl Client { // ///Create new client and listen on port to recv packets from server_address and send its to them // pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> { // let mut client = crate::business_logic_layer::Client::new(port, server_address)?; // crate::data_access_layer::logger::init(LevelFilter::Info)?; // let (tx, rx) = Client::run_process(client); // Ok(Client { commands: tx, states: rx }) // } // // fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) { // let (tx1, rx1) = mpsc::channel(); // let (tx2, rx2) = mpsc::channel(); // thread::spawn(move || { // const SEND_TIMEOUT: time::Duration = time::Duration::from_millis(30); // let mut timer = time::Instant::now(); // loop { // if timer.elapsed() > SEND_TIMEOUT { // timer = time::Instant::now(); // match rx1.try_recv() { // Ok(b) => client.send(b).map_err(|e| error!("{}", e)), // Err(mpsc::TryRecvError::Disconnected) => break, // Err(e) => Err(error!("{}", e)), // }; // }; // client.recv() // .map_err(|e|error!("{}",e)) // .and_then(|b| tx2.send(b) // .map_err(|e|error!("{}",e))); // // } // }); // (tx1, rx2) // } // // ///Send data to server // 
/// Don't block current thread // pub fn send(&self, command: Vec<u8>) { // self.commands.send(command).map_err(|e| error!("{}", e)); // } // // ///Reads data fro server // /// Don't block current thread // /// Return None if there is no data available // pub fn recv(&self) -> Option<Vec<u8>> { // self.states.try_recv().ok() // } //} //#[cfg(test)] //mod tests { // #[test] // fn it_works() { // assert_eq!(1, 1); // } //}
{ self.servers.insert(client.clone(), bll::Server::new()); }
conditional_block
lib.rs
mod business_logic_layer; mod data_access_layer; mod entities; use crate::business_logic_layer as bll; pub use crate::data_access_layer::MAX_DATAGRAM_SIZE; use crate::data_access_layer::{TypedClientSocket, TypedServerSocket}; pub use crate::entities::Exception; use std::collections::HashMap; use std::net::{SocketAddr, ToSocketAddrs}; use std::time::Duration; #[derive(Debug)] ///Events from server. pub enum ServerEvent { ///Error on read data from socket. ExceptionOnRecv(Exception), ///Error on write data to socket. ExceptionOnSend((SocketAddr, Exception)), } pub type ContinueRunning = bool; ///Game to use with server must implement this trait. pub trait Game { /// delta_time: time elapsed from last call. /// command: ordered commands commands from server. /// from: Address of command sender. /// Returns bool value indicating /// should server continue running if false stops server. /// Called only when new commands come to server. /// Commands ordered and with some guarantees. fn handle_command( &mut self, delta_time: Duration, commands: Vec<Vec<u8>>, from: SocketAddr, ) -> ContinueRunning; ///Gets new state to send to client. /// delta_time: time elapsed throw last call. /// Returns bytes with new game state for client. /// Called once in about 30 milliseconds. /// Sends state only to clients connected to server. ///Ordered and without some guarantees. /// If returns empty Vec<u8> then server skips sending it and go to next iteration fn draw(&mut self, delta_time: Duration) -> Vec<u8>; ///Allow client with this IP Address work with server. /// If false server don't send new state to this client. /// Usually don't implement this method. Use default implementation. fn allow_connect(&mut self, _from: &SocketAddr) -> bool { true } ///Handles events from server. /// Returns bool value. /// If returns false stops server. /// Usually don't implement this method. Use default implementation. 
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning { true } ///Client to add to recv state from server. /// If returns not None then servers on draw sends new state to this client. /// If client with this IP Address already connected then nothing happens. /// Usually don't implement this method. Use default implementation. fn add_client(&mut self) -> Option<SocketAddr> { None } ///Disconnect this client from server and don't send new state to them. /// Usually don't implement this method. Use default implementation. fn remove_client(&mut self) -> Option<SocketAddr> { None } } /// Client used to communicate with [`GameServer`]. Must be singleton in your app. pub struct ClientSocket { socket: TypedClientSocket, client: bll::Client, } impl ClientSocket { ///Create new client and listen on port to recv packets from server_address and send its to them. pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> { Ok(ClientSocket { socket: TypedClientSocket::new(port, server_address)?, client: bll::Client::new(), }) } ///Send data to server /// Don't block current thread /// may wait up to 30 milliseconds if you send commands too often ///Commands ordered and with some guarantees. pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> { let command = self.client.send(command); self.socket.write(&command) } ///Reads data from server. /// Don't block current thread. /// Return [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available. ///Data ordered and without some guarantees. 
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> { let state = self.socket.read()?; let (state, lost) = self.client.recv(state)?; for command in lost { self.socket.write(&command)?; } Ok(state) } } struct ServerSocket { socket: TypedServerSocket, servers: HashMap<SocketAddr, bll::Server>, } impl ServerSocket { pub fn new(port: u16) -> Result<ServerSocket, Exception> { Ok(ServerSocket { socket: TypedServerSocket::new(port)?, servers: HashMap::new(), }) } pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> { let (command, from) = self.socket.read()?; self.add(&from); let command = self.servers.get_mut(&from).unwrap().recv(command)?; Ok((command, from)) } pub fn remove(&mut self, client: &SocketAddr) { self.servers.remove(&client); } pub fn add(&mut self, client: &SocketAddr) { if !self.servers.contains_key(client) { self.servers.insert(client.clone(), bll::Server::new()); } } pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> { let mut exceptions = Vec::new(); for (a, s) in &mut self.servers {
exceptions } } const DRAW_PERIOD_IN_MILLIS: u64 = 30; ///Game server to run [`Game`] pub struct GameServer<T: Game> { game: T, socket: ServerSocket, is_running: bool, draw_timer: bll::timer::WaitTimer, update_timer: bll::timer::ElapsedTimer, after_draw_elapsed_timer: bll::timer::ElapsedTimer, } impl<T: Game> GameServer<T> { ///Crates new server listening port pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> { Ok(GameServer { game, socket: ServerSocket::new(port)?, is_running: true, draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS), update_timer: bll::timer::ElapsedTimer::new(), after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(), }) } ///Runs game update - draw circle. /// Blocks current thread. pub fn run(&mut self) { while self.is_running { self.update(); self.draw() } } fn draw(&mut self) { if self.draw_timer.continue_execution() { let state = self.game.draw(self.after_draw_elapsed_timer.elapsed()); if state.is_empty() { return; } self.game.add_client().map(|a| self.socket.add(&a)); self.game.remove_client().map(|a| self.socket.remove(&a)); self.is_running &= self .socket .send_to_all(state) .into_iter() .map(|ex| { self.game .handle_server_event(ServerEvent::ExceptionOnSend(ex)) }) .all(|b| b); } } fn update(&mut self) { let _ = self .socket .recv() .map(|(commands, from)| { if self.game.allow_connect(&from) { self.is_running &= self.game .handle_command(self.update_timer.elapsed(), commands, from); } else { self.socket.remove(&from); } }) .map_err(|e| { self.is_running &= self .game .handle_server_event(ServerEvent::ExceptionOnRecv(e)) }); } } //trait Game { // fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>; //} // //struct GameProxy { // game: std::sync::Arc<std::sync::Mutex<Game>> //} // //impl GameProxy { // fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy { //// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", 
"sdfsf").unwrap(); //// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap(); // GameProxy { game } // } // // fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> { // let mut game = self.game.lock().unwrap(); // game.update(delta_time, commands, from_address) // } //} ///// Client used to communicate with server. Must be singleton in your app //pub struct Client { // commands: mpsc::Sender<Vec<u8>>, // states: mpsc::Receiver<Vec<u8>>, //} // //impl Client { // ///Create new client and listen on port to recv packets from server_address and send its to them // pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> { // let mut client = crate::business_logic_layer::Client::new(port, server_address)?; // crate::data_access_layer::logger::init(LevelFilter::Info)?; // let (tx, rx) = Client::run_process(client); // Ok(Client { commands: tx, states: rx }) // } // // fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) { // let (tx1, rx1) = mpsc::channel(); // let (tx2, rx2) = mpsc::channel(); // thread::spawn(move || { // const SEND_TIMEOUT: time::Duration = time::Duration::from_millis(30); // let mut timer = time::Instant::now(); // loop { // if timer.elapsed() > SEND_TIMEOUT { // timer = time::Instant::now(); // match rx1.try_recv() { // Ok(b) => client.send(b).map_err(|e| error!("{}", e)), // Err(mpsc::TryRecvError::Disconnected) => break, // Err(e) => Err(error!("{}", e)), // }; // }; // client.recv() // .map_err(|e|error!("{}",e)) // .and_then(|b| tx2.send(b) // .map_err(|e|error!("{}",e))); // // } // }); // (tx1, rx2) // } // // ///Send data to server // /// Don't block current thread // pub fn send(&self, command: Vec<u8>) { // self.commands.send(command).map_err(|e| error!("{}", e)); // } // // ///Reads data fro server // /// Don't block current thread // /// Return None if there is no data 
available // pub fn recv(&self) -> Option<Vec<u8>> { // self.states.try_recv().ok() // } //} //#[cfg(test)] //mod tests { // #[test] // fn it_works() { // assert_eq!(1, 1); // } //}
let _ = self .socket .write(a, &s.send(state.clone())) .map_err(|e| exceptions.push((*a, e))); }
random_line_split